Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/acl.c | 2
-rw-r--r--  fs/9p/fid.c | 26
-rw-r--r--  fs/9p/fid.h | 13
-rw-r--r--  fs/9p/vfs_addr.c | 1
-rw-r--r--  fs/9p/vfs_inode.c | 14
-rw-r--r--  fs/9p/vfs_inode_dotl.c | 20
-rw-r--r--  fs/9p/xattr.c | 4
-rw-r--r--  fs/Kconfig | 9
-rw-r--r--  fs/Kconfig.binfmt | 3
-rw-r--r--  fs/Makefile | 1
-rw-r--r--  fs/adfs/dir.c | 8
-rw-r--r--  fs/affs/amigaffs.c | 4
-rw-r--r--  fs/affs/namei.c | 20
-rw-r--r--  fs/afs/rxrpc.c | 34
-rw-r--r--  fs/attr.c | 19
-rw-r--r--  fs/autofs4/root.c | 8
-rw-r--r--  fs/autofs4/waitq.c | 6
-rw-r--r--  fs/binfmt_elf.c | 34
-rw-r--r--  fs/binfmt_elf_fdpic.c | 38
-rw-r--r--  fs/binfmt_em86.c | 3
-rw-r--r--  fs/binfmt_flat.c | 525
-rw-r--r--  fs/binfmt_misc.c | 53
-rw-r--r--  fs/block_dev.c | 30
-rw-r--r--  fs/btrfs/acl.c | 3
-rw-r--r--  fs/btrfs/async-thread.c | 31
-rw-r--r--  fs/btrfs/async-thread.h | 6
-rw-r--r--  fs/btrfs/backref.c | 4
-rw-r--r--  fs/btrfs/check-integrity.c | 61
-rw-r--r--  fs/btrfs/check-integrity.h | 6
-rw-r--r--  fs/btrfs/compression.c | 27
-rw-r--r--  fs/btrfs/ctree.c | 91
-rw-r--r--  fs/btrfs/ctree.h | 118
-rw-r--r--  fs/btrfs/dedupe.h | 24
-rw-r--r--  fs/btrfs/delayed-inode.c | 72
-rw-r--r--  fs/btrfs/delayed-ref.c | 44
-rw-r--r--  fs/btrfs/delayed-ref.h | 3
-rw-r--r--  fs/btrfs/dev-replace.c | 4
-rw-r--r--  fs/btrfs/disk-io.c | 144
-rw-r--r--  fs/btrfs/disk-io.h | 5
-rw-r--r--  fs/btrfs/extent-tree.c | 857
-rw-r--r--  fs/btrfs/extent_io.c | 130
-rw-r--r--  fs/btrfs/extent_io.h | 8
-rw-r--r--  fs/btrfs/extent_map.c | 2
-rw-r--r--  fs/btrfs/file-item.c | 4
-rw-r--r--  fs/btrfs/file.c | 36
-rw-r--r--  fs/btrfs/free-space-cache.c | 8
-rw-r--r--  fs/btrfs/free-space-tree.c | 16
-rw-r--r--  fs/btrfs/inode-map.c | 16
-rw-r--r--  fs/btrfs/inode.c | 341
-rw-r--r--  fs/btrfs/ioctl.c | 40
-rw-r--r--  fs/btrfs/ordered-data.c | 2
-rw-r--r--  fs/btrfs/props.c | 6
-rw-r--r--  fs/btrfs/qgroup.c | 25
-rw-r--r--  fs/btrfs/qgroup.h | 9
-rw-r--r--  fs/btrfs/raid56.c | 17
-rw-r--r--  fs/btrfs/relocation.c | 65
-rw-r--r--  fs/btrfs/root-tree.c | 10
-rw-r--r--  fs/btrfs/scrub.c | 27
-rw-r--r--  fs/btrfs/send.c | 173
-rw-r--r--  fs/btrfs/super.c | 218
-rw-r--r--  fs/btrfs/sysfs.c | 2
-rw-r--r--  fs/btrfs/tests/btrfs-tests.c | 67
-rw-r--r--  fs/btrfs/tests/btrfs-tests.h | 36
-rw-r--r--  fs/btrfs/tests/extent-buffer-tests.c | 23
-rw-r--r--  fs/btrfs/tests/free-space-tests.c | 14
-rw-r--r--  fs/btrfs/tests/free-space-tree-tests.c | 18
-rw-r--r--  fs/btrfs/tests/inode-tests.c | 46
-rw-r--r--  fs/btrfs/tests/qgroup-tests.c | 23
-rw-r--r--  fs/btrfs/transaction.c | 37
-rw-r--r--  fs/btrfs/transaction.h | 1
-rw-r--r--  fs/btrfs/tree-log.c | 107
-rw-r--r--  fs/btrfs/volumes.c | 233
-rw-r--r--  fs/btrfs/volumes.h | 6
-rw-r--r--  fs/buffer.c | 156
-rw-r--r--  fs/cachefiles/namei.c | 5
-rw-r--r--  fs/cachefiles/proc.c | 1
-rw-r--r--  fs/ceph/addr.c | 77
-rw-r--r--  fs/ceph/cache.c | 2
-rw-r--r--  fs/ceph/caps.c | 874
-rw-r--r--  fs/ceph/dir.c | 73
-rw-r--r--  fs/ceph/file.c | 79
-rw-r--r--  fs/ceph/inode.c | 44
-rw-r--r--  fs/ceph/ioctl.c | 30
-rw-r--r--  fs/ceph/mds_client.c | 361
-rw-r--r--  fs/ceph/mds_client.h | 19
-rw-r--r--  fs/ceph/snap.c | 10
-rw-r--r--  fs/ceph/super.c | 43
-rw-r--r--  fs/ceph/super.h | 48
-rw-r--r--  fs/ceph/xattr.c | 101
-rw-r--r--  fs/char_dev.c | 2
-rw-r--r--  fs/cifs/cifs_debug.c | 7
-rw-r--r--  fs/cifs/cifs_fs_sb.h | 4
-rw-r--r--  fs/cifs/cifsencrypt.c | 16
-rw-r--r--  fs/cifs/cifsfs.c | 14
-rw-r--r--  fs/cifs/connect.c | 53
-rw-r--r--  fs/cifs/dir.c | 52
-rw-r--r--  fs/cifs/file.c | 2
-rw-r--r--  fs/cifs/inode.c | 22
-rw-r--r--  fs/cifs/smb2ops.c | 32
-rw-r--r--  fs/coda/pioctl.c | 1
-rw-r--r--  fs/compat_ioctl.c | 12
-rw-r--r--  fs/configfs/file.c | 6
-rw-r--r--  fs/crypto/crypto.c | 3
-rw-r--r--  fs/dax.c | 86
-rw-r--r--  fs/dcache.c | 280
-rw-r--r--  fs/debugfs/inode.c | 7
-rw-r--r--  fs/devpts/inode.c | 3
-rw-r--r--  fs/direct-io.c | 35
-rw-r--r--  fs/dlm/config.c | 7
-rw-r--r--  fs/dlm/config.h | 1
-rw-r--r--  fs/dlm/dlm_internal.h | 10
-rw-r--r--  fs/dlm/lowcomms.c | 3
-rw-r--r--  fs/efivarfs/super.c | 7
-rw-r--r--  fs/exec.c | 44
-rw-r--r--  fs/exofs/ore.c | 2
-rw-r--r--  fs/ext2/balloc.c | 21
-rw-r--r--  fs/ext2/dir.c | 6
-rw-r--r--  fs/ext2/ext2.h | 7
-rw-r--r--  fs/ext2/file.c | 4
-rw-r--r--  fs/ext2/inode.c | 10
-rw-r--r--  fs/ext2/xattr.c | 9
-rw-r--r--  fs/ext4/Kconfig | 12
-rw-r--r--  fs/ext4/Makefile | 2
-rw-r--r--  fs/ext4/balloc.c | 9
-rw-r--r--  fs/ext4/crypto.c | 536
-rw-r--r--  fs/ext4/crypto_fname.c | 468
-rw-r--r--  fs/ext4/crypto_key.c | 274
-rw-r--r--  fs/ext4/crypto_policy.c | 229
-rw-r--r--  fs/ext4/dir.c | 26
-rw-r--r--  fs/ext4/ext4.h | 209
-rw-r--r--  fs/ext4/ext4_crypto.h | 159
-rw-r--r--  fs/ext4/ext4_jbd2.h | 10
-rw-r--r--  fs/ext4/extents.c | 12
-rw-r--r--  fs/ext4/file.c | 14
-rw-r--r--  fs/ext4/fsync.c | 5
-rw-r--r--  fs/ext4/ialloc.c | 9
-rw-r--r--  fs/ext4/inline.c | 14
-rw-r--r--  fs/ext4/inode.c | 89
-rw-r--r--  fs/ext4/ioctl.c | 38
-rw-r--r--  fs/ext4/mballoc.c | 30
-rw-r--r--  fs/ext4/mmp.c | 4
-rw-r--r--  fs/ext4/namei.c | 147
-rw-r--r--  fs/ext4/page-io.c | 20
-rw-r--r--  fs/ext4/readpage.c | 59
-rw-r--r--  fs/ext4/super.c | 164
-rw-r--r--  fs/ext4/symlink.c | 35
-rw-r--r--  fs/ext4/sysfs.c | 1
-rw-r--r--  fs/ext4/xattr.c | 13
-rw-r--r--  fs/f2fs/acl.c | 9
-rw-r--r--  fs/f2fs/acl.h | 2
-rw-r--r--  fs/f2fs/checkpoint.c | 90
-rw-r--r--  fs/f2fs/data.c | 342
-rw-r--r--  fs/f2fs/debug.c | 5
-rw-r--r--  fs/f2fs/dir.c | 125
-rw-r--r--  fs/f2fs/extent_cache.c | 50
-rw-r--r--  fs/f2fs/f2fs.h | 293
-rw-r--r--  fs/f2fs/file.c | 499
-rw-r--r--  fs/f2fs/gc.c | 64
-rw-r--r--  fs/f2fs/inline.c | 95
-rw-r--r--  fs/f2fs/inode.c | 53
-rw-r--r--  fs/f2fs/namei.c | 147
-rw-r--r--  fs/f2fs/node.c | 154
-rw-r--r--  fs/f2fs/node.h | 14
-rw-r--r--  fs/f2fs/recovery.c | 23
-rw-r--r--  fs/f2fs/segment.c | 73
-rw-r--r--  fs/f2fs/segment.h | 22
-rw-r--r--  fs/f2fs/shrinker.c | 5
-rw-r--r--  fs/f2fs/super.c | 135
-rw-r--r--  fs/f2fs/trace.c | 7
-rw-r--r--  fs/f2fs/xattr.c | 20
-rw-r--r--  fs/fat/inode.c | 4
-rw-r--r--  fs/fat/misc.c | 2
-rw-r--r--  fs/fat/namei_msdos.c | 6
-rw-r--r--  fs/fat/namei_vfat.c | 16
-rw-r--r--  fs/freevxfs/Kconfig | 13
-rw-r--r--  fs/freevxfs/vxfs.h | 185
-rw-r--r--  fs/freevxfs/vxfs_bmap.c | 70
-rw-r--r--  fs/freevxfs/vxfs_dir.h | 17
-rw-r--r--  fs/freevxfs/vxfs_extern.h | 10
-rw-r--r--  fs/freevxfs/vxfs_fshead.c | 37
-rw-r--r--  fs/freevxfs/vxfs_fshead.h | 29
-rw-r--r--  fs/freevxfs/vxfs_inode.c | 265
-rw-r--r--  fs/freevxfs/vxfs_inode.h | 146
-rw-r--r--  fs/freevxfs/vxfs_lookup.c | 226
-rw-r--r--  fs/freevxfs/vxfs_olt.c | 15
-rw-r--r--  fs/freevxfs/vxfs_olt.h | 70
-rw-r--r--  fs/freevxfs/vxfs_super.c | 162
-rw-r--r--  fs/fs-writeback.c | 116
-rw-r--r--  fs/fscache/histogram.c | 1
-rw-r--r--  fs/fscache/object-list.c | 1
-rw-r--r--  fs/fscache/stats.c | 1
-rw-r--r--  fs/fuse/dev.c | 32
-rw-r--r--  fs/fuse/dir.c | 9
-rw-r--r--  fs/fuse/file.c | 43
-rw-r--r--  fs/fuse/fuse_i.h | 3
-rw-r--r--  fs/fuse/inode.c | 10
-rw-r--r--  fs/gfs2/aops.c | 49
-rw-r--r--  fs/gfs2/bmap.c | 5
-rw-r--r--  fs/gfs2/dentry.c | 2
-rw-r--r--  fs/gfs2/dir.c | 5
-rw-r--r--  fs/gfs2/export.c | 11
-rw-r--r--  fs/gfs2/file.c | 2
-rw-r--r--  fs/gfs2/glock.c | 11
-rw-r--r--  fs/gfs2/glock.h | 10
-rw-r--r--  fs/gfs2/inode.c | 132
-rw-r--r--  fs/gfs2/inode.h | 4
-rw-r--r--  fs/gfs2/log.c | 8
-rw-r--r--  fs/gfs2/lops.c | 23
-rw-r--r--  fs/gfs2/lops.h | 2
-rw-r--r--  fs/gfs2/main.c | 1
-rw-r--r--  fs/gfs2/meta_io.c | 18
-rw-r--r--  fs/gfs2/ops_fstype.c | 6
-rw-r--r--  fs/gfs2/quota.c | 4
-rw-r--r--  fs/gfs2/recovery.c | 6
-rw-r--r--  fs/gfs2/recovery.h | 4
-rw-r--r--  fs/gfs2/rgrp.c | 21
-rw-r--r--  fs/gfs2/super.c | 24
-rw-r--r--  fs/hfs/catalog.c | 12
-rw-r--r--  fs/hfs/hfs_fs.h | 16
-rw-r--r--  fs/hfs/inode.c | 4
-rw-r--r--  fs/hfs/string.c | 4
-rw-r--r--  fs/hfs/trans.c | 2
-rw-r--r--  fs/hfsplus/catalog.c | 12
-rw-r--r--  fs/hfsplus/hfsplus_fs.h | 15
-rw-r--r--  fs/hfsplus/inode.c | 2
-rw-r--r--  fs/hfsplus/part_tbl.c | 5
-rw-r--r--  fs/hfsplus/super.c | 6
-rw-r--r--  fs/hfsplus/unicode.c | 6
-rw-r--r--  fs/hfsplus/wrapper.c | 15
-rw-r--r--  fs/hostfs/hostfs_kern.c | 7
-rw-r--r--  fs/hpfs/dentry.c | 6
-rw-r--r--  fs/inode.c | 18
-rw-r--r--  fs/internal.h | 5
-rw-r--r--  fs/ioctl.c | 1
-rw-r--r--  fs/iomap.c | 497
-rw-r--r--  fs/isofs/compress.c | 3
-rw-r--r--  fs/isofs/inode.c | 29
-rw-r--r--  fs/isofs/namei.c | 2
-rw-r--r--  fs/jbd2/commit.c | 8
-rw-r--r--  fs/jbd2/journal.c | 15
-rw-r--r--  fs/jbd2/recovery.c | 4
-rw-r--r--  fs/jbd2/transaction.c | 17
-rw-r--r--  fs/jffs2/dir.c | 8
-rw-r--r--  fs/jffs2/readinode.c | 2
-rw-r--r--  fs/jffs2/scan.c | 2
-rw-r--r--  fs/jffs2/summary.c | 2
-rw-r--r--  fs/jffs2/write.c | 4
-rw-r--r--  fs/jfs/jfs_debug.c | 1
-rw-r--r--  fs/jfs/jfs_logmgr.c | 7
-rw-r--r--  fs/jfs/jfs_metapage.c | 11
-rw-r--r--  fs/jfs/jfs_txnmgr.c | 2
-rw-r--r--  fs/jfs/jfs_xtree.c | 1
-rw-r--r--  fs/jfs/namei.c | 4
-rw-r--r--  fs/kernfs/dir.c | 4
-rw-r--r--  fs/kernfs/mount.c | 5
-rw-r--r--  fs/lockd/procfs.c | 1
-rw-r--r--  fs/logfs/dev_bdev.c | 17
-rw-r--r--  fs/logfs/dir.c | 10
-rw-r--r--  fs/mpage.c | 47
-rw-r--r--  fs/namei.c | 137
-rw-r--r--  fs/namespace.c | 99
-rw-r--r--  fs/ncpfs/dir.c | 8
-rw-r--r--  fs/nfs/blocklayout/blocklayout.c | 22
-rw-r--r--  fs/nfs/client.c | 2
-rw-r--r--  fs/nfs/dir.c | 45
-rw-r--r--  fs/nfs/direct.c | 4
-rw-r--r--  fs/nfs/internal.h | 2
-rw-r--r--  fs/nfs/nfs3proc.c | 8
-rw-r--r--  fs/nfs/nfs4_fs.h | 5
-rw-r--r--  fs/nfs/nfs4namespace.c | 4
-rw-r--r--  fs/nfs/nfs4proc.c | 16
-rw-r--r--  fs/nfs/nfs4trace.h | 4
-rw-r--r--  fs/nfs/nfstrace.h | 4
-rw-r--r--  fs/nfs/proc.c | 8
-rw-r--r--  fs/nfs/unlink.c | 2
-rw-r--r--  fs/nfs/write.c | 2
-rw-r--r--  fs/nfsd/Kconfig | 19
-rw-r--r--  fs/nfsd/Makefile | 1
-rw-r--r--  fs/nfsd/blocklayout.c | 3
-rw-r--r--  fs/nfsd/blocklayoutxdr.c | 5
-rw-r--r--  fs/nfsd/export.c | 14
-rw-r--r--  fs/nfsd/export.h | 2
-rw-r--r--  fs/nfsd/flexfilelayout.c | 133
-rw-r--r--  fs/nfsd/flexfilelayoutxdr.c | 115
-rw-r--r--  fs/nfsd/flexfilelayoutxdr.h | 49
-rw-r--r--  fs/nfsd/nfs4layouts.c | 16
-rw-r--r--  fs/nfsd/nfs4proc.c | 48
-rw-r--r--  fs/nfsd/nfs4state.c | 74
-rw-r--r--  fs/nfsd/nfs4xdr.c | 81
-rw-r--r--  fs/nfsd/nfsctl.c | 16
-rw-r--r--  fs/nfsd/nfsd.h | 5
-rw-r--r--  fs/nfsd/nfsfh.c | 18
-rw-r--r--  fs/nfsd/nfsproc.c | 7
-rw-r--r--  fs/nfsd/nfsxdr.c | 2
-rw-r--r--  fs/nfsd/pnfs.h | 4
-rw-r--r--  fs/nfsd/state.h | 1
-rw-r--r--  fs/nfsd/stats.c | 1
-rw-r--r--  fs/nfsd/vfs.c | 142
-rw-r--r--  fs/nfsd/vfs.h | 3
-rw-r--r--  fs/nfsd/xdr4.h | 5
-rw-r--r--  fs/nilfs2/alloc.c | 45
-rw-r--r--  fs/nilfs2/bmap.c | 4
-rw-r--r--  fs/nilfs2/bmap.h | 2
-rw-r--r--  fs/nilfs2/btnode.c | 10
-rw-r--r--  fs/nilfs2/btnode.h | 2
-rw-r--r--  fs/nilfs2/btree.c | 67
-rw-r--r--  fs/nilfs2/btree.h | 2
-rw-r--r--  fs/nilfs2/cpfile.c | 23
-rw-r--r--  fs/nilfs2/cpfile.h | 3
-rw-r--r--  fs/nilfs2/dat.c | 19
-rw-r--r--  fs/nilfs2/dat.h | 1
-rw-r--r--  fs/nilfs2/dir.c | 60
-rw-r--r--  fs/nilfs2/direct.c | 10
-rw-r--r--  fs/nilfs2/direct.h | 10
-rw-r--r--  fs/nilfs2/gcinode.c | 14
-rw-r--r--  fs/nilfs2/ifile.c | 7
-rw-r--r--  fs/nilfs2/ifile.h | 1
-rw-r--r--  fs/nilfs2/inode.c | 36
-rw-r--r--  fs/nilfs2/ioctl.c | 48
-rw-r--r--  fs/nilfs2/mdt.c | 17
-rw-r--r--  fs/nilfs2/namei.c | 6
-rw-r--r--  fs/nilfs2/nilfs.h | 48
-rw-r--r--  fs/nilfs2/page.c | 45
-rw-r--r--  fs/nilfs2/recovery.c | 72
-rw-r--r--  fs/nilfs2/segbuf.c | 24
-rw-r--r--  fs/nilfs2/segment.c | 61
-rw-r--r--  fs/nilfs2/segment.h | 1
-rw-r--r--  fs/nilfs2/sufile.c | 44
-rw-r--r--  fs/nilfs2/sufile.h | 1
-rw-r--r--  fs/nilfs2/super.c | 206
-rw-r--r--  fs/nilfs2/sysfs.c | 74
-rw-r--r--  fs/nilfs2/the_nilfs.c | 134
-rw-r--r--  fs/nilfs2/the_nilfs.h | 11
-rw-r--r--  fs/ntfs/aops.c | 6
-rw-r--r--  fs/ntfs/compress.c | 2
-rw-r--r--  fs/ntfs/file.c | 2
-rw-r--r--  fs/ntfs/inode.c | 2
-rw-r--r--  fs/ntfs/logfile.c | 2
-rw-r--r--  fs/ntfs/mft.c | 4
-rw-r--r--  fs/ntfs/namei.c | 2
-rw-r--r--  fs/ocfs2/alloc.c | 37
-rw-r--r--  fs/ocfs2/alloc.h | 2
-rw-r--r--  fs/ocfs2/aops.c | 41
-rw-r--r--  fs/ocfs2/buffer_head_io.c | 8
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 14
-rw-r--r--  fs/ocfs2/cluster/tcp.c | 8
-rw-r--r--  fs/ocfs2/dlm/dlmcommon.h | 4
-rw-r--r--  fs/ocfs2/dlm/dlmdebug.c | 26
-rw-r--r--  fs/ocfs2/dlm/dlmdebug.h | 1
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c | 4
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 53
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 29
-rw-r--r--  fs/ocfs2/dlm/dlmthread.c | 57
-rw-r--r--  fs/ocfs2/dlmfs/dlmfs.c | 4
-rw-r--r--  fs/ocfs2/dlmfs/userdlm.c | 2
-rw-r--r--  fs/ocfs2/dlmfs/userdlm.h | 2
-rw-r--r--  fs/ocfs2/dlmglue.c | 13
-rw-r--r--  fs/ocfs2/inode.h | 7
-rw-r--r--  fs/ocfs2/journal.c | 41
-rw-r--r--  fs/ocfs2/quota_global.c | 2
-rw-r--r--  fs/ocfs2/stack_user.c | 11
-rw-r--r--  fs/ocfs2/stackglue.c | 2
-rw-r--r--  fs/ocfs2/suballoc.c | 20
-rw-r--r--  fs/ocfs2/super.c | 3
-rw-r--r--  fs/ocfs2/xattr.c | 2
-rw-r--r--  fs/open.c | 28
-rw-r--r--  fs/orangefs/acl.c | 17
-rw-r--r--  fs/orangefs/dcache.c | 4
-rw-r--r--  fs/orangefs/devorangefs-req.c | 7
-rw-r--r--  fs/orangefs/file.c | 2
-rw-r--r--  fs/orangefs/inode.c | 35
-rw-r--r--  fs/orangefs/namei.c | 22
-rw-r--r--  fs/orangefs/orangefs-cache.c | 4
-rw-r--r--  fs/orangefs/orangefs-kernel.h | 29
-rw-r--r--  fs/orangefs/orangefs-mod.c | 2
-rw-r--r--  fs/orangefs/orangefs-sysfs.c | 43
-rw-r--r--  fs/orangefs/orangefs-utils.c | 42
-rw-r--r--  fs/orangefs/protocol.h | 8
-rw-r--r--  fs/orangefs/symlink.c | 2
-rw-r--r--  fs/orangefs/xattr.c | 131
-rw-r--r--  fs/overlayfs/copy_up.c | 1
-rw-r--r--  fs/overlayfs/dir.c | 280
-rw-r--r--  fs/overlayfs/inode.c | 277
-rw-r--r--  fs/overlayfs/overlayfs.h | 33
-rw-r--r--  fs/overlayfs/super.c | 198
-rw-r--r--  fs/pipe.c | 30
-rw-r--r--  fs/posix_acl.c | 8
-rw-r--r--  fs/proc/Makefile | 1
-rw-r--r--  fs/proc/base.c | 192
-rw-r--r--  fs/proc/inode.c | 15
-rw-r--r--  fs/proc/internal.h | 3
-rw-r--r--  fs/proc/meminfo.c | 25
-rw-r--r--  fs/proc/proc_sysctl.c | 6
-rw-r--r--  fs/proc/root.c | 61
-rw-r--r--  fs/proc/stat.c | 10
-rw-r--r--  fs/proc/task_mmu.c | 10
-rw-r--r--  fs/pstore/Kconfig | 31
-rw-r--r--  fs/pstore/inode.c | 1
-rw-r--r--  fs/pstore/platform.c | 269
-rw-r--r--  fs/pstore/ram.c | 102
-rw-r--r--  fs/quota/dquot.c | 24
-rw-r--r--  fs/quota/quota.c | 14
-rw-r--r--  fs/read_write.c | 18
-rw-r--r--  fs/reiserfs/ibalance.c | 3
-rw-r--r--  fs/reiserfs/inode.c | 4
-rw-r--r--  fs/reiserfs/journal.c | 14
-rw-r--r--  fs/reiserfs/stree.c | 4
-rw-r--r--  fs/reiserfs/super.c | 2
-rw-r--r--  fs/squashfs/block.c | 4
-rw-r--r--  fs/super.c | 71
-rw-r--r--  fs/sysfs/mount.c | 5
-rw-r--r--  fs/sysv/namei.c | 2
-rw-r--r--  fs/timerfd.c | 10
-rw-r--r--  fs/tracefs/inode.c | 7
-rw-r--r--  fs/ubifs/gc.c | 4
-rw-r--r--  fs/ubifs/super.c | 19
-rw-r--r--  fs/ubifs/ubifs.h | 4
-rw-r--r--  fs/ubifs/xattr.c | 6
-rw-r--r--  fs/udf/dir.c | 2
-rw-r--r--  fs/udf/directory.c | 2
-rw-r--r--  fs/udf/inode.c | 2
-rw-r--r--  fs/ufs/balloc.c | 2
-rw-r--r--  fs/ufs/dir.c | 17
-rw-r--r--  fs/ufs/util.c | 2
-rw-r--r--  fs/userfaultfd.c | 22
-rw-r--r--  fs/utimes.c | 3
-rw-r--r--  fs/xattr.c | 7
-rw-r--r--  fs/xfs/Kconfig | 1
-rw-r--r--  fs/xfs/Makefile | 8
-rw-r--r--  fs/xfs/libxfs/xfs_alloc.c | 246
-rw-r--r--  fs/xfs/libxfs/xfs_alloc.h | 59
-rw-r--r--  fs/xfs/libxfs/xfs_alloc_btree.c | 12
-rw-r--r--  fs/xfs/libxfs/xfs_attr.c | 71
-rw-r--r--  fs/xfs/libxfs/xfs_attr_leaf.c | 4
-rw-r--r--  fs/xfs/libxfs/xfs_attr_leaf.h | 3
-rw-r--r--  fs/xfs/libxfs/xfs_attr_remote.c | 19
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c | 256
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.h | 54
-rw-r--r--  fs/xfs/libxfs/xfs_bmap_btree.c | 32
-rw-r--r--  fs/xfs/libxfs/xfs_btree.c | 941
-rw-r--r--  fs/xfs/libxfs/xfs_btree.h | 90
-rw-r--r--  fs/xfs/libxfs/xfs_da_btree.c | 65
-rw-r--r--  fs/xfs/libxfs/xfs_da_btree.h | 4
-rw-r--r--  fs/xfs/libxfs/xfs_da_format.c | 31
-rw-r--r--  fs/xfs/libxfs/xfs_da_format.h | 44
-rw-r--r--  fs/xfs/libxfs/xfs_defer.c | 463
-rw-r--r--  fs/xfs/libxfs/xfs_defer.h | 97
-rw-r--r--  fs/xfs/libxfs/xfs_dir2.c | 15
-rw-r--r--  fs/xfs/libxfs/xfs_dir2.h | 8
-rw-r--r--  fs/xfs/libxfs/xfs_dir2_sf.c | 38
-rw-r--r--  fs/xfs/libxfs/xfs_format.h | 197
-rw-r--r--  fs/xfs/libxfs/xfs_fs.h | 9
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc.c | 43
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc.h | 2
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc_btree.c | 18
-rw-r--r--  fs/xfs/libxfs/xfs_inode_buf.c | 1
-rw-r--r--  fs/xfs/libxfs/xfs_log_format.h | 63
-rw-r--r--  fs/xfs/libxfs/xfs_rmap.c | 1399
-rw-r--r--  fs/xfs/libxfs/xfs_rmap.h | 209
-rw-r--r--  fs/xfs/libxfs/xfs_rmap_btree.c | 511
-rw-r--r--  fs/xfs/libxfs/xfs_rmap_btree.h | 61
-rw-r--r--  fs/xfs/libxfs/xfs_rtbitmap.c | 2
-rw-r--r--  fs/xfs/libxfs/xfs_sb.c | 9
-rw-r--r--  fs/xfs/libxfs/xfs_shared.h | 2
-rw-r--r--  fs/xfs/libxfs/xfs_trans_resv.c | 62
-rw-r--r--  fs/xfs/libxfs/xfs_trans_resv.h | 10
-rw-r--r--  fs/xfs/libxfs/xfs_types.h | 4
-rw-r--r--  fs/xfs/xfs_aops.c | 343
-rw-r--r--  fs/xfs/xfs_aops.h | 3
-rw-r--r--  fs/xfs/xfs_attr_inactive.c | 2
-rw-r--r--  fs/xfs/xfs_attr_list.c | 2
-rw-r--r--  fs/xfs/xfs_bmap_util.c | 458
-rw-r--r--  fs/xfs/xfs_bmap_util.h | 7
-rw-r--r--  fs/xfs/xfs_buf.c | 268
-rw-r--r--  fs/xfs/xfs_buf.h | 7
-rw-r--r--  fs/xfs/xfs_buf_item.c | 31
-rw-r--r--  fs/xfs/xfs_discard.c | 2
-rw-r--r--  fs/xfs/xfs_dquot.c | 14
-rw-r--r--  fs/xfs/xfs_dquot_item.c | 2
-rw-r--r--  fs/xfs/xfs_error.c | 5
-rw-r--r--  fs/xfs/xfs_error.h | 8
-rw-r--r--  fs/xfs/xfs_export.c | 2
-rw-r--r--  fs/xfs/xfs_extfree_item.c | 71
-rw-r--r--  fs/xfs/xfs_extfree_item.h | 3
-rw-r--r--  fs/xfs/xfs_file.c | 431
-rw-r--r--  fs/xfs/xfs_filestream.c | 3
-rw-r--r--  fs/xfs/xfs_fsops.c | 211
-rw-r--r--  fs/xfs/xfs_icache.c | 2
-rw-r--r--  fs/xfs/xfs_icache.h | 1
-rw-r--r--  fs/xfs/xfs_inode.c | 115
-rw-r--r--  fs/xfs/xfs_inode.h | 24
-rw-r--r--  fs/xfs/xfs_inode_item.c | 1
-rw-r--r--  fs/xfs/xfs_ioctl.c | 41
-rw-r--r--  fs/xfs/xfs_ioctl.h | 3
-rw-r--r--  fs/xfs/xfs_ioctl32.c | 6
-rw-r--r--  fs/xfs/xfs_iomap.c | 202
-rw-r--r--  fs/xfs/xfs_iomap.h | 7
-rw-r--r--  fs/xfs/xfs_iops.c | 113
-rw-r--r--  fs/xfs/xfs_linux.h | 7
-rw-r--r--  fs/xfs/xfs_log.c | 13
-rw-r--r--  fs/xfs/xfs_log.h | 5
-rw-r--r--  fs/xfs/xfs_log_cil.c | 258
-rw-r--r--  fs/xfs/xfs_log_recover.c | 336
-rw-r--r--  fs/xfs/xfs_mount.c | 17
-rw-r--r--  fs/xfs/xfs_mount.h | 6
-rw-r--r--  fs/xfs/xfs_ondisk.h | 34
-rw-r--r--  fs/xfs/xfs_pnfs.c | 27
-rw-r--r--  fs/xfs/xfs_pnfs.h | 4
-rw-r--r--  fs/xfs/xfs_rmap_item.c | 536
-rw-r--r--  fs/xfs/xfs_rmap_item.h | 95
-rw-r--r--  fs/xfs/xfs_rtalloc.c | 11
-rw-r--r--  fs/xfs/xfs_rtalloc.h | 2
-rw-r--r--  fs/xfs/xfs_stats.c | 2
-rw-r--r--  fs/xfs/xfs_stats.h | 18
-rw-r--r--  fs/xfs/xfs_super.c | 43
-rw-r--r--  fs/xfs/xfs_super.h | 2
-rw-r--r--  fs/xfs/xfs_symlink.c | 25
-rw-r--r--  fs/xfs/xfs_sysfs.c | 3
-rw-r--r--  fs/xfs/xfs_trace.c | 2
-rw-r--r--  fs/xfs/xfs_trace.h | 399
-rw-r--r--  fs/xfs/xfs_trans.h | 27
-rw-r--r--  fs/xfs/xfs_trans_extfree.c | 215
-rw-r--r--  fs/xfs/xfs_trans_rmap.c | 271
523 files changed, 18189 insertions, 10903 deletions
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index 0576eaeb60b9..5b6a1743ea17 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -266,7 +266,7 @@ static int v9fs_xattr_set_acl(const struct xattr_handler *handler,
if (IS_ERR(acl))
return PTR_ERR(acl);
else if (acl) {
- retval = posix_acl_valid(acl);
+ retval = posix_acl_valid(inode->i_sb->s_user_ns, acl);
if (retval)
goto err_out;
}
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index 47db55aee7f2..60fb47469c86 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -257,36 +257,12 @@ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry)
return v9fs_fid_lookup_with_uid(dentry, uid, any);
}
-struct p9_fid *v9fs_fid_clone(struct dentry *dentry)
-{
- struct p9_fid *fid, *ret;
-
- fid = v9fs_fid_lookup(dentry);
- if (IS_ERR(fid))
- return fid;
-
- ret = p9_client_walk(fid, 0, NULL, 1);
- return ret;
-}
-
-static struct p9_fid *v9fs_fid_clone_with_uid(struct dentry *dentry, kuid_t uid)
-{
- struct p9_fid *fid, *ret;
-
- fid = v9fs_fid_lookup_with_uid(dentry, uid, 0);
- if (IS_ERR(fid))
- return fid;
-
- ret = p9_client_walk(fid, 0, NULL, 1);
- return ret;
-}
-
struct p9_fid *v9fs_writeback_fid(struct dentry *dentry)
{
int err;
struct p9_fid *fid;
- fid = v9fs_fid_clone_with_uid(dentry, GLOBAL_ROOT_UID);
+ fid = clone_fid(v9fs_fid_lookup_with_uid(dentry, GLOBAL_ROOT_UID, 0));
if (IS_ERR(fid))
goto error_out;
/*
diff --git a/fs/9p/fid.h b/fs/9p/fid.h
index 2b6787fcb626..4491bcaf42b8 100644
--- a/fs/9p/fid.h
+++ b/fs/9p/fid.h
@@ -24,7 +24,18 @@
#include <linux/list.h>
struct p9_fid *v9fs_fid_lookup(struct dentry *dentry);
-struct p9_fid *v9fs_fid_clone(struct dentry *dentry);
+static inline struct p9_fid *v9fs_parent_fid(struct dentry *dentry)
+{
+ return v9fs_fid_lookup(dentry->d_parent);
+}
void v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid);
struct p9_fid *v9fs_writeback_fid(struct dentry *dentry);
+static inline struct p9_fid *clone_fid(struct p9_fid *fid)
+{
+ return IS_ERR(fid) ? fid : p9_client_walk(fid, 0, NULL, 1);
+}
+static inline struct p9_fid *v9fs_fid_clone(struct dentry *dentry)
+{
+ return clone_fid(v9fs_fid_lookup(dentry));
+}
#endif
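
The two inline helpers added above compose: clone_fid() forwards an ERR_PTR input unchanged, so a lookup-then-clone chain needs only one error check at the end. A minimal sketch of the resulting call pattern, assuming this fid.h context (example_get_dir_fid() is a hypothetical caller, not part of the patch):

/* Hedged sketch; example_get_dir_fid() is illustrative only. */
static struct p9_fid *example_get_dir_fid(struct dentry *dentry)
{
	/*
	 * v9fs_parent_fid() may return an ERR_PTR(); clone_fid()
	 * passes such a value straight through, so a single
	 * IS_ERR() check covers the whole chain.
	 */
	struct p9_fid *dirfid = clone_fid(v9fs_parent_fid(dentry));

	if (IS_ERR(dirfid))
		return dirfid;
	/* ... use dirfid, then p9_client_clunk() it when done ... */
	return dirfid;
}

This is exactly the shape the vfs_inode.c rename path below adopts with clone_fid(v9fs_parent_fid(old_dentry)).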
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index c37fb9c08970..6181ad79e1a5 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -231,7 +231,6 @@ static int v9fs_launder_page(struct page *page)
/**
* v9fs_direct_IO - 9P address space operation for direct I/O
* @iocb: target I/O control block
- * @pos: offset in file to begin the operation
*
* The presence of v9fs_direct_IO() in the address space ops vector
* allowes open() O_DIRECT flags which would have failed otherwise.
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index f4645c515262..8b1999b528e9 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -595,7 +595,7 @@ static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags)
v9ses = v9fs_inode2v9ses(dir);
inode = d_inode(dentry);
- dfid = v9fs_fid_lookup(dentry->d_parent);
+ dfid = v9fs_parent_fid(dentry);
if (IS_ERR(dfid)) {
retval = PTR_ERR(dfid);
p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", retval);
@@ -653,7 +653,7 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
ofid = NULL;
fid = NULL;
name = (char *) dentry->d_name.name;
- dfid = v9fs_fid_lookup(dentry->d_parent);
+ dfid = v9fs_parent_fid(dentry);
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
@@ -661,7 +661,7 @@ v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
}
/* clone a fid to use for creation */
- ofid = p9_client_walk(dfid, 0, NULL, 1);
+ ofid = clone_fid(dfid);
if (IS_ERR(ofid)) {
err = PTR_ERR(ofid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
@@ -798,7 +798,7 @@ struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
v9ses = v9fs_inode2v9ses(dir);
/* We can walk d_parent because we hold the dir->i_mutex */
- dfid = v9fs_fid_lookup(dentry->d_parent);
+ dfid = v9fs_parent_fid(dentry);
if (IS_ERR(dfid))
return ERR_CAST(dfid);
@@ -853,7 +853,7 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
struct p9_fid *fid, *inode_fid;
struct dentry *res = NULL;
- if (d_unhashed(dentry)) {
+ if (d_in_lookup(dentry)) {
res = v9fs_vfs_lookup(dir, dentry, 0);
if (IS_ERR(res))
return PTR_ERR(res);
@@ -975,13 +975,13 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (IS_ERR(oldfid))
return PTR_ERR(oldfid);
- olddirfid = v9fs_fid_clone(old_dentry->d_parent);
+ olddirfid = clone_fid(v9fs_parent_fid(old_dentry));
if (IS_ERR(olddirfid)) {
retval = PTR_ERR(olddirfid);
goto done;
}
- newdirfid = v9fs_fid_clone(new_dentry->d_parent);
+ newdirfid = clone_fid(v9fs_parent_fid(new_dentry));
if (IS_ERR(newdirfid)) {
retval = PTR_ERR(newdirfid);
goto clunk_olddir;
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index a34702c998f5..eeabcb0bad12 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -254,7 +254,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
struct posix_acl *pacl = NULL, *dacl = NULL;
struct dentry *res = NULL;
- if (d_unhashed(dentry)) {
+ if (d_in_lookup(dentry)) {
res = v9fs_vfs_lookup(dir, dentry, 0);
if (IS_ERR(res))
return PTR_ERR(res);
@@ -273,7 +273,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
p9_debug(P9_DEBUG_VFS, "name:%s flags:0x%x mode:0x%hx\n",
name, flags, omode);
- dfid = v9fs_fid_lookup(dentry->d_parent);
+ dfid = v9fs_parent_fid(dentry);
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
@@ -281,7 +281,7 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
}
/* clone a fid to use for creation */
- ofid = p9_client_walk(dfid, 0, NULL, 1);
+ ofid = clone_fid(dfid);
if (IS_ERR(ofid)) {
err = PTR_ERR(ofid);
p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
@@ -389,7 +389,6 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
umode_t mode;
struct inode *inode;
struct p9_qid qid;
- struct dentry *dir_dentry;
struct posix_acl *dacl = NULL, *pacl = NULL;
p9_debug(P9_DEBUG_VFS, "name %pd\n", dentry);
@@ -400,8 +399,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
if (dir->i_mode & S_ISGID)
omode |= S_ISGID;
- dir_dentry = dentry->d_parent;
- dfid = v9fs_fid_lookup(dir_dentry);
+ dfid = v9fs_parent_fid(dentry);
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
@@ -691,7 +689,7 @@ v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry,
p9_debug(P9_DEBUG_VFS, "%lu,%s,%s\n", dir->i_ino, name, symname);
v9ses = v9fs_inode2v9ses(dir);
- dfid = v9fs_fid_lookup(dentry->d_parent);
+ dfid = v9fs_parent_fid(dentry);
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
@@ -762,7 +760,6 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
int err;
- struct dentry *dir_dentry;
struct p9_fid *dfid, *oldfid;
struct v9fs_session_info *v9ses;
@@ -770,8 +767,7 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
dir->i_ino, old_dentry, dentry);
v9ses = v9fs_inode2v9ses(dir);
- dir_dentry = dentry->d_parent;
- dfid = v9fs_fid_lookup(dir_dentry);
+ dfid = v9fs_parent_fid(dentry);
if (IS_ERR(dfid))
return PTR_ERR(dfid);
@@ -822,7 +818,6 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
struct p9_fid *fid = NULL, *dfid = NULL;
struct inode *inode;
struct p9_qid qid;
- struct dentry *dir_dentry;
struct posix_acl *dacl = NULL, *pacl = NULL;
p9_debug(P9_DEBUG_VFS, " %lu,%pd mode: %hx MAJOR: %u MINOR: %u\n",
@@ -830,8 +825,7 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
MAJOR(rdev), MINOR(rdev));
v9ses = v9fs_inode2v9ses(dir);
- dir_dentry = dentry->d_parent;
- dfid = v9fs_fid_lookup(dir_dentry);
+ dfid = v9fs_parent_fid(dentry);
if (IS_ERR(dfid)) {
err = PTR_ERR(dfid);
p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index a6bd349bab23..f329eee6dc93 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -97,8 +97,6 @@ int v9fs_xattr_set(struct dentry *dentry, const char *name,
const void *value, size_t value_len, int flags)
{
struct p9_fid *fid = v9fs_fid_lookup(dentry);
- if (IS_ERR(fid))
- return PTR_ERR(fid);
return v9fs_fid_xattr_set(fid, name, value, value_len, flags);
}
@@ -115,7 +113,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name,
name, value_len, flags);
/* Clone it */
- fid = p9_client_walk(fid, 0, NULL, 1);
+ fid = clone_fid(fid);
if (IS_ERR(fid))
return PTR_ERR(fid);
diff --git a/fs/Kconfig b/fs/Kconfig
index b8fcb416be72..2bc7ad775842 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -10,6 +10,9 @@ config DCACHE_WORD_ACCESS
if BLOCK
+config FS_IOMAP
+ bool
+
source "fs/ext2/Kconfig"
source "fs/ext4/Kconfig"
source "fs/jbd2/Kconfig"
@@ -67,6 +70,12 @@ config FS_POSIX_ACL
config EXPORTFS
tristate
+config EXPORTFS_BLOCK_OPS
+ bool "Enable filesystem export operations for block IO"
+ help
+ This option enables the export operations for a filesystem to support
+ external block IO.
+
config FILE_LOCKING
bool "Enable POSIX file locking API" if EXPERT
default y
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 72c03354c14b..c7efddf6e038 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -89,7 +89,8 @@ config BINFMT_SCRIPT
config BINFMT_FLAT
bool "Kernel support for flat binaries"
- depends on !MMU && (!FRV || BROKEN)
+ depends on !MMU || M68K
+ depends on !FRV || BROKEN
help
Support uClinux FLAT format binaries.
diff --git a/fs/Makefile b/fs/Makefile
index 85b6e13b62d3..ed2b63257ba9 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -49,6 +49,7 @@ obj-$(CONFIG_COREDUMP) += coredump.o
obj-$(CONFIG_SYSCTL) += drop_caches.o
obj-$(CONFIG_FHANDLE) += fhandle.o
+obj-$(CONFIG_FS_IOMAP) += iomap.o
obj-y += quota/
diff --git a/fs/adfs/dir.c b/fs/adfs/dir.c
index fd4cf2c48e48..29444c83da48 100644
--- a/fs/adfs/dir.c
+++ b/fs/adfs/dir.c
@@ -101,7 +101,7 @@ out:
}
static int
-adfs_match(struct qstr *name, struct object_info *obj)
+adfs_match(const struct qstr *name, struct object_info *obj)
{
int i;
@@ -126,7 +126,7 @@ adfs_match(struct qstr *name, struct object_info *obj)
}
static int
-adfs_dir_lookup_byname(struct inode *inode, struct qstr *name, struct object_info *obj)
+adfs_dir_lookup_byname(struct inode *inode, const struct qstr *name, struct object_info *obj)
{
struct super_block *sb = inode->i_sb;
const struct adfs_dir_ops *ops = ADFS_SB(sb)->s_dir;
@@ -207,7 +207,7 @@ adfs_hash(const struct dentry *parent, struct qstr *qstr)
*/
qstr->len = i = name_len;
name = qstr->name;
- hash = init_name_hash();
+ hash = init_name_hash(parent);
while (i--) {
char c;
@@ -227,7 +227,7 @@ adfs_hash(const struct dentry *parent, struct qstr *qstr)
* requirements of the underlying filesystem.
*/
static int
-adfs_compare(const struct dentry *parent, const struct dentry *dentry,
+adfs_compare(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
int i;
diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c
index d6c7a51c93e4..d8f217c711d3 100644
--- a/fs/affs/amigaffs.c
+++ b/fs/affs/amigaffs.c
@@ -472,9 +472,7 @@ affs_warning(struct super_block *sb, const char *function, const char *fmt, ...)
bool
affs_nofilenametruncate(const struct dentry *dentry)
{
- struct inode *inode = d_inode(dentry);
-
- return affs_test_opt(AFFS_SB(inode->i_sb)->s_flags, SF_NO_TRUNCATE);
+ return affs_test_opt(AFFS_SB(dentry->d_sb)->s_flags, SF_NO_TRUNCATE);
}
/* Check if the name is valid for a affs object. */
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index 00d3002a6780..a2d68f828d53 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -14,11 +14,11 @@ typedef int (*toupper_t)(int);
static int affs_toupper(int ch);
static int affs_hash_dentry(const struct dentry *, struct qstr *);
-static int affs_compare_dentry(const struct dentry *parent, const struct dentry *dentry,
+static int affs_compare_dentry(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name);
static int affs_intl_toupper(int ch);
static int affs_intl_hash_dentry(const struct dentry *, struct qstr *);
-static int affs_intl_compare_dentry(const struct dentry *parent, const struct dentry *dentry,
+static int affs_intl_compare_dentry(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name);
const struct dentry_operations affs_dentry_operations = {
@@ -61,7 +61,7 @@ affs_get_toupper(struct super_block *sb)
* Note: the dentry argument is the parent dentry.
*/
static inline int
-__affs_hash_dentry(struct qstr *qstr, toupper_t toupper, bool notruncate)
+__affs_hash_dentry(const struct dentry *dentry, struct qstr *qstr, toupper_t toupper, bool notruncate)
{
const u8 *name = qstr->name;
unsigned long hash;
@@ -72,7 +72,7 @@ __affs_hash_dentry(struct qstr *qstr, toupper_t toupper, bool notruncate)
if (retval)
return retval;
- hash = init_name_hash();
+ hash = init_name_hash(dentry);
len = min(qstr->len, AFFSNAMEMAX);
for (; len > 0; name++, len--)
hash = partial_name_hash(toupper(*name), hash);
@@ -84,7 +84,7 @@ __affs_hash_dentry(struct qstr *qstr, toupper_t toupper, bool notruncate)
static int
affs_hash_dentry(const struct dentry *dentry, struct qstr *qstr)
{
- return __affs_hash_dentry(qstr, affs_toupper,
+ return __affs_hash_dentry(dentry, qstr, affs_toupper,
affs_nofilenametruncate(dentry));
}
@@ -92,7 +92,7 @@ affs_hash_dentry(const struct dentry *dentry, struct qstr *qstr)
static int
affs_intl_hash_dentry(const struct dentry *dentry, struct qstr *qstr)
{
- return __affs_hash_dentry(qstr, affs_intl_toupper,
+ return __affs_hash_dentry(dentry, qstr, affs_intl_toupper,
affs_nofilenametruncate(dentry));
}
@@ -131,20 +131,20 @@ static inline int __affs_compare_dentry(unsigned int len,
}
static int
-affs_compare_dentry(const struct dentry *parent, const struct dentry *dentry,
+affs_compare_dentry(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
return __affs_compare_dentry(len, str, name, affs_toupper,
- affs_nofilenametruncate(parent));
+ affs_nofilenametruncate(dentry));
}
static int
-affs_intl_compare_dentry(const struct dentry *parent, const struct dentry *dentry,
+affs_intl_compare_dentry(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
return __affs_compare_dentry(len, str, name, affs_intl_toupper,
- affs_nofilenametruncate(parent));
+ affs_nofilenametruncate(dentry));
}
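
The adfs and affs hunks above track a VFS-wide signature change: ->d_compare() no longer receives the parent dentry, so per-mount state must be reached through dentry->d_sb, as affs_nofilenametruncate() now does. A minimal sketch of the new operation shape, with examplefs_compare() standing in for a real filesystem hook:

/* Hedged sketch of the post-change ->d_compare() signature. */
static int examplefs_compare(const struct dentry *dentry,
			     unsigned int len, const char *str,
			     const struct qstr *name)
{
	/* No parent argument: per-sb options come via dentry->d_sb. */
	if (len != name->len)
		return 1;				/* no match */
	return memcmp(str, name->name, len) != 0;	/* 0 on match */
}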
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 63cd9f939f19..4832de84d52c 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -85,18 +85,14 @@ int afs_open_socket(void)
skb_queue_head_init(&afs_incoming_calls);
+ ret = -ENOMEM;
afs_async_calls = create_singlethread_workqueue("kafsd");
- if (!afs_async_calls) {
- _leave(" = -ENOMEM [wq]");
- return -ENOMEM;
- }
+ if (!afs_async_calls)
+ goto error_0;
ret = sock_create_kern(&init_net, AF_RXRPC, SOCK_DGRAM, PF_INET, &socket);
- if (ret < 0) {
- destroy_workqueue(afs_async_calls);
- _leave(" = %d [socket]", ret);
- return ret;
- }
+ if (ret < 0)
+ goto error_1;
socket->sk->sk_allocation = GFP_NOFS;
@@ -111,18 +107,26 @@ int afs_open_socket(void)
sizeof(srx.transport.sin.sin_addr));
ret = kernel_bind(socket, (struct sockaddr *) &srx, sizeof(srx));
- if (ret < 0) {
- sock_release(socket);
- destroy_workqueue(afs_async_calls);
- _leave(" = %d [bind]", ret);
- return ret;
- }
+ if (ret < 0)
+ goto error_2;
+
+ ret = kernel_listen(socket, INT_MAX);
+ if (ret < 0)
+ goto error_2;
rxrpc_kernel_intercept_rx_messages(socket, afs_rx_interceptor);
afs_socket = socket;
_leave(" = 0");
return 0;
+
+error_2:
+ sock_release(socket);
+error_1:
+ destroy_workqueue(afs_async_calls);
+error_0:
+ _leave(" = %d", ret);
+ return ret;
}
/*
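
The afs_open_socket() rewrite above converts duplicated inline cleanup into the kernel's usual goto-unwind pattern: acquire resources in order, release them in reverse from labelled exits. A condensed sketch of the shape (function and label names illustrative, error handling abbreviated):

/* Hedged sketch of the goto-unwind idiom; names are illustrative. */
static int example_open_socket(void)
{
	struct workqueue_struct *wq;
	struct socket *sock;
	int ret;

	ret = -ENOMEM;
	wq = create_singlethread_workqueue("example");
	if (!wq)
		goto error_0;

	ret = sock_create_kern(&init_net, AF_RXRPC, SOCK_DGRAM, PF_INET, &sock);
	if (ret < 0)
		goto error_1;

	ret = kernel_listen(sock, INT_MAX);
	if (ret < 0)
		goto error_2;
	return 0;

error_2:
	sock_release(sock);		/* undo in reverse order */
error_1:
	destroy_workqueue(wq);
error_0:
	return ret;
}

Each later failure unwinds everything acquired before it, and new setup steps, like the kernel_listen() call the patch adds, slot in without touching earlier error paths.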
diff --git a/fs/attr.c b/fs/attr.c
index 25b24d0f6c88..42bb42bb3c72 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -255,6 +255,25 @@ int notify_change(struct dentry * dentry, struct iattr * attr, struct inode **de
if (!(attr->ia_valid & ~(ATTR_KILL_SUID | ATTR_KILL_SGID)))
return 0;
+ /*
+ * Verify that uid/gid changes are valid in the target
+ * namespace of the superblock.
+ */
+ if (ia_valid & ATTR_UID &&
+ !kuid_has_mapping(inode->i_sb->s_user_ns, attr->ia_uid))
+ return -EOVERFLOW;
+ if (ia_valid & ATTR_GID &&
+ !kgid_has_mapping(inode->i_sb->s_user_ns, attr->ia_gid))
+ return -EOVERFLOW;
+
+ /* Don't allow modifications of files with invalid uids or
+ * gids unless those uids & gids are being made valid.
+ */
+ if (!(ia_valid & ATTR_UID) && !uid_valid(inode->i_uid))
+ return -EOVERFLOW;
+ if (!(ia_valid & ATTR_GID) && !gid_valid(inode->i_gid))
+ return -EOVERFLOW;
+
error = security_inode_setattr(dentry, attr);
if (error)
return error;
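
The notify_change() hunk makes ownership changes respect the superblock's user namespace: kuid_has_mapping()/kgid_has_mapping() test whether a kernel uid/gid is representable in s_user_ns, and unrepresentable values are rejected with -EOVERFLOW. A compact sketch mirroring just that check (example_check_owner() is hypothetical):

/* Hedged sketch mirroring the validation added above. */
static int example_check_owner(struct inode *inode, const struct iattr *attr)
{
	struct user_namespace *ns = inode->i_sb->s_user_ns;

	/* A uid/gid with no mapping in the sb's userns cannot be stored. */
	if ((attr->ia_valid & ATTR_UID) && !kuid_has_mapping(ns, attr->ia_uid))
		return -EOVERFLOW;
	if ((attr->ia_valid & ATTR_GID) && !kgid_has_mapping(ns, attr->ia_gid))
		return -EOVERFLOW;
	return 0;
}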
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index 3767f6641af1..fa84bb8832e0 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -159,7 +159,7 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
{
struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
struct dentry *parent = dentry->d_parent;
- struct qstr *name = &dentry->d_name;
+ const struct qstr *name = &dentry->d_name;
unsigned int len = name->len;
unsigned int hash = name->hash;
const unsigned char *str = name->name;
@@ -172,7 +172,7 @@ static struct dentry *autofs4_lookup_active(struct dentry *dentry)
list_for_each(p, head) {
struct autofs_info *ino;
struct dentry *active;
- struct qstr *qstr;
+ const struct qstr *qstr;
ino = list_entry(p, struct autofs_info, active);
active = ino->dentry;
@@ -214,7 +214,7 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry,
{
struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
struct dentry *parent = dentry->d_parent;
- struct qstr *name = &dentry->d_name;
+ const struct qstr *name = &dentry->d_name;
unsigned int len = name->len;
unsigned int hash = name->hash;
const unsigned char *str = name->name;
@@ -227,7 +227,7 @@ static struct dentry *autofs4_lookup_expiring(struct dentry *dentry,
list_for_each(p, head) {
struct autofs_info *ino;
struct dentry *expiring;
- struct qstr *qstr;
+ const struct qstr *qstr;
if (rcu_walk) {
spin_unlock(&sbi->lookup_lock);
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
index 631f1554c87b..431fd7ee3488 100644
--- a/fs/autofs4/waitq.c
+++ b/fs/autofs4/waitq.c
@@ -225,7 +225,7 @@ rename_retry:
}
static struct autofs_wait_queue *
-autofs4_find_wait(struct autofs_sb_info *sbi, struct qstr *qstr)
+autofs4_find_wait(struct autofs_sb_info *sbi, const struct qstr *qstr)
{
struct autofs_wait_queue *wq;
@@ -249,7 +249,7 @@ autofs4_find_wait(struct autofs_sb_info *sbi, struct qstr *qstr)
*/
static int validate_request(struct autofs_wait_queue **wait,
struct autofs_sb_info *sbi,
- struct qstr *qstr,
+ const struct qstr *qstr,
struct dentry *dentry, enum autofs_notify notify)
{
struct autofs_wait_queue *wq;
@@ -398,7 +398,7 @@ int autofs4_wait(struct autofs_sb_info *sbi,
}
}
qstr.name = name;
- qstr.hash = full_name_hash(name, qstr.len);
+ qstr.hash = full_name_hash(dentry, name, qstr.len);
if (mutex_lock_interruptible(&sbi->wq_mutex)) {
kfree(qstr.name);
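
The hashing hunks here and in adfs/affs above follow the salted name-hash API: init_name_hash() and full_name_hash() now take the parent dentry as a salt, so the same name hashes differently under different directories. A minimal sketch of the per-character form (examplefs_hash() is hypothetical):

/* Hedged sketch of a salted ->d_hash() implementation. */
static int examplefs_hash(const struct dentry *dentry, struct qstr *qstr)
{
	const unsigned char *name = qstr->name;
	unsigned int len = qstr->len;
	unsigned long hash;

	hash = init_name_hash(dentry);	/* parent dentry salts the hash */
	while (len--)
		hash = partial_name_hash(*name++, hash);
	qstr->hash = end_name_hash(hash);
	return 0;
}

When no per-character transform is needed, full_name_hash(dentry, name, len) computes the same salted result in one call, as the autofs4 hunk above uses.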
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index a7a28110dc80..7f6aff3f72eb 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -605,28 +605,30 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
* Do the same thing for the memory mapping - between
* elf_bss and last_bss is the bss section.
*/
- k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
+ k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
if (k > last_bss)
last_bss = k;
}
}
+ /*
+ * Now fill out the bss section: first pad the last page from
+ * the file up to the page boundary, and zero it from elf_bss
+ * up to the end of the page.
+ */
+ if (padzero(elf_bss)) {
+ error = -EFAULT;
+ goto out;
+ }
+ /*
+ * Next, align both the file and mem bss up to the page size,
+ * since this is where elf_bss was just zeroed up to, and where
+ * last_bss will end after the vm_brk() below.
+ */
+ elf_bss = ELF_PAGEALIGN(elf_bss);
+ last_bss = ELF_PAGEALIGN(last_bss);
+ /* Finally, if there is still more bss to allocate, do it. */
if (last_bss > elf_bss) {
- /*
- * Now fill out the bss section. First pad the last page up
- * to the page boundary, and then perform a mmap to make sure
- * that there are zero-mapped pages up to and including the
- * last bss page.
- */
- if (padzero(elf_bss)) {
- error = -EFAULT;
- goto out;
- }
-
- /* What we have mapped so far */
- elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
-
- /* Map the last of the bss segment */
error = vm_brk(elf_bss, last_bss - elf_bss);
if (error)
goto out;
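
In the load_elf_interp() hunk, padzero() first zeroes the tail of the last file-backed page, then ELF_PAGEALIGN rounds both elf_bss and last_bss up to a page boundary so vm_brk() maps only whole anonymous zero pages. A standalone arithmetic sketch, assuming a 4096-byte ELF_MIN_ALIGN (addresses illustrative):

#include <stdio.h>

/* EX_PAGEALIGN stands in for ELF_PAGEALIGN with ELF_MIN_ALIGN == 4096. */
#define EX_PAGEALIGN(x) (((x) + 4095UL) & ~4095UL)

int main(void)
{
	unsigned long elf_bss = 0x40a123UL;	/* end of file-backed data */
	unsigned long last_bss = 0x40f456UL;	/* end of in-memory bss */

	/* After alignment, vm_brk() would cover 0x40b000..0x410000. */
	printf("elf_bss  -> %#lx\n", EX_PAGEALIGN(elf_bss));	/* 0x40b000 */
	printf("last_bss -> %#lx\n", EX_PAGEALIGN(last_bss));	/* 0x410000 */
	return 0;
}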
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 203589311bf8..464a972e88c1 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -67,8 +67,6 @@ static int create_elf_fdpic_tables(struct linux_binprm *, struct mm_struct *,
struct elf_fdpic_params *);
#ifndef CONFIG_MMU
-static int elf_fdpic_transfer_args_to_stack(struct linux_binprm *,
- unsigned long *);
static int elf_fdpic_map_file_constdisp_on_uclinux(struct elf_fdpic_params *,
struct file *,
struct mm_struct *);
@@ -515,8 +513,9 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
sp = mm->start_stack;
/* stack the program arguments and environment */
- if (elf_fdpic_transfer_args_to_stack(bprm, &sp) < 0)
+ if (transfer_args_to_stack(bprm, &sp) < 0)
return -EFAULT;
+ sp &= ~15;
#endif
/*
@@ -711,39 +710,6 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
/*****************************************************************************/
/*
- * transfer the program arguments and environment from the holding pages onto
- * the stack
- */
-#ifndef CONFIG_MMU
-static int elf_fdpic_transfer_args_to_stack(struct linux_binprm *bprm,
- unsigned long *_sp)
-{
- unsigned long index, stop, sp;
- char *src;
- int ret = 0;
-
- stop = bprm->p >> PAGE_SHIFT;
- sp = *_sp;
-
- for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
- src = kmap(bprm->page[index]);
- sp -= PAGE_SIZE;
- if (copy_to_user((void *) sp, src, PAGE_SIZE) != 0)
- ret = -EFAULT;
- kunmap(bprm->page[index]);
- if (ret < 0)
- goto out;
- }
-
- *_sp = (*_sp - (MAX_ARG_PAGES * PAGE_SIZE - bprm->p)) & ~15;
-
-out:
- return ret;
-}
-#endif
-
-/*****************************************************************************/
-/*
* load the appropriate binary image (executable or interpreter) into memory
* - we assume no MMU is available
* - if no other PIC bits are set in params->hdr->e_flags
diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c
index 490538536cb4..dd2d3f0cd55d 100644
--- a/fs/binfmt_em86.c
+++ b/fs/binfmt_em86.c
@@ -24,7 +24,8 @@
static int load_em86(struct linux_binprm *bprm)
{
- char *interp, *i_name, *i_arg;
+ const char *i_name, *i_arg;
+ char *interp;
struct file * file;
int retval;
struct elfhdr elf_ex;
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index caf9e39bb82b..9b2917a30294 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -15,7 +15,8 @@
* JAN/99 -- coded full program relocation (gerg@snapgear.com)
*/
-#include <linux/export.h>
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
@@ -25,8 +26,6 @@
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/file.h>
-#include <linux/stat.h>
-#include <linux/fcntl.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/slab.h>
@@ -34,26 +33,16 @@
#include <linux/personality.h>
#include <linux/init.h>
#include <linux/flat.h>
-#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
#include <asm/byteorder.h>
-#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <asm/cacheflush.h>
#include <asm/page.h>
/****************************************************************************/
-#if 0
-#define DEBUG 1
-#endif
-
-#ifdef DEBUG
-#define DBG_FLT(a...) printk(a)
-#else
-#define DBG_FLT(a...)
-#endif
-
/*
* User data (data section and bss) needs to be aligned.
* We pick 0x20 here because it is the max value elf2flt has always
@@ -80,7 +69,7 @@ struct lib_info {
unsigned long text_len; /* Length of text segment */
unsigned long entry; /* Start address for this module */
unsigned long build_date; /* When this one was compiled */
- short loaded; /* Has this library been loaded? */
+ bool loaded; /* Has this library been loaded? */
} lib_list[MAX_SHARED_LIBS];
};
@@ -106,59 +95,67 @@ static struct linux_binfmt flat_format = {
static int flat_core_dump(struct coredump_params *cprm)
{
- printk("Process %s:%d received signr %d and should have core dumped\n",
- current->comm, current->pid, (int) cprm->siginfo->si_signo);
- return(1);
+ pr_warn("Process %s:%d received signr %d and should have core dumped\n",
+ current->comm, current->pid, cprm->siginfo->si_signo);
+ return 1;
}
/****************************************************************************/
/*
* create_flat_tables() parses the env- and arg-strings in new user
* memory and creates the pointer tables from them, and puts their
- * addresses on the "stack", returning the new stack pointer value.
+ * addresses on the "stack", recording the new stack pointer value.
*/
-static unsigned long create_flat_tables(
- unsigned long pp,
- struct linux_binprm * bprm)
+static int create_flat_tables(struct linux_binprm *bprm, unsigned long arg_start)
{
- unsigned long *argv,*envp;
- unsigned long * sp;
- char * p = (char*)pp;
- int argc = bprm->argc;
- int envc = bprm->envc;
- char uninitialized_var(dummy);
-
- sp = (unsigned long *)p;
- sp -= (envc + argc + 2) + 1 + (flat_argvp_envp_on_stack() ? 2 : 0);
- sp = (unsigned long *) ((unsigned long)sp & -FLAT_STACK_ALIGN);
- argv = sp + 1 + (flat_argvp_envp_on_stack() ? 2 : 0);
- envp = argv + (argc + 1);
+ char __user *p;
+ unsigned long __user *sp;
+ long i, len;
+
+ p = (char __user *)arg_start;
+ sp = (unsigned long __user *)current->mm->start_stack;
+
+ sp -= bprm->envc + 1;
+ sp -= bprm->argc + 1;
+ sp -= flat_argvp_envp_on_stack() ? 2 : 0;
+ sp -= 1; /* &argc */
+ current->mm->start_stack = (unsigned long)sp & -FLAT_STACK_ALIGN;
+ sp = (unsigned long __user *)current->mm->start_stack;
+
+ __put_user(bprm->argc, sp++);
if (flat_argvp_envp_on_stack()) {
- put_user((unsigned long) envp, sp + 2);
- put_user((unsigned long) argv, sp + 1);
- }
-
- put_user(argc, sp);
- current->mm->arg_start = (unsigned long) p;
- while (argc-->0) {
- put_user((unsigned long) p, argv++);
- do {
- get_user(dummy, p); p++;
- } while (dummy);
- }
- put_user((unsigned long) NULL, argv);
- current->mm->arg_end = current->mm->env_start = (unsigned long) p;
- while (envc-->0) {
- put_user((unsigned long)p, envp); envp++;
- do {
- get_user(dummy, p); p++;
- } while (dummy);
- }
- put_user((unsigned long) NULL, envp);
- current->mm->env_end = (unsigned long) p;
- return (unsigned long)sp;
+ unsigned long argv, envp;
+ argv = (unsigned long)(sp + 2);
+ envp = (unsigned long)(sp + 2 + bprm->argc + 1);
+ __put_user(argv, sp++);
+ __put_user(envp, sp++);
+ }
+
+ current->mm->arg_start = (unsigned long)p;
+ for (i = bprm->argc; i > 0; i--) {
+ __put_user((unsigned long)p, sp++);
+ len = strnlen_user(p, MAX_ARG_STRLEN);
+ if (!len || len > MAX_ARG_STRLEN)
+ return -EINVAL;
+ p += len;
+ }
+ __put_user(0, sp++);
+ current->mm->arg_end = (unsigned long)p;
+
+ current->mm->env_start = (unsigned long) p;
+ for (i = bprm->envc; i > 0; i--) {
+ __put_user((unsigned long)p, sp++);
+ len = strnlen_user(p, MAX_ARG_STRLEN);
+ if (!len || len > MAX_ARG_STRLEN)
+ return -EINVAL;
+ p += len;
+ }
+ __put_user(0, sp++);
+ current->mm->env_end = (unsigned long)p;
+
+ return 0;
}
/****************************************************************************/
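
For orientation, the rewritten create_flat_tables() lays the pointer table out downward from start_stack while the argument strings stay in place at arg_start. A worked illustration (not from the patch) for argc = 2, envc = 1 with flat_argvp_envp_on_stack() true:

/*
 * Word layout built above, slots counted up from the final sp:
 *
 *   sp[0]: argc     = 2
 *   sp[1]: argv     = &sp[3]
 *   sp[2]: envp     = &sp[6]
 *   sp[3]: argv[0] --> "arg0" at arg_start
 *   sp[4]: argv[1] --> "arg1"
 *   sp[5]: NULL
 *   sp[6]: envp[0] --> "env0"
 *   sp[7]: NULL
 *
 * In the code, envp = sp + 2 + bprm->argc + 1 is computed with sp
 * already past the argc slot, which lands on slot 6 here: slots 3..5
 * hold the two argv entries plus their NULL terminator.
 */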
@@ -190,17 +187,17 @@ static int decompress_exec(
loff_t fpos;
int ret, retval;
- DBG_FLT("decompress_exec(offset=%x,buf=%x,len=%x)\n",(int)offset, (int)dst, (int)len);
+ pr_debug("decompress_exec(offset=%lx,buf=%p,len=%lx)\n", offset, dst, len);
memset(&strm, 0, sizeof(strm));
strm.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
if (strm.workspace == NULL) {
- DBG_FLT("binfmt_flat: no memory for decompress workspace\n");
+ pr_debug("no memory for decompress workspace\n");
return -ENOMEM;
}
buf = kmalloc(LBUFSIZE, GFP_KERNEL);
if (buf == NULL) {
- DBG_FLT("binfmt_flat: no memory for read buffer\n");
+ pr_debug("no memory for read buffer\n");
retval = -ENOMEM;
goto out_free;
}
@@ -218,49 +215,49 @@ static int decompress_exec(
/* Check minimum size -- gzip header */
if (ret < 10) {
- DBG_FLT("binfmt_flat: file too small?\n");
+ pr_debug("file too small?\n");
goto out_free_buf;
}
/* Check gzip magic number */
if ((buf[0] != 037) || ((buf[1] != 0213) && (buf[1] != 0236))) {
- DBG_FLT("binfmt_flat: unknown compression magic?\n");
+ pr_debug("unknown compression magic?\n");
goto out_free_buf;
}
/* Check gzip method */
if (buf[2] != 8) {
- DBG_FLT("binfmt_flat: unknown compression method?\n");
+ pr_debug("unknown compression method?\n");
goto out_free_buf;
}
/* Check gzip flags */
if ((buf[3] & ENCRYPTED) || (buf[3] & CONTINUATION) ||
(buf[3] & RESERVED)) {
- DBG_FLT("binfmt_flat: unknown flags?\n");
+ pr_debug("unknown flags?\n");
goto out_free_buf;
}
ret = 10;
if (buf[3] & EXTRA_FIELD) {
ret += 2 + buf[10] + (buf[11] << 8);
- if (unlikely(LBUFSIZE <= ret)) {
- DBG_FLT("binfmt_flat: buffer overflow (EXTRA)?\n");
+ if (unlikely(ret >= LBUFSIZE)) {
+ pr_debug("buffer overflow (EXTRA)?\n");
goto out_free_buf;
}
}
if (buf[3] & ORIG_NAME) {
while (ret < LBUFSIZE && buf[ret++] != 0)
;
- if (unlikely(LBUFSIZE == ret)) {
- DBG_FLT("binfmt_flat: buffer overflow (ORIG_NAME)?\n");
+ if (unlikely(ret == LBUFSIZE)) {
+ pr_debug("buffer overflow (ORIG_NAME)?\n");
goto out_free_buf;
}
}
if (buf[3] & COMMENT) {
while (ret < LBUFSIZE && buf[ret++] != 0)
;
- if (unlikely(LBUFSIZE == ret)) {
- DBG_FLT("binfmt_flat: buffer overflow (COMMENT)?\n");
+ if (unlikely(ret == LBUFSIZE)) {
+ pr_debug("buffer overflow (COMMENT)?\n");
goto out_free_buf;
}
}
@@ -273,7 +270,7 @@ static int decompress_exec(
strm.total_out = 0;
if (zlib_inflateInit2(&strm, -MAX_WBITS) != Z_OK) {
- DBG_FLT("binfmt_flat: zlib init failed?\n");
+ pr_debug("zlib init failed?\n");
goto out_free_buf;
}
@@ -290,7 +287,7 @@ static int decompress_exec(
}
if (ret < 0) {
- DBG_FLT("binfmt_flat: decompression failed (%d), %s\n",
+ pr_debug("decompression failed (%d), %s\n",
ret, strm.msg);
goto out_zlib;
}
@@ -327,24 +324,23 @@ calc_reloc(unsigned long r, struct lib_info *p, int curid, int internalp)
r &= 0x00ffffff; /* Trim ID off here */
}
if (id >= MAX_SHARED_LIBS) {
- printk("BINFMT_FLAT: reference 0x%x to shared library %d",
- (unsigned) r, id);
+ pr_err("reference 0x%lx to shared library %d", r, id);
goto failed;
}
if (curid != id) {
if (internalp) {
- printk("BINFMT_FLAT: reloc address 0x%x not in same module "
- "(%d != %d)", (unsigned) r, curid, id);
+ pr_err("reloc address 0x%lx not in same module "
+ "(%d != %d)", r, curid, id);
goto failed;
- } else if ( ! p->lib_list[id].loaded &&
- load_flat_shared_library(id, p) < 0) {
- printk("BINFMT_FLAT: failed to load library %d", id);
+ } else if (!p->lib_list[id].loaded &&
+ load_flat_shared_library(id, p) < 0) {
+ pr_err("failed to load library %d", id);
goto failed;
}
/* Check versioning information (i.e. time stamps) */
if (p->lib_list[id].build_date && p->lib_list[curid].build_date &&
p->lib_list[curid].build_date < p->lib_list[id].build_date) {
- printk("BINFMT_FLAT: library %d is younger than %d", id, curid);
+ pr_err("library %d is younger than %d", id, curid);
goto failed;
}
}
@@ -358,8 +354,8 @@ calc_reloc(unsigned long r, struct lib_info *p, int curid, int internalp)
text_len = p->lib_list[id].text_len;
if (!flat_reloc_valid(r, start_brk - start_data + text_len)) {
- printk("BINFMT_FLAT: reloc outside program 0x%x (0 - 0x%x/0x%x)",
- (int) r,(int)(start_brk-start_data+text_len),(int)text_len);
+ pr_err("reloc outside program 0x%lx (0 - 0x%lx/0x%lx)",
+ r, start_brk-start_data+text_len, text_len);
goto failed;
}
@@ -369,10 +365,10 @@ calc_reloc(unsigned long r, struct lib_info *p, int curid, int internalp)
addr = r - text_len + start_data;
/* Range checked already above so doing the range tests is redundant...*/
- return(addr);
+ return addr;
failed:
- printk(", killing %s!\n", current->comm);
+ pr_cont(", killing %s!\n", current->comm);
send_sig(SIGSEGV, current, 0);
return RELOC_FAILED;
@@ -382,62 +378,57 @@ failed:
static void old_reloc(unsigned long rl)
{
-#ifdef DEBUG
- char *segment[] = { "TEXT", "DATA", "BSS", "*UNKNOWN*" };
-#endif
+ static const char *segment[] = { "TEXT", "DATA", "BSS", "*UNKNOWN*" };
flat_v2_reloc_t r;
- unsigned long *ptr;
-
+ unsigned long __user *ptr;
+ unsigned long val;
+
r.value = rl;
#if defined(CONFIG_COLDFIRE)
- ptr = (unsigned long *) (current->mm->start_code + r.reloc.offset);
+ ptr = (unsigned long __user *)(current->mm->start_code + r.reloc.offset);
#else
- ptr = (unsigned long *) (current->mm->start_data + r.reloc.offset);
+ ptr = (unsigned long __user *)(current->mm->start_data + r.reloc.offset);
#endif
+ get_user(val, ptr);
+
+ pr_debug("Relocation of variable at DATASEG+%x "
+ "(address %p, currently %lx) into segment %s\n",
+ r.reloc.offset, ptr, val, segment[r.reloc.type]);
-#ifdef DEBUG
- printk("Relocation of variable at DATASEG+%x "
- "(address %p, currently %x) into segment %s\n",
- r.reloc.offset, ptr, (int)*ptr, segment[r.reloc.type]);
-#endif
-
switch (r.reloc.type) {
case OLD_FLAT_RELOC_TYPE_TEXT:
- *ptr += current->mm->start_code;
+ val += current->mm->start_code;
break;
case OLD_FLAT_RELOC_TYPE_DATA:
- *ptr += current->mm->start_data;
+ val += current->mm->start_data;
break;
case OLD_FLAT_RELOC_TYPE_BSS:
- *ptr += current->mm->end_data;
+ val += current->mm->end_data;
break;
default:
- printk("BINFMT_FLAT: Unknown relocation type=%x\n", r.reloc.type);
+ pr_err("Unknown relocation type=%x\n", r.reloc.type);
break;
}
+ put_user(val, ptr);
-#ifdef DEBUG
- printk("Relocation became %x\n", (int)*ptr);
-#endif
-}
+ pr_debug("Relocation became %lx\n", val);
+}
/****************************************************************************/
-static int load_flat_file(struct linux_binprm * bprm,
+static int load_flat_file(struct linux_binprm *bprm,
struct lib_info *libinfo, int id, unsigned long *extra_stack)
{
- struct flat_hdr * hdr;
- unsigned long textpos = 0, datapos = 0, result;
- unsigned long realdatastart = 0;
- unsigned long text_len, data_len, bss_len, stack_len, flags;
- unsigned long full_data;
- unsigned long len, memp = 0;
- unsigned long memp_size, extra, rlim;
- unsigned long *reloc = 0, *rp;
+ struct flat_hdr *hdr;
+ unsigned long textpos, datapos, realdatastart;
+ unsigned long text_len, data_len, bss_len, stack_len, full_data, flags;
+ unsigned long len, memp, memp_size, extra, rlim;
+ unsigned long __user *reloc, *rp;
struct inode *inode;
- int i, rev, relocs = 0;
+ int i, rev, relocs;
loff_t fpos;
unsigned long start_code, end_code;
+ ssize_t result;
int ret;
hdr = ((struct flat_hdr *) bprm->buf); /* exec-header */
@@ -469,20 +460,30 @@ static int load_flat_file(struct linux_binprm * bprm,
}
if (flags & FLAT_FLAG_KTRACE)
- printk("BINFMT_FLAT: Loading file: %s\n", bprm->filename);
+ pr_info("Loading file: %s\n", bprm->filename);
if (rev != FLAT_VERSION && rev != OLD_FLAT_VERSION) {
- printk("BINFMT_FLAT: bad flat file version 0x%x (supported "
- "0x%lx and 0x%lx)\n",
- rev, FLAT_VERSION, OLD_FLAT_VERSION);
+ pr_err("bad flat file version 0x%x (supported 0x%lx and 0x%lx)\n",
+ rev, FLAT_VERSION, OLD_FLAT_VERSION);
ret = -ENOEXEC;
goto err;
}
-
+
/* Don't allow old format executables to use shared libraries */
if (rev == OLD_FLAT_VERSION && id != 0) {
- printk("BINFMT_FLAT: shared libraries are not available before rev 0x%x\n",
- (int) FLAT_VERSION);
+ pr_err("shared libraries are not available before rev 0x%lx\n",
+ FLAT_VERSION);
+ ret = -ENOEXEC;
+ goto err;
+ }
+
+ /*
+ * Make sure the header params are sane.
+ * 28 bits (256 MB) is way more than reasonable in this case.
+ * If some top bits are set we have probable binary corruption.
+ */
+ if ((text_len | data_len | bss_len | stack_len | full_data) >> 28) {
+ pr_err("bad header\n");
ret = -ENOEXEC;
goto err;
}
@@ -496,7 +497,7 @@ static int load_flat_file(struct linux_binprm * bprm,
#ifndef CONFIG_BINFMT_ZFLAT
if (flags & (FLAT_FLAG_GZIP|FLAT_FLAG_GZDATA)) {
- printk("Support for ZFLAT executables is not enabled.\n");
+ pr_err("Support for ZFLAT executables is not enabled.\n");
ret = -ENOEXEC;
goto err;
}
@@ -517,11 +518,9 @@ static int load_flat_file(struct linux_binprm * bprm,
/* Flush all traces of the currently running executable */
if (id == 0) {
- result = flush_old_exec(bprm);
- if (result) {
- ret = result;
+ ret = flush_old_exec(bprm);
+ if (ret)
goto err;
- }
/* OK, This is the point of no return */
set_personality(PER_LINUX_32BIT);
@@ -539,48 +538,48 @@ static int load_flat_file(struct linux_binprm * bprm,
* case, and then the fully copied to RAM case which lumps
* it all together.
*/
- if ((flags & (FLAT_FLAG_RAM|FLAT_FLAG_GZIP)) == 0) {
+ if (!IS_ENABLED(CONFIG_MMU) && !(flags & (FLAT_FLAG_RAM|FLAT_FLAG_GZIP))) {
/*
* this should give us a ROM ptr, but if it doesn't we don't
* really care
*/
- DBG_FLT("BINFMT_FLAT: ROM mapping of file (we hope)\n");
+ pr_debug("ROM mapping of file (we hope)\n");
textpos = vm_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC,
MAP_PRIVATE|MAP_EXECUTABLE, 0);
if (!textpos || IS_ERR_VALUE(textpos)) {
- if (!textpos)
- textpos = (unsigned long) -ENOMEM;
- printk("Unable to mmap process text, errno %d\n", (int)-textpos);
ret = textpos;
+ if (!textpos)
+ ret = -ENOMEM;
+ pr_err("Unable to mmap process text, errno %d\n", ret);
goto err;
}
len = data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long);
len = PAGE_ALIGN(len);
- realdatastart = vm_mmap(0, 0, len,
+ realdatastart = vm_mmap(NULL, 0, len,
PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0);
if (realdatastart == 0 || IS_ERR_VALUE(realdatastart)) {
+ ret = realdatastart;
if (!realdatastart)
- realdatastart = (unsigned long) -ENOMEM;
- printk("Unable to allocate RAM for process data, errno %d\n",
- (int)-realdatastart);
+ ret = -ENOMEM;
+ pr_err("Unable to allocate RAM for process data, "
+ "errno %d\n", ret);
vm_munmap(textpos, text_len);
- ret = realdatastart;
goto err;
}
datapos = ALIGN(realdatastart +
MAX_SHARED_LIBS * sizeof(unsigned long),
FLAT_DATA_ALIGN);
- DBG_FLT("BINFMT_FLAT: Allocated data+bss+stack (%d bytes): %x\n",
- (int)(data_len + bss_len + stack_len), (int)datapos);
+ pr_debug("Allocated data+bss+stack (%ld bytes): %lx\n",
+ data_len + bss_len + stack_len, datapos);
fpos = ntohl(hdr->data_start);
#ifdef CONFIG_BINFMT_ZFLAT
if (flags & FLAT_FLAG_GZDATA) {
- result = decompress_exec(bprm, fpos, (char *) datapos,
+ result = decompress_exec(bprm, fpos, (char *)datapos,
full_data, 0);
} else
#endif
@@ -589,29 +588,30 @@ static int load_flat_file(struct linux_binprm * bprm,
full_data);
}
if (IS_ERR_VALUE(result)) {
- printk("Unable to read data+bss, errno %d\n", (int)-result);
+ ret = result;
+ pr_err("Unable to read data+bss, errno %d\n", ret);
vm_munmap(textpos, text_len);
vm_munmap(realdatastart, len);
- ret = result;
goto err;
}
- reloc = (unsigned long *) (datapos+(ntohl(hdr->reloc_start)-text_len));
+ reloc = (unsigned long __user *)
+ (datapos + (ntohl(hdr->reloc_start) - text_len));
memp = realdatastart;
memp_size = len;
} else {
len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long);
len = PAGE_ALIGN(len);
- textpos = vm_mmap(0, 0, len,
+ textpos = vm_mmap(NULL, 0, len,
PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0);
if (!textpos || IS_ERR_VALUE(textpos)) {
- if (!textpos)
- textpos = (unsigned long) -ENOMEM;
- printk("Unable to allocate RAM for process text/data, errno %d\n",
- (int)-textpos);
ret = textpos;
+ if (!textpos)
+ ret = -ENOMEM;
+ pr_err("Unable to allocate RAM for process text/data, "
+ "errno %d\n", ret);
goto err;
}
@@ -620,7 +620,7 @@ static int load_flat_file(struct linux_binprm * bprm,
MAX_SHARED_LIBS * sizeof(unsigned long),
FLAT_DATA_ALIGN);
- reloc = (unsigned long *)
+ reloc = (unsigned long __user *)
(datapos + (ntohl(hdr->reloc_start) - text_len));
memp = textpos;
memp_size = len;
@@ -629,21 +629,59 @@ static int load_flat_file(struct linux_binprm * bprm,
* load it all in and treat it like a RAM load from now on
*/
if (flags & FLAT_FLAG_GZIP) {
- result = decompress_exec(bprm, sizeof (struct flat_hdr),
- (((char *) textpos) + sizeof (struct flat_hdr)),
+#ifndef CONFIG_MMU
+ result = decompress_exec(bprm, sizeof(struct flat_hdr),
+ (((char *)textpos) + sizeof(struct flat_hdr)),
(text_len + full_data
- - sizeof (struct flat_hdr)),
+ - sizeof(struct flat_hdr)),
0);
memmove((void *) datapos, (void *) realdatastart,
full_data);
+#else
+ /*
+ * This is used on MMU systems mainly for testing.
+ * Let's use a kernel buffer to simplify things.
+ */
+ long unz_text_len = text_len - sizeof(struct flat_hdr);
+ long unz_len = unz_text_len + full_data;
+ char *unz_data = vmalloc(unz_len);
+ if (!unz_data) {
+ result = -ENOMEM;
+ } else {
+ result = decompress_exec(bprm, sizeof(struct flat_hdr),
+ unz_data, unz_len, 0);
+ if (result == 0 &&
+ (copy_to_user((void __user *)textpos + sizeof(struct flat_hdr),
+ unz_data, unz_text_len) ||
+ copy_to_user((void __user *)datapos,
+ unz_data + unz_text_len, full_data)))
+ result = -EFAULT;
+ vfree(unz_data);
+ }
+#endif
} else if (flags & FLAT_FLAG_GZDATA) {
result = read_code(bprm->file, textpos, 0, text_len);
- if (!IS_ERR_VALUE(result))
+ if (!IS_ERR_VALUE(result)) {
+#ifndef CONFIG_MMU
result = decompress_exec(bprm, text_len, (char *) datapos,
full_data, 0);
- }
- else
+#else
+ char *unz_data = vmalloc(full_data);
+ if (!unz_data) {
+ result = -ENOMEM;
+ } else {
+ result = decompress_exec(bprm, text_len,
+ unz_data, full_data, 0);
+ if (result == 0 &&
+ copy_to_user((void __user *)datapos,
+ unz_data, full_data))
+ result = -EFAULT;
+ vfree(unz_data);
+ }
#endif
+ }
+ } else
+#endif /* CONFIG_BINFMT_ZFLAT */
{
result = read_code(bprm->file, textpos, 0, text_len);
if (!IS_ERR_VALUE(result))
@@ -652,21 +690,19 @@ static int load_flat_file(struct linux_binprm * bprm,
full_data);
}
if (IS_ERR_VALUE(result)) {
- printk("Unable to read code+data+bss, errno %d\n",(int)-result);
+ ret = result;
+ pr_err("Unable to read code+data+bss, errno %d\n", ret);
vm_munmap(textpos, text_len + data_len + extra +
MAX_SHARED_LIBS * sizeof(unsigned long));
- ret = result;
goto err;
}
}
- if (flags & FLAT_FLAG_KTRACE)
- printk("Mapping is %x, Entry point is %x, data_start is %x\n",
- (int)textpos, 0x00ffffff&ntohl(hdr->entry), ntohl(hdr->data_start));
+ start_code = textpos + sizeof(struct flat_hdr);
+ end_code = textpos + text_len;
+ text_len -= sizeof(struct flat_hdr); /* the real code len */
/* The main program needs a little extra setup in the task structure */
- start_code = textpos + sizeof (struct flat_hdr);
- end_code = textpos + text_len;
if (id == 0) {
current->mm->start_code = start_code;
current->mm->end_code = end_code;
@@ -681,19 +717,19 @@ static int load_flat_file(struct linux_binprm * bprm,
*/
current->mm->start_brk = datapos + data_len + bss_len;
current->mm->brk = (current->mm->start_brk + 3) & ~3;
+#ifndef CONFIG_MMU
current->mm->context.end_brk = memp + memp_size - stack_len;
+#endif
}
- if (flags & FLAT_FLAG_KTRACE)
- printk("%s %s: TEXT=%x-%x DATA=%x-%x BSS=%x-%x\n",
+ if (flags & FLAT_FLAG_KTRACE) {
+ pr_info("Mapping is %lx, Entry point is %x, data_start is %x\n",
+ textpos, 0x00ffffff&ntohl(hdr->entry), ntohl(hdr->data_start));
+ pr_info("%s %s: TEXT=%lx-%lx DATA=%lx-%lx BSS=%lx-%lx\n",
id ? "Lib" : "Load", bprm->filename,
- (int) start_code, (int) end_code,
- (int) datapos,
- (int) (datapos + data_len),
- (int) (datapos + data_len),
- (int) (((datapos + data_len + bss_len) + 3) & ~3));
-
- text_len -= sizeof(struct flat_hdr); /* the real code len */
+ start_code, end_code, datapos, datapos + data_len,
+ datapos + data_len, (datapos + data_len + bss_len + 3) & ~3);
+ }
/* Store the current module values into the global library structure */
libinfo->lib_list[id].start_code = start_code;
@@ -703,7 +739,7 @@ static int load_flat_file(struct linux_binprm * bprm,
libinfo->lib_list[id].loaded = 1;
libinfo->lib_list[id].entry = (0x00ffffff & ntohl(hdr->entry)) + textpos;
libinfo->lib_list[id].build_date = ntohl(hdr->build_date);
-
+
/*
* We just load the allocations into some temporary memory to
* help simplify all this mumbo jumbo
@@ -717,15 +753,20 @@ static int load_flat_file(struct linux_binprm * bprm,
* image.
*/
if (flags & FLAT_FLAG_GOTPIC) {
- for (rp = (unsigned long *)datapos; *rp != 0xffffffff; rp++) {
- unsigned long addr;
- if (*rp) {
- addr = calc_reloc(*rp, libinfo, id, 0);
+ for (rp = (unsigned long __user *)datapos; ; rp++) {
+ unsigned long addr, rp_val;
+ if (get_user(rp_val, rp))
+ return -EFAULT;
+ if (rp_val == 0xffffffff)
+ break;
+ if (rp_val) {
+ addr = calc_reloc(rp_val, libinfo, id, 0);
if (addr == RELOC_FAILED) {
ret = -ENOEXEC;
goto err;
}
- *rp = addr;
+ if (put_user(addr, rp))
+ return -EFAULT;
}
}
}
@@ -742,19 +783,23 @@ static int load_flat_file(struct linux_binprm * bprm,
* __start to address 4 so that is okay).
*/
if (rev > OLD_FLAT_VERSION) {
- unsigned long persistent = 0;
- for (i=0; i < relocs; i++) {
+ unsigned long __maybe_unused persistent = 0;
+ for (i = 0; i < relocs; i++) {
unsigned long addr, relval;
- /* Get the address of the pointer to be
- relocated (of course, the address has to be
- relocated first). */
- relval = ntohl(reloc[i]);
- if (flat_set_persistent (relval, &persistent))
+ /*
+ * Get the address of the pointer to be
+ * relocated (of course, the address has to be
+ * relocated first).
+ */
+ if (get_user(relval, reloc + i))
+ return -EFAULT;
+ relval = ntohl(relval);
+ if (flat_set_persistent(relval, &persistent))
continue;
addr = flat_get_relocate_addr(relval);
- rp = (unsigned long *) calc_reloc(addr, libinfo, id, 1);
- if (rp == (unsigned long *)RELOC_FAILED) {
+ rp = (unsigned long __user *)calc_reloc(addr, libinfo, id, 1);
+ if (rp == (unsigned long __user *)RELOC_FAILED) {
ret = -ENOEXEC;
goto err;
}
@@ -780,17 +825,23 @@ static int load_flat_file(struct linux_binprm * bprm,
}
}
} else {
- for (i=0; i < relocs; i++)
- old_reloc(ntohl(reloc[i]));
+ for (i = 0; i < relocs; i++) {
+ unsigned long relval;
+ if (get_user(relval, reloc + i))
+ return -EFAULT;
+ relval = ntohl(relval);
+ old_reloc(relval);
+ }
}
-
+
flush_icache_range(start_code, end_code);
/* zero the BSS, BRK and stack areas */
- memset((void*)(datapos + data_len), 0, bss_len +
- (memp + memp_size - stack_len - /* end brk */
- libinfo->lib_list[id].start_brk) + /* start brk */
- stack_len);
+ if (clear_user((void __user *)(datapos + data_len), bss_len +
+ (memp + memp_size - stack_len - /* end brk */
+ libinfo->lib_list[id].start_brk) + /* start brk */
+ stack_len))
+ return -EFAULT;
return 0;
err:
@@ -846,7 +897,7 @@ out:
allow_write_access(bprm.file);
fput(bprm.file);
- return(res);
+ return res;
}
#endif /* CONFIG_BINFMT_SHARED_FLAT */
@@ -857,18 +908,17 @@ out:
* libraries. There is no binary dependent code anywhere else.
*/
-static int load_flat_binary(struct linux_binprm * bprm)
+static int load_flat_binary(struct linux_binprm *bprm)
{
struct lib_info libinfo;
struct pt_regs *regs = current_pt_regs();
- unsigned long p = bprm->p;
- unsigned long stack_len;
+ unsigned long stack_len = 0;
unsigned long start_addr;
- unsigned long *sp;
int res;
int i, j;
memset(&libinfo, 0, sizeof(libinfo));
+
/*
* We have to add the size of our arguments to our stack size
* otherwise it's too easy for users to create stack overflows
@@ -876,38 +926,54 @@ static int load_flat_binary(struct linux_binprm * bprm)
* pedantic and include space for the argv/envp array as it may have
* a lot of entries.
*/
-#define TOP_OF_ARGS (PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *))
- stack_len = TOP_OF_ARGS - bprm->p; /* the strings */
- stack_len += (bprm->argc + 1) * sizeof(char *); /* the argv array */
- stack_len += (bprm->envc + 1) * sizeof(char *); /* the envp array */
- stack_len += FLAT_STACK_ALIGN - 1; /* reserve for upcoming alignment */
-
+#ifndef CONFIG_MMU
+ stack_len += PAGE_SIZE * MAX_ARG_PAGES - bprm->p; /* the strings */
+#endif
+ stack_len += (bprm->argc + 1) * sizeof(char *); /* the argv array */
+ stack_len += (bprm->envc + 1) * sizeof(char *); /* the envp array */
+ stack_len = ALIGN(stack_len, FLAT_STACK_ALIGN);
+
res = load_flat_file(bprm, &libinfo, 0, &stack_len);
if (res < 0)
return res;
-
+
/* Update data segment pointers for all libraries */
- for (i=0; i<MAX_SHARED_LIBS; i++)
- if (libinfo.lib_list[i].loaded)
- for (j=0; j<MAX_SHARED_LIBS; j++)
- (-(j+1))[(unsigned long *)(libinfo.lib_list[i].start_data)] =
- (libinfo.lib_list[j].loaded)?
- libinfo.lib_list[j].start_data:UNLOADED_LIB;
+ for (i = 0; i < MAX_SHARED_LIBS; i++) {
+ if (!libinfo.lib_list[i].loaded)
+ continue;
+ for (j = 0; j < MAX_SHARED_LIBS; j++) {
+ unsigned long val = libinfo.lib_list[j].loaded ?
+ libinfo.lib_list[j].start_data : UNLOADED_LIB;
+ unsigned long __user *p = (unsigned long __user *)
+ libinfo.lib_list[i].start_data;
+ p -= j + 1;
+ if (put_user(val, p))
+ return -EFAULT;
+ }
+ }
install_exec_creds(bprm);
set_binfmt(&flat_format);
- p = ((current->mm->context.end_brk + stack_len + 3) & ~3) - 4;
- DBG_FLT("p=%x\n", (int)p);
+#ifdef CONFIG_MMU
+ res = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
+ if (!res)
+ res = create_flat_tables(bprm, bprm->p);
+#else
+ /* Stash our initial stack pointer into the mm structure */
+ current->mm->start_stack =
+ ((current->mm->context.end_brk + stack_len + 3) & ~3) - 4;
+ pr_debug("sp=%lx\n", current->mm->start_stack);
- /* copy the arg pages onto the stack, this could be more efficient :-) */
- for (i = TOP_OF_ARGS - 1; i >= bprm->p; i--)
- * (char *) --p =
- ((char *) page_address(bprm->page[i/PAGE_SIZE]))[i % PAGE_SIZE];
+ /* copy the arg pages onto the stack */
+ res = transfer_args_to_stack(bprm, &current->mm->start_stack);
+ if (!res)
+ res = create_flat_tables(bprm, current->mm->start_stack);
+#endif
+ if (res)
+ return res;
- sp = (unsigned long *) create_flat_tables(p, bprm);
-
/* Fake some return addresses to ensure the call chain will
* initialise library in order for us. We are required to call
* lib 1 first, then 2, ... and finally the main program (id 0).
@@ -915,24 +981,24 @@ static int load_flat_binary(struct linux_binprm * bprm)
start_addr = libinfo.lib_list[0].entry;
#ifdef CONFIG_BINFMT_SHARED_FLAT
- for (i = MAX_SHARED_LIBS-1; i>0; i--) {
+ for (i = MAX_SHARED_LIBS-1; i > 0; i--) {
if (libinfo.lib_list[i].loaded) {
/* Push the previous start address first to chain the calls */
- --sp; put_user(start_addr, sp);
+ unsigned long __user *sp;
+ current->mm->start_stack -= sizeof(unsigned long);
+ sp = (unsigned long __user *)current->mm->start_stack;
+ __put_user(start_addr, sp);
start_addr = libinfo.lib_list[i].entry;
}
}
#endif
-
- /* Stash our initial stack pointer into the mm structure */
- current->mm->start_stack = (unsigned long )sp;
#ifdef FLAT_PLAT_INIT
FLAT_PLAT_INIT(regs);
#endif
- DBG_FLT("start_thread(regs=0x%x, entry=0x%x, start_stack=0x%x)\n",
- (int)regs, (int)start_addr, (int)current->mm->start_stack);
-
+
+ pr_debug("start_thread(regs=0x%p, entry=0x%lx, start_stack=0x%lx)\n",
+ regs, start_addr, current->mm->start_stack);
start_thread(regs, start_addr, current->mm->start_stack);
return 0;
@@ -945,9 +1011,6 @@ static int __init init_flat_binfmt(void)
register_binfmt(&flat_format);
return 0;
}
-
-/****************************************************************************/
-
core_initcall(init_flat_binfmt);
/****************************************************************************/
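
The MMU branch added above boils down to one reusable shape: decompress into a kernel buffer, then publish the result with copy_to_user(), because the destination is user memory that cannot be written through a bare kernel pointer. A minimal sketch of that shape; load_into_user() and its inflate callback are illustrative names, not anything in this patch:

#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

/*
 * Sketch only: fill a vmalloc() buffer, then hand it to the user
 * mapping in one step so faults are handled by copy_to_user().
 */
static int load_into_user(void __user *dst, size_t len,
			  int (*inflate)(void *buf, size_t len))
{
	char *buf = vmalloc(len);
	int ret;

	if (!buf)
		return -ENOMEM;
	ret = inflate(buf, len);	/* decompress into kernel memory */
	if (ret == 0 && copy_to_user(dst, buf, len))
		ret = -EFAULT;
	vfree(buf);
	return ret;
}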
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 3a3ced779fc7..6103a6362ccd 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -26,6 +26,8 @@
#include <linux/fs.h>
#include <linux/uaccess.h>
+#include "internal.h"
+
#ifdef DEBUG
# define USE_DEBUG 1
#else
@@ -43,6 +45,7 @@ enum {Enabled, Magic};
#define MISC_FMT_PRESERVE_ARGV0 (1 << 31)
#define MISC_FMT_OPEN_BINARY (1 << 30)
#define MISC_FMT_CREDENTIALS (1 << 29)
+#define MISC_FMT_OPEN_FILE (1 << 28)
typedef struct {
struct list_head list;
@@ -54,6 +57,7 @@ typedef struct {
char *interpreter; /* filename of interpreter */
char *name;
struct dentry *dentry;
+ struct file *interp_file;
} Node;
static DEFINE_RWLOCK(entries_lock);
@@ -201,7 +205,13 @@ static int load_misc_binary(struct linux_binprm *bprm)
if (retval < 0)
goto error;
- interp_file = open_exec(iname);
+ if (fmt->flags & MISC_FMT_OPEN_FILE && fmt->interp_file) {
+ interp_file = filp_clone_open(fmt->interp_file);
+ if (!IS_ERR(interp_file))
+ deny_write_access(interp_file);
+ } else {
+ interp_file = open_exec(iname);
+ }
retval = PTR_ERR(interp_file);
if (IS_ERR(interp_file))
goto error;
@@ -285,6 +295,11 @@ static char *check_special_flags(char *sfs, Node *e)
e->flags |= (MISC_FMT_CREDENTIALS |
MISC_FMT_OPEN_BINARY);
break;
+ case 'F':
+ pr_debug("register: flag: F: open interpreter file now\n");
+ p++;
+ e->flags |= MISC_FMT_OPEN_FILE;
+ break;
default:
cont = 0;
}
@@ -543,6 +558,8 @@ static void entry_status(Node *e, char *page)
*dp++ = 'O';
if (e->flags & MISC_FMT_CREDENTIALS)
*dp++ = 'C';
+ if (e->flags & MISC_FMT_OPEN_FILE)
+ *dp++ = 'F';
*dp++ = '\n';
if (!test_bit(Magic, &e->flags)) {
@@ -590,6 +607,11 @@ static void kill_node(Node *e)
}
write_unlock(&entries_lock);
+ if ((e->flags & MISC_FMT_OPEN_FILE) && e->interp_file) {
+ filp_close(e->interp_file, NULL);
+ e->interp_file = NULL;
+ }
+
if (dentry) {
drop_nlink(d_inode(dentry));
d_drop(dentry);
@@ -637,13 +659,12 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer,
break;
case 3:
/* Delete this handler. */
- root = dget(file->f_path.dentry->d_sb->s_root);
+ root = file_inode(file)->i_sb->s_root;
inode_lock(d_inode(root));
kill_node(e);
inode_unlock(d_inode(root));
- dput(root);
break;
default:
return res;
@@ -665,8 +686,8 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
{
Node *e;
struct inode *inode;
- struct dentry *root, *dentry;
- struct super_block *sb = file->f_path.dentry->d_sb;
+ struct super_block *sb = file_inode(file)->i_sb;
+ struct dentry *root = sb->s_root, *dentry;
int err = 0;
e = create_entry(buffer, count);
@@ -674,7 +695,6 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
if (IS_ERR(e))
return PTR_ERR(e);
- root = dget(sb->s_root);
inode_lock(d_inode(root));
dentry = lookup_one_len(e->name, root, strlen(e->name));
err = PTR_ERR(dentry);
@@ -698,6 +718,21 @@ static ssize_t bm_register_write(struct file *file, const char __user *buffer,
goto out2;
}
+ if (e->flags & MISC_FMT_OPEN_FILE) {
+ struct file *f;
+
+ f = open_exec(e->interpreter);
+ if (IS_ERR(f)) {
+ err = PTR_ERR(f);
+ pr_notice("register: failed to install interpreter file %s\n", e->interpreter);
+ simple_release_fs(&bm_mnt, &entry_count);
+ iput(inode);
+ inode = NULL;
+ goto out2;
+ }
+ e->interp_file = f;
+ }
+
e->dentry = dget(dentry);
inode->i_private = e;
inode->i_fop = &bm_entry_operations;
@@ -712,11 +747,10 @@ out2:
dput(dentry);
out:
inode_unlock(d_inode(root));
- dput(root);
if (err) {
kfree(e);
- return -EINVAL;
+ return err;
}
return count;
}
@@ -753,14 +787,13 @@ static ssize_t bm_status_write(struct file *file, const char __user *buffer,
break;
case 3:
/* Delete all handlers. */
- root = dget(file->f_path.dentry->d_sb->s_root);
+ root = file_inode(file)->i_sb->s_root;
inode_lock(d_inode(root));
while (!list_empty(&entries))
kill_node(list_entry(entries.next, Node, list));
inode_unlock(d_inode(root));
- dput(root);
break;
default:
return res;
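
The new 'F' flag added above is requested through the usual register string, ':name:type:offset:magic:mask:interpreter:flags'. A hedged user-space sketch; the '.flt' extension rule and the interpreter path are invented for illustration:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* 'E' matches by extension; the trailing 'F' asks the kernel to
	 * open the interpreter right away (the flag added above). */
	const char *rule = ":flatrun:E::flt::/usr/local/bin/flat-runner:F";
	int fd = open("/proc/sys/fs/binfmt_misc/register", O_WRONLY);

	if (fd < 0 || write(fd, rule, strlen(rule)) != (ssize_t)strlen(rule)) {
		perror("binfmt_misc register");
		return 1;
	}
	return close(fd) ? 1 : 0;
}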
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 71ccab1d22c6..c3cdde87cc8c 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -416,7 +416,7 @@ int bdev_read_page(struct block_device *bdev, sector_t sector,
result = blk_queue_enter(bdev->bd_queue, false);
if (result)
return result;
- result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ);
+ result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, false);
blk_queue_exit(bdev->bd_queue);
return result;
}
@@ -445,7 +445,6 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
struct page *page, struct writeback_control *wbc)
{
int result;
- int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE;
const struct block_device_operations *ops = bdev->bd_disk->fops;
if (!ops->rw_page || bdev_get_integrity(bdev))
@@ -455,7 +454,7 @@ int bdev_write_page(struct block_device *bdev, sector_t sector,
return result;
set_page_writeback(page);
- result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw);
+ result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true);
if (result)
end_page_writeback(page);
else
@@ -493,7 +492,7 @@ long bdev_direct_access(struct block_device *bdev, struct blk_dax_ctl *dax)
if (size < 0)
return size;
- if (!ops->direct_access)
+ if (!blk_queue_dax(bdev_get_queue(bdev)) || !ops->direct_access)
return -EOPNOTSUPP;
if ((sector + DIV_ROUND_UP(size, 512)) >
part_nr_sects_read(bdev->bd_part))
@@ -614,7 +613,6 @@ static void init_once(void *foo)
memset(bdev, 0, sizeof(*bdev));
mutex_init(&bdev->bd_mutex);
- INIT_LIST_HEAD(&bdev->bd_inodes);
INIT_LIST_HEAD(&bdev->bd_list);
#ifdef CONFIG_SYSFS
INIT_LIST_HEAD(&bdev->bd_holder_disks);
@@ -624,24 +622,13 @@ static void init_once(void *foo)
mutex_init(&bdev->bd_fsfreeze_mutex);
}
-static inline void __bd_forget(struct inode *inode)
-{
- list_del_init(&inode->i_devices);
- inode->i_bdev = NULL;
- inode->i_mapping = &inode->i_data;
-}
-
static void bdev_evict_inode(struct inode *inode)
{
struct block_device *bdev = &BDEV_I(inode)->bdev;
- struct list_head *p;
truncate_inode_pages_final(&inode->i_data);
invalidate_inode_buffers(inode); /* is it needed here? */
clear_inode(inode);
spin_lock(&bdev_lock);
- while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) {
- __bd_forget(list_entry(p, struct inode, i_devices));
- }
list_del_init(&bdev->bd_list);
spin_unlock(&bdev_lock);
}
@@ -805,7 +792,6 @@ static struct block_device *bd_acquire(struct inode *inode)
bdgrab(bdev);
inode->i_bdev = bdev;
inode->i_mapping = bdev->bd_inode->i_mapping;
- list_add(&inode->i_devices, &bdev->bd_inodes);
}
spin_unlock(&bdev_lock);
}
@@ -821,7 +807,8 @@ void bd_forget(struct inode *inode)
spin_lock(&bdev_lock);
if (!sb_is_blkdev_sb(inode->i_sb))
bdev = inode->i_bdev;
- __bd_forget(inode);
+ inode->i_bdev = NULL;
+ inode->i_mapping = &inode->i_data;
spin_unlock(&bdev_lock);
if (bdev)
@@ -1287,10 +1274,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
bdev->bd_disk = disk;
bdev->bd_queue = disk->queue;
bdev->bd_contains = bdev;
- if (IS_ENABLED(CONFIG_BLK_DEV_DAX) && disk->fops->direct_access)
- bdev->bd_inode->i_flags = S_DAX;
- else
- bdev->bd_inode->i_flags = 0;
+ bdev->bd_inode->i_flags = 0;
if (!partno) {
ret = -ENXIO;
@@ -1857,7 +1841,7 @@ struct block_device *lookup_bdev(const char *pathname)
if (!S_ISBLK(inode->i_mode))
goto fail;
error = -EACCES;
- if (path.mnt->mnt_flags & MNT_NODEV)
+ if (!may_open_dev(&path))
goto fail;
error = -ENOMEM;
bdev = bd_acquire(inode);
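
Since rw_page now takes a bool instead of a READ/WRITE value, driver hooks change shape accordingly. A sketch of a conforming implementation; struct mydev and its sector helpers are invented stand-ins:

#include <linux/blkdev.h>

struct mydev;	/* hypothetical driver state */
int mydev_read_sectors(struct mydev *dev, sector_t sector, struct page *page);
int mydev_write_sectors(struct mydev *dev, sector_t sector, struct page *page);

/* The last argument is now simply "is this a write?". */
static int mydev_rw_page(struct block_device *bdev, sector_t sector,
			 struct page *page, bool is_write)
{
	struct mydev *dev = bdev->bd_disk->private_data;

	if (is_write)
		return mydev_write_sectors(dev, sector, page);
	return mydev_read_sectors(dev, sector, page);
}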
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 67a607709d4f..53bb7af4e5f0 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -55,8 +55,7 @@ struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
}
if (size > 0) {
acl = posix_acl_from_xattr(&init_user_ns, value, size);
- } else if (size == -ENOENT || size == -ENODATA || size == 0) {
- /* FIXME, who returns -ENOENT? I think nobody */
+ } else if (size == -ERANGE || size == -ENODATA || size == 0) {
acl = NULL;
} else {
acl = ERR_PTR(-EIO);
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 5fb60ea7eee2..e0f071f6b5a7 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -34,6 +34,10 @@
struct __btrfs_workqueue {
struct workqueue_struct *normal_wq;
+
+ /* File system this workqueue services */
+ struct btrfs_fs_info *fs_info;
+
/* List head pointing to ordered work list */
struct list_head ordered_list;
@@ -70,6 +74,18 @@ void btrfs_##name(struct work_struct *arg) \
normal_work_helper(work); \
}
+struct btrfs_fs_info *
+btrfs_workqueue_owner(struct __btrfs_workqueue *wq)
+{
+ return wq->fs_info;
+}
+
+struct btrfs_fs_info *
+btrfs_work_owner(struct btrfs_work *work)
+{
+ return work->wq->fs_info;
+}
+
BTRFS_WORK_HELPER(worker_helper);
BTRFS_WORK_HELPER(delalloc_helper);
BTRFS_WORK_HELPER(flush_delalloc_helper);
@@ -94,14 +110,15 @@ BTRFS_WORK_HELPER(scrubnc_helper);
BTRFS_WORK_HELPER(scrubparity_helper);
static struct __btrfs_workqueue *
-__btrfs_alloc_workqueue(const char *name, unsigned int flags, int limit_active,
- int thresh)
+__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
+ unsigned int flags, int limit_active, int thresh)
{
struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
+ ret->fs_info = fs_info;
ret->limit_active = limit_active;
atomic_set(&ret->pending, 0);
if (thresh == 0)
@@ -143,7 +160,8 @@ __btrfs_alloc_workqueue(const char *name, unsigned int flags, int limit_active,
static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);
-struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
+struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
+ const char *name,
unsigned int flags,
int limit_active,
int thresh)
@@ -153,7 +171,8 @@ struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
if (!ret)
return NULL;
- ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
+ ret->normal = __btrfs_alloc_workqueue(fs_info, name,
+ flags & ~WQ_HIGHPRI,
limit_active, thresh);
if (!ret->normal) {
kfree(ret);
@@ -161,8 +180,8 @@ struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
}
if (flags & WQ_HIGHPRI) {
- ret->high = __btrfs_alloc_workqueue(name, flags, limit_active,
- thresh);
+ ret->high = __btrfs_alloc_workqueue(fs_info, name, flags,
+ limit_active, thresh);
if (!ret->high) {
__btrfs_destroy_workqueue(ret->normal);
kfree(ret);
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index ad4d0647d1a6..8e52484cd461 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -21,6 +21,7 @@
#define __BTRFS_ASYNC_THREAD_
#include <linux/workqueue.h>
+struct btrfs_fs_info;
struct btrfs_workqueue;
/* Internal use only */
struct __btrfs_workqueue;
@@ -67,7 +68,8 @@ BTRFS_WORK_HELPER_PROTO(scrubnc_helper);
BTRFS_WORK_HELPER_PROTO(scrubparity_helper);
-struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
+struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
+ const char *name,
unsigned int flags,
int limit_active,
int thresh);
@@ -80,4 +82,6 @@ void btrfs_queue_work(struct btrfs_workqueue *wq,
void btrfs_destroy_workqueue(struct btrfs_workqueue *wq);
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
void btrfs_set_work_high_priority(struct btrfs_work *work);
+struct btrfs_fs_info *btrfs_work_owner(struct btrfs_work *work);
+struct btrfs_fs_info *btrfs_workqueue_owner(struct __btrfs_workqueue *wq);
#endif
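
The two owner accessors exist so completion paths can get from a work item back to the filesystem that queued it. A hedged fragment of a consumer, assuming the btrfs internal headers; the handler name is invented:

/* Hypothetical work handler using the new accessor. */
static void example_work_handler(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);

	btrfs_info(fs_info, "work item handled");
}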
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 8bb3509099e8..2b88439c2ee8 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -139,7 +139,7 @@ int __init btrfs_prelim_ref_init(void)
btrfs_prelim_ref_cache = kmem_cache_create("btrfs_prelim_ref",
sizeof(struct __prelim_ref),
0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+ SLAB_MEM_SPREAD,
NULL);
if (!btrfs_prelim_ref_cache)
return -ENOMEM;
@@ -361,7 +361,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
goto out;
}
- if (btrfs_test_is_dummy_root(root)) {
+ if (btrfs_is_testing(fs_info)) {
srcu_read_unlock(&fs_info->subvol_srcu, index);
ret = -ENOENT;
goto out;
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 7706c8dc5fa6..66789471b49d 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -1673,6 +1673,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
}
bio->bi_bdev = block_ctx->dev->bdev;
bio->bi_iter.bi_sector = dev_bytenr >> 9;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
for (j = i; j < num_pages; j++) {
ret = bio_add_page(bio, block_ctx->pagev[j],
@@ -1685,7 +1686,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
"btrfsic: error, failed to add a single page!\n");
return -1;
}
- if (submit_bio_wait(READ, bio)) {
+ if (submit_bio_wait(bio)) {
printk(KERN_INFO
"btrfsic: read error at logical %llu dev %s!\n",
block_ctx->start, block_ctx->dev->name);
@@ -2206,7 +2207,7 @@ static void btrfsic_bio_end_io(struct bio *bp)
block->dev_bytenr, block->mirror_num);
next_block = block->next_in_same_bio;
block->iodone_w_error = iodone_w_error;
- if (block->submit_bio_bh_rw & REQ_FLUSH) {
+ if (block->submit_bio_bh_rw & REQ_PREFLUSH) {
dev_state->last_flush_gen++;
if ((dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
@@ -2242,7 +2243,7 @@ static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate)
block->dev_bytenr, block->mirror_num);
block->iodone_w_error = iodone_w_error;
- if (block->submit_bio_bh_rw & REQ_FLUSH) {
+ if (block->submit_bio_bh_rw & REQ_PREFLUSH) {
dev_state->last_flush_gen++;
if ((dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
@@ -2855,12 +2856,12 @@ static struct btrfsic_dev_state *btrfsic_dev_state_lookup(
return ds;
}
-int btrfsic_submit_bh(int rw, struct buffer_head *bh)
+int btrfsic_submit_bh(int op, int op_flags, struct buffer_head *bh)
{
struct btrfsic_dev_state *dev_state;
if (!btrfsic_is_initialized)
- return submit_bh(rw, bh);
+ return submit_bh(op, op_flags, bh);
mutex_lock(&btrfsic_mutex);
/* since btrfsic_submit_bh() might also be called before
@@ -2869,26 +2870,26 @@ int btrfsic_submit_bh(int rw, struct buffer_head *bh)
/* Only called to write the superblock (incl. FLUSH/FUA) */
if (NULL != dev_state &&
- (rw & WRITE) && bh->b_size > 0) {
+ (op == REQ_OP_WRITE) && bh->b_size > 0) {
u64 dev_bytenr;
dev_bytenr = 4096 * bh->b_blocknr;
if (dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
printk(KERN_INFO
- "submit_bh(rw=0x%x, blocknr=%llu (bytenr %llu),"
- " size=%zu, data=%p, bdev=%p)\n",
- rw, (unsigned long long)bh->b_blocknr,
+ "submit_bh(op=0x%x,0x%x, blocknr=%llu "
+ "(bytenr %llu), size=%zu, data=%p, bdev=%p)\n",
+ op, op_flags, (unsigned long long)bh->b_blocknr,
dev_bytenr, bh->b_size, bh->b_data, bh->b_bdev);
btrfsic_process_written_block(dev_state, dev_bytenr,
&bh->b_data, 1, NULL,
- NULL, bh, rw);
- } else if (NULL != dev_state && (rw & REQ_FLUSH)) {
+ NULL, bh, op_flags);
+ } else if (NULL != dev_state && (op_flags & REQ_PREFLUSH)) {
if (dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
printk(KERN_INFO
- "submit_bh(rw=0x%x FLUSH, bdev=%p)\n",
- rw, bh->b_bdev);
+ "submit_bh(op=0x%x,0x%x FLUSH, bdev=%p)\n",
+ op, op_flags, bh->b_bdev);
if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
if ((dev_state->state->print_mask &
(BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
@@ -2906,7 +2907,7 @@ int btrfsic_submit_bh(int rw, struct buffer_head *bh)
block->never_written = 0;
block->iodone_w_error = 0;
block->flush_gen = dev_state->last_flush_gen + 1;
- block->submit_bio_bh_rw = rw;
+ block->submit_bio_bh_rw = op_flags;
block->orig_bio_bh_private = bh->b_private;
block->orig_bio_bh_end_io.bh = bh->b_end_io;
block->next_in_same_bio = NULL;
@@ -2915,10 +2916,10 @@ int btrfsic_submit_bh(int rw, struct buffer_head *bh)
}
}
mutex_unlock(&btrfsic_mutex);
- return submit_bh(rw, bh);
+ return submit_bh(op, op_flags, bh);
}
-static void __btrfsic_submit_bio(int rw, struct bio *bio)
+static void __btrfsic_submit_bio(struct bio *bio)
{
struct btrfsic_dev_state *dev_state;
@@ -2930,7 +2931,7 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
* btrfsic_mount(), this might return NULL */
dev_state = btrfsic_dev_state_lookup(bio->bi_bdev);
if (NULL != dev_state &&
- (rw & WRITE) && NULL != bio->bi_io_vec) {
+ (bio_op(bio) == REQ_OP_WRITE) && NULL != bio->bi_io_vec) {
unsigned int i;
u64 dev_bytenr;
u64 cur_bytenr;
@@ -2942,9 +2943,9 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
if (dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
printk(KERN_INFO
- "submit_bio(rw=0x%x, bi_vcnt=%u,"
+ "submit_bio(rw=%d,0x%x, bi_vcnt=%u,"
" bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
- rw, bio->bi_vcnt,
+ bio_op(bio), bio->bi_opf, bio->bi_vcnt,
(unsigned long long)bio->bi_iter.bi_sector,
dev_bytenr, bio->bi_bdev);
@@ -2975,18 +2976,18 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
btrfsic_process_written_block(dev_state, dev_bytenr,
mapped_datav, bio->bi_vcnt,
bio, &bio_is_patched,
- NULL, rw);
+ NULL, bio->bi_opf);
while (i > 0) {
i--;
kunmap(bio->bi_io_vec[i].bv_page);
}
kfree(mapped_datav);
- } else if (NULL != dev_state && (rw & REQ_FLUSH)) {
+ } else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
if (dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
printk(KERN_INFO
- "submit_bio(rw=0x%x FLUSH, bdev=%p)\n",
- rw, bio->bi_bdev);
+ "submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n",
+ bio_op(bio), bio->bi_opf, bio->bi_bdev);
if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
if ((dev_state->state->print_mask &
(BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
@@ -3004,7 +3005,7 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
block->never_written = 0;
block->iodone_w_error = 0;
block->flush_gen = dev_state->last_flush_gen + 1;
- block->submit_bio_bh_rw = rw;
+ block->submit_bio_bh_rw = bio->bi_opf;
block->orig_bio_bh_private = bio->bi_private;
block->orig_bio_bh_end_io.bio = bio->bi_end_io;
block->next_in_same_bio = NULL;
@@ -3016,16 +3017,16 @@ leave:
mutex_unlock(&btrfsic_mutex);
}
-void btrfsic_submit_bio(int rw, struct bio *bio)
+void btrfsic_submit_bio(struct bio *bio)
{
- __btrfsic_submit_bio(rw, bio);
- submit_bio(rw, bio);
+ __btrfsic_submit_bio(bio);
+ submit_bio(bio);
}
-int btrfsic_submit_bio_wait(int rw, struct bio *bio)
+int btrfsic_submit_bio_wait(struct bio *bio)
{
- __btrfsic_submit_bio(rw, bio);
- return submit_bio_wait(rw, bio);
+ __btrfsic_submit_bio(bio);
+ return submit_bio_wait(bio);
}
int btrfsic_mount(struct btrfs_root *root,
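
All of the conversions above track the 4.8 block-layer change: the operation now rides in the bio (bi_opf) instead of being passed to submit_bio()/submit_bh(). A detached sketch of a synchronous one-page read under the new convention, with caller-supplied device and sector:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Note there is no rw argument anywhere; the op lives in the bio. */
static int read_one_page(struct block_device *bdev, sector_t sector,
			 struct page *page)
{
	struct bio *bio = bio_alloc(GFP_NOFS, 1);
	int ret;

	if (!bio)
		return -ENOMEM;
	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio_set_op_attrs(bio, REQ_OP_READ, 0);
	bio_add_page(bio, page, PAGE_SIZE, 0);
	ret = submit_bio_wait(bio);
	bio_put(bio);
	return ret;
}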
diff --git a/fs/btrfs/check-integrity.h b/fs/btrfs/check-integrity.h
index 13b8566c97ab..f78dff1c7e86 100644
--- a/fs/btrfs/check-integrity.h
+++ b/fs/btrfs/check-integrity.h
@@ -20,9 +20,9 @@
#define __BTRFS_CHECK_INTEGRITY__
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
-int btrfsic_submit_bh(int rw, struct buffer_head *bh);
-void btrfsic_submit_bio(int rw, struct bio *bio);
-int btrfsic_submit_bio_wait(int rw, struct bio *bio);
+int btrfsic_submit_bh(int op, int op_flags, struct buffer_head *bh);
+void btrfsic_submit_bio(struct bio *bio);
+int btrfsic_submit_bio_wait(struct bio *bio);
#else
#define btrfsic_submit_bh submit_bh
#define btrfsic_submit_bio submit_bio
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 658c39b70fba..029db6e1105c 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -363,6 +363,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
kfree(cb);
return -ENOMEM;
}
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
atomic_inc(&cb->pending_bios);
@@ -373,7 +374,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
page = compressed_pages[pg_index];
page->mapping = inode->i_mapping;
if (bio->bi_iter.bi_size)
- ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
+ ret = io_tree->ops->merge_bio_hook(page, 0,
PAGE_SIZE,
bio, 0);
else
@@ -401,13 +402,17 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
BUG_ON(ret); /* -ENOMEM */
}
- ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
- BUG_ON(ret); /* -ENOMEM */
+ ret = btrfs_map_bio(root, bio, 0, 1);
+ if (ret) {
+ bio->bi_error = ret;
+ bio_endio(bio);
+ }
bio_put(bio);
bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
BUG_ON(!bio);
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
bio_add_page(bio, page, PAGE_SIZE, 0);
@@ -431,8 +436,11 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
BUG_ON(ret); /* -ENOMEM */
}
- ret = btrfs_map_bio(root, WRITE, bio, 0, 1);
- BUG_ON(ret); /* -ENOMEM */
+ ret = btrfs_map_bio(root, bio, 0, 1);
+ if (ret) {
+ bio->bi_error = ret;
+ bio_endio(bio);
+ }
bio_put(bio);
return 0;
@@ -646,6 +654,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
if (!comp_bio)
goto fail2;
+ bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
atomic_inc(&cb->pending_bios);
@@ -656,7 +665,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
page->index = em_start >> PAGE_SHIFT;
if (comp_bio->bi_iter.bi_size)
- ret = tree->ops->merge_bio_hook(READ, page, 0,
+ ret = tree->ops->merge_bio_hook(page, 0,
PAGE_SIZE,
comp_bio, 0);
else
@@ -687,8 +696,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
root->sectorsize);
- ret = btrfs_map_bio(root, READ, comp_bio,
- mirror_num, 0);
+ ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
if (ret) {
bio->bi_error = ret;
bio_endio(comp_bio);
@@ -699,6 +707,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
GFP_NOFS);
BUG_ON(!comp_bio);
+ bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
@@ -717,7 +726,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
BUG_ON(ret); /* -ENOMEM */
}
- ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0);
+ ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
if (ret) {
bio->bi_error = ret;
bio_endio(comp_bio);
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index a85cf7d23309..d1c56c94dd5a 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1153,14 +1153,14 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
ret = btrfs_reloc_cow_block(trans, root, buf, cow);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
}
@@ -1198,7 +1198,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
if (last_ref) {
ret = tree_mod_log_free_eb(root->fs_info, buf);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
}
@@ -1505,7 +1505,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct extent_buffer *buf)
{
- if (btrfs_test_is_dummy_root(root))
+ if (btrfs_is_testing(root->fs_info))
return 0;
/* ensure we can see the force_cow */
@@ -1771,6 +1771,14 @@ static noinline int generic_bin_search(struct extent_buffer *eb,
unsigned long map_len = 0;
int err;
+ if (low > high) {
+ btrfs_err(eb->fs_info,
+ "%s: low (%d) > high (%d) eb %llu owner %llu level %d",
+ __func__, low, high, eb->start,
+ btrfs_header_owner(eb), btrfs_header_level(eb));
+ return -EINVAL;
+ }
+
while (low < high) {
mid = (low + high) / 2;
offset = p + mid * item_size;
@@ -1858,7 +1866,6 @@ static void root_sub_used(struct btrfs_root *root, u32 size)
/* given a node and slot number, this reads the blocks it points to. The
* extent buffer is returned with a reference taken (but unlocked).
- * NULL is returned on error.
*/
static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
struct extent_buffer *parent, int slot)
@@ -1866,19 +1873,16 @@ static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
int level = btrfs_header_level(parent);
struct extent_buffer *eb;
- if (slot < 0)
- return NULL;
- if (slot >= btrfs_header_nritems(parent))
- return NULL;
+ if (slot < 0 || slot >= btrfs_header_nritems(parent))
+ return ERR_PTR(-ENOENT);
BUG_ON(level == 0);
eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
btrfs_node_ptr_generation(parent, slot));
- if (IS_ERR(eb) || !extent_buffer_uptodate(eb)) {
- if (!IS_ERR(eb))
- free_extent_buffer(eb);
- eb = NULL;
+ if (!IS_ERR(eb) && !extent_buffer_uptodate(eb)) {
+ free_extent_buffer(eb);
+ eb = ERR_PTR(-EIO);
}
return eb;
@@ -1931,8 +1935,8 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
/* promote the child to a root */
child = read_node_slot(root, mid, 0);
- if (!child) {
- ret = -EROFS;
+ if (IS_ERR(child)) {
+ ret = PTR_ERR(child);
btrfs_handle_fs_error(root->fs_info, ret, NULL);
goto enospc;
}
@@ -1970,6 +1974,9 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
return 0;
left = read_node_slot(root, parent, pslot - 1);
+ if (IS_ERR(left))
+ left = NULL;
+
if (left) {
btrfs_tree_lock(left);
btrfs_set_lock_blocking(left);
@@ -1980,7 +1987,11 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
goto enospc;
}
}
+
right = read_node_slot(root, parent, pslot + 1);
+ if (IS_ERR(right))
+ right = NULL;
+
if (right) {
btrfs_tree_lock(right);
btrfs_set_lock_blocking(right);
@@ -2135,6 +2146,8 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
return 1;
left = read_node_slot(root, parent, pslot - 1);
+ if (IS_ERR(left))
+ left = NULL;
/* first, try to make some room in the middle buffer */
if (left) {
@@ -2185,6 +2198,8 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
free_extent_buffer(left);
}
right = read_node_slot(root, parent, pslot + 1);
+ if (IS_ERR(right))
+ right = NULL;
/*
* then try to empty the rightmost buffer into the middle
@@ -3240,7 +3255,7 @@ static int push_node_left(struct btrfs_trans_handle *trans,
ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
push_items);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
copy_extent_buffer(dst, src,
@@ -3315,7 +3330,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
src_nritems - push_items, push_items);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
copy_extent_buffer(dst, src,
@@ -3519,7 +3534,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
mid, c_nritems - mid);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
copy_extent_buffer(split, c,
@@ -3773,7 +3788,11 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_assert_tree_locked(path->nodes[1]);
right = read_node_slot(root, upper, slot + 1);
- if (right == NULL)
+ /*
+ * slot + 1 is not valid or we fail to read the right node,
+ * no big deal, just return.
+ */
+ if (IS_ERR(right))
return 1;
btrfs_tree_lock(right);
@@ -4003,7 +4022,11 @@ static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
btrfs_assert_tree_locked(path->nodes[1]);
left = read_node_slot(root, path->nodes[1], slot - 1);
- if (left == NULL)
+ /*
+ * slot - 1 is not valid or we fail to read the left node,
+ * no big deal, just return.
+ */
+ if (IS_ERR(left))
return 1;
btrfs_tree_lock(left);
@@ -5210,7 +5233,10 @@ find_next_key:
}
btrfs_set_path_blocking(path);
cur = read_node_slot(root, cur, slot);
- BUG_ON(!cur); /* -ENOMEM */
+ if (IS_ERR(cur)) {
+ ret = PTR_ERR(cur);
+ goto out;
+ }
btrfs_tree_read_lock(cur);
@@ -5229,15 +5255,21 @@ out:
return ret;
}
-static void tree_move_down(struct btrfs_root *root,
+static int tree_move_down(struct btrfs_root *root,
struct btrfs_path *path,
int *level, int root_level)
{
+ struct extent_buffer *eb;
+
BUG_ON(*level == 0);
- path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
- path->slots[*level]);
+ eb = read_node_slot(root, path->nodes[*level], path->slots[*level]);
+ if (IS_ERR(eb))
+ return PTR_ERR(eb);
+
+ path->nodes[*level - 1] = eb;
path->slots[*level - 1] = 0;
(*level)--;
+ return 0;
}
static int tree_move_next_or_upnext(struct btrfs_root *root,
@@ -5282,8 +5314,7 @@ static int tree_advance(struct btrfs_root *root,
if (*level == 0 || !allow_down) {
ret = tree_move_next_or_upnext(root, path, level, root_level);
} else {
- tree_move_down(root, path, level, root_level);
- ret = 0;
+ ret = tree_move_down(root, path, level, root_level);
}
if (ret >= 0) {
if (*level == 0)
@@ -5457,8 +5488,10 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
left_root_level,
advance_left != ADVANCE_ONLY_NEXT,
&left_key);
- if (ret < 0)
+ if (ret == -1)
left_end_reached = ADVANCE;
+ else if (ret < 0)
+ goto out;
advance_left = 0;
}
if (advance_right && !right_end_reached) {
@@ -5466,8 +5499,10 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
right_root_level,
advance_right != ADVANCE_ONLY_NEXT,
&right_key);
- if (ret < 0)
+ if (ret == -1)
right_end_reached = ADVANCE;
+ else if (ret < 0)
+ goto out;
advance_right = 0;
}
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 4274a7bfdaed..2fe8f89091a3 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -117,6 +117,7 @@ static inline unsigned long btrfs_chunk_item_size(int num_stripes)
#define BTRFS_FS_STATE_REMOUNTING 1
#define BTRFS_FS_STATE_TRANS_ABORTED 2
#define BTRFS_FS_STATE_DEV_REPLACING 3
+#define BTRFS_FS_STATE_DUMMY_FS_INFO 4
#define BTRFS_BACKREF_REV_MAX 256
#define BTRFS_BACKREF_REV_SHIFT 56
@@ -144,21 +145,6 @@ struct btrfs_header {
u8 level;
} __attribute__ ((__packed__));
-#define BTRFS_NODEPTRS_PER_BLOCK(r) (((r)->nodesize - \
- sizeof(struct btrfs_header)) / \
- sizeof(struct btrfs_key_ptr))
-#define __BTRFS_LEAF_DATA_SIZE(bs) ((bs) - sizeof(struct btrfs_header))
-#define BTRFS_LEAF_DATA_SIZE(r) (__BTRFS_LEAF_DATA_SIZE(r->nodesize))
-#define BTRFS_FILE_EXTENT_INLINE_DATA_START \
- (offsetof(struct btrfs_file_extent_item, disk_bytenr))
-#define BTRFS_MAX_INLINE_DATA_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \
- sizeof(struct btrfs_item) - \
- BTRFS_FILE_EXTENT_INLINE_DATA_START)
-#define BTRFS_MAX_XATTR_SIZE(r) (BTRFS_LEAF_DATA_SIZE(r) - \
- sizeof(struct btrfs_item) -\
- sizeof(struct btrfs_dir_item))
-
-
/*
* this is a very generous portion of the super block, giving us
* room to translate 14 chunks with 3 stripes each.
@@ -439,6 +425,8 @@ struct btrfs_space_info {
struct list_head list;
/* Protected by the spinlock 'lock'. */
struct list_head ro_bgs;
+ struct list_head priority_tickets;
+ struct list_head tickets;
struct rw_semaphore groups_sem;
/* for block groups in our same type */
@@ -1112,12 +1100,11 @@ struct btrfs_subvolume_writers {
#define BTRFS_ROOT_REF_COWS 1
#define BTRFS_ROOT_TRACK_DIRTY 2
#define BTRFS_ROOT_IN_RADIX 3
-#define BTRFS_ROOT_DUMMY_ROOT 4
-#define BTRFS_ROOT_ORPHAN_ITEM_INSERTED 5
-#define BTRFS_ROOT_DEFRAG_RUNNING 6
-#define BTRFS_ROOT_FORCE_COW 7
-#define BTRFS_ROOT_MULTI_LOG_TASKS 8
-#define BTRFS_ROOT_DIRTY 9
+#define BTRFS_ROOT_ORPHAN_ITEM_INSERTED 4
+#define BTRFS_ROOT_DEFRAG_RUNNING 5
+#define BTRFS_ROOT_FORCE_COW 6
+#define BTRFS_ROOT_MULTI_LOG_TASKS 7
+#define BTRFS_ROOT_DIRTY 8
/*
* in ram representation of the tree. extent_root is used for all allocations
@@ -1179,8 +1166,10 @@ struct btrfs_root {
u64 highest_objectid;
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* only used with CONFIG_BTRFS_FS_RUN_SANITY_TESTS is enabled */
u64 alloc_bytenr;
+#endif
u64 defrag_trans_start;
struct btrfs_key defrag_progress;
@@ -1257,6 +1246,39 @@ struct btrfs_root {
atomic_t qgroup_meta_rsv;
};
+static inline u32 __BTRFS_LEAF_DATA_SIZE(u32 blocksize)
+{
+ return blocksize - sizeof(struct btrfs_header);
+}
+
+static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_root *root)
+{
+ return __BTRFS_LEAF_DATA_SIZE(root->nodesize);
+}
+
+static inline u32 BTRFS_MAX_ITEM_SIZE(const struct btrfs_root *root)
+{
+ return BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
+}
+
+static inline u32 BTRFS_NODEPTRS_PER_BLOCK(const struct btrfs_root *root)
+{
+ return BTRFS_LEAF_DATA_SIZE(root) / sizeof(struct btrfs_key_ptr);
+}
+
+#define BTRFS_FILE_EXTENT_INLINE_DATA_START \
+ (offsetof(struct btrfs_file_extent_item, disk_bytenr))
+static inline u32 BTRFS_MAX_INLINE_DATA_SIZE(const struct btrfs_root *root)
+{
+ return BTRFS_MAX_ITEM_SIZE(root) -
+ BTRFS_FILE_EXTENT_INLINE_DATA_START;
+}
+
+static inline u32 BTRFS_MAX_XATTR_SIZE(const struct btrfs_root *root)
+{
+ return BTRFS_MAX_ITEM_SIZE(root) - sizeof(struct btrfs_dir_item);
+}
+
/*
* Flags for mount options.
*
@@ -1297,21 +1319,21 @@ struct btrfs_root {
#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
#define btrfs_raw_test_opt(o, opt) ((o) & BTRFS_MOUNT_##opt)
-#define btrfs_test_opt(root, opt) ((root)->fs_info->mount_opt & \
+#define btrfs_test_opt(fs_info, opt) ((fs_info)->mount_opt & \
BTRFS_MOUNT_##opt)
-#define btrfs_set_and_info(root, opt, fmt, args...) \
+#define btrfs_set_and_info(fs_info, opt, fmt, args...) \
{ \
- if (!btrfs_test_opt(root, opt)) \
- btrfs_info(root->fs_info, fmt, ##args); \
- btrfs_set_opt(root->fs_info->mount_opt, opt); \
+ if (!btrfs_test_opt(fs_info, opt)) \
+ btrfs_info(fs_info, fmt, ##args); \
+ btrfs_set_opt(fs_info->mount_opt, opt); \
}
-#define btrfs_clear_and_info(root, opt, fmt, args...) \
+#define btrfs_clear_and_info(fs_info, opt, fmt, args...) \
{ \
- if (btrfs_test_opt(root, opt)) \
- btrfs_info(root->fs_info, fmt, ##args); \
- btrfs_clear_opt(root->fs_info->mount_opt, opt); \
+ if (btrfs_test_opt(fs_info, opt)) \
+ btrfs_info(fs_info, fmt, ##args); \
+ btrfs_clear_opt(fs_info->mount_opt, opt); \
}
#ifdef CONFIG_BTRFS_DEBUG
@@ -1319,9 +1341,9 @@ static inline int
btrfs_should_fragment_free_space(struct btrfs_root *root,
struct btrfs_block_group_cache *block_group)
{
- return (btrfs_test_opt(root, FRAGMENT_METADATA) &&
+ return (btrfs_test_opt(root->fs_info, FRAGMENT_METADATA) &&
block_group->flags & BTRFS_BLOCK_GROUP_METADATA) ||
- (btrfs_test_opt(root, FRAGMENT_DATA) &&
+ (btrfs_test_opt(root->fs_info, FRAGMENT_DATA) &&
block_group->flags & BTRFS_BLOCK_GROUP_DATA);
}
#endif
@@ -2624,6 +2646,15 @@ enum btrfs_reserve_flush_enum {
BTRFS_RESERVE_FLUSH_ALL,
};
+enum btrfs_flush_state {
+ FLUSH_DELAYED_ITEMS_NR = 1,
+ FLUSH_DELAYED_ITEMS = 2,
+ FLUSH_DELALLOC = 3,
+ FLUSH_DELALLOC_WAIT = 4,
+ ALLOC_CHUNK = 5,
+ COMMIT_TRANS = 6,
+};
+
int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len);
int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes);
void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len);
@@ -2661,8 +2692,8 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv, u64 min_reserved,
enum btrfs_reserve_flush_enum flush);
int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
- struct btrfs_block_rsv *dst_rsv,
- u64 num_bytes);
+ struct btrfs_block_rsv *dst_rsv, u64 num_bytes,
+ int update_size);
int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *dest, u64 num_bytes,
int min_factor);
@@ -2875,9 +2906,6 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq);
/* root-item.c */
-int btrfs_find_root_ref(struct btrfs_root *tree_root,
- struct btrfs_path *path,
- u64 root_id, u64 ref_id);
int btrfs_add_root_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *tree_root,
u64 root_id, u64 ref_id, u64 dirid, u64 sequence,
@@ -3091,7 +3119,7 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
struct btrfs_root *new_root,
struct btrfs_root *parent_root,
u64 new_dirid);
-int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
+int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
size_t size, struct bio *bio,
unsigned long bio_flags);
int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
@@ -3351,23 +3379,23 @@ const char *btrfs_decode_error(int errno);
__cold
void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, const char *function,
+ const char *function,
unsigned int line, int errno);
/*
* Call btrfs_abort_transaction as early as possible when an error condition is
* detected, that way the exact line number is reported.
*/
-#define btrfs_abort_transaction(trans, root, errno) \
+#define btrfs_abort_transaction(trans, errno) \
do { \
/* Report first abort since mount */ \
if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED, \
- &((root)->fs_info->fs_state))) { \
+ &((trans)->fs_info->fs_state))) { \
WARN(1, KERN_DEBUG \
"BTRFS: Transaction aborted (error %d)\n", \
(errno)); \
} \
- __btrfs_abort_transaction((trans), (root), __func__, \
+ __btrfs_abort_transaction((trans), __func__, \
__LINE__, (errno)); \
} while (0)
@@ -3599,13 +3627,13 @@ static inline int btrfs_defrag_cancelled(struct btrfs_fs_info *fs_info)
void btrfs_test_destroy_inode(struct inode *inode);
#endif
-static inline int btrfs_test_is_dummy_root(struct btrfs_root *root)
+static inline int btrfs_is_testing(struct btrfs_fs_info *fs_info)
{
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
- if (unlikely(test_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state)))
+ if (unlikely(test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO,
+ &fs_info->fs_state)))
return 1;
#endif
return 0;
}
-
#endif
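
Converting the leaf-size macros to static inlines (above) buys argument type checking and single evaluation. The shape of the conversion on generic names, not the btrfs ones:

struct hdr { char bytes[101]; };	/* stand-in for btrfs_header */
struct root { unsigned int nodesize; };	/* stand-in for btrfs_root */

/* Was: #define LEAF_DATA_SIZE(r) ((r)->nodesize - sizeof(struct hdr))
 * Now the compiler verifies callers really pass a struct root *. */
static inline unsigned int leaf_data_size(const struct root *r)
{
	return r->nodesize - sizeof(struct hdr);
}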
diff --git a/fs/btrfs/dedupe.h b/fs/btrfs/dedupe.h
new file mode 100644
index 000000000000..83ebfe28da9e
--- /dev/null
+++ b/fs/btrfs/dedupe.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2016 Fujitsu. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __BTRFS_DEDUPE__
+#define __BTRFS_DEDUPE__
+
+/* later in-band dedupe will expand this struct */
+struct btrfs_dedupe_hash;
+#endif
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index d3aaabbfada0..3eeb9cd8cfa5 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -34,7 +34,7 @@ int __init btrfs_delayed_inode_init(void)
delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
sizeof(struct btrfs_delayed_node),
0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+ SLAB_MEM_SPREAD,
NULL);
if (!delayed_node_cache)
return -ENOMEM;
@@ -553,7 +553,7 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
dst_rsv = &root->fs_info->delayed_block_rsv;
num_bytes = btrfs_calc_trans_metadata_size(root, 1);
- ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
+ ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
if (!ret) {
trace_btrfs_space_reservation(root->fs_info, "delayed_item",
item->key.objectid,
@@ -598,6 +598,29 @@ static int btrfs_delayed_inode_reserve_metadata(
num_bytes = btrfs_calc_trans_metadata_size(root, 1);
/*
+ * If our block_rsv is the delalloc block reserve then check and see if
+ * we have our extra reservation for updating the inode. If not fall
+ * through and try to reserve space quickly.
+ *
+ * We used to try and steal from the delalloc block rsv or the global
+ * reserve, but we'd steal a full reservation, which isn't kind. We are
+ * here through delalloc which means we've likely just cowed down close
+ * to the leaf that contains the inode, so we would steal less just
+ * doing the fallback inode update, so if we do end up having to steal
+ * from the global block rsv we hopefully only steal one or two blocks
+ * worth which is less likely to hurt us.
+ */
+ if (src_rsv && src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
+ spin_lock(&BTRFS_I(inode)->lock);
+ if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
+ &BTRFS_I(inode)->runtime_flags))
+ release = true;
+ else
+ src_rsv = NULL;
+ spin_unlock(&BTRFS_I(inode)->lock);
+ }
+
+ /*
* btrfs_dirty_inode will update the inode under btrfs_join_transaction
* which doesn't reserve space for speed. This is a problem since we
* still need to reserve space for this update, so try to reserve the
@@ -626,51 +649,10 @@ static int btrfs_delayed_inode_reserve_metadata(
num_bytes, 1);
}
return ret;
- } else if (src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
- spin_lock(&BTRFS_I(inode)->lock);
- if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
- &BTRFS_I(inode)->runtime_flags)) {
- spin_unlock(&BTRFS_I(inode)->lock);
- release = true;
- goto migrate;
- }
- spin_unlock(&BTRFS_I(inode)->lock);
-
- /* Ok we didn't have space pre-reserved. This shouldn't happen
- * too often but it can happen if we do delalloc to an existing
- * inode which gets dirtied because of the time update, and then
- * isn't touched again until after the transaction commits and
- * then we try to write out the data. First try to be nice and
- * reserve something strictly for us. If not be a pain and try
- * to steal from the delalloc block rsv.
- */
- ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes,
- BTRFS_RESERVE_NO_FLUSH);
- if (!ret)
- goto out;
-
- ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
- if (!ret)
- goto out;
-
- if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
- btrfs_debug(root->fs_info,
- "block rsv migrate returned %d", ret);
- WARN_ON(1);
- }
- /*
- * Ok this is a problem, let's just steal from the global rsv
- * since this really shouldn't happen that often.
- */
- ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
- dst_rsv, num_bytes);
- goto out;
}
-migrate:
- ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
+ ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
-out:
/*
* Migrate only takes a reservation, it doesn't touch the size of the
* block_rsv. This is to simplify people who don't normally have things
@@ -1188,7 +1170,7 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
if (ret) {
btrfs_release_delayed_node(curr_node);
curr_node = NULL;
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
break;
}
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 430b3689b112..d9ddcfc18c91 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -606,7 +606,8 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
qrecord->num_bytes = num_bytes;
qrecord->old_roots = NULL;
- qexisting = btrfs_qgroup_insert_dirty_extent(delayed_refs,
+ qexisting = btrfs_qgroup_insert_dirty_extent(fs_info,
+ delayed_refs,
qrecord);
if (qexisting)
kfree(qrecord);
@@ -615,7 +616,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info,
spin_lock_init(&head_ref->lock);
mutex_init(&head_ref->mutex);
- trace_add_delayed_ref_head(ref, head_ref, action);
+ trace_add_delayed_ref_head(fs_info, ref, head_ref, action);
existing = htree_insert(&delayed_refs->href_root,
&head_ref->href_node);
@@ -682,7 +683,7 @@ add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
ref->type = BTRFS_TREE_BLOCK_REF_KEY;
full_ref->level = level;
- trace_add_delayed_tree_ref(ref, full_ref, action);
+ trace_add_delayed_tree_ref(fs_info, ref, full_ref, action);
ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
@@ -739,7 +740,7 @@ add_delayed_data_ref(struct btrfs_fs_info *fs_info,
full_ref->objectid = owner;
full_ref->offset = offset;
- trace_add_delayed_data_ref(ref, full_ref, action);
+ trace_add_delayed_data_ref(fs_info, ref, full_ref, action);
ret = add_delayed_ref_tail_merge(trans, delayed_refs, head_ref, ref);
@@ -861,33 +862,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
return 0;
}
-int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info,
- struct btrfs_trans_handle *trans,
- u64 ref_root, u64 bytenr, u64 num_bytes)
-{
- struct btrfs_delayed_ref_root *delayed_refs;
- struct btrfs_delayed_ref_head *ref_head;
- int ret = 0;
-
- if (!fs_info->quota_enabled || !is_fstree(ref_root))
- return 0;
-
- delayed_refs = &trans->transaction->delayed_refs;
-
- spin_lock(&delayed_refs->lock);
- ref_head = find_ref_head(&delayed_refs->href_root, bytenr, 0);
- if (!ref_head) {
- ret = -ENOENT;
- goto out;
- }
- WARN_ON(ref_head->qgroup_reserved || ref_head->qgroup_ref_root);
- ref_head->qgroup_ref_root = ref_root;
- ref_head->qgroup_reserved = num_bytes;
-out:
- spin_unlock(&delayed_refs->lock);
- return ret;
-}
-
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes,
@@ -940,28 +914,28 @@ int btrfs_delayed_ref_init(void)
btrfs_delayed_ref_head_cachep = kmem_cache_create(
"btrfs_delayed_ref_head",
sizeof(struct btrfs_delayed_ref_head), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ SLAB_MEM_SPREAD, NULL);
if (!btrfs_delayed_ref_head_cachep)
goto fail;
btrfs_delayed_tree_ref_cachep = kmem_cache_create(
"btrfs_delayed_tree_ref",
sizeof(struct btrfs_delayed_tree_ref), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ SLAB_MEM_SPREAD, NULL);
if (!btrfs_delayed_tree_ref_cachep)
goto fail;
btrfs_delayed_data_ref_cachep = kmem_cache_create(
"btrfs_delayed_data_ref",
sizeof(struct btrfs_delayed_data_ref), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ SLAB_MEM_SPREAD, NULL);
if (!btrfs_delayed_data_ref_cachep)
goto fail;
btrfs_delayed_extent_op_cachep = kmem_cache_create(
"btrfs_delayed_extent_op",
sizeof(struct btrfs_delayed_extent_op), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ SLAB_MEM_SPREAD, NULL);
if (!btrfs_delayed_extent_op_cachep)
goto fail;
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index 5fca9534a271..43f3629760e9 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -250,9 +250,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
u64 parent, u64 ref_root,
u64 owner, u64 offset, u64 reserved, int action,
struct btrfs_delayed_extent_op *extent_op);
-int btrfs_add_delayed_qgroup_reserve(struct btrfs_fs_info *fs_info,
- struct btrfs_trans_handle *trans,
- u64 ref_root, u64 bytenr, u64 num_bytes);
int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
struct btrfs_trans_handle *trans,
u64 bytenr, u64 num_bytes,
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 63ef9cdf0144..e9bbff3c0029 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -142,7 +142,7 @@ no_valid_dev_replace_entry_found:
* missing
*/
if (!dev_replace->srcdev &&
- !btrfs_test_opt(dev_root, DEGRADED)) {
+ !btrfs_test_opt(dev_root->fs_info, DEGRADED)) {
ret = -EIO;
btrfs_warn(fs_info,
"cannot mount because device replace operation is ongoing and");
@@ -151,7 +151,7 @@ no_valid_dev_replace_entry_found:
src_devid);
}
if (!dev_replace->tgtdev &&
- !btrfs_test_opt(dev_root, DEGRADED)) {
+ !btrfs_test_opt(dev_root->fs_info, DEGRADED)) {
ret = -EIO;
btrfs_warn(fs_info,
"cannot mount because device replace operation is ongoing and");
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 60ce1190307b..59febfb8d04a 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -101,7 +101,7 @@ int __init btrfs_end_io_wq_init(void)
btrfs_end_io_wq_cache = kmem_cache_create("btrfs_end_io_wq",
sizeof(struct btrfs_end_io_wq),
0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+ SLAB_MEM_SPREAD,
NULL);
if (!btrfs_end_io_wq_cache)
return -ENOMEM;
@@ -124,7 +124,6 @@ struct async_submit_bio {
struct list_head list;
extent_submit_bio_hook_t *submit_bio_start;
extent_submit_bio_hook_t *submit_bio_done;
- int rw;
int mirror_num;
unsigned long bio_flags;
/*
@@ -727,7 +726,7 @@ static void end_workqueue_bio(struct bio *bio)
fs_info = end_io_wq->info;
end_io_wq->error = bio->bi_error;
- if (bio->bi_rw & REQ_WRITE) {
+ if (bio_op(bio) == REQ_OP_WRITE) {
if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) {
wq = fs_info->endio_meta_write_workers;
func = btrfs_endio_meta_write_helper;
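
The read/write test changes from masking bio->bi_rw with REQ_WRITE to comparing bio_op(bio) against REQ_OP_WRITE: the operation is now a small enum packed into the request flags word rather than a single bit. A compilable sketch of that encoding (the bit layout here is illustrative; the real kernel has packed the op differently across versions):

#include <assert.h>

enum model_req_op {
	MODEL_OP_READ	= 0,
	MODEL_OP_WRITE	= 1,
	MODEL_OP_DISCARD = 3,
};

#define MODEL_OP_BITS	3
#define MODEL_OP_MASK	((1u << MODEL_OP_BITS) - 1)
#define MODEL_REQ_SYNC	(1u << MODEL_OP_BITS)	/* first flag above the op */

struct bio_model {
	unsigned int bi_opf;	/* op in the low bits, flags above */
};

static inline enum model_req_op model_bio_op(const struct bio_model *bio)
{
	return bio->bi_opf & MODEL_OP_MASK;
}

int main(void)
{
	struct bio_model b = { .bi_opf = MODEL_OP_WRITE | MODEL_REQ_SYNC };

	assert(model_bio_op(&b) == MODEL_OP_WRITE);	/* op compare, not bit test */
	assert(b.bi_opf & MODEL_REQ_SYNC);		/* flags stay bit-testable */
	return 0;
}

That is also why async_submit_bio can drop its cached rw field in the next hunks: the bio itself carries both op and flags, so the later `bio->bi_opf & REQ_SYNC` check replaces the saved value.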
@@ -797,7 +796,7 @@ static void run_one_async_start(struct btrfs_work *work)
int ret;
async = container_of(work, struct async_submit_bio, work);
- ret = async->submit_bio_start(async->inode, async->rw, async->bio,
+ ret = async->submit_bio_start(async->inode, async->bio,
async->mirror_num, async->bio_flags,
async->bio_offset);
if (ret)
@@ -830,9 +829,8 @@ static void run_one_async_done(struct btrfs_work *work)
return;
}
- async->submit_bio_done(async->inode, async->rw, async->bio,
- async->mirror_num, async->bio_flags,
- async->bio_offset);
+ async->submit_bio_done(async->inode, async->bio, async->mirror_num,
+ async->bio_flags, async->bio_offset);
}
static void run_one_async_free(struct btrfs_work *work)
@@ -844,7 +842,7 @@ static void run_one_async_free(struct btrfs_work *work)
}
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
- int rw, struct bio *bio, int mirror_num,
+ struct bio *bio, int mirror_num,
unsigned long bio_flags,
u64 bio_offset,
extent_submit_bio_hook_t *submit_bio_start,
@@ -857,7 +855,6 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
return -ENOMEM;
async->inode = inode;
- async->rw = rw;
async->bio = bio;
async->mirror_num = mirror_num;
async->submit_bio_start = submit_bio_start;
@@ -873,7 +870,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
atomic_inc(&fs_info->nr_async_submits);
- if (rw & REQ_SYNC)
+ if (bio->bi_opf & REQ_SYNC)
btrfs_set_work_high_priority(&async->work);
btrfs_queue_work(fs_info->workers, &async->work);
@@ -903,9 +900,8 @@ static int btree_csum_one_bio(struct bio *bio)
return ret;
}
-static int __btree_submit_bio_start(struct inode *inode, int rw,
- struct bio *bio, int mirror_num,
- unsigned long bio_flags,
+static int __btree_submit_bio_start(struct inode *inode, struct bio *bio,
+ int mirror_num, unsigned long bio_flags,
u64 bio_offset)
{
/*
@@ -915,7 +911,7 @@ static int __btree_submit_bio_start(struct inode *inode, int rw,
return btree_csum_one_bio(bio);
}
-static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
+static int __btree_submit_bio_done(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags,
u64 bio_offset)
{
@@ -925,7 +921,7 @@ static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
* when we're called for a write, we're already in the async
* submission context. Just jump into btrfs_map_bio
*/
- ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1);
+ ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 1);
if (ret) {
bio->bi_error = ret;
bio_endio(bio);
@@ -944,14 +940,14 @@ static int check_async_write(struct inode *inode, unsigned long bio_flags)
return 1;
}
-static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
+static int btree_submit_bio_hook(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags,
u64 bio_offset)
{
int async = check_async_write(inode, bio_flags);
int ret;
- if (!(rw & REQ_WRITE)) {
+ if (bio_op(bio) != REQ_OP_WRITE) {
/*
* called for a read, do the setup so that checksum validation
* can happen in the async kernel threads
@@ -960,21 +956,19 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
bio, BTRFS_WQ_ENDIO_METADATA);
if (ret)
goto out_w_error;
- ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
- mirror_num, 0);
+ ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0);
} else if (!async) {
ret = btree_csum_one_bio(bio);
if (ret)
goto out_w_error;
- ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio,
- mirror_num, 0);
+ ret = btrfs_map_bio(BTRFS_I(inode)->root, bio, mirror_num, 0);
} else {
/*
* kthread helpers are used to submit writes so that
* checksumming can happen in parallel across all CPUs
*/
ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
- inode, rw, bio, mirror_num, 0,
+ inode, bio, mirror_num, 0,
bio_offset,
__btree_submit_bio_start,
__btree_submit_bio_done);
@@ -1146,7 +1140,7 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_fs_info *fs_info,
struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
u64 bytenr)
{
- if (btrfs_test_is_dummy_root(root))
+ if (btrfs_is_testing(root->fs_info))
return alloc_test_extent_buffer(root->fs_info, bytenr,
root->nodesize);
return alloc_extent_buffer(root->fs_info, bytenr);
@@ -1233,6 +1227,7 @@ static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize,
struct btrfs_root *root, struct btrfs_fs_info *fs_info,
u64 objectid)
{
+ bool dummy = test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
root->node = NULL;
root->commit_root = NULL;
root->sectorsize = sectorsize;
@@ -1287,14 +1282,14 @@ static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize,
root->log_transid = 0;
root->log_transid_committed = -1;
root->last_log_commit = 0;
- if (fs_info)
+ if (!dummy)
extent_io_tree_init(&root->dirty_log_pages,
fs_info->btree_inode->i_mapping);
memset(&root->root_key, 0, sizeof(root->root_key));
memset(&root->root_item, 0, sizeof(root->root_item));
memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
- if (fs_info)
+ if (!dummy)
root->defrag_trans_start = fs_info->generation;
else
root->defrag_trans_start = 0;
@@ -1315,17 +1310,20 @@ static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info,
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
/* Should only be used by the testing infrastructure */
-struct btrfs_root *btrfs_alloc_dummy_root(u32 sectorsize, u32 nodesize)
+struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info,
+ u32 sectorsize, u32 nodesize)
{
struct btrfs_root *root;
- root = btrfs_alloc_root(NULL, GFP_KERNEL);
+ if (!fs_info)
+ return ERR_PTR(-EINVAL);
+
+ root = btrfs_alloc_root(fs_info, GFP_KERNEL);
if (!root)
return ERR_PTR(-ENOMEM);
/* We don't use the stripesize in selftest, set it as sectorsize */
- __setup_root(nodesize, sectorsize, sectorsize, root, NULL,
+ __setup_root(nodesize, sectorsize, sectorsize, root, fs_info,
BTRFS_ROOT_TREE_OBJECTID);
- set_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state);
root->alloc_bytenr = 0;
return root;
@@ -1600,14 +1598,14 @@ int btrfs_init_fs_root(struct btrfs_root *root)
ret = get_anon_bdev(&root->anon_dev);
if (ret)
- goto free_writers;
+ goto fail;
mutex_lock(&root->objectid_mutex);
ret = btrfs_find_highest_objectid(root,
&root->highest_objectid);
if (ret) {
mutex_unlock(&root->objectid_mutex);
- goto free_root_dev;
+ goto fail;
}
ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
@@ -1615,14 +1613,8 @@ int btrfs_init_fs_root(struct btrfs_root *root)
mutex_unlock(&root->objectid_mutex);
return 0;
-
-free_root_dev:
- free_anon_bdev(root->anon_dev);
-free_writers:
- btrfs_free_subvolume_writers(root->subv_writers);
fail:
- kfree(root->free_ino_ctl);
- kfree(root->free_ino_pinned);
+ /* The caller is responsible for calling free_fs_root. */
return ret;
}
@@ -2316,17 +2308,19 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
fs_info->workers =
- btrfs_alloc_workqueue("worker", flags | WQ_HIGHPRI,
- max_active, 16);
+ btrfs_alloc_workqueue(fs_info, "worker",
+ flags | WQ_HIGHPRI, max_active, 16);
fs_info->delalloc_workers =
- btrfs_alloc_workqueue("delalloc", flags, max_active, 2);
+ btrfs_alloc_workqueue(fs_info, "delalloc",
+ flags, max_active, 2);
fs_info->flush_workers =
- btrfs_alloc_workqueue("flush_delalloc", flags, max_active, 0);
+ btrfs_alloc_workqueue(fs_info, "flush_delalloc",
+ flags, max_active, 0);
fs_info->caching_workers =
- btrfs_alloc_workqueue("cache", flags, max_active, 0);
+ btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0);
/*
* a higher idle thresh on the submit workers makes it much more
@@ -2334,41 +2328,48 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
* devices
*/
fs_info->submit_workers =
- btrfs_alloc_workqueue("submit", flags,
+ btrfs_alloc_workqueue(fs_info, "submit", flags,
min_t(u64, fs_devices->num_devices,
max_active), 64);
fs_info->fixup_workers =
- btrfs_alloc_workqueue("fixup", flags, 1, 0);
+ btrfs_alloc_workqueue(fs_info, "fixup", flags, 1, 0);
/*
* endios are largely parallel and should have a very
* low idle thresh
*/
fs_info->endio_workers =
- btrfs_alloc_workqueue("endio", flags, max_active, 4);
+ btrfs_alloc_workqueue(fs_info, "endio", flags, max_active, 4);
fs_info->endio_meta_workers =
- btrfs_alloc_workqueue("endio-meta", flags, max_active, 4);
+ btrfs_alloc_workqueue(fs_info, "endio-meta", flags,
+ max_active, 4);
fs_info->endio_meta_write_workers =
- btrfs_alloc_workqueue("endio-meta-write", flags, max_active, 2);
+ btrfs_alloc_workqueue(fs_info, "endio-meta-write", flags,
+ max_active, 2);
fs_info->endio_raid56_workers =
- btrfs_alloc_workqueue("endio-raid56", flags, max_active, 4);
+ btrfs_alloc_workqueue(fs_info, "endio-raid56", flags,
+ max_active, 4);
fs_info->endio_repair_workers =
- btrfs_alloc_workqueue("endio-repair", flags, 1, 0);
+ btrfs_alloc_workqueue(fs_info, "endio-repair", flags, 1, 0);
fs_info->rmw_workers =
- btrfs_alloc_workqueue("rmw", flags, max_active, 2);
+ btrfs_alloc_workqueue(fs_info, "rmw", flags, max_active, 2);
fs_info->endio_write_workers =
- btrfs_alloc_workqueue("endio-write", flags, max_active, 2);
+ btrfs_alloc_workqueue(fs_info, "endio-write", flags,
+ max_active, 2);
fs_info->endio_freespace_worker =
- btrfs_alloc_workqueue("freespace-write", flags, max_active, 0);
+ btrfs_alloc_workqueue(fs_info, "freespace-write", flags,
+ max_active, 0);
fs_info->delayed_workers =
- btrfs_alloc_workqueue("delayed-meta", flags, max_active, 0);
+ btrfs_alloc_workqueue(fs_info, "delayed-meta", flags,
+ max_active, 0);
fs_info->readahead_workers =
- btrfs_alloc_workqueue("readahead", flags, max_active, 2);
+ btrfs_alloc_workqueue(fs_info, "readahead", flags,
+ max_active, 2);
fs_info->qgroup_rescan_workers =
- btrfs_alloc_workqueue("qgroup-rescan", flags, 1, 0);
+ btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
fs_info->extent_workers =
- btrfs_alloc_workqueue("extent-refs", flags,
+ btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
min_t(u64, fs_devices->num_devices,
max_active), 8);
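
btrfs_alloc_workqueue() now takes the fs_info as its first argument at every call site, presumably so the queue keeps a back-pointer and later diagnostics or tracepoints can name the owning filesystem. A minimal model of threading such a context pointer through a constructor (all names invented):

#include <stdio.h>
#include <stdlib.h>

struct fsctx {				/* stands in for btrfs_fs_info */
	const char *label;
};

struct wq_model {
	struct fsctx *ctx;		/* back-pointer stored at creation */
	const char *name;
	int max_active;
};

static struct wq_model *wq_alloc(struct fsctx *ctx, const char *name,
				 int max_active)
{
	struct wq_model *wq = calloc(1, sizeof(*wq));

	if (!wq)
		return NULL;
	wq->ctx = ctx;
	wq->name = name;
	wq->max_active = max_active;
	return wq;
}

int main(void)
{
	struct fsctx fs = { .label = "fs0" };
	struct wq_model *wq = wq_alloc(&fs, "endio", 8);

	if (wq)		/* anything queued can now reach per-fs state */
		printf("%s/%s max_active=%d\n", wq->ctx->label, wq->name,
		       wq->max_active);
	free(wq);
	return 0;
}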
@@ -3016,8 +3017,8 @@ retry_root_backup:
if (IS_ERR(fs_info->transaction_kthread))
goto fail_cleaner;
- if (!btrfs_test_opt(tree_root, SSD) &&
- !btrfs_test_opt(tree_root, NOSSD) &&
+ if (!btrfs_test_opt(tree_root->fs_info, SSD) &&
+ !btrfs_test_opt(tree_root->fs_info, NOSSD) &&
!fs_info->fs_devices->rotating) {
btrfs_info(fs_info, "detected SSD devices, enabling SSD mode");
btrfs_set_opt(fs_info->mount_opt, SSD);
@@ -3030,9 +3031,9 @@ retry_root_backup:
btrfs_apply_pending_changes(fs_info);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
+ if (btrfs_test_opt(tree_root->fs_info, CHECK_INTEGRITY)) {
ret = btrfsic_mount(tree_root, fs_devices,
- btrfs_test_opt(tree_root,
+ btrfs_test_opt(tree_root->fs_info,
CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
1 : 0,
fs_info->check_integrity_print_mask);
@@ -3048,7 +3049,7 @@ retry_root_backup:
/* do not make disk changes in broken FS or nologreplay is given */
if (btrfs_super_log_root(disk_super) != 0 &&
- !btrfs_test_opt(tree_root, NOLOGREPLAY)) {
+ !btrfs_test_opt(tree_root->fs_info, NOLOGREPLAY)) {
ret = btrfs_replay_log(fs_info, fs_devices);
if (ret) {
err = ret;
@@ -3089,7 +3090,7 @@ retry_root_backup:
if (sb->s_flags & MS_RDONLY)
return 0;
- if (btrfs_test_opt(tree_root, FREE_SPACE_TREE) &&
+ if (btrfs_test_opt(tree_root->fs_info, FREE_SPACE_TREE) &&
!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
btrfs_info(fs_info, "creating free space tree");
ret = btrfs_create_free_space_tree(fs_info);
@@ -3126,7 +3127,7 @@ retry_root_backup:
btrfs_qgroup_rescan_resume(fs_info);
- if (btrfs_test_opt(tree_root, CLEAR_CACHE) &&
+ if (btrfs_test_opt(tree_root->fs_info, CLEAR_CACHE) &&
btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) {
btrfs_info(fs_info, "clearing free space tree");
ret = btrfs_clear_free_space_tree(fs_info);
@@ -3147,7 +3148,7 @@ retry_root_backup:
close_ctree(tree_root);
return ret;
}
- } else if (btrfs_test_opt(tree_root, RESCAN_UUID_TREE) ||
+ } else if (btrfs_test_opt(tree_root->fs_info, RESCAN_UUID_TREE) ||
fs_info->generation !=
btrfs_super_uuid_tree_generation(disk_super)) {
btrfs_info(fs_info, "checking UUID tree");
@@ -3224,7 +3225,7 @@ fail:
return err;
recovery_tree_root:
- if (!btrfs_test_opt(tree_root, USEBACKUPROOT))
+ if (!btrfs_test_opt(tree_root->fs_info, USEBACKUPROOT))
goto fail_tree_roots;
free_root_pointers(fs_info, 0);
@@ -3418,9 +3419,9 @@ static int write_dev_supers(struct btrfs_device *device,
* to go down lazy.
*/
if (i == 0)
- ret = btrfsic_submit_bh(WRITE_FUA, bh);
+ ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_FUA, bh);
else
- ret = btrfsic_submit_bh(WRITE_SYNC, bh);
+ ret = btrfsic_submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
if (ret)
errors++;
}
@@ -3484,12 +3485,13 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
bio->bi_end_io = btrfs_end_empty_barrier;
bio->bi_bdev = device->bdev;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
init_completion(&device->flush_wait);
bio->bi_private = &device->flush_wait;
device->flush_bio = bio;
bio_get(bio);
- btrfsic_submit_bio(WRITE_FLUSH, bio);
+ btrfsic_submit_bio(bio);
return 0;
}
@@ -3639,7 +3641,7 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors)
int total_errors = 0;
u64 flags;
- do_barriers = !btrfs_test_opt(root, NOBARRIER);
+ do_barriers = !btrfs_test_opt(root->fs_info, NOBARRIER);
backup_super_roots(root->fs_info);
sb = root->fs_info->super_for_commit;
@@ -3923,7 +3925,7 @@ void close_ctree(struct btrfs_root *root)
iput(fs_info->btree_inode);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- if (btrfs_test_opt(root, CHECK_INTEGRITY))
+ if (btrfs_test_opt(root->fs_info, CHECK_INTEGRITY))
btrfsic_unmount(root, fs_info->fs_devices);
#endif
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index acba821499a9..b3207a0e09f7 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -90,7 +90,8 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info,
void btrfs_free_fs_root(struct btrfs_root *root);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-struct btrfs_root *btrfs_alloc_dummy_root(u32 sectorsize, u32 nodesize);
+struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info,
+ u32 sectorsize, u32 nodesize);
#endif
/*
@@ -122,7 +123,7 @@ void btrfs_csum_final(u32 crc, char *result);
int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio,
enum btrfs_wq_endio_type metadata);
int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
- int rw, struct bio *bio, int mirror_num,
+ struct bio *bio, int mirror_num,
unsigned long bio_flags, u64 bio_offset,
extent_submit_bio_hook_t *submit_bio_start,
extent_submit_bio_hook_t *submit_bio_done);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 82b912a293ab..61b494e8e604 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -111,6 +111,16 @@ static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
u64 bytenr, u64 num_bytes, int reserved);
+static int __reserve_metadata_bytes(struct btrfs_root *root,
+ struct btrfs_space_info *space_info,
+ u64 orig_bytes,
+ enum btrfs_reserve_flush_enum flush);
+static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info,
+ u64 num_bytes);
+static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info,
+ u64 num_bytes);
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
@@ -2048,7 +2058,7 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
*/
btrfs_bio_counter_inc_blocked(root->fs_info);
/* Tell the block device(s) that the sectors can be discarded */
- ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
+ ret = btrfs_map_block(root->fs_info, REQ_OP_DISCARD,
bytenr, &num_bytes, &bbio, 0);
/* Error condition is -ENOMEM */
if (!ret) {
@@ -2170,7 +2180,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
path, bytenr, parent, root_objectid,
owner, offset, refs_to_add);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
out:
btrfs_free_path(path);
return ret;
@@ -2194,7 +2204,7 @@ static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
ins.type = BTRFS_EXTENT_ITEM_KEY;
ref = btrfs_delayed_node_to_data_ref(node);
- trace_run_delayed_data_ref(node, ref, node->action);
+ trace_run_delayed_data_ref(root->fs_info, node, ref, node->action);
if (node->type == BTRFS_SHARED_DATA_REF_KEY)
parent = ref->parent;
@@ -2349,7 +2359,7 @@ static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
SKINNY_METADATA);
ref = btrfs_delayed_node_to_tree_ref(node);
- trace_run_delayed_tree_ref(node, ref, node->action);
+ trace_run_delayed_tree_ref(root->fs_info, node, ref, node->action);
if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
parent = ref->parent;
@@ -2413,7 +2423,8 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
*/
BUG_ON(extent_op);
head = btrfs_delayed_node_to_head(node);
- trace_run_delayed_ref_head(node, head, node->action);
+ trace_run_delayed_ref_head(root->fs_info, node, head,
+ node->action);
if (insert_reserved) {
btrfs_pin_extent(root, node->bytenr,
@@ -2768,7 +2779,7 @@ u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
u64 num_csums_per_leaf;
u64 num_csums;
- csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
+ csum_size = BTRFS_MAX_ITEM_SIZE(root);
num_csums_per_leaf = div64_u64(csum_size,
(u64)btrfs_super_csum_size(root->fs_info->super_copy));
num_csums = div64_u64(csum_bytes, root->sectorsize);
@@ -2960,7 +2971,7 @@ again:
trans->can_flush_pending_bgs = false;
ret = __btrfs_run_delayed_refs(trans, root, count);
if (ret < 0) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -3224,7 +3235,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
u64, u64, u64, u64, u64, u64);
- if (btrfs_test_is_dummy_root(root))
+ if (btrfs_is_testing(root->fs_info))
return 0;
ref_root = btrfs_header_owner(buf);
@@ -3419,7 +3430,7 @@ again:
* transaction, this only happens in really bad situations
* anyway.
*/
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_put;
}
WARN_ON(ret);
@@ -3437,7 +3448,7 @@ again:
spin_lock(&block_group->lock);
if (block_group->cached != BTRFS_CACHE_FINISHED ||
- !btrfs_test_opt(root, SPACE_CACHE)) {
+ !btrfs_test_opt(root->fs_info, SPACE_CACHE)) {
/*
* don't bother trying to write stuff out _if_
* a) we're not cached,
@@ -3514,7 +3525,7 @@ int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
struct btrfs_path *path;
if (list_empty(&cur_trans->dirty_bgs) ||
- !btrfs_test_opt(root, SPACE_CACHE))
+ !btrfs_test_opt(root->fs_info, SPACE_CACHE))
return 0;
path = btrfs_alloc_path();
@@ -3659,7 +3670,7 @@ again:
}
spin_unlock(&cur_trans->dirty_bgs_lock);
} else if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
}
}
@@ -3805,7 +3816,7 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
cache);
}
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
}
/* if its not on the io list, we need to put the block group */
@@ -3913,6 +3924,7 @@ static const char *alloc_name(u64 flags)
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
u64 total_bytes, u64 bytes_used,
+ u64 bytes_readonly,
struct btrfs_space_info **space_info)
{
struct btrfs_space_info *found;
@@ -3933,8 +3945,11 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
found->disk_total += total_bytes * factor;
found->bytes_used += bytes_used;
found->disk_used += bytes_used * factor;
+ found->bytes_readonly += bytes_readonly;
if (total_bytes > 0)
found->full = 0;
+ space_info_add_new_bytes(info, found, total_bytes -
+ bytes_used - bytes_readonly);
spin_unlock(&found->lock);
*space_info = found;
return 0;
@@ -3960,7 +3975,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
found->disk_used = bytes_used * factor;
found->bytes_pinned = 0;
found->bytes_reserved = 0;
- found->bytes_readonly = 0;
+ found->bytes_readonly = bytes_readonly;
found->bytes_may_use = 0;
found->full = 0;
found->max_extent_size = 0;
@@ -3969,6 +3984,8 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
found->flush = 0;
init_waitqueue_head(&found->wait);
INIT_LIST_HEAD(&found->ro_bgs);
+ INIT_LIST_HEAD(&found->tickets);
+ INIT_LIST_HEAD(&found->priority_tickets);
ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
info->space_info_kobj, "%s",
@@ -4427,7 +4444,7 @@ void check_system_chunk(struct btrfs_trans_handle *trans,
thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
btrfs_calc_trans_metadata_size(root, 1);
- if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
+ if (left < thresh && btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
left, thresh, type);
dump_space_info(info, 0, 0);
@@ -4470,7 +4487,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
space_info = __find_space_info(extent_root->fs_info, flags);
if (!space_info) {
ret = update_space_info(extent_root->fs_info, flags,
- 0, 0, &space_info);
+ 0, 0, 0, &space_info);
BUG_ON(ret); /* -ENOMEM */
}
BUG_ON(!space_info); /* Logic error */
@@ -4572,7 +4589,7 @@ out:
*/
if (trans->can_flush_pending_bgs &&
trans->chunk_bytes_reserved >= (u64)SZ_2M) {
- btrfs_create_pending_block_groups(trans, trans->root);
+ btrfs_create_pending_block_groups(trans, extent_root);
btrfs_trans_release_chunk_metadata(trans);
}
return ret;
@@ -4582,12 +4599,19 @@ static int can_overcommit(struct btrfs_root *root,
struct btrfs_space_info *space_info, u64 bytes,
enum btrfs_reserve_flush_enum flush)
{
- struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
- u64 profile = btrfs_get_alloc_profile(root, 0);
+ struct btrfs_block_rsv *global_rsv;
+ u64 profile;
u64 space_size;
u64 avail;
u64 used;
+ /* Don't overcommit when in mixed mode. */
+ if (space_info->flags & BTRFS_BLOCK_GROUP_DATA)
+ return 0;
+
+ BUG_ON(root->fs_info == NULL);
+ global_rsv = &root->fs_info->global_block_rsv;
+ profile = btrfs_get_alloc_profile(root, 0);
used = space_info->bytes_used + space_info->bytes_reserved +
space_info->bytes_pinned + space_info->bytes_readonly;
@@ -4739,6 +4763,11 @@ skip_async:
spin_unlock(&space_info->lock);
break;
}
+ if (list_empty(&space_info->tickets) &&
+ list_empty(&space_info->priority_tickets)) {
+ spin_unlock(&space_info->lock);
+ break;
+ }
spin_unlock(&space_info->lock);
loops++;
@@ -4807,13 +4836,11 @@ commit:
return btrfs_commit_transaction(trans, root);
}
-enum flush_state {
- FLUSH_DELAYED_ITEMS_NR = 1,
- FLUSH_DELAYED_ITEMS = 2,
- FLUSH_DELALLOC = 3,
- FLUSH_DELALLOC_WAIT = 4,
- ALLOC_CHUNK = 5,
- COMMIT_TRANS = 6,
+struct reserve_ticket {
+ u64 bytes;
+ int error;
+ struct list_head list;
+ wait_queue_head_t wait;
};
static int flush_space(struct btrfs_root *root,
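
The per-reservation flush-state enum is dropped from this file (the states are still referenced below, so they presumably move to a header) in favor of a reserve_ticket: every blocked reservation becomes a queued ticket with its own wait queue. A userspace sketch of the waiter/granter handshake, using a condition variable in place of the kernel wait queue (all names hypothetical):

#include <pthread.h>
#include <stdint.h>

struct ticket_model {
	uint64_t bytes;			/* still-missing bytes; 0 == granted */
	int error;			/* set by the flusher on give-up */
	pthread_mutex_t lock;
	pthread_cond_t wait;
};

/* Block until the flusher either satisfies the ticket or fails it. */
static int ticket_wait(struct ticket_model *t)
{
	int ret;

	pthread_mutex_lock(&t->lock);
	while (t->bytes > 0 && t->error == 0)
		pthread_cond_wait(&t->wait, &t->lock);
	ret = t->error;			/* 0 on success, -ENOSPC style on failure */
	pthread_mutex_unlock(&t->lock);
	return ret;
}

/* Flusher side: hand num_bytes to the ticket and wake it when done. */
static void ticket_grant(struct ticket_model *t, uint64_t num_bytes)
{
	pthread_mutex_lock(&t->lock);
	if (num_bytes >= t->bytes)
		t->bytes = 0;
	else
		t->bytes -= num_bytes;
	if (t->bytes == 0)
		pthread_cond_signal(&t->wait);
	pthread_mutex_unlock(&t->lock);
}

int main(void)
{
	struct ticket_model t = {
		.bytes = 8192,
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait = PTHREAD_COND_INITIALIZER,
	};

	ticket_grant(&t, 8192);		/* grant before waiting: no block */
	return ticket_wait(&t);		/* 0: ticket satisfied */
}

The real waiter is wait_reserve_ticket() further down; it additionally refunds the unsatisfied part of a partially granted ticket.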
@@ -4866,6 +4893,8 @@ static int flush_space(struct btrfs_root *root,
break;
}
+ trace_btrfs_flush_space(root->fs_info, space_info->flags, num_bytes,
+ orig_bytes, state, ret);
return ret;
}
@@ -4873,17 +4902,22 @@ static inline u64
btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
struct btrfs_space_info *space_info)
{
+ struct reserve_ticket *ticket;
u64 used;
u64 expected;
- u64 to_reclaim;
+ u64 to_reclaim = 0;
to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
- spin_lock(&space_info->lock);
if (can_overcommit(root, space_info, to_reclaim,
- BTRFS_RESERVE_FLUSH_ALL)) {
- to_reclaim = 0;
- goto out;
- }
+ BTRFS_RESERVE_FLUSH_ALL))
+ return 0;
+
+ list_for_each_entry(ticket, &space_info->tickets, list)
+ to_reclaim += ticket->bytes;
+ list_for_each_entry(ticket, &space_info->priority_tickets, list)
+ to_reclaim += ticket->bytes;
+ if (to_reclaim)
+ return to_reclaim;
used = space_info->bytes_used + space_info->bytes_reserved +
space_info->bytes_pinned + space_info->bytes_readonly +
@@ -4899,14 +4933,11 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
to_reclaim = 0;
to_reclaim = min(to_reclaim, space_info->bytes_may_use +
space_info->bytes_reserved);
-out:
- spin_unlock(&space_info->lock);
-
return to_reclaim;
}
static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
- struct btrfs_fs_info *fs_info, u64 used)
+ struct btrfs_root *root, u64 used)
{
u64 thresh = div_factor_fine(space_info->total_bytes, 98);
@@ -4914,73 +4945,177 @@ static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
return 0;
- return (used >= thresh && !btrfs_fs_closing(fs_info) &&
- !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
+ if (!btrfs_calc_reclaim_metadata_size(root, space_info))
+ return 0;
+
+ return (used >= thresh && !btrfs_fs_closing(root->fs_info) &&
+ !test_bit(BTRFS_FS_STATE_REMOUNTING,
+ &root->fs_info->fs_state));
}
-static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
- struct btrfs_fs_info *fs_info,
- int flush_state)
+static void wake_all_tickets(struct list_head *head)
{
- u64 used;
-
- spin_lock(&space_info->lock);
- /*
- * We run out of space and have not got any free space via flush_space,
- * so don't bother doing async reclaim.
- */
- if (flush_state > COMMIT_TRANS && space_info->full) {
- spin_unlock(&space_info->lock);
- return 0;
- }
+ struct reserve_ticket *ticket;
- used = space_info->bytes_used + space_info->bytes_reserved +
- space_info->bytes_pinned + space_info->bytes_readonly +
- space_info->bytes_may_use;
- if (need_do_async_reclaim(space_info, fs_info, used)) {
- spin_unlock(&space_info->lock);
- return 1;
+ while (!list_empty(head)) {
+ ticket = list_first_entry(head, struct reserve_ticket, list);
+ list_del_init(&ticket->list);
+ ticket->error = -ENOSPC;
+ wake_up(&ticket->wait);
}
- spin_unlock(&space_info->lock);
-
- return 0;
}
+/*
+ * This is for normal flushers; we can wait all day if we want to. We
+ * will loop and continuously try to flush as long as we are making progress.
+ * We count progress as clearing off tickets each time we have to loop.
+ */
static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
{
+ struct reserve_ticket *last_ticket = NULL;
struct btrfs_fs_info *fs_info;
struct btrfs_space_info *space_info;
u64 to_reclaim;
int flush_state;
+ int commit_cycles = 0;
fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
+ spin_lock(&space_info->lock);
to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
space_info);
- if (!to_reclaim)
+ if (!to_reclaim) {
+ space_info->flush = 0;
+ spin_unlock(&space_info->lock);
return;
+ }
+ last_ticket = list_first_entry(&space_info->tickets,
+ struct reserve_ticket, list);
+ spin_unlock(&space_info->lock);
flush_state = FLUSH_DELAYED_ITEMS_NR;
do {
+ struct reserve_ticket *ticket;
+ int ret;
+
+ ret = flush_space(fs_info->fs_root, space_info, to_reclaim,
+ to_reclaim, flush_state);
+ spin_lock(&space_info->lock);
+ if (list_empty(&space_info->tickets)) {
+ space_info->flush = 0;
+ spin_unlock(&space_info->lock);
+ return;
+ }
+ to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
+ space_info);
+ ticket = list_first_entry(&space_info->tickets,
+ struct reserve_ticket, list);
+ if (last_ticket == ticket) {
+ flush_state++;
+ } else {
+ last_ticket = ticket;
+ flush_state = FLUSH_DELAYED_ITEMS_NR;
+ if (commit_cycles)
+ commit_cycles--;
+ }
+
+ if (flush_state > COMMIT_TRANS) {
+ commit_cycles++;
+ if (commit_cycles > 2) {
+ wake_all_tickets(&space_info->tickets);
+ space_info->flush = 0;
+ } else {
+ flush_state = FLUSH_DELAYED_ITEMS_NR;
+ }
+ }
+ spin_unlock(&space_info->lock);
+ } while (flush_state <= COMMIT_TRANS);
+}
+
+void btrfs_init_async_reclaim_work(struct work_struct *work)
+{
+ INIT_WORK(work, btrfs_async_reclaim_metadata_space);
+}
+
+static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info,
+ struct reserve_ticket *ticket)
+{
+ u64 to_reclaim;
+ int flush_state = FLUSH_DELAYED_ITEMS_NR;
+
+ spin_lock(&space_info->lock);
+ to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
+ space_info);
+ if (!to_reclaim) {
+ spin_unlock(&space_info->lock);
+ return;
+ }
+ spin_unlock(&space_info->lock);
+
+ do {
flush_space(fs_info->fs_root, space_info, to_reclaim,
to_reclaim, flush_state);
flush_state++;
- if (!btrfs_need_do_async_reclaim(space_info, fs_info,
- flush_state))
+ spin_lock(&space_info->lock);
+ if (ticket->bytes == 0) {
+ spin_unlock(&space_info->lock);
return;
+ }
+ spin_unlock(&space_info->lock);
+
+ /*
+ * Priority flushers can't wait on delalloc without
+ * deadlocking.
+ */
+ if (flush_state == FLUSH_DELALLOC ||
+ flush_state == FLUSH_DELALLOC_WAIT)
+ flush_state = ALLOC_CHUNK;
} while (flush_state < COMMIT_TRANS);
}
-void btrfs_init_async_reclaim_work(struct work_struct *work)
+static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info,
+ struct reserve_ticket *ticket, u64 orig_bytes)
+
{
- INIT_WORK(work, btrfs_async_reclaim_metadata_space);
+ DEFINE_WAIT(wait);
+ int ret = 0;
+
+ spin_lock(&space_info->lock);
+ while (ticket->bytes > 0 && ticket->error == 0) {
+ ret = prepare_to_wait_event(&ticket->wait, &wait, TASK_KILLABLE);
+ if (ret) {
+ ret = -EINTR;
+ break;
+ }
+ spin_unlock(&space_info->lock);
+
+ schedule();
+
+ finish_wait(&ticket->wait, &wait);
+ spin_lock(&space_info->lock);
+ }
+ if (!ret)
+ ret = ticket->error;
+ if (!list_empty(&ticket->list))
+ list_del_init(&ticket->list);
+ if (ticket->bytes && ticket->bytes < orig_bytes) {
+ u64 num_bytes = orig_bytes - ticket->bytes;
+ space_info->bytes_may_use -= num_bytes;
+ trace_btrfs_space_reservation(fs_info, "space_info",
+ space_info->flags, num_bytes, 0);
+ }
+ spin_unlock(&space_info->lock);
+
+ return ret;
}
/**
* reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
* @root - the root we're allocating for
- * @block_rsv - the block_rsv we're allocating for
+ * @space_info - the space info we want to allocate from
* @orig_bytes - the number of bytes we want
* @flush - whether or not we can flush to make our reservation
*
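
The worker added in the hunk above is worth restating in isolation: it reruns the flush ladder, treats a change of head ticket as progress (the ladder resets), and once a full pass ending in a transaction commit has failed to clear the queue more than twice, it fails every remaining ticket. A toy model of just that control flow, with the ladder reduced to integers and a queue that only yields to the final rung:

#include <stdio.h>

/* Toy queue: 'pending' tickets, identified only by a running index. */
struct q_model { int pending; int next_id; };

static int head_ticket(struct q_model *q)	/* 0 when queue is empty */
{
	return q->pending ? q->next_id : 0;
}

/* One rung of the flush ladder; pretend only the last rung frees space. */
static void run_flush(struct q_model *q, int state, int last_rung)
{
	if (state == last_rung && q->pending) {
		q->pending--;			/* head ticket satisfied */
		q->next_id++;
	}
}

static void reclaim_loop(struct q_model *q)
{
	enum { FIRST = 1, COMMIT = 6 };		/* ladder endpoints */
	int last = head_ticket(q);
	int state = FIRST, commit_cycles = 0;

	do {
		run_flush(q, state, COMMIT);
		if (!head_ticket(q))		/* everyone satisfied */
			return;
		if (head_ticket(q) == last) {
			state++;		/* no progress: escalate */
		} else {
			last = head_ticket(q);	/* progress: restart ladder */
			state = FIRST;
			if (commit_cycles)
				commit_cycles--;
		}
		if (state > COMMIT) {
			if (++commit_cycles > 2) {
				printf("giving up, %d tickets fail\n",
				       q->pending);
				return;		/* wake_all_tickets() analogue */
			}
			state = FIRST;		/* one more full pass */
		}
	} while (state <= COMMIT);
}

int main(void)
{
	struct q_model q = { .pending = 3, .next_id = 1 };

	reclaim_loop(&q);
	printf("left pending: %d\n", q.pending);
	return 0;
}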
@@ -4991,81 +5126,36 @@ void btrfs_init_async_reclaim_work(struct work_struct *work)
* regain reservations will be made and this will fail if there is not enough
* space already.
*/
-static int reserve_metadata_bytes(struct btrfs_root *root,
- struct btrfs_block_rsv *block_rsv,
- u64 orig_bytes,
- enum btrfs_reserve_flush_enum flush)
+static int __reserve_metadata_bytes(struct btrfs_root *root,
+ struct btrfs_space_info *space_info,
+ u64 orig_bytes,
+ enum btrfs_reserve_flush_enum flush)
{
- struct btrfs_space_info *space_info = block_rsv->space_info;
+ struct reserve_ticket ticket;
u64 used;
- u64 num_bytes = orig_bytes;
- int flush_state = FLUSH_DELAYED_ITEMS_NR;
int ret = 0;
- bool flushing = false;
-
-again:
- ret = 0;
- spin_lock(&space_info->lock);
- /*
- * We only want to wait if somebody other than us is flushing and we
- * are actually allowed to flush all things.
- */
- while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
- space_info->flush) {
- spin_unlock(&space_info->lock);
- /*
- * If we have a trans handle we can't wait because the flusher
- * may have to commit the transaction, which would mean we would
- * deadlock since we are waiting for the flusher to finish, but
- * hold the current transaction open.
- */
- if (current->journal_info)
- return -EAGAIN;
- ret = wait_event_killable(space_info->wait, !space_info->flush);
- /* Must have been killed, return */
- if (ret)
- return -EINTR;
- spin_lock(&space_info->lock);
- }
+ ASSERT(orig_bytes);
+ ASSERT(!current->journal_info || flush != BTRFS_RESERVE_FLUSH_ALL);
+ spin_lock(&space_info->lock);
ret = -ENOSPC;
used = space_info->bytes_used + space_info->bytes_reserved +
space_info->bytes_pinned + space_info->bytes_readonly +
space_info->bytes_may_use;
/*
- * The idea here is that we've not already over-reserved the block group
- * then we can go ahead and save our reservation first and then start
- * flushing if we need to. Otherwise if we've already overcommitted
- * lets start flushing stuff first and then come back and try to make
- * our reservation.
+ * If we have enough space then hooray, make our reservation and carry
+ * on. If not, see if we can overcommit, and if we can, carry on.
+ * If not things get more complicated.
*/
- if (used <= space_info->total_bytes) {
- if (used + orig_bytes <= space_info->total_bytes) {
- space_info->bytes_may_use += orig_bytes;
- trace_btrfs_space_reservation(root->fs_info,
- "space_info", space_info->flags, orig_bytes, 1);
- ret = 0;
- } else {
- /*
- * Ok set num_bytes to orig_bytes since we aren't
- * overocmmitted, this way we only try and reclaim what
- * we need.
- */
- num_bytes = orig_bytes;
- }
- } else {
- /*
- * Ok we're over committed, set num_bytes to the overcommitted
- * amount plus the amount of bytes that we need for this
- * reservation.
- */
- num_bytes = used - space_info->total_bytes +
- (orig_bytes * 2);
- }
-
- if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
+ if (used + orig_bytes <= space_info->total_bytes) {
+ space_info->bytes_may_use += orig_bytes;
+ trace_btrfs_space_reservation(root->fs_info, "space_info",
+ space_info->flags, orig_bytes,
+ 1);
+ ret = 0;
+ } else if (can_overcommit(root, space_info, orig_bytes, flush)) {
space_info->bytes_may_use += orig_bytes;
trace_btrfs_space_reservation(root->fs_info, "space_info",
space_info->flags, orig_bytes,
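
The rewritten fast path above is a straight two-branch decision before any ticketing: reserve if the bytes fit under total_bytes, otherwise reserve if can_overcommit() allows it, otherwise fall through. A self-contained model (overcommit is reduced to a fixed slack; the kernel's version depends on the allocation profile and free device space):

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

struct space_model {
	uint64_t total;
	uint64_t used;	/* sum of used+reserved+pinned+readonly+may_use */
};

/* stand-in for can_overcommit(): allow a fixed slack beyond total */
static bool may_overcommit(const struct space_model *s, uint64_t bytes,
			   uint64_t slack)
{
	return s->used + bytes <= s->total + slack;
}

/* 0 on success, -1 when the caller must queue a ticket instead */
static int try_reserve(struct space_model *s, uint64_t bytes, uint64_t slack)
{
	if (s->used + bytes <= s->total ||	/* fits outright */
	    may_overcommit(s, bytes, slack)) {	/* or within overcommit */
		s->used += bytes;		/* bytes_may_use += orig_bytes */
		return 0;
	}
	return -1;
}

int main(void)
{
	struct space_model s = { .total = 100, .used = 90 };

	assert(try_reserve(&s, 10, 0) == 0);	/* exactly fits */
	assert(try_reserve(&s, 10, 0) == -1);	/* full, no slack: ticket */
	assert(try_reserve(&s, 10, 16) == 0);	/* overcommit covers it */
	return 0;
}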
@@ -5074,16 +5164,31 @@ again:
}
/*
- * Couldn't make our reservation, save our place so while we're trying
- * to reclaim space we can actually use it instead of somebody else
- * stealing it from us.
+ * If we couldn't make a reservation then set up our reservation ticket
+ * and kick the async worker if it's not already running.
*
- * We make the other tasks wait for the flush only when we can flush
- * all things.
+ * If we are a priority flusher then we just need to add our ticket to
+ * the list and we will do our own flushing further down.
*/
if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
- flushing = true;
- space_info->flush = 1;
+ ticket.bytes = orig_bytes;
+ ticket.error = 0;
+ init_waitqueue_head(&ticket.wait);
+ if (flush == BTRFS_RESERVE_FLUSH_ALL) {
+ list_add_tail(&ticket.list, &space_info->tickets);
+ if (!space_info->flush) {
+ space_info->flush = 1;
+ trace_btrfs_trigger_flush(root->fs_info,
+ space_info->flags,
+ orig_bytes, flush,
+ "enospc");
+ queue_work(system_unbound_wq,
+ &root->fs_info->async_reclaim_work);
+ }
+ } else {
+ list_add_tail(&ticket.list,
+ &space_info->priority_tickets);
+ }
} else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
used += orig_bytes;
/*
@@ -5092,39 +5197,67 @@ again:
* the async reclaim as we will panic.
*/
if (!root->fs_info->log_root_recovering &&
- need_do_async_reclaim(space_info, root->fs_info, used) &&
- !work_busy(&root->fs_info->async_reclaim_work))
+ need_do_async_reclaim(space_info, root, used) &&
+ !work_busy(&root->fs_info->async_reclaim_work)) {
+ trace_btrfs_trigger_flush(root->fs_info,
+ space_info->flags,
+ orig_bytes, flush,
+ "preempt");
queue_work(system_unbound_wq,
&root->fs_info->async_reclaim_work);
+ }
}
spin_unlock(&space_info->lock);
-
if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
- goto out;
+ return ret;
- ret = flush_space(root, space_info, num_bytes, orig_bytes,
- flush_state);
- flush_state++;
+ if (flush == BTRFS_RESERVE_FLUSH_ALL)
+ return wait_reserve_ticket(root->fs_info, space_info, &ticket,
+ orig_bytes);
- /*
- * If we are FLUSH_LIMIT, we can not flush delalloc, or the deadlock
- * would happen. So skip delalloc flush.
- */
- if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
- (flush_state == FLUSH_DELALLOC ||
- flush_state == FLUSH_DELALLOC_WAIT))
- flush_state = ALLOC_CHUNK;
+ ret = 0;
+ priority_reclaim_metadata_space(root->fs_info, space_info, &ticket);
+ spin_lock(&space_info->lock);
+ if (ticket.bytes) {
+ if (ticket.bytes < orig_bytes) {
+ u64 num_bytes = orig_bytes - ticket.bytes;
+ space_info->bytes_may_use -= num_bytes;
+ trace_btrfs_space_reservation(root->fs_info,
+ "space_info", space_info->flags,
+ num_bytes, 0);
- if (!ret)
- goto again;
- else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
- flush_state < COMMIT_TRANS)
- goto again;
- else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
- flush_state <= COMMIT_TRANS)
- goto again;
+ }
+ list_del_init(&ticket.list);
+ ret = -ENOSPC;
+ }
+ spin_unlock(&space_info->lock);
+ ASSERT(list_empty(&ticket.list));
+ return ret;
+}
-out:
+/**
+ * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
+ * @root - the root we're allocating for
+ * @block_rsv - the block_rsv we're allocating for
+ * @orig_bytes - the number of bytes we want
+ * @flush - whether or not we can flush to make our reservation
+ *
+ * This will reserve orig_bytes number of bytes from the space info associated
+ * with the block_rsv. If there is not enough space it will make an attempt to
+ * flush out space to make room. It will do this by flushing delalloc if
+ * possible or committing the transaction. If flush is 0 then no attempts to
+ * regain reservations will be made and this will fail if there is not enough
+ * space already.
+ */
+static int reserve_metadata_bytes(struct btrfs_root *root,
+ struct btrfs_block_rsv *block_rsv,
+ u64 orig_bytes,
+ enum btrfs_reserve_flush_enum flush)
+{
+ int ret;
+
+ ret = __reserve_metadata_bytes(root, block_rsv->space_info, orig_bytes,
+ flush);
if (ret == -ENOSPC &&
unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
struct btrfs_block_rsv *global_rsv =
@@ -5137,13 +5270,8 @@ out:
if (ret == -ENOSPC)
trace_btrfs_space_reservation(root->fs_info,
"space_info:enospc",
- space_info->flags, orig_bytes, 1);
- if (flushing) {
- spin_lock(&space_info->lock);
- space_info->flush = 0;
- wake_up_all(&space_info->wait);
- spin_unlock(&space_info->lock);
- }
+ block_rsv->space_info->flags,
+ orig_bytes, 1);
return ret;
}
@@ -5219,6 +5347,108 @@ int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
return 0;
}
+/*
+ * This is for space we already have accounted in space_info->bytes_may_use, so
+ * basically when we're returning space from block_rsv's.
+ */
+static void space_info_add_old_bytes(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info,
+ u64 num_bytes)
+{
+ struct reserve_ticket *ticket;
+ struct list_head *head;
+ u64 used;
+ enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_NO_FLUSH;
+ bool check_overcommit = false;
+
+ spin_lock(&space_info->lock);
+ head = &space_info->priority_tickets;
+
+ /*
+ * If we are over our limit then we need to check and see if we can
+ * overcommit, and if we can't then we just need to free up our space
+ * and not satisfy any requests.
+ */
+ used = space_info->bytes_used + space_info->bytes_reserved +
+ space_info->bytes_pinned + space_info->bytes_readonly +
+ space_info->bytes_may_use;
+ if (used - num_bytes >= space_info->total_bytes)
+ check_overcommit = true;
+again:
+ while (!list_empty(head) && num_bytes) {
+ ticket = list_first_entry(head, struct reserve_ticket,
+ list);
+ /*
+ * We use 0 bytes because this space is already reserved, so
+ * adding the ticket space would be a double count.
+ */
+ if (check_overcommit &&
+ !can_overcommit(fs_info->extent_root, space_info, 0,
+ flush))
+ break;
+ if (num_bytes >= ticket->bytes) {
+ list_del_init(&ticket->list);
+ num_bytes -= ticket->bytes;
+ ticket->bytes = 0;
+ wake_up(&ticket->wait);
+ } else {
+ ticket->bytes -= num_bytes;
+ num_bytes = 0;
+ }
+ }
+
+ if (num_bytes && head == &space_info->priority_tickets) {
+ head = &space_info->tickets;
+ flush = BTRFS_RESERVE_FLUSH_ALL;
+ goto again;
+ }
+ space_info->bytes_may_use -= num_bytes;
+ trace_btrfs_space_reservation(fs_info, "space_info",
+ space_info->flags, num_bytes, 0);
+ spin_unlock(&space_info->lock);
+}
+
+/*
+ * This is for newly allocated space that isn't accounted in
+ * space_info->bytes_may_use yet. So if we allocate a chunk or unpin an extent
+ * we use this helper.
+ */
+static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info,
+ struct btrfs_space_info *space_info,
+ u64 num_bytes)
+{
+ struct reserve_ticket *ticket;
+ struct list_head *head = &space_info->priority_tickets;
+
+again:
+ while (!list_empty(head) && num_bytes) {
+ ticket = list_first_entry(head, struct reserve_ticket,
+ list);
+ if (num_bytes >= ticket->bytes) {
+ trace_btrfs_space_reservation(fs_info, "space_info",
+ space_info->flags,
+ ticket->bytes, 1);
+ list_del_init(&ticket->list);
+ num_bytes -= ticket->bytes;
+ space_info->bytes_may_use += ticket->bytes;
+ ticket->bytes = 0;
+ wake_up(&ticket->wait);
+ } else {
+ trace_btrfs_space_reservation(fs_info, "space_info",
+ space_info->flags,
+ num_bytes, 1);
+ space_info->bytes_may_use += num_bytes;
+ ticket->bytes -= num_bytes;
+ num_bytes = 0;
+ }
+ }
+
+ if (num_bytes && head == &space_info->priority_tickets) {
+ head = &space_info->tickets;
+ goto again;
+ }
+}
+
static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
struct btrfs_block_rsv *block_rsv,
struct btrfs_block_rsv *dest, u64 num_bytes)
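
Both helpers above walk the priority queue first and then the normal queue, handing bytes to tickets in FIFO order; they differ only in how bytes_may_use is adjusted, since returned space is already accounted there while newly freed space is not. A compact model of the shared walk (a singly linked list stands in for the kernel's list_head):

#include <stdint.h>
#include <stddef.h>

struct tkt {
	uint64_t bytes;		/* still missing; 0 == granted */
	struct tkt *next;
};

/*
 * Hand out num_bytes across two FIFO queues, priority first.
 * Returns the leftover that no ticket wanted.
 */
static uint64_t grant(struct tkt **prio, struct tkt **normal,
		      uint64_t num_bytes)
{
	struct tkt **head = prio;

again:
	while (*head && num_bytes) {
		struct tkt *t = *head;

		if (num_bytes >= t->bytes) {	/* satisfy and dequeue */
			num_bytes -= t->bytes;
			t->bytes = 0;		/* waiter would be woken here */
			*head = t->next;
		} else {			/* partial grant, keep queued */
			t->bytes -= num_bytes;
			num_bytes = 0;
		}
	}
	if (num_bytes && head == prio) {
		head = normal;			/* spill over to normal queue */
		goto again;
	}
	return num_bytes;
}

int main(void)
{
	struct tkt a = { .bytes = 5 }, b = { .bytes = 10, .next = NULL };
	struct tkt *prio = &a, *normal = &b;

	return (int)grant(&prio, &normal, 12);	/* a granted, b still needs 3 */
}

In space_info_add_old_bytes() the leftover is what finally comes off bytes_may_use; in space_info_add_new_bytes() it simply stays free.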
@@ -5253,18 +5483,15 @@ static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
}
spin_unlock(&dest->lock);
}
- if (num_bytes) {
- spin_lock(&space_info->lock);
- space_info->bytes_may_use -= num_bytes;
- trace_btrfs_space_reservation(fs_info, "space_info",
- space_info->flags, num_bytes, 0);
- spin_unlock(&space_info->lock);
- }
+ if (num_bytes)
+ space_info_add_old_bytes(fs_info, space_info,
+ num_bytes);
}
}
-static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
- struct btrfs_block_rsv *dst, u64 num_bytes)
+int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src,
+ struct btrfs_block_rsv *dst, u64 num_bytes,
+ int update_size)
{
int ret;
@@ -5272,7 +5499,7 @@ static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
if (ret)
return ret;
- block_rsv_add_bytes(dst, num_bytes, 1);
+ block_rsv_add_bytes(dst, num_bytes, update_size);
return 0;
}
@@ -5379,13 +5606,6 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
return ret;
}
-int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
- struct btrfs_block_rsv *dst_rsv,
- u64 num_bytes)
-{
- return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
-}
-
void btrfs_block_rsv_release(struct btrfs_root *root,
struct btrfs_block_rsv *block_rsv,
u64 num_bytes)
@@ -5398,48 +5618,21 @@ void btrfs_block_rsv_release(struct btrfs_root *root,
num_bytes);
}
-/*
- * helper to calculate size of global block reservation.
- * the desired value is sum of space used by extent tree,
- * checksum tree and root tree
- */
-static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
-{
- struct btrfs_space_info *sinfo;
- u64 num_bytes;
- u64 meta_used;
- u64 data_used;
- int csum_size = btrfs_super_csum_size(fs_info->super_copy);
-
- sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
- spin_lock(&sinfo->lock);
- data_used = sinfo->bytes_used;
- spin_unlock(&sinfo->lock);
-
- sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
- spin_lock(&sinfo->lock);
- if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
- data_used = 0;
- meta_used = sinfo->bytes_used;
- spin_unlock(&sinfo->lock);
-
- num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
- csum_size * 2;
- num_bytes += div_u64(data_used + meta_used, 50);
-
- if (num_bytes * 3 > meta_used)
- num_bytes = div_u64(meta_used, 3);
-
- return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
-}
-
static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
{
struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
struct btrfs_space_info *sinfo = block_rsv->space_info;
u64 num_bytes;
- num_bytes = calc_global_metadata_size(fs_info);
+ /*
+ * The global block rsv is based on the size of the extent tree, the
+ * checksum tree and the root tree. If the fs is empty, we want to set
+ * it to a minimal amount for safety.
+ */
+ num_bytes = btrfs_root_used(&fs_info->extent_root->root_item) +
+ btrfs_root_used(&fs_info->csum_root->root_item) +
+ btrfs_root_used(&fs_info->tree_root->root_item);
+ num_bytes = max_t(u64, num_bytes, SZ_16M);
spin_lock(&sinfo->lock);
spin_lock(&block_rsv->lock);
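
The removed heuristic estimated the reserve from data and metadata usage plus csum density; the replacement simply sums the bytes used by the three trees the global reserve actually protects, floored at 16MiB. As a worked example:

#include <stdint.h>
#include <stdio.h>

#define SZ_16M	(16ULL * 1024 * 1024)

/* New sizing rule: extent + csum + root tree usage, minimum 16MiB. */
static uint64_t global_rsv_target(uint64_t extent_tree_used,
				  uint64_t csum_tree_used,
				  uint64_t root_tree_used)
{
	uint64_t n = extent_tree_used + csum_tree_used + root_tree_used;

	return n > SZ_16M ? n : SZ_16M;
}

int main(void)
{
	/* Freshly made fs: trees are tiny, the 16MiB floor wins. */
	printf("%llu\n", (unsigned long long)
	       global_rsv_target(1 << 20, 1 << 16, 1 << 16));
	/* Aged fs: the trees themselves dominate. */
	printf("%llu\n", (unsigned long long)
	       global_rsv_target(1ULL << 30, 1ULL << 28, 1 << 20));
	return 0;
}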
@@ -5537,7 +5730,7 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
*/
void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
{
- struct btrfs_fs_info *fs_info = trans->root->fs_info;
+ struct btrfs_fs_info *fs_info = trans->fs_info;
if (!trans->chunk_bytes_reserved)
return;
@@ -5554,7 +5747,13 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
- struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
+ /*
+ * We always use trans->block_rsv here as we will have reserved space
+ * for our orphan when starting the transaction; using get_block_rsv()
+ * here can make us choose the wrong block rsv, as we could be
+ * doing a reloc inode for a non-refcounted root.
+ */
+ struct btrfs_block_rsv *src_rsv = trans->block_rsv;
struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
/*
@@ -5565,7 +5764,7 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
trace_btrfs_space_reservation(root->fs_info, "orphan",
btrfs_ino(inode), num_bytes, 1);
- return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
+ return btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1);
}
void btrfs_orphan_release_metadata(struct inode *inode)
@@ -5620,7 +5819,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
BTRFS_RESERVE_FLUSH_ALL);
if (ret == -ENOSPC && use_global_rsv)
- ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
+ ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes, 1);
if (ret && *qgroup_reserved)
btrfs_qgroup_free_meta(root, *qgroup_reserved);
@@ -5730,21 +5929,26 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
u64 to_reserve = 0;
u64 csum_bytes;
unsigned nr_extents = 0;
- int extra_reserve = 0;
enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
int ret = 0;
bool delalloc_lock = true;
u64 to_free = 0;
unsigned dropped;
+ bool release_extra = false;
/* If we are a free space inode we need to not flush since we will be in
* the middle of a transaction commit. We also don't need the delalloc
* mutex since we won't race with anybody. We need this mostly to make
* lockdep shut its filthy mouth.
+ *
+ * If we have a transaction open (can happen if we call truncate_block
+ * from truncate), then we need FLUSH_LIMIT so we don't deadlock.
*/
if (btrfs_is_free_space_inode(inode)) {
flush = BTRFS_RESERVE_NO_FLUSH;
delalloc_lock = false;
+ } else if (current->journal_info) {
+ flush = BTRFS_RESERVE_FLUSH_LIMIT;
}
if (flush != BTRFS_RESERVE_NO_FLUSH &&
@@ -5761,24 +5965,15 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
BTRFS_MAX_EXTENT_SIZE - 1,
BTRFS_MAX_EXTENT_SIZE);
BTRFS_I(inode)->outstanding_extents += nr_extents;
- nr_extents = 0;
+ nr_extents = 0;
if (BTRFS_I(inode)->outstanding_extents >
BTRFS_I(inode)->reserved_extents)
- nr_extents = BTRFS_I(inode)->outstanding_extents -
+ nr_extents += BTRFS_I(inode)->outstanding_extents -
BTRFS_I(inode)->reserved_extents;
- /*
- * Add an item to reserve for updating the inode when we complete the
- * delalloc io.
- */
- if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
- &BTRFS_I(inode)->runtime_flags)) {
- nr_extents++;
- extra_reserve = 1;
- }
-
- to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
+ /* We always want to reserve a slot for updating the inode. */
+ to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents + 1);
to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
csum_bytes = BTRFS_I(inode)->csum_bytes;
spin_unlock(&BTRFS_I(inode)->lock);
@@ -5790,17 +5985,17 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
goto out_fail;
}
- ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
+ ret = btrfs_block_rsv_add(root, block_rsv, to_reserve, flush);
if (unlikely(ret)) {
btrfs_qgroup_free_meta(root, nr_extents * root->nodesize);
goto out_fail;
}
spin_lock(&BTRFS_I(inode)->lock);
- if (extra_reserve) {
- set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
- &BTRFS_I(inode)->runtime_flags);
- nr_extents--;
+ if (test_and_set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
+ &BTRFS_I(inode)->runtime_flags)) {
+ to_reserve -= btrfs_calc_trans_metadata_size(root, 1);
+ release_extra = true;
}
BTRFS_I(inode)->reserved_extents += nr_extents;
spin_unlock(&BTRFS_I(inode)->lock);
@@ -5811,8 +6006,10 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
if (to_reserve)
trace_btrfs_space_reservation(root->fs_info, "delalloc",
btrfs_ino(inode), to_reserve, 1);
- block_rsv_add_bytes(block_rsv, to_reserve, 1);
-
+ if (release_extra)
+ btrfs_block_rsv_release(root, block_rsv,
+ btrfs_calc_trans_metadata_size(root,
+ 1));
return 0;
out_fail:
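
The accounting change above is easiest to see with numbers: reserve for nr_extents + 1 items up front, then give one item back if the inode-update slot (the DELALLOC_META_RESERVED bit) was already held. A toy model with an invented flat per-item cost (the real cost comes from btrfs_calc_trans_metadata_size()):

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

#define ITEM_COST	4096ULL		/* toy stand-in for one metadata item */

struct inode_model {
	bool meta_reserved;		/* DELALLOC_META_RESERVED analogue */
};

/* Returns the bytes that remain reserved after the extra-release step. */
static uint64_t delalloc_reserve(struct inode_model *in, unsigned nr_extents)
{
	uint64_t reserved = (nr_extents + 1) * ITEM_COST; /* +1: inode update */

	if (in->meta_reserved)
		reserved -= ITEM_COST;	/* slot already held: release extra */
	else
		in->meta_reserved = true;
	return reserved;
}

int main(void)
{
	struct inode_model in = { .meta_reserved = false };

	assert(delalloc_reserve(&in, 2) == 3 * ITEM_COST);	/* first write */
	assert(delalloc_reserve(&in, 2) == 2 * ITEM_COST);	/* slot reused */
	return 0;
}

Reserving unconditionally and releasing the surplus avoids the old extra_reserve bookkeeping, at the cost of one transient over-reservation per call.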
@@ -5904,7 +6101,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
if (dropped > 0)
to_free += btrfs_calc_trans_metadata_size(root, dropped);
- if (btrfs_test_is_dummy_root(root))
+ if (btrfs_is_testing(root->fs_info))
return;
trace_btrfs_space_reservation(root->fs_info, "delalloc",
@@ -6019,7 +6216,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
spin_lock(&cache->space_info->lock);
spin_lock(&cache->lock);
- if (btrfs_test_opt(root, SPACE_CACHE) &&
+ if (btrfs_test_opt(root->fs_info, SPACE_CACHE) &&
cache->disk_cache_state < BTRFS_DC_CLEAR)
cache->disk_cache_state = BTRFS_DC_CLEAR;
@@ -6044,6 +6241,9 @@ static int update_block_group(struct btrfs_trans_handle *trans,
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
+ trace_btrfs_space_reservation(root->fs_info, "pinned",
+ cache->space_info->flags,
+ num_bytes, 1);
set_extent_dirty(info->pinned_extents,
bytenr, bytenr + num_bytes - 1,
GFP_NOFS | __GFP_NOFAIL);
@@ -6118,10 +6318,10 @@ static int pin_down_extent(struct btrfs_root *root,
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
+ trace_btrfs_space_reservation(root->fs_info, "pinned",
+ cache->space_info->flags, num_bytes, 1);
set_extent_dirty(root->fs_info->pinned_extents, bytenr,
bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
- if (reserved)
- trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
return 0;
}
@@ -6398,7 +6598,7 @@ fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
u64 *empty_cluster)
{
struct btrfs_free_cluster *ret = NULL;
- bool ssd = btrfs_test_opt(root, SSD);
+ bool ssd = btrfs_test_opt(root->fs_info, SSD);
*empty_cluster = 0;
if (btrfs_mixed_space_info(space_info))
@@ -6476,6 +6676,9 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
spin_lock(&cache->lock);
cache->pinned -= len;
space_info->bytes_pinned -= len;
+
+ trace_btrfs_space_reservation(fs_info, "pinned",
+ space_info->flags, len, 0);
space_info->max_extent_size = 0;
percpu_counter_add(&space_info->total_bytes_pinned, -len);
if (cache->ro) {
@@ -6483,17 +6686,29 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
readonly = true;
}
spin_unlock(&cache->lock);
- if (!readonly && global_rsv->space_info == space_info) {
+ if (!readonly && return_free_space &&
+ global_rsv->space_info == space_info) {
+ u64 to_add = len;
+ WARN_ON(!return_free_space);
spin_lock(&global_rsv->lock);
if (!global_rsv->full) {
- len = min(len, global_rsv->size -
- global_rsv->reserved);
- global_rsv->reserved += len;
- space_info->bytes_may_use += len;
+ to_add = min(len, global_rsv->size -
+ global_rsv->reserved);
+ global_rsv->reserved += to_add;
+ space_info->bytes_may_use += to_add;
if (global_rsv->reserved >= global_rsv->size)
global_rsv->full = 1;
+ trace_btrfs_space_reservation(fs_info,
+ "space_info",
+ space_info->flags,
+ to_add, 1);
+ len -= to_add;
}
spin_unlock(&global_rsv->lock);
+ /* Add to any tickets we may have */
+ if (len)
+ space_info_add_new_bytes(fs_info, space_info,
+ len);
}
spin_unlock(&space_info->lock);
}
@@ -6528,7 +6743,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
break;
}
- if (btrfs_test_opt(root, DISCARD))
+ if (btrfs_test_opt(root->fs_info, DISCARD))
ret = btrfs_discard_extent(root, start,
end + 1 - start, NULL);
@@ -6666,7 +6881,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
NULL, refs_to_drop,
is_data, &last_ref);
if (ret) {
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
btrfs_release_path(path);
@@ -6715,7 +6930,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
path->nodes[0]);
}
if (ret < 0) {
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
extent_slot = path->slots[0];
@@ -6726,10 +6941,10 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
"unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
bytenr, parent, root_objectid, owner_objectid,
owner_offset);
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
} else {
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -6741,7 +6956,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
ret = convert_extent_item_v0(trans, extent_root, path,
owner_objectid, 0);
if (ret < 0) {
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -6760,7 +6975,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
btrfs_print_leaf(extent_root, path->nodes[0]);
}
if (ret < 0) {
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -6785,7 +7000,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
btrfs_err(info, "trying to drop %d refs but we only have %Lu "
"for bytenr %Lu", refs_to_drop, refs, bytenr);
ret = -EINVAL;
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
refs -= refs_to_drop;
@@ -6808,7 +7023,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
iref, refs_to_drop,
is_data, &last_ref);
if (ret) {
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
}
@@ -6831,7 +7046,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
num_to_del);
if (ret) {
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
btrfs_release_path(path);
@@ -6839,7 +7054,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
if (is_data) {
ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
if (ret) {
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
}
@@ -6847,13 +7062,13 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
ret = add_to_free_space_tree(trans, root->fs_info, bytenr,
num_bytes);
if (ret) {
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
ret = update_block_group(trans, root, bytenr, num_bytes, 0);
if (ret) {
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
}
@@ -7002,7 +7217,7 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
int ret;
struct btrfs_fs_info *fs_info = root->fs_info;
- if (btrfs_test_is_dummy_root(root))
+ if (btrfs_is_testing(fs_info))
return 0;
add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
@@ -7637,8 +7852,7 @@ loop:
* can do more things.
*/
if (ret < 0 && ret != -ENOSPC)
- btrfs_abort_transaction(trans,
- root, ret);
+ btrfs_abort_transaction(trans, ret);
else
ret = 0;
if (!exist)
@@ -7692,8 +7906,8 @@ static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
info->flags,
info->total_bytes - info->bytes_used - info->bytes_pinned -
- info->bytes_reserved - info->bytes_readonly,
- (info->full) ? "" : "not ");
+ info->bytes_reserved - info->bytes_readonly -
+ info->bytes_may_use, (info->full) ? "" : "not ");
printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
"reserved=%llu, may_use=%llu, readonly=%llu\n",
info->total_bytes, info->bytes_used, info->bytes_pinned,
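
Editorial note: the dump_space_info hunk above widens the printed "free" figure to also subtract bytes_may_use. A stand-alone model of the accounting identity, with plain fields standing in for the btrfs_space_info counters:

#include <stdio.h>

struct space_info {
	unsigned long long total, used, pinned, reserved, readonly, may_use;
};

/* free space as dump_space_info now reports it: every class of
 * committed or promised bytes is subtracted from the total */
static unsigned long long free_bytes(const struct space_info *s)
{
	return s->total - s->used - s->pinned - s->reserved -
	       s->readonly - s->may_use;
}

int main(void)
{
	struct space_info s = { 100, 40, 10, 5, 5, 20 };

	printf("%llu free\n", free_bytes(&s)); /* 20, not 40 */
	return 0;
}
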
@@ -7747,7 +7961,7 @@ again:
if (num_bytes == min_alloc_size)
final_tried = true;
goto again;
- } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
+ } else if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
struct btrfs_space_info *sinfo;
sinfo = __find_space_info(root->fs_info, flags);
@@ -7778,16 +7992,14 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
if (pin)
pin_down_extent(root, cache, start, len, 1);
else {
- if (btrfs_test_opt(root, DISCARD))
+ if (btrfs_test_opt(root->fs_info, DISCARD))
ret = btrfs_discard_extent(root, start, len, NULL);
btrfs_add_free_space(cache, start, len);
btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
+ trace_btrfs_reserved_extent_free(root, start, len);
}
btrfs_put_block_group(cache);
-
- trace_btrfs_reserved_extent_free(root, start, len);
-
return ret;
}
@@ -8088,7 +8300,7 @@ again:
goto again;
}
- if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
+ if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
static DEFINE_RATELIMIT_STATE(_rs,
DEFAULT_RATELIMIT_INTERVAL * 10,
/*DEFAULT_RATELIMIT_BURST*/ 1);
@@ -8142,13 +8354,15 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
SKINNY_METADATA);
- if (btrfs_test_is_dummy_root(root)) {
+#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+ if (btrfs_is_testing(root->fs_info)) {
buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
level);
if (!IS_ERR(buf))
root->alloc_bytenr += blocksize;
return buf;
}
+#endif
block_rsv = use_block_rsv(trans, root, blocksize);
if (IS_ERR(block_rsv))
@@ -8328,7 +8542,8 @@ static int record_one_subtree_extent(struct btrfs_trans_handle *trans,
delayed_refs = &trans->transaction->delayed_refs;
spin_lock(&delayed_refs->lock);
- if (btrfs_qgroup_insert_dirty_extent(delayed_refs, qrecord))
+ if (btrfs_qgroup_insert_dirty_extent(trans->fs_info,
+ delayed_refs, qrecord))
kfree(qrecord);
spin_unlock(&delayed_refs->lock);
@@ -9113,7 +9328,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
&root->root_key,
root_item);
if (ret) {
- btrfs_abort_transaction(trans, tree_root, ret);
+ btrfs_abort_transaction(trans, ret);
err = ret;
goto out_end_trans;
}
@@ -9140,7 +9355,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
ret = btrfs_del_root(trans, tree_root, &root->root_key);
if (ret) {
- btrfs_abort_transaction(trans, tree_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
@@ -9148,7 +9363,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
ret = btrfs_find_root(tree_root, &root->root_key, path,
NULL, NULL);
if (ret < 0) {
- btrfs_abort_transaction(trans, tree_root, ret);
+ btrfs_abort_transaction(trans, ret);
err = ret;
goto out_end_trans;
} else if (ret > 0) {
@@ -9519,7 +9734,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
int full = 0;
int ret = 0;
- debug = btrfs_test_opt(root, ENOSPC_DEBUG);
+ debug = btrfs_test_opt(root->fs_info, ENOSPC_DEBUG);
block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
@@ -9675,7 +9890,22 @@ static int find_first_block_group(struct btrfs_root *root,
if (found_key.objectid >= key->objectid &&
found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
- ret = 0;
+ struct extent_map_tree *em_tree;
+ struct extent_map *em;
+
+ em_tree = &root->fs_info->mapping_tree.map_tree;
+ read_lock(&em_tree->lock);
+ em = lookup_extent_mapping(em_tree, found_key.objectid,
+ found_key.offset);
+ read_unlock(&em_tree->lock);
+ if (!em) {
+ btrfs_err(root->fs_info,
+ "logical %llu len %llu found bg but no related chunk",
+ found_key.objectid, found_key.offset);
+ ret = -ENOENT;
+ } else {
+ ret = 0;
+ }
goto out;
}
path->slots[0]++;
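
Editorial note: the new check in find_first_block_group refuses to accept a block group item that has no backing chunk mapping, catching a corrupted or crafted extent tree early. A small model of the idea, with a plain array and an illustrative lookup_chunk() standing in for the extent map tree and lookup_extent_mapping():

#include <stdio.h>

struct chunk { unsigned long long start, len; };

/* stand-in for lookup_extent_mapping(): find a chunk covering start */
static const struct chunk *lookup_chunk(const struct chunk *chunks, int n,
					unsigned long long start)
{
	int i;

	for (i = 0; i < n; i++)
		if (start >= chunks[i].start &&
		    start < chunks[i].start + chunks[i].len)
			return &chunks[i];
	return NULL;
}

int main(void)
{
	const struct chunk chunks[] = { { 0, 1024 }, { 4096, 1024 } };
	unsigned long long bg_start = 2048;	/* bg with no chunk */

	if (!lookup_chunk(chunks, 2, bg_start)) {
		printf("logical %llu: found bg but no related chunk\n",
		       bg_start);
		return 2;	/* treated as -ENOENT in the kernel code */
	}
	return 0;
}
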
@@ -9791,13 +10021,15 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
space_info = list_entry(info->space_info.next,
struct btrfs_space_info,
list);
- if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
- if (WARN_ON(space_info->bytes_pinned > 0 ||
+
+ /*
+ * Do not hide this behind enospc_debug; this is actually
+ * important and indicates a real bug if it happens.
+ */
+ if (WARN_ON(space_info->bytes_pinned > 0 ||
space_info->bytes_reserved > 0 ||
- space_info->bytes_may_use > 0)) {
- dump_space_info(space_info, 0, 0);
- }
- }
+ space_info->bytes_may_use > 0))
+ dump_space_info(space_info, 0, 0);
list_del(&space_info->list);
for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
struct kobject *kobj;
@@ -9915,10 +10147,10 @@ int btrfs_read_block_groups(struct btrfs_root *root)
path->reada = READA_FORWARD;
cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
- if (btrfs_test_opt(root, SPACE_CACHE) &&
+ if (btrfs_test_opt(root->fs_info, SPACE_CACHE) &&
btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
need_clear = 1;
- if (btrfs_test_opt(root, CLEAR_CACHE))
+ if (btrfs_test_opt(root->fs_info, CLEAR_CACHE))
need_clear = 1;
while (1) {
@@ -9949,7 +10181,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
* b) Setting 'dirty flag' makes sure that we flush
* the new space cache info onto disk.
*/
- if (btrfs_test_opt(root, SPACE_CACHE))
+ if (btrfs_test_opt(root->fs_info, SPACE_CACHE))
cache->disk_cache_state = BTRFS_DC_CLEAR;
}
@@ -10005,9 +10237,10 @@ int btrfs_read_block_groups(struct btrfs_root *root)
goto error;
}
+ trace_btrfs_add_block_group(root->fs_info, cache, 0);
ret = update_space_info(info, cache->flags, found_key.offset,
btrfs_block_group_used(&cache->item),
- &space_info);
+ cache->bytes_super, &space_info);
if (ret) {
btrfs_remove_free_space_cache(cache);
spin_lock(&info->block_group_cache_lock);
@@ -10020,9 +10253,6 @@ int btrfs_read_block_groups(struct btrfs_root *root)
}
cache->space_info = space_info;
- spin_lock(&cache->space_info->lock);
- cache->space_info->bytes_readonly += cache->bytes_super;
- spin_unlock(&cache->space_info->lock);
__link_block_group(space_info, cache);
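
Editorial note: with the extra argument, bytes_super is folded into the space_info's readonly counter inside update_space_info itself, which is why the separate spin-locked additions of cache->bytes_super are deleted in this and the btrfs_make_block_group hunks below. A rough user-space model of the new shape, with simplified, hypothetical types:

#include <stdio.h>

struct space_info {
	unsigned long long total_bytes, bytes_used, bytes_readonly;
};

/* bytes_super is now accounted at registration time instead of by a
 * separate locked update at each call site */
static void update_space_info(struct space_info *si,
			      unsigned long long total,
			      unsigned long long used,
			      unsigned long long bytes_super)
{
	si->total_bytes += total;
	si->bytes_used += used;
	si->bytes_readonly += bytes_super;
}

int main(void)
{
	struct space_info si = { 0, 0, 0 };

	update_space_info(&si, 1ULL << 30, 1ULL << 20, 1ULL << 16);
	printf("ro=%llu\n", si.bytes_readonly);
	return 0;
}
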
@@ -10093,11 +10323,11 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
ret = btrfs_insert_item(trans, extent_root, &key, &item,
sizeof(item));
if (ret)
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
ret = btrfs_finish_chunk_alloc(trans, extent_root,
key.objectid, key.offset);
if (ret)
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
add_block_group_free_space(trans, root->fs_info, block_group);
/* already aborted the transaction if it failed. */
next:
@@ -10114,7 +10344,6 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
int ret;
struct btrfs_root *extent_root;
struct btrfs_block_group_cache *cache;
-
extent_root = root->fs_info->extent_root;
btrfs_set_log_full_commit(root->fs_info, trans);
@@ -10160,7 +10389,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
* assigned to our block group, but don't update its counters just yet.
* We want our bg to be added to the rbtree with its ->space_info set.
*/
- ret = update_space_info(root->fs_info, cache->flags, 0, 0,
+ ret = update_space_info(root->fs_info, cache->flags, 0, 0, 0,
&cache->space_info);
if (ret) {
btrfs_remove_free_space_cache(cache);
@@ -10179,8 +10408,9 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
* Now that our block group has its ->space_info set and is inserted in
* the rbtree, update the space info's counters.
*/
+ trace_btrfs_add_block_group(root->fs_info, cache, 1);
ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
- &cache->space_info);
+ cache->bytes_super, &cache->space_info);
if (ret) {
btrfs_remove_free_space_cache(cache);
spin_lock(&root->fs_info->block_group_cache_lock);
@@ -10193,16 +10423,11 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
}
update_global_block_rsv(root->fs_info);
- spin_lock(&cache->space_info->lock);
- cache->space_info->bytes_readonly += cache->bytes_super;
- spin_unlock(&cache->space_info->lock);
-
__link_block_group(cache->space_info, cache);
list_add_tail(&cache->bg_list, &trans->new_bgs);
set_avail_alloc_bits(extent_root->fs_info, type);
-
return 0;
}
@@ -10415,7 +10640,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
spin_lock(&block_group->space_info->lock);
list_del_init(&block_group->ro_list);
- if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
+ if (btrfs_test_opt(root->fs_info, ENOSPC_DEBUG)) {
WARN_ON(block_group->space_info->total_bytes
< block_group->key.offset);
WARN_ON(block_group->space_info->bytes_readonly
@@ -10683,7 +10908,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
spin_unlock(&space_info->lock);
/* DISCARD can flip during remount */
- trimming = btrfs_test_opt(root, DISCARD);
+ trimming = btrfs_test_opt(root->fs_info, DISCARD);
/* Implicit trim during transaction commit. */
if (trimming)
@@ -10747,21 +10972,21 @@ int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
mixed = 1;
flags = BTRFS_BLOCK_GROUP_SYSTEM;
- ret = update_space_info(fs_info, flags, 0, 0, &space_info);
+ ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
if (ret)
goto out;
if (mixed) {
flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
- ret = update_space_info(fs_info, flags, 0, 0, &space_info);
+ ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
} else {
flags = BTRFS_BLOCK_GROUP_METADATA;
- ret = update_space_info(fs_info, flags, 0, 0, &space_info);
+ ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
if (ret)
goto out;
flags = BTRFS_BLOCK_GROUP_DATA;
- ret = update_space_info(fs_info, flags, 0, 0, &space_info);
+ ret = update_space_info(fs_info, flags, 0, 0, 0, &space_info);
}
out:
return ret;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 75533adef998..44fe66b53c8b 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -163,13 +163,13 @@ int __init extent_io_init(void)
{
extent_state_cache = kmem_cache_create("btrfs_extent_state",
sizeof(struct extent_state), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ SLAB_MEM_SPREAD, NULL);
if (!extent_state_cache)
return -ENOMEM;
extent_buffer_cache = kmem_cache_create("btrfs_extent_buffer",
sizeof(struct extent_buffer), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ SLAB_MEM_SPREAD, NULL);
if (!extent_buffer_cache)
goto free_state_cache;
@@ -2049,9 +2049,10 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
return -EIO;
}
bio->bi_bdev = dev->bdev;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
bio_add_page(bio, page, length, pg_offset);
- if (btrfsic_submit_bio_wait(WRITE_SYNC, bio)) {
+ if (btrfsic_submit_bio_wait(bio)) {
/* try to remap that extent elsewhere? */
btrfs_bio_counter_dec(fs_info);
bio_put(bio);
@@ -2386,7 +2387,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
int read_mode;
int ret;
- BUG_ON(failed_bio->bi_rw & REQ_WRITE);
+ BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
if (ret)
@@ -2412,12 +2413,12 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
free_io_failure(inode, failrec);
return -EIO;
}
+ bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
pr_debug("Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d\n",
read_mode, failrec->this_mirror, failrec->in_validation);
- ret = tree->ops->submit_bio_hook(inode, read_mode, bio,
- failrec->this_mirror,
+ ret = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror,
failrec->bio_flags, 0);
if (ret) {
free_io_failure(inode, failrec);
@@ -2696,12 +2697,6 @@ struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
btrfs_bio->csum = NULL;
btrfs_bio->csum_allocated = NULL;
btrfs_bio->end_io = NULL;
-
-#ifdef CONFIG_BLK_CGROUP
- /* FIXME, put this into bio_clone_bioset */
- if (bio->bi_css)
- bio_associate_blkcg(new, bio->bi_css);
-#endif
}
return new;
}
@@ -2723,8 +2718,8 @@ struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
}
-static int __must_check submit_one_bio(int rw, struct bio *bio,
- int mirror_num, unsigned long bio_flags)
+static int __must_check submit_one_bio(struct bio *bio, int mirror_num,
+ unsigned long bio_flags)
{
int ret = 0;
struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
@@ -2735,33 +2730,31 @@ static int __must_check submit_one_bio(int rw, struct bio *bio,
start = page_offset(page) + bvec->bv_offset;
bio->bi_private = NULL;
-
bio_get(bio);
if (tree->ops && tree->ops->submit_bio_hook)
- ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
+ ret = tree->ops->submit_bio_hook(page->mapping->host, bio,
mirror_num, bio_flags, start);
else
- btrfsic_submit_bio(rw, bio);
+ btrfsic_submit_bio(bio);
bio_put(bio);
return ret;
}
-static int merge_bio(int rw, struct extent_io_tree *tree, struct page *page,
+static int merge_bio(struct extent_io_tree *tree, struct page *page,
unsigned long offset, size_t size, struct bio *bio,
unsigned long bio_flags)
{
int ret = 0;
if (tree->ops && tree->ops->merge_bio_hook)
- ret = tree->ops->merge_bio_hook(rw, page, offset, size, bio,
+ ret = tree->ops->merge_bio_hook(page, offset, size, bio,
bio_flags);
- BUG_ON(ret < 0);
return ret;
}
-static int submit_extent_page(int rw, struct extent_io_tree *tree,
+static int submit_extent_page(int op, int op_flags, struct extent_io_tree *tree,
struct writeback_control *wbc,
struct page *page, sector_t sector,
size_t size, unsigned long offset,
@@ -2789,10 +2782,9 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
if (prev_bio_flags != bio_flags || !contig ||
force_bio_submit ||
- merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) ||
+ merge_bio(tree, page, offset, page_size, bio, bio_flags) ||
bio_add_page(bio, page, page_size, offset) < page_size) {
- ret = submit_one_bio(rw, bio, mirror_num,
- prev_bio_flags);
+ ret = submit_one_bio(bio, mirror_num, prev_bio_flags);
if (ret < 0) {
*bio_ret = NULL;
return ret;
@@ -2813,6 +2805,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
bio_add_page(bio, page, page_size, offset);
bio->bi_end_io = end_io_func;
bio->bi_private = tree;
+ bio_set_op_attrs(bio, op, op_flags);
if (wbc) {
wbc_init_bio(wbc, bio);
wbc_account_io(wbc, page, page_size);
@@ -2821,7 +2814,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
if (bio_ret)
*bio_ret = bio;
else
- ret = submit_one_bio(rw, bio, mirror_num, bio_flags);
+ ret = submit_one_bio(bio, mirror_num, bio_flags);
return ret;
}
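
Editorial note: submit_extent_page now takes the request operation and its flags separately and packs them into the bio with bio_set_op_attrs, instead of threading a combined rw value through every caller. A tiny model of the packing, with made-up bit values standing in for the block layer's REQ_OP and REQ flag constants:

#include <stdio.h>

/* stand-in values; the real constants come from the block layer */
enum { REQ_OP_READ = 0, REQ_OP_WRITE = 1 };
#define WRITE_SYNC (1u << 8)

struct bio { unsigned int bi_opf; };

/* mirrors what bio_set_op_attrs() does: one field carries both the
 * operation and its modifier flags */
static void bio_set_op_attrs(struct bio *bio, unsigned int op,
			     unsigned int op_flags)
{
	bio->bi_opf = op | op_flags;
}

static unsigned int bio_op(const struct bio *bio)
{
	return bio->bi_opf & 0xffu; /* low bits select the operation */
}

int main(void)
{
	struct bio b;

	bio_set_op_attrs(&b, REQ_OP_WRITE, WRITE_SYNC);
	printf("op=%u sync=%d\n", bio_op(&b), !!(b.bi_opf & WRITE_SYNC));
	return 0;
}
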
@@ -2879,13 +2872,14 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset,
* into the tree that are removed when the IO is done (by the end_io
* handlers)
* XXX JDM: This needs looking at to ensure proper page locking
+ * return 0 on success, otherwise return a negative error
*/
static int __do_readpage(struct extent_io_tree *tree,
struct page *page,
get_extent_t *get_extent,
struct extent_map **em_cached,
struct bio **bio, int mirror_num,
- unsigned long *bio_flags, int rw,
+ unsigned long *bio_flags, int read_flags,
u64 *prev_em_start)
{
struct inode *inode = page->mapping->host;
@@ -2900,7 +2894,7 @@ static int __do_readpage(struct extent_io_tree *tree,
sector_t sector;
struct extent_map *em;
struct block_device *bdev;
- int ret;
+ int ret = 0;
int nr = 0;
size_t pg_offset = 0;
size_t iosize;
@@ -3068,8 +3062,8 @@ static int __do_readpage(struct extent_io_tree *tree,
}
pnr -= page->index;
- ret = submit_extent_page(rw, tree, NULL, page,
- sector, disk_io_size, pg_offset,
+ ret = submit_extent_page(REQ_OP_READ, read_flags, tree, NULL,
+ page, sector, disk_io_size, pg_offset,
bdev, bio, pnr,
end_bio_extent_readpage, mirror_num,
*bio_flags,
@@ -3081,6 +3075,7 @@ static int __do_readpage(struct extent_io_tree *tree,
} else {
SetPageError(page);
unlock_extent(tree, cur, cur + iosize - 1);
+ goto out;
}
cur = cur + iosize;
pg_offset += iosize;
@@ -3091,7 +3086,7 @@ out:
SetPageUptodate(page);
unlock_page(page);
}
- return 0;
+ return ret;
}
static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
@@ -3100,7 +3095,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
get_extent_t *get_extent,
struct extent_map **em_cached,
struct bio **bio, int mirror_num,
- unsigned long *bio_flags, int rw,
+ unsigned long *bio_flags,
u64 *prev_em_start)
{
struct inode *inode;
@@ -3121,7 +3116,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree,
for (index = 0; index < nr_pages; index++) {
__do_readpage(tree, pages[index], get_extent, em_cached, bio,
- mirror_num, bio_flags, rw, prev_em_start);
+ mirror_num, bio_flags, 0, prev_em_start);
put_page(pages[index]);
}
}
@@ -3131,7 +3126,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
int nr_pages, get_extent_t *get_extent,
struct extent_map **em_cached,
struct bio **bio, int mirror_num,
- unsigned long *bio_flags, int rw,
+ unsigned long *bio_flags,
u64 *prev_em_start)
{
u64 start = 0;
@@ -3153,7 +3148,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
index - first_index, start,
end, get_extent, em_cached,
bio, mirror_num, bio_flags,
- rw, prev_em_start);
+ prev_em_start);
start = page_start;
end = start + PAGE_SIZE - 1;
first_index = index;
@@ -3164,7 +3159,7 @@ static void __extent_readpages(struct extent_io_tree *tree,
__do_contiguous_readpages(tree, &pages[first_index],
index - first_index, start,
end, get_extent, em_cached, bio,
- mirror_num, bio_flags, rw,
+ mirror_num, bio_flags,
prev_em_start);
}
@@ -3172,7 +3167,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
struct page *page,
get_extent_t *get_extent,
struct bio **bio, int mirror_num,
- unsigned long *bio_flags, int rw)
+ unsigned long *bio_flags, int read_flags)
{
struct inode *inode = page->mapping->host;
struct btrfs_ordered_extent *ordered;
@@ -3192,7 +3187,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
}
ret = __do_readpage(tree, page, get_extent, NULL, bio, mirror_num,
- bio_flags, rw, NULL);
+ bio_flags, read_flags, NULL);
return ret;
}
@@ -3204,9 +3199,9 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
int ret;
ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
- &bio_flags, READ);
+ &bio_flags, 0);
if (bio)
- ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
+ ret = submit_one_bio(bio, mirror_num, bio_flags);
return ret;
}
@@ -3440,8 +3435,8 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
page->index, cur, end);
}
- ret = submit_extent_page(write_flags, tree, wbc, page,
- sector, iosize, pg_offset,
+ ret = submit_extent_page(REQ_OP_WRITE, write_flags, tree, wbc,
+ page, sector, iosize, pg_offset,
bdev, &epd->bio, max_nr,
end_bio_extent_writepage,
0, 0, 0, false);
@@ -3480,13 +3475,11 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
size_t pg_offset = 0;
loff_t i_size = i_size_read(inode);
unsigned long end_index = i_size >> PAGE_SHIFT;
- int write_flags;
+ int write_flags = 0;
unsigned long nr_written = 0;
if (wbc->sync_mode == WB_SYNC_ALL)
write_flags = WRITE_SYNC;
- else
- write_flags = WRITE;
trace___extent_writepage(page, inode, wbc);
@@ -3730,7 +3723,7 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
u64 offset = eb->start;
unsigned long i, num_pages;
unsigned long bio_flags = 0;
- int rw = (epd->sync_io ? WRITE_SYNC : WRITE) | REQ_META;
+ int write_flags = (epd->sync_io ? WRITE_SYNC : 0) | REQ_META;
int ret = 0;
clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
@@ -3744,9 +3737,10 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
clear_page_dirty_for_io(p);
set_page_writeback(p);
- ret = submit_extent_page(rw, tree, wbc, p, offset >> 9,
- PAGE_SIZE, 0, bdev, &epd->bio,
- -1, end_bio_extent_buffer_writepage,
+ ret = submit_extent_page(REQ_OP_WRITE, write_flags, tree, wbc,
+ p, offset >> 9, PAGE_SIZE, 0, bdev,
+ &epd->bio, -1,
+ end_bio_extent_buffer_writepage,
0, epd->bio_flags, bio_flags, false);
epd->bio_flags = bio_flags;
if (ret) {
@@ -4056,13 +4050,12 @@ retry:
static void flush_epd_write_bio(struct extent_page_data *epd)
{
if (epd->bio) {
- int rw = WRITE;
int ret;
- if (epd->sync_io)
- rw = WRITE_SYNC;
+ bio_set_op_attrs(epd->bio, REQ_OP_WRITE,
+ epd->sync_io ? WRITE_SYNC : 0);
- ret = submit_one_bio(rw, epd->bio, 0, epd->bio_flags);
+ ret = submit_one_bio(epd->bio, 0, epd->bio_flags);
BUG_ON(ret < 0); /* -ENOMEM */
epd->bio = NULL;
}
@@ -4180,7 +4173,8 @@ int extent_readpages(struct extent_io_tree *tree,
prefetchw(&page->flags);
list_del(&page->lru);
if (add_to_page_cache_lru(page, mapping,
- page->index, GFP_NOFS)) {
+ page->index,
+ readahead_gfp_mask(mapping))) {
put_page(page);
continue;
}
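
Editorial note: switching extent_readpages to readahead_gfp_mask() makes the page allocation best-effort; readahead is speculative, so it is fine for the allocation to fail quietly and skip the page. A sketch of what such a helper amounts to, assuming it ORs no-retry/no-warn flags into the mapping's mask (the bit values here are placeholders, not the kernel's):

#include <stdio.h>

typedef unsigned int gfp_t;

/* placeholder bit values; only the combination matters here */
#define GFP_NOFS       (1u << 0)
#define __GFP_NORETRY  (1u << 1)
#define __GFP_NOWARN   (1u << 2)

/* readahead is opportunistic: don't retry hard and don't warn when
 * the allocation fails, just drop the page and move on */
static gfp_t readahead_gfp_mask(gfp_t mapping_mask)
{
	return mapping_mask | __GFP_NORETRY | __GFP_NOWARN;
}

int main(void)
{
	printf("mask=%#x\n", readahead_gfp_mask(GFP_NOFS));
	return 0;
}
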
@@ -4189,19 +4183,19 @@ int extent_readpages(struct extent_io_tree *tree,
if (nr < ARRAY_SIZE(pagepool))
continue;
__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
- &bio, 0, &bio_flags, READ, &prev_em_start);
+ &bio, 0, &bio_flags, &prev_em_start);
nr = 0;
}
if (nr)
__extent_readpages(tree, pagepool, nr, get_extent, &em_cached,
- &bio, 0, &bio_flags, READ, &prev_em_start);
+ &bio, 0, &bio_flags, &prev_em_start);
if (em_cached)
free_extent_map(em_cached);
BUG_ON(!list_empty(pages));
if (bio)
- return submit_one_bio(READ, bio, 0, bio_flags);
+ return submit_one_bio(bio, 0, bio_flags);
return 0;
}
@@ -5231,22 +5225,38 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
atomic_set(&eb->io_pages, num_reads);
for (i = start_i; i < num_pages; i++) {
page = eb->pages[i];
+
if (!PageUptodate(page)) {
+ if (ret) {
+ atomic_dec(&eb->io_pages);
+ unlock_page(page);
+ continue;
+ }
+
ClearPageError(page);
err = __extent_read_full_page(tree, page,
get_extent, &bio,
mirror_num, &bio_flags,
- READ | REQ_META);
- if (err)
+ REQ_META);
+ if (err) {
ret = err;
+ /*
+ * We pass &bio to __extent_read_full_page above, so
+ * if it returns an error the current page was not
+ * added to the bio and has already been unlocked.
+ *
+ * We must decrement io_pages ourselves.
+ */
+ atomic_dec(&eb->io_pages);
+ }
} else {
unlock_page(page);
}
}
if (bio) {
- err = submit_one_bio(READ | REQ_META, bio, mirror_num,
- bio_flags);
+ err = submit_one_bio(bio, mirror_num, bio_flags);
if (err)
return err;
}
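
Editorial note: the loop above tracks how many pages are still expected to complete I/O. Once one submission fails, the failed page and every later not-yet-uptodate page never enter a bio, so the expected count must be dropped by hand for each of them. A stand-alone model of that bookkeeping, with plain ints in place of the kernel's atomics and page flags:

#include <stdio.h>

#define NUM_PAGES 4

int main(void)
{
	int uptodate[NUM_PAGES] = { 0, 0, 1, 0 }; /* toy page states */
	int io_pages = 0;
	int ret = 0;
	int i;

	/* first pass: count pages that will need a read */
	for (i = 0; i < NUM_PAGES; i++)
		if (!uptodate[i])
			io_pages++;

	for (i = 0; i < NUM_PAGES; i++) {
		if (uptodate[i])
			continue;	/* nothing to read, just unlock */
		if (ret) {
			/* an earlier submit failed: this page won't be
			 * read either, so drop it from the count */
			io_pages--;
			continue;
		}
		if (i == 1)
			ret = -5;	/* simulate a failed submit (-EIO) */
		if (ret)
			io_pages--;	/* failed page never joined a bio */
	}

	printf("still in flight: %d\n", io_pages); /* only page 0 */
	return 0;
}
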
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index c0c1c4fef6ce..bc2729a7612d 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -63,16 +63,16 @@ struct btrfs_root;
struct btrfs_io_bio;
struct io_failure_record;
-typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
- struct bio *bio, int mirror_num,
- unsigned long bio_flags, u64 bio_offset);
+typedef int (extent_submit_bio_hook_t)(struct inode *inode, struct bio *bio,
+ int mirror_num, unsigned long bio_flags,
+ u64 bio_offset);
struct extent_io_ops {
int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
u64 start, u64 end, int *page_started,
unsigned long *nr_written);
int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
extent_submit_bio_hook_t *submit_bio_hook;
- int (*merge_bio_hook)(int rw, struct page *page, unsigned long offset,
+ int (*merge_bio_hook)(struct page *page, unsigned long offset,
size_t size, struct bio *bio,
unsigned long bio_flags);
int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index e0715fcfb11e..26f9ac719d20 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -13,7 +13,7 @@ int __init extent_map_init(void)
{
extent_map_cache = kmem_cache_create("btrfs_extent_map",
sizeof(struct extent_map), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ SLAB_MEM_SPREAD, NULL);
if (!extent_map_cache)
return -ENOMEM;
return 0;
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 62a81ee13a5f..d0d571c47d33 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -250,7 +250,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
offset + root->sectorsize - 1,
EXTENT_NODATASUM);
} else {
- btrfs_info(BTRFS_I(inode)->root->fs_info,
+ btrfs_info_rl(BTRFS_I(inode)->root->fs_info,
"no csum found for inode %llu start %llu",
btrfs_ino(inode), offset);
}
@@ -699,7 +699,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
*/
ret = btrfs_split_item(trans, root, path, &key, offset);
if (ret && ret != -EAGAIN) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 2234e88cf674..5842423f8f47 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -132,7 +132,7 @@ static int __btrfs_add_inode_defrag(struct inode *inode,
static inline int __need_auto_defrag(struct btrfs_root *root)
{
- if (!btrfs_test_opt(root, AUTO_DEFRAG))
+ if (!btrfs_test_opt(root->fs_info, AUTO_DEFRAG))
return 0;
if (btrfs_fs_closing(root->fs_info))
@@ -950,7 +950,7 @@ delete_extent_item:
ret = btrfs_del_items(trans, root, path, del_slot,
del_nr);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
break;
}
@@ -974,7 +974,7 @@ delete_extent_item:
path->slots[0] = del_slot;
ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
}
leaf = path->nodes[0];
@@ -1190,7 +1190,7 @@ again:
goto again;
}
if (ret < 0) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -1278,7 +1278,7 @@ again:
ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
if (ret < 0) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
}
@@ -1629,13 +1629,11 @@ again:
* managed to copy.
*/
if (num_sectors > dirty_sectors) {
- /*
- * we round down because we don't want to count
- * any partial blocks actually sent through the
- * IO machines
- */
- release_bytes = round_down(release_bytes - copied,
- root->sectorsize);
+
+ /* release everything except the sectors we dirtied */
+ release_bytes -= dirty_sectors <<
+ root->fs_info->sb->s_blocksize_bits;
+
if (copied > 0) {
spin_lock(&BTRFS_I(inode)->lock);
BTRFS_I(inode)->outstanding_extents++;
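
Editorial note: the replacement arithmetic above releases the reservation for everything except the sectors actually dirtied; a sector count shifted left by the block size bits is a byte count. A quick stand-alone check of that conversion (the sector size here is an assumption for the example):

#include <stdio.h>

int main(void)
{
	unsigned int blocksize_bits = 12;	/* 4K sectors, for example */
	unsigned long long release_bytes = 64ULL << blocksize_bits;
	unsigned long long dirty_sectors = 10;

	/* keep the reservation for the dirtied sectors, give back the rest */
	release_bytes -= dirty_sectors << blocksize_bits;

	printf("released %llu bytes, kept %llu\n",
	       release_bytes, dirty_sectors << blocksize_bits);
	return 0;
}
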
@@ -2035,6 +2033,14 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
*/
clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&BTRFS_I(inode)->runtime_flags);
+ /*
+ * An ordered extent might have started before and completed
+ * already with io errors, in which case the inode was not
+ * updated and we end up here. So check the inode's mapping
+ * flags for any errors that might have happened while doing
+ * writeback of file data.
+ */
+ ret = btrfs_inode_check_errors(inode);
inode_unlock(inode);
goto out;
}
@@ -2479,7 +2485,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
}
ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
- min_size);
+ min_size, 0);
BUG_ON(ret);
trans->block_rsv = rsv;
@@ -2522,7 +2528,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
}
ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
- rsv, min_size);
+ rsv, min_size, 0);
BUG_ON(ret); /* shouldn't happen */
trans->block_rsv = rsv;
@@ -2977,7 +2983,7 @@ int btrfs_auto_defrag_init(void)
{
btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
sizeof(struct inode_defrag), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+ SLAB_MEM_SPREAD,
NULL);
if (!btrfs_inode_defrag_cachep)
return -ENOMEM;
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 69d270f6602c..d571bd2b697b 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -280,7 +280,7 @@ fail:
if (locked)
mutex_unlock(&trans->transaction->cache_write_mutex);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -3026,7 +3026,7 @@ int btrfs_find_space_cluster(struct btrfs_root *root,
* For metadata, allow allocates with smaller extents. For
* data, keep it dense.
*/
- if (btrfs_test_opt(root, SSD_SPREAD)) {
+ if (btrfs_test_opt(root->fs_info, SSD_SPREAD)) {
cont1_bytes = min_bytes = bytes + empty_size;
} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
cont1_bytes = bytes;
@@ -3470,7 +3470,7 @@ int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
int ret = 0;
u64 root_gen = btrfs_root_generation(&root->root_item);
- if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
return 0;
/*
@@ -3514,7 +3514,7 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
struct btrfs_io_ctl io_ctl;
bool release_metadata = true;
- if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
return 0;
memset(&io_ctl, 0, sizeof(io_ctl));
diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
index 53dbeaf6ce94..87e7e3d3e676 100644
--- a/fs/btrfs/free-space-tree.c
+++ b/fs/btrfs/free-space-tree.c
@@ -305,7 +305,7 @@ int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
out:
kvfree(bitmap);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -454,7 +454,7 @@ int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
out:
kvfree(bitmap);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -851,7 +851,7 @@ int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
out:
btrfs_free_path(path);
if (ret)
- btrfs_abort_transaction(trans, fs_info->free_space_root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -1047,7 +1047,7 @@ int add_to_free_space_tree(struct btrfs_trans_handle *trans,
out:
btrfs_free_path(path);
if (ret)
- btrfs_abort_transaction(trans, fs_info->free_space_root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -1193,7 +1193,7 @@ int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info)
abort:
fs_info->creating_free_space_tree = 0;
- btrfs_abort_transaction(trans, tree_root, ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans, tree_root);
return ret;
}
@@ -1280,7 +1280,7 @@ int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info)
return 0;
abort:
- btrfs_abort_transaction(trans, tree_root, ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans, tree_root);
return ret;
}
@@ -1333,7 +1333,7 @@ out:
btrfs_free_path(path);
mutex_unlock(&block_group->free_space_lock);
if (ret)
- btrfs_abort_transaction(trans, fs_info->free_space_root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -1410,7 +1410,7 @@ int remove_block_group_free_space(struct btrfs_trans_handle *trans,
out:
btrfs_free_path(path);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 70107f7c9307..aa6fabaee72e 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -38,7 +38,7 @@ static int caching_kthread(void *data)
int slot;
int ret;
- if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
return 0;
path = btrfs_alloc_path();
@@ -141,7 +141,7 @@ static void start_caching(struct btrfs_root *root)
int ret;
u64 objectid;
- if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
return;
spin_lock(&root->ino_cache_lock);
@@ -185,7 +185,7 @@ static void start_caching(struct btrfs_root *root)
int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
{
- if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
return btrfs_find_free_objectid(root, objectid);
again:
@@ -211,7 +211,7 @@ void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
{
struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
- if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
return;
again:
if (root->ino_cache_state == BTRFS_CACHE_FINISHED) {
@@ -251,7 +251,7 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
struct rb_node *n;
u64 count;
- if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
return;
while (1) {
@@ -412,7 +412,7 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
if (btrfs_root_refs(&root->root_item) == 0)
return 0;
- if (!btrfs_test_opt(root, INODE_MAP_CACHE))
+ if (!btrfs_test_opt(root->fs_info, INODE_MAP_CACHE))
return 0;
path = btrfs_alloc_path();
@@ -458,7 +458,7 @@ again:
BTRFS_I(inode)->generation = 0;
ret = btrfs_update_inode(trans, root, inode);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_put;
}
@@ -466,7 +466,7 @@ again:
ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
if (ret) {
if (ret != -ENOSPC)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_put;
}
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 4421954720b8..08dfc57e2270 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -60,6 +60,7 @@
#include "hash.h"
#include "props.h"
#include "qgroup.h"
+#include "dedupe.h"
struct btrfs_iget_args {
struct btrfs_key *location;
@@ -105,8 +106,9 @@ static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct inode *inode,
struct page *locked_page,
- u64 start, u64 end, int *page_started,
- unsigned long *nr_written, int unlock);
+ u64 start, u64 end, u64 delalloc_end,
+ int *page_started, unsigned long *nr_written,
+ int unlock, struct btrfs_dedupe_hash *hash);
static struct extent_map *create_pinned_em(struct inode *inode, u64 start,
u64 len, u64 orig_start,
u64 block_start, u64 block_len,
@@ -294,7 +296,7 @@ static noinline int cow_file_range_inline(struct btrfs_root *root,
start, aligned_end, NULL,
1, 1, extent_item_size, &extent_inserted);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -305,7 +307,7 @@ static noinline int cow_file_range_inline(struct btrfs_root *root,
inline_len, compressed_size,
compress_type, compressed_pages);
if (ret && ret != -ENOSPC) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
} else if (ret == -ENOSPC) {
ret = 1;
@@ -374,12 +376,12 @@ static inline int inode_need_compress(struct inode *inode)
struct btrfs_root *root = BTRFS_I(inode)->root;
/* force compress */
- if (btrfs_test_opt(root, FORCE_COMPRESS))
+ if (btrfs_test_opt(root->fs_info, FORCE_COMPRESS))
return 1;
/* bad compression ratios */
if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
return 0;
- if (btrfs_test_opt(root, COMPRESS) ||
+ if (btrfs_test_opt(root->fs_info, COMPRESS) ||
BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
BTRFS_I(inode)->force_compress)
return 1;
@@ -585,9 +587,27 @@ cont:
will_compress = 0;
} else {
num_bytes = total_in;
+ *num_added += 1;
+
+ /*
+ * The async work queues will take care of doing actual
+ * allocation on disk for these compressed pages, and
+ * will submit them to the elevator.
+ */
+ add_async_extent(async_cow, start, num_bytes,
+ total_compressed, pages, nr_pages_ret,
+ compress_type);
+
+ if (start + num_bytes < end) {
+ start += num_bytes;
+ pages = NULL;
+ cond_resched();
+ goto again;
+ }
+ return;
}
}
- if (!will_compress && pages) {
+ if (pages) {
/*
* the compression code ran but failed to make things smaller,
* free any pages it allocated and our page pointer array
@@ -602,48 +622,28 @@ cont:
nr_pages_ret = 0;
/* flag the file so we don't compress in the future */
- if (!btrfs_test_opt(root, FORCE_COMPRESS) &&
+ if (!btrfs_test_opt(root->fs_info, FORCE_COMPRESS) &&
!(BTRFS_I(inode)->force_compress)) {
BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
}
}
- if (will_compress) {
- *num_added += 1;
-
- /* the async work queues will take care of doing actual
- * allocation on disk for these compressed pages,
- * and will submit them to the elevator.
- */
- add_async_extent(async_cow, start, num_bytes,
- total_compressed, pages, nr_pages_ret,
- compress_type);
-
- if (start + num_bytes < end) {
- start += num_bytes;
- pages = NULL;
- cond_resched();
- goto again;
- }
- } else {
cleanup_and_bail_uncompressed:
- /*
- * No compression, but we still need to write the pages in
- * the file we've been given so far. redirty the locked
- * page if it corresponds to our extent and set things up
- * for the async work queue to run cow_file_range to do
- * the normal delalloc dance
- */
- if (page_offset(locked_page) >= start &&
- page_offset(locked_page) <= end) {
- __set_page_dirty_nobuffers(locked_page);
- /* unlocked later on in the async handlers */
- }
- if (redirty)
- extent_range_redirty_for_io(inode, start, end);
- add_async_extent(async_cow, start, end - start + 1,
- 0, NULL, 0, BTRFS_COMPRESS_NONE);
- *num_added += 1;
- }
+ /*
+ * No compression, but we still need to write the pages in the file
+ * we've been given so far. Redirty the locked page if it corresponds
+ * to our extent and set things up for the async work queue to run
+ * cow_file_range to do the normal delalloc dance.
+ */
+ if (page_offset(locked_page) >= start &&
+ page_offset(locked_page) <= end)
+ __set_page_dirty_nobuffers(locked_page);
+ /* unlocked later on in the async handlers */
+
+ if (redirty)
+ extent_range_redirty_for_io(inode, start, end);
+ add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0,
+ BTRFS_COMPRESS_NONE);
+ *num_added += 1;
return;
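
Editorial note: the restructuring above lets the successful-compression path add its async extent and loop on the remaining range immediately, instead of falling through to a shared will_compress branch at the end of the function. The control flow, reduced to a user-space skeleton (the CHUNK size and the compress heuristic are invented for the example):

#include <stdio.h>

#define CHUNK 128	/* stand-in for the per-iteration range size */

/* placeholder for add_async_extent(): queue one range for the async
 * workers to allocate and write out */
static void add_async_extent(unsigned long start, unsigned long len,
			     int compressed)
{
	printf("queued [%lu, %lu) compressed=%d\n", start, start + len,
	       compressed);
}

int main(void)
{
	unsigned long start = 0, end = 300;
	int num_added = 0;

	while (start < end) {
		unsigned long num_bytes = end - start;
		int will_compress = num_bytes > CHUNK; /* toy heuristic */

		if (will_compress) {
			num_bytes = CHUNK;
			num_added++;
			add_async_extent(start, num_bytes, 1);
			start += num_bytes;	/* loop again on the rest */
			continue;
		}
		/* cleanup_and_bail_uncompressed: write the rest as-is */
		num_added++;
		add_async_extent(start, num_bytes, 0);
		break;
	}
	printf("num_added=%d\n", num_added);
	return 0;
}
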
@@ -712,7 +712,10 @@ retry:
async_extent->start,
async_extent->start +
async_extent->ram_size - 1,
- &page_started, &nr_written, 0);
+ async_extent->start +
+ async_extent->ram_size - 1,
+ &page_started, &nr_written, 0,
+ NULL);
/* JDM XXX */
@@ -925,9 +928,9 @@ static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
*/
static noinline int cow_file_range(struct inode *inode,
struct page *locked_page,
- u64 start, u64 end, int *page_started,
- unsigned long *nr_written,
- int unlock)
+ u64 start, u64 end, u64 delalloc_end,
+ int *page_started, unsigned long *nr_written,
+ int unlock, struct btrfs_dedupe_hash *hash)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
u64 alloc_hint = 0;
@@ -1156,7 +1159,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
async_cow->start = start;
if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
- !btrfs_test_opt(root, FORCE_COMPRESS))
+ !btrfs_test_opt(root->fs_info, FORCE_COMPRESS))
cur_end = end;
else
cur_end = min(end, start + SZ_512K - 1);
@@ -1418,7 +1421,8 @@ out_check:
if (cow_start != (u64)-1) {
ret = cow_file_range(inode, locked_page,
cow_start, found_key.offset - 1,
- page_started, nr_written, 1);
+ end, page_started, nr_written, 1,
+ NULL);
if (ret) {
if (!nolock && nocow)
btrfs_end_write_no_snapshoting(root);
@@ -1501,8 +1505,8 @@ out_check:
}
if (cow_start != (u64)-1) {
- ret = cow_file_range(inode, locked_page, cow_start, end,
- page_started, nr_written, 1);
+ ret = cow_file_range(inode, locked_page, cow_start, end, end,
+ page_started, nr_written, 1, NULL);
if (ret)
goto error;
}
@@ -1561,8 +1565,8 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
ret = run_delalloc_nocow(inode, locked_page, start, end,
page_started, 0, nr_written);
} else if (!inode_need_compress(inode)) {
- ret = cow_file_range(inode, locked_page, start, end,
- page_started, nr_written, 1);
+ ret = cow_file_range(inode, locked_page, start, end, end,
+ page_started, nr_written, 1, NULL);
} else {
set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
&BTRFS_I(inode)->runtime_flags);
@@ -1740,7 +1744,7 @@ static void btrfs_set_bit_hook(struct inode *inode,
}
/* For sanity tests */
- if (btrfs_test_is_dummy_root(root))
+ if (btrfs_is_testing(root->fs_info))
return;
__percpu_counter_add(&root->fs_info->delalloc_bytes, len,
@@ -1799,7 +1803,7 @@ static void btrfs_clear_bit_hook(struct inode *inode,
btrfs_delalloc_release_metadata(inode, len);
/* For sanity tests. */
- if (btrfs_test_is_dummy_root(root))
+ if (btrfs_is_testing(root->fs_info))
return;
if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
@@ -1822,8 +1826,12 @@ static void btrfs_clear_bit_hook(struct inode *inode,
/*
* extent_io.c merge_bio_hook, this must check the chunk tree to make sure
* we don't create bios that span stripes or chunks
+ *
+ * return 1 if the page cannot be merged into the bio
+ * return 0 if the page can be merged into the bio
+ * return a negative errno otherwise
*/
-int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
+int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
size_t size, struct bio *bio,
unsigned long bio_flags)
{
@@ -1838,10 +1846,10 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
length = bio->bi_iter.bi_size;
map_length = length;
- ret = btrfs_map_block(root->fs_info, rw, logical,
+ ret = btrfs_map_block(root->fs_info, bio_op(bio), logical,
&map_length, NULL, 0);
- /* Will always return 0 with map_multi == NULL */
- BUG_ON(ret < 0);
+ if (ret < 0)
+ return ret;
if (map_length < length + size)
return 1;
return 0;
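
Editorial note: with the BUG_ON gone, callers must handle all three outcomes the comment above spells out. A sketch of the expected call-site pattern; the function and the stripe numbers here are illustrative, not the kernel's:

#include <stdio.h>

/* toy merge check: pretend the device stripe ends at byte 1024 */
static int merge_bio(unsigned long long bio_end, unsigned long size)
{
	if (bio_end > 2048)
		return -5;	/* mapping failed: -EIO */
	if (bio_end + size > 1024)
		return 1;	/* would span a stripe: cannot merge */
	return 0;		/* safe to merge */
}

int main(void)
{
	int ret = merge_bio(900, 200);

	if (ret < 0)
		printf("error %d, fail the I/O\n", ret);
	else if (ret)
		printf("submit the current bio, start a new one\n");
	else
		printf("add the page to the current bio\n");
	return 0;
}
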
@@ -1855,9 +1863,8 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
* At IO completion time the cums attached on the ordered extent record
* are inserted into the btree
*/
-static int __btrfs_submit_bio_start(struct inode *inode, int rw,
- struct bio *bio, int mirror_num,
- unsigned long bio_flags,
+static int __btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
+ int mirror_num, unsigned long bio_flags,
u64 bio_offset)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -1876,14 +1883,14 @@ static int __btrfs_submit_bio_start(struct inode *inode, int rw,
* At IO completion time the cums attached on the ordered extent record
* are inserted into the btree
*/
-static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
+static int __btrfs_submit_bio_done(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags,
u64 bio_offset)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
- ret = btrfs_map_bio(root, rw, bio, mirror_num, 1);
+ ret = btrfs_map_bio(root, bio, mirror_num, 1);
if (ret) {
bio->bi_error = ret;
bio_endio(bio);
@@ -1895,7 +1902,7 @@ static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio,
* extent_io.c submission hook. This does the right thing for csum calculation
* on write, or reading the csums from the tree before a read
*/
-static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
+static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags,
u64 bio_offset)
{
@@ -1910,7 +1917,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
if (btrfs_is_free_space_inode(inode))
metadata = BTRFS_WQ_ENDIO_FREE_SPACE;
- if (!(rw & REQ_WRITE)) {
+ if (bio_op(bio) != REQ_OP_WRITE) {
ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
if (ret)
goto out;
@@ -1932,7 +1939,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
goto mapit;
/* we're doing a write, do the async checksumming */
ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
- inode, rw, bio, mirror_num,
+ inode, bio, mirror_num,
bio_flags, bio_offset,
__btrfs_submit_bio_start,
__btrfs_submit_bio_done);
@@ -1944,7 +1951,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
}
mapit:
- ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
+ ret = btrfs_map_bio(root, bio, mirror_num, 0);
out:
if (ret < 0) {
@@ -2595,7 +2602,7 @@ again:
ret = btrfs_insert_empty_item(trans, root, path, &key,
sizeof(*extent));
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
@@ -2622,7 +2629,7 @@ again:
backref->root_id, backref->inum,
new->file_pos); /* start - extent_offset */
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_free_path;
}
@@ -2891,7 +2898,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
ret = btrfs_update_inode_fallback(trans, root, inode);
if (ret) /* -ENOMEM or corruption */
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -2951,7 +2958,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
ordered_extent->file_offset, ordered_extent->len,
trans->transid);
if (ret < 0) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_unlock;
}
@@ -2961,7 +2968,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
btrfs_ordered_update_i_size(inode, 0, ordered_extent);
ret = btrfs_update_inode_fallback(trans, root, inode);
if (ret) { /* -ENOMEM or corruption */
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_unlock;
}
ret = 0;
@@ -3205,7 +3212,7 @@ void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
root->root_key.objectid);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
else
clear_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
&root->state);
@@ -3296,7 +3303,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
if (ret != -EEXIST) {
clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
&BTRFS_I(inode)->runtime_flags);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
}
@@ -3308,7 +3315,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
ret = btrfs_insert_orphan_item(trans, root->fs_info->tree_root,
root->root_key.objectid);
if (ret && ret != -EEXIST) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
}
@@ -3428,10 +3435,10 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
found_key.offset = 0;
inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
ret = PTR_ERR_OR_ZERO(inode);
- if (ret && ret != -ESTALE)
+ if (ret && ret != -ENOENT)
goto out;
- if (ret == -ESTALE && root == root->fs_info->tree_root) {
+ if (ret == -ENOENT && root == root->fs_info->tree_root) {
struct btrfs_root *dead_root;
struct btrfs_fs_info *fs_info = root->fs_info;
int is_dead_root = 0;
@@ -3467,7 +3474,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
* Inode is already gone but the orphan item is still there,
* kill the orphan item.
*/
- if (ret == -ESTALE) {
+ if (ret == -ENOENT) {
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
@@ -3626,7 +3633,7 @@ static noinline int acls_after_inode_item(struct extent_buffer *leaf,
/*
* read an inode from the btree into the in-memory inode
*/
-static void btrfs_read_locked_inode(struct inode *inode)
+static int btrfs_read_locked_inode(struct inode *inode)
{
struct btrfs_path *path;
struct extent_buffer *leaf;
@@ -3645,14 +3652,19 @@ static void btrfs_read_locked_inode(struct inode *inode)
filled = true;
path = btrfs_alloc_path();
- if (!path)
+ if (!path) {
+ ret = -ENOMEM;
goto make_bad;
+ }
memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
- if (ret)
+ if (ret) {
+ if (ret > 0)
+ ret = -ENOENT;
goto make_bad;
+ }
leaf = path->nodes[0];
@@ -3805,11 +3817,12 @@ cache_acl:
}
btrfs_update_iflags(inode);
- return;
+ return 0;
make_bad:
btrfs_free_path(path);
make_bad_inode(inode);
+ return ret;
}
/*
@@ -4007,20 +4020,20 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
btrfs_info(root->fs_info,
"failed to delete reference to %.*s, inode %llu parent %llu",
name_len, name, ino, dir_ino);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto err;
}
skip_backref:
ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto err;
}
ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len,
inode, dir_ino);
if (ret != 0 && ret != -ENOENT) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto err;
}
@@ -4029,7 +4042,7 @@ skip_backref:
if (ret == -ENOENT)
ret = 0;
else if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
err:
btrfs_free_path(path);
if (ret)
@@ -4143,7 +4156,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
ret = btrfs_delete_one_dir_name(trans, root, path, di);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
btrfs_release_path(path);
@@ -4153,7 +4166,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
dir_ino, &index, name, name_len);
if (ret < 0) {
if (ret != -ENOENT) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
di = btrfs_search_dir_index_item(root, path, dir_ino,
@@ -4163,7 +4176,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
ret = -ENOENT;
else
ret = PTR_ERR(di);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -4176,7 +4189,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
ret = btrfs_delete_delayed_dir_index(trans, root, dir, index);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -4185,7 +4198,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
dir->i_mtime = dir->i_ctime = current_fs_time(dir->i_sb);
ret = btrfs_update_inode_fallback(trans, root, dir);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
out:
btrfs_free_path(path);
return ret;
@@ -4197,6 +4210,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
int err = 0;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_trans_handle *trans;
+ u64 last_unlink_trans;
if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
return -ENOTEMPTY;
@@ -4219,11 +4233,27 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
if (err)
goto out;
+ last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;
+
/* now the directory is empty */
err = btrfs_unlink_inode(trans, root, dir, d_inode(dentry),
dentry->d_name.name, dentry->d_name.len);
- if (!err)
+ if (!err) {
btrfs_i_size_write(inode, 0);
+ /*
+ * Propagate the last_unlink_trans value of the deleted dir to
+ * its parent directory. This is to prevent an unrecoverable
+ * log tree in the case we do something like this:
+ * 1) create dir foo
+ * 2) create snapshot under dir foo
+ * 3) delete the snapshot
+ * 4) rmdir foo
+ * 5) mkdir foo
+ * 6) fsync foo or some file inside foo
+ */
+ if (last_unlink_trans >= trans->transid)
+ BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
+ }
out:
btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
@@ -4506,7 +4536,6 @@ search_again:
pending_del_nr);
if (err) {
btrfs_abort_transaction(trans,
- root,
err);
goto error;
}
@@ -4518,8 +4547,7 @@ search_again:
item_end,
new_size);
if (err) {
- btrfs_abort_transaction(trans,
- root, err);
+ btrfs_abort_transaction(trans, err);
goto error;
}
} else if (test_bit(BTRFS_ROOT_REF_COWS,
@@ -4583,8 +4611,7 @@ delete:
pending_del_slot,
pending_del_nr);
if (ret) {
- btrfs_abort_transaction(trans,
- root, ret);
+ btrfs_abort_transaction(trans, ret);
goto error;
}
pending_del_nr = 0;
@@ -4617,7 +4644,7 @@ out:
ret = btrfs_del_items(trans, root, path, pending_del_slot,
pending_del_nr);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
}
error:
if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
@@ -4786,7 +4813,7 @@ static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans, root);
return ret;
}
@@ -4794,7 +4821,7 @@ static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
0, 0, len, 0, len, 0, 0, 0);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
else
btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
@@ -5021,7 +5048,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
i_size_write(inode, BTRFS_I(inode)->disk_i_size);
err = btrfs_orphan_del(trans, inode);
if (err)
- btrfs_abort_transaction(trans, root, err);
+ btrfs_abort_transaction(trans, err);
btrfs_end_transaction(trans, root);
}
}
@@ -5159,11 +5186,18 @@ void btrfs_evict_inode(struct inode *inode)
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *rsv, *global_rsv;
int steal_from_global = 0;
- u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
+ u64 min_size;
int ret;
trace_btrfs_inode_evict(inode);
+ if (!root) {
+ kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
+ return;
+ }
+
+ min_size = btrfs_calc_trunc_metadata_size(root, 1);
+
evict_inode_truncate_pages(inode);
if (inode->i_nlink &&
@@ -5264,7 +5298,7 @@ void btrfs_evict_inode(struct inode *inode)
if (steal_from_global) {
if (!btrfs_check_space_for_delayed_refs(trans, root))
ret = btrfs_block_rsv_migrate(global_rsv, rsv,
- min_size);
+ min_size, 0);
else
ret = -ENOSPC;
}
@@ -5595,7 +5629,9 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
return ERR_PTR(-ENOMEM);
if (inode->i_state & I_NEW) {
- btrfs_read_locked_inode(inode);
+ int ret;
+
+ ret = btrfs_read_locked_inode(inode);
if (!is_bad_inode(inode)) {
inode_tree_add(inode);
unlock_new_inode(inode);
@@ -5604,7 +5640,8 @@ struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
} else {
unlock_new_inode(inode);
iput(inode);
- inode = ERR_PTR(-ESTALE);
+ ASSERT(ret < 0);
+ inode = ERR_PTR(ret < 0 ? ret : -ESTALE);
}
}
@@ -6240,9 +6277,9 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
btrfs_inherit_iflags(inode, dir);
if (S_ISREG(mode)) {
- if (btrfs_test_opt(root, NODATASUM))
+ if (btrfs_test_opt(root->fs_info, NODATASUM))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
- if (btrfs_test_opt(root, NODATACOW))
+ if (btrfs_test_opt(root->fs_info, NODATACOW))
BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
BTRFS_INODE_NODATASUM;
}
@@ -6320,7 +6357,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
if (ret == -EEXIST || ret == -EOVERFLOW)
goto fail_dir_item;
else if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
}
@@ -6331,7 +6368,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
current_fs_time(parent_inode->i_sb);
ret = btrfs_update_inode(trans, root, parent_inode);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
return ret;
fail_dir_item:
@@ -7790,12 +7827,12 @@ err:
}
static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
- int rw, int mirror_num)
+ int mirror_num)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
- BUG_ON(rw & REQ_WRITE);
+ BUG_ON(bio_op(bio) == REQ_OP_WRITE);
bio_get(bio);
@@ -7804,7 +7841,7 @@ static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
if (ret)
goto err;
- ret = btrfs_map_bio(root, rw, bio, mirror_num, 0);
+ ret = btrfs_map_bio(root, bio, mirror_num, 0);
err:
bio_put(bio);
return ret;
@@ -7855,7 +7892,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
int read_mode;
int ret;
- BUG_ON(failed_bio->bi_rw & REQ_WRITE);
+ BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);
ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
if (ret)
@@ -7883,13 +7920,13 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio,
free_io_failure(inode, failrec);
return -EIO;
}
+ bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
btrfs_debug(BTRFS_I(inode)->root->fs_info,
"Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d\n",
read_mode, failrec->this_mirror, failrec->in_validation);
- ret = submit_dio_repair_bio(inode, bio, read_mode,
- failrec->this_mirror);
+ ret = submit_dio_repair_bio(inode, bio, failrec->this_mirror);
if (ret) {
free_io_failure(inode, failrec);
bio_put(bio);
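The rw-argument removals in the hunks above and below follow the 4.8 block-layer conversion: the operation now lives in bio->bi_opf, set and queried through helpers, so submit paths take the bio alone. A standalone sketch of that API (kernel interfaces, not part of this patch):

	struct bio *bio = bio_alloc(GFP_NOFS, 1);

	bio_set_op_attrs(bio, REQ_OP_READ, 0);	/* encode op and flags in bi_opf */
	if (bio_op(bio) == REQ_OP_READ)		/* read the op back out */
		submit_bio(bio);		/* single-argument submit */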
@@ -8179,7 +8216,7 @@ static void btrfs_endio_direct_write(struct bio *bio)
bio_put(bio);
}
-static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
+static int __btrfs_submit_bio_start_direct_io(struct inode *inode,
struct bio *bio, int mirror_num,
unsigned long bio_flags, u64 offset)
{
@@ -8197,8 +8234,8 @@ static void btrfs_end_dio_bio(struct bio *bio)
if (err)
btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
- "direct IO failed ino %llu rw %lu sector %#Lx len %u err no %d",
- btrfs_ino(dip->inode), bio->bi_rw,
+ "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d",
+ btrfs_ino(dip->inode), bio_op(bio), bio->bi_opf,
(unsigned long long)bio->bi_iter.bi_sector,
bio->bi_iter.bi_size, err);
@@ -8272,11 +8309,11 @@ static inline int btrfs_lookup_and_bind_dio_csum(struct btrfs_root *root,
}
static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
- int rw, u64 file_offset, int skip_sum,
+ u64 file_offset, int skip_sum,
int async_submit)
{
struct btrfs_dio_private *dip = bio->bi_private;
- int write = rw & REQ_WRITE;
+ bool write = bio_op(bio) == REQ_OP_WRITE;
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret;
@@ -8297,8 +8334,7 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
if (write && async_submit) {
ret = btrfs_wq_submit_bio(root->fs_info,
- inode, rw, bio, 0, 0,
- file_offset,
+ inode, bio, 0, 0, file_offset,
__btrfs_submit_bio_start_direct_io,
__btrfs_submit_bio_done);
goto err;
@@ -8317,13 +8353,13 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
goto err;
}
map:
- ret = btrfs_map_bio(root, rw, bio, 0, async_submit);
+ ret = btrfs_map_bio(root, bio, 0, async_submit);
err:
bio_put(bio);
return ret;
}
-static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
+static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip,
int skip_sum)
{
struct inode *inode = dip->inode;
@@ -8342,8 +8378,8 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
int i;
map_length = orig_bio->bi_iter.bi_size;
- ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
- &map_length, NULL, 0);
+ ret = btrfs_map_block(root->fs_info, bio_op(orig_bio),
+ start_sector << 9, &map_length, NULL, 0);
if (ret)
return -EIO;
@@ -8363,6 +8399,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
if (!bio)
return -ENOMEM;
+ bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_opf);
bio->bi_private = dip;
bio->bi_end_io = btrfs_end_dio_bio;
btrfs_io_bio(bio)->logical = file_offset;
@@ -8382,7 +8419,7 @@ next_block:
* before we're done setting it up
*/
atomic_inc(&dip->pending_bios);
- ret = __btrfs_submit_dio_bio(bio, inode, rw,
+ ret = __btrfs_submit_dio_bio(bio, inode,
file_offset, skip_sum,
async_submit);
if (ret) {
@@ -8400,12 +8437,13 @@ next_block:
start_sector, GFP_NOFS);
if (!bio)
goto out_err;
+ bio_set_op_attrs(bio, bio_op(orig_bio), orig_bio->bi_opf);
bio->bi_private = dip;
bio->bi_end_io = btrfs_end_dio_bio;
btrfs_io_bio(bio)->logical = file_offset;
map_length = orig_bio->bi_iter.bi_size;
- ret = btrfs_map_block(root->fs_info, rw,
+ ret = btrfs_map_block(root->fs_info, bio_op(orig_bio),
start_sector << 9,
&map_length, NULL, 0);
if (ret) {
@@ -8425,7 +8463,7 @@ next_block:
}
submit:
- ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
+ ret = __btrfs_submit_dio_bio(bio, inode, file_offset, skip_sum,
async_submit);
if (!ret)
return 0;
@@ -8445,14 +8483,14 @@ out_err:
return 0;
}
-static void btrfs_submit_direct(int rw, struct bio *dio_bio,
- struct inode *inode, loff_t file_offset)
+static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode,
+ loff_t file_offset)
{
struct btrfs_dio_private *dip = NULL;
struct bio *io_bio = NULL;
struct btrfs_io_bio *btrfs_bio;
int skip_sum;
- int write = rw & REQ_WRITE;
+ bool write = (bio_op(dio_bio) == REQ_OP_WRITE);
int ret = 0;
skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
@@ -8503,7 +8541,7 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
dio_data->unsubmitted_oe_range_end;
}
- ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
+ ret = btrfs_submit_direct_hook(dip, skip_sum);
if (!ret)
return;
@@ -9116,7 +9154,7 @@ static int btrfs_truncate(struct inode *inode)
/* Migrate the slack space for the truncate to our reserve */
ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
- min_size);
+ min_size, 0);
BUG_ON(ret);
/*
@@ -9156,7 +9194,7 @@ static int btrfs_truncate(struct inode *inode)
}
ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
- rsv, min_size);
+ rsv, min_size, 0);
BUG_ON(ret); /* shouldn't happen */
trans->block_rsv = rsv;
}
@@ -9177,7 +9215,6 @@ static int btrfs_truncate(struct inode *inode)
ret = btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root);
}
-
out:
btrfs_free_block_rsv(root, rsv);
@@ -9386,25 +9423,25 @@ int btrfs_init_cachep(void)
btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
sizeof(struct btrfs_trans_handle), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
if (!btrfs_trans_handle_cachep)
goto fail;
btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction",
sizeof(struct btrfs_transaction), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
if (!btrfs_transaction_cachep)
goto fail;
btrfs_path_cachep = kmem_cache_create("btrfs_path",
sizeof(struct btrfs_path), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ SLAB_MEM_SPREAD, NULL);
if (!btrfs_path_cachep)
goto fail;
btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
sizeof(struct btrfs_free_space), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+ SLAB_MEM_SPREAD, NULL);
if (!btrfs_free_space_cachep)
goto fail;
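For reference, SLAB_TEMPORARY is defined as an alias of SLAB_RECLAIM_ACCOUNT in <linux/slab.h> (assuming the 4.x definition sketched below), so the first two hunks merely document that the objects are short-lived, while dropping the flag from the path and free-space caches genuinely removes reclaimable accounting.

	/* include/linux/slab.h (sketch) */
	#define SLAB_TEMPORARY	SLAB_RECLAIM_ACCOUNT	/* objects are short lived */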
@@ -9554,7 +9591,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
ret = btrfs_update_inode(trans, root, old_inode);
}
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_fail;
}
@@ -9574,7 +9611,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
ret = btrfs_update_inode(trans, dest, new_inode);
}
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_fail;
}
@@ -9582,7 +9619,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
new_dentry->d_name.name,
new_dentry->d_name.len, 0, old_idx);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_fail;
}
@@ -9590,7 +9627,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
old_dentry->d_name.name,
old_dentry->d_name.len, 0, new_idx);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_fail;
}
@@ -9829,7 +9866,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
ret = btrfs_update_inode(trans, root, old_inode);
}
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_fail;
}
@@ -9853,7 +9890,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (!ret && new_inode->i_nlink == 0)
ret = btrfs_orphan_add(trans, d_inode(new_dentry));
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_fail;
}
}
@@ -9862,7 +9899,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
new_dentry->d_name.name,
new_dentry->d_name.len, 0, index);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_fail;
}
@@ -9882,7 +9919,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
old_dentry);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_fail;
}
}
@@ -10308,7 +10345,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
if (ret) {
btrfs_free_reserved_extent(root, ins.objectid,
ins.offset, 0);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
if (own_trans)
btrfs_end_transaction(trans, root);
break;
@@ -10368,7 +10405,7 @@ next:
ret = btrfs_update_inode(trans, root, inode);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
if (own_trans)
btrfs_end_transaction(trans, root);
break;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 05173563e4a6..14ed1e9e6bc8 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -561,7 +561,7 @@ static noinline int create_subvol(struct inode *dir,
new_root = btrfs_read_fs_root_no_name(root->fs_info, &key);
if (IS_ERR(new_root)) {
ret = PTR_ERR(new_root);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -570,7 +570,7 @@ static noinline int create_subvol(struct inode *dir,
ret = btrfs_create_subvol_root(trans, new_root, root, new_dirid);
if (ret) {
/* We potentially lose an unused inode item here */
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -583,7 +583,7 @@ static noinline int create_subvol(struct inode *dir,
*/
ret = btrfs_set_inode_index(dir, &index);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -591,7 +591,7 @@ static noinline int create_subvol(struct inode *dir,
name, namelen, dir, &key,
BTRFS_FT_DIR, index);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -608,7 +608,7 @@ static noinline int create_subvol(struct inode *dir,
root_item->uuid, BTRFS_UUID_KEY_SUBVOL,
objectid);
if (ret)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
fail:
kfree(root_item);
@@ -1948,8 +1948,7 @@ static noinline int key_in_sk(struct btrfs_key *key,
return 1;
}
-static noinline int copy_to_sk(struct btrfs_root *root,
- struct btrfs_path *path,
+static noinline int copy_to_sk(struct btrfs_path *path,
struct btrfs_key *key,
struct btrfs_ioctl_search_key *sk,
size_t *buf_size,
@@ -2120,7 +2119,7 @@ static noinline int search_ioctl(struct inode *inode,
ret = 0;
goto err;
}
- ret = copy_to_sk(root, path, &key, sk, buf_size, ubuf,
+ ret = copy_to_sk(path, &key, sk, buf_size, ubuf,
&sk_offset, &num_found);
btrfs_release_path(path);
if (ret)
@@ -2406,7 +2405,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
* rmdir(2).
*/
err = -EPERM;
- if (!btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
+ if (!btrfs_test_opt(root->fs_info, USER_SUBVOL_RM_ALLOWED))
goto out_dput;
/*
@@ -2489,7 +2488,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
dentry->d_name.len);
if (ret) {
err = ret;
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_end_trans;
}
@@ -2505,7 +2504,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
root->fs_info->tree_root,
dest->root_key.objectid);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
err = ret;
goto out_end_trans;
}
@@ -2515,7 +2514,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
dest->root_item.uuid, BTRFS_UUID_KEY_SUBVOL,
dest->root_key.objectid);
if (ret && ret != -ENOENT) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
err = ret;
goto out_end_trans;
}
@@ -2525,7 +2524,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
dest->root_key.objectid);
if (ret && ret != -ENOENT) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
err = ret;
goto out_end_trans;
}
@@ -3292,7 +3291,7 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
ret = btrfs_update_inode(trans, root, inode);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans, root);
goto out;
}
@@ -3694,7 +3693,7 @@ process_slot:
if (ret) {
if (ret != -EOPNOTSUPP)
btrfs_abort_transaction(trans,
- root, ret);
+ ret);
btrfs_end_transaction(trans, root);
goto out;
}
@@ -3702,8 +3701,7 @@ process_slot:
ret = btrfs_insert_empty_item(trans, root, path,
&new_key, size);
if (ret) {
- btrfs_abort_transaction(trans, root,
- ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans, root);
goto out;
}
@@ -3735,7 +3733,6 @@ process_slot:
new_key.offset - datao);
if (ret) {
btrfs_abort_transaction(trans,
- root,
ret);
btrfs_end_transaction(trans,
root);
@@ -3772,7 +3769,6 @@ process_slot:
if (ret) {
if (ret != -EOPNOTSUPP)
btrfs_abort_transaction(trans,
- root,
ret);
btrfs_end_transaction(trans, root);
goto out;
@@ -3828,7 +3824,7 @@ process_slot:
last_dest_end, destoff + len, 1);
if (ret) {
if (ret != -EOPNOTSUPP)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans, root);
goto out;
}
@@ -5164,13 +5160,13 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
root->root_key.objectid);
if (ret < 0 && ret != -EEXIST) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
}
ret = btrfs_commit_transaction(trans, root);
if (ret < 0) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index aca8264f4a49..3b78d38173b3 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -1122,7 +1122,7 @@ int __init ordered_data_init(void)
{
btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
sizeof(struct btrfs_ordered_extent), 0,
- SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+ SLAB_MEM_SPREAD,
NULL);
if (!btrfs_ordered_extent_cache)
return -ENOMEM;
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
index 36992128c746..cf0b444ac4f3 100644
--- a/fs/btrfs/props.c
+++ b/fs/btrfs/props.c
@@ -350,6 +350,7 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_root *parent_root)
{
+ struct super_block *sb = root->fs_info->sb;
struct btrfs_key key;
struct inode *parent_inode, *child_inode;
int ret;
@@ -358,12 +359,11 @@ int btrfs_subvol_inherit_props(struct btrfs_trans_handle *trans,
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
- parent_inode = btrfs_iget(parent_root->fs_info->sb, &key,
- parent_root, NULL);
+ parent_inode = btrfs_iget(sb, &key, parent_root, NULL);
if (IS_ERR(parent_inode))
return PTR_ERR(parent_inode);
- child_inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
+ child_inode = btrfs_iget(sb, &key, root, NULL);
if (IS_ERR(child_inode)) {
iput(parent_inode);
return PTR_ERR(child_inode);
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 9d4c05b14f6e..93ee1c18ef9d 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -571,7 +571,7 @@ static int add_qgroup_item(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
struct btrfs_key key;
- if (btrfs_test_is_dummy_root(quota_root))
+ if (btrfs_is_testing(quota_root->fs_info))
return 0;
path = btrfs_alloc_path();
@@ -728,7 +728,7 @@ static int update_qgroup_info_item(struct btrfs_trans_handle *trans,
int ret;
int slot;
- if (btrfs_test_is_dummy_root(root))
+ if (btrfs_is_testing(root->fs_info))
return 0;
key.objectid = 0;
@@ -1453,9 +1453,10 @@ int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
return ret;
}
-struct btrfs_qgroup_extent_record
-*btrfs_qgroup_insert_dirty_extent(struct btrfs_delayed_ref_root *delayed_refs,
- struct btrfs_qgroup_extent_record *record)
+struct btrfs_qgroup_extent_record *
+btrfs_qgroup_insert_dirty_extent(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_qgroup_extent_record *record)
{
struct rb_node **p = &delayed_refs->dirty_extent_root.rb_node;
struct rb_node *parent_node = NULL;
@@ -1463,7 +1464,7 @@ struct btrfs_qgroup_extent_record
u64 bytenr = record->bytenr;
assert_spin_locked(&delayed_refs->lock);
- trace_btrfs_qgroup_insert_dirty_extent(record);
+ trace_btrfs_qgroup_insert_dirty_extent(fs_info, record);
while (*p) {
parent_node = *p;
@@ -1595,8 +1596,8 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
cur_old_count = btrfs_qgroup_get_old_refcnt(qg, seq);
cur_new_count = btrfs_qgroup_get_new_refcnt(qg, seq);
- trace_qgroup_update_counters(qg->qgroupid, cur_old_count,
- cur_new_count);
+ trace_qgroup_update_counters(fs_info, qg->qgroupid,
+ cur_old_count, cur_new_count);
/* Rfer update part */
if (cur_old_count == 0 && cur_new_count > 0) {
@@ -1687,8 +1688,8 @@ btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
goto out_free;
BUG_ON(!fs_info->quota_root);
- trace_btrfs_qgroup_account_extent(bytenr, num_bytes, nr_old_roots,
- nr_new_roots);
+ trace_btrfs_qgroup_account_extent(fs_info, bytenr, num_bytes,
+ nr_old_roots, nr_new_roots);
qgroups = ulist_alloc(GFP_NOFS);
if (!qgroups) {
@@ -1759,7 +1760,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans,
record = rb_entry(node, struct btrfs_qgroup_extent_record,
node);
- trace_btrfs_qgroup_account_extents(record);
+ trace_btrfs_qgroup_account_extents(fs_info, record);
if (!ret) {
/*
@@ -2195,7 +2196,7 @@ void assert_qgroups_uptodate(struct btrfs_trans_handle *trans)
{
if (list_empty(&trans->qgroup_ref_list) && !trans->delayed_ref_elem.seq)
return;
- btrfs_err(trans->root->fs_info,
+ btrfs_err(trans->fs_info,
"qgroups not uptodate in trans handle %p: list is%s empty, "
"seq is %#x.%x",
trans, list_empty(&trans->qgroup_ref_list) ? "" : " not",
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index ecb2c143ef75..710887c06aaf 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -63,9 +63,10 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info);
struct btrfs_delayed_extent_op;
int btrfs_qgroup_prepare_account_extents(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info);
-struct btrfs_qgroup_extent_record
-*btrfs_qgroup_insert_dirty_extent(struct btrfs_delayed_ref_root *delayed_refs,
- struct btrfs_qgroup_extent_record *record);
+struct btrfs_qgroup_extent_record *
+btrfs_qgroup_insert_dirty_extent(struct btrfs_fs_info *fs_info,
+ struct btrfs_delayed_ref_root *delayed_refs,
+ struct btrfs_qgroup_extent_record *record);
int
btrfs_qgroup_account_extent(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info,
@@ -88,7 +89,7 @@ static inline void btrfs_qgroup_free_delayed_ref(struct btrfs_fs_info *fs_info,
u64 ref_root, u64 num_bytes)
{
btrfs_qgroup_free_refroot(fs_info, ref_root, num_bytes);
- trace_btrfs_qgroup_free_delayed_ref(ref_root, num_bytes);
+ trace_btrfs_qgroup_free_delayed_ref(fs_info, ref_root, num_bytes);
}
void assert_qgroups_uptodate(struct btrfs_trans_handle *trans);
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index f8b6d411a034..cd8d302a1f61 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1320,7 +1320,9 @@ write_data:
bio->bi_private = rbio;
bio->bi_end_io = raid_write_end_io;
- submit_bio(WRITE, bio);
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+
+ submit_bio(bio);
}
return;
@@ -1573,11 +1575,12 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
bio->bi_private = rbio;
bio->bi_end_io = raid_rmw_end_io;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
btrfs_bio_wq_end_io(rbio->fs_info, bio,
BTRFS_WQ_ENDIO_RAID56);
- submit_bio(READ, bio);
+ submit_bio(bio);
}
/* the actual write will happen once the reads are done */
return 0;
@@ -2097,11 +2100,12 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
bio->bi_private = rbio;
bio->bi_end_io = raid_recover_end_io;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
btrfs_bio_wq_end_io(rbio->fs_info, bio,
BTRFS_WQ_ENDIO_RAID56);
- submit_bio(READ, bio);
+ submit_bio(bio);
}
out:
return 0;
@@ -2433,7 +2437,9 @@ submit_write:
bio->bi_private = rbio;
bio->bi_end_io = raid_write_end_io;
- submit_bio(WRITE, bio);
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+
+ submit_bio(bio);
}
return;
@@ -2610,11 +2616,12 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
bio->bi_private = rbio;
bio->bi_end_io = raid56_parity_scrub_end_io;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
btrfs_bio_wq_end_io(rbio->fs_info, bio,
BTRFS_WQ_ENDIO_RAID56);
- submit_bio(READ, bio);
+ submit_bio(bio);
}
/* the actual write will happen once the reads are done */
return;
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 0477dca154ed..b26a5aea41b4 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -235,12 +235,12 @@ static void backref_cache_cleanup(struct backref_cache *cache)
cache->last_trans = 0;
for (i = 0; i < BTRFS_MAX_LEVEL; i++)
- BUG_ON(!list_empty(&cache->pending[i]));
- BUG_ON(!list_empty(&cache->changed));
- BUG_ON(!list_empty(&cache->detached));
- BUG_ON(!RB_EMPTY_ROOT(&cache->rb_root));
- BUG_ON(cache->nr_nodes);
- BUG_ON(cache->nr_edges);
+ ASSERT(list_empty(&cache->pending[i]));
+ ASSERT(list_empty(&cache->changed));
+ ASSERT(list_empty(&cache->detached));
+ ASSERT(RB_EMPTY_ROOT(&cache->rb_root));
+ ASSERT(!cache->nr_nodes);
+ ASSERT(!cache->nr_edges);
}
static struct backref_node *alloc_backref_node(struct backref_cache *cache)
@@ -1171,8 +1171,12 @@ out:
lower = list_entry(useless.next,
struct backref_node, list);
list_del_init(&lower->list);
+ if (lower == node)
+ node = NULL;
free_backref_node(cache, lower);
}
+
+ free_backref_node(cache, node);
return ERR_PTR(err);
}
ASSERT(!node || !node->detached);
@@ -1719,7 +1723,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
btrfs_header_owner(leaf),
key.objectid, key.offset);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
break;
}
@@ -1727,7 +1731,7 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
parent, btrfs_header_owner(leaf),
key.objectid, key.offset);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
break;
}
}
@@ -2604,25 +2608,28 @@ static int reserve_metadata_space(struct btrfs_trans_handle *trans,
trans->block_rsv = rc->block_rsv;
rc->reserved_bytes += num_bytes;
+
+ /*
+ * We are under a transaction here so we can only do limited flushing.
+ * If we get an enospc just kick back -EAGAIN so we know to drop the
+ * transaction and try to refill when we can flush all the things.
+ */
ret = btrfs_block_rsv_refill(root, rc->block_rsv, num_bytes,
- BTRFS_RESERVE_FLUSH_ALL);
+ BTRFS_RESERVE_FLUSH_LIMIT);
if (ret) {
- if (ret == -EAGAIN) {
- tmp = rc->extent_root->nodesize *
- RELOCATION_RESERVED_NODES;
- while (tmp <= rc->reserved_bytes)
- tmp <<= 1;
- /*
- * only one thread can access block_rsv at this point,
- * so we don't need hold lock to protect block_rsv.
- * we expand more reservation size here to allow enough
- * space for relocation and we will return earlier in
- * enospc case.
- */
- rc->block_rsv->size = tmp + rc->extent_root->nodesize *
- RELOCATION_RESERVED_NODES;
- }
- return ret;
+ tmp = rc->extent_root->nodesize * RELOCATION_RESERVED_NODES;
+ while (tmp <= rc->reserved_bytes)
+ tmp <<= 1;
+ /*
+ * only one thread can access block_rsv at this point,
+ * so we don't need hold lock to protect block_rsv.
+ * we expand more reservation size here to allow enough
+ * space for relocation and we will return earlier in
+ * enospc case.
+ */
+ rc->block_rsv->size = tmp + rc->extent_root->nodesize *
+ RELOCATION_RESERVED_NODES;
+ return -EAGAIN;
}
return 0;
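With BTRFS_RESERVE_FLUSH_LIMIT the refill can no longer flush everything while a transaction is held, so -EAGAIN is now returned for the caller to act on; prepare_to_relocate (below) compensates by pre-filling the reserve with BTRFS_RESERVE_FLUSH_ALL before any transaction starts. A hypothetical caller pattern, illustrative only and not taken from the patch:

	while (1) {
		ret = reserve_metadata_space(trans, rc, node);
		if (ret != -EAGAIN)
			break;
		/* Drop the handle so full flushing can make progress. */
		btrfs_end_transaction(trans, rc->extent_root);
		trans = btrfs_start_transaction(rc->extent_root, 0);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
	}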
@@ -3871,6 +3878,7 @@ static noinline_for_stack
int prepare_to_relocate(struct reloc_control *rc)
{
struct btrfs_trans_handle *trans;
+ int ret;
rc->block_rsv = btrfs_alloc_block_rsv(rc->extent_root,
BTRFS_BLOCK_RSV_TEMP);
@@ -3885,6 +3893,11 @@ int prepare_to_relocate(struct reloc_control *rc)
rc->reserved_bytes = 0;
rc->block_rsv->size = rc->extent_root->nodesize *
RELOCATION_RESERVED_NODES;
+ ret = btrfs_block_rsv_refill(rc->extent_root,
+ rc->block_rsv, rc->block_rsv->size,
+ BTRFS_RESERVE_FLUSH_ALL);
+ if (ret)
+ return ret;
rc->create_reloc_tree = 1;
set_reloc_control(rc);
@@ -4643,7 +4656,7 @@ int btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans,
if (rc->merge_reloc_tree) {
ret = btrfs_block_rsv_migrate(&pending->block_rsv,
rc->block_rsv,
- rc->nodes_relocated);
+ rc->nodes_relocated, 1);
if (ret)
return ret;
}
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index f1c30861d062..7fd7e1830cfe 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -150,7 +150,7 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
ret = btrfs_search_slot(trans, root, key, path, 0, 1);
if (ret < 0) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -176,20 +176,20 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
ret = btrfs_search_slot(trans, root, key, path,
-1, 1);
if (ret < 0) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
ret = btrfs_del_item(trans, root, path);
if (ret < 0) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
btrfs_release_path(path);
ret = btrfs_insert_empty_item(trans, root, path,
key, sizeof(*item));
if (ret < 0) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
l = path->nodes[0];
@@ -448,7 +448,7 @@ again:
ret = btrfs_insert_empty_item(trans, tree_root, path, &key,
sizeof(*ref) + name_len);
if (ret) {
- btrfs_abort_transaction(trans, tree_root, ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_free_path(path);
return ret;
}
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 70427ef66b04..1d195d2b32c6 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1504,8 +1504,9 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
sblock->no_io_error_seen = 0;
} else {
bio->bi_iter.bi_sector = page->physical >> 9;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
- if (btrfsic_submit_bio_wait(READ, bio))
+ if (btrfsic_submit_bio_wait(bio))
sblock->no_io_error_seen = 0;
}
@@ -1583,6 +1584,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
return -EIO;
bio->bi_bdev = page_bad->dev->bdev;
bio->bi_iter.bi_sector = page_bad->physical >> 9;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
if (PAGE_SIZE != ret) {
@@ -1590,7 +1592,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
return -EIO;
}
- if (btrfsic_submit_bio_wait(WRITE, bio)) {
+ if (btrfsic_submit_bio_wait(bio)) {
btrfs_dev_stat_inc_and_print(page_bad->dev,
BTRFS_DEV_STAT_WRITE_ERRS);
btrfs_dev_replace_stats_inc(
@@ -1684,6 +1686,7 @@ again:
bio->bi_end_io = scrub_wr_bio_end_io;
bio->bi_bdev = sbio->dev->bdev;
bio->bi_iter.bi_sector = sbio->physical >> 9;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
sbio->err = 0;
} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
spage->physical_for_dev_replace ||
@@ -1731,7 +1734,7 @@ static void scrub_wr_submit(struct scrub_ctx *sctx)
* orders the requests before sending them to the driver which
* doubled the write performance on spinning disks when measured
* with Linux 3.5 */
- btrfsic_submit_bio(WRITE, sbio->bio);
+ btrfsic_submit_bio(sbio->bio);
}
static void scrub_wr_bio_end_io(struct bio *bio)
@@ -2041,7 +2044,7 @@ static void scrub_submit(struct scrub_ctx *sctx)
sbio = sctx->bios[sctx->curr];
sctx->curr = -1;
scrub_pending_bio_inc(sctx);
- btrfsic_submit_bio(READ, sbio->bio);
+ btrfsic_submit_bio(sbio->bio);
}
static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
@@ -2088,6 +2091,7 @@ again:
bio->bi_end_io = scrub_bio_end_io;
bio->bi_bdev = sbio->dev->bdev;
bio->bi_iter.bi_sector = sbio->physical >> 9;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
sbio->err = 0;
} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
spage->physical ||
@@ -3781,27 +3785,27 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
if (fs_info->scrub_workers_refcnt == 0) {
if (is_dev_replace)
fs_info->scrub_workers =
- btrfs_alloc_workqueue("scrub", flags,
+ btrfs_alloc_workqueue(fs_info, "scrub", flags,
1, 4);
else
fs_info->scrub_workers =
- btrfs_alloc_workqueue("scrub", flags,
+ btrfs_alloc_workqueue(fs_info, "scrub", flags,
max_active, 4);
if (!fs_info->scrub_workers)
goto fail_scrub_workers;
fs_info->scrub_wr_completion_workers =
- btrfs_alloc_workqueue("scrubwrc", flags,
+ btrfs_alloc_workqueue(fs_info, "scrubwrc", flags,
max_active, 2);
if (!fs_info->scrub_wr_completion_workers)
goto fail_scrub_wr_completion_workers;
fs_info->scrub_nocow_workers =
- btrfs_alloc_workqueue("scrubnc", flags, 1, 0);
+ btrfs_alloc_workqueue(fs_info, "scrubnc", flags, 1, 0);
if (!fs_info->scrub_nocow_workers)
goto fail_scrub_nocow_workers;
fs_info->scrub_parity_workers =
- btrfs_alloc_workqueue("scrubparity", flags,
+ btrfs_alloc_workqueue(fs_info, "scrubparity", flags,
max_active, 2);
if (!fs_info->scrub_parity_workers)
goto fail_scrub_parity_workers;
@@ -3856,7 +3860,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
/* not supported for data w/o checksums */
- btrfs_err(fs_info,
+ btrfs_err_rl(fs_info,
"scrub: size assumption sectorsize != PAGE_SIZE "
"(%d != %lu) fails",
fs_info->chunk_root->sectorsize, PAGE_SIZE);
@@ -4436,6 +4440,7 @@ static int write_page_nocow(struct scrub_ctx *sctx,
bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
bio->bi_bdev = dev->bdev;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
ret = bio_add_page(bio, page, PAGE_SIZE, 0);
if (ret != PAGE_SIZE) {
leave_with_eio:
@@ -4444,7 +4449,7 @@ leave_with_eio:
return -EIO;
}
- if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
+ if (btrfsic_submit_bio_wait(bio))
goto leave_with_eio;
bio_put(bio);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index b71dd298385c..efe129fe2678 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -231,7 +231,6 @@ struct pending_dir_move {
u64 parent_ino;
u64 ino;
u64 gen;
- bool is_orphan;
struct list_head update_refs;
};
@@ -274,6 +273,39 @@ struct name_cache_entry {
char name[];
};
+static void inconsistent_snapshot_error(struct send_ctx *sctx,
+ enum btrfs_compare_tree_result result,
+ const char *what)
+{
+ const char *result_string;
+
+ switch (result) {
+ case BTRFS_COMPARE_TREE_NEW:
+ result_string = "new";
+ break;
+ case BTRFS_COMPARE_TREE_DELETED:
+ result_string = "deleted";
+ break;
+ case BTRFS_COMPARE_TREE_CHANGED:
+ result_string = "updated";
+ break;
+ case BTRFS_COMPARE_TREE_SAME:
+ ASSERT(0);
+ result_string = "unchanged";
+ break;
+ default:
+ ASSERT(0);
+ result_string = "unexpected";
+ }
+
+ btrfs_err(sctx->send_root->fs_info,
+ "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
+ result_string, what, sctx->cmp_key->objectid,
+ sctx->send_root->root_key.objectid,
+ (sctx->parent_root ?
+ sctx->parent_root->root_key.objectid : 0));
+}
+
static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
static struct waiting_dir_move *
@@ -1861,7 +1893,8 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
* was already unlinked/moved, so we can safely assume that we will not
* overwrite anything at this point in time.
*/
- if (other_inode > sctx->send_progress) {
+ if (other_inode > sctx->send_progress ||
+ is_waiting_for_move(sctx, other_inode)) {
ret = get_inode_info(sctx->parent_root, other_inode, NULL,
who_gen, NULL, NULL, NULL, NULL);
if (ret < 0)
@@ -2502,6 +2535,8 @@ verbose_printk("btrfs: send_utimes %llu\n", ino);
key.type = BTRFS_INODE_ITEM_KEY;
key.offset = 0;
ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0);
+ if (ret > 0)
+ ret = -ENOENT;
if (ret < 0)
goto out;
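The new ret > 0 check relies on the btrfs_search_slot() return convention, restated here for clarity:

	/*
	 * btrfs_search_slot() returns:
	 *   ret < 0   an error
	 *   ret == 0  the exact key was found at path
	 *   ret > 0   the key is absent; path points at the insertion slot,
	 *             which send_utimes() maps to -ENOENT above
	 */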
@@ -2947,6 +2982,10 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
}
if (loc.objectid > send_progress) {
+ struct orphan_dir_info *odi;
+
+ odi = get_orphan_dir_info(sctx, dir);
+ free_orphan_dir_info(sctx, odi);
ret = 0;
goto out;
}
@@ -3047,7 +3086,6 @@ static int add_pending_dir_move(struct send_ctx *sctx,
pm->parent_ino = parent_ino;
pm->ino = ino;
pm->gen = ino_gen;
- pm->is_orphan = is_orphan;
INIT_LIST_HEAD(&pm->list);
INIT_LIST_HEAD(&pm->update_refs);
RB_CLEAR_NODE(&pm->node);
@@ -3113,6 +3151,48 @@ static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx,
return NULL;
}
+static int path_loop(struct send_ctx *sctx, struct fs_path *name,
+ u64 ino, u64 gen, u64 *ancestor_ino)
+{
+ int ret = 0;
+ u64 parent_inode = 0;
+ u64 parent_gen = 0;
+ u64 start_ino = ino;
+
+ *ancestor_ino = 0;
+ while (ino != BTRFS_FIRST_FREE_OBJECTID) {
+ fs_path_reset(name);
+
+ if (is_waiting_for_rm(sctx, ino))
+ break;
+ if (is_waiting_for_move(sctx, ino)) {
+ if (*ancestor_ino == 0)
+ *ancestor_ino = ino;
+ ret = get_first_ref(sctx->parent_root, ino,
+ &parent_inode, &parent_gen, name);
+ } else {
+ ret = __get_cur_name_and_parent(sctx, ino, gen,
+ &parent_inode,
+ &parent_gen, name);
+ if (ret > 0) {
+ ret = 0;
+ break;
+ }
+ }
+ if (ret < 0)
+ break;
+ if (parent_inode == start_ino) {
+ ret = 1;
+ if (*ancestor_ino == 0)
+ *ancestor_ino = ino;
+ break;
+ }
+ ino = parent_inode;
+ gen = parent_gen;
+ }
+ return ret;
+}
+
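path_loop() guards against rename cycles: when an inode and one of its new ancestors both have pending moves, applying one move first could create a path that runs through the other's not-yet-applied location. A generic sketch of the same cycle test over a plain parent map (hypothetical helper, not from the patch):

	/* Returns true if walking parents from 'ino' ever reaches 'start'. */
	static bool forms_loop(u64 (*parent_of)(u64 ino), u64 start, u64 ino)
	{
		while (ino != 0) {	/* 0 serves as the root sentinel */
			if (ino == start)
				return true;
			ino = parent_of(ino);
		}
		return false;
	}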
static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
{
struct fs_path *from_path = NULL;
@@ -3123,6 +3203,8 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
u64 parent_ino, parent_gen;
struct waiting_dir_move *dm = NULL;
u64 rmdir_ino = 0;
+ u64 ancestor;
+ bool is_orphan;
int ret;
name = fs_path_alloc();
@@ -3135,9 +3217,10 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
dm = get_waiting_dir_move(sctx, pm->ino);
ASSERT(dm);
rmdir_ino = dm->rmdir_ino;
+ is_orphan = dm->orphanized;
free_waiting_dir_move(sctx, dm);
- if (pm->is_orphan) {
+ if (is_orphan) {
ret = gen_unique_name(sctx, pm->ino,
pm->gen, from_path);
} else {
@@ -3155,6 +3238,24 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
goto out;
sctx->send_progress = sctx->cur_ino + 1;
+ ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor);
+ if (ret < 0)
+ goto out;
+ if (ret) {
+ LIST_HEAD(deleted_refs);
+ ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
+ ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
+ &pm->update_refs, &deleted_refs,
+ is_orphan);
+ if (ret < 0)
+ goto out;
+ if (rmdir_ino) {
+ dm = get_waiting_dir_move(sctx, pm->ino);
+ ASSERT(dm);
+ dm->rmdir_ino = rmdir_ino;
+ }
+ goto out;
+ }
fs_path_reset(name);
to_path = name;
name = NULL;
@@ -3174,7 +3275,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
/* already deleted */
goto finish;
}
- ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino + 1);
+ ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino);
if (ret < 0)
goto out;
if (!ret)
@@ -3204,8 +3305,18 @@ finish:
* and old parent(s).
*/
list_for_each_entry(cur, &pm->update_refs, list) {
- if (cur->dir == rmdir_ino)
+ /*
+ * The parent inode might have been deleted in the send snapshot
+ */
+ ret = get_inode_info(sctx->send_root, cur->dir, NULL,
+ NULL, NULL, NULL, NULL, NULL);
+ if (ret == -ENOENT) {
+ ret = 0;
continue;
+ }
+ if (ret < 0)
+ goto out;
+
ret = send_utimes(sctx, cur->dir, cur->dir_gen);
if (ret < 0)
goto out;
@@ -3325,6 +3436,7 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
u64 left_gen;
u64 right_gen;
int ret = 0;
+ struct waiting_dir_move *wdm;
if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
return 0;
@@ -3383,7 +3495,8 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
goto out;
}
- if (is_waiting_for_move(sctx, di_key.objectid)) {
+ wdm = get_waiting_dir_move(sctx, di_key.objectid);
+ if (wdm && !wdm->orphanized) {
ret = add_pending_dir_move(sctx,
sctx->cur_ino,
sctx->cur_inode_gen,
@@ -3470,7 +3583,8 @@ static int wait_for_parent_move(struct send_ctx *sctx,
ret = is_ancestor(sctx->parent_root,
sctx->cur_ino, sctx->cur_inode_gen,
ino, path_before);
- break;
+ if (ret)
+ break;
}
fs_path_reset(path_before);
@@ -3643,11 +3757,26 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
goto out;
if (ret) {
struct name_cache_entry *nce;
+ struct waiting_dir_move *wdm;
ret = orphanize_inode(sctx, ow_inode, ow_gen,
cur->full_path);
if (ret < 0)
goto out;
+
+ /*
+ * If ow_inode has its rename operation delayed
+ * make sure that its orphanized name is used in
+ * the source path when performing its rename
+ * operation.
+ */
+ if (is_waiting_for_move(sctx, ow_inode)) {
+ wdm = get_waiting_dir_move(sctx,
+ ow_inode);
+ ASSERT(wdm);
+ wdm->orphanized = true;
+ }
+
/*
* Make sure we clear our orphanized inode's
* name from the name cache. This is because the
@@ -3663,6 +3792,19 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
name_cache_delete(sctx, nce);
kfree(nce);
}
+
+ /*
+ * ow_inode might currently be an ancestor of
+ * cur_ino, therefore compute valid_path (the
+ * current path of cur_ino) again because it
+ * might contain the pre-orphanization name of
+ * ow_inode, which is no longer valid.
+ */
+ fs_path_reset(valid_path);
+ ret = get_cur_path(sctx, sctx->cur_ino,
+ sctx->cur_inode_gen, valid_path);
+ if (ret < 0)
+ goto out;
} else {
ret = send_unlink(sctx, cur->full_path);
if (ret < 0)
@@ -5602,7 +5744,10 @@ static int changed_ref(struct send_ctx *sctx,
{
int ret = 0;
- BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
+ if (sctx->cur_ino != sctx->cmp_key->objectid) {
+ inconsistent_snapshot_error(sctx, result, "reference");
+ return -EIO;
+ }
if (!sctx->cur_inode_new_gen &&
sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
@@ -5627,7 +5772,10 @@ static int changed_xattr(struct send_ctx *sctx,
{
int ret = 0;
- BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
+ if (sctx->cur_ino != sctx->cmp_key->objectid) {
+ inconsistent_snapshot_error(sctx, result, "xattr");
+ return -EIO;
+ }
if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
if (result == BTRFS_COMPARE_TREE_NEW)
@@ -5651,7 +5799,10 @@ static int changed_extent(struct send_ctx *sctx,
{
int ret = 0;
- BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);
+ if (sctx->cur_ino != sctx->cmp_key->objectid) {
+ inconsistent_snapshot_error(sctx, result, "extent");
+ return -EIO;
+ }
if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
if (result != BTRFS_COMPARE_TREE_DELETED)
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 60e7179ed4b7..864ce334f696 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -184,6 +184,22 @@ static const char * const logtypes[] = {
"debug",
};
+
+/*
+ * Use one ratelimit state per log level so that a flood of less important
+ * messages doesn't cause more important ones to be dropped.
+ */
+static struct ratelimit_state printk_limits[] = {
+ RATELIMIT_STATE_INIT(printk_limits[0], DEFAULT_RATELIMIT_INTERVAL, 100),
+ RATELIMIT_STATE_INIT(printk_limits[1], DEFAULT_RATELIMIT_INTERVAL, 100),
+ RATELIMIT_STATE_INIT(printk_limits[2], DEFAULT_RATELIMIT_INTERVAL, 100),
+ RATELIMIT_STATE_INIT(printk_limits[3], DEFAULT_RATELIMIT_INTERVAL, 100),
+ RATELIMIT_STATE_INIT(printk_limits[4], DEFAULT_RATELIMIT_INTERVAL, 100),
+ RATELIMIT_STATE_INIT(printk_limits[5], DEFAULT_RATELIMIT_INTERVAL, 100),
+ RATELIMIT_STATE_INIT(printk_limits[6], DEFAULT_RATELIMIT_INTERVAL, 100),
+ RATELIMIT_STATE_INIT(printk_limits[7], DEFAULT_RATELIMIT_INTERVAL, 100),
+};
+
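A standalone sketch of the ratelimit interface used here (kernel API, not part of this patch): each state grants 'burst' messages per 'interval', and __ratelimit() returns nonzero while the budget lasts. One state per log level means a flood of, say, debug output cannot exhaust the budget shared with critical messages.

	#include <linux/ratelimit.h>

	static DEFINE_RATELIMIT_STATE(demo_rs, DEFAULT_RATELIMIT_INTERVAL, 100);

	static void demo_info(const char *msg)
	{
		if (__ratelimit(&demo_rs))	/* nonzero while under budget */
			pr_info("%s\n", msg);
	}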
void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
{
struct super_block *sb = fs_info->sb;
@@ -192,6 +208,7 @@ void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
va_list args;
const char *type = logtypes[4];
int kern_level;
+ struct ratelimit_state *ratelimit;
va_start(args, fmt);
@@ -202,13 +219,18 @@ void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
lvl[size] = '\0';
fmt += size;
type = logtypes[kern_level - '0'];
- } else
+ ratelimit = &printk_limits[kern_level - '0'];
+ } else {
*lvl = '\0';
+ /* Default to debug output */
+ ratelimit = &printk_limits[7];
+ }
vaf.fmt = fmt;
vaf.va = &args;
- printk("%sBTRFS %s (device %s): %pV\n", lvl, type, sb->s_id, &vaf);
+ if (__ratelimit(ratelimit))
+ printk("%sBTRFS %s (device %s): %pV\n", lvl, type, sb->s_id, &vaf);
va_end(args);
}
@@ -229,9 +251,11 @@ void btrfs_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
*/
__cold
void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
- struct btrfs_root *root, const char *function,
+ const char *function,
unsigned int line, int errno)
{
+ struct btrfs_fs_info *fs_info = trans->fs_info;
+
trans->aborted = errno;
/* Nothing used. The other threads that have joined this
* transaction may be able to continue. */
@@ -239,16 +263,16 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
const char *errstr;
errstr = btrfs_decode_error(errno);
- btrfs_warn(root->fs_info,
+ btrfs_warn(fs_info,
"%s:%d: Aborting unused transaction(%s).",
function, line, errstr);
return;
}
ACCESS_ONCE(trans->transaction->aborted) = errno;
/* Wake up anybody who may be waiting on this transaction */
- wake_up(&root->fs_info->transaction_wait);
- wake_up(&root->fs_info->transaction_blocked_wait);
- __btrfs_handle_fs_error(root->fs_info, function, line, errno, NULL);
+ wake_up(&fs_info->transaction_wait);
+ wake_up(&fs_info->transaction_blocked_wait);
+ __btrfs_handle_fs_error(fs_info, function, line, errno, NULL);
}
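Callers throughout this series now pass only the handle and the error; a sketch of the corresponding wrapper macro, assuming it simply mirrors the new signature above:

	#define btrfs_abort_transaction(trans, errno)			\
	do {								\
		__btrfs_abort_transaction((trans), __func__,		\
					  __LINE__, (errno));		\
	} while (0)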
/*
* __btrfs_panic decodes unexpected, fatal errors from the caller,
@@ -432,12 +456,12 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
*/
break;
case Opt_nodatasum:
- btrfs_set_and_info(root, NODATASUM,
+ btrfs_set_and_info(info, NODATASUM,
"setting nodatasum");
break;
case Opt_datasum:
- if (btrfs_test_opt(root, NODATASUM)) {
- if (btrfs_test_opt(root, NODATACOW))
+ if (btrfs_test_opt(info, NODATASUM)) {
+ if (btrfs_test_opt(info, NODATACOW))
btrfs_info(root->fs_info, "setting datasum, datacow enabled");
else
btrfs_info(root->fs_info, "setting datasum");
@@ -446,9 +470,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
btrfs_clear_opt(info->mount_opt, NODATASUM);
break;
case Opt_nodatacow:
- if (!btrfs_test_opt(root, NODATACOW)) {
- if (!btrfs_test_opt(root, COMPRESS) ||
- !btrfs_test_opt(root, FORCE_COMPRESS)) {
+ if (!btrfs_test_opt(info, NODATACOW)) {
+ if (!btrfs_test_opt(info, COMPRESS) ||
+ !btrfs_test_opt(info, FORCE_COMPRESS)) {
btrfs_info(root->fs_info,
"setting nodatacow, compression disabled");
} else {
@@ -461,7 +485,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
btrfs_set_opt(info->mount_opt, NODATASUM);
break;
case Opt_datacow:
- btrfs_clear_and_info(root, NODATACOW,
+ btrfs_clear_and_info(info, NODATACOW,
"setting datacow");
break;
case Opt_compress_force:
@@ -470,10 +494,11 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
/* Fallthrough */
case Opt_compress:
case Opt_compress_type:
- saved_compress_type = btrfs_test_opt(root, COMPRESS) ?
+ saved_compress_type = btrfs_test_opt(info,
+ COMPRESS) ?
info->compress_type : BTRFS_COMPRESS_NONE;
saved_compress_force =
- btrfs_test_opt(root, FORCE_COMPRESS);
+ btrfs_test_opt(info, FORCE_COMPRESS);
if (token == Opt_compress ||
token == Opt_compress_force ||
strcmp(args[0].from, "zlib") == 0) {
@@ -513,10 +538,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
*/
btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
}
- if ((btrfs_test_opt(root, COMPRESS) &&
+ if ((btrfs_test_opt(info, COMPRESS) &&
(info->compress_type != saved_compress_type ||
compress_force != saved_compress_force)) ||
- (!btrfs_test_opt(root, COMPRESS) &&
+ (!btrfs_test_opt(info, COMPRESS) &&
no_compress == 1)) {
btrfs_info(root->fs_info,
"%s %s compression",
@@ -526,25 +551,25 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
compress_force = false;
break;
case Opt_ssd:
- btrfs_set_and_info(root, SSD,
+ btrfs_set_and_info(info, SSD,
"use ssd allocation scheme");
break;
case Opt_ssd_spread:
- btrfs_set_and_info(root, SSD_SPREAD,
+ btrfs_set_and_info(info, SSD_SPREAD,
"use spread ssd allocation scheme");
btrfs_set_opt(info->mount_opt, SSD);
break;
case Opt_nossd:
- btrfs_set_and_info(root, NOSSD,
+ btrfs_set_and_info(info, NOSSD,
"not using ssd allocation scheme");
btrfs_clear_opt(info->mount_opt, SSD);
break;
case Opt_barrier:
- btrfs_clear_and_info(root, NOBARRIER,
+ btrfs_clear_and_info(info, NOBARRIER,
"turning on barriers");
break;
case Opt_nobarrier:
- btrfs_set_and_info(root, NOBARRIER,
+ btrfs_set_and_info(info, NOBARRIER,
"turning off barriers");
break;
case Opt_thread_pool:
@@ -604,24 +629,24 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
root->fs_info->sb->s_flags &= ~MS_POSIXACL;
break;
case Opt_notreelog:
- btrfs_set_and_info(root, NOTREELOG,
+ btrfs_set_and_info(info, NOTREELOG,
"disabling tree log");
break;
case Opt_treelog:
- btrfs_clear_and_info(root, NOTREELOG,
+ btrfs_clear_and_info(info, NOTREELOG,
"enabling tree log");
break;
case Opt_norecovery:
case Opt_nologreplay:
- btrfs_set_and_info(root, NOLOGREPLAY,
+ btrfs_set_and_info(info, NOLOGREPLAY,
"disabling log replay at mount time");
break;
case Opt_flushoncommit:
- btrfs_set_and_info(root, FLUSHONCOMMIT,
+ btrfs_set_and_info(info, FLUSHONCOMMIT,
"turning on flush-on-commit");
break;
case Opt_noflushoncommit:
- btrfs_clear_and_info(root, FLUSHONCOMMIT,
+ btrfs_clear_and_info(info, FLUSHONCOMMIT,
"turning off flush-on-commit");
break;
case Opt_ratio:
@@ -638,11 +663,11 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
}
break;
case Opt_discard:
- btrfs_set_and_info(root, DISCARD,
+ btrfs_set_and_info(info, DISCARD,
"turning on discard");
break;
case Opt_nodiscard:
- btrfs_clear_and_info(root, DISCARD,
+ btrfs_clear_and_info(info, DISCARD,
"turning off discard");
break;
case Opt_space_cache:
@@ -651,12 +676,13 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
strcmp(args[0].from, "v1") == 0) {
btrfs_clear_opt(root->fs_info->mount_opt,
FREE_SPACE_TREE);
- btrfs_set_and_info(root, SPACE_CACHE,
+ btrfs_set_and_info(info, SPACE_CACHE,
"enabling disk space caching");
} else if (strcmp(args[0].from, "v2") == 0) {
btrfs_clear_opt(root->fs_info->mount_opt,
SPACE_CACHE);
- btrfs_set_and_info(root, FREE_SPACE_TREE,
+ btrfs_set_and_info(info,
+ FREE_SPACE_TREE,
"enabling free space tree");
} else {
ret = -EINVAL;
@@ -667,12 +693,14 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
btrfs_set_opt(info->mount_opt, RESCAN_UUID_TREE);
break;
case Opt_no_space_cache:
- if (btrfs_test_opt(root, SPACE_CACHE)) {
- btrfs_clear_and_info(root, SPACE_CACHE,
+ if (btrfs_test_opt(info, SPACE_CACHE)) {
+ btrfs_clear_and_info(info,
+ SPACE_CACHE,
"disabling disk space caching");
}
- if (btrfs_test_opt(root, FREE_SPACE_TREE)) {
- btrfs_clear_and_info(root, FREE_SPACE_TREE,
+ if (btrfs_test_opt(info, FREE_SPACE_TREE)) {
+ btrfs_clear_and_info(info,
+ FREE_SPACE_TREE,
"disabling free space tree");
}
break;
@@ -685,7 +713,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
"disabling inode map caching");
break;
case Opt_clear_cache:
- btrfs_set_and_info(root, CLEAR_CACHE,
+ btrfs_set_and_info(info, CLEAR_CACHE,
"force clearing of disk cache");
break;
case Opt_user_subvol_rm_allowed:
@@ -698,11 +726,11 @@ int btrfs_parse_options(struct btrfs_root *root, char *options,
btrfs_clear_opt(info->mount_opt, ENOSPC_DEBUG);
break;
case Opt_defrag:
- btrfs_set_and_info(root, AUTO_DEFRAG,
+ btrfs_set_and_info(info, AUTO_DEFRAG,
"enabling auto defrag");
break;
case Opt_nodefrag:
- btrfs_clear_and_info(root, AUTO_DEFRAG,
+ btrfs_clear_and_info(info, AUTO_DEFRAG,
"disabling auto defrag");
break;
case Opt_recovery:
@@ -810,22 +838,22 @@ check:
/*
* Extra check for current option against current flag
*/
- if (btrfs_test_opt(root, NOLOGREPLAY) && !(new_flags & MS_RDONLY)) {
+ if (btrfs_test_opt(info, NOLOGREPLAY) && !(new_flags & MS_RDONLY)) {
btrfs_err(root->fs_info,
"nologreplay must be used with ro mount option");
ret = -EINVAL;
}
out:
if (btrfs_fs_compat_ro(root->fs_info, FREE_SPACE_TREE) &&
- !btrfs_test_opt(root, FREE_SPACE_TREE) &&
- !btrfs_test_opt(root, CLEAR_CACHE)) {
+ !btrfs_test_opt(info, FREE_SPACE_TREE) &&
+ !btrfs_test_opt(info, CLEAR_CACHE)) {
btrfs_err(root->fs_info, "cannot disable free space tree");
ret = -EINVAL;
}
- if (!ret && btrfs_test_opt(root, SPACE_CACHE))
+ if (!ret && btrfs_test_opt(info, SPACE_CACHE))
btrfs_info(root->fs_info, "disk space caching is enabled");
- if (!ret && btrfs_test_opt(root, FREE_SPACE_TREE))
+ if (!ret && btrfs_test_opt(info, FREE_SPACE_TREE))
btrfs_info(root->fs_info, "using free space tree");
kfree(orig);
return ret;
@@ -1149,7 +1177,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
struct btrfs_root *root = fs_info->tree_root;
- trace_btrfs_sync_fs(wait);
+ trace_btrfs_sync_fs(fs_info, wait);
if (!wait) {
filemap_flush(fs_info->btree_inode->i_mapping);
@@ -1192,13 +1220,13 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
struct btrfs_root *root = info->tree_root;
char *compress_type;
- if (btrfs_test_opt(root, DEGRADED))
+ if (btrfs_test_opt(info, DEGRADED))
seq_puts(seq, ",degraded");
- if (btrfs_test_opt(root, NODATASUM))
+ if (btrfs_test_opt(info, NODATASUM))
seq_puts(seq, ",nodatasum");
- if (btrfs_test_opt(root, NODATACOW))
+ if (btrfs_test_opt(info, NODATACOW))
seq_puts(seq, ",nodatacow");
- if (btrfs_test_opt(root, NOBARRIER))
+ if (btrfs_test_opt(info, NOBARRIER))
seq_puts(seq, ",nobarrier");
if (info->max_inline != BTRFS_DEFAULT_MAX_INLINE)
seq_printf(seq, ",max_inline=%llu", info->max_inline);
@@ -1207,56 +1235,56 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
if (info->thread_pool_size != min_t(unsigned long,
num_online_cpus() + 2, 8))
seq_printf(seq, ",thread_pool=%d", info->thread_pool_size);
- if (btrfs_test_opt(root, COMPRESS)) {
+ if (btrfs_test_opt(info, COMPRESS)) {
if (info->compress_type == BTRFS_COMPRESS_ZLIB)
compress_type = "zlib";
else
compress_type = "lzo";
- if (btrfs_test_opt(root, FORCE_COMPRESS))
+ if (btrfs_test_opt(info, FORCE_COMPRESS))
seq_printf(seq, ",compress-force=%s", compress_type);
else
seq_printf(seq, ",compress=%s", compress_type);
}
- if (btrfs_test_opt(root, NOSSD))
+ if (btrfs_test_opt(info, NOSSD))
seq_puts(seq, ",nossd");
- if (btrfs_test_opt(root, SSD_SPREAD))
+ if (btrfs_test_opt(info, SSD_SPREAD))
seq_puts(seq, ",ssd_spread");
- else if (btrfs_test_opt(root, SSD))
+ else if (btrfs_test_opt(info, SSD))
seq_puts(seq, ",ssd");
- if (btrfs_test_opt(root, NOTREELOG))
+ if (btrfs_test_opt(info, NOTREELOG))
seq_puts(seq, ",notreelog");
- if (btrfs_test_opt(root, NOLOGREPLAY))
+ if (btrfs_test_opt(info, NOLOGREPLAY))
seq_puts(seq, ",nologreplay");
- if (btrfs_test_opt(root, FLUSHONCOMMIT))
+ if (btrfs_test_opt(info, FLUSHONCOMMIT))
seq_puts(seq, ",flushoncommit");
- if (btrfs_test_opt(root, DISCARD))
+ if (btrfs_test_opt(info, DISCARD))
seq_puts(seq, ",discard");
if (!(root->fs_info->sb->s_flags & MS_POSIXACL))
seq_puts(seq, ",noacl");
- if (btrfs_test_opt(root, SPACE_CACHE))
+ if (btrfs_test_opt(info, SPACE_CACHE))
seq_puts(seq, ",space_cache");
- else if (btrfs_test_opt(root, FREE_SPACE_TREE))
+ else if (btrfs_test_opt(info, FREE_SPACE_TREE))
seq_puts(seq, ",space_cache=v2");
else
seq_puts(seq, ",nospace_cache");
- if (btrfs_test_opt(root, RESCAN_UUID_TREE))
+ if (btrfs_test_opt(info, RESCAN_UUID_TREE))
seq_puts(seq, ",rescan_uuid_tree");
- if (btrfs_test_opt(root, CLEAR_CACHE))
+ if (btrfs_test_opt(info, CLEAR_CACHE))
seq_puts(seq, ",clear_cache");
- if (btrfs_test_opt(root, USER_SUBVOL_RM_ALLOWED))
+ if (btrfs_test_opt(info, USER_SUBVOL_RM_ALLOWED))
seq_puts(seq, ",user_subvol_rm_allowed");
- if (btrfs_test_opt(root, ENOSPC_DEBUG))
+ if (btrfs_test_opt(info, ENOSPC_DEBUG))
seq_puts(seq, ",enospc_debug");
- if (btrfs_test_opt(root, AUTO_DEFRAG))
+ if (btrfs_test_opt(info, AUTO_DEFRAG))
seq_puts(seq, ",autodefrag");
- if (btrfs_test_opt(root, INODE_MAP_CACHE))
+ if (btrfs_test_opt(info, INODE_MAP_CACHE))
seq_puts(seq, ",inode_cache");
- if (btrfs_test_opt(root, SKIP_BALANCE))
+ if (btrfs_test_opt(info, SKIP_BALANCE))
seq_puts(seq, ",skip_balance");
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- if (btrfs_test_opt(root, CHECK_INTEGRITY_INCLUDING_EXTENT_DATA))
+ if (btrfs_test_opt(info, CHECK_INTEGRITY_INCLUDING_EXTENT_DATA))
seq_puts(seq, ",check_int_data");
- else if (btrfs_test_opt(root, CHECK_INTEGRITY))
+ else if (btrfs_test_opt(info, CHECK_INTEGRITY))
seq_puts(seq, ",check_int");
if (info->check_integrity_print_mask)
seq_printf(seq, ",check_int_print_mask=%d",
@@ -1265,14 +1293,14 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
if (info->metadata_ratio)
seq_printf(seq, ",metadata_ratio=%d",
info->metadata_ratio);
- if (btrfs_test_opt(root, PANIC_ON_FATAL_ERROR))
+ if (btrfs_test_opt(info, PANIC_ON_FATAL_ERROR))
seq_puts(seq, ",fatal_errors=panic");
if (info->commit_interval != BTRFS_DEFAULT_COMMIT_INTERVAL)
seq_printf(seq, ",commit=%d", info->commit_interval);
#ifdef CONFIG_BTRFS_DEBUG
- if (btrfs_test_opt(root, FRAGMENT_DATA))
+ if (btrfs_test_opt(info, FRAGMENT_DATA))
seq_puts(seq, ",fragment=data");
- if (btrfs_test_opt(root, FRAGMENT_METADATA))
+ if (btrfs_test_opt(info, FRAGMENT_METADATA))
seq_puts(seq, ",fragment=metadata");
#endif
seq_printf(seq, ",subvolid=%llu",
@@ -2030,9 +2058,6 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
* chunk).
*
* If metadata is exhausted, f_bavail will be 0.
- *
- * FIXME: not accurate for mixed block groups, total and free/used are ok,
- * available appears slightly larger.
*/
static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
@@ -2319,49 +2344,6 @@ static void btrfs_print_mod_info(void)
btrfs_crc32c_impl());
}
-static int btrfs_run_sanity_tests(void)
-{
- int ret, i;
- u32 sectorsize, nodesize;
- u32 test_sectorsize[] = {
- PAGE_SIZE,
- };
- ret = btrfs_init_test_fs();
- if (ret)
- return ret;
- for (i = 0; i < ARRAY_SIZE(test_sectorsize); i++) {
- sectorsize = test_sectorsize[i];
- for (nodesize = sectorsize;
- nodesize <= BTRFS_MAX_METADATA_BLOCKSIZE;
- nodesize <<= 1) {
- pr_info("BTRFS: selftest: sectorsize: %u nodesize: %u\n",
- sectorsize, nodesize);
- ret = btrfs_test_free_space_cache(sectorsize, nodesize);
- if (ret)
- goto out;
- ret = btrfs_test_extent_buffer_operations(sectorsize,
- nodesize);
- if (ret)
- goto out;
- ret = btrfs_test_extent_io(sectorsize, nodesize);
- if (ret)
- goto out;
- ret = btrfs_test_inodes(sectorsize, nodesize);
- if (ret)
- goto out;
- ret = btrfs_test_qgroups(sectorsize, nodesize);
- if (ret)
- goto out;
- ret = btrfs_test_free_space_tree(sectorsize, nodesize);
- if (ret)
- goto out;
- }
- }
-out:
- btrfs_destroy_test_fs();
- return ret;
-}
-
static int __init init_btrfs_fs(void)
{
int err;
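
The show_options hunks above are mechanical: btrfs_test_opt() now takes the fs_info itself instead of reaching through a root pointer. The underlying pattern is just a bit test against a per-filesystem option mask. A minimal userspace sketch of that pattern, assuming simplified names (test_opt, struct fs_info, and the OPT_* bits here are illustrative, not the kernel's exact definitions):

#include <stdio.h>

struct fs_info {
        unsigned long mount_opt;
};

#define OPT_SSD       (1UL << 0)
#define OPT_DISCARD   (1UL << 1)
#define OPT_NOTREELOG (1UL << 2)

/* the whole btrfs_test_opt() trick: token-paste the option name */
#define test_opt(info, opt) (((info)->mount_opt & OPT_##opt) != 0)

static void show_options(const struct fs_info *info)
{
        if (test_opt(info, SSD))
                printf(",ssd");
        if (test_opt(info, DISCARD))
                printf(",discard");
        if (test_opt(info, NOTREELOG))
                printf(",notreelog");
        printf("\n");
}

int main(void)
{
        struct fs_info info = { .mount_opt = OPT_SSD | OPT_DISCARD };

        show_options(&info);    /* prints ",ssd,discard" */
        return 0;
}
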
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 4879656bda3c..c6569905d3d1 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -326,6 +326,7 @@ SPACE_INFO_ATTR(bytes_used);
SPACE_INFO_ATTR(bytes_pinned);
SPACE_INFO_ATTR(bytes_reserved);
SPACE_INFO_ATTR(bytes_may_use);
+SPACE_INFO_ATTR(bytes_readonly);
SPACE_INFO_ATTR(disk_used);
SPACE_INFO_ATTR(disk_total);
BTRFS_ATTR(total_bytes_pinned, btrfs_space_info_show_total_bytes_pinned);
@@ -337,6 +338,7 @@ static struct attribute *space_info_attrs[] = {
BTRFS_ATTR_PTR(bytes_pinned),
BTRFS_ATTR_PTR(bytes_reserved),
BTRFS_ATTR_PTR(bytes_may_use),
+ BTRFS_ATTR_PTR(bytes_readonly),
BTRFS_ATTR_PTR(disk_used),
BTRFS_ATTR_PTR(disk_total),
BTRFS_ATTR_PTR(total_bytes_pinned),
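
The sysfs change exports one more space_info counter. The SPACE_INFO_ATTR() macro stamps out a show function per field and the attribute array collects them, so a new counter like bytes_readonly is a two-line addition. A hedged userspace model of that macro-generated-table idiom (struct and field names are simplified, not the kernel's):

#include <stdio.h>

struct space_info {
        unsigned long long bytes_used;
        unsigned long long bytes_readonly;
};

/* stamp out one getter per counter, mirroring SPACE_INFO_ATTR() */
#define SPACE_INFO_ATTR(field)                                          \
static unsigned long long show_##field(const struct space_info *s)     \
{                                                                       \
        return s->field;                                                \
}

SPACE_INFO_ATTR(bytes_used)
SPACE_INFO_ATTR(bytes_readonly)

struct attr {
        const char *name;
        unsigned long long (*show)(const struct space_info *);
};

static const struct attr space_info_attrs[] = {
        { "bytes_used",     show_bytes_used },
        { "bytes_readonly", show_bytes_readonly },
};

int main(void)
{
        struct space_info si = { .bytes_used = 4096, .bytes_readonly = 512 };
        size_t i;

        for (i = 0; i < sizeof(space_info_attrs) / sizeof(space_info_attrs[0]); i++)
                printf("%s: %llu\n", space_info_attrs[i].name,
                       space_info_attrs[i].show(&si));
        return 0;
}
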
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c
index 02223f3f78f4..bf62ad919a95 100644
--- a/fs/btrfs/tests/btrfs-tests.c
+++ b/fs/btrfs/tests/btrfs-tests.c
@@ -54,7 +54,7 @@ struct inode *btrfs_new_test_inode(void)
return new_inode(test_mnt->mnt_sb);
}
-int btrfs_init_test_fs(void)
+static int btrfs_init_test_fs(void)
{
int ret;
@@ -73,7 +73,7 @@ int btrfs_init_test_fs(void)
return 0;
}
-void btrfs_destroy_test_fs(void)
+static void btrfs_destroy_test_fs(void)
{
kern_unmount(test_mnt);
unregister_filesystem(&test_type);
@@ -128,14 +128,27 @@ struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void)
extent_io_tree_init(&fs_info->freed_extents[0], NULL);
extent_io_tree_init(&fs_info->freed_extents[1], NULL);
fs_info->pinned_extents = &fs_info->freed_extents[0];
+ set_bit(BTRFS_FS_STATE_DUMMY_FS_INFO, &fs_info->fs_state);
+
+ test_mnt->mnt_sb->s_fs_info = fs_info;
+
return fs_info;
}
-static void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
+void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info)
{
struct radix_tree_iter iter;
void **slot;
+ if (!fs_info)
+ return;
+
+ if (WARN_ON(!test_bit(BTRFS_FS_STATE_DUMMY_FS_INFO,
+ &fs_info->fs_state)))
+ return;
+
+ test_mnt->mnt_sb->s_fs_info = NULL;
+
spin_lock(&fs_info->buffer_lock);
radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) {
struct extent_buffer *eb;
@@ -167,10 +180,11 @@ void btrfs_free_dummy_root(struct btrfs_root *root)
{
if (!root)
return;
+ /* Will be freed by btrfs_free_fs_roots */
+ if (WARN_ON(test_bit(BTRFS_ROOT_IN_RADIX, &root->state)))
+ return;
if (root->node)
free_extent_buffer(root->node);
- if (root->fs_info)
- btrfs_free_dummy_fs_info(root->fs_info);
kfree(root);
}
@@ -220,3 +234,46 @@ void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans)
INIT_LIST_HEAD(&trans->qgroup_ref_list);
trans->type = __TRANS_DUMMY;
}
+
+int btrfs_run_sanity_tests(void)
+{
+ int ret, i;
+ u32 sectorsize, nodesize;
+ u32 test_sectorsize[] = {
+ PAGE_SIZE,
+ };
+ ret = btrfs_init_test_fs();
+ if (ret)
+ return ret;
+ for (i = 0; i < ARRAY_SIZE(test_sectorsize); i++) {
+ sectorsize = test_sectorsize[i];
+ for (nodesize = sectorsize;
+ nodesize <= BTRFS_MAX_METADATA_BLOCKSIZE;
+ nodesize <<= 1) {
+ pr_info("BTRFS: selftest: sectorsize: %u nodesize: %u\n",
+ sectorsize, nodesize);
+ ret = btrfs_test_free_space_cache(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_extent_buffer_operations(sectorsize,
+ nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_extent_io(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_inodes(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_qgroups(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ ret = btrfs_test_free_space_tree(sectorsize, nodesize);
+ if (ret)
+ goto out;
+ }
+ }
+out:
+ btrfs_destroy_test_fs();
+ return ret;
+}
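
The relocated runner above sweeps every power-of-two nodesize from the sector size up to the metadata blocksize cap. A standalone check of that doubling loop (the 64 KiB cap stands in for BTRFS_MAX_METADATA_BLOCKSIZE, assumed here):

#include <stdio.h>

#define MAX_METADATA_BLOCKSIZE 65536U   /* assumed cap, mirrors the kernel constant */

int main(void)
{
        unsigned int sectorsize = 4096;
        unsigned int nodesize;

        for (nodesize = sectorsize;
             nodesize <= MAX_METADATA_BLOCKSIZE;
             nodesize <<= 1)
                printf("selftest: sectorsize: %u nodesize: %u\n",
                       sectorsize, nodesize);
        return 0;
}
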
diff --git a/fs/btrfs/tests/btrfs-tests.h b/fs/btrfs/tests/btrfs-tests.h
index 66fb6b701eb7..b17ffbe8f9f3 100644
--- a/fs/btrfs/tests/btrfs-tests.h
+++ b/fs/btrfs/tests/btrfs-tests.h
@@ -20,57 +20,29 @@
#define __BTRFS_TESTS
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
+int btrfs_run_sanity_tests(void);
#define test_msg(fmt, ...) pr_info("BTRFS: selftest: " fmt, ##__VA_ARGS__)
struct btrfs_root;
struct btrfs_trans_handle;
-int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize);
int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize);
+int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize);
int btrfs_test_extent_io(u32 sectorsize, u32 nodesize);
int btrfs_test_inodes(u32 sectorsize, u32 nodesize);
int btrfs_test_qgroups(u32 sectorsize, u32 nodesize);
int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize);
-int btrfs_init_test_fs(void);
-void btrfs_destroy_test_fs(void);
struct inode *btrfs_new_test_inode(void);
struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void);
+void btrfs_free_dummy_fs_info(struct btrfs_fs_info *fs_info);
void btrfs_free_dummy_root(struct btrfs_root *root);
struct btrfs_block_group_cache *
btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize);
void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache);
void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans);
#else
-static inline int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
-{
- return 0;
-}
-static inline int btrfs_test_extent_buffer_operations(u32 sectorsize,
- u32 nodesize)
-{
- return 0;
-}
-static inline int btrfs_init_test_fs(void)
-{
- return 0;
-}
-static inline void btrfs_destroy_test_fs(void)
-{
-}
-static inline int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
-{
- return 0;
-}
-static inline int btrfs_test_inodes(u32 sectorsize, u32 nodesize)
-{
- return 0;
-}
-static inline int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
-{
- return 0;
-}
-static inline int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize)
+static inline int btrfs_run_sanity_tests(void)
{
return 0;
}
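
Collapsing the per-test stubs into one works because callers now only ever invoke btrfs_run_sanity_tests(); the header keeps a single static inline no-op when the config option is off. A small model of that compile-out pattern (CONFIG_SELFTEST is a stand-in for the kernel's Kconfig symbol):

#include <stdio.h>

#ifdef CONFIG_SELFTEST
int run_sanity_tests(void);     /* the real runner, built elsewhere */
#else
static inline int run_sanity_tests(void)
{
        return 0;               /* tests compiled out: report success */
}
#endif

int main(void)
{
        if (run_sanity_tests())
                fprintf(stderr, "self tests failed\n");
        else
                printf("self tests passed (or compiled out)\n");
        return 0;
}
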
diff --git a/fs/btrfs/tests/extent-buffer-tests.c b/fs/btrfs/tests/extent-buffer-tests.c
index 4f8cbd1ec5ee..199569174637 100644
--- a/fs/btrfs/tests/extent-buffer-tests.c
+++ b/fs/btrfs/tests/extent-buffer-tests.c
@@ -24,8 +24,9 @@
static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
{
- struct btrfs_path *path;
- struct btrfs_root *root;
+ struct btrfs_fs_info *fs_info;
+ struct btrfs_path *path = NULL;
+ struct btrfs_root *root = NULL;
struct extent_buffer *eb;
struct btrfs_item *item;
char *value = "mary had a little lamb";
@@ -40,17 +41,24 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
test_msg("Running btrfs_split_item tests\n");
- root = btrfs_alloc_dummy_root(sectorsize, nodesize);
+ fs_info = btrfs_alloc_dummy_fs_info();
+ if (!fs_info) {
+ test_msg("Could not allocate fs_info\n");
+ return -ENOMEM;
+ }
+
+ root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
if (IS_ERR(root)) {
test_msg("Could not allocate root\n");
- return PTR_ERR(root);
+ ret = PTR_ERR(root);
+ goto out;
}
path = btrfs_alloc_path();
if (!path) {
test_msg("Could not allocate path\n");
- kfree(root);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out;
}
path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, nodesize,
@@ -219,7 +227,8 @@ static int test_btrfs_split_item(u32 sectorsize, u32 nodesize)
}
out:
btrfs_free_path(path);
- kfree(root);
+ btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
return ret;
}
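
Every test conversion in these files follows the same shape: allocate the fs_info first, pass it to the root allocation, and unwind through a single out: label so each exit path frees both objects exactly once. A userspace sketch of that goto-cleanup idiom (all names hypothetical):

#include <stdlib.h>

struct fs_info { int dummy; };
struct root { struct fs_info *fs_info; };

static struct root *alloc_root(struct fs_info *fi)
{
        struct root *r = malloc(sizeof(*r));

        if (r)
                r->fs_info = fi;
        return r;
}

static int run_test(void)
{
        struct fs_info *fi = NULL;
        struct root *root = NULL;
        int ret = -1;

        fi = malloc(sizeof(*fi));
        if (!fi)
                goto out;
        root = alloc_root(fi);
        if (!root)
                goto out;
        ret = 0;                /* ... body of the test ... */
out:
        free(root);             /* free(NULL) is a no-op, like the kernel helpers */
        free(fi);
        return ret;
}

int main(void)
{
        return run_test() ? EXIT_FAILURE : EXIT_SUCCESS;
}
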
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c
index 3956bb2ff84c..3221c8dee272 100644
--- a/fs/btrfs/tests/free-space-tests.c
+++ b/fs/btrfs/tests/free-space-tests.c
@@ -837,6 +837,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache,
int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
{
+ struct btrfs_fs_info *fs_info;
struct btrfs_block_group_cache *cache;
struct btrfs_root *root = NULL;
int ret = -ENOMEM;
@@ -855,15 +856,17 @@ int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
return 0;
}
- root = btrfs_alloc_dummy_root(sectorsize, nodesize);
- if (IS_ERR(root)) {
- ret = PTR_ERR(root);
+ fs_info = btrfs_alloc_dummy_fs_info();
+ if (!fs_info) {
+ ret = -ENOMEM;
goto out;
}
- root->fs_info = btrfs_alloc_dummy_fs_info();
- if (!root->fs_info)
+ root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+ if (IS_ERR(root)) {
+ ret = PTR_ERR(root);
goto out;
+ }
root->fs_info->extent_root = root;
cache->fs_info = root->fs_info;
@@ -882,6 +885,7 @@ int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize)
out:
btrfs_free_dummy_block_group(cache);
btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
test_msg("Free space cache tests finished\n");
return ret;
}
diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
index aac507085ab0..7508d3b42780 100644
--- a/fs/btrfs/tests/free-space-tree-tests.c
+++ b/fs/btrfs/tests/free-space-tree-tests.c
@@ -443,23 +443,24 @@ typedef int (*test_func_t)(struct btrfs_trans_handle *,
static int run_test(test_func_t test_func, int bitmaps,
u32 sectorsize, u32 nodesize)
{
+ struct btrfs_fs_info *fs_info;
struct btrfs_root *root = NULL;
struct btrfs_block_group_cache *cache = NULL;
struct btrfs_trans_handle trans;
struct btrfs_path *path = NULL;
int ret;
- root = btrfs_alloc_dummy_root(sectorsize, nodesize);
- if (IS_ERR(root)) {
- test_msg("Couldn't allocate dummy root\n");
- ret = PTR_ERR(root);
+ fs_info = btrfs_alloc_dummy_fs_info();
+ if (!fs_info) {
+ test_msg("Couldn't allocate dummy fs info\n");
+ ret = -ENOMEM;
goto out;
}
- root->fs_info = btrfs_alloc_dummy_fs_info();
- if (!root->fs_info) {
- test_msg("Couldn't allocate dummy fs info\n");
- ret = -ENOMEM;
+ root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+ if (IS_ERR(root)) {
+ test_msg("Couldn't allocate dummy root\n");
+ ret = PTR_ERR(root);
goto out;
}
@@ -534,6 +535,7 @@ out:
btrfs_free_path(path);
btrfs_free_dummy_block_group(cache);
btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
return ret;
}
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c
index 29648c0a39f1..9f72aeda9220 100644
--- a/fs/btrfs/tests/inode-tests.c
+++ b/fs/btrfs/tests/inode-tests.c
@@ -230,6 +230,7 @@ static unsigned long vacancy_only = 0;
static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
{
+ struct btrfs_fs_info *fs_info = NULL;
struct inode *inode = NULL;
struct btrfs_root *root = NULL;
struct extent_map *em = NULL;
@@ -248,19 +249,15 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
BTRFS_I(inode)->location.offset = 0;
- root = btrfs_alloc_dummy_root(sectorsize, nodesize);
- if (IS_ERR(root)) {
- test_msg("Couldn't allocate root\n");
+ fs_info = btrfs_alloc_dummy_fs_info();
+ if (!fs_info) {
+ test_msg("Couldn't allocate dummy fs info\n");
goto out;
}
- /*
- * We do this since btrfs_get_extent wants to assign em->bdev to
- * root->fs_info->fs_devices->latest_bdev.
- */
- root->fs_info = btrfs_alloc_dummy_fs_info();
- if (!root->fs_info) {
- test_msg("Couldn't allocate dummy fs info\n");
+ root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+ if (IS_ERR(root)) {
+ test_msg("Couldn't allocate root\n");
goto out;
}
@@ -835,11 +832,13 @@ out:
free_extent_map(em);
iput(inode);
btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
return ret;
}
static int test_hole_first(u32 sectorsize, u32 nodesize)
{
+ struct btrfs_fs_info *fs_info = NULL;
struct inode *inode = NULL;
struct btrfs_root *root = NULL;
struct extent_map *em = NULL;
@@ -855,15 +854,15 @@ static int test_hole_first(u32 sectorsize, u32 nodesize)
BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
BTRFS_I(inode)->location.offset = 0;
- root = btrfs_alloc_dummy_root(sectorsize, nodesize);
- if (IS_ERR(root)) {
- test_msg("Couldn't allocate root\n");
+ fs_info = btrfs_alloc_dummy_fs_info();
+ if (!fs_info) {
+ test_msg("Couldn't allocate dummy fs info\n");
goto out;
}
- root->fs_info = btrfs_alloc_dummy_fs_info();
- if (!root->fs_info) {
- test_msg("Couldn't allocate dummy fs info\n");
+ root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+ if (IS_ERR(root)) {
+ test_msg("Couldn't allocate root\n");
goto out;
}
@@ -934,11 +933,13 @@ out:
free_extent_map(em);
iput(inode);
btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
return ret;
}
static int test_extent_accounting(u32 sectorsize, u32 nodesize)
{
+ struct btrfs_fs_info *fs_info = NULL;
struct inode *inode = NULL;
struct btrfs_root *root = NULL;
int ret = -ENOMEM;
@@ -949,15 +950,15 @@ static int test_extent_accounting(u32 sectorsize, u32 nodesize)
return ret;
}
- root = btrfs_alloc_dummy_root(sectorsize, nodesize);
- if (IS_ERR(root)) {
- test_msg("Couldn't allocate root\n");
+ fs_info = btrfs_alloc_dummy_fs_info();
+ if (!fs_info) {
+ test_msg("Couldn't allocate dummy fs info\n");
goto out;
}
- root->fs_info = btrfs_alloc_dummy_fs_info();
- if (!root->fs_info) {
- test_msg("Couldn't allocate dummy fs info\n");
+ root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+ if (IS_ERR(root)) {
+ test_msg("Couldn't allocate root\n");
goto out;
}
@@ -1132,6 +1133,7 @@ out:
NULL, GFP_KERNEL);
iput(inode);
btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
return ret;
}
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c
index 57a12c0d680b..4407fef7c16c 100644
--- a/fs/btrfs/tests/qgroup-tests.c
+++ b/fs/btrfs/tests/qgroup-tests.c
@@ -453,22 +453,24 @@ static int test_multiple_refs(struct btrfs_root *root,
int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
{
+ struct btrfs_fs_info *fs_info = NULL;
struct btrfs_root *root;
struct btrfs_root *tmp_root;
int ret = 0;
- root = btrfs_alloc_dummy_root(sectorsize, nodesize);
- if (IS_ERR(root)) {
- test_msg("Couldn't allocate root\n");
- return PTR_ERR(root);
+ fs_info = btrfs_alloc_dummy_fs_info();
+ if (!fs_info) {
+ test_msg("Couldn't allocate dummy fs info\n");
+ return -ENOMEM;
}
- root->fs_info = btrfs_alloc_dummy_fs_info();
- if (!root->fs_info) {
- test_msg("Couldn't allocate dummy fs info\n");
- ret = -ENOMEM;
+ root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
+ if (IS_ERR(root)) {
+ test_msg("Couldn't allocate root\n");
+ ret = PTR_ERR(root);
goto out;
}
+
/* We are using this root as our extent root */
root->fs_info->extent_root = root;
@@ -495,7 +497,7 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
btrfs_set_header_nritems(root->node, 0);
root->alloc_bytenr += 2 * nodesize;
- tmp_root = btrfs_alloc_dummy_root(sectorsize, nodesize);
+ tmp_root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
if (IS_ERR(tmp_root)) {
test_msg("Couldn't allocate a fs root\n");
ret = PTR_ERR(tmp_root);
@@ -510,7 +512,7 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
goto out;
}
- tmp_root = btrfs_alloc_dummy_root(sectorsize, nodesize);
+ tmp_root = btrfs_alloc_dummy_root(fs_info, sectorsize, nodesize);
if (IS_ERR(tmp_root)) {
test_msg("Couldn't allocate a fs root\n");
ret = PTR_ERR(tmp_root);
@@ -531,5 +533,6 @@ int btrfs_test_qgroups(u32 sectorsize, u32 nodesize)
ret = test_multiple_refs(root, sectorsize, nodesize);
out:
btrfs_free_dummy_root(root);
+ btrfs_free_dummy_fs_info(fs_info);
return ret;
}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 948aa186b353..9cca0a721961 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -561,6 +561,7 @@ again:
h->transaction = cur_trans;
h->root = root;
h->use_count = 1;
+ h->fs_info = root->fs_info;
h->type = type;
h->can_flush_pending_bgs = true;
@@ -1491,7 +1492,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
goto dir_item_existed;
} else if (IS_ERR(dir_item)) {
ret = PTR_ERR(dir_item);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
btrfs_release_path(path);
@@ -1504,7 +1505,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
*/
ret = btrfs_run_delayed_items(trans, root);
if (ret) { /* Transaction aborted */
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1543,7 +1544,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
if (ret) {
btrfs_tree_unlock(old);
free_extent_buffer(old);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1554,7 +1555,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
btrfs_tree_unlock(old);
free_extent_buffer(old);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
/* see comments in should_cow_block() */
@@ -1568,7 +1569,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
btrfs_tree_unlock(tmp);
free_extent_buffer(tmp);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1580,7 +1581,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
btrfs_ino(parent_inode), index,
dentry->d_name.name, dentry->d_name.len);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1588,19 +1589,19 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
if (IS_ERR(pending->snap)) {
ret = PTR_ERR(pending->snap);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
ret = btrfs_reloc_post_snapshot(trans, pending);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1622,7 +1623,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
	/* We have checked the name at the beginning, so it is impossible. */
BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1632,13 +1633,13 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
current_fs_time(parent_inode->i_sb);
ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, new_uuid.b,
BTRFS_UUID_KEY_SUBVOL, objectid);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
@@ -1647,14 +1648,14 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
objectid);
if (ret && ret != -EEXIST) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
}
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto fail;
}
@@ -1709,7 +1710,7 @@ static void update_super_roots(struct btrfs_root *root)
super->root = root_item->bytenr;
super->generation = root_item->generation;
super->root_level = root_item->level;
- if (btrfs_test_opt(root, SPACE_CACHE))
+ if (btrfs_test_opt(root->fs_info, SPACE_CACHE))
super->cache_generation = root_item->generation;
if (root->fs_info->update_uuid_tree_gen)
super->uuid_tree_generation = root_item->generation;
@@ -1850,7 +1851,7 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
WARN_ON(trans->use_count > 1);
- btrfs_abort_transaction(trans, root, err);
+ btrfs_abort_transaction(trans, err);
spin_lock(&root->fs_info->trans_lock);
@@ -1895,14 +1896,14 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
{
- if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
+ if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
return btrfs_start_delalloc_roots(fs_info, 1, -1);
return 0;
}
static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
{
- if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
+ if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1);
}
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index c5abee4f01ad..efb122643380 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -128,6 +128,7 @@ struct btrfs_trans_handle {
* Subvolume quota depends on this
*/
struct btrfs_root *root;
+ struct btrfs_fs_info *fs_info;
struct seq_list delayed_ref_elem;
struct list_head qgroup_ref_list;
struct list_head new_bgs;
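
Caching fs_info in the transaction handle is what lets every btrfs_abort_transaction() call site above drop its root argument: the helper can reach the filesystem state through the handle alone. A minimal model of that handle-carries-context refactor (names illustrative):

#include <stdio.h>

struct fs_info { const char *name; };
struct trans_handle { struct fs_info *fs_info; };

/* the old signature needed (trans, root, err); the cached pointer removes root */
static void abort_transaction(struct trans_handle *trans, int err)
{
        fprintf(stderr, "%s: transaction aborted, error %d\n",
                trans->fs_info->name, err);
}

int main(void)
{
        struct fs_info fi = { .name = "btrfs-demo" };
        struct trans_handle trans = { .fs_info = &fi };

        abort_transaction(&trans, -5);  /* -EIO, numerically */
        return 0;
}
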
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index c05f69a8ec42..fff3f3efa436 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2757,7 +2757,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
while (1) {
int batch = atomic_read(&root->log_batch);
/* when we're on an ssd, just kick the log commit out */
- if (!btrfs_test_opt(root, SSD) &&
+ if (!btrfs_test_opt(root->fs_info, SSD) &&
test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
mutex_unlock(&root->log_mutex);
schedule_timeout_uninterruptible(1);
@@ -2788,7 +2788,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
if (ret) {
blk_finish_plug(&plug);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_free_logged_extents(log, log_transid);
btrfs_set_log_full_commit(root->fs_info, trans);
mutex_unlock(&root->log_mutex);
@@ -2838,7 +2838,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
btrfs_set_log_full_commit(root->fs_info, trans);
if (ret != -ENOSPC) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
mutex_unlock(&log_root_tree->log_mutex);
goto out;
}
@@ -2898,7 +2898,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
blk_finish_plug(&plug);
if (ret) {
btrfs_set_log_full_commit(root->fs_info, trans);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_free_logged_extents(log, log_transid);
mutex_unlock(&log_root_tree->log_mutex);
goto out_wake_log_root;
@@ -2934,7 +2934,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
ret = write_ctree_super(trans, root->fs_info->tree_root, 1);
if (ret) {
btrfs_set_log_full_commit(root->fs_info, trans);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out_wake_log_root;
}
@@ -2991,7 +2991,7 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
ret = walk_log_tree(trans, log, &wc);
/* I don't think this can happen but just in case */
if (ret)
- btrfs_abort_transaction(trans, log, ret);
+ btrfs_abort_transaction(trans, ret);
while (1) {
ret = find_first_extent_bit(&log->dirty_log_pages,
@@ -3160,7 +3160,7 @@ out_unlock:
btrfs_set_log_full_commit(root->fs_info, trans);
ret = 0;
} else if (ret < 0)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_end_log_trans(root);
@@ -3193,7 +3193,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
btrfs_set_log_full_commit(root->fs_info, trans);
ret = 0;
} else if (ret < 0 && ret != -ENOENT)
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_end_log_trans(root);
return ret;
@@ -4469,7 +4469,8 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
static int btrfs_check_ref_name_override(struct extent_buffer *eb,
const int slot,
const struct btrfs_key *key,
- struct inode *inode)
+ struct inode *inode,
+ u64 *other_ino)
{
int ret;
struct btrfs_path *search_path;
@@ -4528,7 +4529,16 @@ static int btrfs_check_ref_name_override(struct extent_buffer *eb,
search_path, parent,
name, this_name_len, 0);
if (di && !IS_ERR(di)) {
- ret = 1;
+ struct btrfs_key di_key;
+
+ btrfs_dir_item_key_to_cpu(search_path->nodes[0],
+ di, &di_key);
+ if (di_key.type == BTRFS_INODE_ITEM_KEY) {
+ ret = 1;
+ *other_ino = di_key.objectid;
+ } else {
+ ret = -EAGAIN;
+ }
goto out;
} else if (IS_ERR(di)) {
ret = PTR_ERR(di);
@@ -4703,6 +4713,10 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
ins_nr = 0;
ret = btrfs_search_forward(root, &min_key,
path, trans->transid);
+ if (ret < 0) {
+ err = ret;
+ goto out_unlock;
+ }
if (ret != 0)
break;
again:
@@ -4718,16 +4732,71 @@ again:
if ((min_key.type == BTRFS_INODE_REF_KEY ||
min_key.type == BTRFS_INODE_EXTREF_KEY) &&
BTRFS_I(inode)->generation == trans->transid) {
+ u64 other_ino = 0;
+
ret = btrfs_check_ref_name_override(path->nodes[0],
path->slots[0],
- &min_key, inode);
+ &min_key, inode,
+ &other_ino);
if (ret < 0) {
err = ret;
goto out_unlock;
} else if (ret > 0) {
- err = 1;
- btrfs_set_log_full_commit(root->fs_info, trans);
- goto out_unlock;
+ struct btrfs_key inode_key;
+ struct inode *other_inode;
+
+ if (ins_nr > 0) {
+ ins_nr++;
+ } else {
+ ins_nr = 1;
+ ins_start_slot = path->slots[0];
+ }
+ ret = copy_items(trans, inode, dst_path, path,
+ &last_extent, ins_start_slot,
+ ins_nr, inode_only,
+ logged_isize);
+ if (ret < 0) {
+ err = ret;
+ goto out_unlock;
+ }
+ ins_nr = 0;
+ btrfs_release_path(path);
+ inode_key.objectid = other_ino;
+ inode_key.type = BTRFS_INODE_ITEM_KEY;
+ inode_key.offset = 0;
+ other_inode = btrfs_iget(root->fs_info->sb,
+ &inode_key, root,
+ NULL);
+ /*
+ * If the other inode that had a conflicting dir
+ * entry was deleted in the current transaction,
+ * we don't need to do more work nor fallback to
+			 * we don't need to do more work nor fall back to
+ */
+ if (IS_ERR(other_inode) &&
+ PTR_ERR(other_inode) == -ENOENT) {
+ goto next_key;
+ } else if (IS_ERR(other_inode)) {
+ err = PTR_ERR(other_inode);
+ goto out_unlock;
+ }
+ /*
+ * We are safe logging the other inode without
+ * acquiring its i_mutex as long as we log with
+ * the LOG_INODE_EXISTS mode. We're safe against
+ * concurrent renames of the other inode as well
+ * because during a rename we pin the log and
+ * update the log with the new name before we
+ * unpin it.
+ */
+ err = btrfs_log_inode(trans, root, other_inode,
+ LOG_INODE_EXISTS,
+ 0, LLONG_MAX, ctx);
+ iput(other_inode);
+ if (err)
+ goto out_unlock;
+ else
+ goto next_key;
}
}
@@ -4795,7 +4864,7 @@ next_slot:
ins_nr = 0;
}
btrfs_release_path(path);
-
+next_key:
if (min_key.offset < (u64)-1) {
min_key.offset++;
} else if (min_key.type < max_key.type) {
@@ -4989,8 +5058,12 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
break;
- if (IS_ROOT(parent))
+ if (IS_ROOT(parent)) {
+ inode = d_inode(parent);
+ if (btrfs_must_commit_transaction(trans, inode))
+ ret = 1;
break;
+ }
parent = dget_parent(parent);
dput(old_parent);
@@ -5301,7 +5374,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
sb = inode->i_sb;
- if (btrfs_test_opt(root, NOTREELOG)) {
+ if (btrfs_test_opt(root->fs_info, NOTREELOG)) {
ret = 1;
goto end_no_trans;
}
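
The tree-log change threads an out-parameter through the ref-name check so that, instead of forcing a full transaction commit, the caller learns which inode owns the conflicting name and logs that inode too. A stripped-down model of the return-code-plus-out-parameter convention (the lookup here is faked; the kernel walks directory items instead):

#include <stdio.h>
#include <string.h>

/*
 * Returns 1 and fills *other_ino when another inode already owns the
 * name, 0 when there is no conflict.
 */
static int check_name_override(const char *name, unsigned long long *other_ino)
{
        if (strcmp(name, "foo") == 0) {
                *other_ino = 257;       /* pretend inode 257 owns "foo" */
                return 1;
        }
        return 0;
}

int main(void)
{
        unsigned long long other_ino = 0;

        if (check_name_override("foo", &other_ino))
                printf("conflict: inode %llu owns the name, log it too\n",
                       other_ino);
        else
                printf("no conflict, log normally\n");
        return 0;
}
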
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 589f128173b1..51f125508771 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -140,7 +140,6 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
-static void btrfs_close_one_device(struct btrfs_device *device);
DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
@@ -462,7 +461,7 @@ loop_lock:
sync_pending = 0;
}
- btrfsic_submit_bio(cur->bi_rw, cur);
+ btrfsic_submit_bio(cur);
num_run++;
batch_run++;
@@ -853,6 +852,46 @@ static void free_device(struct rcu_head *head)
schedule_work(&device->rcu_work);
}
+static void btrfs_close_one_device(struct btrfs_device *device)
+{
+ struct btrfs_fs_devices *fs_devices = device->fs_devices;
+ struct btrfs_device *new_device;
+ struct rcu_string *name;
+
+ if (device->bdev)
+ fs_devices->open_devices--;
+
+ if (device->writeable &&
+ device->devid != BTRFS_DEV_REPLACE_DEVID) {
+ list_del_init(&device->dev_alloc_list);
+ fs_devices->rw_devices--;
+ }
+
+ if (device->missing)
+ fs_devices->missing_devices--;
+
+ if (device->bdev && device->writeable) {
+ sync_blockdev(device->bdev);
+ invalidate_bdev(device->bdev);
+ }
+
+ new_device = btrfs_alloc_device(NULL, &device->devid,
+ device->uuid);
+ BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
+
+ /* Safe because we are under uuid_mutex */
+ if (device->name) {
+ name = rcu_string_strdup(device->name->str, GFP_NOFS);
+ BUG_ON(!name); /* -ENOMEM */
+ rcu_assign_pointer(new_device->name, name);
+ }
+
+ list_replace_rcu(&device->dev_list, &new_device->dev_list);
+ new_device->fs_devices = device->fs_devices;
+
+ call_rcu(&device->rcu, free_device);
+}
+
static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
struct btrfs_device *device, *tmp;
@@ -2399,14 +2438,14 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
ret = init_first_rw_device(trans, root, device);
unlock_chunks(root);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto error_trans;
}
}
ret = btrfs_add_device(trans, root, device);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto error_trans;
}
@@ -2415,7 +2454,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
ret = btrfs_finish_sprout(trans, root);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto error_trans;
}
@@ -2801,7 +2840,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
&dev_extent_len);
if (ret) {
mutex_unlock(&fs_devices->device_list_mutex);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -2820,7 +2859,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
ret = btrfs_update_device(trans, map->stripes[i].dev);
if (ret) {
mutex_unlock(&fs_devices->device_list_mutex);
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
}
@@ -2829,7 +2868,7 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
ret = btrfs_free_chunk(trans, root, chunk_objectid, chunk_offset);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -2838,14 +2877,14 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
if (ret) {
- btrfs_abort_transaction(trans, root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
}
ret = btrfs_remove_block_group(trans, extent_root, chunk_offset, em);
if (ret) {
- btrfs_abort_transaction(trans, extent_root, ret);
+ btrfs_abort_transaction(trans, ret);
goto out;
}
@@ -2902,7 +2941,7 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset)
* chunk tree entries
*/
ret = btrfs_remove_chunk(trans, root, chunk_offset);
- btrfs_end_transaction(trans, root);
+ btrfs_end_transaction(trans, extent_root);
return ret;
}
@@ -3421,7 +3460,7 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
u64 size_to_free;
u64 chunk_type;
struct btrfs_chunk *chunk;
- struct btrfs_path *path;
+ struct btrfs_path *path = NULL;
struct btrfs_key key;
struct btrfs_key found_key;
struct btrfs_trans_handle *trans;
@@ -3455,13 +3494,33 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
ret = btrfs_shrink_device(device, old_size - size_to_free);
if (ret == -ENOSPC)
break;
- BUG_ON(ret);
+ if (ret) {
+ /* btrfs_shrink_device never returns ret > 0 */
+ WARN_ON(ret > 0);
+ goto error;
+ }
trans = btrfs_start_transaction(dev_root, 0);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans)) {
+ ret = PTR_ERR(trans);
+ btrfs_info_in_rcu(fs_info,
+ "resize: unable to start transaction after shrinking device %s (error %d), old size %llu, new size %llu",
+ rcu_str_deref(device->name), ret,
+ old_size, old_size - size_to_free);
+ goto error;
+ }
ret = btrfs_grow_device(trans, device, old_size);
- BUG_ON(ret);
+ if (ret) {
+ btrfs_end_transaction(trans, dev_root);
+ /* btrfs_grow_device never returns ret > 0 */
+ WARN_ON(ret > 0);
+ btrfs_info_in_rcu(fs_info,
+ "resize: unable to grow device after shrinking device %s (error %d), old size %llu, new size %llu",
+ rcu_str_deref(device->name), ret,
+ old_size, old_size - size_to_free);
+ goto error;
+ }
btrfs_end_transaction(trans, dev_root);
}
@@ -3885,7 +3944,7 @@ int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
}
spin_unlock(&fs_info->balance_lock);
- if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
+ if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
btrfs_info(fs_info, "force skipping balance");
return 0;
}
@@ -4240,7 +4299,7 @@ int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
BTRFS_UUID_TREE_OBJECTID);
if (IS_ERR(uuid_root)) {
ret = PTR_ERR(uuid_root);
- btrfs_abort_transaction(trans, tree_root, ret);
+ btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans, tree_root);
return ret;
}
@@ -4514,8 +4573,7 @@ static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
btrfs_set_fs_incompat(info, RAID56);
}
-#define BTRFS_MAX_DEVS(r) ((BTRFS_LEAF_DATA_SIZE(r) \
- - sizeof(struct btrfs_item) \
+#define BTRFS_MAX_DEVS(r) ((BTRFS_MAX_ITEM_SIZE(r) \
- sizeof(struct btrfs_chunk)) \
/ sizeof(struct btrfs_stripe) + 1)
@@ -5260,7 +5318,7 @@ void btrfs_put_bbio(struct btrfs_bio *bbio)
kfree(bbio);
}
-static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
+static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret,
int mirror_num, int need_raid_map)
@@ -5346,7 +5404,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
raid56_full_stripe_start *= full_stripe_len;
}
- if (rw & REQ_DISCARD) {
+ if (op == REQ_OP_DISCARD) {
/* we don't discard raid56 yet */
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
ret = -EOPNOTSUPP;
@@ -5359,7 +5417,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
For other RAID types and for RAID[56] reads, just allow a single
stripe (on a single disk). */
if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
- (rw & REQ_WRITE)) {
+ (op == REQ_OP_WRITE)) {
max_len = stripe_len * nr_data_stripes(map) -
(offset - raid56_full_stripe_start);
} else {
@@ -5384,8 +5442,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
btrfs_dev_replace_set_lock_blocking(dev_replace);
if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
- !(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) &&
- dev_replace->tgtdev != NULL) {
+ op != REQ_OP_WRITE && op != REQ_OP_DISCARD &&
+ op != REQ_GET_READ_MIRRORS && dev_replace->tgtdev != NULL) {
/*
* in dev-replace case, for repair case (that's the only
* case where the mirror is selected explicitly when
@@ -5472,15 +5530,17 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
(offset + *length);
if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
- if (rw & REQ_DISCARD)
+ if (op == REQ_OP_DISCARD)
num_stripes = min_t(u64, map->num_stripes,
stripe_nr_end - stripe_nr_orig);
stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
&stripe_index);
- if (!(rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)))
+ if (op != REQ_OP_WRITE && op != REQ_OP_DISCARD &&
+ op != REQ_GET_READ_MIRRORS)
mirror_num = 1;
} else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
- if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS))
+ if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD ||
+ op == REQ_GET_READ_MIRRORS)
num_stripes = map->num_stripes;
else if (mirror_num)
stripe_index = mirror_num - 1;
@@ -5493,7 +5553,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
}
} else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
- if (rw & (REQ_WRITE | REQ_DISCARD | REQ_GET_READ_MIRRORS)) {
+ if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD ||
+ op == REQ_GET_READ_MIRRORS) {
num_stripes = map->num_stripes;
} else if (mirror_num) {
stripe_index = mirror_num - 1;
@@ -5507,9 +5568,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
stripe_index *= map->sub_stripes;
- if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
+ if (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS)
num_stripes = map->sub_stripes;
- else if (rw & REQ_DISCARD)
+ else if (op == REQ_OP_DISCARD)
num_stripes = min_t(u64, map->sub_stripes *
(stripe_nr_end - stripe_nr_orig),
map->num_stripes);
@@ -5527,7 +5588,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
} else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
if (need_raid_map &&
- ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
+ (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS ||
mirror_num > 1)) {
/* push stripe_nr back to the start of the full stripe */
stripe_nr = div_u64(raid56_full_stripe_start,
@@ -5555,8 +5616,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
/* We distribute the parity blocks across stripes */
div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
&stripe_index);
- if (!(rw & (REQ_WRITE | REQ_DISCARD |
- REQ_GET_READ_MIRRORS)) && mirror_num <= 1)
+ if ((op != REQ_OP_WRITE && op != REQ_OP_DISCARD &&
+ op != REQ_GET_READ_MIRRORS) && mirror_num <= 1)
mirror_num = 1;
}
} else {
@@ -5579,9 +5640,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
num_alloc_stripes = num_stripes;
if (dev_replace_is_ongoing) {
- if (rw & (REQ_WRITE | REQ_DISCARD))
+ if (op == REQ_OP_WRITE || op == REQ_OP_DISCARD)
num_alloc_stripes <<= 1;
- if (rw & REQ_GET_READ_MIRRORS)
+ if (op == REQ_GET_READ_MIRRORS)
num_alloc_stripes++;
tgtdev_indexes = num_stripes;
}
@@ -5596,7 +5657,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
/* build raid_map */
if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
- need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
+ need_raid_map &&
+ ((op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS) ||
mirror_num > 1)) {
u64 tmp;
unsigned rot;
@@ -5621,7 +5683,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
RAID6_Q_STRIPE;
}
- if (rw & REQ_DISCARD) {
+ if (op == REQ_OP_DISCARD) {
u32 factor = 0;
u32 sub_stripes = 0;
u64 stripes_per_dev = 0;
@@ -5701,14 +5763,15 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
}
}
- if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
+ if (op == REQ_OP_WRITE || op == REQ_GET_READ_MIRRORS)
max_errors = btrfs_chunk_max_errors(map);
if (bbio->raid_map)
sort_parity_stripes(bbio, num_stripes);
tgtdev_indexes = 0;
- if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
+ if (dev_replace_is_ongoing &&
+ (op == REQ_OP_WRITE || op == REQ_OP_DISCARD) &&
dev_replace->tgtdev != NULL) {
int index_where_to_add;
u64 srcdev_devid = dev_replace->srcdev->devid;
@@ -5743,7 +5806,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
}
}
num_stripes = index_where_to_add;
- } else if (dev_replace_is_ongoing && (rw & REQ_GET_READ_MIRRORS) &&
+ } else if (dev_replace_is_ongoing && (op == REQ_GET_READ_MIRRORS) &&
dev_replace->tgtdev != NULL) {
u64 srcdev_devid = dev_replace->srcdev->devid;
int index_srcdev = 0;
@@ -5815,21 +5878,21 @@ out:
return ret;
}
-int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
+int btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret, int mirror_num)
{
- return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
+ return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
mirror_num, 0);
}
/* For Scrub/replace */
-int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
+int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int op,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret, int mirror_num,
int need_raid_map)
{
- return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
+ return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
mirror_num, need_raid_map);
}
@@ -5943,13 +6006,13 @@ static void btrfs_end_bio(struct bio *bio)
BUG_ON(stripe_index >= bbio->num_stripes);
dev = bbio->stripes[stripe_index].dev;
if (dev->bdev) {
- if (bio->bi_rw & WRITE)
+ if (bio_op(bio) == REQ_OP_WRITE)
btrfs_dev_stat_inc(dev,
BTRFS_DEV_STAT_WRITE_ERRS);
else
btrfs_dev_stat_inc(dev,
BTRFS_DEV_STAT_READ_ERRS);
- if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
+ if ((bio->bi_opf & WRITE_FLUSH) == WRITE_FLUSH)
btrfs_dev_stat_inc(dev,
BTRFS_DEV_STAT_FLUSH_ERRS);
btrfs_dev_stat_print_on_error(dev);
@@ -5997,7 +6060,7 @@ static void btrfs_end_bio(struct bio *bio)
*/
static noinline void btrfs_schedule_bio(struct btrfs_root *root,
struct btrfs_device *device,
- int rw, struct bio *bio)
+ struct bio *bio)
{
int should_queue = 1;
struct btrfs_pending_bios *pending_bios;
@@ -6008,9 +6071,9 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root,
}
/* don't bother with additional async steps for reads, right now */
- if (!(rw & REQ_WRITE)) {
+ if (bio_op(bio) == REQ_OP_READ) {
bio_get(bio);
- btrfsic_submit_bio(rw, bio);
+ btrfsic_submit_bio(bio);
bio_put(bio);
return;
}
@@ -6024,10 +6087,9 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root,
atomic_inc(&root->fs_info->nr_async_bios);
WARN_ON(bio->bi_next);
bio->bi_next = NULL;
- bio->bi_rw |= rw;
spin_lock(&device->io_lock);
- if (bio->bi_rw & REQ_SYNC)
+ if (bio->bi_opf & REQ_SYNC)
pending_bios = &device->pending_sync_bios;
else
pending_bios = &device->pending_bios;
@@ -6050,7 +6112,7 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root,
static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
struct bio *bio, u64 physical, int dev_nr,
- int rw, int async)
+ int async)
{
struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
@@ -6064,8 +6126,8 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
rcu_read_lock();
name = rcu_dereference(dev->name);
- pr_debug("btrfs_map_bio: rw %d, sector=%llu, dev=%lu "
- "(%s id %llu), size=%u\n", rw,
+ pr_debug("btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu "
+ "(%s id %llu), size=%u\n", bio_op(bio), bio->bi_opf,
(u64)bio->bi_iter.bi_sector, (u_long)dev->bdev->bd_dev,
name->str, dev->devid, bio->bi_iter.bi_size);
rcu_read_unlock();
@@ -6076,9 +6138,9 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
btrfs_bio_counter_inc_noblocked(root->fs_info);
if (async)
- btrfs_schedule_bio(root, dev, rw, bio);
+ btrfs_schedule_bio(root, dev, bio);
else
- btrfsic_submit_bio(rw, bio);
+ btrfsic_submit_bio(bio);
}
static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
@@ -6095,7 +6157,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
}
}
-int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
+int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
int mirror_num, int async_submit)
{
struct btrfs_device *dev;
@@ -6112,8 +6174,8 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
map_length = length;
btrfs_bio_counter_inc_blocked(root->fs_info);
- ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
- mirror_num, 1);
+ ret = __btrfs_map_block(root->fs_info, bio_op(bio), logical,
+ &map_length, &bbio, mirror_num, 1);
if (ret) {
btrfs_bio_counter_dec(root->fs_info);
return ret;
@@ -6127,10 +6189,10 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
atomic_set(&bbio->stripes_pending, bbio->num_stripes);
if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
- ((rw & WRITE) || (mirror_num > 1))) {
+ ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
/* In this case, map_length has been set to the length of
a single stripe; not the whole write */
- if (rw & WRITE) {
+ if (bio_op(bio) == REQ_OP_WRITE) {
ret = raid56_parity_write(root, bio, bbio, map_length);
} else {
ret = raid56_parity_recover(root, bio, bbio, map_length,
@@ -6149,7 +6211,8 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
dev = bbio->stripes[dev_nr].dev;
- if (!dev || !dev->bdev || (rw & WRITE && !dev->writeable)) {
+ if (!dev || !dev->bdev ||
+ (bio_op(bio) == REQ_OP_WRITE && !dev->writeable)) {
bbio_error(bbio, first_bio, logical);
continue;
}
@@ -6161,7 +6224,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
bio = first_bio;
submit_stripe_bio(root, bbio, bio,
- bbio->stripes[dev_nr].physical, dev_nr, rw,
+ bbio->stripes[dev_nr].physical, dev_nr,
async_submit);
}
btrfs_bio_counter_dec(root->fs_info);
@@ -6396,7 +6459,8 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
BTRFS_UUID_SIZE);
map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
uuid, NULL);
- if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
+ if (!map->stripes[i].dev &&
+ !btrfs_test_opt(root->fs_info, DEGRADED)) {
free_extent_map(em);
return -EIO;
}
@@ -6464,7 +6528,7 @@ static struct btrfs_fs_devices *open_seed_devices(struct btrfs_root *root,
fs_devices = find_fsid(fsid);
if (!fs_devices) {
- if (!btrfs_test_opt(root, DEGRADED))
+ if (!btrfs_test_opt(root->fs_info, DEGRADED))
return ERR_PTR(-ENOENT);
fs_devices = alloc_fs_devices(fsid);
@@ -6526,7 +6590,7 @@ static int read_one_dev(struct btrfs_root *root,
device = btrfs_find_device(root->fs_info, devid, dev_uuid, fs_uuid);
if (!device) {
- if (!btrfs_test_opt(root, DEGRADED))
+ if (!btrfs_test_opt(root->fs_info, DEGRADED))
return -EIO;
device = add_missing_dev(root, fs_devices, devid, dev_uuid);
@@ -6535,7 +6599,7 @@ static int read_one_dev(struct btrfs_root *root,
btrfs_warn(root->fs_info, "devid %llu uuid %pU missing",
devid, dev_uuid);
} else {
- if (!device->bdev && !btrfs_test_opt(root, DEGRADED))
+ if (!device->bdev && !btrfs_test_opt(root->fs_info, DEGRADED))
return -EIO;
		if (!device->bdev && !device->missing) {
@@ -7138,38 +7202,3 @@ void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info)
fs_devices = fs_devices->seed;
}
}
-
-static void btrfs_close_one_device(struct btrfs_device *device)
-{
- struct btrfs_fs_devices *fs_devices = device->fs_devices;
- struct btrfs_device *new_device;
- struct rcu_string *name;
-
- if (device->bdev)
- fs_devices->open_devices--;
-
- if (device->writeable &&
- device->devid != BTRFS_DEV_REPLACE_DEVID) {
- list_del_init(&device->dev_alloc_list);
- fs_devices->rw_devices--;
- }
-
- if (device->missing)
- fs_devices->missing_devices--;
-
- new_device = btrfs_alloc_device(NULL, &device->devid,
- device->uuid);
- BUG_ON(IS_ERR(new_device)); /* -ENOMEM */
-
- /* Safe because we are under uuid_mutex */
- if (device->name) {
- name = rcu_string_strdup(device->name->str, GFP_NOFS);
- BUG_ON(!name); /* -ENOMEM */
- rcu_assign_pointer(new_device->name, name);
- }
-
- list_replace_rcu(&device->dev_list, &new_device->dev_list);
- new_device->fs_devices = device->fs_devices;
-
- call_rcu(&device->rcu, free_device);
-}
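
Most of the volumes.c churn replaces bitmask tests on a combined rw word with equality tests on a single operation code: an op is exactly one of read/write/discard, so "rw & REQ_WRITE" becomes "op == REQ_OP_WRITE". A tiny model of why the checks become equalities (enum values are illustrative, not the block layer's):

#include <stdio.h>

enum req_op { REQ_OP_READ, REQ_OP_WRITE, REQ_OP_DISCARD };

/* writes and discards must touch every stripe; reads pick one mirror */
static int op_hits_all_stripes(enum req_op op)
{
        return op == REQ_OP_WRITE || op == REQ_OP_DISCARD;
}

int main(void)
{
        printf("write hits all stripes: %d\n", op_hits_all_stripes(REQ_OP_WRITE));
        printf("read hits all stripes:  %d\n", op_hits_all_stripes(REQ_OP_READ));
        return 0;
}
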
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 0ac90f8d85bd..6613e6335ca2 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -375,10 +375,10 @@ int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
u64 end, u64 *length);
void btrfs_get_bbio(struct btrfs_bio *bbio);
void btrfs_put_bbio(struct btrfs_bio *bbio);
-int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
+int btrfs_map_block(struct btrfs_fs_info *fs_info, int op,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret, int mirror_num);
-int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
+int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int op,
u64 logical, u64 *length,
struct btrfs_bio **bbio_ret, int mirror_num,
int need_raid_map);
@@ -391,7 +391,7 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root, u64 type);
void btrfs_mapping_init(struct btrfs_mapping_tree *tree);
void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree);
-int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
+int btrfs_map_bio(struct btrfs_root *root, struct bio *bio,
int mirror_num, int async_submit);
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
fmode_t flags, void *holder);
diff --git a/fs/buffer.c b/fs/buffer.c
index 754813a6962b..9c8eb9b6db6a 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/fs.h>
+#include <linux/iomap.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
@@ -45,7 +46,7 @@
#include <trace/events/block.h>
static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
-static int submit_bh_wbc(int rw, struct buffer_head *bh,
+static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
unsigned long bio_flags,
struct writeback_control *wbc);
@@ -153,7 +154,7 @@ static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
if (uptodate) {
set_buffer_uptodate(bh);
} else {
- /* This happens, due to failed READA attempts. */
+ /* This happens, due to failed read-ahead attempts. */
clear_buffer_uptodate(bh);
}
unlock_buffer(bh);
@@ -588,7 +589,7 @@ void write_boundary_block(struct block_device *bdev,
struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
if (bh) {
if (buffer_dirty(bh))
- ll_rw_block(WRITE, 1, &bh);
+ ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
put_bh(bh);
}
}
@@ -1225,7 +1226,7 @@ static struct buffer_head *__bread_slow(struct buffer_head *bh)
} else {
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
- submit_bh(READ, bh);
+ submit_bh(REQ_OP_READ, 0, bh);
wait_on_buffer(bh);
if (buffer_uptodate(bh))
return bh;
@@ -1395,7 +1396,7 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
{
struct buffer_head *bh = __getblk(bdev, block, size);
if (likely(bh)) {
- ll_rw_block(READA, 1, &bh);
+ ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
brelse(bh);
}
}
@@ -1687,7 +1688,7 @@ static struct buffer_head *create_page_buffers(struct page *page, struct inode *
* WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
* causes the writes to be flagged as synchronous writes.
*/
-static int __block_write_full_page(struct inode *inode, struct page *page,
+int __block_write_full_page(struct inode *inode, struct page *page,
get_block_t *get_block, struct writeback_control *wbc,
bh_end_io_t *handler)
{
@@ -1697,7 +1698,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
struct buffer_head *bh, *head;
unsigned int blocksize, bbits;
int nr_underway = 0;
- int write_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
+ int write_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
head = create_page_buffers(page, inode,
(1 << BH_Dirty)|(1 << BH_Uptodate));
@@ -1786,7 +1787,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
do {
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
- submit_bh_wbc(write_op, bh, 0, wbc);
+ submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, 0, wbc);
nr_underway++;
}
bh = next;
@@ -1840,7 +1841,7 @@ recover:
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
clear_buffer_dirty(bh);
- submit_bh_wbc(write_op, bh, 0, wbc);
+ submit_bh_wbc(REQ_OP_WRITE, write_flags, bh, 0, wbc);
nr_underway++;
}
bh = next;
@@ -1848,6 +1849,7 @@ recover:
unlock_page(page);
goto done;
}
+EXPORT_SYMBOL(__block_write_full_page);
/*
* If a page has any new buffers, zero them out here, and mark them uptodate
@@ -1891,8 +1893,62 @@ void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
}
EXPORT_SYMBOL(page_zero_new_buffers);
-int __block_write_begin(struct page *page, loff_t pos, unsigned len,
- get_block_t *get_block)
+static void
+iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
+ struct iomap *iomap)
+{
+ loff_t offset = block << inode->i_blkbits;
+
+ bh->b_bdev = iomap->bdev;
+
+ /*
+	 * Block points to the offset in the file we need to map; iomap
+	 * contains the offset at which the map starts. If the map ends
+	 * before the current block, do not map the buffer and let the
+	 * caller handle it.
+ */
+ BUG_ON(offset >= iomap->offset + iomap->length);
+
+ switch (iomap->type) {
+ case IOMAP_HOLE:
+ /*
+ * If the buffer is not up to date or beyond the current EOF,
+ * we need to mark it as new to ensure sub-block zeroing is
+ * executed if necessary.
+ */
+ if (!buffer_uptodate(bh) ||
+ (offset >= i_size_read(inode)))
+ set_buffer_new(bh);
+ break;
+ case IOMAP_DELALLOC:
+ if (!buffer_uptodate(bh) ||
+ (offset >= i_size_read(inode)))
+ set_buffer_new(bh);
+ set_buffer_uptodate(bh);
+ set_buffer_mapped(bh);
+ set_buffer_delay(bh);
+ break;
+ case IOMAP_UNWRITTEN:
+ /*
+ * For unwritten regions, we always need to ensure that
+		 * sub-block writes zero the regions of the block we are not
+		 * writing to. Set the buffer as new to ensure this.
+ */
+ set_buffer_new(bh);
+ set_buffer_unwritten(bh);
+ /* FALLTHRU */
+ case IOMAP_MAPPED:
+ if (offset >= i_size_read(inode))
+ set_buffer_new(bh);
+ bh->b_blocknr = (iomap->blkno >> (inode->i_blkbits - 9)) +
+ ((offset - iomap->offset) >> inode->i_blkbits);
+ set_buffer_mapped(bh);
+ break;
+ }
+}
+
+int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
+ get_block_t *get_block, struct iomap *iomap)
{
unsigned from = pos & (PAGE_SIZE - 1);
unsigned to = from + len;
@@ -1928,9 +1984,14 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
clear_buffer_new(bh);
if (!buffer_mapped(bh)) {
WARN_ON(bh->b_size != blocksize);
- err = get_block(inode, block, bh, 1);
- if (err)
- break;
+ if (get_block) {
+ err = get_block(inode, block, bh, 1);
+ if (err)
+ break;
+ } else {
+ iomap_to_bh(inode, block, bh, iomap);
+ }
+
if (buffer_new(bh)) {
unmap_underlying_metadata(bh->b_bdev,
bh->b_blocknr);
@@ -1955,7 +2016,7 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
!buffer_unwritten(bh) &&
(block_start < from || block_end > to)) {
- ll_rw_block(READ, 1, &bh);
+ ll_rw_block(REQ_OP_READ, 0, 1, &bh);
*wait_bh++=bh;
}
}
@@ -1971,6 +2032,12 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
page_zero_new_buffers(page, from, to);
return err;
}
+
+int __block_write_begin(struct page *page, loff_t pos, unsigned len,
+ get_block_t *get_block)
+{
+ return __block_write_begin_int(page, pos, len, get_block, NULL);
+}
EXPORT_SYMBOL(__block_write_begin);
static int __block_commit_write(struct inode *inode, struct page *page,
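
In the IOMAP_MAPPED arm above, iomap->blkno is expressed in 512-byte sectors, so the code shifts by (i_blkbits - 9) to convert to filesystem blocks and then adds the block distance into the mapping. A self-contained check of that arithmetic with an assumed 4 KiB block size:

#include <stdio.h>

int main(void)
{
        unsigned int blkbits = 12;                      /* 4096-byte blocks */
        unsigned long long iomap_blkno = 80;            /* map starts at sector 80 */
        unsigned long long iomap_offset = 0x4000;       /* file offset the map covers */
        unsigned long long offset = 0x6000;             /* byte offset of the block to map */

        unsigned long long b_blocknr =
                (iomap_blkno >> (blkbits - 9)) +
                ((offset - iomap_offset) >> blkbits);

        /* sector 80 == fs block 10, plus 0x2000 bytes == 2 blocks further */
        printf("b_blocknr = %llu\n", b_blocknr);        /* prints 12 */
        return 0;
}
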
@@ -2248,7 +2315,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
if (buffer_uptodate(bh))
end_buffer_async_read(bh, 1);
else
- submit_bh(READ, bh);
+ submit_bh(REQ_OP_READ, 0, bh);
}
return 0;
}
@@ -2582,7 +2649,7 @@ int nobh_write_begin(struct address_space *mapping,
if (block_start < from || block_end > to) {
lock_buffer(bh);
bh->b_end_io = end_buffer_read_nobh;
- submit_bh(READ, bh);
+ submit_bh(REQ_OP_READ, 0, bh);
nr_reads++;
}
}
@@ -2852,7 +2919,7 @@ int block_truncate_page(struct address_space *mapping,
if (!buffer_uptodate(bh) && !buffer_delay(bh) && !buffer_unwritten(bh)) {
err = -EIO;
- ll_rw_block(READ, 1, &bh);
+ ll_rw_block(REQ_OP_READ, 0, 1, &bh);
wait_on_buffer(bh);
/* Uhhuh. Read error. Complain and punt. */
if (!buffer_uptodate(bh))
@@ -2949,7 +3016,7 @@ static void end_bio_bh_io_sync(struct bio *bio)
* errors, this only handles the "we need to be able to
* do IO at the final sector" case.
*/
-void guard_bio_eod(int rw, struct bio *bio)
+void guard_bio_eod(int op, struct bio *bio)
{
sector_t maxsector;
struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
@@ -2979,13 +3046,13 @@ void guard_bio_eod(int rw, struct bio *bio)
bvec->bv_len -= truncated_bytes;
/* ..and clear the end of the buffer for reads */
- if ((rw & RW_MASK) == READ) {
+ if (op == REQ_OP_READ) {
zero_user(bvec->bv_page, bvec->bv_offset + bvec->bv_len,
truncated_bytes);
}
}
-static int submit_bh_wbc(int rw, struct buffer_head *bh,
+static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
unsigned long bio_flags, struct writeback_control *wbc)
{
struct bio *bio;
@@ -2999,7 +3066,7 @@ static int submit_bh_wbc(int rw, struct buffer_head *bh,
/*
* Only clear out a write error when rewriting
*/
- if (test_set_buffer_req(bh) && (rw & WRITE))
+ if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE))
clear_buffer_write_io_error(bh);
/*
@@ -3024,39 +3091,42 @@ static int submit_bh_wbc(int rw, struct buffer_head *bh,
bio->bi_flags |= bio_flags;
/* Take care of bh's that straddle the end of the device */
- guard_bio_eod(rw, bio);
+ guard_bio_eod(op, bio);
if (buffer_meta(bh))
- rw |= REQ_META;
+ op_flags |= REQ_META;
if (buffer_prio(bh))
- rw |= REQ_PRIO;
+ op_flags |= REQ_PRIO;
+ bio_set_op_attrs(bio, op, op_flags);
- submit_bio(rw, bio);
+ submit_bio(bio);
return 0;
}
-int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
+int _submit_bh(int op, int op_flags, struct buffer_head *bh,
+ unsigned long bio_flags)
{
- return submit_bh_wbc(rw, bh, bio_flags, NULL);
+ return submit_bh_wbc(op, op_flags, bh, bio_flags, NULL);
}
EXPORT_SYMBOL_GPL(_submit_bh);
-int submit_bh(int rw, struct buffer_head *bh)
+int submit_bh(int op, int op_flags, struct buffer_head *bh)
{
- return submit_bh_wbc(rw, bh, 0, NULL);
+ return submit_bh_wbc(op, op_flags, bh, 0, NULL);
}
EXPORT_SYMBOL(submit_bh);
/**
* ll_rw_block: low-level access to block devices (DEPRECATED)
- * @rw: whether to %READ or %WRITE or maybe %READA (readahead)
+ * @op: whether to %READ or %WRITE
+ * @op_flags: rq_flag_bits
* @nr: number of &struct buffer_heads in the array
* @bhs: array of pointers to &struct buffer_head
*
* ll_rw_block() takes an array of pointers to &struct buffer_heads, and
- * requests an I/O operation on them, either a %READ or a %WRITE. The third
- * %READA option is described in the documentation for generic_make_request()
- * which ll_rw_block() calls.
+ * requests an I/O operation on them, either a %REQ_OP_READ or a %REQ_OP_WRITE.
+ * @op_flags contains flags modifying the detailed I/O behavior, most notably
+ * %REQ_RAHEAD.
*
* This function drops any buffer that it cannot get a lock on (with the
* BH_Lock state bit), any buffer that appears to be clean when doing a write
@@ -3072,7 +3142,7 @@ EXPORT_SYMBOL(submit_bh);
* All of the buffers must be for the same device, and must also be a
* multiple of the current approved size for the device.
*/
-void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
+void ll_rw_block(int op, int op_flags, int nr, struct buffer_head *bhs[])
{
int i;
@@ -3081,18 +3151,18 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
if (!trylock_buffer(bh))
continue;
- if (rw == WRITE) {
+ if (op == WRITE) {
if (test_clear_buffer_dirty(bh)) {
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
- submit_bh(WRITE, bh);
+ submit_bh(op, op_flags, bh);
continue;
}
} else {
if (!buffer_uptodate(bh)) {
bh->b_end_io = end_buffer_read_sync;
get_bh(bh);
- submit_bh(rw, bh);
+ submit_bh(op, op_flags, bh);
continue;
}
}
@@ -3101,7 +3171,7 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
}
EXPORT_SYMBOL(ll_rw_block);
-void write_dirty_buffer(struct buffer_head *bh, int rw)
+void write_dirty_buffer(struct buffer_head *bh, int op_flags)
{
lock_buffer(bh);
if (!test_clear_buffer_dirty(bh)) {
@@ -3110,7 +3180,7 @@ void write_dirty_buffer(struct buffer_head *bh, int rw)
}
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
- submit_bh(rw, bh);
+ submit_bh(REQ_OP_WRITE, op_flags, bh);
}
EXPORT_SYMBOL(write_dirty_buffer);
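The conversions in this file all follow the same mechanical rule: the request opcode (REQ_OP_READ, REQ_OP_WRITE) and the modifier flags now travel as separate parameters until bio_set_op_attrs() packs them into the bio right before submit_bio(). A toy model of that packing; the shift and flag values are invented, the real encoding lives in the block layer headers:

#include <stdio.h>

enum req_op { REQ_OP_READ = 0, REQ_OP_WRITE = 1 };

#define REQ_META     (1u << 0)		/* invented flag values */
#define REQ_PRIO     (1u << 1)
#define BIO_OP_SHIFT 28			/* invented shift */

struct bio { unsigned int bi_rw; };

static void bio_set_op_attrs(struct bio *bio, enum req_op op,
			     unsigned int op_flags)
{
	bio->bi_rw = ((unsigned int)op << BIO_OP_SHIFT) | op_flags;
}

int main(void)
{
	struct bio bio;

	/* what submit_bh_wbc() now does just before submit_bio() */
	bio_set_op_attrs(&bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
	printf("bi_rw = %#x\n", bio.bi_rw);
	return 0;
}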
@@ -3119,7 +3189,7 @@ EXPORT_SYMBOL(write_dirty_buffer);
* and then start new I/O and then wait upon it. The caller must have a ref on
* the buffer_head.
*/
-int __sync_dirty_buffer(struct buffer_head *bh, int rw)
+int __sync_dirty_buffer(struct buffer_head *bh, int op_flags)
{
int ret = 0;
@@ -3128,7 +3198,7 @@ int __sync_dirty_buffer(struct buffer_head *bh, int rw)
if (test_clear_buffer_dirty(bh)) {
get_bh(bh);
bh->b_end_io = end_buffer_write_sync;
- ret = submit_bh(rw, bh);
+ ret = submit_bh(REQ_OP_WRITE, op_flags, bh);
wait_on_buffer(bh);
if (!ret && !buffer_uptodate(bh))
ret = -EIO;
@@ -3391,7 +3461,7 @@ int bh_submit_read(struct buffer_head *bh)
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
- submit_bh(READ, bh);
+ submit_bh(REQ_OP_READ, 0, bh);
wait_on_buffer(bh);
if (buffer_uptodate(bh))
return 0;
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 4ae75006e73b..3f7c2cd41f8f 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -263,6 +263,8 @@ requeue:
void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
struct cachefiles_object *object)
{
+ blkcnt_t i_blocks = d_backing_inode(object->dentry)->i_blocks;
+
write_lock(&cache->active_lock);
rb_erase(&object->active_node, &cache->active_nodes);
clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
@@ -273,8 +275,7 @@ void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
/* This object can now be culled, so we need to let the daemon know
* that there is something it can remove if it needs to.
*/
- atomic_long_add(d_backing_inode(object->dentry)->i_blocks,
- &cache->b_released);
+ atomic_long_add(i_blocks, &cache->b_released);
if (atomic_inc_return(&cache->f_released))
cachefiles_state_changed(cache);
}
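The new local variable is the whole point of this hunk: once the object is marked inactive and the active-tree lock is dropped, the daemon may cull it, so the backing inode must not be dereferenced afterwards; sampling i_blocks up front closes that window. The pattern in isolation, with invented names:

#include <stdio.h>

struct object { unsigned long long i_blocks; };

static void mark_inactive(struct object *obj)
{
	(void)obj;	/* after this returns, another thread may free *obj */
}

static void release_object(struct object *obj)
{
	/* sample everything we still need while the object is ours */
	unsigned long long blocks = obj->i_blocks;

	mark_inactive(obj);		/* *obj is off limits from here */
	printf("released %llu blocks\n", blocks);	/* local copy */
}

int main(void)
{
	struct object obj = { .i_blocks = 8 };
	release_object(&obj);
	return 0;
}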
diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
index eccd33941199..125b90f6c796 100644
--- a/fs/cachefiles/proc.c
+++ b/fs/cachefiles/proc.c
@@ -93,7 +93,6 @@ static int cachefiles_histogram_open(struct inode *inode, struct file *file)
}
static const struct file_operations cachefiles_histogram_fops = {
- .owner = THIS_MODULE,
.open = cachefiles_histogram_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 26a9d10d75e9..d5b6f959a3c3 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1730,7 +1730,8 @@ enum {
POOL_WRITE = 2,
};
-static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
+static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
+ s64 pool, struct ceph_string *pool_ns)
{
struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
struct ceph_mds_client *mdsc = fsc->mdsc;
@@ -1738,6 +1739,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
struct rb_node **p, *parent;
struct ceph_pool_perm *perm;
struct page **pages;
+ size_t pool_ns_len;
int err = 0, err2 = 0, have = 0;
down_read(&mdsc->pool_perm_rwsem);
@@ -1749,17 +1751,31 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
else if (pool > perm->pool)
p = &(*p)->rb_right;
else {
- have = perm->perm;
- break;
+ int ret = ceph_compare_string(pool_ns,
+ perm->pool_ns,
+ perm->pool_ns_len);
+ if (ret < 0)
+ p = &(*p)->rb_left;
+ else if (ret > 0)
+ p = &(*p)->rb_right;
+ else {
+ have = perm->perm;
+ break;
+ }
}
}
up_read(&mdsc->pool_perm_rwsem);
if (*p)
goto out;
- dout("__ceph_pool_perm_get pool %u no perm cached\n", pool);
+ if (pool_ns)
+ dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n",
+ pool, (int)pool_ns->len, pool_ns->str);
+ else
+ dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool);
down_write(&mdsc->pool_perm_rwsem);
+ p = &mdsc->pool_perm_tree.rb_node;
parent = NULL;
while (*p) {
parent = *p;
@@ -1769,8 +1785,17 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
else if (pool > perm->pool)
p = &(*p)->rb_right;
else {
- have = perm->perm;
- break;
+ int ret = ceph_compare_string(pool_ns,
+ perm->pool_ns,
+ perm->pool_ns_len);
+ if (ret < 0)
+ p = &(*p)->rb_left;
+ else if (ret > 0)
+ p = &(*p)->rb_right;
+ else {
+ have = perm->perm;
+ break;
+ }
}
}
if (*p) {
@@ -1788,6 +1813,8 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
rd_req->r_flags = CEPH_OSD_FLAG_READ;
osd_req_op_init(rd_req, 0, CEPH_OSD_OP_STAT, 0);
rd_req->r_base_oloc.pool = pool;
+ if (pool_ns)
+ rd_req->r_base_oloc.pool_ns = ceph_get_string(pool_ns);
ceph_oid_printf(&rd_req->r_base_oid, "%llx.00000000", ci->i_vino.ino);
err = ceph_osdc_alloc_messages(rd_req, GFP_NOFS);
@@ -1841,7 +1868,8 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
goto out_unlock;
}
- perm = kmalloc(sizeof(*perm), GFP_NOFS);
+ pool_ns_len = pool_ns ? pool_ns->len : 0;
+ perm = kmalloc(sizeof(*perm) + pool_ns_len + 1, GFP_NOFS);
if (!perm) {
err = -ENOMEM;
goto out_unlock;
@@ -1849,6 +1877,11 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
perm->pool = pool;
perm->perm = have;
+ perm->pool_ns_len = pool_ns_len;
+ if (pool_ns_len > 0)
+ memcpy(perm->pool_ns, pool_ns->str, pool_ns_len);
+ perm->pool_ns[pool_ns_len] = 0;
+
rb_link_node(&perm->node, parent, p);
rb_insert_color(&perm->node, &mdsc->pool_perm_tree);
err = 0;
@@ -1860,43 +1893,46 @@ out_unlock:
out:
if (!err)
err = have;
- dout("__ceph_pool_perm_get pool %u result = %d\n", pool, err);
+ if (pool_ns)
+ dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n",
+ pool, (int)pool_ns->len, pool_ns->str, err);
+ else
+ dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err);
return err;
}
int ceph_pool_perm_check(struct ceph_inode_info *ci, int need)
{
- u32 pool;
+ s64 pool;
+ struct ceph_string *pool_ns;
int ret, flags;
- /* does not support pool namespace yet */
- if (ci->i_pool_ns_len)
- return -EIO;
-
if (ceph_test_mount_opt(ceph_inode_to_client(&ci->vfs_inode),
NOPOOLPERM))
return 0;
spin_lock(&ci->i_ceph_lock);
flags = ci->i_ceph_flags;
- pool = ceph_file_layout_pg_pool(ci->i_layout);
+ pool = ci->i_layout.pool_id;
spin_unlock(&ci->i_ceph_lock);
check:
if (flags & CEPH_I_POOL_PERM) {
if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
- dout("ceph_pool_perm_check pool %u no read perm\n",
+ dout("ceph_pool_perm_check pool %lld no read perm\n",
pool);
return -EPERM;
}
if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
- dout("ceph_pool_perm_check pool %u no write perm\n",
+ dout("ceph_pool_perm_check pool %lld no write perm\n",
pool);
return -EPERM;
}
return 0;
}
- ret = __ceph_pool_perm_get(ci, pool);
+ pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
+ ret = __ceph_pool_perm_get(ci, pool, pool_ns);
+ ceph_put_string(pool_ns);
if (ret < 0)
return ret;
@@ -1907,10 +1943,11 @@ check:
flags |= CEPH_I_POOL_WR;
spin_lock(&ci->i_ceph_lock);
- if (pool == ceph_file_layout_pg_pool(ci->i_layout)) {
- ci->i_ceph_flags = flags;
+ if (pool == ci->i_layout.pool_id &&
+ pool_ns == rcu_dereference_raw(ci->i_layout.pool_ns)) {
+ ci->i_ceph_flags |= flags;
} else {
- pool = ceph_file_layout_pg_pool(ci->i_layout);
+ pool = ci->i_layout.pool_id;
flags = ci->i_ceph_flags;
}
spin_unlock(&ci->i_ceph_lock);
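Both rb-tree walks in __ceph_pool_perm_get() now key on (pool id, namespace string) instead of the pool id alone, and the perm record stores the namespace inline, hence the kmalloc(sizeof(*perm) + pool_ns_len + 1) with a trailing NUL. A sketch of such a two-level comparison; ceph_compare_string()'s exact tie-breaking may differ:

#include <string.h>

struct perm_key {
	long long pool;		/* primary key */
	const char *ns;		/* secondary key: pool namespace */
	size_t ns_len;
};

static int perm_key_cmp(const struct perm_key *a, const struct perm_key *b)
{
	size_t n;
	int c;

	if (a->pool != b->pool)
		return a->pool < b->pool ? -1 : 1;

	n = a->ns_len < b->ns_len ? a->ns_len : b->ns_len;
	c = memcmp(a->ns, b->ns, n);
	if (c)
		return c;
	/* equal prefix: let the shorter string sort first */
	if (a->ns_len != b->ns_len)
		return a->ns_len < b->ns_len ? -1 : 1;
	return 0;
}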
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index 238c55b01723..5bc5d37b1217 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -71,7 +71,7 @@ int ceph_fscache_register_fs(struct ceph_fs_client* fsc)
&ceph_fscache_fsid_object_def,
fsc, true);
if (!fsc->fscache)
- pr_err("Unable to resgister fsid: %p fscache cookie", fsc);
+ pr_err("Unable to register fsid: %p fscache cookie\n", fsc);
return 0;
}
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 6f60d0a3d0f9..16e6ded0b7f2 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -40,6 +40,11 @@
* cluster to release server state.
*/
+static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc);
+static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session,
+ struct ceph_inode_info *ci,
+ u64 oldest_flush_tid);
/*
* Generate readable cap strings for debugging output.
@@ -849,12 +854,14 @@ int __ceph_caps_used(struct ceph_inode_info *ci)
*/
int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
{
- int want = 0;
- int mode;
- for (mode = 0; mode < CEPH_FILE_MODE_NUM; mode++)
- if (ci->i_nr_by_mode[mode])
- want |= ceph_caps_for_mode(mode);
- return want;
+ int i, bits = 0;
+ for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
+ if (ci->i_nr_by_mode[i])
+ bits |= 1 << i;
+ }
+ if (bits == 0)
+ return 0;
+ return ceph_caps_for_mode(bits >> 1);
}
/*
@@ -991,7 +998,7 @@ static int send_cap_msg(struct ceph_mds_session *session,
u32 seq, u64 flush_tid, u64 oldest_flush_tid,
u32 issue_seq, u32 mseq, u64 size, u64 max_size,
struct timespec *mtime, struct timespec *atime,
- struct timespec *ctime, u64 time_warp_seq,
+ struct timespec *ctime, u32 time_warp_seq,
kuid_t uid, kgid_t gid, umode_t mode,
u64 xattr_version,
struct ceph_buffer *xattrs_buf,
@@ -1116,8 +1123,8 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
struct inode *inode = &ci->vfs_inode;
u64 cap_id = cap->cap_id;
int held, revoking, dropping, keep;
- u64 seq, issue_seq, mseq, time_warp_seq, follows;
- u64 size, max_size;
+ u64 follows, size, max_size;
+ u32 seq, issue_seq, mseq, time_warp_seq;
struct timespec mtime, atime, ctime;
int wake = 0;
umode_t mode;
@@ -1215,6 +1222,22 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
return delayed;
}
+static inline int __send_flush_snap(struct inode *inode,
+ struct ceph_mds_session *session,
+ struct ceph_cap_snap *capsnap,
+ u32 mseq, u64 oldest_flush_tid)
+{
+ return send_cap_msg(session, ceph_vino(inode).ino, 0,
+ CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
+ capsnap->dirty, 0, capsnap->cap_flush.tid,
+ oldest_flush_tid, 0, mseq, capsnap->size, 0,
+ &capsnap->mtime, &capsnap->atime,
+ &capsnap->ctime, capsnap->time_warp_seq,
+ capsnap->uid, capsnap->gid, capsnap->mode,
+ capsnap->xattr_version, capsnap->xattr_blob,
+ capsnap->follows, capsnap->inline_data);
+}
+
/*
* When a snapshot is taken, clients accumulate dirty metadata on
* inodes with capabilities in ceph_cap_snaps to describe the file
@@ -1222,37 +1245,22 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
* asynchronously back to the MDS once sync writes complete and dirty
* data is written out.
*
- * Unless @kick is true, skip cap_snaps that were already sent to
- * the MDS (i.e., during this session).
- *
* Called under i_ceph_lock. Takes s_mutex as needed.
*/
-void __ceph_flush_snaps(struct ceph_inode_info *ci,
- struct ceph_mds_session **psession,
- int kick)
+static void __ceph_flush_snaps(struct ceph_inode_info *ci,
+ struct ceph_mds_session *session)
__releases(ci->i_ceph_lock)
__acquires(ci->i_ceph_lock)
{
struct inode *inode = &ci->vfs_inode;
- int mds;
+ struct ceph_mds_client *mdsc = session->s_mdsc;
struct ceph_cap_snap *capsnap;
- u32 mseq;
- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
- struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
- session->s_mutex */
- u64 next_follows = 0; /* keep track of how far we've gotten through the
- i_cap_snaps list, and skip these entries next time
- around to avoid an infinite loop */
+ u64 oldest_flush_tid = 0;
+ u64 first_tid = 1, last_tid = 0;
- if (psession)
- session = *psession;
+ dout("__flush_snaps %p session %p\n", inode, session);
- dout("__flush_snaps %p\n", inode);
-retry:
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
- /* avoid an infiniute loop after retry */
- if (capsnap->follows < next_follows)
- continue;
/*
* we need to wait for sync writes to complete and for dirty
* pages to be written out.
@@ -1263,97 +1271,132 @@ retry:
/* should be removed by ceph_try_drop_cap_snap() */
BUG_ON(!capsnap->need_flush);
- /* pick mds, take s_mutex */
- if (ci->i_auth_cap == NULL) {
- dout("no auth cap (migrating?), doing nothing\n");
- goto out;
- }
-
/* only flush each capsnap once */
- if (!kick && !list_empty(&capsnap->flushing_item)) {
- dout("already flushed %p, skipping\n", capsnap);
+ if (capsnap->cap_flush.tid > 0) {
+ dout(" already flushed %p, skipping\n", capsnap);
continue;
}
- mds = ci->i_auth_cap->session->s_mds;
- mseq = ci->i_auth_cap->mseq;
+ spin_lock(&mdsc->cap_dirty_lock);
+ capsnap->cap_flush.tid = ++mdsc->last_cap_flush_tid;
+ list_add_tail(&capsnap->cap_flush.g_list,
+ &mdsc->cap_flush_list);
+ if (oldest_flush_tid == 0)
+ oldest_flush_tid = __get_oldest_flush_tid(mdsc);
+ if (list_empty(&ci->i_flushing_item)) {
+ list_add_tail(&ci->i_flushing_item,
+ &session->s_cap_flushing);
+ }
+ spin_unlock(&mdsc->cap_dirty_lock);
+
+ list_add_tail(&capsnap->cap_flush.i_list,
+ &ci->i_cap_flush_list);
- if (session && session->s_mds != mds) {
- dout("oops, wrong session %p mutex\n", session);
- if (kick)
- goto out;
+ if (first_tid == 1)
+ first_tid = capsnap->cap_flush.tid;
+ last_tid = capsnap->cap_flush.tid;
+ }
- mutex_unlock(&session->s_mutex);
- ceph_put_mds_session(session);
- session = NULL;
+ ci->i_ceph_flags &= ~CEPH_I_FLUSH_SNAPS;
+
+ while (first_tid <= last_tid) {
+ struct ceph_cap *cap = ci->i_auth_cap;
+ struct ceph_cap_flush *cf;
+ int ret;
+
+ if (!(cap && cap->session == session)) {
+ dout("__flush_snaps %p auth cap %p not mds%d, "
+ "stop\n", inode, cap, session->s_mds);
+ break;
}
- if (!session) {
- spin_unlock(&ci->i_ceph_lock);
- mutex_lock(&mdsc->mutex);
- session = __ceph_lookup_mds_session(mdsc, mds);
- mutex_unlock(&mdsc->mutex);
- if (session) {
- dout("inverting session/ino locks on %p\n",
- session);
- mutex_lock(&session->s_mutex);
+
+ ret = -ENOENT;
+ list_for_each_entry(cf, &ci->i_cap_flush_list, i_list) {
+ if (cf->tid >= first_tid) {
+ ret = 0;
+ break;
}
- /*
- * if session == NULL, we raced against a cap
- * deletion or migration. retry, and we'll
- * get a better @mds value next time.
- */
- spin_lock(&ci->i_ceph_lock);
- goto retry;
}
+ if (ret < 0)
+ break;
- spin_lock(&mdsc->cap_dirty_lock);
- capsnap->flush_tid = ++mdsc->last_cap_flush_tid;
- spin_unlock(&mdsc->cap_dirty_lock);
+ first_tid = cf->tid + 1;
+ capsnap = container_of(cf, struct ceph_cap_snap, cap_flush);
atomic_inc(&capsnap->nref);
- if (list_empty(&capsnap->flushing_item))
- list_add_tail(&capsnap->flushing_item,
- &session->s_cap_snaps_flushing);
spin_unlock(&ci->i_ceph_lock);
- dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
- inode, capsnap, capsnap->follows, capsnap->flush_tid);
- send_cap_msg(session, ceph_vino(inode).ino, 0,
- CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
- capsnap->dirty, 0, capsnap->flush_tid, 0,
- 0, mseq, capsnap->size, 0,
- &capsnap->mtime, &capsnap->atime,
- &capsnap->ctime, capsnap->time_warp_seq,
- capsnap->uid, capsnap->gid, capsnap->mode,
- capsnap->xattr_version, capsnap->xattr_blob,
- capsnap->follows, capsnap->inline_data);
-
- next_follows = capsnap->follows + 1;
- ceph_put_cap_snap(capsnap);
+ dout("__flush_snaps %p capsnap %p tid %llu %s\n",
+ inode, capsnap, cf->tid, ceph_cap_string(capsnap->dirty));
+ ret = __send_flush_snap(inode, session, capsnap, cap->mseq,
+ oldest_flush_tid);
+ if (ret < 0) {
+ pr_err("__flush_snaps: error sending cap flushsnap, "
+ "ino (%llx.%llx) tid %llu follows %llu\n",
+ ceph_vinop(inode), cf->tid, capsnap->follows);
+ }
+
+ ceph_put_cap_snap(capsnap);
spin_lock(&ci->i_ceph_lock);
- goto retry;
}
+}
- /* we flushed them all; remove this inode from the queue */
- spin_lock(&mdsc->snap_flush_lock);
- list_del_init(&ci->i_snap_flush_item);
- spin_unlock(&mdsc->snap_flush_lock);
+void ceph_flush_snaps(struct ceph_inode_info *ci,
+ struct ceph_mds_session **psession)
+{
+ struct inode *inode = &ci->vfs_inode;
+ struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
+ struct ceph_mds_session *session = NULL;
+ int mds;
-out:
+ dout("ceph_flush_snaps %p\n", inode);
if (psession)
- *psession = session;
- else if (session) {
+ session = *psession;
+retry:
+ spin_lock(&ci->i_ceph_lock);
+ if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) {
+ dout(" no capsnap needs flush, doing nothing\n");
+ goto out;
+ }
+ if (!ci->i_auth_cap) {
+ dout(" no auth cap (migrating?), doing nothing\n");
+ goto out;
+ }
+
+ mds = ci->i_auth_cap->session->s_mds;
+ if (session && session->s_mds != mds) {
+ dout(" oops, wrong session %p mutex\n", session);
mutex_unlock(&session->s_mutex);
ceph_put_mds_session(session);
+ session = NULL;
+ }
+ if (!session) {
+ spin_unlock(&ci->i_ceph_lock);
+ mutex_lock(&mdsc->mutex);
+ session = __ceph_lookup_mds_session(mdsc, mds);
+ mutex_unlock(&mdsc->mutex);
+ if (session) {
+ dout(" inverting session/ino locks on %p\n", session);
+ mutex_lock(&session->s_mutex);
+ }
+ goto retry;
}
-}
-static void ceph_flush_snaps(struct ceph_inode_info *ci)
-{
- spin_lock(&ci->i_ceph_lock);
- __ceph_flush_snaps(ci, NULL, 0);
+ __ceph_flush_snaps(ci, session);
+out:
spin_unlock(&ci->i_ceph_lock);
+
+ if (psession) {
+ *psession = session;
+ } else {
+ mutex_unlock(&session->s_mutex);
+ ceph_put_mds_session(session);
+ }
+ /* we flushed them all; remove this inode from the queue */
+ spin_lock(&mdsc->snap_flush_lock);
+ list_del_init(&ci->i_snap_flush_item);
+ spin_unlock(&mdsc->snap_flush_lock);
}
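The retry loop above is the classic lock-inversion dance: the session mutex ranks above i_ceph_lock, so discovering under the spinlock that a different session's mutex is needed forces a full back-out and revalidation; the auth cap may even migrate to another MDS in between, which is why mds is re-read on each pass. A stripped-down pthread model of the shape:

#include <pthread.h>

/* lock order: session mutex, then inode lock */
static pthread_mutex_t session_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t inode_lock    = PTHREAD_MUTEX_INITIALIZER;

static void flush_with_retry(void)
{
	int have_session = 0;
retry:
	pthread_mutex_lock(&inode_lock);
	if (!have_session) {
		pthread_mutex_unlock(&inode_lock);
		pthread_mutex_lock(&session_mutex);
		have_session = 1;
		goto retry;	/* the world may have changed meanwhile */
	}
	/* ... flush snaps under both locks ... */
	pthread_mutex_unlock(&inode_lock);
	pthread_mutex_unlock(&session_mutex);
}

int main(void)
{
	flush_with_retry();
	return 0;
}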
/*
@@ -1411,52 +1454,6 @@ int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
return dirty;
}
-static void __add_cap_flushing_to_inode(struct ceph_inode_info *ci,
- struct ceph_cap_flush *cf)
-{
- struct rb_node **p = &ci->i_cap_flush_tree.rb_node;
- struct rb_node *parent = NULL;
- struct ceph_cap_flush *other = NULL;
-
- while (*p) {
- parent = *p;
- other = rb_entry(parent, struct ceph_cap_flush, i_node);
-
- if (cf->tid < other->tid)
- p = &(*p)->rb_left;
- else if (cf->tid > other->tid)
- p = &(*p)->rb_right;
- else
- BUG();
- }
-
- rb_link_node(&cf->i_node, parent, p);
- rb_insert_color(&cf->i_node, &ci->i_cap_flush_tree);
-}
-
-static void __add_cap_flushing_to_mdsc(struct ceph_mds_client *mdsc,
- struct ceph_cap_flush *cf)
-{
- struct rb_node **p = &mdsc->cap_flush_tree.rb_node;
- struct rb_node *parent = NULL;
- struct ceph_cap_flush *other = NULL;
-
- while (*p) {
- parent = *p;
- other = rb_entry(parent, struct ceph_cap_flush, g_node);
-
- if (cf->tid < other->tid)
- p = &(*p)->rb_left;
- else if (cf->tid > other->tid)
- p = &(*p)->rb_right;
- else
- BUG();
- }
-
- rb_link_node(&cf->g_node, parent, p);
- rb_insert_color(&cf->g_node, &mdsc->cap_flush_tree);
-}
-
struct ceph_cap_flush *ceph_alloc_cap_flush(void)
{
return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
@@ -1470,23 +1467,54 @@ void ceph_free_cap_flush(struct ceph_cap_flush *cf)
static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc)
{
- struct rb_node *n = rb_first(&mdsc->cap_flush_tree);
- if (n) {
+ if (!list_empty(&mdsc->cap_flush_list)) {
struct ceph_cap_flush *cf =
- rb_entry(n, struct ceph_cap_flush, g_node);
+ list_first_entry(&mdsc->cap_flush_list,
+ struct ceph_cap_flush, g_list);
return cf->tid;
}
return 0;
}
/*
+ * Remove cap_flush from the mdsc's or inode's flushing cap list.
+ * Return true if caller needs to wake up flush waiters.
+ */
+static bool __finish_cap_flush(struct ceph_mds_client *mdsc,
+ struct ceph_inode_info *ci,
+ struct ceph_cap_flush *cf)
+{
+ struct ceph_cap_flush *prev;
+ bool wake = cf->wake;
+ if (mdsc) {
+ /* are there older pending cap flushes? */
+ if (wake && cf->g_list.prev != &mdsc->cap_flush_list) {
+ prev = list_prev_entry(cf, g_list);
+ prev->wake = true;
+ wake = false;
+ }
+ list_del(&cf->g_list);
+ } else if (ci) {
+ if (wake && cf->i_list.prev != &ci->i_cap_flush_list) {
+ prev = list_prev_entry(cf, i_list);
+ prev->wake = true;
+ wake = false;
+ }
+ list_del(&cf->i_list);
+ } else {
+ BUG_ON(1);
+ }
+ return wake;
+}
+
+/*
 * Add dirty inode to the flushing list. Assign a seq number so we
* can wait for caps to flush without starving.
*
* Called under i_ceph_lock.
*/
static int __mark_caps_flushing(struct inode *inode,
- struct ceph_mds_session *session,
+ struct ceph_mds_session *session, bool wake,
u64 *flush_tid, u64 *oldest_flush_tid)
{
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
@@ -1509,26 +1537,22 @@ static int __mark_caps_flushing(struct inode *inode,
swap(cf, ci->i_prealloc_cap_flush);
cf->caps = flushing;
+ cf->wake = wake;
spin_lock(&mdsc->cap_dirty_lock);
list_del_init(&ci->i_dirty_item);
cf->tid = ++mdsc->last_cap_flush_tid;
- __add_cap_flushing_to_mdsc(mdsc, cf);
+ list_add_tail(&cf->g_list, &mdsc->cap_flush_list);
*oldest_flush_tid = __get_oldest_flush_tid(mdsc);
if (list_empty(&ci->i_flushing_item)) {
list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
mdsc->num_cap_flushing++;
- dout(" inode %p now flushing tid %llu\n", inode, cf->tid);
- } else {
- list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
- dout(" inode %p now flushing (more) tid %llu\n",
- inode, cf->tid);
}
spin_unlock(&mdsc->cap_dirty_lock);
- __add_cap_flushing_to_inode(ci, cf);
+ list_add_tail(&cf->i_list, &ci->i_cap_flush_list);
*flush_tid = cf->tid;
return flushing;
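Two related changes land here. The tid-ordered rb-trees become plain lists, which is safe because tids are allocated and queued under the same cap_dirty_lock, so tail insertion keeps each list sorted and __get_oldest_flush_tid() reduces to reading the head. And the new wake field lets a flush waiter be woken only once everything up to its tid is gone: when an entry with a waiter finishes out of order, __finish_cap_flush() hands the flag to the previous, older entry. A minimal model of that handoff, using a doubly linked list with a sentinel head and invented names:

#include <stdbool.h>

struct flush {
	struct flush *prev, *next;
	bool wake;			/* a waiter is parked on this tid */
};

/* @head is the list sentinel; returns true if the caller should wake
 * waiters, i.e. the finished entry was the oldest one with a waiter */
static bool finish_flush(struct flush *head, struct flush *cf)
{
	bool wake = cf->wake;

	if (wake && cf->prev != head) {
		cf->prev->wake = true;	/* defer to the older entry */
		wake = false;
	}
	cf->next->prev = cf->prev;	/* unlink cf */
	cf->prev->next = cf->next;
	return wake;
}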
@@ -1583,10 +1607,11 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
int mds = -1; /* keep track of how far we've gone through i_caps list
to avoid an infinite loop on retry */
struct rb_node *p;
- int tried_invalidate = 0;
- int delayed = 0, sent = 0, force_requeue = 0, num;
- int queue_invalidate = 0;
- int is_delayed = flags & CHECK_CAPS_NODELAY;
+ int delayed = 0, sent = 0, num;
+ bool is_delayed = flags & CHECK_CAPS_NODELAY;
+ bool queue_invalidate = false;
+ bool force_requeue = false;
+ bool tried_invalidate = false;
/* if we are unmounting, flush any unused caps immediately. */
if (mdsc->stopping)
@@ -1597,9 +1622,6 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
if (ci->i_ceph_flags & CEPH_I_FLUSH)
flags |= CHECK_CAPS_FLUSH;
- /* flush snaps first time around only */
- if (!list_empty(&ci->i_cap_snaps))
- __ceph_flush_snaps(ci, &session, 0);
goto retry_locked;
retry:
spin_lock(&ci->i_ceph_lock);
@@ -1666,17 +1688,17 @@ retry_locked:
if (revoking & (CEPH_CAP_FILE_CACHE|
CEPH_CAP_FILE_LAZYIO)) {
dout("check_caps queuing invalidate\n");
- queue_invalidate = 1;
+ queue_invalidate = true;
ci->i_rdcache_revoking = ci->i_rdcache_gen;
} else {
dout("check_caps failed to invalidate pages\n");
/* we failed to invalidate pages. check these
caps again later. */
- force_requeue = 1;
+ force_requeue = true;
__cap_set_timeouts(mdsc, ci);
}
}
- tried_invalidate = 1;
+ tried_invalidate = true;
goto retry_locked;
}
@@ -1720,10 +1742,15 @@ retry_locked:
}
}
/* flush anything dirty? */
- if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) &&
- ci->i_dirty_caps) {
- dout("flushing dirty caps\n");
- goto ack;
+ if (cap == ci->i_auth_cap) {
+ if ((flags & CHECK_CAPS_FLUSH) && ci->i_dirty_caps) {
+ dout("flushing dirty caps\n");
+ goto ack;
+ }
+ if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS) {
+ dout("flushing snap caps\n");
+ goto ack;
+ }
}
/* completed revocation? going down and there are no caps? */
@@ -1782,6 +1809,26 @@ ack:
goto retry;
}
}
+
+ /* kick flushing and flush snaps before sending normal
+ * cap message */
+ if (cap == ci->i_auth_cap &&
+ (ci->i_ceph_flags &
+ (CEPH_I_KICK_FLUSH | CEPH_I_FLUSH_SNAPS))) {
+ if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) {
+ spin_lock(&mdsc->cap_dirty_lock);
+ oldest_flush_tid = __get_oldest_flush_tid(mdsc);
+ spin_unlock(&mdsc->cap_dirty_lock);
+ __kick_flushing_caps(mdsc, session, ci,
+ oldest_flush_tid);
+ ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
+ }
+ if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)
+ __ceph_flush_snaps(ci, session);
+
+ goto retry_locked;
+ }
+
/* take snap_rwsem after session mutex */
if (!took_snap_rwsem) {
if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
@@ -1796,7 +1843,7 @@ ack:
}
if (cap == ci->i_auth_cap && ci->i_dirty_caps) {
- flushing = __mark_caps_flushing(inode, session,
+ flushing = __mark_caps_flushing(inode, session, false,
&flush_tid,
&oldest_flush_tid);
} else {
@@ -1822,7 +1869,7 @@ ack:
* otherwise cancel.
*/
if (delayed && is_delayed)
- force_requeue = 1; /* __send_cap delayed release; requeue */
+ force_requeue = true; /* __send_cap delayed release; requeue */
if (!delayed && !is_delayed)
__cap_delay_cancel(mdsc, ci);
else if (!is_delayed || force_requeue)
@@ -1873,8 +1920,8 @@ retry:
if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
goto out;
- flushing = __mark_caps_flushing(inode, session, &flush_tid,
- &oldest_flush_tid);
+ flushing = __mark_caps_flushing(inode, session, true,
+ &flush_tid, &oldest_flush_tid);
/* __send_cap drops i_ceph_lock */
delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
@@ -1887,10 +1934,11 @@ retry:
spin_unlock(&ci->i_ceph_lock);
}
} else {
- struct rb_node *n = rb_last(&ci->i_cap_flush_tree);
- if (n) {
+ if (!list_empty(&ci->i_cap_flush_list)) {
struct ceph_cap_flush *cf =
- rb_entry(n, struct ceph_cap_flush, i_node);
+ list_last_entry(&ci->i_cap_flush_list,
+ struct ceph_cap_flush, i_list);
+ cf->wake = true;
flush_tid = cf->tid;
}
flushing = ci->i_flushing_caps;
@@ -1910,14 +1958,13 @@ out:
static int caps_are_flushed(struct inode *inode, u64 flush_tid)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_cap_flush *cf;
- struct rb_node *n;
int ret = 1;
spin_lock(&ci->i_ceph_lock);
- n = rb_first(&ci->i_cap_flush_tree);
- if (n) {
- cf = rb_entry(n, struct ceph_cap_flush, i_node);
+ if (!list_empty(&ci->i_cap_flush_list)) {
+ struct ceph_cap_flush *cf =
+ list_first_entry(&ci->i_cap_flush_list,
+ struct ceph_cap_flush, i_list);
if (cf->tid <= flush_tid)
ret = 0;
}
@@ -1926,53 +1973,6 @@ static int caps_are_flushed(struct inode *inode, u64 flush_tid)
}
/*
- * Wait on any unsafe replies for the given inode. First wait on the
- * newest request, and make that the upper bound. Then, if there are
- * more requests, keep waiting on the oldest as long as it is still older
- * than the original request.
- */
-static void sync_write_wait(struct inode *inode)
-{
- struct ceph_inode_info *ci = ceph_inode(inode);
- struct list_head *head = &ci->i_unsafe_writes;
- struct ceph_osd_request *req;
- u64 last_tid;
-
- if (!S_ISREG(inode->i_mode))
- return;
-
- spin_lock(&ci->i_unsafe_lock);
- if (list_empty(head))
- goto out;
-
- /* set upper bound as _last_ entry in chain */
- req = list_last_entry(head, struct ceph_osd_request,
- r_unsafe_item);
- last_tid = req->r_tid;
-
- do {
- ceph_osdc_get_request(req);
- spin_unlock(&ci->i_unsafe_lock);
- dout("sync_write_wait on tid %llu (until %llu)\n",
- req->r_tid, last_tid);
- wait_for_completion(&req->r_safe_completion);
- spin_lock(&ci->i_unsafe_lock);
- ceph_osdc_put_request(req);
-
- /*
- * from here on look at first entry in chain, since we
- * only want to wait for anything older than last_tid
- */
- if (list_empty(head))
- break;
- req = list_first_entry(head, struct ceph_osd_request,
- r_unsafe_item);
- } while (req->r_tid < last_tid);
-out:
- spin_unlock(&ci->i_unsafe_lock);
-}
-
-/*
* wait for any unsafe requests to complete.
*/
static int unsafe_request_wait(struct inode *inode)
@@ -2024,7 +2024,8 @@ int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
int dirty;
dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
- sync_write_wait(inode);
+
+ ceph_sync_write_wait(inode);
ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (ret < 0)
@@ -2087,87 +2088,74 @@ int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
return err;
}
-/*
- * After a recovering MDS goes active, we need to resend any caps
- * we were flushing.
- *
- * Caller holds session->s_mutex.
- */
-static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
- struct ceph_mds_session *session)
-{
- struct ceph_cap_snap *capsnap;
-
- dout("kick_flushing_capsnaps mds%d\n", session->s_mds);
- list_for_each_entry(capsnap, &session->s_cap_snaps_flushing,
- flushing_item) {
- struct ceph_inode_info *ci = capsnap->ci;
- struct inode *inode = &ci->vfs_inode;
- struct ceph_cap *cap;
-
- spin_lock(&ci->i_ceph_lock);
- cap = ci->i_auth_cap;
- if (cap && cap->session == session) {
- dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
- cap, capsnap);
- __ceph_flush_snaps(ci, &session, 1);
- } else {
- pr_err("%p auth cap %p not mds%d ???\n", inode,
- cap, session->s_mds);
- }
- spin_unlock(&ci->i_ceph_lock);
- }
-}
-
-static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
- struct ceph_mds_session *session,
- struct ceph_inode_info *ci)
+static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session,
+ struct ceph_inode_info *ci,
+ u64 oldest_flush_tid)
+ __releases(ci->i_ceph_lock)
+ __acquires(ci->i_ceph_lock)
{
struct inode *inode = &ci->vfs_inode;
struct ceph_cap *cap;
struct ceph_cap_flush *cf;
- struct rb_node *n;
- int delayed = 0;
+ int ret;
u64 first_tid = 0;
- u64 oldest_flush_tid;
- spin_lock(&mdsc->cap_dirty_lock);
- oldest_flush_tid = __get_oldest_flush_tid(mdsc);
- spin_unlock(&mdsc->cap_dirty_lock);
+ list_for_each_entry(cf, &ci->i_cap_flush_list, i_list) {
+ if (cf->tid < first_tid)
+ continue;
- while (true) {
- spin_lock(&ci->i_ceph_lock);
cap = ci->i_auth_cap;
if (!(cap && cap->session == session)) {
- pr_err("%p auth cap %p not mds%d ???\n", inode,
- cap, session->s_mds);
- spin_unlock(&ci->i_ceph_lock);
+ pr_err("%p auth cap %p not mds%d ???\n",
+ inode, cap, session->s_mds);
break;
}
- for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
- cf = rb_entry(n, struct ceph_cap_flush, i_node);
- if (cf->tid >= first_tid)
- break;
- }
- if (!n) {
+ first_tid = cf->tid + 1;
+
+ if (cf->caps) {
+ dout("kick_flushing_caps %p cap %p tid %llu %s\n",
+ inode, cap, cf->tid, ceph_cap_string(cf->caps));
+ ci->i_ceph_flags |= CEPH_I_NODELAY;
+ ret = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
+ __ceph_caps_used(ci),
+ __ceph_caps_wanted(ci),
+ cap->issued | cap->implemented,
+ cf->caps, cf->tid, oldest_flush_tid);
+ if (ret) {
+ pr_err("kick_flushing_caps: error sending "
+ "cap flush, ino (%llx.%llx) "
+ "tid %llu flushing %s\n",
+ ceph_vinop(inode), cf->tid,
+ ceph_cap_string(cf->caps));
+ }
+ } else {
+ struct ceph_cap_snap *capsnap =
+ container_of(cf, struct ceph_cap_snap,
+ cap_flush);
+ dout("kick_flushing_caps %p capsnap %p tid %llu %s\n",
+ inode, capsnap, cf->tid,
+ ceph_cap_string(capsnap->dirty));
+
+ atomic_inc(&capsnap->nref);
spin_unlock(&ci->i_ceph_lock);
- break;
- }
- cf = rb_entry(n, struct ceph_cap_flush, i_node);
+ ret = __send_flush_snap(inode, session, capsnap, cap->mseq,
+ oldest_flush_tid);
+ if (ret < 0) {
+ pr_err("kick_flushing_caps: error sending "
+ "cap flushsnap, ino (%llx.%llx) "
+ "tid %llu follows %llu\n",
+ ceph_vinop(inode), cf->tid,
+ capsnap->follows);
+ }
- first_tid = cf->tid + 1;
+ ceph_put_cap_snap(capsnap);
+ }
- dout("kick_flushing_caps %p cap %p tid %llu %s\n", inode,
- cap, cf->tid, ceph_cap_string(cf->caps));
- delayed |= __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
- __ceph_caps_used(ci),
- __ceph_caps_wanted(ci),
- cap->issued | cap->implemented,
- cf->caps, cf->tid, oldest_flush_tid);
+ spin_lock(&ci->i_ceph_lock);
}
- return delayed;
}
void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
@@ -2175,8 +2163,14 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
{
struct ceph_inode_info *ci;
struct ceph_cap *cap;
+ u64 oldest_flush_tid;
dout("early_kick_flushing_caps mds%d\n", session->s_mds);
+
+ spin_lock(&mdsc->cap_dirty_lock);
+ oldest_flush_tid = __get_oldest_flush_tid(mdsc);
+ spin_unlock(&mdsc->cap_dirty_lock);
+
list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
spin_lock(&ci->i_ceph_lock);
cap = ci->i_auth_cap;
@@ -2196,10 +2190,11 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
*/
if ((cap->issued & ci->i_flushing_caps) !=
ci->i_flushing_caps) {
- spin_unlock(&ci->i_ceph_lock);
- if (!__kick_flushing_caps(mdsc, session, ci))
- continue;
- spin_lock(&ci->i_ceph_lock);
+ ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
+ __kick_flushing_caps(mdsc, session, ci,
+ oldest_flush_tid);
+ } else {
+ ci->i_ceph_flags |= CEPH_I_KICK_FLUSH;
}
spin_unlock(&ci->i_ceph_lock);
@@ -2210,50 +2205,56 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
struct ceph_inode_info *ci;
-
- kick_flushing_capsnaps(mdsc, session);
+ struct ceph_cap *cap;
+ u64 oldest_flush_tid;
dout("kick_flushing_caps mds%d\n", session->s_mds);
+
+ spin_lock(&mdsc->cap_dirty_lock);
+ oldest_flush_tid = __get_oldest_flush_tid(mdsc);
+ spin_unlock(&mdsc->cap_dirty_lock);
+
list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
- int delayed = __kick_flushing_caps(mdsc, session, ci);
- if (delayed) {
- spin_lock(&ci->i_ceph_lock);
- __cap_delay_requeue(mdsc, ci);
+ spin_lock(&ci->i_ceph_lock);
+ cap = ci->i_auth_cap;
+ if (!(cap && cap->session == session)) {
+ pr_err("%p auth cap %p not mds%d ???\n",
+ &ci->vfs_inode, cap, session->s_mds);
spin_unlock(&ci->i_ceph_lock);
+ continue;
+ }
+ if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) {
+ ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
+ __kick_flushing_caps(mdsc, session, ci,
+ oldest_flush_tid);
}
+ spin_unlock(&ci->i_ceph_lock);
}
}
static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
struct inode *inode)
+ __releases(ci->i_ceph_lock)
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_cap *cap;
- spin_lock(&ci->i_ceph_lock);
cap = ci->i_auth_cap;
dout("kick_flushing_inode_caps %p flushing %s\n", inode,
ceph_cap_string(ci->i_flushing_caps));
- __ceph_flush_snaps(ci, &session, 1);
-
- if (ci->i_flushing_caps) {
- int delayed;
-
+ if (!list_empty(&ci->i_cap_flush_list)) {
+ u64 oldest_flush_tid;
spin_lock(&mdsc->cap_dirty_lock);
list_move_tail(&ci->i_flushing_item,
&cap->session->s_cap_flushing);
+ oldest_flush_tid = __get_oldest_flush_tid(mdsc);
spin_unlock(&mdsc->cap_dirty_lock);
+ ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
+ __kick_flushing_caps(mdsc, session, ci, oldest_flush_tid);
spin_unlock(&ci->i_ceph_lock);
-
- delayed = __kick_flushing_caps(mdsc, session, ci);
- if (delayed) {
- spin_lock(&ci->i_ceph_lock);
- __cap_delay_requeue(mdsc, ci);
- spin_unlock(&ci->i_ceph_lock);
- }
} else {
spin_unlock(&ci->i_ceph_lock);
}
@@ -2580,16 +2581,19 @@ void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
* drop cap_snap that is not associated with any snapshot.
* we don't need to send FLUSHSNAP message for it.
*/
-static int ceph_try_drop_cap_snap(struct ceph_cap_snap *capsnap)
+static int ceph_try_drop_cap_snap(struct ceph_inode_info *ci,
+ struct ceph_cap_snap *capsnap)
{
if (!capsnap->need_flush &&
!capsnap->writing && !capsnap->dirty_pages) {
-
dout("dropping cap_snap %p follows %llu\n",
capsnap, capsnap->follows);
+ BUG_ON(capsnap->cap_flush.tid > 0);
ceph_put_snap_context(capsnap->context);
+ if (!list_is_last(&capsnap->ci_item, &ci->i_cap_snaps))
+ ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;
+
list_del(&capsnap->ci_item);
- list_del(&capsnap->flushing_item);
ceph_put_cap_snap(capsnap);
return 1;
}
@@ -2636,7 +2640,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
struct ceph_cap_snap,
ci_item);
capsnap->writing = 0;
- if (ceph_try_drop_cap_snap(capsnap))
+ if (ceph_try_drop_cap_snap(ci, capsnap))
put++;
else if (__ceph_finish_cap_snap(ci, capsnap))
flushsnaps = 1;
@@ -2661,7 +2665,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
if (last && !flushsnaps)
ceph_check_caps(ci, 0, NULL);
else if (flushsnaps)
- ceph_flush_snaps(ci);
+ ceph_flush_snaps(ci, NULL);
if (wake)
wake_up_all(&ci->i_cap_wq);
while (put-- > 0)
@@ -2679,15 +2683,19 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
struct ceph_snap_context *snapc)
{
struct inode *inode = &ci->vfs_inode;
- int last = 0;
- int complete_capsnap = 0;
- int drop_capsnap = 0;
- int found = 0;
struct ceph_cap_snap *capsnap = NULL;
+ int put = 0;
+ bool last = false;
+ bool found = false;
+ bool flush_snaps = false;
+ bool complete_capsnap = false;
spin_lock(&ci->i_ceph_lock);
ci->i_wrbuffer_ref -= nr;
- last = !ci->i_wrbuffer_ref;
+ if (ci->i_wrbuffer_ref == 0) {
+ last = true;
+ put++;
+ }
if (ci->i_head_snapc == snapc) {
ci->i_wrbuffer_ref_head -= nr;
@@ -2707,15 +2715,22 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
} else {
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
if (capsnap->context == snapc) {
- found = 1;
+ found = true;
break;
}
}
BUG_ON(!found);
capsnap->dirty_pages -= nr;
if (capsnap->dirty_pages == 0) {
- complete_capsnap = 1;
- drop_capsnap = ceph_try_drop_cap_snap(capsnap);
+ complete_capsnap = true;
+ if (!capsnap->writing) {
+ if (ceph_try_drop_cap_snap(ci, capsnap)) {
+ put++;
+ } else {
+ ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;
+ flush_snaps = true;
+ }
+ }
}
dout("put_wrbuffer_cap_refs on %p cap_snap %p "
" snap %lld %d/%d -> %d/%d %s%s\n",
@@ -2730,12 +2745,12 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
if (last) {
ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
- iput(inode);
- } else if (complete_capsnap) {
- ceph_flush_snaps(ci);
- wake_up_all(&ci->i_cap_wq);
+ } else if (flush_snaps) {
+ ceph_flush_snaps(ci, NULL);
}
- if (drop_capsnap)
+ if (complete_capsnap)
+ wake_up_all(&ci->i_cap_wq);
+ while (put-- > 0)
iput(inode);
}
@@ -2779,12 +2794,11 @@ static void invalidate_aliases(struct inode *inode)
*/
static void handle_cap_grant(struct ceph_mds_client *mdsc,
struct inode *inode, struct ceph_mds_caps *grant,
- u64 inline_version,
- void *inline_data, int inline_len,
+ struct ceph_string **pns, u64 inline_version,
+ void *inline_data, u32 inline_len,
struct ceph_buffer *xattr_buf,
struct ceph_mds_session *session,
- struct ceph_cap *cap, int issued,
- u32 pool_ns_len)
+ struct ceph_cap *cap, int issued)
__releases(ci->i_ceph_lock)
__releases(mdsc->snap_rwsem)
{
@@ -2895,8 +2909,18 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
if (newcaps & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR)) {
/* file layout may have changed */
- ci->i_layout = grant->layout;
- ci->i_pool_ns_len = pool_ns_len;
+ s64 old_pool = ci->i_layout.pool_id;
+ struct ceph_string *old_ns;
+
+ ceph_file_layout_from_legacy(&ci->i_layout, &grant->layout);
+ old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
+ lockdep_is_held(&ci->i_ceph_lock));
+ rcu_assign_pointer(ci->i_layout.pool_ns, *pns);
+
+ if (ci->i_layout.pool_id != old_pool || *pns != old_ns)
+ ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
+
+ *pns = old_ns;
/* size/truncate_seq? */
queue_trunc = ceph_fill_file_size(inode, issued,
@@ -2979,13 +3003,13 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
fill_inline = true;
}
- spin_unlock(&ci->i_ceph_lock);
-
if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
- kick_flushing_inode_caps(mdsc, session, inode);
- up_read(&mdsc->snap_rwsem);
if (newcaps & ~issued)
wake = true;
+ kick_flushing_inode_caps(mdsc, session, inode);
+ up_read(&mdsc->snap_rwsem);
+ } else {
+ spin_unlock(&ci->i_ceph_lock);
}
if (fill_inline)
@@ -3029,23 +3053,24 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
- struct ceph_cap_flush *cf;
- struct rb_node *n;
+ struct ceph_cap_flush *cf, *tmp_cf;
LIST_HEAD(to_remove);
unsigned seq = le32_to_cpu(m->seq);
int dirty = le32_to_cpu(m->dirty);
int cleaned = 0;
- int drop = 0;
+ bool drop = false;
+ bool wake_ci = false;
+ bool wake_mdsc = false;
- n = rb_first(&ci->i_cap_flush_tree);
- while (n) {
- cf = rb_entry(n, struct ceph_cap_flush, i_node);
- n = rb_next(&cf->i_node);
+ list_for_each_entry_safe(cf, tmp_cf, &ci->i_cap_flush_list, i_list) {
if (cf->tid == flush_tid)
cleaned = cf->caps;
+ if (cf->caps == 0) /* capsnap */
+ continue;
if (cf->tid <= flush_tid) {
- rb_erase(&cf->i_node, &ci->i_cap_flush_tree);
- list_add_tail(&cf->list, &to_remove);
+ if (__finish_cap_flush(NULL, ci, cf))
+ wake_ci = true;
+ list_add_tail(&cf->i_list, &to_remove);
} else {
cleaned &= ~cf->caps;
if (!cleaned)
@@ -3066,31 +3091,29 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
spin_lock(&mdsc->cap_dirty_lock);
- if (!list_empty(&to_remove)) {
- list_for_each_entry(cf, &to_remove, list)
- rb_erase(&cf->g_node, &mdsc->cap_flush_tree);
-
- n = rb_first(&mdsc->cap_flush_tree);
- cf = n ? rb_entry(n, struct ceph_cap_flush, g_node) : NULL;
- if (!cf || cf->tid > flush_tid)
- wake_up_all(&mdsc->cap_flushing_wq);
+ list_for_each_entry(cf, &to_remove, i_list) {
+ if (__finish_cap_flush(mdsc, NULL, cf))
+ wake_mdsc = true;
}
if (ci->i_flushing_caps == 0) {
- list_del_init(&ci->i_flushing_item);
- if (!list_empty(&session->s_cap_flushing))
- dout(" mds%d still flushing cap on %p\n",
- session->s_mds,
- &list_entry(session->s_cap_flushing.next,
- struct ceph_inode_info,
- i_flushing_item)->vfs_inode);
+ if (list_empty(&ci->i_cap_flush_list)) {
+ list_del_init(&ci->i_flushing_item);
+ if (!list_empty(&session->s_cap_flushing)) {
+ dout(" mds%d still flushing cap on %p\n",
+ session->s_mds,
+ &list_first_entry(&session->s_cap_flushing,
+ struct ceph_inode_info,
+ i_flushing_item)->vfs_inode);
+ }
+ }
mdsc->num_cap_flushing--;
dout(" inode %p now !flushing\n", inode);
if (ci->i_dirty_caps == 0) {
dout(" inode %p now clean\n", inode);
BUG_ON(!list_empty(&ci->i_dirty_item));
- drop = 1;
+ drop = true;
if (ci->i_wr_ref == 0 &&
ci->i_wrbuffer_ref_head == 0) {
BUG_ON(!ci->i_head_snapc);
@@ -3102,17 +3125,21 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
}
}
spin_unlock(&mdsc->cap_dirty_lock);
- wake_up_all(&ci->i_cap_wq);
out:
spin_unlock(&ci->i_ceph_lock);
while (!list_empty(&to_remove)) {
cf = list_first_entry(&to_remove,
- struct ceph_cap_flush, list);
- list_del(&cf->list);
+ struct ceph_cap_flush, i_list);
+ list_del(&cf->i_list);
ceph_free_cap_flush(cf);
}
+
+ if (wake_ci)
+ wake_up_all(&ci->i_cap_wq);
+ if (wake_mdsc)
+ wake_up_all(&mdsc->cap_flushing_wq);
if (drop)
iput(inode);
}
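The tail of handle_cap_flush_ack() (and of handle_cap_flushsnap_ack() below) now follows one discipline: decide everything while i_ceph_lock is held, record the decisions in locals (wake_ci, wake_mdsc, drop), and perform the wakeups and reference drops only after unlocking. A minimal pthread rendition of that shape:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  waitq = PTHREAD_COND_INITIALIZER;

static void finish_one(void)
{
	bool wake = false;

	pthread_mutex_lock(&lock);
	/* ... unlink entries, update counters ... */
	wake = true;			/* record the decision only */
	pthread_mutex_unlock(&lock);

	if (wake)			/* side effects after unlock */
		pthread_cond_broadcast(&waitq);
}

int main(void)
{
	finish_one();
	return 0;
}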
@@ -3131,7 +3158,9 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
u64 follows = le64_to_cpu(m->snap_follows);
struct ceph_cap_snap *capsnap;
- int drop = 0;
+ bool flushed = false;
+ bool wake_ci = false;
+ bool wake_mdsc = false;
dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
inode, ci, session->s_mds, follows);
@@ -3139,30 +3168,47 @@ static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
spin_lock(&ci->i_ceph_lock);
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
if (capsnap->follows == follows) {
- if (capsnap->flush_tid != flush_tid) {
+ if (capsnap->cap_flush.tid != flush_tid) {
dout(" cap_snap %p follows %lld tid %lld !="
" %lld\n", capsnap, follows,
- flush_tid, capsnap->flush_tid);
+ flush_tid, capsnap->cap_flush.tid);
break;
}
- WARN_ON(capsnap->dirty_pages || capsnap->writing);
- dout(" removing %p cap_snap %p follows %lld\n",
- inode, capsnap, follows);
- ceph_put_snap_context(capsnap->context);
- list_del(&capsnap->ci_item);
- list_del(&capsnap->flushing_item);
- ceph_put_cap_snap(capsnap);
- wake_up_all(&mdsc->cap_flushing_wq);
- drop = 1;
+ flushed = true;
break;
} else {
dout(" skipping cap_snap %p follows %lld\n",
capsnap, capsnap->follows);
}
}
+ if (flushed) {
+ WARN_ON(capsnap->dirty_pages || capsnap->writing);
+ dout(" removing %p cap_snap %p follows %lld\n",
+ inode, capsnap, follows);
+ list_del(&capsnap->ci_item);
+ if (__finish_cap_flush(NULL, ci, &capsnap->cap_flush))
+ wake_ci = true;
+
+ spin_lock(&mdsc->cap_dirty_lock);
+
+ if (list_empty(&ci->i_cap_flush_list))
+ list_del_init(&ci->i_flushing_item);
+
+ if (__finish_cap_flush(mdsc, NULL, &capsnap->cap_flush))
+ wake_mdsc = true;
+
+ spin_unlock(&mdsc->cap_dirty_lock);
+ }
spin_unlock(&ci->i_ceph_lock);
- if (drop)
+ if (flushed) {
+ ceph_put_snap_context(capsnap->context);
+ ceph_put_cap_snap(capsnap);
+ if (wake_ci)
+ wake_up_all(&ci->i_cap_wq);
+ if (wake_mdsc)
+ wake_up_all(&mdsc->cap_flushing_wq);
iput(inode);
+ }
}
/*
@@ -3267,7 +3313,8 @@ retry:
tcap->implemented |= issued;
if (cap == ci->i_auth_cap)
ci->i_auth_cap = tcap;
- if (ci->i_flushing_caps && ci->i_auth_cap == tcap) {
+ if (!list_empty(&ci->i_cap_flush_list) &&
+ ci->i_auth_cap == tcap) {
spin_lock(&mdsc->cap_dirty_lock);
list_move_tail(&ci->i_flushing_item,
&tcap->session->s_cap_flushing);
@@ -3420,20 +3467,18 @@ void ceph_handle_caps(struct ceph_mds_session *session,
struct ceph_cap *cap;
struct ceph_mds_caps *h;
struct ceph_mds_cap_peer *peer = NULL;
- struct ceph_snap_realm *realm;
+ struct ceph_snap_realm *realm = NULL;
+ struct ceph_string *pool_ns = NULL;
int mds = session->s_mds;
int op, issued;
u32 seq, mseq;
struct ceph_vino vino;
- u64 cap_id;
- u64 size, max_size;
u64 tid;
u64 inline_version = 0;
void *inline_data = NULL;
u32 inline_len = 0;
void *snaptrace;
size_t snaptrace_len;
- u32 pool_ns_len = 0;
void *p, *end;
dout("handle_caps from mds%d\n", mds);
@@ -3447,11 +3492,8 @@ void ceph_handle_caps(struct ceph_mds_session *session,
op = le32_to_cpu(h->op);
vino.ino = le64_to_cpu(h->ino);
vino.snap = CEPH_NOSNAP;
- cap_id = le64_to_cpu(h->cap_id);
seq = le32_to_cpu(h->seq);
mseq = le32_to_cpu(h->migrate_seq);
- size = le64_to_cpu(h->size);
- max_size = le64_to_cpu(h->max_size);
snaptrace = h + 1;
snaptrace_len = le32_to_cpu(h->snap_trace_len);
@@ -3490,6 +3532,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
u64 flush_tid;
u32 caller_uid, caller_gid;
u32 osd_epoch_barrier;
+ u32 pool_ns_len;
/* version >= 5 */
ceph_decode_32_safe(&p, end, osd_epoch_barrier, bad);
/* version >= 6 */
@@ -3499,6 +3542,11 @@ void ceph_handle_caps(struct ceph_mds_session *session,
ceph_decode_32_safe(&p, end, caller_gid, bad);
/* version >= 8 */
ceph_decode_32_safe(&p, end, pool_ns_len, bad);
+ if (pool_ns_len > 0) {
+ ceph_decode_need(&p, end, pool_ns_len, bad);
+ pool_ns = ceph_find_or_create_string(p, pool_ns_len);
+ p += pool_ns_len;
+ }
}
/* lookup ino */
@@ -3519,7 +3567,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
cap = ceph_get_cap(mdsc, NULL);
cap->cap_ino = vino.ino;
cap->queue_release = 1;
- cap->cap_id = cap_id;
+ cap->cap_id = le64_to_cpu(h->cap_id);
cap->mseq = mseq;
cap->seq = seq;
spin_lock(&session->s_cap_lock);
@@ -3554,10 +3602,9 @@ void ceph_handle_caps(struct ceph_mds_session *session,
}
handle_cap_import(mdsc, inode, h, peer, session,
&cap, &issued);
- handle_cap_grant(mdsc, inode, h,
+ handle_cap_grant(mdsc, inode, h, &pool_ns,
inline_version, inline_data, inline_len,
- msg->middle, session, cap, issued,
- pool_ns_len);
+ msg->middle, session, cap, issued);
if (realm)
ceph_put_snap_realm(mdsc, realm);
goto done_unlocked;
@@ -3579,10 +3626,9 @@ void ceph_handle_caps(struct ceph_mds_session *session,
case CEPH_CAP_OP_GRANT:
__ceph_caps_issued(ci, &issued);
issued |= __ceph_caps_dirty(ci);
- handle_cap_grant(mdsc, inode, h,
+ handle_cap_grant(mdsc, inode, h, &pool_ns,
inline_version, inline_data, inline_len,
- msg->middle, session, cap, issued,
- pool_ns_len);
+ msg->middle, session, cap, issued);
goto done_unlocked;
case CEPH_CAP_OP_FLUSH_ACK:
@@ -3613,6 +3659,7 @@ done:
mutex_unlock(&session->s_mutex);
done_unlocked:
iput(inode);
+ ceph_put_string(pool_ns);
return;
bad:
@@ -3673,6 +3720,16 @@ void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
dout("flush_dirty_caps done\n");
}
+void __ceph_get_fmode(struct ceph_inode_info *ci, int fmode)
+{
+ int i;
+ int bits = (fmode << 1) | 1;
+ for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
+ if (bits & (1 << i))
+ ci->i_nr_by_mode[i]++;
+ }
+}
+
/*
* Drop open file reference. If we were the last open file,
* we may need to release capabilities to the MDS (or schedule
@@ -3680,15 +3737,20 @@ void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
*/
void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
{
- struct inode *inode = &ci->vfs_inode;
- int last = 0;
-
+ int i, last = 0;
+ int bits = (fmode << 1) | 1;
spin_lock(&ci->i_ceph_lock);
- dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
- ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
- BUG_ON(ci->i_nr_by_mode[fmode] == 0);
- if (--ci->i_nr_by_mode[fmode] == 0)
- last++;
+ for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
+ if (bits & (1 << i)) {
+ BUG_ON(ci->i_nr_by_mode[i] == 0);
+ if (--ci->i_nr_by_mode[i] == 0)
+ last++;
+ }
+ }
+ dout("put_fmode %p fmode %d {%d,%d,%d,%d}\n",
+ &ci->vfs_inode, fmode,
+ ci->i_nr_by_mode[0], ci->i_nr_by_mode[1],
+ ci->i_nr_by_mode[2], ci->i_nr_by_mode[3]);
spin_unlock(&ci->i_ceph_lock);
if (last && ci->i_vino.snap == CEPH_NOSNAP)
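Taken together, __ceph_caps_file_wanted(), __ceph_get_fmode() and ceph_put_fmode() replace one refcount per open-mode value with one refcount per mode bit, where bit 0 is a pin taken by every open ((fmode << 1) | 1). A userspace model of the bookkeeping; the constants only mirror CEPH_FILE_MODE_* in spirit:

#include <assert.h>

#define MODE_RD   1
#define MODE_WR   2
#define MODE_BITS 4			/* pin, rd, wr, lazy */

static int nr_by_mode[MODE_BITS];

static void get_fmode(int fmode)
{
	int bits = (fmode << 1) | 1;	/* every open also takes the pin */

	for (int i = 0; i < MODE_BITS; i++)
		if (bits & (1 << i))
			nr_by_mode[i]++;
}

static void put_fmode(int fmode)
{
	int bits = (fmode << 1) | 1;

	for (int i = 0; i < MODE_BITS; i++)
		if (bits & (1 << i)) {
			assert(nr_by_mode[i] > 0);
			nr_by_mode[i]--;
		}
}

static int wanted(void)
{
	int bits = 0;

	for (int i = 0; i < MODE_BITS; i++)
		if (nr_by_mode[i])
			bits |= 1 << i;
	return bits >> 1;		/* drop the pin bit again */
}

int main(void)
{
	get_fmode(MODE_RD);
	get_fmode(MODE_RD | MODE_WR);
	assert(wanted() == (MODE_RD | MODE_WR));
	put_fmode(MODE_RD | MODE_WR);
	assert(wanted() == MODE_RD);
	put_fmode(MODE_RD);
	assert(wanted() == 0);
	return 0;
}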
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 6e0fedf6713b..c64a0b794d49 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -59,7 +59,7 @@ int ceph_init_dentry(struct dentry *dentry)
di->dentry = dentry;
di->lease_session = NULL;
- dentry->d_time = jiffies;
+ di->time = jiffies;
/* avoid reordering d_fsdata setup so that the check above is safe */
smp_mb();
dentry->d_fsdata = di;
@@ -1124,7 +1124,7 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
spin_lock(&dentry->d_lock);
- dentry->d_time = jiffies;
+ ceph_dentry(dentry)->time = jiffies;
ceph_dentry(dentry)->lease_shared_gen = 0;
spin_unlock(&dentry->d_lock);
}
@@ -1133,7 +1133,8 @@ void ceph_invalidate_dentry_lease(struct dentry *dentry)
* Check if dentry lease is valid. If not, delete the lease. Try to
 * renew if the lease is more than half up.
*/
-static int dentry_lease_is_valid(struct dentry *dentry)
+static int dentry_lease_is_valid(struct dentry *dentry, unsigned int flags,
+ struct inode *dir)
{
struct ceph_dentry_info *di;
struct ceph_mds_session *s;
@@ -1141,12 +1142,11 @@ static int dentry_lease_is_valid(struct dentry *dentry)
u32 gen;
unsigned long ttl;
struct ceph_mds_session *session = NULL;
- struct inode *dir = NULL;
u32 seq = 0;
spin_lock(&dentry->d_lock);
di = ceph_dentry(dentry);
- if (di->lease_session) {
+ if (di && di->lease_session) {
s = di->lease_session;
spin_lock(&s->s_gen_ttl_lock);
gen = s->s_cap_gen;
@@ -1154,17 +1154,24 @@ static int dentry_lease_is_valid(struct dentry *dentry)
spin_unlock(&s->s_gen_ttl_lock);
if (di->lease_gen == gen &&
- time_before(jiffies, dentry->d_time) &&
+ time_before(jiffies, di->time) &&
time_before(jiffies, ttl)) {
valid = 1;
if (di->lease_renew_after &&
time_after(jiffies, di->lease_renew_after)) {
- /* we should renew */
- dir = d_inode(dentry->d_parent);
- session = ceph_get_mds_session(s);
- seq = di->lease_seq;
- di->lease_renew_after = 0;
- di->lease_renew_from = jiffies;
+ /*
+ * We should renew. If we're in RCU walk mode
+ * though, we can't do that so just return
+ * -ECHILD.
+ */
+ if (flags & LOOKUP_RCU) {
+ valid = -ECHILD;
+ } else {
+ session = ceph_get_mds_session(s);
+ seq = di->lease_seq;
+ di->lease_renew_after = 0;
+ di->lease_renew_from = jiffies;
+ }
}
}
}
@@ -1207,15 +1214,19 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
struct dentry *parent;
struct inode *dir;
- if (flags & LOOKUP_RCU)
- return -ECHILD;
+ if (flags & LOOKUP_RCU) {
+ parent = ACCESS_ONCE(dentry->d_parent);
+ dir = d_inode_rcu(parent);
+ if (!dir)
+ return -ECHILD;
+ } else {
+ parent = dget_parent(dentry);
+ dir = d_inode(parent);
+ }
dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
dentry, d_inode(dentry), ceph_dentry(dentry)->offset);
- parent = dget_parent(dentry);
- dir = d_inode(parent);
-
/* always trust cached snapped dentries, snapdir dentry */
if (ceph_snap(dir) != CEPH_NOSNAP) {
dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
@@ -1224,12 +1235,16 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
} else if (d_really_is_positive(dentry) &&
ceph_snap(d_inode(dentry)) == CEPH_SNAPDIR) {
valid = 1;
- } else if (dentry_lease_is_valid(dentry) ||
- dir_lease_is_valid(dir, dentry)) {
- if (d_really_is_positive(dentry))
- valid = ceph_is_any_caps(d_inode(dentry));
- else
- valid = 1;
+ } else {
+ valid = dentry_lease_is_valid(dentry, flags, dir);
+ if (valid == -ECHILD)
+ return valid;
+ if (valid || dir_lease_is_valid(dir, dentry)) {
+ if (d_really_is_positive(dentry))
+ valid = ceph_is_any_caps(d_inode(dentry));
+ else
+ valid = 1;
+ }
}
if (!valid) {
@@ -1238,6 +1253,9 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
struct ceph_mds_request *req;
int op, mask, err;
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
op = ceph_snap(dir) == CEPH_SNAPDIR ?
CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
@@ -1273,7 +1291,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
ceph_dir_clear_complete(dir);
}
- dput(parent);
+ if (!(flags & LOOKUP_RCU))
+ dput(parent);
return valid;
}
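ceph_d_revalidate() is now rcu-walk aware: under LOOKUP_RCU it limits itself to lock-free checks (d_inode_rcu() on ACCESS_ONCE(d_parent), reading the lease fields) and returns -ECHILD as soon as blocking work, lease renewal or a synchronous MDS lookup, would be required, so the VFS retries in ref-walk mode. The control flow in miniature; this is a hypothetical helper, not the kernel function:

#include <errno.h>
#include <stdbool.h>

#define LOOKUP_RCU 0x40			/* illustrative flag value */

static int revalidate_sketch(unsigned int flags, bool lease_valid,
			     bool needs_renew, bool needs_server)
{
	if (lease_valid && !needs_renew)
		return 1;		/* fast path: nothing blocks */
	if (flags & LOOKUP_RCU)
		return -ECHILD;		/* retry in ref-walk mode */
	if (needs_renew) {
		/* ... send a lease renewal to the MDS ... */
	}
	if (needs_server) {
		/* ... synchronous LOOKUP round trip ... */
	}
	return 1;
}

Relatedly, ceph_d_release() below now clears d_fsdata under d_lock before freeing the ceph_dentry_info, and dentry_lease_is_valid() above tolerates a NULL di, since an rcu-walk caller can race with dentry teardown.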
@@ -1286,10 +1305,14 @@ static void ceph_d_release(struct dentry *dentry)
dout("d_release %p\n", dentry);
ceph_dentry_lru_del(dentry);
+
+ spin_lock(&dentry->d_lock);
+ dentry->d_fsdata = NULL;
+ spin_unlock(&dentry->d_lock);
+
if (di->lease_session)
ceph_put_mds_session(di->lease_session);
kmem_cache_free(ceph_dentry_cachep, di);
- dentry->d_fsdata = NULL;
}
static int ceph_snapdir_d_revalidate(struct dentry *dentry,
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index ce2f5795e44b..0f5375d8e030 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -394,7 +394,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
- if (d_unhashed(dentry)) {
+ if (d_in_lookup(dentry)) {
dn = ceph_finish_lookup(req, dentry, err);
if (IS_ERR(dn))
err = PTR_ERR(dn);
@@ -708,7 +708,7 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
}
}
- ceph_put_page_vector(osd_data->pages, num_pages, false);
+ ceph_put_page_vector(osd_data->pages, num_pages, !aio_req->write);
ceph_osdc_put_request(req);
if (rc < 0)
@@ -821,6 +821,54 @@ static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
}
}
+/*
+ * Wait on any unsafe replies for the given inode. First wait on the
+ * newest request, and make that the upper bound. Then, if there are
+ * more requests, keep waiting on the oldest as long as it is still older
+ * than that upper bound.
+ */
+void ceph_sync_write_wait(struct inode *inode)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct list_head *head = &ci->i_unsafe_writes;
+ struct ceph_osd_request *req;
+ u64 last_tid;
+
+ if (!S_ISREG(inode->i_mode))
+ return;
+
+ spin_lock(&ci->i_unsafe_lock);
+ if (list_empty(head))
+ goto out;
+
+ /* set upper bound as _last_ entry in chain */
+
+ req = list_last_entry(head, struct ceph_osd_request,
+ r_unsafe_item);
+ last_tid = req->r_tid;
+
+ do {
+ ceph_osdc_get_request(req);
+ spin_unlock(&ci->i_unsafe_lock);
+
+ dout("sync_write_wait on tid %llu (until %llu)\n",
+ req->r_tid, last_tid);
+ wait_for_completion(&req->r_safe_completion);
+ ceph_osdc_put_request(req);
+
+ spin_lock(&ci->i_unsafe_lock);
+ /*
+ * from here on look at first entry in chain, since we
+ * only want to wait for anything older than last_tid
+ */
+ if (list_empty(head))
+ break;
+ req = list_first_entry(head, struct ceph_osd_request,
+ r_unsafe_item);
+ } while (req->r_tid < last_tid);
+out:
+ spin_unlock(&ci->i_unsafe_lock);
+}
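A minimal userspace sketch of the bounding logic in ceph_sync_write_wait(), with made-up tids: pin the newest pending tid as the upper bound, then drain from the oldest end without ever waiting past that bound, so writes that arrive while we sleep are ignored.

#include <stdio.h>

int main(void)
{
        /* pending unsafe-write tids, oldest first (hypothetical data) */
        unsigned long long tids[] = { 3, 7, 12, 15 };
        int head = 0, tail = 3;
        unsigned long long last_tid = tids[tail];   /* upper bound = newest */

        /* wait on the newest request first, making it the bound */
        printf("wait on tid %llu (until %llu)\n", tids[tail], last_tid);
        tail--;                                     /* it completed */

        /* then drain from the oldest end, never past the bound */
        while (head <= tail && tids[head] < last_tid) {
                printf("wait on tid %llu (until %llu)\n",
                       tids[head], last_tid);
                head++;
        }
        return 0;
}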
static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
@@ -964,7 +1012,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
len = ret;
}
- ceph_put_page_vector(pages, num_pages, false);
+ ceph_put_page_vector(pages, num_pages, !write);
ceph_osdc_put_request(req);
if (ret < 0)
@@ -985,6 +1033,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
}
if (aio_req) {
+ LIST_HEAD(osd_reqs);
+
if (aio_req->num_reqs == 0) {
kfree(aio_req);
return ret;
@@ -993,8 +1043,9 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
CEPH_CAP_FILE_RD);
- while (!list_empty(&aio_req->osd_reqs)) {
- req = list_first_entry(&aio_req->osd_reqs,
+ list_splice(&aio_req->osd_reqs, &osd_reqs);
+ while (!list_empty(&osd_reqs)) {
+ req = list_first_entry(&osd_reqs,
struct ceph_osd_request,
r_unsafe_item);
list_del_init(&req->r_unsafe_item);
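The list_splice() here appears to guard against touching aio_req after the final request has been submitted (its completion can free aio_req); stealing the whole chain onto a stack-local list first makes the drain independent of the shared structure. A toy singly-linked analogue, assuming nothing about the kernel list API:

#include <stdio.h>

struct node { int id; struct node *next; };

int main(void)
{
        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct node *shared = &a;       /* e.g. aio_req->osd_reqs */
        struct node *priv;

        priv = shared;                  /* splice: steal every entry */
        shared = NULL;                  /* shared list is now empty */

        while (priv) {                  /* drain the private copy */
                struct node *n = priv;
                priv = priv->next;
                printf("submit req %d\n", n->id);
        }
        return shared != NULL;          /* 0: nothing left behind */
}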
@@ -1448,16 +1499,14 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
struct inode *inode = file->f_mapping->host;
loff_t i_size;
- int ret;
+ loff_t ret;
inode_lock(inode);
if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
- if (ret < 0) {
- offset = ret;
+ if (ret < 0)
goto out;
- }
}
i_size = i_size_read(inode);
@@ -1473,7 +1522,7 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
* write() or lseek() might have altered it
*/
if (offset == 0) {
- offset = file->f_pos;
+ ret = file->f_pos;
goto out;
}
offset += file->f_pos;
@@ -1493,11 +1542,11 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
break;
}
- offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
+ ret = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
out:
inode_unlock(inode);
- return offset;
+ return ret;
}
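Widening ret from int to loff_t keeps error codes and file offsets in one variable without risking truncation of offsets past 2^31. A quick userspace demonstration, assuming the common 32-bit int:

#include <stdio.h>

static int bad_llseek(long long offset)     /* int return truncates */
{
        return offset;
}

static long long good_llseek(long long offset)
{
        return offset;
}

int main(void)
{
        long long off = 3LL << 30;              /* 3 GiB */
        printf("through int:    %d\n", bad_llseek(off));
        printf("through loff_t: %lld\n", good_llseek(off));
        return 0;
}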
static inline void ceph_zero_partial_page(
@@ -1583,9 +1632,9 @@ static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
int ret = 0;
struct ceph_inode_info *ci = ceph_inode(inode);
- s32 stripe_unit = ceph_file_layout_su(ci->i_layout);
- s32 stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
- s32 object_size = ceph_file_layout_object_size(ci->i_layout);
+ s32 stripe_unit = ci->i_layout.stripe_unit;
+ s32 stripe_count = ci->i_layout.stripe_count;
+ s32 object_size = ci->i_layout.object_size;
u64 object_set_size = object_size * stripe_count;
u64 nearly, t;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index f059b5997072..dd3a6dbf71eb 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -446,7 +446,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
ci->i_symlink = NULL;
memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
- ci->i_pool_ns_len = 0;
+ RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);
ci->i_fragtree = RB_ROOT;
mutex_init(&ci->i_fragtree_mutex);
@@ -468,7 +468,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
INIT_LIST_HEAD(&ci->i_dirty_item);
INIT_LIST_HEAD(&ci->i_flushing_item);
ci->i_prealloc_cap_flush = NULL;
- ci->i_cap_flush_tree = RB_ROOT;
+ INIT_LIST_HEAD(&ci->i_cap_flush_list);
init_waitqueue_head(&ci->i_cap_wq);
ci->i_hold_caps_min = 0;
ci->i_hold_caps_max = 0;
@@ -477,7 +477,7 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
ci->i_head_snapc = NULL;
ci->i_snap_caps = 0;
- for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
+ for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
ci->i_nr_by_mode[i] = 0;
mutex_init(&ci->i_truncate_mutex);
@@ -570,6 +570,8 @@ void ceph_destroy_inode(struct inode *inode)
if (ci->i_xattrs.prealloc_blob)
ceph_buffer_put(ci->i_xattrs.prealloc_blob);
+ ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
+
call_rcu(&inode->i_rcu, ceph_i_callback);
}
@@ -583,6 +585,14 @@ int ceph_drop_inode(struct inode *inode)
return 1;
}
+void ceph_evict_inode(struct inode *inode)
+{
+ /* wait unsafe sync writes */
+ ceph_sync_write_wait(inode);
+ truncate_inode_pages_final(&inode->i_data);
+ clear_inode(inode);
+}
+
static inline blkcnt_t calc_inode_blocks(u64 size)
{
return (size + (1<<9) - 1) >> 9;
@@ -733,6 +743,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
int issued = 0, implemented, new_issued;
struct timespec mtime, atime, ctime;
struct ceph_buffer *xattr_blob = NULL;
+ struct ceph_string *pool_ns = NULL;
struct ceph_cap *new_cap = NULL;
int err = 0;
bool wake = false;
@@ -760,6 +771,10 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
iinfo->xattr_len);
}
+ if (iinfo->pool_ns_len > 0)
+ pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
+ iinfo->pool_ns_len);
+
spin_lock(&ci->i_ceph_lock);
/*
@@ -814,10 +829,18 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
if (new_version ||
(new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
- if (ci->i_layout.fl_pg_pool != info->layout.fl_pg_pool)
+ s64 old_pool = ci->i_layout.pool_id;
+ struct ceph_string *old_ns;
+
+ ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
+ old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
+ lockdep_is_held(&ci->i_ceph_lock));
+ rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);
+
+ if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
- ci->i_layout = info->layout;
- ci->i_pool_ns_len = iinfo->pool_ns_len;
+
+ pool_ns = old_ns;
queue_trunc = ceph_fill_file_size(inode, issued,
le32_to_cpu(info->truncate_seq),
@@ -985,6 +1008,7 @@ out:
ceph_put_cap(mdsc, new_cap);
if (xattr_blob)
ceph_buffer_put(xattr_blob);
+ ceph_put_string(pool_ns);
return err;
}
@@ -1018,7 +1042,7 @@ static void update_dentry_lease(struct dentry *dentry,
goto out_unlock;
if (di->lease_gen == session->s_cap_gen &&
- time_before(ttl, dentry->d_time))
+ time_before(ttl, di->time))
goto out_unlock; /* we already have a newer lease. */
if (di->lease_session && di->lease_session != session)
@@ -1032,7 +1056,7 @@ static void update_dentry_lease(struct dentry *dentry,
di->lease_seq = le32_to_cpu(lease->seq);
di->lease_renew_after = half_ttl;
di->lease_renew_from = 0;
- dentry->d_time = ttl;
+ di->time = ttl;
out_unlock:
spin_unlock(&dentry->d_lock);
return;
@@ -1164,7 +1188,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
dname.name = rinfo->dname;
dname.len = rinfo->dname_len;
- dname.hash = full_name_hash(dname.name, dname.len);
+ dname.hash = full_name_hash(parent, dname.name, dname.len);
vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
retry_lookup:
@@ -1508,7 +1532,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
dname.name = rde->name;
dname.len = rde->name_len;
- dname.hash = full_name_hash(dname.name, dname.len);
+ dname.hash = full_name_hash(parent, dname.name, dname.len);
vino.ino = le64_to_cpu(rde->inode.in->ino);
vino.snap = le64_to_cpu(rde->inode.in->snapid);
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index be6b1657b1af..7d752d53353a 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -21,10 +21,10 @@ static long ceph_ioctl_get_layout(struct file *file, void __user *arg)
err = ceph_do_getattr(file_inode(file), CEPH_STAT_CAP_LAYOUT, false);
if (!err) {
- l.stripe_unit = ceph_file_layout_su(ci->i_layout);
- l.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
- l.object_size = ceph_file_layout_object_size(ci->i_layout);
- l.data_pool = le32_to_cpu(ci->i_layout.fl_pg_pool);
+ l.stripe_unit = ci->i_layout.stripe_unit;
+ l.stripe_count = ci->i_layout.stripe_count;
+ l.object_size = ci->i_layout.object_size;
+ l.data_pool = ci->i_layout.pool_id;
l.preferred_osd = (s32)-1;
if (copy_to_user(arg, &l, sizeof(l)))
return -EFAULT;
@@ -82,19 +82,19 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
if (l.stripe_count)
nl.stripe_count = l.stripe_count;
else
- nl.stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
+ nl.stripe_count = ci->i_layout.stripe_count;
if (l.stripe_unit)
nl.stripe_unit = l.stripe_unit;
else
- nl.stripe_unit = ceph_file_layout_su(ci->i_layout);
+ nl.stripe_unit = ci->i_layout.stripe_unit;
if (l.object_size)
nl.object_size = l.object_size;
else
- nl.object_size = ceph_file_layout_object_size(ci->i_layout);
+ nl.object_size = ci->i_layout.object_size;
if (l.data_pool)
nl.data_pool = l.data_pool;
else
- nl.data_pool = ceph_file_layout_pg_pool(ci->i_layout);
+ nl.data_pool = ci->i_layout.pool_id;
/* this is obsolete, and always -1 */
nl.preferred_osd = le64_to_cpu(-1);
@@ -183,7 +183,7 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
struct ceph_osd_client *osdc =
&ceph_sb_to_client(inode->i_sb)->client->osdc;
struct ceph_object_locator oloc;
- struct ceph_object_id oid;
+ CEPH_DEFINE_OID_ONSTACK(oid);
u64 len = 1, olen;
u64 tmp;
struct ceph_pg pgid;
@@ -202,8 +202,8 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
return -EIO;
}
dl.file_offset -= dl.object_offset;
- dl.object_size = ceph_file_layout_object_size(ci->i_layout);
- dl.block_size = ceph_file_layout_su(ci->i_layout);
+ dl.object_size = ci->i_layout.object_size;
+ dl.block_size = ci->i_layout.stripe_unit;
/* block_offset = object_offset % block_size */
tmp = dl.object_offset;
@@ -212,10 +212,13 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
snprintf(dl.object_name, sizeof(dl.object_name), "%llx.%08llx",
ceph_ino(inode), dl.object_no);
- oloc.pool = ceph_file_layout_pg_pool(ci->i_layout);
+ oloc.pool = ci->i_layout.pool_id;
+ oloc.pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
ceph_oid_printf(&oid, "%s", dl.object_name);
r = ceph_object_locator_to_pg(osdc->osdmap, &oid, &oloc, &pgid);
+
+ ceph_oloc_destroy(&oloc);
if (r < 0) {
up_read(&osdc->lock);
return r;
@@ -247,9 +250,8 @@ static long ceph_ioctl_lazyio(struct file *file)
if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
spin_lock(&ci->i_ceph_lock);
- ci->i_nr_by_mode[fi->fmode]--;
fi->fmode |= CEPH_FILE_MODE_LAZY;
- ci->i_nr_by_mode[fi->fmode]++;
+ ci->i_nr_by_mode[ffs(CEPH_FILE_MODE_LAZY)]++;
spin_unlock(&ci->i_ceph_lock);
dout("ioctl_layzio: file %p marked lazy\n", file);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 2103b823bec0..f72d4ae303b2 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -48,7 +48,7 @@
struct ceph_reconnect_state {
int nr_caps;
struct ceph_pagelist *pagelist;
- bool flock;
+ unsigned msg_version;
};
static void __wake_requests(struct ceph_mds_client *mdsc,
@@ -100,12 +100,15 @@ static int parse_reply_info_in(void **p, void *end,
} else
info->inline_version = CEPH_INLINE_NONE;
+ info->pool_ns_len = 0;
+ info->pool_ns_data = NULL;
if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
- ceph_decode_need(p, end, info->pool_ns_len, bad);
- *p += info->pool_ns_len;
- } else {
- info->pool_ns_len = 0;
+ if (info->pool_ns_len > 0) {
+ ceph_decode_need(p, end, info->pool_ns_len, bad);
+ info->pool_ns_data = *p;
+ *p += info->pool_ns_len;
+ }
}
return 0;
@@ -469,7 +472,6 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
s->s_cap_iterator = NULL;
INIT_LIST_HEAD(&s->s_cap_releases);
INIT_LIST_HEAD(&s->s_cap_flushing);
- INIT_LIST_HEAD(&s->s_cap_snaps_flushing);
dout("register_session mds%d\n", mds);
if (mds >= mdsc->max_sessions) {
@@ -1145,19 +1147,17 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
invalidate = true;
- while (true) {
- struct rb_node *n = rb_first(&ci->i_cap_flush_tree);
- if (!n)
- break;
- cf = rb_entry(n, struct ceph_cap_flush, i_node);
- rb_erase(&cf->i_node, &ci->i_cap_flush_tree);
- list_add(&cf->list, &to_remove);
+ while (!list_empty(&ci->i_cap_flush_list)) {
+ cf = list_first_entry(&ci->i_cap_flush_list,
+ struct ceph_cap_flush, i_list);
+ list_del(&cf->i_list);
+ list_add(&cf->i_list, &to_remove);
}
spin_lock(&mdsc->cap_dirty_lock);
- list_for_each_entry(cf, &to_remove, list)
- rb_erase(&cf->g_node, &mdsc->cap_flush_tree);
+ list_for_each_entry(cf, &to_remove, i_list)
+ list_del(&cf->g_list);
if (!list_empty(&ci->i_dirty_item)) {
pr_warn_ratelimited(
@@ -1181,7 +1181,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
spin_unlock(&mdsc->cap_dirty_lock);
if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
- list_add(&ci->i_prealloc_cap_flush->list, &to_remove);
+ list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
ci->i_prealloc_cap_flush = NULL;
}
}
@@ -1189,8 +1189,8 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
while (!list_empty(&to_remove)) {
struct ceph_cap_flush *cf;
cf = list_first_entry(&to_remove,
- struct ceph_cap_flush, list);
- list_del(&cf->list);
+ struct ceph_cap_flush, i_list);
+ list_del(&cf->i_list);
ceph_free_cap_flush(cf);
}
@@ -1212,6 +1212,8 @@ static void remove_session_caps(struct ceph_mds_session *session)
dout("remove_session_caps on %p\n", session);
iterate_session_caps(session, remove_session_caps_cb, fsc);
+ wake_up_all(&fsc->mdsc->cap_flushing_wq);
+
spin_lock(&session->s_cap_lock);
if (session->s_nr_caps > 0) {
struct inode *inode;
@@ -1478,35 +1480,21 @@ static int trim_caps(struct ceph_mds_client *mdsc,
return 0;
}
-static int check_capsnap_flush(struct ceph_inode_info *ci,
- u64 want_snap_seq)
-{
- int ret = 1;
- spin_lock(&ci->i_ceph_lock);
- if (want_snap_seq > 0 && !list_empty(&ci->i_cap_snaps)) {
- struct ceph_cap_snap *capsnap =
- list_first_entry(&ci->i_cap_snaps,
- struct ceph_cap_snap, ci_item);
- ret = capsnap->follows >= want_snap_seq;
- }
- spin_unlock(&ci->i_ceph_lock);
- return ret;
-}
-
static int check_caps_flush(struct ceph_mds_client *mdsc,
u64 want_flush_tid)
{
- struct rb_node *n;
- struct ceph_cap_flush *cf;
int ret = 1;
spin_lock(&mdsc->cap_dirty_lock);
- n = rb_first(&mdsc->cap_flush_tree);
- cf = n ? rb_entry(n, struct ceph_cap_flush, g_node) : NULL;
- if (cf && cf->tid <= want_flush_tid) {
- dout("check_caps_flush still flushing tid %llu <= %llu\n",
- cf->tid, want_flush_tid);
- ret = 0;
+ if (!list_empty(&mdsc->cap_flush_list)) {
+ struct ceph_cap_flush *cf =
+ list_first_entry(&mdsc->cap_flush_list,
+ struct ceph_cap_flush, g_list);
+ if (cf->tid <= want_flush_tid) {
+ dout("check_caps_flush still flushing tid "
+ "%llu <= %llu\n", cf->tid, want_flush_tid);
+ ret = 0;
+ }
}
spin_unlock(&mdsc->cap_dirty_lock);
return ret;
@@ -1518,54 +1506,9 @@ static int check_caps_flush(struct ceph_mds_client *mdsc,
* waits until we've flushed through want_flush_tid
*/
static void wait_caps_flush(struct ceph_mds_client *mdsc,
- u64 want_flush_tid, u64 want_snap_seq)
+ u64 want_flush_tid)
{
- int mds;
-
- dout("check_caps_flush want %llu snap want %llu\n",
- want_flush_tid, want_snap_seq);
- mutex_lock(&mdsc->mutex);
- for (mds = 0; mds < mdsc->max_sessions; ) {
- struct ceph_mds_session *session = mdsc->sessions[mds];
- struct inode *inode = NULL;
-
- if (!session) {
- mds++;
- continue;
- }
- get_session(session);
- mutex_unlock(&mdsc->mutex);
-
- mutex_lock(&session->s_mutex);
- if (!list_empty(&session->s_cap_snaps_flushing)) {
- struct ceph_cap_snap *capsnap =
- list_first_entry(&session->s_cap_snaps_flushing,
- struct ceph_cap_snap,
- flushing_item);
- struct ceph_inode_info *ci = capsnap->ci;
- if (!check_capsnap_flush(ci, want_snap_seq)) {
- dout("check_cap_flush still flushing snap %p "
- "follows %lld <= %lld to mds%d\n",
- &ci->vfs_inode, capsnap->follows,
- want_snap_seq, mds);
- inode = igrab(&ci->vfs_inode);
- }
- }
- mutex_unlock(&session->s_mutex);
- ceph_put_mds_session(session);
-
- if (inode) {
- wait_event(mdsc->cap_flushing_wq,
- check_capsnap_flush(ceph_inode(inode),
- want_snap_seq));
- iput(inode);
- } else {
- mds++;
- }
-
- mutex_lock(&mdsc->mutex);
- }
- mutex_unlock(&mdsc->mutex);
+ dout("check_caps_flush want %llu\n", want_flush_tid);
wait_event(mdsc->cap_flushing_wq,
check_caps_flush(mdsc, want_flush_tid));
@@ -2163,6 +2106,11 @@ static int __do_request(struct ceph_mds_client *mdsc,
mds = __choose_mds(mdsc, req);
if (mds < 0 ||
ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
+ if (mdsc->mdsmap_err) {
+ err = mdsc->mdsmap_err;
+ dout("do_request mdsmap err %d\n", err);
+ goto finish;
+ }
dout("do_request no mds or not active, waiting for map\n");
list_add(&req->r_wait, &mdsc->waiting_for_map);
goto out;
@@ -2292,14 +2240,6 @@ int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
CEPH_CAP_PIN);
- /* deny access to directories with pool_ns layouts */
- if (req->r_inode && S_ISDIR(req->r_inode->i_mode) &&
- ceph_inode(req->r_inode)->i_pool_ns_len)
- return -EIO;
- if (req->r_locked_dir &&
- ceph_inode(req->r_locked_dir)->i_pool_ns_len)
- return -EIO;
-
/* issue */
mutex_lock(&mdsc->mutex);
__register_request(mdsc, req, dir);
@@ -2791,13 +2731,13 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
struct ceph_mds_cap_reconnect v2;
struct ceph_mds_cap_reconnect_v1 v1;
} rec;
- size_t reclen;
struct ceph_inode_info *ci;
struct ceph_reconnect_state *recon_state = arg;
struct ceph_pagelist *pagelist = recon_state->pagelist;
char *path;
int pathlen, err;
u64 pathbase;
+ u64 snap_follows;
struct dentry *dentry;
ci = cap->ci;
@@ -2819,10 +2759,8 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
} else {
path = NULL;
pathlen = 0;
+ pathbase = 0;
}
- err = ceph_pagelist_encode_string(pagelist, path, pathlen);
- if (err)
- goto out_free;
spin_lock(&ci->i_ceph_lock);
cap->seq = 0; /* reset cap seq */
@@ -2830,14 +2768,13 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
cap->mseq = 0; /* and migrate_seq */
cap->cap_gen = cap->session->s_cap_gen;
- if (recon_state->flock) {
+ if (recon_state->msg_version >= 2) {
rec.v2.cap_id = cpu_to_le64(cap->cap_id);
rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
rec.v2.issued = cpu_to_le32(cap->issued);
rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
rec.v2.pathbase = cpu_to_le64(pathbase);
rec.v2.flock_len = 0;
- reclen = sizeof(rec.v2);
} else {
rec.v1.cap_id = cpu_to_le64(cap->cap_id);
rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
@@ -2847,13 +2784,23 @@ static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
ceph_encode_timespec(&rec.v1.atime, &inode->i_atime);
rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
rec.v1.pathbase = cpu_to_le64(pathbase);
- reclen = sizeof(rec.v1);
+ }
+
+ if (list_empty(&ci->i_cap_snaps)) {
+ snap_follows = 0;
+ } else {
+ struct ceph_cap_snap *capsnap =
+ list_first_entry(&ci->i_cap_snaps,
+ struct ceph_cap_snap, ci_item);
+ snap_follows = capsnap->follows;
}
spin_unlock(&ci->i_ceph_lock);
- if (recon_state->flock) {
+ if (recon_state->msg_version >= 2) {
int num_fcntl_locks, num_flock_locks;
struct ceph_filelock *flocks;
+ size_t struct_len, total_len = 0;
+ u8 struct_v = 0;
encode_again:
ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
@@ -2872,20 +2819,51 @@ encode_again:
goto encode_again;
goto out_free;
}
+
+ if (recon_state->msg_version >= 3) {
+ /* version, compat_version and struct_len */
+ total_len = 2 * sizeof(u8) + sizeof(u32);
+ struct_v = 2;
+ }
/*
* number of encoded locks is stable, so copy to pagelist
*/
- rec.v2.flock_len = cpu_to_le32(2*sizeof(u32) +
- (num_fcntl_locks+num_flock_locks) *
- sizeof(struct ceph_filelock));
- err = ceph_pagelist_append(pagelist, &rec, reclen);
- if (!err)
- err = ceph_locks_to_pagelist(flocks, pagelist,
- num_fcntl_locks,
- num_flock_locks);
+ struct_len = 2 * sizeof(u32) +
+ (num_fcntl_locks + num_flock_locks) *
+ sizeof(struct ceph_filelock);
+ rec.v2.flock_len = cpu_to_le32(struct_len);
+
+ struct_len += sizeof(rec.v2);
+ struct_len += sizeof(u32) + pathlen;
+
+ if (struct_v >= 2)
+ struct_len += sizeof(u64); /* snap_follows */
+
+ total_len += struct_len;
+ err = ceph_pagelist_reserve(pagelist, total_len);
+
+ if (!err) {
+ if (recon_state->msg_version >= 3) {
+ ceph_pagelist_encode_8(pagelist, struct_v);
+ ceph_pagelist_encode_8(pagelist, 1);
+ ceph_pagelist_encode_32(pagelist, struct_len);
+ }
+ ceph_pagelist_encode_string(pagelist, path, pathlen);
+ ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
+ ceph_locks_to_pagelist(flocks, pagelist,
+ num_fcntl_locks,
+ num_flock_locks);
+ if (struct_v >= 2)
+ ceph_pagelist_encode_64(pagelist, snap_follows);
+ }
kfree(flocks);
} else {
- err = ceph_pagelist_append(pagelist, &rec, reclen);
+ size_t size = sizeof(u32) + pathlen + sizeof(rec.v1);
+ err = ceph_pagelist_reserve(pagelist, size);
+ if (!err) {
+ ceph_pagelist_encode_string(pagelist, path, pathlen);
+ ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
+ }
}
recon_state->nr_caps++;
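The v3 path wraps the record in the common Ceph encoding envelope: a one-byte struct version, a one-byte compat version, and a 32-bit payload length, followed by the payload itself. A standalone sketch of that framing, using a flat buffer as a stand-in for the pagelist calls (payload contents are hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        const char payload[] = "cap-record";        /* hypothetical body */
        uint8_t struct_v = 2, compat_v = 1;
        uint32_t struct_len = sizeof(payload);
        uint8_t buf[64], *p = buf;

        *p++ = struct_v;                            /* version */
        *p++ = compat_v;                            /* compat version */
        memcpy(p, &struct_len, sizeof(struct_len)); /* length (LE on wire) */
        p += sizeof(struct_len);
        memcpy(p, payload, struct_len);             /* the record itself */
        p += struct_len;

        printf("encoded %d header + %u payload bytes\n", 6, struct_len);
        return 0;
}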
@@ -2976,7 +2954,12 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
recon_state.nr_caps = 0;
recon_state.pagelist = pagelist;
- recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK;
+ if (session->s_con.peer_features & CEPH_FEATURE_MDSENC)
+ recon_state.msg_version = 3;
+ else if (session->s_con.peer_features & CEPH_FEATURE_FLOCK)
+ recon_state.msg_version = 2;
+ else
+ recon_state.msg_version = 1;
err = iterate_session_caps(session, encode_caps_cb, &recon_state);
if (err < 0)
goto fail;
@@ -3005,8 +2988,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
goto fail;
}
- if (recon_state.flock)
- reply->hdr.version = cpu_to_le16(2);
+ reply->hdr.version = cpu_to_le16(recon_state.msg_version);
/* raced with cap release? */
if (s_nr_caps != recon_state.nr_caps) {
@@ -3204,7 +3186,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
WARN_ON(1);
goto release; /* hrm... */
}
- dname.hash = full_name_hash(dname.name, dname.len);
+ dname.hash = full_name_hash(parent, dname.name, dname.len);
dentry = d_lookup(parent, &dname);
dput(parent);
if (!dentry)
@@ -3231,7 +3213,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
msecs_to_jiffies(le32_to_cpu(h->duration_ms));
di->lease_seq = seq;
- dentry->d_time = di->lease_renew_from + duration;
+ di->time = di->lease_renew_from + duration;
di->lease_renew_after = di->lease_renew_from +
(duration >> 1);
di->lease_renew_from = 0;
@@ -3297,47 +3279,6 @@ void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
}
/*
- * Preemptively release a lease we expect to invalidate anyway.
- * Pass @inode always, @dentry is optional.
- */
-void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode,
- struct dentry *dentry)
-{
- struct ceph_dentry_info *di;
- struct ceph_mds_session *session;
- u32 seq;
-
- BUG_ON(inode == NULL);
- BUG_ON(dentry == NULL);
-
- /* is dentry lease valid? */
- spin_lock(&dentry->d_lock);
- di = ceph_dentry(dentry);
- if (!di || !di->lease_session ||
- di->lease_session->s_mds < 0 ||
- di->lease_gen != di->lease_session->s_cap_gen ||
- !time_before(jiffies, dentry->d_time)) {
- dout("lease_release inode %p dentry %p -- "
- "no lease\n",
- inode, dentry);
- spin_unlock(&dentry->d_lock);
- return;
- }
-
- /* we do have a lease on this dentry; note mds and seq */
- session = ceph_get_mds_session(di->lease_session);
- seq = di->lease_seq;
- __ceph_mdsc_drop_dentry_lease(dentry);
- spin_unlock(&dentry->d_lock);
-
- dout("lease_release inode %p dentry %p to mds%d\n",
- inode, dentry, session->s_mds);
- ceph_mdsc_lease_send_msg(session, inode, dentry,
- CEPH_MDS_LEASE_RELEASE, seq);
- ceph_put_mds_session(session);
-}
-
-/*
* drop all leases (and dentry refs) in preparation for umount
*/
static void drop_leases(struct ceph_mds_client *mdsc)
@@ -3470,7 +3411,7 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
INIT_LIST_HEAD(&mdsc->snap_flush_list);
spin_lock_init(&mdsc->snap_flush_lock);
mdsc->last_cap_flush_tid = 1;
- mdsc->cap_flush_tree = RB_ROOT;
+ INIT_LIST_HEAD(&mdsc->cap_flush_list);
INIT_LIST_HEAD(&mdsc->cap_dirty);
INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
mdsc->num_cap_flushing = 0;
@@ -3585,7 +3526,7 @@ restart:
void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{
- u64 want_tid, want_flush, want_snap;
+ u64 want_tid, want_flush;
if (ACCESS_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
return;
@@ -3598,17 +3539,19 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
ceph_flush_dirty_caps(mdsc);
spin_lock(&mdsc->cap_dirty_lock);
want_flush = mdsc->last_cap_flush_tid;
+ if (!list_empty(&mdsc->cap_flush_list)) {
+ struct ceph_cap_flush *cf =
+ list_last_entry(&mdsc->cap_flush_list,
+ struct ceph_cap_flush, g_list);
+ cf->wake = true;
+ }
spin_unlock(&mdsc->cap_dirty_lock);
- down_read(&mdsc->snap_rwsem);
- want_snap = mdsc->last_snap_seq;
- up_read(&mdsc->snap_rwsem);
-
- dout("sync want tid %lld flush_seq %lld snap_seq %lld\n",
- want_tid, want_flush, want_snap);
+ dout("sync want tid %lld flush_seq %lld\n",
+ want_tid, want_flush);
wait_unsafe_requests(mdsc, want_tid);
- wait_caps_flush(mdsc, want_flush, want_snap);
+ wait_caps_flush(mdsc, want_flush);
}
/*
@@ -3729,11 +3672,86 @@ void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
dout("mdsc_destroy %p done\n", mdsc);
}
+void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
+{
+ struct ceph_fs_client *fsc = mdsc->fsc;
+ const char *mds_namespace = fsc->mount_options->mds_namespace;
+ void *p = msg->front.iov_base;
+ void *end = p + msg->front.iov_len;
+ u32 epoch;
+ u32 map_len;
+ u32 num_fs;
+ u32 mount_fscid = (u32)-1;
+ u8 struct_v, struct_cv;
+ int err = -EINVAL;
+
+ ceph_decode_need(&p, end, sizeof(u32), bad);
+ epoch = ceph_decode_32(&p);
+
+ dout("handle_fsmap epoch %u\n", epoch);
+
+ ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
+ struct_v = ceph_decode_8(&p);
+ struct_cv = ceph_decode_8(&p);
+ map_len = ceph_decode_32(&p);
+
+ ceph_decode_need(&p, end, sizeof(u32) * 3, bad);
+ p += sizeof(u32) * 2; /* skip epoch and legacy_client_fscid */
+
+ num_fs = ceph_decode_32(&p);
+ while (num_fs-- > 0) {
+ void *info_p, *info_end;
+ u32 info_len;
+ u8 info_v, info_cv;
+ u32 fscid, namelen;
+
+ ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
+ info_v = ceph_decode_8(&p);
+ info_cv = ceph_decode_8(&p);
+ info_len = ceph_decode_32(&p);
+ ceph_decode_need(&p, end, info_len, bad);
+ info_p = p;
+ info_end = p + info_len;
+ p = info_end;
+
+ ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
+ fscid = ceph_decode_32(&info_p);
+ namelen = ceph_decode_32(&info_p);
+ ceph_decode_need(&info_p, info_end, namelen, bad);
+
+ if (mds_namespace &&
+ strlen(mds_namespace) == namelen &&
+ !strncmp(mds_namespace, (char *)info_p, namelen)) {
+ mount_fscid = fscid;
+ break;
+ }
+ }
+
+ ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
+ if (mount_fscid != (u32)-1) {
+ fsc->client->monc.fs_cluster_id = mount_fscid;
+ ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
+ 0, true);
+ ceph_monc_renew_subs(&fsc->client->monc);
+ } else {
+ err = -ENOENT;
+ goto err_out;
+ }
+ return;
+bad:
+ pr_err("error decoding fsmap\n");
+err_out:
+ mutex_lock(&mdsc->mutex);
+ mdsc->mdsmap_err = -ENOENT;
+ __wake_requests(mdsc, &mdsc->waiting_for_map);
+ mutex_unlock(&mdsc->mutex);
+ return;
+}
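Each field above is preceded by a ceph_decode_need() bounds check so a truncated or malicious map can never run p past end. A hypothetical userspace analogue of that idiom:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-in for the ceph_decode_* helpers in ceph/decode.h */
static int decode_32(const uint8_t **p, const uint8_t *end, uint32_t *v)
{
        if (end - *p < (ptrdiff_t)sizeof(*v))
                return -1;              /* plays the role of the 'bad' label */
        memcpy(v, *p, sizeof(*v));      /* wire data; assume LE host */
        *p += sizeof(*v);
        return 0;
}

int main(void)
{
        uint8_t buf[] = { 0x2a, 0, 0, 0 };
        const uint8_t *p = buf, *end = buf + sizeof(buf);
        uint32_t epoch;

        if (decode_32(&p, end, &epoch) == 0)
                printf("epoch %u\n", epoch);        /* 42 */
        if (decode_32(&p, end, &epoch) != 0)
                printf("short buffer rejected\n");
        return 0;
}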
/*
* handle mds map update.
*/
-void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
+void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
u32 epoch;
u32 maplen;
@@ -3840,7 +3858,10 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
switch (type) {
case CEPH_MSG_MDS_MAP:
- ceph_mdsc_handle_map(mdsc, msg);
+ ceph_mdsc_handle_mdsmap(mdsc, msg);
+ break;
+ case CEPH_MSG_FS_MAP_USER:
+ ceph_mdsc_handle_fsmap(mdsc, msg);
break;
case CEPH_MSG_CLIENT_SESSION:
handle_session(s, msg);
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index e7d38aac7109..6b3679737d4a 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -45,6 +45,7 @@ struct ceph_mds_reply_info_in {
u32 inline_len;
char *inline_data;
u32 pool_ns_len;
+ char *pool_ns_data;
};
struct ceph_mds_reply_dir_entry {
@@ -151,7 +152,6 @@ struct ceph_mds_session {
/* protected by mutex */
struct list_head s_cap_flushing; /* inodes w/ flushing caps */
- struct list_head s_cap_snaps_flushing;
unsigned long s_renew_requested; /* last time we sent a renew req */
u64 s_renew_seq;
@@ -275,8 +275,10 @@ struct ceph_mds_request {
struct ceph_pool_perm {
struct rb_node node;
- u32 pool;
int perm;
+ s64 pool;
+ size_t pool_ns_len;
+ char pool_ns[];
};
/*
@@ -290,6 +292,7 @@ struct ceph_mds_client {
struct completion safe_umount_waiters;
wait_queue_head_t session_close_wq;
struct list_head waiting_for_map;
+ int mdsmap_err;
struct ceph_mds_session **sessions; /* NULL for mds if no session */
atomic_t num_sessions;
@@ -321,7 +324,7 @@ struct ceph_mds_client {
spinlock_t snap_flush_lock;
u64 last_cap_flush_tid;
- struct rb_root cap_flush_tree;
+ struct list_head cap_flush_list;
struct list_head cap_dirty; /* inodes with dirty caps */
struct list_head cap_dirty_migrating; /* ...that are migrating... */
int num_cap_flushing; /* # caps we are flushing */
@@ -382,10 +385,6 @@ extern void ceph_mdsc_destroy(struct ceph_fs_client *fsc);
extern void ceph_mdsc_sync(struct ceph_mds_client *mdsc);
-extern void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc,
- struct inode *inode,
- struct dentry *dn);
-
extern void ceph_invalidate_dir_request(struct ceph_mds_request *req);
extern int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
struct inode *dir);
@@ -420,8 +419,10 @@ extern void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
struct dentry *dentry, char action,
u32 seq);
-extern void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc,
- struct ceph_msg *msg);
+extern void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc,
+ struct ceph_msg *msg);
+extern void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc,
+ struct ceph_msg *msg);
extern struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target);
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 9caaa7ffc93f..9ff5219d849e 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -520,9 +520,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
ihold(inode);
atomic_set(&capsnap->nref, 1);
- capsnap->ci = ci;
INIT_LIST_HEAD(&capsnap->ci_item);
- INIT_LIST_HEAD(&capsnap->flushing_item);
capsnap->follows = old_snapc->seq;
capsnap->issued = __ceph_caps_issued(ci, NULL);
@@ -551,7 +549,6 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
ci->i_wrbuffer_ref_head = 0;
capsnap->context = old_snapc;
list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
- old_snapc = NULL;
if (used & CEPH_CAP_FILE_WR) {
dout("queue_cap_snap %p cap_snap %p snapc %p"
@@ -563,6 +560,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
__ceph_finish_cap_snap(ci, capsnap);
}
capsnap = NULL;
+ old_snapc = NULL;
update_snapc:
if (ci->i_head_snapc) {
@@ -603,6 +601,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
capsnap->dirty_pages);
return 0;
}
+
+ ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;
dout("finish_cap_snap %p cap_snap %p snapc %p %llu %s s=%llu\n",
inode, capsnap, capsnap->context,
capsnap->context->seq, ceph_cap_string(capsnap->dirty),
@@ -799,9 +799,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
inode = &ci->vfs_inode;
ihold(inode);
spin_unlock(&mdsc->snap_flush_lock);
- spin_lock(&ci->i_ceph_lock);
- __ceph_flush_snaps(ci, &session, 0);
- spin_unlock(&ci->i_ceph_lock);
+ ceph_flush_snaps(ci, &session);
iput(inode);
spin_lock(&mdsc->snap_flush_lock);
}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 91e02481ce06..e247f6f0feb7 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -108,7 +108,6 @@ static int ceph_sync_fs(struct super_block *sb, int wait)
* mount options
*/
enum {
- Opt_mds_namespace,
Opt_wsize,
Opt_rsize,
Opt_rasize,
@@ -121,6 +120,7 @@ enum {
Opt_last_int,
/* int args above */
Opt_snapdirname,
+ Opt_mds_namespace,
Opt_last_string,
/* string args above */
Opt_dirstat,
@@ -144,7 +144,6 @@ enum {
};
static match_table_t fsopt_tokens = {
- {Opt_mds_namespace, "mds_namespace=%d"},
{Opt_wsize, "wsize=%d"},
{Opt_rsize, "rsize=%d"},
{Opt_rasize, "rasize=%d"},
@@ -156,6 +155,7 @@ static match_table_t fsopt_tokens = {
{Opt_congestion_kb, "write_congestion_kb=%d"},
/* int args above */
{Opt_snapdirname, "snapdirname=%s"},
+ {Opt_mds_namespace, "mds_namespace=%s"},
/* string args above */
{Opt_dirstat, "dirstat"},
{Opt_nodirstat, "nodirstat"},
@@ -212,11 +212,14 @@ static int parse_fsopt_token(char *c, void *private)
if (!fsopt->snapdir_name)
return -ENOMEM;
break;
-
- /* misc */
case Opt_mds_namespace:
- fsopt->mds_namespace = intval;
+ fsopt->mds_namespace = kstrndup(argstr[0].from,
+ argstr[0].to-argstr[0].from,
+ GFP_KERNEL);
+ if (!fsopt->mds_namespace)
+ return -ENOMEM;
break;
+ /* misc */
case Opt_wsize:
fsopt->wsize = intval;
break;
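mds_namespace is now parsed as a string token and duplicated out of the option buffer. A userspace analogue of the kstrndup() call, using strndup() over a hypothetical [from, to) span as match_token() would hand back:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        const char *opts = "mds_namespace=cephfs_a,wsize=4096";
        const char *from = strchr(opts, '=') + 1;   /* token start */
        const char *to = strchr(from, ',');         /* token end */
        char *ns = strndup(from, (size_t)(to - from));

        if (ns) {
                printf("mds_namespace = '%s'\n", ns);
                free(ns);
        }
        return 0;
}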
@@ -302,6 +305,7 @@ static void destroy_mount_options(struct ceph_mount_options *args)
{
dout("destroy_mount_options %p\n", args);
kfree(args->snapdir_name);
+ kfree(args->mds_namespace);
kfree(args->server_path);
kfree(args);
}
@@ -333,6 +337,9 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
if (ret)
return ret;
+ ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
+ if (ret)
+ return ret;
ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
if (ret)
@@ -376,7 +383,6 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt,
fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
fsopt->congestion_kb = default_congestion_kb();
- fsopt->mds_namespace = CEPH_FS_CLUSTER_ID_NONE;
/*
* Distinguish the server list from the path in "dev_name".
@@ -469,8 +475,8 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
seq_puts(m, ",noacl");
#endif
- if (fsopt->mds_namespace != CEPH_FS_CLUSTER_ID_NONE)
- seq_printf(m, ",mds_namespace=%d", fsopt->mds_namespace);
+ if (fsopt->mds_namespace)
+ seq_printf(m, ",mds_namespace=%s", fsopt->mds_namespace);
if (fsopt->wsize)
seq_printf(m, ",wsize=%d", fsopt->wsize);
if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
@@ -509,9 +515,11 @@ static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
switch (type) {
case CEPH_MSG_MDS_MAP:
- ceph_mdsc_handle_map(fsc->mdsc, msg);
+ ceph_mdsc_handle_mdsmap(fsc->mdsc, msg);
+ return 0;
+ case CEPH_MSG_FS_MAP_USER:
+ ceph_mdsc_handle_fsmap(fsc->mdsc, msg);
return 0;
-
default:
return -1;
}
@@ -543,8 +551,14 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
goto fail;
}
fsc->client->extra_mon_dispatch = extra_mon_dispatch;
- fsc->client->monc.fs_cluster_id = fsopt->mds_namespace;
- ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP, 0, true);
+
+ if (fsopt->mds_namespace == NULL) {
+ ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
+ 0, true);
+ } else {
+ ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_FSMAP,
+ 0, false);
+ }
fsc->mount_options = fsopt;
@@ -672,8 +686,8 @@ static int __init init_caches(void)
if (ceph_dentry_cachep == NULL)
goto bad_dentry;
- ceph_file_cachep = KMEM_CACHE(ceph_file_info,
- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
+ ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_MEM_SPREAD);
+
if (ceph_file_cachep == NULL)
goto bad_file;
@@ -731,6 +745,7 @@ static const struct super_operations ceph_super_ops = {
.destroy_inode = ceph_destroy_inode,
.write_inode = ceph_write_inode,
.drop_inode = ceph_drop_inode,
+ .evict_inode = ceph_evict_inode,
.sync_fs = ceph_sync_fs,
.put_super = ceph_put_super,
.show_options = ceph_show_options,
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 0168b49fb6ad..3e3fa9163059 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -62,7 +62,6 @@ struct ceph_mount_options {
int cap_release_safety;
int max_readdir; /* max readdir result (entries) */
int max_readdir_bytes; /* max readdir result (bytes) */
- int mds_namespace;
/*
* everything above this point can be memcmp'd; everything below
@@ -70,6 +69,7 @@ struct ceph_mount_options {
*/
char *snapdir_name; /* default ".snap" */
+ char *mds_namespace; /* default NULL */
char *server_path; /* default "/" */
};
@@ -147,6 +147,14 @@ struct ceph_cap {
#define CHECK_CAPS_AUTHONLY 2 /* only check auth cap */
#define CHECK_CAPS_FLUSH 4 /* flush any dirty caps */
+struct ceph_cap_flush {
+ u64 tid;
+ int caps; /* 0 means capsnap */
+ bool wake; /* wake up flush waiters when finished? */
+ struct list_head g_list; // global
+ struct list_head i_list; // per inode
+};
+
/*
* Snapped cap state that is pending flush to mds. When a snapshot occurs,
* we first complete any in-process sync writes and writeback any dirty
@@ -154,10 +162,11 @@ struct ceph_cap {
*/
struct ceph_cap_snap {
atomic_t nref;
- struct ceph_inode_info *ci;
- struct list_head ci_item, flushing_item;
+ struct list_head ci_item;
+
+ struct ceph_cap_flush cap_flush;
- u64 follows, flush_tid;
+ u64 follows;
int issued, dirty;
struct ceph_snap_context *context;
@@ -186,16 +195,6 @@ static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
}
}
-struct ceph_cap_flush {
- u64 tid;
- int caps;
- struct rb_node g_node; // global
- union {
- struct rb_node i_node; // inode
- struct list_head list;
- };
-};
-
/*
* The frag tree describes how a directory is fragmented, potentially across
* multiple metadata servers. It is also used to indicate points where
@@ -246,7 +245,7 @@ struct ceph_dentry_info {
unsigned long lease_renew_after, lease_renew_from;
struct list_head lru;
struct dentry *dentry;
- u64 time;
+ unsigned long time;
u64 offset;
};
@@ -287,7 +286,6 @@ struct ceph_inode_info {
struct ceph_dir_layout i_dir_layout;
struct ceph_file_layout i_layout;
- size_t i_pool_ns_len;
char *i_symlink;
/* for dirs */
@@ -311,7 +309,7 @@ struct ceph_inode_info {
* overlapping, pipelined cap flushes to the mds. we can probably
* reduce the tid to 8 bits if we're concerned about inode size. */
struct ceph_cap_flush *i_prealloc_cap_flush;
- struct rb_root i_cap_flush_tree;
+ struct list_head i_cap_flush_list;
wait_queue_head_t i_cap_wq; /* threads waiting on a capability */
unsigned long i_hold_caps_min; /* jiffies */
unsigned long i_hold_caps_max; /* jiffies */
@@ -322,7 +320,7 @@ struct ceph_inode_info {
dirty|flushing caps */
unsigned i_snap_caps; /* cap bits for snapped files */
- int i_nr_by_mode[CEPH_FILE_MODE_NUM]; /* open file counts */
+ int i_nr_by_mode[CEPH_FILE_MODE_BITS]; /* open file counts */
struct mutex i_truncate_mutex;
u32 i_truncate_seq; /* last truncate to smaller size */
@@ -471,6 +469,8 @@ static inline struct inode *ceph_find_inode(struct super_block *sb,
#define CEPH_I_POOL_WR (1 << 6) /* can write to pool */
#define CEPH_I_SEC_INITED (1 << 7) /* security initialized */
#define CEPH_I_CAP_DROPPED (1 << 8) /* caps were forcibly dropped */
+#define CEPH_I_KICK_FLUSH (1 << 9) /* kick flushing caps */
+#define CEPH_I_FLUSH_SNAPS (1 << 10) /* need to flush snaps */
static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci,
long long release_count,
@@ -750,6 +750,7 @@ extern const struct inode_operations ceph_file_iops;
extern struct inode *ceph_alloc_inode(struct super_block *sb);
extern void ceph_destroy_inode(struct inode *inode);
extern int ceph_drop_inode(struct inode *inode);
+extern void ceph_evict_inode(struct inode *inode);
extern struct inode *ceph_get_inode(struct super_block *sb,
struct ceph_vino vino);
@@ -890,9 +891,8 @@ extern void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps);
extern void ceph_put_cap_refs(struct ceph_inode_info *ci, int had);
extern void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
struct ceph_snap_context *snapc);
-extern void __ceph_flush_snaps(struct ceph_inode_info *ci,
- struct ceph_mds_session **psession,
- int again);
+extern void ceph_flush_snaps(struct ceph_inode_info *ci,
+ struct ceph_mds_session **psession);
extern void ceph_check_caps(struct ceph_inode_info *ci, int flags,
struct ceph_mds_session *session);
extern void ceph_check_delayed_caps(struct ceph_mds_client *mdsc);
@@ -907,10 +907,7 @@ extern int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
loff_t endoff, int *got, struct page **pinned_page);
/* for counting open files by mode */
-static inline void __ceph_get_fmode(struct ceph_inode_info *ci, int mode)
-{
- ci->i_nr_by_mode[mode]++;
-}
+extern void __ceph_get_fmode(struct ceph_inode_info *ci, int mode);
extern void ceph_put_fmode(struct ceph_inode_info *ci, int mode);
/* addr.c */
@@ -931,6 +928,7 @@ extern int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
extern int ceph_release(struct inode *inode, struct file *filp);
extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
char *data, size_t len);
+extern void ceph_sync_write_wait(struct inode *inode);
/* dir.c */
extern const struct file_operations ceph_dir_fops;
extern const struct file_operations ceph_snapdir_fops;
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 4870b29df224..adc231892b0d 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -57,81 +57,88 @@ struct ceph_vxattr {
static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
{
- size_t s;
- char *p = (char *)&ci->i_layout;
-
- for (s = 0; s < sizeof(ci->i_layout); s++, p++)
- if (*p)
- return true;
- return false;
+ struct ceph_file_layout *fl = &ci->i_layout;
+ return (fl->stripe_unit > 0 || fl->stripe_count > 0 ||
+ fl->object_size > 0 || fl->pool_id >= 0 ||
+ rcu_dereference_raw(fl->pool_ns) != NULL);
}
static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
size_t size)
{
- int ret;
struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
struct ceph_osd_client *osdc = &fsc->client->osdc;
- s64 pool = ceph_file_layout_pg_pool(ci->i_layout);
+ struct ceph_string *pool_ns;
+ s64 pool = ci->i_layout.pool_id;
const char *pool_name;
+ const char *ns_field = " pool_namespace=";
char buf[128];
+ size_t len, total_len = 0;
+ int ret;
+
+ pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
dout("ceph_vxattrcb_layout %p\n", &ci->vfs_inode);
down_read(&osdc->lock);
pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
if (pool_name) {
- size_t len = strlen(pool_name);
- ret = snprintf(buf, sizeof(buf),
- "stripe_unit=%lld stripe_count=%lld object_size=%lld pool=",
- (unsigned long long)ceph_file_layout_su(ci->i_layout),
- (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
- (unsigned long long)ceph_file_layout_object_size(ci->i_layout));
- if (!size) {
- ret += len;
- } else if (ret + len > size) {
- ret = -ERANGE;
- } else {
- memcpy(val, buf, ret);
+ len = snprintf(buf, sizeof(buf),
+ "stripe_unit=%u stripe_count=%u object_size=%u pool=",
+ ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
+ ci->i_layout.object_size);
+ total_len = len + strlen(pool_name);
+ } else {
+ len = snprintf(buf, sizeof(buf),
+ "stripe_unit=%u stripe_count=%u object_size=%u pool=%lld",
+ ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
+ ci->i_layout.object_size, (unsigned long long)pool);
+ total_len = len;
+ }
+
+ if (pool_ns)
+ total_len += strlen(ns_field) + pool_ns->len;
+
+ if (!size) {
+ ret = total_len;
+ } else if (total_len > size) {
+ ret = -ERANGE;
+ } else {
+ memcpy(val, buf, len);
+ ret = len;
+ if (pool_name) {
+ len = strlen(pool_name);
memcpy(val + ret, pool_name, len);
ret += len;
}
- } else {
- ret = snprintf(buf, sizeof(buf),
- "stripe_unit=%lld stripe_count=%lld object_size=%lld pool=%lld",
- (unsigned long long)ceph_file_layout_su(ci->i_layout),
- (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout),
- (unsigned long long)ceph_file_layout_object_size(ci->i_layout),
- (unsigned long long)pool);
- if (size) {
- if (ret <= size)
- memcpy(val, buf, ret);
- else
- ret = -ERANGE;
+ if (pool_ns) {
+ len = strlen(ns_field);
+ memcpy(val + ret, ns_field, len);
+ ret += len;
+ memcpy(val + ret, pool_ns->str, pool_ns->len);
+ ret += pool_ns->len;
}
}
up_read(&osdc->lock);
+ ceph_put_string(pool_ns);
return ret;
}
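The rewrite computes total_len up front so it can honor the xattr sizing contract in one place: size == 0 means report the needed length, a too-small buffer means -ERANGE. A userspace sketch of that contract (getter name and value are hypothetical):

#include <stdio.h>
#include <string.h>

#define MY_ERANGE 34                       /* errno value behind -ERANGE */

static int get_layout(char *val, size_t size)
{
        const char *v = "stripe_unit=4194304 pool=cephfs_data";
        size_t need = strlen(v);

        if (size == 0)
                return (int)need;          /* probe: report required size */
        if (need > size)
                return -MY_ERANGE;         /* buffer too small */
        memcpy(val, v, need);
        return (int)need;
}

int main(void)
{
        char buf[64];
        int n = get_layout(NULL, 0);
        printf("need %d bytes\n", n);
        n = get_layout(buf, sizeof(buf));
        printf("value: %.*s\n", n, buf);
        return 0;
}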
static size_t ceph_vxattrcb_layout_stripe_unit(struct ceph_inode_info *ci,
char *val, size_t size)
{
- return snprintf(val, size, "%lld",
- (unsigned long long)ceph_file_layout_su(ci->i_layout));
+ return snprintf(val, size, "%u", ci->i_layout.stripe_unit);
}
static size_t ceph_vxattrcb_layout_stripe_count(struct ceph_inode_info *ci,
char *val, size_t size)
{
- return snprintf(val, size, "%lld",
- (unsigned long long)ceph_file_layout_stripe_count(ci->i_layout));
+ return snprintf(val, size, "%u", ci->i_layout.stripe_count);
}
static size_t ceph_vxattrcb_layout_object_size(struct ceph_inode_info *ci,
char *val, size_t size)
{
- return snprintf(val, size, "%lld",
- (unsigned long long)ceph_file_layout_object_size(ci->i_layout));
+ return snprintf(val, size, "%u", ci->i_layout.object_size);
}
static size_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
@@ -140,7 +147,7 @@ static size_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
int ret;
struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
struct ceph_osd_client *osdc = &fsc->client->osdc;
- s64 pool = ceph_file_layout_pg_pool(ci->i_layout);
+ s64 pool = ci->i_layout.pool_id;
const char *pool_name;
down_read(&osdc->lock);
@@ -153,6 +160,18 @@ static size_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
return ret;
}
+static size_t ceph_vxattrcb_layout_pool_namespace(struct ceph_inode_info *ci,
+ char *val, size_t size)
+{
+ int ret = 0;
+ struct ceph_string *ns = ceph_try_get_string(ci->i_layout.pool_ns);
+ if (ns) {
+ ret = snprintf(val, size, "%.*s", (int)ns->len, ns->str);
+ ceph_put_string(ns);
+ }
+ return ret;
+}
+
/* directories */
static size_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
@@ -241,6 +260,7 @@ static struct ceph_vxattr ceph_dir_vxattrs[] = {
XATTR_LAYOUT_FIELD(dir, layout, stripe_count),
XATTR_LAYOUT_FIELD(dir, layout, object_size),
XATTR_LAYOUT_FIELD(dir, layout, pool),
+ XATTR_LAYOUT_FIELD(dir, layout, pool_namespace),
XATTR_NAME_CEPH(dir, entries),
XATTR_NAME_CEPH(dir, files),
XATTR_NAME_CEPH(dir, subdirs),
@@ -268,6 +288,7 @@ static struct ceph_vxattr ceph_file_vxattrs[] = {
XATTR_LAYOUT_FIELD(file, layout, stripe_count),
XATTR_LAYOUT_FIELD(file, layout, object_size),
XATTR_LAYOUT_FIELD(file, layout, pool),
+ XATTR_LAYOUT_FIELD(file, layout, pool_namespace),
{ .name = NULL, 0 } /* Required table terminator */
};
static size_t ceph_file_vxattrs_name_size; /* total size of all names */
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 687471dc04a0..6edd825231c5 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -92,7 +92,7 @@ __register_chrdev_region(unsigned int major, unsigned int baseminor,
}
if (i < CHRDEV_MAJOR_DYN_END)
- pr_warn("CHRDEV \"%s\" major number %d goes below the dynamic allocation range",
+ pr_warn("CHRDEV \"%s\" major number %d goes below the dynamic allocation range\n",
name, i);
if (i == 0) {
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 788e19195991..6c58e13fed2f 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -244,7 +244,6 @@ static int cifs_debug_data_proc_open(struct inode *inode, struct file *file)
}
static const struct file_operations cifs_debug_data_proc_fops = {
- .owner = THIS_MODULE,
.open = cifs_debug_data_proc_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -361,7 +360,6 @@ static int cifs_stats_proc_open(struct inode *inode, struct file *file)
}
static const struct file_operations cifs_stats_proc_fops = {
- .owner = THIS_MODULE,
.open = cifs_stats_proc_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -447,7 +445,6 @@ static ssize_t cifsFYI_proc_write(struct file *file, const char __user *buffer,
}
static const struct file_operations cifsFYI_proc_fops = {
- .owner = THIS_MODULE,
.open = cifsFYI_proc_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -479,7 +476,6 @@ static ssize_t cifs_linux_ext_proc_write(struct file *file,
}
static const struct file_operations cifs_linux_ext_proc_fops = {
- .owner = THIS_MODULE,
.open = cifs_linux_ext_proc_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -511,7 +507,6 @@ static ssize_t cifs_lookup_cache_proc_write(struct file *file,
}
static const struct file_operations cifs_lookup_cache_proc_fops = {
- .owner = THIS_MODULE,
.open = cifs_lookup_cache_proc_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -543,7 +538,6 @@ static ssize_t traceSMB_proc_write(struct file *file, const char __user *buffer,
}
static const struct file_operations traceSMB_proc_fops = {
- .owner = THIS_MODULE,
.open = traceSMB_proc_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -655,7 +649,6 @@ static ssize_t cifs_security_flags_proc_write(struct file *file,
}
static const struct file_operations cifs_security_flags_proc_fops = {
- .owner = THIS_MODULE,
.open = cifs_security_flags_proc_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 3182273a3407..1418daa03d95 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -46,6 +46,9 @@
#define CIFS_MOUNT_CIFS_BACKUPUID 0x200000 /* backup intent bit for a user */
#define CIFS_MOUNT_CIFS_BACKUPGID 0x400000 /* backup intent bit for a group */
#define CIFS_MOUNT_MAP_SFM_CHR 0x800000 /* SFM/MAC mapping for illegal chars */
+#define CIFS_MOUNT_USE_PREFIX_PATH 0x1000000 /* make subpath with inaccessible
+ * root mountable
+ */
struct cifs_sb_info {
struct rb_root tlink_tree;
@@ -67,5 +70,6 @@ struct cifs_sb_info {
struct backing_dev_info bdi;
struct delayed_work prune_tlinks;
struct rcu_head rcu;
+ char *prepath;
};
#endif /* _CIFS_FS_SB_H */
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 6aeb8d4616a4..8347c90cf483 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -743,24 +743,26 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
memcpy(ses->auth_key.response + baselen, tiblob, tilen);
+ mutex_lock(&ses->server->srv_mutex);
+
rc = crypto_hmacmd5_alloc(ses->server);
if (rc) {
cifs_dbg(VFS, "could not crypto alloc hmacmd5 rc %d\n", rc);
- goto setup_ntlmv2_rsp_ret;
+ goto unlock;
}
/* calculate ntlmv2_hash */
rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp);
if (rc) {
cifs_dbg(VFS, "could not get v2 hash rc %d\n", rc);
- goto setup_ntlmv2_rsp_ret;
+ goto unlock;
}
/* calculate first part of the client response (CR1) */
rc = CalcNTLMv2_response(ses, ntlmv2_hash);
if (rc) {
cifs_dbg(VFS, "Could not calculate CR1 rc: %d\n", rc);
- goto setup_ntlmv2_rsp_ret;
+ goto unlock;
}
/* now calculate the session key for NTLMv2 */
@@ -769,13 +771,13 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
if (rc) {
cifs_dbg(VFS, "%s: Could not set NTLMV2 Hash as a key\n",
__func__);
- goto setup_ntlmv2_rsp_ret;
+ goto unlock;
}
rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash);
if (rc) {
cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__);
- goto setup_ntlmv2_rsp_ret;
+ goto unlock;
}
rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
@@ -783,7 +785,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
CIFS_HMAC_MD5_HASH_SIZE);
if (rc) {
cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
- goto setup_ntlmv2_rsp_ret;
+ goto unlock;
}
rc = crypto_shash_final(&ses->server->secmech.sdeschmacmd5->shash,
@@ -791,6 +793,8 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
if (rc)
cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
+unlock:
+ mutex_unlock(&ses->server->srv_mutex);
setup_ntlmv2_rsp_ret:
kfree(tiblob);
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 5d841f39c4b7..6bbec5e784cd 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -689,6 +689,14 @@ cifs_do_mount(struct file_system_type *fs_type,
goto out_cifs_sb;
}
+ if (volume_info->prepath) {
+ cifs_sb->prepath = kstrdup(volume_info->prepath, GFP_KERNEL);
+ if (cifs_sb->prepath == NULL) {
+ root = ERR_PTR(-ENOMEM);
+ goto out_cifs_sb;
+ }
+ }
+
cifs_setup_cifs_sb(volume_info, cifs_sb);
rc = cifs_mount(cifs_sb, volume_info);
@@ -727,7 +735,11 @@ cifs_do_mount(struct file_system_type *fs_type,
sb->s_flags |= MS_ACTIVE;
}
- root = cifs_get_root(volume_info, sb);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
+ root = dget(sb->s_root);
+ else
+ root = cifs_get_root(volume_info, sb);
+
if (IS_ERR(root))
goto out_super;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 7d2b15c06090..7ae03283bd61 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1228,6 +1228,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
vol->ops = &smb1_operations;
vol->vals = &smb1_values;
+ vol->echo_interval = SMB_ECHO_INTERVAL_DEFAULT;
+
if (!mountdata)
goto cifs_parse_mount_err;
@@ -2049,7 +2051,7 @@ static int match_server(struct TCP_Server_Info *server, struct smb_vol *vol)
if (!match_security(server, vol))
return 0;
- if (server->echo_interval != vol->echo_interval)
+ if (server->echo_interval != vol->echo_interval * HZ)
return 0;
return 1;
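The fix above compares like units: server->echo_interval is stored in jiffies at setup while vol->echo_interval is still in seconds, so an unscaled comparison would never match and every mount got a fresh connection. Minimal illustration (HZ value hypothetical):

#include <stdio.h>

#define HZ 250                      /* hypothetical tick rate */

int main(void)
{
        unsigned long server_echo = 60 * HZ;   /* stored in jiffies */
        unsigned long vol_echo = 60;           /* parsed in seconds */

        printf("raw:    %s\n", server_echo == vol_echo ? "match" : "mismatch");
        printf("scaled: %s\n", server_echo == vol_echo * HZ ? "match" : "mismatch");
        return 0;
}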
@@ -3483,6 +3485,44 @@ cifs_get_volume_info(char *mount_data, const char *devname)
return volume_info;
}
+static int
+cifs_are_all_path_components_accessible(struct TCP_Server_Info *server,
+ unsigned int xid,
+ struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb,
+ char *full_path)
+{
+ int rc;
+ char *s;
+ char sep, tmp;
+
+ sep = CIFS_DIR_SEP(cifs_sb);
+ s = full_path;
+
+ rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, "");
+ while (rc == 0) {
+ /* skip separators */
+ while (*s == sep)
+ s++;
+ if (!*s)
+ break;
+ /* next separator */
+ while (*s && *s != sep)
+ s++;
+
+ /*
+ * temporarily null-terminate the path at the end of
+ * the current component
+ */
+ tmp = *s;
+ *s = 0;
+ rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
+ full_path);
+ *s = tmp;
+ }
+ return rc;
+}
+
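The helper above probes each ancestor by temporarily NUL-terminating the path at successive separators, testing the prefix, and restoring the byte. A self-contained userspace rendering of the same walk, with is_accessible() standing in for the SMB query:

#include <stdio.h>

static int is_accessible(const char *path)
{
        printf("check '%s'\n", path);
        return 0;                         /* pretend every prefix exists */
}

int main(void)
{
        char path[] = "\\dir1\\dir2\\file";
        char sep = '\\', tmp, *s = path;
        int rc = is_accessible("");

        while (rc == 0) {
                while (*s == sep)         /* skip separators */
                        s++;
                if (!*s)
                        break;
                while (*s && *s != sep)   /* end of this component */
                        s++;
                tmp = *s;                 /* truncate, test, restore */
                *s = '\0';
                rc = is_accessible(path);
                *s = tmp;
        }
        return rc;
}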
int
cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
{
@@ -3620,6 +3660,16 @@ remote_path_check:
kfree(full_path);
goto mount_fail_check;
}
+
+ rc = cifs_are_all_path_components_accessible(server,
+ xid, tcon, cifs_sb,
+ full_path);
+ if (rc != 0) {
+ cifs_dbg(VFS, "cannot query dirs between root and final path, "
+ "enabling CIFS_MOUNT_USE_PREFIX_PATH\n");
+ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
+ rc = 0;
+ }
kfree(full_path);
}
@@ -3889,6 +3939,7 @@ cifs_umount(struct cifs_sb_info *cifs_sb)
bdi_destroy(&cifs_sb->bdi);
kfree(cifs_sb->mountdata);
+ kfree(cifs_sb->prepath);
call_rcu(&cifs_sb->rcu, delayed_free);
}
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index c3eb998a99bd..4716c54dbfc6 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -84,6 +84,7 @@ build_path_from_dentry(struct dentry *direntry)
struct dentry *temp;
int namelen;
int dfsplen;
+ int pplen = 0;
char *full_path;
char dirsep;
struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
@@ -95,8 +96,12 @@ build_path_from_dentry(struct dentry *direntry)
dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1);
else
dfsplen = 0;
+
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
+ pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0;
+
cifs_bp_rename_retry:
- namelen = dfsplen;
+ namelen = dfsplen + pplen;
seq = read_seqbegin(&rename_lock);
rcu_read_lock();
for (temp = direntry; !IS_ROOT(temp);) {
@@ -137,7 +142,7 @@ cifs_bp_rename_retry:
}
}
rcu_read_unlock();
- if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) {
+ if (namelen != dfsplen + pplen || read_seqretry(&rename_lock, seq)) {
cifs_dbg(FYI, "did not end path lookup where expected. namelen=%ddfsplen=%d\n",
namelen, dfsplen);
/* presumably this is only possible if racing with a rename
@@ -153,6 +158,17 @@ cifs_bp_rename_retry:
those safely to '/' if any are found in the middle of the prepath */
/* BB test paths to Windows with '/' in the midst of prepath */
+ if (pplen) {
+ int i;
+
+ cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath);
+ memcpy(full_path+dfsplen+1, cifs_sb->prepath, pplen-1);
+ full_path[dfsplen] = '\\';
+ for (i = 0; i < pplen-1; i++)
+ if (full_path[dfsplen+1+i] == '/')
+ full_path[dfsplen+1+i] = CIFS_DIR_SEP(cifs_sb);
+ }
+
if (dfsplen) {
strncpy(full_path, tcon->treeName, dfsplen);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
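
With a prefix path in play, the final wire path becomes: tree name, separator, stored prepath (any '/' normalized to the active separator), then the dentry-relative components. A userspace sketch of that assembly; the sample names and the malloc-based buffer are illustration only:

    /* Sketch (userspace): laying out treeName + sep + prepath + rel path. */
    #include <stdio.h>
    #include <string.h>
    #include <stdlib.h>

    int main(void)
    {
        const char *tree = "\\\\server\\share";
        const char *prepath = "dir1/dir2";       /* as stored at mount time */
        const char *rel = "\\subdir\\file";      /* built from the dentry chain */
        char sep = '\\';

        size_t dfsplen = strlen(tree), pplen = strlen(prepath) + 1;
        char *full = malloc(dfsplen + pplen + strlen(rel) + 1);

        if (!full)
            return 1;
        memcpy(full, tree, dfsplen);
        full[dfsplen] = sep;                      /* separator before prepath */
        memcpy(full + dfsplen + 1, prepath, pplen - 1);
        for (size_t i = 0; i < pplen - 1; i++)    /* normalize '/' to sep */
            if (full[dfsplen + 1 + i] == '/')
                full[dfsplen + 1 + i] = sep;
        strcpy(full + dfsplen + pplen, rel);

        printf("%s\n", full);   /* \\server\share\dir1\dir2\subdir\file */
        free(full);
        return 0;
    }
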
@@ -229,6 +245,13 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
goto cifs_create_get_file_info;
}
+ if (S_ISDIR(newinode->i_mode)) {
+ CIFSSMBClose(xid, tcon, fid->netfid);
+ iput(newinode);
+ rc = -EISDIR;
+ goto out;
+ }
+
if (!S_ISREG(newinode->i_mode)) {
/*
* The server may allow us to open things like
@@ -399,10 +422,14 @@ cifs_create_set_dentry:
if (rc != 0) {
cifs_dbg(FYI, "Create worked, get_inode_info failed rc = %d\n",
rc);
- if (server->ops->close)
- server->ops->close(xid, tcon, fid);
- goto out;
+ goto out_err;
+ }
+
+ if (S_ISDIR(newinode->i_mode)) {
+ rc = -EISDIR;
+ goto out_err;
}
+
d_drop(direntry);
d_add(direntry, newinode);
@@ -410,6 +437,13 @@ out:
kfree(buf);
kfree(full_path);
return rc;
+
+out_err:
+ if (server->ops->close)
+ server->ops->close(xid, tcon, fid);
+ if (newinode)
+ iput(newinode);
+ goto out;
}
int
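
The new out_err label is the usual kernel goto-cleanup shape: undo the partially built state (close the fid, drop the inode), then rejoin the common exit that frees the temporaries. A minimal standalone version of the pattern, with malloc/free standing in for the fid and inode:

    /* Sketch (userspace): one success exit, one error exit that undoes
     * partial state and then joins the common exit. */
    #include <stdio.h>
    #include <stdlib.h>

    static void *installed;                 /* stands in for the dcache */

    static int do_create(void)
    {
        int rc = 0;
        char *scratch = malloc(64);         /* temporary, freed on every path */
        void *inode = NULL;

        if (!scratch)
            return -1;

        inode = malloc(32);                 /* the object being constructed */
        if (!inode) {
            rc = -1;
            goto out_err;
        }
        /* ... later steps would also jump to out_err on failure ... */
        installed = inode;                  /* success: hand the inode over */

    out:
        free(scratch);
        return rc;

    out_err:
        free(inode);                        /* undo the partial construction */
        goto out;
    }

    int main(void)
    {
        int rc = do_create();

        free(installed);
        return rc ? 1 : 0;
    }
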
@@ -445,7 +479,7 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
* Check for hashed negative dentry. We have already revalidated
* the dentry and it is fine. No need to perform another lookup.
*/
- if (!d_unhashed(direntry))
+ if (!d_in_lookup(direntry))
return -ENOENT;
res = cifs_lookup(inode, direntry, 0);
@@ -856,7 +890,7 @@ static int cifs_ci_hash(const struct dentry *dentry, struct qstr *q)
wchar_t c;
int i, charlen;
- hash = init_name_hash();
+ hash = init_name_hash(dentry);
for (i = 0; i < q->len; i += charlen) {
charlen = codepage->char2uni(&q->name[i], q->len - i, &c);
/* error out if we can't convert the character */
@@ -869,10 +903,10 @@ static int cifs_ci_hash(const struct dentry *dentry, struct qstr *q)
return 0;
}
-static int cifs_ci_compare(const struct dentry *parent, const struct dentry *dentry,
+static int cifs_ci_compare(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
- struct nls_table *codepage = CIFS_SB(parent->d_sb)->local_nls;
+ struct nls_table *codepage = CIFS_SB(dentry->d_sb)->local_nls;
wchar_t c1, c2;
int i, l1, l2;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index d4890b6dc22d..579e41b350a2 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -3366,7 +3366,7 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
struct page *page, *tpage;
unsigned int expected_index;
int rc;
- gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
+ gfp_t gfp = readahead_gfp_mask(mapping);
INIT_LIST_HEAD(tmplist);
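
readahead_gfp_mask() is understood to be the mapping's allocation mask with __GFP_NORETRY and __GFP_NOWARN folded in, so a readahead allocation fails fast and quietly under memory pressure instead of stalling the reader. A toy sketch of that combination; the flag values here are made up:

    /* Sketch (userspace): best-effort readahead allocation mask. */
    #include <stdio.h>

    #define __GFP_NORETRY 0x1000u   /* assumed: give up instead of retrying */
    #define __GFP_NOWARN  0x2000u   /* assumed: no allocation-failure splat */

    static unsigned int readahead_gfp_mask(unsigned int mapping_mask)
    {
        return mapping_mask | __GFP_NORETRY | __GFP_NOWARN;
    }

    int main(void)
    {
        unsigned int gfp_kernel = 0x00c0u;  /* assumed GFP_KERNEL bits */
        printf("%#x\n", readahead_gfp_mask(gfp_kernel));
        return 0;
    }
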
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 514dadb0575d..b87efd0c92d6 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1002,10 +1002,26 @@ struct inode *cifs_root_iget(struct super_block *sb)
struct inode *inode = NULL;
long rc;
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
+ char *path = NULL;
+ int len;
+
+ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
+ && cifs_sb->prepath) {
+ len = strlen(cifs_sb->prepath);
+ path = kzalloc(len + 2 /* leading sep + null */, GFP_KERNEL);
+ if (path == NULL)
+ return ERR_PTR(-ENOMEM);
+ path[0] = '/';
+ memcpy(path+1, cifs_sb->prepath, len);
+ } else {
+ path = kstrdup("", GFP_KERNEL);
+ if (path == NULL)
+ return ERR_PTR(-ENOMEM);
+ }
xid = get_xid();
if (tcon->unix_ext) {
- rc = cifs_get_inode_info_unix(&inode, "", sb, xid);
+ rc = cifs_get_inode_info_unix(&inode, path, sb, xid);
/* some servers mistakenly claim POSIX support */
if (rc != -EOPNOTSUPP)
goto iget_no_retry;
@@ -1013,7 +1029,8 @@ struct inode *cifs_root_iget(struct super_block *sb)
tcon->unix_ext = false;
}
- rc = cifs_get_inode_info(&inode, "", NULL, sb, xid, NULL);
+ convert_delimiter(path, CIFS_DIR_SEP(cifs_sb));
+ rc = cifs_get_inode_info(&inode, path, NULL, sb, xid, NULL);
iget_no_retry:
if (!inode) {
@@ -1042,6 +1059,7 @@ iget_no_retry:
}
out:
+ kfree(path);
/* can not call macro free_xid here since in a void func
* TODO: This is no longer true
*/
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 3525ed756173..d203c0329626 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1044,6 +1044,9 @@ smb2_new_lease_key(struct cifs_fid *fid)
get_random_bytes(fid->lease_key, SMB2_LEASE_KEY_SIZE);
}
+#define SMB2_SYMLINK_STRUCT_SIZE \
+ (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp))
+
static int
smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
const char *full_path, char **target_path,
@@ -1056,7 +1059,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_fid fid;
struct smb2_err_rsp *err_buf = NULL;
struct smb2_symlink_err_rsp *symlink;
- unsigned int sub_len, sub_offset;
+ unsigned int sub_len;
+ unsigned int sub_offset;
+ unsigned int print_len;
+ unsigned int print_offset;
cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
@@ -1077,11 +1083,33 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
kfree(utf16_path);
return -ENOENT;
}
+
+ if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) ||
+ get_rfc1002_length(err_buf) + 4 < SMB2_SYMLINK_STRUCT_SIZE) {
+ kfree(utf16_path);
+ return -ENOENT;
+ }
+
/* open must fail on symlink - reset rc */
rc = 0;
symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData;
sub_len = le16_to_cpu(symlink->SubstituteNameLength);
sub_offset = le16_to_cpu(symlink->SubstituteNameOffset);
+ print_len = le16_to_cpu(symlink->PrintNameLength);
+ print_offset = le16_to_cpu(symlink->PrintNameOffset);
+
+ if (get_rfc1002_length(err_buf) + 4 <
+ SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) {
+ kfree(utf16_path);
+ return -ENOENT;
+ }
+
+ if (get_rfc1002_length(err_buf) + 4 <
+ SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) {
+ kfree(utf16_path);
+ return -ENOENT;
+ }
+
*target_path = cifs_strndup_from_utf16(
(char *)symlink->PathBuffer + sub_offset,
sub_len, true, cifs_sb->local_nls);
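
The added checks treat the error response as untrusted input: the fixed symlink struct must fit in the received frame, and each (offset, length) pair must land inside it, before PathBuffer is dereferenced. A standalone sketch of the same validation on a simplified record; the struct layout here is a stand-in, not the SMB2 wire format:

    /* Sketch (userspace): bounds-check offset/length pairs in a frame. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <stdint.h>

    struct rsp {
        uint16_t sub_off, sub_len;          /* substitute-name region */
        uint16_t print_off, print_len;      /* print-name region */
        unsigned char payload[];            /* variable-length tail */
    };

    static const unsigned char *get_region(const unsigned char *frame,
                                           size_t frame_len,
                                           uint16_t off, uint16_t len)
    {
        if (frame_len < sizeof(struct rsp))         /* header must fit */
            return NULL;
        /* region must fit too; u16 sums cannot overflow size_t */
        if (frame_len < sizeof(struct rsp) + (size_t)off + len)
            return NULL;
        return frame + sizeof(struct rsp) + off;
    }

    int main(void)
    {
        size_t frame_len = sizeof(struct rsp) + 8;
        unsigned char *frame = calloc(1, frame_len);
        struct rsp *r = (struct rsp *)frame;

        if (!frame)
            return 1;
        memcpy(r->payload, "target", 7);
        r->sub_off = 0;
        r->sub_len = 7;

        const unsigned char *p = get_region(frame, frame_len,
                                            r->sub_off, r->sub_len);
        printf("%s\n", p ? (const char *)p : "malformed frame");
        free(frame);
        return 0;
    }
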
@@ -1515,6 +1543,8 @@ struct smb_version_operations smb20_operations = {
.rename = smb2_rename_path,
.create_hardlink = smb2_create_hardlink,
.query_symlink = smb2_query_symlink,
+ .query_mf_symlink = smb3_query_mf_symlink,
+ .create_mf_symlink = smb3_create_mf_symlink,
.open = smb2_open_file,
.set_fid = smb2_set_fid,
.close = smb2_close_file,
diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c
index f36a4040afb8..b0b9cda41928 100644
--- a/fs/coda/pioctl.c
+++ b/fs/coda/pioctl.c
@@ -35,7 +35,6 @@ const struct inode_operations coda_ioctl_inode_operations = {
};
const struct file_operations coda_ioctl_operations = {
- .owner = THIS_MODULE,
.unlocked_ioctl = coda_pioctl,
.llseek = noop_llseek,
};
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index bd01b92aad98..c1e9f29c924c 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -57,6 +57,7 @@
#include <linux/i2c-dev.h>
#include <linux/atalk.h>
#include <linux/gfp.h>
+#include <linux/cec.h>
#include "internal.h"
@@ -1377,6 +1378,17 @@ COMPATIBLE_IOCTL(VIDEO_GET_NAVI)
COMPATIBLE_IOCTL(VIDEO_SET_ATTRIBUTES)
COMPATIBLE_IOCTL(VIDEO_GET_SIZE)
COMPATIBLE_IOCTL(VIDEO_GET_FRAME_RATE)
+/* cec */
+COMPATIBLE_IOCTL(CEC_ADAP_G_CAPS)
+COMPATIBLE_IOCTL(CEC_ADAP_G_LOG_ADDRS)
+COMPATIBLE_IOCTL(CEC_ADAP_S_LOG_ADDRS)
+COMPATIBLE_IOCTL(CEC_ADAP_G_PHYS_ADDR)
+COMPATIBLE_IOCTL(CEC_ADAP_S_PHYS_ADDR)
+COMPATIBLE_IOCTL(CEC_G_MODE)
+COMPATIBLE_IOCTL(CEC_S_MODE)
+COMPATIBLE_IOCTL(CEC_TRANSMIT)
+COMPATIBLE_IOCTL(CEC_RECEIVE)
+COMPATIBLE_IOCTL(CEC_DQEVENT)
/* joystick */
COMPATIBLE_IOCTL(JSIOCGVERSION)
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index bbc1252a59f5..c30cf49b69d2 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -80,11 +80,11 @@ static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buf
count = attr->show(item, buffer->page);
- buffer->needs_read_fill = 0;
BUG_ON(count > (ssize_t)SIMPLE_ATTR_SIZE);
- if (count >= 0)
+ if (count >= 0) {
+ buffer->needs_read_fill = 0;
buffer->count = count;
- else
+ } else
ret = count;
return ret;
}
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 2fc8c43ce531..c502c116924c 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -318,6 +318,7 @@ int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
bio->bi_bdev = inode->i_sb->s_bdev;
bio->bi_iter.bi_sector =
pblk << (inode->i_sb->s_blocksize_bits - 9);
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
ret = bio_add_page(bio, ciphertext_page,
inode->i_sb->s_blocksize, 0);
if (ret != inode->i_sb->s_blocksize) {
@@ -327,7 +328,7 @@ int fscrypt_zeroout_range(struct inode *inode, pgoff_t lblk,
err = -EIO;
goto errout;
}
- err = submit_bio_wait(WRITE, bio);
+ err = submit_bio_wait(bio);
if ((err == 0) && bio->bi_error)
err = -EIO;
bio_put(bio);
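
This hunk tracks the block-layer interface change where the operation moved onto the bio itself: bio_set_op_attrs() stamps the op and flags on the bio, and submit_bio_wait() consequently drops its rw argument. A toy model of the new calling convention; the types here are stand-ins, not the kernel's:

    /* Sketch (userspace): the op travels with the bio, not the submit call. */
    #include <stdio.h>

    enum req_op { REQ_OP_READ, REQ_OP_WRITE };

    struct bio {
        enum req_op bi_op;
        unsigned int bi_op_flags;
    };

    static void bio_set_op_attrs(struct bio *bio, enum req_op op,
                                 unsigned int flags)
    {
        bio->bi_op = op;
        bio->bi_op_flags = flags;
    }

    static int submit_bio_wait(struct bio *bio)    /* was: (int rw, bio) */
    {
        printf("submitting op=%d flags=%#x\n", bio->bi_op, bio->bi_op_flags);
        return 0;
    }

    int main(void)
    {
        struct bio bio;

        bio_set_op_attrs(&bio, REQ_OP_WRITE, 0);
        return submit_bio_wait(&bio);
    }
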
diff --git a/fs/dax.c b/fs/dax.c
index e207f8f9b700..993dc6fe0416 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -75,13 +75,13 @@ static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
struct request_queue *q = bdev->bd_queue;
long rc = -EIO;
- dax->addr = (void __pmem *) ERR_PTR(-EIO);
+ dax->addr = ERR_PTR(-EIO);
if (blk_queue_enter(q, true) != 0)
return rc;
rc = bdev_direct_access(bdev, dax);
if (rc < 0) {
- dax->addr = (void __pmem *) ERR_PTR(rc);
+ dax->addr = ERR_PTR(rc);
blk_queue_exit(q);
return rc;
}
@@ -147,12 +147,12 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
struct buffer_head *bh)
{
loff_t pos = start, max = start, bh_max = start;
- bool hole = false, need_wmb = false;
+ bool hole = false;
struct block_device *bdev = NULL;
int rw = iov_iter_rw(iter), rc;
long map_len = 0;
struct blk_dax_ctl dax = {
- .addr = (void __pmem *) ERR_PTR(-EIO),
+ .addr = ERR_PTR(-EIO),
};
unsigned blkbits = inode->i_blkbits;
sector_t file_blks = (i_size_read(inode) + (1 << blkbits) - 1)
@@ -218,7 +218,6 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
if (iov_iter_rw(iter) == WRITE) {
len = copy_from_iter_pmem(dax.addr, max - pos, iter);
- need_wmb = true;
} else if (!hole)
len = copy_to_iter((void __force *) dax.addr, max - pos,
iter);
@@ -235,8 +234,6 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
dax.addr += len;
}
- if (need_wmb)
- wmb_pmem();
dax_unmap_atomic(bdev, &dax);
return (pos == start) ? rc : pos - start;
@@ -788,7 +785,6 @@ int dax_writeback_mapping_range(struct address_space *mapping,
return ret;
}
}
- wmb_pmem();
return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
@@ -819,16 +815,16 @@ static int dax_insert_mapping(struct address_space *mapping,
}
/**
- * __dax_fault - handle a page fault on a DAX file
+ * dax_fault - handle a page fault on a DAX file
* @vma: The virtual memory area where the fault occurred
* @vmf: The description of the fault
* @get_block: The filesystem method used to translate file offsets to blocks
*
* When a page fault occurs, filesystems may call this helper in their
- * fault handler for DAX files. __dax_fault() assumes the caller has done all
+ * fault handler for DAX files. dax_fault() assumes the caller has done all
* the necessary locking for the page fault to proceed successfully.
*/
-int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
+int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block)
{
struct file *file = vma->vm_file;
@@ -913,33 +909,6 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
return VM_FAULT_SIGBUS | major;
return VM_FAULT_NOPAGE | major;
}
-EXPORT_SYMBOL(__dax_fault);
-
-/**
- * dax_fault - handle a page fault on a DAX file
- * @vma: The virtual memory area where the fault occurred
- * @vmf: The description of the fault
- * @get_block: The filesystem method used to translate file offsets to blocks
- *
- * When a page fault occurs, filesystems may call this helper in their
- * fault handler for DAX files.
- */
-int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
- get_block_t get_block)
-{
- int result;
- struct super_block *sb = file_inode(vma->vm_file)->i_sb;
-
- if (vmf->flags & FAULT_FLAG_WRITE) {
- sb_start_pagefault(sb);
- file_update_time(vma->vm_file);
- }
- result = __dax_fault(vma, vmf, get_block);
- if (vmf->flags & FAULT_FLAG_WRITE)
- sb_end_pagefault(sb);
-
- return result;
-}
EXPORT_SYMBOL_GPL(dax_fault);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
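
With the wrapper gone, the sb_start_pagefault()/file_update_time() bracketing on write faults becomes the caller's job, as the ext2 hunks later in this diff show. A standalone sketch of that caller-side shape; every name here is a stand-in:

    /* Sketch (userspace): per-fs fault handler brackets the core call. */
    #include <stdio.h>

    #define FAULT_FLAG_WRITE 0x1

    static void sb_start_pagefault(void) { printf("start pagefault\n"); }
    static void sb_end_pagefault(void)   { printf("end pagefault\n"); }
    static void file_update_time(void)   { printf("update mtime\n"); }

    static int core_dax_fault(unsigned int flags)
    {
        (void)flags;
        return 0;   /* the real work: map the pfn, insert the PTE */
    }

    static int fs_dax_fault(unsigned int flags)
    {
        int ret;

        if (flags & FAULT_FLAG_WRITE) {
            sb_start_pagefault();
            file_update_time();
        }
        ret = core_dax_fault(flags);   /* was __dax_fault(), now dax_fault() */
        if (flags & FAULT_FLAG_WRITE)
            sb_end_pagefault();
        return ret;
    }

    int main(void) { return fs_dax_fault(FAULT_FLAG_WRITE); }
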
@@ -967,7 +936,16 @@ static void __dax_dbg(struct buffer_head *bh, unsigned long address,
#define dax_pmd_dbg(bh, address, reason) __dax_dbg(bh, address, reason, "dax_pmd")
-int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
+/**
+ * dax_pmd_fault - handle a PMD fault on a DAX file
+ * @vma: The virtual memory area where the fault occurred
+ * @vmf: The description of the fault
+ * @get_block: The filesystem method used to translate file offsets to blocks
+ *
+ * When a page fault occurs, filesystems may call this helper in their
+ * pmd_fault handler for DAX files.
+ */
+int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmd, unsigned int flags, get_block_t get_block)
{
struct file *file = vma->vm_file;
@@ -1119,7 +1097,7 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
*
* The PMD path doesn't have an equivalent to
* dax_pfn_mkwrite(), though, so for a read followed by a
- * write we traverse all the way through __dax_pmd_fault()
+ * write we traverse all the way through dax_pmd_fault()
* twice. This means we can just skip inserting a radix tree
* entry completely on the initial read and just wait until
* the write to insert a dirty entry.
@@ -1148,33 +1126,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
result = VM_FAULT_FALLBACK;
goto out;
}
-EXPORT_SYMBOL_GPL(__dax_pmd_fault);
-
-/**
- * dax_pmd_fault - handle a PMD fault on a DAX file
- * @vma: The virtual memory area where the fault occurred
- * @vmf: The description of the fault
- * @get_block: The filesystem method used to translate file offsets to blocks
- *
- * When a page fault occurs, filesystems may call this helper in their
- * pmd_fault handler for DAX files.
- */
-int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
- pmd_t *pmd, unsigned int flags, get_block_t get_block)
-{
- int result;
- struct super_block *sb = file_inode(vma->vm_file)->i_sb;
-
- if (flags & FAULT_FLAG_WRITE) {
- sb_start_pagefault(sb);
- file_update_time(vma->vm_file);
- }
- result = __dax_pmd_fault(vma, address, pmd, flags, get_block);
- if (flags & FAULT_FLAG_WRITE)
- sb_end_pagefault(sb);
-
- return result;
-}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -1232,7 +1183,6 @@ int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
if (dax_map_atomic(bdev, &dax) < 0)
return PTR_ERR(dax.addr);
clear_pmem(dax.addr + offset, length);
- wmb_pmem();
dax_unmap_atomic(bdev, &dax);
}
return 0;
diff --git a/fs/dcache.c b/fs/dcache.c
index d6847d7b123d..5c7cc953ac81 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -104,11 +104,9 @@ static unsigned int d_hash_shift __read_mostly;
static struct hlist_bl_head *dentry_hashtable __read_mostly;
-static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
- unsigned int hash)
+static inline struct hlist_bl_head *d_hash(unsigned int hash)
{
- hash += (unsigned long) parent / L1_CACHE_BYTES;
- return dentry_hashtable + hash_32(hash, d_hash_shift);
+ return dentry_hashtable + (hash >> (32 - d_hash_shift));
}
#define IN_LOOKUP_SHIFT 10
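
The parent is no longer folded into the bucket index here; instead, per the init_name_hash()/full_name_hash() changes elsewhere in this diff, it salts the name hash when the hash is computed, leaving d_hash() a plain shift of the top bits. A toy sketch of salt-then-shift bucket selection; the mixing function is not the kernel's:

    /* Sketch (userspace): parent-salted name hash, bucket from top bits. */
    #include <stdio.h>
    #include <stdint.h>

    #define D_HASH_SHIFT 16   /* assumed: 2^16 buckets */

    static uint32_t hash32(uint64_t v)
    {
        v *= 0x9e3779b97f4a7c15ull;   /* toy multiplicative mix */
        return (uint32_t)(v >> 32);
    }

    static uint32_t name_hash(const void *parent, const char *name)
    {
        uint32_t h = (uint32_t)(uintptr_t)parent;   /* the salt */

        while (*name)
            h = hash32(h + (unsigned char)*name++);
        return h;
    }

    static unsigned int d_hash(uint32_t hash)   /* no parent argument now */
    {
        return hash >> (32 - D_HASH_SHIFT);
    }

    int main(void)
    {
        int parent;

        printf("bucket %u\n", d_hash(name_hash(&parent, "file.txt")));
        return 0;
    }
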
@@ -226,10 +224,9 @@ static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char
static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
- const unsigned char *cs;
/*
* Be careful about RCU walk racing with rename:
- * use ACCESS_ONCE to fetch the name pointer.
+ * use 'lockless_dereference' to fetch the name pointer.
*
* NOTE! Even if a rename will mean that the length
* was not loaded atomically, we don't care. The
@@ -243,8 +240,8 @@ static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *c
* early because the data cannot match (there can
* be no NUL in the ct/tcount data)
*/
- cs = ACCESS_ONCE(dentry->d_name.name);
- smp_read_barrier_depends();
+ const unsigned char *cs = lockless_dereference(dentry->d_name.name);
+
return dentry_string_cmp(cs, ct, tcount);
}
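
lockless_dereference() is understood to bundle exactly the two lines it replaces here: a once-only load of the pointer plus the data-dependency barrier that only Alpha needs. A userspace analogue using C11's memory_order_consume, which names the same guarantee:

    /* Sketch (userspace): dependency-ordered pointer load. */
    #include <stdio.h>
    #include <stdatomic.h>

    static _Atomic(const char *) shared_name;

    static const char *lockless_dereference(void)
    {
        /* one load, dependency-ordered against the publisher's store */
        return atomic_load_explicit(&shared_name, memory_order_consume);
    }

    int main(void)
    {
        atomic_store_explicit(&shared_name, "dentry-name",
                              memory_order_release);
        printf("%s\n", lockless_dereference());
        return 0;
    }
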
@@ -319,60 +316,23 @@ static void dentry_free(struct dentry *dentry)
call_rcu(&dentry->d_u.d_rcu, __d_free);
}
-/**
- * dentry_rcuwalk_invalidate - invalidate in-progress rcu-walk lookups
- * @dentry: the target dentry
- * After this call, in-progress rcu-walk path lookup will fail. This
- * should be called after unhashing, and after changing d_inode (if
- * the dentry has not already been unhashed).
- */
-static inline void dentry_rcuwalk_invalidate(struct dentry *dentry)
-{
- lockdep_assert_held(&dentry->d_lock);
- /* Go through am invalidation barrier */
- write_seqcount_invalidate(&dentry->d_seq);
-}
-
/*
* Release the dentry's inode, using the filesystem
- * d_iput() operation if defined. Dentry has no refcount
- * and is unhashed.
- */
-static void dentry_iput(struct dentry * dentry)
- __releases(dentry->d_lock)
- __releases(dentry->d_inode->i_lock)
-{
- struct inode *inode = dentry->d_inode;
- if (inode) {
- __d_clear_type_and_inode(dentry);
- hlist_del_init(&dentry->d_u.d_alias);
- spin_unlock(&dentry->d_lock);
- spin_unlock(&inode->i_lock);
- if (!inode->i_nlink)
- fsnotify_inoderemove(inode);
- if (dentry->d_op && dentry->d_op->d_iput)
- dentry->d_op->d_iput(dentry, inode);
- else
- iput(inode);
- } else {
- spin_unlock(&dentry->d_lock);
- }
-}
-
-/*
- * Release the dentry's inode, using the filesystem
- * d_iput() operation if defined. dentry remains in-use.
+ * d_iput() operation if defined.
*/
static void dentry_unlink_inode(struct dentry * dentry)
__releases(dentry->d_lock)
__releases(dentry->d_inode->i_lock)
{
struct inode *inode = dentry->d_inode;
+ bool hashed = !d_unhashed(dentry);
- raw_write_seqcount_begin(&dentry->d_seq);
+ if (hashed)
+ raw_write_seqcount_begin(&dentry->d_seq);
__d_clear_type_and_inode(dentry);
hlist_del_init(&dentry->d_u.d_alias);
- raw_write_seqcount_end(&dentry->d_seq);
+ if (hashed)
+ raw_write_seqcount_end(&dentry->d_seq);
spin_unlock(&dentry->d_lock);
spin_unlock(&inode->i_lock);
if (!inode->i_nlink)
@@ -488,13 +448,14 @@ void __d_drop(struct dentry *dentry)
if (unlikely(IS_ROOT(dentry)))
b = &dentry->d_sb->s_anon;
else
- b = d_hash(dentry->d_parent, dentry->d_name.hash);
+ b = d_hash(dentry->d_name.hash);
hlist_bl_lock(b);
__hlist_bl_del(&dentry->d_hash);
dentry->d_hash.pprev = NULL;
hlist_bl_unlock(b);
- dentry_rcuwalk_invalidate(dentry);
+ /* After this call, in-progress rcu-walk path lookup will fail. */
+ write_seqcount_invalidate(&dentry->d_seq);
}
}
EXPORT_SYMBOL(__d_drop);
@@ -573,12 +534,10 @@ static void __dentry_kill(struct dentry *dentry)
dentry_unlist(dentry, parent);
if (parent)
spin_unlock(&parent->d_lock);
- dentry_iput(dentry);
- /*
- * dentry_iput drops the locks, at which point nobody (except
- * transient RCU lookups) can reach this dentry.
- */
- BUG_ON(dentry->d_lockref.count > 0);
+ if (dentry->d_inode)
+ dentry_unlink_inode(dentry);
+ else
+ spin_unlock(&dentry->d_lock);
this_cpu_dec(nr_dentry);
if (dentry->d_op && dentry->d_op->d_release)
dentry->d_op->d_release(dentry);
@@ -622,7 +581,6 @@ static struct dentry *dentry_kill(struct dentry *dentry)
failed:
spin_unlock(&dentry->d_lock);
- cpu_relax();
return dentry; /* try again with same dentry */
}
@@ -796,6 +754,8 @@ void dput(struct dentry *dentry)
return;
repeat:
+ might_sleep();
+
rcu_read_lock();
if (likely(fast_dput(dentry))) {
rcu_read_unlock();
@@ -829,8 +789,10 @@ repeat:
kill_it:
dentry = dentry_kill(dentry);
- if (dentry)
+ if (dentry) {
+ cond_resched();
goto repeat;
+ }
}
EXPORT_SYMBOL(dput);
@@ -1595,6 +1557,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
struct dentry *dentry;
char *dname;
+ int err;
dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
if (!dentry)
@@ -1653,6 +1616,16 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
INIT_LIST_HEAD(&dentry->d_child);
d_set_d_op(dentry, dentry->d_sb->s_d_op);
+ if (dentry->d_op && dentry->d_op->d_init) {
+ err = dentry->d_op->d_init(dentry);
+ if (err) {
+ if (dname_external(dentry))
+ kfree(external_name(dentry));
+ kmem_cache_free(dentry_cache, dentry);
+ return NULL;
+ }
+ }
+
this_cpu_inc(nr_dentry);
return dentry;
@@ -1716,7 +1689,7 @@ struct dentry *d_alloc_name(struct dentry *parent, const char *name)
struct qstr q;
q.name = name;
- q.hash_len = hashlen_string(name);
+ q.hash_len = hashlen_string(parent, name);
return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);
@@ -1729,7 +1702,6 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
DCACHE_OP_REVALIDATE |
DCACHE_OP_WEAK_REVALIDATE |
DCACHE_OP_DELETE |
- DCACHE_OP_SELECT_INODE |
DCACHE_OP_REAL));
dentry->d_op = op;
if (!op)
@@ -1746,8 +1718,6 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
dentry->d_flags |= DCACHE_OP_DELETE;
if (op->d_prune)
dentry->d_flags |= DCACHE_OP_PRUNE;
- if (op->d_select_inode)
- dentry->d_flags |= DCACHE_OP_SELECT_INODE;
if (op->d_real)
dentry->d_flags |= DCACHE_OP_REAL;
@@ -1815,7 +1785,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
raw_write_seqcount_begin(&dentry->d_seq);
__d_set_inode_and_type(dentry, inode, add_flags);
raw_write_seqcount_end(&dentry->d_seq);
- __fsnotify_d_instantiate(dentry);
+ fsnotify_update_flags(dentry);
spin_unlock(&dentry->d_lock);
}
@@ -2067,42 +2037,19 @@ struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
}
EXPORT_SYMBOL(d_add_ci);
-/*
- * Do the slow-case of the dentry name compare.
- *
- * Unlike the dentry_cmp() function, we need to atomically
- * load the name and length information, so that the
- * filesystem can rely on them, and can use the 'name' and
- * 'len' information without worrying about walking off the
- * end of memory etc.
- *
- * Thus the read_seqcount_retry() and the "duplicate" info
- * in arguments (the low-level filesystem should not look
- * at the dentry inode or name contents directly, since
- * rename can change them while we're in RCU mode).
- */
-enum slow_d_compare {
- D_COMP_OK,
- D_COMP_NOMATCH,
- D_COMP_SEQRETRY,
-};
-static noinline enum slow_d_compare slow_dentry_cmp(
- const struct dentry *parent,
- struct dentry *dentry,
- unsigned int seq,
- const struct qstr *name)
+static inline bool d_same_name(const struct dentry *dentry,
+ const struct dentry *parent,
+ const struct qstr *name)
{
- int tlen = dentry->d_name.len;
- const char *tname = dentry->d_name.name;
-
- if (read_seqcount_retry(&dentry->d_seq, seq)) {
- cpu_relax();
- return D_COMP_SEQRETRY;
+ if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
+ if (dentry->d_name.len != name->len)
+ return false;
+ return dentry_cmp(dentry, name->name, name->len) == 0;
}
- if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
- return D_COMP_NOMATCH;
- return D_COMP_OK;
+ return parent->d_op->d_compare(dentry,
+ dentry->d_name.len, dentry->d_name.name,
+ name) == 0;
}
/**
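
d_same_name() folds the compare logic that was previously open-coded at each lookup site: a length check plus byte compare on the fast path, the filesystem's d_compare() on the slow path. A standalone sketch of the same factoring with toy types:

    /* Sketch (userspace): one helper, fast memcmp path or fs comparator. */
    #include <stdio.h>
    #include <string.h>
    #include <strings.h>

    struct qstr { const char *name; unsigned int len; };

    typedef int (*d_compare_t)(const struct qstr *entry,
                               const struct qstr *wanted);

    static int ci_compare(const struct qstr *e, const struct qstr *w)
    {
        return e->len != w->len || strncasecmp(e->name, w->name, e->len);
    }

    static int d_same_name(const struct qstr *entry, d_compare_t cmp,
                           const struct qstr *name)
    {
        if (!cmp) {                               /* fast path */
            if (entry->len != name->len)
                return 0;
            return memcmp(entry->name, name->name, name->len) == 0;
        }
        return cmp(entry, name) == 0;             /* slow path */
    }

    int main(void)
    {
        struct qstr on_disk = { "FILE.TXT", 8 }, wanted = { "file.txt", 8 };

        printf("exact: %d  ci: %d\n",
               d_same_name(&on_disk, NULL, &wanted),
               d_same_name(&on_disk, ci_compare, &wanted));
        return 0;
    }
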
@@ -2140,7 +2087,7 @@ struct dentry *__d_lookup_rcu(const struct dentry *parent,
{
u64 hashlen = name->hash_len;
const unsigned char *str = name->name;
- struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen));
+ struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
struct hlist_bl_node *node;
struct dentry *dentry;
@@ -2181,6 +2128,9 @@ seqretry:
* dentry compare, we will do seqretries until it is stable,
* and if we end up with a successful lookup, we actually
* want to exit RCU lookup anyway.
+ *
+ * Note that raw_seqcount_begin still *does* smp_rmb(), so
+ * we are still guaranteed NUL-termination of ->d_name.name.
*/
seq = raw_seqcount_begin(&dentry->d_seq);
if (dentry->d_parent != parent)
@@ -2189,24 +2139,28 @@ seqretry:
continue;
if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
+ int tlen;
+ const char *tname;
if (dentry->d_name.hash != hashlen_hash(hashlen))
continue;
- *seqp = seq;
- switch (slow_dentry_cmp(parent, dentry, seq, name)) {
- case D_COMP_OK:
- return dentry;
- case D_COMP_NOMATCH:
- continue;
- default:
+ tlen = dentry->d_name.len;
+ tname = dentry->d_name.name;
+ /* we want a consistent (name,len) pair */
+ if (read_seqcount_retry(&dentry->d_seq, seq)) {
+ cpu_relax();
goto seqretry;
}
+ if (parent->d_op->d_compare(dentry,
+ tlen, tname, name) != 0)
+ continue;
+ } else {
+ if (dentry->d_name.hash_len != hashlen)
+ continue;
+ if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
+ continue;
}
-
- if (dentry->d_name.hash_len != hashlen)
- continue;
*seqp = seq;
- if (!dentry_cmp(dentry, str, hashlen_len(hashlen)))
- return dentry;
+ return dentry;
}
return NULL;
}
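
The retry logic above is the classic seqcount read pattern: snapshot a generation counter, read without locks, and redo the reads if a writer bumped the counter in between. A simplified single-writer userspace version; a real seqlock needs stronger fencing around the data accesses themselves:

    /* Sketch (userspace): seqcount read-side retry loop. */
    #include <stdio.h>
    #include <stdatomic.h>

    static atomic_uint seq;
    static int data_a, data_b;

    static void writer(int a, int b)
    {
        atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* odd: busy */
        data_a = a;
        data_b = b;
        atomic_fetch_add_explicit(&seq, 1, memory_order_release); /* even: ok */
    }

    static void reader(int *a, int *b)
    {
        unsigned int s;

        do {
            while ((s = atomic_load_explicit(&seq, memory_order_acquire)) & 1)
                ;                      /* writer active: spin (cpu_relax()) */
            *a = data_a;
            *b = data_b;
        } while (atomic_load_explicit(&seq, memory_order_acquire) != s);
    }

    int main(void)
    {
        int a, b;

        writer(1, 2);
        reader(&a, &b);
        printf("%d %d\n", a, b);
        return 0;
    }
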
@@ -2254,10 +2208,8 @@ EXPORT_SYMBOL(d_lookup);
*/
struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
{
- unsigned int len = name->len;
unsigned int hash = name->hash;
- const unsigned char *str = name->name;
- struct hlist_bl_head *b = d_hash(parent, hash);
+ struct hlist_bl_head *b = d_hash(hash);
struct hlist_bl_node *node;
struct dentry *found = NULL;
struct dentry *dentry;
@@ -2295,21 +2247,8 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
if (d_unhashed(dentry))
goto next;
- /*
- * It is safe to compare names since d_move() cannot
- * change the qstr (protected by d_lock).
- */
- if (parent->d_flags & DCACHE_OP_COMPARE) {
- int tlen = dentry->d_name.len;
- const char *tname = dentry->d_name.name;
- if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
- goto next;
- } else {
- if (dentry->d_name.len != len)
- goto next;
- if (dentry_cmp(dentry, str, len))
- goto next;
- }
+ if (!d_same_name(dentry, parent, name))
+ goto next;
dentry->d_lockref.count++;
found = dentry;
@@ -2337,7 +2276,7 @@ struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
* calculate the standard hash first, as the d_op->d_hash()
* routine may choose to leave the hash value unchanged.
*/
- name->hash = full_name_hash(name->name, name->len);
+ name->hash = full_name_hash(dir, name->name, name->len);
if (dir->d_flags & DCACHE_OP_HASH) {
int err = dir->d_op->d_hash(dir, name);
if (unlikely(err < 0))
@@ -2400,19 +2339,15 @@ again:
}
EXPORT_SYMBOL(d_delete);
-static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
+static void __d_rehash(struct dentry *entry)
{
+ struct hlist_bl_head *b = d_hash(entry->d_name.hash);
BUG_ON(!d_unhashed(entry));
hlist_bl_lock(b);
hlist_bl_add_head_rcu(&entry->d_hash, b);
hlist_bl_unlock(b);
}
-static void _d_rehash(struct dentry * entry)
-{
- __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
-}
-
/**
* d_rehash - add an entry back to the hash
* @entry: dentry to add to the hash
@@ -2423,7 +2358,7 @@ static void _d_rehash(struct dentry * entry)
void d_rehash(struct dentry * entry)
{
spin_lock(&entry->d_lock);
- _d_rehash(entry);
+ __d_rehash(entry);
spin_unlock(&entry->d_lock);
}
EXPORT_SYMBOL(d_rehash);
@@ -2462,9 +2397,7 @@ struct dentry *d_alloc_parallel(struct dentry *parent,
const struct qstr *name,
wait_queue_head_t *wq)
{
- unsigned int len = name->len;
unsigned int hash = name->hash;
- const unsigned char *str = name->name;
struct hlist_bl_head *b = in_lookup_hash(parent, hash);
struct hlist_bl_node *node;
struct dentry *new = d_alloc(parent, name);
@@ -2515,17 +2448,8 @@ retry:
continue;
if (dentry->d_parent != parent)
continue;
- if (parent->d_flags & DCACHE_OP_COMPARE) {
- int tlen = dentry->d_name.len;
- const char *tname = dentry->d_name.name;
- if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
- continue;
- } else {
- if (dentry->d_name.len != len)
- continue;
- if (dentry_cmp(dentry, str, len))
- continue;
- }
+ if (!d_same_name(dentry, parent, name))
+ continue;
hlist_bl_unlock(b);
/* now we can try to grab a reference */
if (!lockref_get_not_dead(&dentry->d_lockref)) {
@@ -2552,17 +2476,8 @@ retry:
goto mismatch;
if (unlikely(d_unhashed(dentry)))
goto mismatch;
- if (parent->d_flags & DCACHE_OP_COMPARE) {
- int tlen = dentry->d_name.len;
- const char *tname = dentry->d_name.name;
- if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
- goto mismatch;
- } else {
- if (unlikely(dentry->d_name.len != len))
- goto mismatch;
- if (unlikely(dentry_cmp(dentry, str, len)))
- goto mismatch;
- }
+ if (unlikely(!d_same_name(dentry, parent, name)))
+ goto mismatch;
/* OK, it *is* a hashed match; return it */
spin_unlock(&dentry->d_lock);
dput(new);
@@ -2615,9 +2530,9 @@ static inline void __d_add(struct dentry *dentry, struct inode *inode)
raw_write_seqcount_begin(&dentry->d_seq);
__d_set_inode_and_type(dentry, inode, add_flags);
raw_write_seqcount_end(&dentry->d_seq);
- __fsnotify_d_instantiate(dentry);
+ fsnotify_update_flags(dentry);
}
- _d_rehash(dentry);
+ __d_rehash(dentry);
if (dir)
end_dir_add(dir, n);
spin_unlock(&dentry->d_lock);
@@ -2658,8 +2573,6 @@ EXPORT_SYMBOL(d_add);
struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
{
struct dentry *alias;
- int len = entry->d_name.len;
- const char *name = entry->d_name.name;
unsigned int hash = entry->d_name.hash;
spin_lock(&inode->i_lock);
@@ -2673,9 +2586,7 @@ struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
continue;
if (alias->d_parent != entry->d_parent)
continue;
- if (alias->d_name.len != len)
- continue;
- if (dentry_cmp(alias, name, len))
+ if (!d_same_name(alias, entry->d_parent, &entry->d_name))
continue;
spin_lock(&alias->d_lock);
if (!d_unhashed(alias)) {
@@ -2683,7 +2594,7 @@ struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
alias = NULL;
} else {
__dget_dlock(alias);
- _d_rehash(alias);
+ __d_rehash(alias);
spin_unlock(&alias->d_lock);
}
spin_unlock(&inode->i_lock);
@@ -2708,7 +2619,7 @@ EXPORT_SYMBOL(d_exact_alias);
* Parent inode i_mutex must be held over d_lookup and into this call (to
* keep renames and concurrent inserts, and readdir(2) away).
*/
-void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
+void dentry_update_name_case(struct dentry *dentry, const struct qstr *name)
{
BUG_ON(!inode_is_locked(dentry->d_parent->d_inode));
BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
@@ -2867,24 +2778,10 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
write_seqcount_begin(&dentry->d_seq);
write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
+ /* unhash both */
/* __d_drop does write_seqcount_barrier, but they're OK to nest. */
-
- /*
- * Move the dentry to the target hash queue. Don't bother checking
- * for the same hash queue because of how unlikely it is.
- */
__d_drop(dentry);
- __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));
-
- /*
- * Unhash the target (d_delete() is not usable here). If exchanging
- * the two dentries, then rehash onto the other's hash queue.
- */
__d_drop(target);
- if (exchange) {
- __d_rehash(target,
- d_hash(dentry->d_parent, dentry->d_name.hash));
- }
/* Switch the names.. */
if (exchange)
@@ -2892,6 +2789,11 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
else
copy_name(dentry, target);
+ /* rehash in new place(s) */
+ __d_rehash(dentry);
+ if (exchange)
+ __d_rehash(target);
+
/* ... and switch them in the tree */
if (IS_ROOT(dentry)) {
/* splicing a tree */
@@ -2906,8 +2808,8 @@ static void __d_move(struct dentry *dentry, struct dentry *target,
list_move(&target->d_child, &target->d_parent->d_subdirs);
list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
if (exchange)
- fsnotify_d_move(target);
- fsnotify_d_move(dentry);
+ fsnotify_update_flags(target);
+ fsnotify_update_flags(dentry);
}
write_seqcount_end(&target->d_seq);
@@ -3111,7 +3013,7 @@ static int prepend(char **buffer, int *buflen, const char *str, int namelen)
* Data dependency barrier is needed to make sure that we see that terminating
* NUL. Alpha strikes again, film at 11...
*/
-static int prepend_name(char **buffer, int *buflen, struct qstr *name)
+static int prepend_name(char **buffer, int *buflen, const struct qstr *name)
{
const char *dname = ACCESS_ONCE(name->name);
u32 dlen = ACCESS_ONCE(name->len);
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 4bc1f68243c1..72361baf9da7 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -621,9 +621,6 @@ void debugfs_remove(struct dentry *dentry)
return;
parent = dentry->d_parent;
- if (!parent || d_really_is_negative(parent))
- return;
-
inode_lock(d_inode(parent));
ret = __debugfs_remove(dentry, parent);
inode_unlock(d_inode(parent));
@@ -654,10 +651,6 @@ void debugfs_remove_recursive(struct dentry *dentry)
if (IS_ERR_OR_NULL(dentry))
return;
- parent = dentry->d_parent;
- if (!parent || d_really_is_negative(parent))
- return;
-
parent = dentry;
down:
inode_lock(d_inode(parent));
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 37c134a132c7..d116453b0276 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -396,6 +396,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent)
{
struct inode *inode;
+ s->s_iflags &= ~SB_I_NODEV;
s->s_blocksize = 1024;
s->s_blocksize_bits = 10;
s->s_magic = DEVPTS_SUPER_MAGIC;
@@ -480,7 +481,7 @@ static struct file_system_type devpts_fs_type = {
.name = "devpts",
.mount = devpts_mount,
.kill_sb = devpts_kill_sb,
- .fs_flags = FS_USERNS_MOUNT | FS_USERNS_DEV_MOUNT,
+ .fs_flags = FS_USERNS_MOUNT,
};
/*
diff --git a/fs/direct-io.c b/fs/direct-io.c
index f3b4408be590..7c3ce73cb617 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -108,7 +108,8 @@ struct dio_submit {
/* dio_state communicated between submission path and end_io */
struct dio {
int flags; /* doesn't change */
- int rw;
+ int op;
+ int op_flags;
blk_qc_t bio_cookie;
struct block_device *bio_bdev;
struct inode *inode;
@@ -163,7 +164,7 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,
&sdio->from);
- if (ret < 0 && sdio->blocks_available && (dio->rw & WRITE)) {
+ if (ret < 0 && sdio->blocks_available && (dio->op == REQ_OP_WRITE)) {
struct page *page = ZERO_PAGE(0);
/*
* A memory fault, but the filesystem has some outstanding
@@ -242,7 +243,8 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
transferred = dio->result;
/* Check for short read case */
- if ((dio->rw == READ) && ((offset + transferred) > dio->i_size))
+ if ((dio->op == REQ_OP_READ) &&
+ ((offset + transferred) > dio->i_size))
transferred = dio->i_size - offset;
}
@@ -273,7 +275,7 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async)
*/
dio->iocb->ki_pos += transferred;
- if (dio->rw & WRITE)
+ if (dio->op == REQ_OP_WRITE)
ret = generic_write_sync(dio->iocb, transferred);
dio->iocb->ki_complete(dio->iocb, ret, 0);
}
@@ -375,6 +377,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
bio->bi_bdev = bdev;
bio->bi_iter.bi_sector = first_sector;
+ bio_set_op_attrs(bio, dio->op, dio->op_flags);
if (dio->is_async)
bio->bi_end_io = dio_bio_end_aio;
else
@@ -402,17 +405,16 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
dio->refcount++;
spin_unlock_irqrestore(&dio->bio_lock, flags);
- if (dio->is_async && dio->rw == READ && dio->should_dirty)
+ if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
bio_set_pages_dirty(bio);
dio->bio_bdev = bio->bi_bdev;
if (sdio->submit_io) {
- sdio->submit_io(dio->rw, bio, dio->inode,
- sdio->logical_offset_in_bio);
+ sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
dio->bio_cookie = BLK_QC_T_NONE;
} else
- dio->bio_cookie = submit_bio(dio->rw, bio);
+ dio->bio_cookie = submit_bio(bio);
sdio->bio = NULL;
sdio->boundary = 0;
@@ -478,14 +480,14 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
if (bio->bi_error)
dio->io_error = -EIO;
- if (dio->is_async && dio->rw == READ && dio->should_dirty) {
+ if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) {
err = bio->bi_error;
bio_check_pages_dirty(bio); /* transfers ownership */
} else {
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
- if (dio->rw == READ && !PageCompound(page) &&
+ if (dio->op == REQ_OP_READ && !PageCompound(page) &&
dio->should_dirty)
set_page_dirty_lock(page);
put_page(page);
@@ -638,7 +640,7 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
* which may decide to handle it or also return an unmapped
* buffer head.
*/
- create = dio->rw & WRITE;
+ create = dio->op == REQ_OP_WRITE;
if (dio->flags & DIO_SKIP_HOLES) {
if (fs_startblk <= ((i_size_read(dio->inode) - 1) >>
i_blkbits))
@@ -788,7 +790,7 @@ submit_page_section(struct dio *dio, struct dio_submit *sdio, struct page *page,
{
int ret = 0;
- if (dio->rw & WRITE) {
+ if (dio->op == REQ_OP_WRITE) {
/*
* Read accounting is performed in submit_bio()
*/
@@ -988,7 +990,7 @@ do_holes:
loff_t i_size_aligned;
/* AKPM: eargh, -ENOTBLK is a hack */
- if (dio->rw & WRITE) {
+ if (dio->op == REQ_OP_WRITE) {
put_page(page);
return -ENOTBLK;
}
@@ -1202,7 +1204,12 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
dio->is_async = true;
dio->inode = inode;
- dio->rw = iov_iter_rw(iter) == WRITE ? WRITE_ODIRECT : READ;
+ if (iov_iter_rw(iter) == WRITE) {
+ dio->op = REQ_OP_WRITE;
+ dio->op_flags = WRITE_ODIRECT;
+ } else {
+ dio->op = REQ_OP_READ;
+ }
/*
* For AIO O_(D)SYNC writes we need to defer completions to a workqueue
diff --git a/fs/dlm/config.c b/fs/dlm/config.c
index 1669f6291c95..df955d2209ce 100644
--- a/fs/dlm/config.c
+++ b/fs/dlm/config.c
@@ -73,6 +73,7 @@ struct dlm_cluster {
unsigned int cl_toss_secs;
unsigned int cl_scan_secs;
unsigned int cl_log_debug;
+ unsigned int cl_log_info;
unsigned int cl_protocol;
unsigned int cl_timewarn_cs;
unsigned int cl_waitwarn_us;
@@ -95,6 +96,7 @@ enum {
CLUSTER_ATTR_TOSS_SECS,
CLUSTER_ATTR_SCAN_SECS,
CLUSTER_ATTR_LOG_DEBUG,
+ CLUSTER_ATTR_LOG_INFO,
CLUSTER_ATTR_PROTOCOL,
CLUSTER_ATTR_TIMEWARN_CS,
CLUSTER_ATTR_WAITWARN_US,
@@ -166,6 +168,7 @@ CLUSTER_ATTR(recover_timer, 1);
CLUSTER_ATTR(toss_secs, 1);
CLUSTER_ATTR(scan_secs, 1);
CLUSTER_ATTR(log_debug, 0);
+CLUSTER_ATTR(log_info, 0);
CLUSTER_ATTR(protocol, 0);
CLUSTER_ATTR(timewarn_cs, 1);
CLUSTER_ATTR(waitwarn_us, 0);
@@ -180,6 +183,7 @@ static struct configfs_attribute *cluster_attrs[] = {
[CLUSTER_ATTR_TOSS_SECS] = &cluster_attr_toss_secs,
[CLUSTER_ATTR_SCAN_SECS] = &cluster_attr_scan_secs,
[CLUSTER_ATTR_LOG_DEBUG] = &cluster_attr_log_debug,
+ [CLUSTER_ATTR_LOG_INFO] = &cluster_attr_log_info,
[CLUSTER_ATTR_PROTOCOL] = &cluster_attr_protocol,
[CLUSTER_ATTR_TIMEWARN_CS] = &cluster_attr_timewarn_cs,
[CLUSTER_ATTR_WAITWARN_US] = &cluster_attr_waitwarn_us,
@@ -365,6 +369,7 @@ static struct config_group *make_cluster(struct config_group *g,
cl->cl_toss_secs = dlm_config.ci_toss_secs;
cl->cl_scan_secs = dlm_config.ci_scan_secs;
cl->cl_log_debug = dlm_config.ci_log_debug;
+ cl->cl_log_info = dlm_config.ci_log_info;
cl->cl_protocol = dlm_config.ci_protocol;
cl->cl_timewarn_cs = dlm_config.ci_timewarn_cs;
cl->cl_waitwarn_us = dlm_config.ci_waitwarn_us;
@@ -850,6 +855,7 @@ int dlm_our_addr(struct sockaddr_storage *addr, int num)
#define DEFAULT_TOSS_SECS 10
#define DEFAULT_SCAN_SECS 5
#define DEFAULT_LOG_DEBUG 0
+#define DEFAULT_LOG_INFO 1
#define DEFAULT_PROTOCOL 0
#define DEFAULT_TIMEWARN_CS 500 /* 5 sec = 500 centiseconds */
#define DEFAULT_WAITWARN_US 0
@@ -865,6 +871,7 @@ struct dlm_config_info dlm_config = {
.ci_toss_secs = DEFAULT_TOSS_SECS,
.ci_scan_secs = DEFAULT_SCAN_SECS,
.ci_log_debug = DEFAULT_LOG_DEBUG,
+ .ci_log_info = DEFAULT_LOG_INFO,
.ci_protocol = DEFAULT_PROTOCOL,
.ci_timewarn_cs = DEFAULT_TIMEWARN_CS,
.ci_waitwarn_us = DEFAULT_WAITWARN_US,
diff --git a/fs/dlm/config.h b/fs/dlm/config.h
index f30697bc2780..6041eec886ab 100644
--- a/fs/dlm/config.h
+++ b/fs/dlm/config.h
@@ -31,6 +31,7 @@ struct dlm_config_info {
int ci_toss_secs;
int ci_scan_secs;
int ci_log_debug;
+ int ci_log_info;
int ci_protocol;
int ci_timewarn_cs;
int ci_waitwarn_us;
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 5eff6ea3e27f..216b61604ef9 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -65,8 +65,16 @@ struct dlm_mhandle;
printk(KERN_ERR "dlm: "fmt"\n" , ##args)
#define log_error(ls, fmt, args...) \
printk(KERN_ERR "dlm: %s: " fmt "\n", (ls)->ls_name , ##args)
+
#define log_rinfo(ls, fmt, args...) \
- printk(KERN_INFO "dlm: %s: " fmt "\n", (ls)->ls_name , ##args);
+do { \
+ if (dlm_config.ci_log_info) \
+ printk(KERN_INFO "dlm: %s: " fmt "\n", \
+ (ls)->ls_name, ##args); \
+ else if (dlm_config.ci_log_debug) \
+ printk(KERN_DEBUG "dlm: %s: " fmt "\n", \
+ (ls)->ls_name , ##args); \
+} while (0)
#define log_debug(ls, fmt, args...) \
do { \
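
The do { ... } while (0) wrapper matters here because log_rinfo() now expands to an if/else of its own (and the old version even carried a stray trailing semicolon); without the wrapper it would mis-parse when used as the body of an if. A minimal demonstration:

    /* Sketch (userspace): why multi-statement macros use do/while(0). */
    #include <stdio.h>

    #define LOG_BAD(msg)  { printf("info: "); printf("%s\n", msg); }

    #define LOG_GOOD(msg)                   \
    do {                                    \
        printf("info: ");                   \
        printf("%s\n", (msg));              \
    } while (0)

    int main(int argc, char **argv)
    {
        (void)argv;
        /* With LOG_BAD this would expand to "} ; else", a syntax error:
         *   if (argc > 1) LOG_BAD("hi"); else LOG_BAD("bye");
         */
        if (argc > 1)
            LOG_GOOD("hi");
        else
            LOG_GOOD("bye");
        return 0;
    }
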
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 1ab012a27d9f..963016c8f3d1 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -1279,10 +1279,9 @@ static void init_local(void)
if (dlm_our_addr(&sas, i))
break;
- addr = kmalloc(sizeof(*addr), GFP_NOFS);
+ addr = kmemdup(&sas, sizeof(*addr), GFP_NOFS);
if (!addr)
break;
- memcpy(addr, &sas, sizeof(*addr));
dlm_local_addr[dlm_local_count++] = addr;
}
}
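
kmemdup() condenses the allocate-then-copy pair into one call that returns NULL on allocation failure, removing the window for copy/size mismatches. Its userspace shape is roughly:

    /* Sketch (userspace): allocate and copy in one step, as kmemdup does. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void *memdup(const void *src, size_t len)
    {
        void *p = malloc(len);

        if (p)
            memcpy(p, src, len);
        return p;
    }

    int main(void)
    {
        const char greeting[] = "hello";
        char *copy = memdup(greeting, sizeof(greeting));

        if (!copy)
            return 1;
        printf("%s\n", copy);
        free(copy);
        return 0;
    }
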
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index 9cb54a38832d..688ccc16b702 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -45,8 +45,7 @@ static struct super_block *efivarfs_sb;
* So we need to perform a case-sensitive match on part 1 and a
* case-insensitive match on part 2.
*/
-static int efivarfs_d_compare(const struct dentry *parent,
- const struct dentry *dentry,
+static int efivarfs_d_compare(const struct dentry *dentry,
unsigned int len, const char *str,
const struct qstr *name)
{
@@ -65,7 +64,7 @@ static int efivarfs_d_compare(const struct dentry *parent,
static int efivarfs_d_hash(const struct dentry *dentry, struct qstr *qstr)
{
- unsigned long hash = init_name_hash();
+ unsigned long hash = init_name_hash(dentry);
const unsigned char *s = qstr->name;
unsigned int len = qstr->len;
@@ -98,7 +97,7 @@ static struct dentry *efivarfs_alloc_dentry(struct dentry *parent, char *name)
q.name = name;
q.len = strlen(name);
- err = efivarfs_d_hash(NULL, &q);
+ err = efivarfs_d_hash(parent, &q);
if (err)
return ERR_PTR(err);
diff --git a/fs/exec.c b/fs/exec.c
index 887c1c955df8..6fcfb3f7b137 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -762,6 +762,39 @@ out_unlock:
}
EXPORT_SYMBOL(setup_arg_pages);
+#else
+
+/*
+ * Transfer the program arguments and environment from the holding pages
+ * onto the stack. The provided stack pointer is adjusted accordingly.
+ */
+int transfer_args_to_stack(struct linux_binprm *bprm,
+ unsigned long *sp_location)
+{
+ unsigned long index, stop, sp;
+ int ret = 0;
+
+ stop = bprm->p >> PAGE_SHIFT;
+ sp = *sp_location;
+
+ for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
+ unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
+ char *src = kmap(bprm->page[index]) + offset;
+ sp -= PAGE_SIZE - offset;
+ if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
+ ret = -EFAULT;
+ kunmap(bprm->page[index]);
+ if (ret)
+ goto out;
+ }
+
+ *sp_location = sp;
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(transfer_args_to_stack);
+
#endif /* CONFIG_MMU */
static struct file *do_open_execat(int fd, struct filename *name, int flags)
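
On no-MMU targets the argument pages cannot simply be remapped, so transfer_args_to_stack() copies them down onto the freshly set-up stack, highest page first, with only the used tail of the boundary page transferred. A userspace sketch of the same walk over toy pages; the explicit break replaces the kernel loop's index >= stop test:

    /* Sketch (userspace): copy holding pages onto a descending stack. */
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 16
    #define NPAGES    4

    int main(void)
    {
        char pages[NPAGES][PAGE_SIZE];           /* the "holding" pages */
        char stack[NPAGES * PAGE_SIZE];
        size_t p = (size_t)1 * PAGE_SIZE + 6;    /* args start mid-page 1 */
        size_t stop = p / PAGE_SIZE;             /* lowest page in use */
        size_t sp = sizeof(stack);               /* stack grows downward */

        memset(pages, '.', sizeof(pages));
        memcpy(&pages[1][6], "arg0\0arg1", 10);  /* pretend-bprm contents */

        for (size_t index = NPAGES - 1; ; index--) {
            size_t offset = (index == stop) ? p % PAGE_SIZE : 0;

            sp -= PAGE_SIZE - offset;
            memcpy(stack + sp, &pages[index][offset], PAGE_SIZE - offset);
            if (index == stop)                   /* avoid unsigned wrap */
                break;
        }

        printf("args land at stack[%zu]: %s\n", sp, stack + sp);
        return 0;
    }
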
@@ -866,7 +899,8 @@ int kernel_read_file(struct file *file, void **buf, loff_t *size,
goto out;
}
- *buf = vmalloc(i_size);
+ if (id != READING_FIRMWARE_PREALLOC_BUFFER)
+ *buf = vmalloc(i_size);
if (!*buf) {
ret = -ENOMEM;
goto out;
@@ -897,8 +931,10 @@ int kernel_read_file(struct file *file, void **buf, loff_t *size,
out_free:
if (ret < 0) {
- vfree(*buf);
- *buf = NULL;
+ if (id != READING_FIRMWARE_PREALLOC_BUFFER) {
+ vfree(*buf);
+ *buf = NULL;
+ }
}
out:
@@ -1411,7 +1447,7 @@ static void bprm_fill_uid(struct linux_binprm *bprm)
bprm->cred->euid = current_euid();
bprm->cred->egid = current_egid();
- if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
+ if (!mnt_may_suid(bprm->file->f_path.mnt))
return;
if (task_no_new_privs(current))
diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
index 7bd8ac8dfb28..8bb72807e70d 100644
--- a/fs/exofs/ore.c
+++ b/fs/exofs/ore.c
@@ -878,7 +878,7 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
} else {
bio = master_dev->bio;
/* FIXME: bio_set_dir() */
- bio->bi_rw |= REQ_WRITE;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
}
osd_req_write(or, _ios_obj(ios, cur_comp),
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 9f9992b37924..4c40c0786e16 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -1194,6 +1194,27 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
}
/*
+ * Returns 1 if the passed-in block region is valid; 0 if some part overlaps
+ * with filesystem metadata blocks.
+ */
+int ext2_data_block_valid(struct ext2_sb_info *sbi, ext2_fsblk_t start_blk,
+ unsigned int count)
+{
+ if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
+ (start_blk + count < start_blk) ||
+ (start_blk > le32_to_cpu(sbi->s_es->s_blocks_count)))
+ return 0;
+
+ /* Ensure we do not step over the superblock */
+ if ((start_blk <= sbi->s_sb_block) &&
+ (start_blk + count >= sbi->s_sb_block))
+ return 0;
+
+ return 1;
+}
+
+/*
* ext2_new_blocks() -- core block(s) allocation function
* @inode: file inode
* @goal: given target block(filesystem wide)
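
The validity test above rejects four shapes of bad range: a start at or below the first data block, an unsigned wraparound of start + count, a start beyond the block count, and any overlap with the superblock. The same checks on plain integers, with sample geometry:

    /* Sketch (userspace): overflow-safe block-range validation. */
    #include <stdio.h>

    static int block_range_valid(unsigned int start, unsigned int count,
                                 unsigned int first_data_block,
                                 unsigned int blocks_count,
                                 unsigned int sb_block)
    {
        if (start <= first_data_block ||
            start + count < start ||          /* unsigned wraparound */
            start > blocks_count)
            return 0;
        if (start <= sb_block && start + count >= sb_block)
            return 0;                         /* would touch the superblock */
        return 1;
    }

    int main(void)
    {
        /* blocks 0..8191, first data block 1, superblock parked at 100 */
        printf("%d\n", block_range_valid(2000, 8, 1, 8192, 100));  /* 1 */
        printf("%d\n", block_range_valid(98, 5, 1, 8192, 100));    /* 0: sb */
        printf("%d\n", block_range_valid(0xfffffffe, 5, 1, 8192, 100)); /* 0 */
        return 0;
    }
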
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 19efd1197fa5..61ad490ed67b 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -358,8 +358,8 @@ ext2_readdir(struct file *file, struct dir_context *ctx)
* and the entry itself. Page is returned mapped and unlocked.
* Entry is guaranteed to be valid.
*/
-struct ext2_dir_entry_2 *ext2_find_entry (struct inode * dir,
- struct qstr *child, struct page ** res_page)
+struct ext2_dir_entry_2 *ext2_find_entry (struct inode *dir,
+ const struct qstr *child, struct page **res_page)
{
const char *name = child->name;
int namelen = child->len;
@@ -435,7 +435,7 @@ struct ext2_dir_entry_2 * ext2_dotdot (struct inode *dir, struct page **p)
return de;
}
-ino_t ext2_inode_by_name(struct inode *dir, struct qstr *child)
+ino_t ext2_inode_by_name(struct inode *dir, const struct qstr *child)
{
ino_t res = 0;
struct ext2_dir_entry_2 *de;
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 170939f379d7..06af2f92226c 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -367,6 +367,7 @@ struct ext2_inode {
*/
#define EXT2_VALID_FS 0x0001 /* Unmounted cleanly */
#define EXT2_ERROR_FS 0x0002 /* Errors detected */
+#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
/*
* Mount flags
@@ -739,6 +740,8 @@ extern unsigned long ext2_bg_num_gdb(struct super_block *sb, int group);
extern ext2_fsblk_t ext2_new_block(struct inode *, unsigned long, int *);
extern ext2_fsblk_t ext2_new_blocks(struct inode *, unsigned long,
unsigned long *, int *);
+extern int ext2_data_block_valid(struct ext2_sb_info *sbi, ext2_fsblk_t start_blk,
+ unsigned int count);
extern void ext2_free_blocks (struct inode *, unsigned long,
unsigned long);
extern unsigned long ext2_count_free_blocks (struct super_block *);
@@ -754,9 +757,9 @@ extern void ext2_rsv_window_add(struct super_block *sb, struct ext2_reserve_wind
/* dir.c */
extern int ext2_add_link (struct dentry *, struct inode *);
-extern ino_t ext2_inode_by_name(struct inode *, struct qstr *);
+extern ino_t ext2_inode_by_name(struct inode *, const struct qstr *);
extern int ext2_make_empty(struct inode *, struct inode *);
-extern struct ext2_dir_entry_2 * ext2_find_entry (struct inode *,struct qstr *, struct page **);
+extern struct ext2_dir_entry_2 * ext2_find_entry (struct inode *,const struct qstr *, struct page **);
extern int ext2_delete_entry (struct ext2_dir_entry_2 *, struct page *);
extern int ext2_empty_dir (struct inode *);
extern struct ext2_dir_entry_2 * ext2_dotdot (struct inode *, struct page **);
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 868c02317b05..5efeefe17abb 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -51,7 +51,7 @@ static int ext2_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
down_read(&ei->dax_sem);
- ret = __dax_fault(vma, vmf, ext2_get_block);
+ ret = dax_fault(vma, vmf, ext2_get_block);
up_read(&ei->dax_sem);
if (vmf->flags & FAULT_FLAG_WRITE)
@@ -72,7 +72,7 @@ static int ext2_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
}
down_read(&ei->dax_sem);
- ret = __dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block);
+ ret = dax_pmd_fault(vma, addr, pmd, flags, ext2_get_block);
up_read(&ei->dax_sem);
if (flags & FAULT_FLAG_WRITE)
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index fcbe58641e40..d5c7d09919f3 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1389,6 +1389,16 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
ei->i_frag_size = raw_inode->i_fsize;
ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
ei->i_dir_acl = 0;
+
+ if (ei->i_file_acl &&
+ !ext2_data_block_valid(EXT2_SB(sb), ei->i_file_acl, 1)) {
+ ext2_error(sb, "ext2_iget", "bad extended attribute block %u",
+ ei->i_file_acl);
+ brelse(bh);
+ ret = -EFSCORRUPTED;
+ goto bad_inode;
+ }
+
if (S_ISREG(inode->i_mode))
inode->i_size |= ((__u64)le32_to_cpu(raw_inode->i_size_high)) << 32;
else
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 1a5e3bff0b63..b7f896f3f7a7 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -759,10 +759,19 @@ void
ext2_xattr_delete_inode(struct inode *inode)
{
struct buffer_head *bh = NULL;
+ struct ext2_sb_info *sbi = EXT2_SB(inode->i_sb);
down_write(&EXT2_I(inode)->xattr_sem);
if (!EXT2_I(inode)->i_file_acl)
goto cleanup;
+
+ if (!ext2_data_block_valid(sbi, EXT2_I(inode)->i_file_acl, 0)) {
+ ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
+ "inode %ld: xattr block %d is out of data blocks range",
+ inode->i_ino, EXT2_I(inode)->i_file_acl);
+ goto cleanup;
+ }
+
bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
if (!bh) {
ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index b46e9fc64196..e38039fd96ff 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -99,17 +99,9 @@ config EXT4_FS_SECURITY
extended attributes for file security labels, say N.
config EXT4_ENCRYPTION
- tristate "Ext4 Encryption"
+ bool "Ext4 Encryption"
depends on EXT4_FS
- select CRYPTO_AES
- select CRYPTO_CBC
- select CRYPTO_ECB
- select CRYPTO_XTS
- select CRYPTO_CTS
- select CRYPTO_CTR
- select CRYPTO_SHA256
- select KEYS
- select ENCRYPTED_KEYS
+ select FS_ENCRYPTION
help
Enable encryption of ext4 files and directories. This
feature is similar to ecryptfs, but it is more memory
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile
index f52cf54f0cbc..354103f3490c 100644
--- a/fs/ext4/Makefile
+++ b/fs/ext4/Makefile
@@ -12,5 +12,3 @@ ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \
ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o
ext4-$(CONFIG_EXT4_FS_SECURITY) += xattr_security.o
-ext4-$(CONFIG_EXT4_FS_ENCRYPTION) += crypto_policy.o crypto.o \
- crypto_key.o crypto_fname.o
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 3020fd70c392..e04ec868e37e 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -208,6 +208,9 @@ static int ext4_init_block_bitmap(struct super_block *sb,
memset(bh->b_data, 0, sb->s_blocksize);
bit_max = ext4_num_base_meta_clusters(sb, block_group);
+ if ((bit_max >> 3) >= bh->b_size)
+ return -EFSCORRUPTED;
+
for (bit = 0; bit < bit_max; bit++)
ext4_set_bit(bit, bh->b_data);
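
bit_max counts bits while b_size counts bytes, so the guard shifts by three (bits to bytes) before comparing against the buffer size; anything larger would mean ext4_set_bit() scribbling past the bitmap buffer on a crafted filesystem. The arithmetic in isolation:

    /* Sketch (userspace): a bit index is addressable only if its byte
     * (index >> 3) lies inside the buffer. */
    #include <stdio.h>

    static int bit_fits(unsigned int bit_index, size_t buf_size)
    {
        return (bit_index >> 3) < buf_size;
    }

    int main(void)
    {
        /* a 4096-byte bitmap block holds 32768 bits */
        printf("%d\n", bit_fits(32767, 4096));  /* 1: last valid bit */
        printf("%d\n", bit_fits(32768, 4096));  /* 0: one past the end */
        return 0;
    }
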
@@ -470,7 +473,7 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
trace_ext4_read_block_bitmap_load(sb, block_group);
bh->b_end_io = ext4_end_bitmap_read;
get_bh(bh);
- submit_bh(READ | REQ_META | REQ_PRIO, bh);
+ submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
return bh;
verify:
err = ext4_validate_block_bitmap(sb, desc, block_group, bh);
@@ -610,7 +613,9 @@ int ext4_should_retry_alloc(struct super_block *sb, int *retries)
jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
- jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
+ smp_mb();
+ if (EXT4_SB(sb)->s_mb_free_pending)
+ jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
return 1;
}
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
deleted file mode 100644
index 6a6c27373b54..000000000000
--- a/fs/ext4/crypto.c
+++ /dev/null
@@ -1,536 +0,0 @@
-/*
- * linux/fs/ext4/crypto.c
- *
- * Copyright (C) 2015, Google, Inc.
- *
- * This contains encryption functions for ext4
- *
- * Written by Michael Halcrow, 2014.
- *
- * Filename encryption additions
- * Uday Savagaonkar, 2014
- * Encryption policy handling additions
- * Ildar Muslukhov, 2014
- *
- * This has not yet undergone a rigorous security audit.
- *
- * The usage of AES-XTS should conform to recommendations in NIST
- * Special Publication 800-38E and IEEE P1619/D16.
- */
-
-#include <crypto/skcipher.h>
-#include <keys/user-type.h>
-#include <keys/encrypted-type.h>
-#include <linux/ecryptfs.h>
-#include <linux/gfp.h>
-#include <linux/kernel.h>
-#include <linux/key.h>
-#include <linux/list.h>
-#include <linux/mempool.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/random.h>
-#include <linux/scatterlist.h>
-#include <linux/spinlock_types.h>
-#include <linux/namei.h>
-
-#include "ext4_extents.h"
-#include "xattr.h"
-
-/* Encryption added and removed here! (L: */
-
-static unsigned int num_prealloc_crypto_pages = 32;
-static unsigned int num_prealloc_crypto_ctxs = 128;
-
-module_param(num_prealloc_crypto_pages, uint, 0444);
-MODULE_PARM_DESC(num_prealloc_crypto_pages,
- "Number of crypto pages to preallocate");
-module_param(num_prealloc_crypto_ctxs, uint, 0444);
-MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
- "Number of crypto contexts to preallocate");
-
-static mempool_t *ext4_bounce_page_pool;
-
-static LIST_HEAD(ext4_free_crypto_ctxs);
-static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);
-
-static struct kmem_cache *ext4_crypto_ctx_cachep;
-struct kmem_cache *ext4_crypt_info_cachep;
-
-/**
- * ext4_release_crypto_ctx() - Releases an encryption context
- * @ctx: The encryption context to release.
- *
- * If the encryption context was allocated from the pre-allocated pool, returns
- * it to that pool. Else, frees it.
- *
- * If there's a bounce page in the context, this frees that.
- */
-void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
-{
- unsigned long flags;
-
- if (ctx->flags & EXT4_WRITE_PATH_FL && ctx->w.bounce_page)
- mempool_free(ctx->w.bounce_page, ext4_bounce_page_pool);
- ctx->w.bounce_page = NULL;
- ctx->w.control_page = NULL;
- if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
- kmem_cache_free(ext4_crypto_ctx_cachep, ctx);
- } else {
- spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
- list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
- spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
- }
-}
-
-/**
- * ext4_get_crypto_ctx() - Gets an encryption context
- * @inode: The inode for which we are doing the crypto
- *
- * Allocates and initializes an encryption context.
- *
- * Return: An allocated and initialized encryption context on success; error
- * value or NULL otherwise.
- */
-struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode,
- gfp_t gfp_flags)
-{
- struct ext4_crypto_ctx *ctx = NULL;
- int res = 0;
- unsigned long flags;
- struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
-
- if (ci == NULL)
- return ERR_PTR(-ENOKEY);
-
- /*
- * We first try getting the ctx from a free list because in
- * the common case the ctx will have an allocated and
- * initialized crypto tfm, so it's probably a worthwhile
- * optimization. For the bounce page, we first try getting it
- * from the kernel allocator because that's just about as fast
- * as getting it from a list and because a cache of free pages
- * should generally be a "last resort" option for a filesystem
- * to be able to do its job.
- */
- spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
- ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs,
- struct ext4_crypto_ctx, free_list);
- if (ctx)
- list_del(&ctx->free_list);
- spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
- if (!ctx) {
- ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, gfp_flags);
- if (!ctx) {
- res = -ENOMEM;
- goto out;
- }
- ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
- } else {
- ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
- }
- ctx->flags &= ~EXT4_WRITE_PATH_FL;
-
-out:
- if (res) {
- if (!IS_ERR_OR_NULL(ctx))
- ext4_release_crypto_ctx(ctx);
- ctx = ERR_PTR(res);
- }
- return ctx;
-}
-
-struct workqueue_struct *ext4_read_workqueue;
-static DEFINE_MUTEX(crypto_init);
-
-/**
- * ext4_exit_crypto() - Shutdown the ext4 encryption system
- */
-void ext4_exit_crypto(void)
-{
- struct ext4_crypto_ctx *pos, *n;
-
- list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list)
- kmem_cache_free(ext4_crypto_ctx_cachep, pos);
- INIT_LIST_HEAD(&ext4_free_crypto_ctxs);
- if (ext4_bounce_page_pool)
- mempool_destroy(ext4_bounce_page_pool);
- ext4_bounce_page_pool = NULL;
- if (ext4_read_workqueue)
- destroy_workqueue(ext4_read_workqueue);
- ext4_read_workqueue = NULL;
- if (ext4_crypto_ctx_cachep)
- kmem_cache_destroy(ext4_crypto_ctx_cachep);
- ext4_crypto_ctx_cachep = NULL;
- if (ext4_crypt_info_cachep)
- kmem_cache_destroy(ext4_crypt_info_cachep);
- ext4_crypt_info_cachep = NULL;
-}
-
-/**
- * ext4_init_crypto() - Set up for ext4 encryption.
- *
- * We only call this when we start accessing encrypted files, since it
- * results in memory getting allocated that wouldn't otherwise be used.
- *
- * Return: Zero on success, non-zero otherwise.
- */
-int ext4_init_crypto(void)
-{
- int i, res = -ENOMEM;
-
- mutex_lock(&crypto_init);
- if (ext4_read_workqueue)
- goto already_initialized;
- ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
- if (!ext4_read_workqueue)
- goto fail;
-
- ext4_crypto_ctx_cachep = KMEM_CACHE(ext4_crypto_ctx,
- SLAB_RECLAIM_ACCOUNT);
- if (!ext4_crypto_ctx_cachep)
- goto fail;
-
- ext4_crypt_info_cachep = KMEM_CACHE(ext4_crypt_info,
- SLAB_RECLAIM_ACCOUNT);
- if (!ext4_crypt_info_cachep)
- goto fail;
-
- for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
- struct ext4_crypto_ctx *ctx;
-
- ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
- if (!ctx) {
- res = -ENOMEM;
- goto fail;
- }
- list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
- }
-
- ext4_bounce_page_pool =
- mempool_create_page_pool(num_prealloc_crypto_pages, 0);
- if (!ext4_bounce_page_pool) {
- res = -ENOMEM;
- goto fail;
- }
-already_initialized:
- mutex_unlock(&crypto_init);
- return 0;
-fail:
- ext4_exit_crypto();
- mutex_unlock(&crypto_init);
- return res;
-}
-
-void ext4_restore_control_page(struct page *data_page)
-{
- struct ext4_crypto_ctx *ctx =
- (struct ext4_crypto_ctx *)page_private(data_page);
-
- set_page_private(data_page, (unsigned long)NULL);
- ClearPagePrivate(data_page);
- unlock_page(data_page);
- ext4_release_crypto_ctx(ctx);
-}
-
-/**
- * ext4_crypt_complete() - The completion callback for page encryption
- * @req: The asynchronous encryption request context
- * @res: The result of the encryption operation
- */
-static void ext4_crypt_complete(struct crypto_async_request *req, int res)
-{
- struct ext4_completion_result *ecr = req->data;
-
- if (res == -EINPROGRESS)
- return;
- ecr->res = res;
- complete(&ecr->completion);
-}
-
-typedef enum {
- EXT4_DECRYPT = 0,
- EXT4_ENCRYPT,
-} ext4_direction_t;
-
-static int ext4_page_crypto(struct inode *inode,
- ext4_direction_t rw,
- pgoff_t index,
- struct page *src_page,
- struct page *dest_page,
- gfp_t gfp_flags)
-
-{
- u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
- struct skcipher_request *req = NULL;
- DECLARE_EXT4_COMPLETION_RESULT(ecr);
- struct scatterlist dst, src;
- struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
- struct crypto_skcipher *tfm = ci->ci_ctfm;
- int res = 0;
-
- req = skcipher_request_alloc(tfm, gfp_flags);
- if (!req) {
- printk_ratelimited(KERN_ERR
- "%s: crypto_request_alloc() failed\n",
- __func__);
- return -ENOMEM;
- }
- skcipher_request_set_callback(
- req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- ext4_crypt_complete, &ecr);
-
- BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index));
- memcpy(xts_tweak, &index, sizeof(index));
- memset(&xts_tweak[sizeof(index)], 0,
- EXT4_XTS_TWEAK_SIZE - sizeof(index));
-
- sg_init_table(&dst, 1);
- sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
- sg_init_table(&src, 1);
- sg_set_page(&src, src_page, PAGE_SIZE, 0);
- skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
- xts_tweak);
- if (rw == EXT4_DECRYPT)
- res = crypto_skcipher_decrypt(req);
- else
- res = crypto_skcipher_encrypt(req);
- if (res == -EINPROGRESS || res == -EBUSY) {
- wait_for_completion(&ecr.completion);
- res = ecr.res;
- }
- skcipher_request_free(req);
- if (res) {
- printk_ratelimited(
- KERN_ERR
-			"%s: crypto_skcipher_encrypt/decrypt() returned %d\n",
- __func__, res);
- return res;
- }
- return 0;
-}
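A minimal sketch of the tweak layout built above, assuming a 64-bit
pgoff_t on a little-endian machine and an illustrative page index of 5:

	/* Sketch only: the 16-byte XTS tweak for page index 5. */
	u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
	pgoff_t index = 5;

	memcpy(xts_tweak, &index, sizeof(index));	/* bytes 0..7: 05 00 00 00 00 00 00 00 */
	memset(&xts_tweak[sizeof(index)], 0,
	       EXT4_XTS_TWEAK_SIZE - sizeof(index));	/* bytes 8..15: zero padding */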
-
-static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx,
- gfp_t gfp_flags)
-{
- ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, gfp_flags);
- if (ctx->w.bounce_page == NULL)
- return ERR_PTR(-ENOMEM);
- ctx->flags |= EXT4_WRITE_PATH_FL;
- return ctx->w.bounce_page;
-}
-
-/**
- * ext4_encrypt() - Encrypts a page
- * @inode: The inode for which the encryption should take place
- * @plaintext_page: The page to encrypt. Must be locked.
- *
- * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
- * encryption context.
- *
- * Called on the page write path. The caller must call
- * ext4_restore_control_page() on the returned ciphertext page to
- * release the bounce buffer and the encryption context.
- *
- * Return: An allocated page with the encrypted content on success. Else, an
- * error value or NULL.
- */
-struct page *ext4_encrypt(struct inode *inode,
- struct page *plaintext_page,
- gfp_t gfp_flags)
-{
- struct ext4_crypto_ctx *ctx;
- struct page *ciphertext_page = NULL;
- int err;
-
- BUG_ON(!PageLocked(plaintext_page));
-
- ctx = ext4_get_crypto_ctx(inode, gfp_flags);
- if (IS_ERR(ctx))
- return (struct page *) ctx;
-
- /* The encryption operation will require a bounce page. */
- ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
- if (IS_ERR(ciphertext_page))
- goto errout;
- ctx->w.control_page = plaintext_page;
- err = ext4_page_crypto(inode, EXT4_ENCRYPT, plaintext_page->index,
- plaintext_page, ciphertext_page, gfp_flags);
- if (err) {
- ciphertext_page = ERR_PTR(err);
- errout:
- ext4_release_crypto_ctx(ctx);
- return ciphertext_page;
- }
- SetPagePrivate(ciphertext_page);
- set_page_private(ciphertext_page, (unsigned long)ctx);
- lock_page(ciphertext_page);
- return ciphertext_page;
-}
-
-/**
- * ext4_decrypt() - Decrypts a page in-place
- * @page: The page to decrypt. Must be locked.
- *
- * Decrypts the page in-place using the inode's encryption context.
- *
- * Called from the read completion callback.
- *
- * Return: Zero on success, non-zero otherwise.
- */
-int ext4_decrypt(struct page *page)
-{
- BUG_ON(!PageLocked(page));
-
- return ext4_page_crypto(page->mapping->host, EXT4_DECRYPT,
- page->index, page, page, GFP_NOFS);
-}
-
-int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
- ext4_fsblk_t pblk, ext4_lblk_t len)
-{
- struct ext4_crypto_ctx *ctx;
- struct page *ciphertext_page = NULL;
- struct bio *bio;
- int ret, err = 0;
-
-#if 0
- ext4_msg(inode->i_sb, KERN_CRIT,
- "ext4_encrypted_zeroout ino %lu lblk %u len %u",
- (unsigned long) inode->i_ino, lblk, len);
-#endif
-
- BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);
-
- ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
- ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
- if (IS_ERR(ciphertext_page)) {
- err = PTR_ERR(ciphertext_page);
- goto errout;
- }
-
- while (len--) {
- err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk,
- ZERO_PAGE(0), ciphertext_page,
- GFP_NOFS);
- if (err)
- goto errout;
-
- bio = bio_alloc(GFP_NOWAIT, 1);
- if (!bio) {
- err = -ENOMEM;
- goto errout;
- }
- bio->bi_bdev = inode->i_sb->s_bdev;
- bio->bi_iter.bi_sector =
- pblk << (inode->i_sb->s_blocksize_bits - 9);
- ret = bio_add_page(bio, ciphertext_page,
- inode->i_sb->s_blocksize, 0);
- if (ret != inode->i_sb->s_blocksize) {
- /* should never happen! */
- ext4_msg(inode->i_sb, KERN_ERR,
- "bio_add_page failed: %d", ret);
- WARN_ON(1);
- bio_put(bio);
- err = -EIO;
- goto errout;
- }
- err = submit_bio_wait(WRITE, bio);
- if ((err == 0) && bio->bi_error)
- err = -EIO;
- bio_put(bio);
- if (err)
- goto errout;
- lblk++; pblk++;
- }
- err = 0;
-errout:
- ext4_release_crypto_ctx(ctx);
- return err;
-}
-
-bool ext4_valid_contents_enc_mode(uint32_t mode)
-{
- return (mode == EXT4_ENCRYPTION_MODE_AES_256_XTS);
-}
-
-/**
- * ext4_validate_encryption_key_size() - Validate the encryption key size
- * @mode: The key mode.
- * @size: The key size to validate.
- *
- * Return: The validated key size for @mode. Zero if invalid.
- */
-uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
-{
- if (size == ext4_encryption_key_size(mode))
- return size;
- return 0;
-}
-
-/*
- * Validate dentries for encrypted directories to make sure we aren't
- * potentially caching stale data after a key has been added or
- * removed.
- */
-static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
-{
- struct dentry *dir;
- struct ext4_crypt_info *ci;
- int dir_has_key, cached_with_key;
-
- if (flags & LOOKUP_RCU)
- return -ECHILD;
-
- dir = dget_parent(dentry);
- if (!ext4_encrypted_inode(d_inode(dir))) {
- dput(dir);
- return 0;
- }
- ci = EXT4_I(d_inode(dir))->i_crypt_info;
- if (ci && ci->ci_keyring_key &&
- (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
- (1 << KEY_FLAG_REVOKED) |
- (1 << KEY_FLAG_DEAD))))
- ci = NULL;
-
-	/* this should eventually be a flag in d_flags */
- cached_with_key = dentry->d_fsdata != NULL;
- dir_has_key = (ci != NULL);
- dput(dir);
-
- /*
- * If the dentry was cached without the key, and it is a
-	 * negative dentry, it might be a valid name.  For locking
-	 * reasons we can't check here whether the key has since been
-	 * made available, so we fail the validation and let
-	 * ext4_lookup() redo the check.
- *
- * We also fail the validation if the dentry was created with
- * the key present, but we no longer have the key, or vice versa.
- */
- if ((!cached_with_key && d_is_negative(dentry)) ||
- (!cached_with_key && dir_has_key) ||
- (cached_with_key && !dir_has_key)) {
-#if 0 /* Revalidation debug */
- char buf[80];
- char *cp = simple_dname(dentry, buf, sizeof(buf));
-
- if (IS_ERR(cp))
- cp = (char *) "???";
- pr_err("revalidate: %s %p %d %d %d\n", cp, dentry->d_fsdata,
- cached_with_key, d_is_negative(dentry),
- dir_has_key);
-#endif
- return 0;
- }
- return 1;
-}
-
-const struct dentry_operations ext4_encrypted_d_ops = {
- .d_revalidate = ext4_d_revalidate,
-};
diff --git a/fs/ext4/crypto_fname.c b/fs/ext4/crypto_fname.c
deleted file mode 100644
index 1a2f360405db..000000000000
--- a/fs/ext4/crypto_fname.c
+++ /dev/null
@@ -1,468 +0,0 @@
-/*
- * linux/fs/ext4/crypto_fname.c
- *
- * Copyright (C) 2015, Google, Inc.
- *
- * This contains functions for filename crypto management in ext4
- *
- * Written by Uday Savagaonkar, 2014.
- *
- * This has not yet undergone a rigorous security audit.
- *
- */
-
-#include <crypto/skcipher.h>
-#include <keys/encrypted-type.h>
-#include <keys/user-type.h>
-#include <linux/gfp.h>
-#include <linux/kernel.h>
-#include <linux/key.h>
-#include <linux/list.h>
-#include <linux/mempool.h>
-#include <linux/random.h>
-#include <linux/scatterlist.h>
-#include <linux/spinlock_types.h>
-
-#include "ext4.h"
-#include "ext4_crypto.h"
-#include "xattr.h"
-
-/**
- * ext4_dir_crypt_complete() - Completion callback for filename crypto requests
- */
-static void ext4_dir_crypt_complete(struct crypto_async_request *req, int res)
-{
- struct ext4_completion_result *ecr = req->data;
-
- if (res == -EINPROGRESS)
- return;
- ecr->res = res;
- complete(&ecr->completion);
-}
-
-bool ext4_valid_filenames_enc_mode(uint32_t mode)
-{
- return (mode == EXT4_ENCRYPTION_MODE_AES_256_CTS);
-}
-
-static unsigned max_name_len(struct inode *inode)
-{
- return S_ISLNK(inode->i_mode) ? inode->i_sb->s_blocksize :
- EXT4_NAME_LEN;
-}
-
-/**
- * ext4_fname_encrypt() - Encrypt a filename
- *
- * This function encrypts the input filename and returns the length of the
- * ciphertext. Errors are returned as negative numbers. We trust the caller to
- * allocate sufficient memory for the oname string.
- */
-static int ext4_fname_encrypt(struct inode *inode,
- const struct qstr *iname,
- struct ext4_str *oname)
-{
- u32 ciphertext_len;
- struct skcipher_request *req = NULL;
- DECLARE_EXT4_COMPLETION_RESULT(ecr);
- struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
- struct crypto_skcipher *tfm = ci->ci_ctfm;
- int res = 0;
- char iv[EXT4_CRYPTO_BLOCK_SIZE];
- struct scatterlist src_sg, dst_sg;
- int padding = 4 << (ci->ci_flags & EXT4_POLICY_FLAGS_PAD_MASK);
- char *workbuf, buf[32], *alloc_buf = NULL;
- unsigned lim = max_name_len(inode);
-
- if (iname->len <= 0 || iname->len > lim)
- return -EIO;
-
- ciphertext_len = (iname->len < EXT4_CRYPTO_BLOCK_SIZE) ?
- EXT4_CRYPTO_BLOCK_SIZE : iname->len;
- ciphertext_len = ext4_fname_crypto_round_up(ciphertext_len, padding);
- ciphertext_len = (ciphertext_len > lim)
- ? lim : ciphertext_len;
-
- if (ciphertext_len <= sizeof(buf)) {
- workbuf = buf;
- } else {
- alloc_buf = kmalloc(ciphertext_len, GFP_NOFS);
- if (!alloc_buf)
- return -ENOMEM;
- workbuf = alloc_buf;
- }
-
- /* Allocate request */
- req = skcipher_request_alloc(tfm, GFP_NOFS);
- if (!req) {
- printk_ratelimited(
- KERN_ERR "%s: crypto_request_alloc() failed\n", __func__);
- kfree(alloc_buf);
- return -ENOMEM;
- }
- skcipher_request_set_callback(req,
- CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- ext4_dir_crypt_complete, &ecr);
-
- /* Copy the input */
- memcpy(workbuf, iname->name, iname->len);
- if (iname->len < ciphertext_len)
- memset(workbuf + iname->len, 0, ciphertext_len - iname->len);
-
- /* Initialize IV */
- memset(iv, 0, EXT4_CRYPTO_BLOCK_SIZE);
-
- /* Create encryption request */
- sg_init_one(&src_sg, workbuf, ciphertext_len);
- sg_init_one(&dst_sg, oname->name, ciphertext_len);
- skcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv);
- res = crypto_skcipher_encrypt(req);
- if (res == -EINPROGRESS || res == -EBUSY) {
- wait_for_completion(&ecr.completion);
- res = ecr.res;
- }
- kfree(alloc_buf);
- skcipher_request_free(req);
- if (res < 0) {
- printk_ratelimited(
- KERN_ERR "%s: Error (error code %d)\n", __func__, res);
- }
- oname->len = ciphertext_len;
- return res;
-}
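A worked instance of the length computation above, assuming the smallest
policy padding (EXT4_POLICY_FLAGS_PAD_4, so padding == 4) and a 3-byte
name: the length is first raised to the 16-byte cipher block minimum,
then rounded up to the padding multiple:

	u32 len = 3;					/* iname->len */
	u32 ciphertext_len;

	ciphertext_len = (len < EXT4_CRYPTO_BLOCK_SIZE) ?
		EXT4_CRYPTO_BLOCK_SIZE : len;		/* -> 16 */
	ciphertext_len = ext4_fname_crypto_round_up(ciphertext_len, 4);
							/* -> 16, already a multiple of 4 */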
-
-/*
- * ext4_fname_decrypt() - Decrypt a filename
- *
- * This function decrypts the input filename and returns the length of the
- * plaintext. Errors are returned as negative numbers. We trust the caller to
- * allocate sufficient memory for the oname string.
- */
-static int ext4_fname_decrypt(struct inode *inode,
- const struct ext4_str *iname,
- struct ext4_str *oname)
-{
- struct ext4_str tmp_in[2], tmp_out[1];
- struct skcipher_request *req = NULL;
- DECLARE_EXT4_COMPLETION_RESULT(ecr);
- struct scatterlist src_sg, dst_sg;
- struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
- struct crypto_skcipher *tfm = ci->ci_ctfm;
- int res = 0;
- char iv[EXT4_CRYPTO_BLOCK_SIZE];
- unsigned lim = max_name_len(inode);
-
- if (iname->len <= 0 || iname->len > lim)
- return -EIO;
-
- tmp_in[0].name = iname->name;
- tmp_in[0].len = iname->len;
- tmp_out[0].name = oname->name;
-
- /* Allocate request */
- req = skcipher_request_alloc(tfm, GFP_NOFS);
- if (!req) {
- printk_ratelimited(
- KERN_ERR "%s: crypto_request_alloc() failed\n", __func__);
- return -ENOMEM;
- }
- skcipher_request_set_callback(req,
- CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- ext4_dir_crypt_complete, &ecr);
-
- /* Initialize IV */
- memset(iv, 0, EXT4_CRYPTO_BLOCK_SIZE);
-
- /* Create encryption request */
- sg_init_one(&src_sg, iname->name, iname->len);
- sg_init_one(&dst_sg, oname->name, oname->len);
- skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, iv);
- res = crypto_skcipher_decrypt(req);
- if (res == -EINPROGRESS || res == -EBUSY) {
- wait_for_completion(&ecr.completion);
- res = ecr.res;
- }
- skcipher_request_free(req);
- if (res < 0) {
- printk_ratelimited(
-			KERN_ERR "%s: Error (error code %d)\n",
- __func__, res);
- return res;
- }
-
- oname->len = strnlen(oname->name, iname->len);
- return oname->len;
-}
-
-static const char *lookup_table =
- "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";
-
-/**
- * digest_encode() -
- *
- * Encodes the input digest using characters from the set [A-Za-z0-9+,]
- * (see lookup_table above). The encoded string is roughly 4/3 times the
- * size of the input string.
- */
-static int digest_encode(const char *src, int len, char *dst)
-{
- int i = 0, bits = 0, ac = 0;
- char *cp = dst;
-
- while (i < len) {
- ac += (((unsigned char) src[i]) << bits);
- bits += 8;
- do {
- *cp++ = lookup_table[ac & 0x3f];
- ac >>= 6;
- bits -= 6;
- } while (bits >= 6);
- i++;
- }
- if (bits)
- *cp++ = lookup_table[ac & 0x3f];
- return cp - dst;
-}
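The roughly 4/3 expansion follows from consuming eight input bits per
byte while emitting one character per six bits. A sketch with a two-byte
input, which emits three characters:

	char dst[8];
	int n = digest_encode("\x01\x02", 2, dst);	/* n == 3, dst begins "BIA" */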
-
-static int digest_decode(const char *src, int len, char *dst)
-{
- int i = 0, bits = 0, ac = 0;
- const char *p;
- char *cp = dst;
-
- while (i < len) {
- p = strchr(lookup_table, src[i]);
- if (p == NULL || src[i] == 0)
- return -2;
- ac += (p - lookup_table) << bits;
- bits += 6;
- if (bits >= 8) {
- *cp++ = ac & 0xff;
- ac >>= 8;
- bits -= 8;
- }
- i++;
- }
- if (ac)
- return -1;
- return cp - dst;
-}
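Decoding is the inverse: six bits are accumulated per character, and a
non-zero leftover accumulator or a character outside lookup_table is
rejected. Continuing the sketch above, the three characters decode back
to the original two bytes:

	char out[8];
	int m = digest_decode("BIA", 3, out);	/* m == 2, out holds 0x01 0x02 */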
-
-/**
- * ext4_fname_crypto_round_up() -
- *
- * Return: @size rounded up to the next multiple of @blksize
- */
-u32 ext4_fname_crypto_round_up(u32 size, u32 blksize)
-{
- return ((size+blksize-1)/blksize)*blksize;
-}
-
-unsigned ext4_fname_encrypted_size(struct inode *inode, u32 ilen)
-{
- struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
- int padding = 32;
-
- if (ci)
- padding = 4 << (ci->ci_flags & EXT4_POLICY_FLAGS_PAD_MASK);
- if (ilen < EXT4_CRYPTO_BLOCK_SIZE)
- ilen = EXT4_CRYPTO_BLOCK_SIZE;
- return ext4_fname_crypto_round_up(ilen, padding);
-}
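For example (a sketch using the constants above), an 18-byte name needs
32 bytes under the default padding used when no key is loaded, but only
20 bytes under the smallest policy padding:

	ext4_fname_crypto_round_up(18, 32);	/* -> 32, ci == NULL default */
	ext4_fname_crypto_round_up(18, 4);	/* -> 20, EXT4_POLICY_FLAGS_PAD_4 */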
-
-/*
- * ext4_fname_crypto_alloc_buffer() -
- *
- * Allocates an output buffer that is sufficient for any filename
- * crypto operation on @inode with an input name of length @ilen.
- */
-int ext4_fname_crypto_alloc_buffer(struct inode *inode,
- u32 ilen, struct ext4_str *crypto_str)
-{
- unsigned int olen = ext4_fname_encrypted_size(inode, ilen);
-
- crypto_str->len = olen;
- if (olen < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2)
- olen = EXT4_FNAME_CRYPTO_DIGEST_SIZE*2;
-	/* The allocated buffer holds one extra byte so that the string
-	 * can be null-terminated */
- crypto_str->name = kmalloc(olen+1, GFP_NOFS);
- if (!(crypto_str->name))
- return -ENOMEM;
- return 0;
-}
-
-/**
- * ext4_fname_crypto_free_buffer() -
- *
- * Frees the buffer allocated for a filename crypto operation.
- */
-void ext4_fname_crypto_free_buffer(struct ext4_str *crypto_str)
-{
- if (!crypto_str)
- return;
- kfree(crypto_str->name);
- crypto_str->name = NULL;
-}
-
-/**
- * _ext4_fname_disk_to_usr() - converts a filename from disk space to user space
- */
-int _ext4_fname_disk_to_usr(struct inode *inode,
- struct dx_hash_info *hinfo,
- const struct ext4_str *iname,
- struct ext4_str *oname)
-{
- char buf[24];
- int ret;
-
- if (iname->len < 3) {
-		/* Check for . and .. */
- if (iname->name[0] == '.' && iname->name[iname->len-1] == '.') {
- oname->name[0] = '.';
- oname->name[iname->len-1] = '.';
- oname->len = iname->len;
- return oname->len;
- }
- }
- if (iname->len < EXT4_CRYPTO_BLOCK_SIZE) {
- EXT4_ERROR_INODE(inode, "encrypted inode too small");
- return -EUCLEAN;
- }
- if (EXT4_I(inode)->i_crypt_info)
- return ext4_fname_decrypt(inode, iname, oname);
-
- if (iname->len <= EXT4_FNAME_CRYPTO_DIGEST_SIZE) {
- ret = digest_encode(iname->name, iname->len, oname->name);
- oname->len = ret;
- return ret;
- }
- if (hinfo) {
- memcpy(buf, &hinfo->hash, 4);
- memcpy(buf+4, &hinfo->minor_hash, 4);
- } else
- memset(buf, 0, 8);
- memcpy(buf + 8, iname->name + iname->len - 16, 16);
- oname->name[0] = '_';
- ret = digest_encode(buf, 24, oname->name+1);
- oname->len = ret + 1;
- return ret + 1;
-}
-
-int ext4_fname_disk_to_usr(struct inode *inode,
- struct dx_hash_info *hinfo,
- const struct ext4_dir_entry_2 *de,
- struct ext4_str *oname)
-{
- struct ext4_str iname = {.name = (unsigned char *) de->name,
- .len = de->name_len };
-
- return _ext4_fname_disk_to_usr(inode, hinfo, &iname, oname);
-}
-
-
-/**
- * ext4_fname_usr_to_disk() - converts a filename from user space to disk space
- */
-int ext4_fname_usr_to_disk(struct inode *inode,
- const struct qstr *iname,
- struct ext4_str *oname)
-{
- int res;
- struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
-
- if (iname->len < 3) {
-		/* Check for . and .. */
- if (iname->name[0] == '.' &&
- iname->name[iname->len-1] == '.') {
- oname->name[0] = '.';
- oname->name[iname->len-1] = '.';
- oname->len = iname->len;
- return oname->len;
- }
- }
- if (ci) {
- res = ext4_fname_encrypt(inode, iname, oname);
- return res;
- }
- /* Without a proper key, a user is not allowed to modify the filenames
- * in a directory. Consequently, a user space name cannot be mapped to
- * a disk-space name */
- return -EACCES;
-}
-
-int ext4_fname_setup_filename(struct inode *dir, const struct qstr *iname,
- int lookup, struct ext4_filename *fname)
-{
- struct ext4_crypt_info *ci;
- int ret = 0, bigname = 0;
-
- memset(fname, 0, sizeof(struct ext4_filename));
- fname->usr_fname = iname;
-
- if (!ext4_encrypted_inode(dir) ||
- ((iname->name[0] == '.') &&
- ((iname->len == 1) ||
- ((iname->name[1] == '.') && (iname->len == 2))))) {
- fname->disk_name.name = (unsigned char *) iname->name;
- fname->disk_name.len = iname->len;
- return 0;
- }
- ret = ext4_get_encryption_info(dir);
- if (ret)
- return ret;
- ci = EXT4_I(dir)->i_crypt_info;
- if (ci) {
- ret = ext4_fname_crypto_alloc_buffer(dir, iname->len,
- &fname->crypto_buf);
- if (ret < 0)
- return ret;
- ret = ext4_fname_encrypt(dir, iname, &fname->crypto_buf);
- if (ret < 0)
- goto errout;
- fname->disk_name.name = fname->crypto_buf.name;
- fname->disk_name.len = fname->crypto_buf.len;
- return 0;
- }
- if (!lookup)
- return -EACCES;
-
- /* We don't have the key and we are doing a lookup; decode the
- * user-supplied name
- */
- if (iname->name[0] == '_')
- bigname = 1;
- if ((bigname && (iname->len != 33)) ||
- (!bigname && (iname->len > 43)))
- return -ENOENT;
-
- fname->crypto_buf.name = kmalloc(32, GFP_KERNEL);
- if (fname->crypto_buf.name == NULL)
- return -ENOMEM;
- ret = digest_decode(iname->name + bigname, iname->len - bigname,
- fname->crypto_buf.name);
- if (ret < 0) {
- ret = -ENOENT;
- goto errout;
- }
- fname->crypto_buf.len = ret;
- if (bigname) {
- memcpy(&fname->hinfo.hash, fname->crypto_buf.name, 4);
- memcpy(&fname->hinfo.minor_hash, fname->crypto_buf.name + 4, 4);
- } else {
- fname->disk_name.name = fname->crypto_buf.name;
- fname->disk_name.len = fname->crypto_buf.len;
- }
- return 0;
-errout:
- kfree(fname->crypto_buf.name);
- fname->crypto_buf.name = NULL;
- return ret;
-}
-
-void ext4_fname_free_filename(struct ext4_filename *fname)
-{
- kfree(fname->crypto_buf.name);
- fname->crypto_buf.name = NULL;
- fname->usr_fname = NULL;
- fname->disk_name.name = NULL;
-}
diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c
deleted file mode 100644
index 0129d688d1f7..000000000000
--- a/fs/ext4/crypto_key.c
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * linux/fs/ext4/crypto_key.c
- *
- * Copyright (C) 2015, Google, Inc.
- *
- * This contains encryption key functions for ext4
- *
- * Written by Michael Halcrow, Ildar Muslukhov, and Uday Savagaonkar, 2015.
- */
-
-#include <crypto/skcipher.h>
-#include <keys/encrypted-type.h>
-#include <keys/user-type.h>
-#include <linux/random.h>
-#include <linux/scatterlist.h>
-#include <uapi/linux/keyctl.h>
-
-#include "ext4.h"
-#include "xattr.h"
-
-static void derive_crypt_complete(struct crypto_async_request *req, int rc)
-{
- struct ext4_completion_result *ecr = req->data;
-
- if (rc == -EINPROGRESS)
- return;
-
- ecr->res = rc;
- complete(&ecr->completion);
-}
-
-/**
- * ext4_derive_key_aes() - Derive a key using AES-128-ECB
- * @deriving_key: Encryption key used for derivation.
- * @source_key: Source key to which to apply derivation.
- * @derived_key: Derived key.
- *
- * Return: Zero on success; non-zero otherwise.
- */
-static int ext4_derive_key_aes(char deriving_key[EXT4_AES_128_ECB_KEY_SIZE],
- char source_key[EXT4_AES_256_XTS_KEY_SIZE],
- char derived_key[EXT4_AES_256_XTS_KEY_SIZE])
-{
- int res = 0;
- struct skcipher_request *req = NULL;
- DECLARE_EXT4_COMPLETION_RESULT(ecr);
- struct scatterlist src_sg, dst_sg;
- struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
-
- if (IS_ERR(tfm)) {
- res = PTR_ERR(tfm);
- tfm = NULL;
- goto out;
- }
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
- req = skcipher_request_alloc(tfm, GFP_NOFS);
- if (!req) {
- res = -ENOMEM;
- goto out;
- }
- skcipher_request_set_callback(req,
- CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
- derive_crypt_complete, &ecr);
- res = crypto_skcipher_setkey(tfm, deriving_key,
- EXT4_AES_128_ECB_KEY_SIZE);
- if (res < 0)
- goto out;
- sg_init_one(&src_sg, source_key, EXT4_AES_256_XTS_KEY_SIZE);
- sg_init_one(&dst_sg, derived_key, EXT4_AES_256_XTS_KEY_SIZE);
- skcipher_request_set_crypt(req, &src_sg, &dst_sg,
- EXT4_AES_256_XTS_KEY_SIZE, NULL);
- res = crypto_skcipher_encrypt(req);
- if (res == -EINPROGRESS || res == -EBUSY) {
- wait_for_completion(&ecr.completion);
- res = ecr.res;
- }
-
-out:
- skcipher_request_free(req);
- crypto_free_skcipher(tfm);
- return res;
-}
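In other words, the per-inode key is the 64-byte master key encrypted
with AES-128-ECB, keyed by the inode's 16-byte nonce. A minimal usage
sketch, with the identifiers taken from the caller further down:

	/* raw_key = AES-128-ECB(key = ctx.nonce, data = master_key->raw) */
	char raw_key[EXT4_MAX_KEY_SIZE];
	int res = ext4_derive_key_aes(ctx.nonce, master_key->raw, raw_key);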
-
-void ext4_free_crypt_info(struct ext4_crypt_info *ci)
-{
- if (!ci)
- return;
-
- if (ci->ci_keyring_key)
- key_put(ci->ci_keyring_key);
- crypto_free_skcipher(ci->ci_ctfm);
- kmem_cache_free(ext4_crypt_info_cachep, ci);
-}
-
-void ext4_free_encryption_info(struct inode *inode,
- struct ext4_crypt_info *ci)
-{
- struct ext4_inode_info *ei = EXT4_I(inode);
- struct ext4_crypt_info *prev;
-
- if (ci == NULL)
- ci = ACCESS_ONCE(ei->i_crypt_info);
- if (ci == NULL)
- return;
- prev = cmpxchg(&ei->i_crypt_info, ci, NULL);
- if (prev != ci)
- return;
-
- ext4_free_crypt_info(ci);
-}
-
-int _ext4_get_encryption_info(struct inode *inode)
-{
- struct ext4_inode_info *ei = EXT4_I(inode);
- struct ext4_crypt_info *crypt_info;
- char full_key_descriptor[EXT4_KEY_DESC_PREFIX_SIZE +
- (EXT4_KEY_DESCRIPTOR_SIZE * 2) + 1];
- struct key *keyring_key = NULL;
- struct ext4_encryption_key *master_key;
- struct ext4_encryption_context ctx;
- const struct user_key_payload *ukp;
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- struct crypto_skcipher *ctfm;
- const char *cipher_str;
- char raw_key[EXT4_MAX_KEY_SIZE];
- char mode;
- int res;
-
- if (!ext4_read_workqueue) {
- res = ext4_init_crypto();
- if (res)
- return res;
- }
-
-retry:
- crypt_info = ACCESS_ONCE(ei->i_crypt_info);
- if (crypt_info) {
- if (!crypt_info->ci_keyring_key ||
- key_validate(crypt_info->ci_keyring_key) == 0)
- return 0;
- ext4_free_encryption_info(inode, crypt_info);
- goto retry;
- }
-
- res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
- EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
- &ctx, sizeof(ctx));
- if (res < 0) {
- if (!DUMMY_ENCRYPTION_ENABLED(sbi))
- return res;
- ctx.contents_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_XTS;
- ctx.filenames_encryption_mode =
- EXT4_ENCRYPTION_MODE_AES_256_CTS;
- ctx.flags = 0;
- } else if (res != sizeof(ctx))
- return -EINVAL;
- res = 0;
-
- crypt_info = kmem_cache_alloc(ext4_crypt_info_cachep, GFP_KERNEL);
- if (!crypt_info)
- return -ENOMEM;
-
- crypt_info->ci_flags = ctx.flags;
- crypt_info->ci_data_mode = ctx.contents_encryption_mode;
- crypt_info->ci_filename_mode = ctx.filenames_encryption_mode;
- crypt_info->ci_ctfm = NULL;
- crypt_info->ci_keyring_key = NULL;
- memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor,
- sizeof(crypt_info->ci_master_key));
- if (S_ISREG(inode->i_mode))
- mode = crypt_info->ci_data_mode;
- else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
- mode = crypt_info->ci_filename_mode;
- else
- BUG();
- switch (mode) {
- case EXT4_ENCRYPTION_MODE_AES_256_XTS:
- cipher_str = "xts(aes)";
- break;
- case EXT4_ENCRYPTION_MODE_AES_256_CTS:
- cipher_str = "cts(cbc(aes))";
- break;
- default:
- printk_once(KERN_WARNING
- "ext4: unsupported key mode %d (ino %u)\n",
- mode, (unsigned) inode->i_ino);
- res = -ENOKEY;
- goto out;
- }
- if (DUMMY_ENCRYPTION_ENABLED(sbi)) {
- memset(raw_key, 0x42, EXT4_AES_256_XTS_KEY_SIZE);
- goto got_key;
- }
- memcpy(full_key_descriptor, EXT4_KEY_DESC_PREFIX,
- EXT4_KEY_DESC_PREFIX_SIZE);
- sprintf(full_key_descriptor + EXT4_KEY_DESC_PREFIX_SIZE,
- "%*phN", EXT4_KEY_DESCRIPTOR_SIZE,
- ctx.master_key_descriptor);
- full_key_descriptor[EXT4_KEY_DESC_PREFIX_SIZE +
- (2 * EXT4_KEY_DESCRIPTOR_SIZE)] = '\0';
- keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL);
- if (IS_ERR(keyring_key)) {
- res = PTR_ERR(keyring_key);
- keyring_key = NULL;
- goto out;
- }
- crypt_info->ci_keyring_key = keyring_key;
- if (keyring_key->type != &key_type_logon) {
- printk_once(KERN_WARNING
- "ext4: key type must be logon\n");
- res = -ENOKEY;
- goto out;
- }
- down_read(&keyring_key->sem);
- ukp = user_key_payload(keyring_key);
- if (ukp->datalen != sizeof(struct ext4_encryption_key)) {
- res = -EINVAL;
- up_read(&keyring_key->sem);
- goto out;
- }
- master_key = (struct ext4_encryption_key *)ukp->data;
- BUILD_BUG_ON(EXT4_AES_128_ECB_KEY_SIZE !=
- EXT4_KEY_DERIVATION_NONCE_SIZE);
- if (master_key->size != EXT4_AES_256_XTS_KEY_SIZE) {
- printk_once(KERN_WARNING
- "ext4: key size incorrect: %d\n",
- master_key->size);
- res = -ENOKEY;
- up_read(&keyring_key->sem);
- goto out;
- }
- res = ext4_derive_key_aes(ctx.nonce, master_key->raw,
- raw_key);
- up_read(&keyring_key->sem);
- if (res)
- goto out;
-got_key:
- ctfm = crypto_alloc_skcipher(cipher_str, 0, 0);
- if (!ctfm || IS_ERR(ctfm)) {
- res = ctfm ? PTR_ERR(ctfm) : -ENOMEM;
- printk(KERN_DEBUG
- "%s: error %d (inode %u) allocating crypto tfm\n",
- __func__, res, (unsigned) inode->i_ino);
- goto out;
- }
- crypt_info->ci_ctfm = ctfm;
- crypto_skcipher_clear_flags(ctfm, ~0);
- crypto_tfm_set_flags(crypto_skcipher_tfm(ctfm),
- CRYPTO_TFM_REQ_WEAK_KEY);
- res = crypto_skcipher_setkey(ctfm, raw_key,
- ext4_encryption_key_size(mode));
- if (res)
- goto out;
- memzero_explicit(raw_key, sizeof(raw_key));
- if (cmpxchg(&ei->i_crypt_info, NULL, crypt_info) != NULL) {
- ext4_free_crypt_info(crypt_info);
- goto retry;
- }
- return 0;
-
-out:
- if (res == -ENOKEY)
- res = 0;
- ext4_free_crypt_info(crypt_info);
- memzero_explicit(raw_key, sizeof(raw_key));
- return res;
-}
-
-int ext4_has_encryption_key(struct inode *inode)
-{
- struct ext4_inode_info *ei = EXT4_I(inode);
-
- return (ei->i_crypt_info != NULL);
-}
diff --git a/fs/ext4/crypto_policy.c b/fs/ext4/crypto_policy.c
deleted file mode 100644
index ad050698143f..000000000000
--- a/fs/ext4/crypto_policy.c
+++ /dev/null
@@ -1,229 +0,0 @@
-/*
- * linux/fs/ext4/crypto_policy.c
- *
- * Copyright (C) 2015, Google, Inc.
- *
- * This contains encryption policy functions for ext4
- *
- * Written by Michael Halcrow, 2015.
- */
-
-#include <linux/random.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-#include "ext4_jbd2.h"
-#include "ext4.h"
-#include "xattr.h"
-
-static int ext4_inode_has_encryption_context(struct inode *inode)
-{
- int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
- EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, NULL, 0);
- return (res > 0);
-}
-
-/*
- * check whether the policy is consistent with the encryption context
- * for the inode
- */
-static int ext4_is_encryption_context_consistent_with_policy(
- struct inode *inode, const struct ext4_encryption_policy *policy)
-{
- struct ext4_encryption_context ctx;
- int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
- EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
- sizeof(ctx));
- if (res != sizeof(ctx))
- return 0;
- return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor,
- EXT4_KEY_DESCRIPTOR_SIZE) == 0 &&
- (ctx.flags ==
- policy->flags) &&
- (ctx.contents_encryption_mode ==
- policy->contents_encryption_mode) &&
- (ctx.filenames_encryption_mode ==
- policy->filenames_encryption_mode));
-}
-
-static int ext4_create_encryption_context_from_policy(
- struct inode *inode, const struct ext4_encryption_policy *policy)
-{
- struct ext4_encryption_context ctx;
- handle_t *handle;
- int res, res2;
-
- res = ext4_convert_inline_data(inode);
- if (res)
- return res;
-
- ctx.format = EXT4_ENCRYPTION_CONTEXT_FORMAT_V1;
- memcpy(ctx.master_key_descriptor, policy->master_key_descriptor,
- EXT4_KEY_DESCRIPTOR_SIZE);
- if (!ext4_valid_contents_enc_mode(policy->contents_encryption_mode)) {
- printk(KERN_WARNING
- "%s: Invalid contents encryption mode %d\n", __func__,
- policy->contents_encryption_mode);
- return -EINVAL;
- }
- if (!ext4_valid_filenames_enc_mode(policy->filenames_encryption_mode)) {
- printk(KERN_WARNING
- "%s: Invalid filenames encryption mode %d\n", __func__,
- policy->filenames_encryption_mode);
- return -EINVAL;
- }
- if (policy->flags & ~EXT4_POLICY_FLAGS_VALID)
- return -EINVAL;
- ctx.contents_encryption_mode = policy->contents_encryption_mode;
- ctx.filenames_encryption_mode = policy->filenames_encryption_mode;
- ctx.flags = policy->flags;
- BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE);
- get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE);
-
- handle = ext4_journal_start(inode, EXT4_HT_MISC,
- ext4_jbd2_credits_xattr(inode));
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
- EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
- sizeof(ctx), 0);
- if (!res) {
- ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
- res = ext4_mark_inode_dirty(handle, inode);
- if (res)
- EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
- }
- res2 = ext4_journal_stop(handle);
- if (!res)
- res = res2;
- return res;
-}
-
-int ext4_process_policy(const struct ext4_encryption_policy *policy,
- struct inode *inode)
-{
- if (policy->version != 0)
- return -EINVAL;
-
- if (!ext4_inode_has_encryption_context(inode)) {
- if (!S_ISDIR(inode->i_mode))
- return -EINVAL;
- if (!ext4_empty_dir(inode))
- return -ENOTEMPTY;
- return ext4_create_encryption_context_from_policy(inode,
- policy);
- }
-
- if (ext4_is_encryption_context_consistent_with_policy(inode, policy))
- return 0;
-
- printk(KERN_WARNING "%s: Policy inconsistent with encryption context\n",
- __func__);
- return -EINVAL;
-}
-
-int ext4_get_policy(struct inode *inode, struct ext4_encryption_policy *policy)
-{
- struct ext4_encryption_context ctx;
-
- int res = ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
- EXT4_XATTR_NAME_ENCRYPTION_CONTEXT,
- &ctx, sizeof(ctx));
- if (res != sizeof(ctx))
- return -ENOENT;
- if (ctx.format != EXT4_ENCRYPTION_CONTEXT_FORMAT_V1)
- return -EINVAL;
- policy->version = 0;
- policy->contents_encryption_mode = ctx.contents_encryption_mode;
- policy->filenames_encryption_mode = ctx.filenames_encryption_mode;
- policy->flags = ctx.flags;
- memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor,
- EXT4_KEY_DESCRIPTOR_SIZE);
- return 0;
-}
-
-int ext4_is_child_context_consistent_with_parent(struct inode *parent,
- struct inode *child)
-{
- struct ext4_crypt_info *parent_ci, *child_ci;
- int res;
-
- if ((parent == NULL) || (child == NULL)) {
- pr_err("parent %p child %p\n", parent, child);
- WARN_ON(1); /* Should never happen */
- return 0;
- }
- /* no restrictions if the parent directory is not encrypted */
- if (!ext4_encrypted_inode(parent))
- return 1;
-	/* if the child inode is not encrypted, this is always a problem */
- if (!ext4_encrypted_inode(child))
- return 0;
- res = ext4_get_encryption_info(parent);
- if (res)
- return 0;
- res = ext4_get_encryption_info(child);
- if (res)
- return 0;
- parent_ci = EXT4_I(parent)->i_crypt_info;
- child_ci = EXT4_I(child)->i_crypt_info;
- if (!parent_ci && !child_ci)
- return 1;
- if (!parent_ci || !child_ci)
- return 0;
-
- return (memcmp(parent_ci->ci_master_key,
- child_ci->ci_master_key,
- EXT4_KEY_DESCRIPTOR_SIZE) == 0 &&
- (parent_ci->ci_data_mode == child_ci->ci_data_mode) &&
- (parent_ci->ci_filename_mode == child_ci->ci_filename_mode) &&
- (parent_ci->ci_flags == child_ci->ci_flags));
-}
-
-/**
- * ext4_inherit_context() - Sets a child context from its parent
- * @parent: Parent inode from which the context is inherited.
- * @child: Child inode that inherits the context from @parent.
- *
- * Return: Zero on success, non-zero otherwise
- */
-int ext4_inherit_context(struct inode *parent, struct inode *child)
-{
- struct ext4_encryption_context ctx;
- struct ext4_crypt_info *ci;
- int res;
-
- res = ext4_get_encryption_info(parent);
- if (res < 0)
- return res;
- ci = EXT4_I(parent)->i_crypt_info;
- if (ci == NULL)
- return -ENOKEY;
-
- ctx.format = EXT4_ENCRYPTION_CONTEXT_FORMAT_V1;
- if (DUMMY_ENCRYPTION_ENABLED(EXT4_SB(parent->i_sb))) {
- ctx.contents_encryption_mode = EXT4_ENCRYPTION_MODE_AES_256_XTS;
- ctx.filenames_encryption_mode =
- EXT4_ENCRYPTION_MODE_AES_256_CTS;
- ctx.flags = 0;
- memset(ctx.master_key_descriptor, 0x42,
- EXT4_KEY_DESCRIPTOR_SIZE);
- res = 0;
- } else {
- ctx.contents_encryption_mode = ci->ci_data_mode;
- ctx.filenames_encryption_mode = ci->ci_filename_mode;
- ctx.flags = ci->ci_flags;
- memcpy(ctx.master_key_descriptor, ci->ci_master_key,
- EXT4_KEY_DESCRIPTOR_SIZE);
- }
- get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE);
- res = ext4_xattr_set(child, EXT4_XATTR_INDEX_ENCRYPTION,
- EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
- sizeof(ctx), 0);
- if (!res) {
- ext4_set_inode_flag(child, EXT4_INODE_ENCRYPT);
- ext4_clear_inode_state(child, EXT4_STATE_MAY_INLINE_DATA);
- res = ext4_get_encryption_info(child);
- }
- return res;
-}
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 68323e3da3fa..67415e0e6af0 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -109,10 +109,10 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
struct super_block *sb = inode->i_sb;
struct buffer_head *bh = NULL;
int dir_has_error = 0;
- struct ext4_str fname_crypto_str = {.name = NULL, .len = 0};
+ struct fscrypt_str fstr = FSTR_INIT(NULL, 0);
if (ext4_encrypted_inode(inode)) {
- err = ext4_get_encryption_info(inode);
+ err = fscrypt_get_encryption_info(inode);
if (err && err != -ENOKEY)
return err;
}
@@ -139,8 +139,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
}
if (ext4_encrypted_inode(inode)) {
- err = ext4_fname_crypto_alloc_buffer(inode, EXT4_NAME_LEN,
- &fname_crypto_str);
+ err = fscrypt_fname_alloc_buffer(inode, EXT4_NAME_LEN, &fstr);
if (err < 0)
return err;
}
@@ -253,16 +252,19 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
get_dtype(sb, de->file_type)))
goto done;
} else {
- int save_len = fname_crypto_str.len;
+ int save_len = fstr.len;
+ struct fscrypt_str de_name =
+ FSTR_INIT(de->name,
+ de->name_len);
/* Directory is encrypted */
- err = ext4_fname_disk_to_usr(inode,
- NULL, de, &fname_crypto_str);
- fname_crypto_str.len = save_len;
+ err = fscrypt_fname_disk_to_usr(inode,
+ 0, 0, &de_name, &fstr);
+ fstr.len = save_len;
if (err < 0)
goto errout;
if (!dir_emit(ctx,
- fname_crypto_str.name, err,
+ fstr.name, err,
le32_to_cpu(de->inode),
get_dtype(sb, de->file_type)))
goto done;
@@ -281,7 +283,7 @@ done:
err = 0;
errout:
#ifdef CONFIG_EXT4_FS_ENCRYPTION
- ext4_fname_crypto_free_buffer(&fname_crypto_str);
+ fscrypt_fname_free_buffer(&fstr);
#endif
brelse(bh);
return err;
@@ -432,7 +434,7 @@ void ext4_htree_free_dir_info(struct dir_private_info *p)
int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
__u32 minor_hash,
struct ext4_dir_entry_2 *dirent,
- struct ext4_str *ent_name)
+ struct fscrypt_str *ent_name)
{
struct rb_node **p, *parent = NULL;
struct fname *fname, *new_fn;
@@ -609,7 +611,7 @@ finished:
static int ext4_dir_open(struct inode * inode, struct file * filp)
{
if (ext4_encrypted_inode(inode))
- return ext4_get_encryption_info(inode) ? -EACCES : 0;
+ return fscrypt_get_encryption_info(inode) ? -EACCES : 0;
return 0;
}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index b84aa1ca480a..ea31931386ec 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -32,6 +32,7 @@
#include <linux/percpu_counter.h>
#include <linux/ratelimit.h>
#include <crypto/hash.h>
+#include <linux/fscrypto.h>
#include <linux/falloc.h>
#include <linux/percpu-rwsem.h>
#ifdef __KERNEL__
@@ -608,15 +609,6 @@ enum {
#define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER 0x0010
#define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER 0x0020
-/* Encryption algorithms */
-#define EXT4_ENCRYPTION_MODE_INVALID 0
-#define EXT4_ENCRYPTION_MODE_AES_256_XTS 1
-#define EXT4_ENCRYPTION_MODE_AES_256_GCM 2
-#define EXT4_ENCRYPTION_MODE_AES_256_CBC 3
-#define EXT4_ENCRYPTION_MODE_AES_256_CTS 4
-
-#include "ext4_crypto.h"
-
/*
* ioctl commands
*/
@@ -638,9 +630,9 @@ enum {
#define EXT4_IOC_RESIZE_FS _IOW('f', 16, __u64)
#define EXT4_IOC_SWAP_BOOT _IO('f', 17)
#define EXT4_IOC_PRECACHE_EXTENTS _IO('f', 18)
-#define EXT4_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct ext4_encryption_policy)
-#define EXT4_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16])
-#define EXT4_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct ext4_encryption_policy)
+#define EXT4_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY
+#define EXT4_IOC_GET_ENCRYPTION_PWSALT FS_IOC_GET_ENCRYPTION_PWSALT
+#define EXT4_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY
#ifndef FS_IOC_FSGETXATTR
/* Until the uapi changes get merged for project quota... */
@@ -1082,10 +1074,6 @@ struct ext4_inode_info {
/* Precomputed uuid+inum+igen checksum for seeding inode checksums */
__u32 i_csum_seed;
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
- /* Encryption params */
- struct ext4_crypt_info *i_crypt_info;
-#endif
kprojid_t i_projid;
};
@@ -1344,6 +1332,11 @@ struct ext4_super_block {
/* Number of quota types we support */
#define EXT4_MAXQUOTAS 3
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+#define EXT4_KEY_DESC_PREFIX "ext4:"
+#define EXT4_KEY_DESC_PREFIX_SIZE 5
+#endif
+
/*
* fourth extended-fs super-block data in memory
*/
@@ -1430,6 +1423,7 @@ struct ext4_sb_info {
unsigned short *s_mb_offsets;
unsigned int *s_mb_maxs;
unsigned int s_group_info_size;
+ unsigned int s_mb_free_pending;
/* tunables */
unsigned long s_stripe;
@@ -1512,6 +1506,12 @@ struct ext4_sb_info {
/* Barrier between changing inodes' journal flags and writepages ops. */
struct percpu_rw_semaphore s_journal_flag_rwsem;
+
+ /* Encryption support */
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ u8 key_prefix[EXT4_KEY_DESC_PREFIX_SIZE];
+ u8 key_prefix_size;
+#endif
};
static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1610,15 +1610,6 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
/*
 * Returns true if the inode is encrypted
*/
-static inline int ext4_encrypted_inode(struct inode *inode)
-{
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
- return ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT);
-#else
- return 0;
-#endif
-}
-
#define NEXT_ORPHAN(inode) EXT4_I(inode)->i_dtime
/*
@@ -2082,10 +2073,10 @@ struct dx_hash_info
struct ext4_filename {
const struct qstr *usr_fname;
- struct ext4_str disk_name;
+ struct fscrypt_str disk_name;
struct dx_hash_info hinfo;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
- struct ext4_str crypto_buf;
+ struct fscrypt_str crypto_buf;
#endif
};
@@ -2296,132 +2287,82 @@ extern unsigned ext4_free_clusters_after_init(struct super_block *sb,
struct ext4_group_desc *gdp);
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *);
-/* crypto_policy.c */
-int ext4_is_child_context_consistent_with_parent(struct inode *parent,
- struct inode *child);
-int ext4_inherit_context(struct inode *parent, struct inode *child);
-void ext4_to_hex(char *dst, char *src, size_t src_size);
-int ext4_process_policy(const struct ext4_encryption_policy *policy,
- struct inode *inode);
-int ext4_get_policy(struct inode *inode,
- struct ext4_encryption_policy *policy);
-
-/* crypto.c */
-extern struct kmem_cache *ext4_crypt_info_cachep;
-bool ext4_valid_contents_enc_mode(uint32_t mode);
-uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size);
-extern struct workqueue_struct *ext4_read_workqueue;
-struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode,
- gfp_t gfp_flags);
-void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx);
-void ext4_restore_control_page(struct page *data_page);
-struct page *ext4_encrypt(struct inode *inode,
- struct page *plaintext_page,
- gfp_t gfp_flags);
-int ext4_decrypt(struct page *page);
-int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
- ext4_fsblk_t pblk, ext4_lblk_t len);
-extern const struct dentry_operations ext4_encrypted_d_ops;
-
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
-int ext4_init_crypto(void);
-void ext4_exit_crypto(void);
static inline int ext4_sb_has_crypto(struct super_block *sb)
{
return ext4_has_feature_encrypt(sb);
}
-#else
-static inline int ext4_init_crypto(void) { return 0; }
-static inline void ext4_exit_crypto(void) { }
-static inline int ext4_sb_has_crypto(struct super_block *sb)
+
+static inline bool ext4_encrypted_inode(struct inode *inode)
{
- return 0;
+ return ext4_test_inode_flag(inode, EXT4_INODE_ENCRYPT);
}
-#endif
-/* crypto_fname.c */
-bool ext4_valid_filenames_enc_mode(uint32_t mode);
-u32 ext4_fname_crypto_round_up(u32 size, u32 blksize);
-unsigned ext4_fname_encrypted_size(struct inode *inode, u32 ilen);
-int ext4_fname_crypto_alloc_buffer(struct inode *inode,
- u32 ilen, struct ext4_str *crypto_str);
-int _ext4_fname_disk_to_usr(struct inode *inode,
- struct dx_hash_info *hinfo,
- const struct ext4_str *iname,
- struct ext4_str *oname);
-int ext4_fname_disk_to_usr(struct inode *inode,
- struct dx_hash_info *hinfo,
- const struct ext4_dir_entry_2 *de,
- struct ext4_str *oname);
-int ext4_fname_usr_to_disk(struct inode *inode,
- const struct qstr *iname,
- struct ext4_str *oname);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
-void ext4_fname_crypto_free_buffer(struct ext4_str *crypto_str);
-int ext4_fname_setup_filename(struct inode *dir, const struct qstr *iname,
- int lookup, struct ext4_filename *fname);
-void ext4_fname_free_filename(struct ext4_filename *fname);
-#else
-static inline
-int ext4_setup_fname_crypto(struct inode *inode)
-{
- return 0;
-}
-static inline void ext4_fname_crypto_free_buffer(struct ext4_str *p) { }
static inline int ext4_fname_setup_filename(struct inode *dir,
- const struct qstr *iname,
- int lookup, struct ext4_filename *fname)
+ const struct qstr *iname,
+ int lookup, struct ext4_filename *fname)
{
- fname->usr_fname = iname;
- fname->disk_name.name = (unsigned char *) iname->name;
- fname->disk_name.len = iname->len;
- return 0;
-}
-static inline void ext4_fname_free_filename(struct ext4_filename *fname) { }
-#endif
-
+ struct fscrypt_name name;
+ int err;
-/* crypto_key.c */
-void ext4_free_crypt_info(struct ext4_crypt_info *ci);
-void ext4_free_encryption_info(struct inode *inode, struct ext4_crypt_info *ci);
-int _ext4_get_encryption_info(struct inode *inode);
+ memset(fname, 0, sizeof(struct ext4_filename));
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
-int ext4_has_encryption_key(struct inode *inode);
+ err = fscrypt_setup_filename(dir, iname, lookup, &name);
-static inline int ext4_get_encryption_info(struct inode *inode)
-{
- struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
-
- if (!ci ||
- (ci->ci_keyring_key &&
- (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
- (1 << KEY_FLAG_REVOKED) |
- (1 << KEY_FLAG_DEAD)))))
- return _ext4_get_encryption_info(inode);
- return 0;
+ fname->usr_fname = name.usr_fname;
+ fname->disk_name = name.disk_name;
+ fname->hinfo.hash = name.hash;
+ fname->hinfo.minor_hash = name.minor_hash;
+ fname->crypto_buf = name.crypto_buf;
+ return err;
}
-static inline struct ext4_crypt_info *ext4_encryption_info(struct inode *inode)
+static inline void ext4_fname_free_filename(struct ext4_filename *fname)
{
- return EXT4_I(inode)->i_crypt_info;
-}
+ struct fscrypt_name name;
-#else
-static inline int ext4_has_encryption_key(struct inode *inode)
-{
- return 0;
+ name.crypto_buf = fname->crypto_buf;
+ fscrypt_free_filename(&name);
+
+ fname->crypto_buf.name = NULL;
+ fname->usr_fname = NULL;
+ fname->disk_name.name = NULL;
}
-static inline int ext4_get_encryption_info(struct inode *inode)
+#else
+static inline int ext4_fname_setup_filename(struct inode *dir,
+ const struct qstr *iname,
+ int lookup, struct ext4_filename *fname)
{
+ fname->usr_fname = iname;
+ fname->disk_name.name = (unsigned char *) iname->name;
+ fname->disk_name.len = iname->len;
return 0;
}
-static inline struct ext4_crypt_info *ext4_encryption_info(struct inode *inode)
-{
- return NULL;
-}
-#endif
+static inline void ext4_fname_free_filename(struct ext4_filename *fname) { }
+#define fscrypt_set_d_op(i)
+#define fscrypt_get_ctx fscrypt_notsupp_get_ctx
+#define fscrypt_release_ctx fscrypt_notsupp_release_ctx
+#define fscrypt_encrypt_page fscrypt_notsupp_encrypt_page
+#define fscrypt_decrypt_page fscrypt_notsupp_decrypt_page
+#define fscrypt_decrypt_bio_pages fscrypt_notsupp_decrypt_bio_pages
+#define fscrypt_pullback_bio_page fscrypt_notsupp_pullback_bio_page
+#define fscrypt_restore_control_page fscrypt_notsupp_restore_control_page
+#define fscrypt_zeroout_range fscrypt_notsupp_zeroout_range
+#define fscrypt_process_policy fscrypt_notsupp_process_policy
+#define fscrypt_get_policy fscrypt_notsupp_get_policy
+#define fscrypt_has_permitted_context fscrypt_notsupp_has_permitted_context
+#define fscrypt_inherit_context fscrypt_notsupp_inherit_context
+#define fscrypt_get_encryption_info fscrypt_notsupp_get_encryption_info
+#define fscrypt_put_encryption_info fscrypt_notsupp_put_encryption_info
+#define fscrypt_setup_filename fscrypt_notsupp_setup_filename
+#define fscrypt_free_filename fscrypt_notsupp_free_filename
+#define fscrypt_fname_encrypted_size fscrypt_notsupp_fname_encrypted_size
+#define fscrypt_fname_alloc_buffer fscrypt_notsupp_fname_alloc_buffer
+#define fscrypt_fname_free_buffer fscrypt_notsupp_fname_free_buffer
+#define fscrypt_fname_disk_to_usr fscrypt_notsupp_fname_disk_to_usr
+#define fscrypt_fname_usr_to_disk fscrypt_notsupp_fname_usr_to_disk
+#endif
/* dir.c */
extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
@@ -2435,7 +2376,7 @@ extern int __ext4_check_dir_entry(const char *, unsigned int, struct inode *,
extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
__u32 minor_hash,
struct ext4_dir_entry_2 *dirent,
- struct ext4_str *ent_name);
+ struct fscrypt_str *ent_name);
extern void ext4_htree_free_dir_info(struct dir_private_info *p);
extern int ext4_find_dest_de(struct inode *dir, struct inode *inode,
struct buffer_head *bh,
@@ -2623,7 +2564,7 @@ extern int ext4_generic_delete_entry(handle_t *handle,
void *entry_buf,
int buf_size,
int csum_size);
-extern int ext4_empty_dir(struct inode *inode);
+extern bool ext4_empty_dir(struct inode *inode);
/* resize.c */
extern int ext4_group_add(struct super_block *sb,
@@ -3105,7 +3046,7 @@ extern int ext4_delete_inline_entry(handle_t *handle,
struct ext4_dir_entry_2 *de_del,
struct buffer_head *bh,
int *has_inline_data);
-extern int empty_inline_dir(struct inode *dir, int *has_inline_data);
+extern bool empty_inline_dir(struct inode *dir, int *has_inline_data);
extern struct buffer_head *ext4_get_first_inline_block(struct inode *inode,
struct ext4_dir_entry_2 **parent_de,
int *retval);
diff --git a/fs/ext4/ext4_crypto.h b/fs/ext4/ext4_crypto.h
deleted file mode 100644
index 1f73c29717e1..000000000000
--- a/fs/ext4/ext4_crypto.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/*
- * linux/fs/ext4/ext4_crypto.h
- *
- * Copyright (C) 2015, Google, Inc.
- *
- * This contains encryption header content for ext4
- *
- * Written by Michael Halcrow, 2015.
- */
-
-#ifndef _EXT4_CRYPTO_H
-#define _EXT4_CRYPTO_H
-
-#include <linux/fs.h>
-
-#define EXT4_KEY_DESCRIPTOR_SIZE 8
-
-/* Policy provided via an ioctl on the topmost directory */
-struct ext4_encryption_policy {
- char version;
- char contents_encryption_mode;
- char filenames_encryption_mode;
- char flags;
- char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
-} __attribute__((__packed__));
-
-#define EXT4_ENCRYPTION_CONTEXT_FORMAT_V1 1
-#define EXT4_KEY_DERIVATION_NONCE_SIZE 16
-
-#define EXT4_POLICY_FLAGS_PAD_4 0x00
-#define EXT4_POLICY_FLAGS_PAD_8 0x01
-#define EXT4_POLICY_FLAGS_PAD_16 0x02
-#define EXT4_POLICY_FLAGS_PAD_32 0x03
-#define EXT4_POLICY_FLAGS_PAD_MASK 0x03
-#define EXT4_POLICY_FLAGS_VALID 0x03
-
-/**
- * Encryption context for inode
- *
- * Protector format:
- * 1 byte: Protector format (1 = this version)
- * 1 byte: File contents encryption mode
- * 1 byte: File names encryption mode
- * 1 byte: Reserved
- * 8 bytes: Master Key descriptor
- * 16 bytes: Encryption Key derivation nonce
- */
-struct ext4_encryption_context {
- char format;
- char contents_encryption_mode;
- char filenames_encryption_mode;
- char flags;
- char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
- char nonce[EXT4_KEY_DERIVATION_NONCE_SIZE];
-} __attribute__((__packed__));
-
-/* Encryption parameters */
-#define EXT4_XTS_TWEAK_SIZE 16
-#define EXT4_AES_128_ECB_KEY_SIZE 16
-#define EXT4_AES_256_GCM_KEY_SIZE 32
-#define EXT4_AES_256_CBC_KEY_SIZE 32
-#define EXT4_AES_256_CTS_KEY_SIZE 32
-#define EXT4_AES_256_XTS_KEY_SIZE 64
-#define EXT4_MAX_KEY_SIZE 64
-
-#define EXT4_KEY_DESC_PREFIX "ext4:"
-#define EXT4_KEY_DESC_PREFIX_SIZE 5
-
-/* This is passed in from userspace into the kernel keyring */
-struct ext4_encryption_key {
- __u32 mode;
- char raw[EXT4_MAX_KEY_SIZE];
- __u32 size;
-} __attribute__((__packed__));
-
-struct ext4_crypt_info {
- char ci_data_mode;
- char ci_filename_mode;
- char ci_flags;
- struct crypto_skcipher *ci_ctfm;
- struct key *ci_keyring_key;
- char ci_master_key[EXT4_KEY_DESCRIPTOR_SIZE];
-};
-
-#define EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL 0x00000001
-#define EXT4_WRITE_PATH_FL 0x00000002
-
-struct ext4_crypto_ctx {
- union {
- struct {
- struct page *bounce_page; /* Ciphertext page */
- struct page *control_page; /* Original page */
- } w;
- struct {
- struct bio *bio;
- struct work_struct work;
- } r;
- struct list_head free_list; /* Free list */
- };
- char flags; /* Flags */
- char mode; /* Encryption mode for tfm */
-};
-
-struct ext4_completion_result {
- struct completion completion;
- int res;
-};
-
-#define DECLARE_EXT4_COMPLETION_RESULT(ecr) \
- struct ext4_completion_result ecr = { \
- COMPLETION_INITIALIZER((ecr).completion), 0 }
-
-static inline int ext4_encryption_key_size(int mode)
-{
- switch (mode) {
- case EXT4_ENCRYPTION_MODE_AES_256_XTS:
- return EXT4_AES_256_XTS_KEY_SIZE;
- case EXT4_ENCRYPTION_MODE_AES_256_GCM:
- return EXT4_AES_256_GCM_KEY_SIZE;
- case EXT4_ENCRYPTION_MODE_AES_256_CBC:
- return EXT4_AES_256_CBC_KEY_SIZE;
- case EXT4_ENCRYPTION_MODE_AES_256_CTS:
- return EXT4_AES_256_CTS_KEY_SIZE;
- default:
- BUG();
- }
- return 0;
-}
-
-#define EXT4_FNAME_NUM_SCATTER_ENTRIES 4
-#define EXT4_CRYPTO_BLOCK_SIZE 16
-#define EXT4_FNAME_CRYPTO_DIGEST_SIZE 32
-
-struct ext4_str {
- unsigned char *name;
- u32 len;
-};
-
-/**
- * For encrypted symlinks, the ciphertext length is stored at the beginning
- * of the string in little-endian format.
- */
-struct ext4_encrypted_symlink_data {
- __le16 len;
- char encrypted_path[1];
-} __attribute__((__packed__));
-
-/**
- * This function is used to calculate the disk space required to
- * store a symlink target of length l in encrypted symlink format.
- */
-static inline u32 encrypted_symlink_data_len(u32 l)
-{
- if (l < EXT4_CRYPTO_BLOCK_SIZE)
- l = EXT4_CRYPTO_BLOCK_SIZE;
- return (l + sizeof(struct ext4_encrypted_symlink_data) - 1);
-}
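A worked instance, assuming the packed struct above is 3 bytes (the
__le16 length plus the one-byte encrypted_path placeholder that the
sizeof already counts, hence the "- 1"):

	u32 n = encrypted_symlink_data_len(10);	/* max(10, 16) + 3 - 1 == 18 */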
-
-#endif /* _EXT4_CRYPTO_H */
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index 09c1ef38cbe6..b1d52c14098e 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -175,6 +175,13 @@ struct ext4_journal_cb_entry {
* There is no guaranteed calling order of multiple registered callbacks on
* the same transaction.
*/
+static inline void _ext4_journal_callback_add(handle_t *handle,
+ struct ext4_journal_cb_entry *jce)
+{
+ /* Add the jce to transaction's private list */
+ list_add_tail(&jce->jce_list, &handle->h_transaction->t_private_list);
+}
+
static inline void ext4_journal_callback_add(handle_t *handle,
void (*func)(struct super_block *sb,
struct ext4_journal_cb_entry *jce,
@@ -187,10 +194,11 @@ static inline void ext4_journal_callback_add(handle_t *handle,
/* Add the jce to transaction's private list */
jce->jce_func = func;
spin_lock(&sbi->s_md_lock);
- list_add_tail(&jce->jce_list, &handle->h_transaction->t_private_list);
+ _ext4_journal_callback_add(handle, jce);
spin_unlock(&sbi->s_md_lock);
}
+
/**
* ext4_journal_callback_del: delete a registered callback
* @handle: active journal transaction handle on which callback was registered
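The split above follows the usual locked-helper convention: `_ext4_journal_callback_add()` assumes the caller already holds `s_md_lock`, which lets the mballoc hunk later in this series link the callback and bump `s_mb_free_pending` inside a single critical section. A compilable userspace sketch of the pattern, with illustrative names only (the kernel spells the locked variant with a leading underscore; that prefix is reserved in userspace C, so it is renamed here):

    #include <pthread.h>

    struct entry {
        struct entry *next;
    };

    struct cb_list {
        pthread_mutex_t lock;
        struct entry *head;
        struct entry **tailp;           /* points at the last next pointer */
    };

    void cb_list_init(struct cb_list *l)
    {
        pthread_mutex_init(&l->lock, NULL);
        l->head = NULL;
        l->tailp = &l->head;
    }

    /* Locked helper: caller must already hold l->lock. */
    void cb_add_locked(struct cb_list *l, struct entry *e)
    {
        e->next = NULL;
        *l->tailp = e;
        l->tailp = &e->next;
    }

    /* Convenience wrapper that takes the lock itself. */
    void cb_add(struct cb_list *l, struct entry *e)
    {
        pthread_mutex_lock(&l->lock);
        cb_add_locked(l, e);
        pthread_mutex_unlock(&l->lock);
    }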
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 2a2eef9c14e4..d7ccb7f51dfc 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -381,9 +381,13 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
ext4_fsblk_t block = ext4_ext_pblock(ext);
int len = ext4_ext_get_actual_len(ext);
ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
- ext4_lblk_t last = lblock + len - 1;
- if (len == 0 || lblock > last)
+ /*
+ * We allow neither:
+ * - zero length
+ * - overflow/wrap-around
+ */
+ if (lblock + len <= lblock)
return 0;
return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}
@@ -474,6 +478,10 @@ static int __ext4_ext_check(const char *function, unsigned int line,
error_msg = "invalid extent entries";
goto corrupted;
}
+ if (unlikely(depth > 32)) {
+ error_msg = "too large eh_depth";
+ goto corrupted;
+ }
/* Verify checksum on non-root extent tree nodes */
if (ext_depth(inode) != depth &&
!ext4_extent_block_csum_verify(inode, eh)) {
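The rewritten length check above folds both rejected cases into one unsigned comparison: with 32-bit logical block numbers, `lblock + len <= lblock` holds exactly when `len` is zero or the sum wraps past 2^32. A small compilable demonstration:

    #include <stdint.h>
    #include <stdio.h>

    /* With unsigned arithmetic, lblock + len <= lblock catches both
     * len == 0 and 32-bit wrap-around in a single test. */
    static int extent_range_valid(uint32_t lblock, uint32_t len)
    {
        return (uint32_t)(lblock + len) > lblock;
    }

    int main(void)
    {
        printf("%d\n", extent_range_valid(100, 10));          /* 1: fine */
        printf("%d\n", extent_range_valid(100, 0));           /* 0: empty */
        printf("%d\n", extent_range_valid(0xFFFFFFF0u, 32));  /* 0: wraps */
        return 0;
    }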
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index df44c877892a..261ac3734c58 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -202,7 +202,7 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (IS_ERR(handle))
result = VM_FAULT_SIGBUS;
else
- result = __dax_fault(vma, vmf, ext4_dax_get_block);
+ result = dax_fault(vma, vmf, ext4_dax_get_block);
if (write) {
if (!IS_ERR(handle))
@@ -237,7 +237,7 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
if (IS_ERR(handle))
result = VM_FAULT_SIGBUS;
else
- result = __dax_pmd_fault(vma, addr, pmd, flags,
+ result = dax_pmd_fault(vma, addr, pmd, flags,
ext4_dax_get_block);
if (write) {
@@ -303,10 +303,10 @@ static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
struct inode *inode = file->f_mapping->host;
if (ext4_encrypted_inode(inode)) {
- int err = ext4_get_encryption_info(inode);
+ int err = fscrypt_get_encryption_info(inode);
if (err)
return 0;
- if (ext4_encryption_info(inode) == NULL)
+ if (!fscrypt_has_encryption_key(inode))
return -ENOKEY;
}
file_accessed(file);
@@ -362,16 +362,16 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
}
}
if (ext4_encrypted_inode(inode)) {
- ret = ext4_get_encryption_info(inode);
+ ret = fscrypt_get_encryption_info(inode);
if (ret)
return -EACCES;
- if (ext4_encryption_info(inode) == NULL)
+ if (!fscrypt_has_encryption_key(inode))
return -ENOKEY;
}
dir = dget_parent(file_dentry(filp));
if (ext4_encrypted_inode(d_inode(dir)) &&
- !ext4_is_child_context_consistent_with_parent(d_inode(dir), inode)) {
+ !fscrypt_has_permitted_context(d_inode(dir), inode)) {
ext4_warning(inode->i_sb,
"Inconsistent encryption contexts: %lu/%lu",
(unsigned long) d_inode(dir)->i_ino,
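Both the mmap and open hunks apply the same gate through the new shared helpers: load the key material, return -ENOKEY when an encryption context exists but the key is absent, and refuse an open whose parent directory carries a mismatched encryption policy. A simplified, compilable sketch of the rule, with stand-in types rather than the fscrypt API (the real code also distinguishes an -EACCES case when the context cannot be read):

    #include <errno.h>
    #include <stdbool.h>
    #include <string.h>

    /* Stand-in for an inode; real code keys off inode flags and
     * per-inode fscrypt info. */
    struct node {
        bool encrypted;
        bool key_loaded;
        unsigned char policy[8];        /* master key descriptor stand-in */
    };

    static bool has_permitted_context(const struct node *dir,
                                      const struct node *child)
    {
        if (!dir->encrypted)
            return true;                /* unencrypted parent: anything goes */
        return child->encrypted &&
               memcmp(dir->policy, child->policy, sizeof(dir->policy)) == 0;
    }

    static int open_check(const struct node *dir, const struct node *inode)
    {
        if (inode->encrypted && !inode->key_loaded)
            return -ENOKEY;             /* context present, key not loaded */
        if (!has_permitted_context(dir, inode))
            return -EPERM;              /* mismatched policies */
        return 0;
    }

    int main(void)
    {
        struct node dir   = { true,  true,  "k1" };
        struct node child = { false, false, "" };
        /* plaintext child inside an encrypted directory is rejected */
        return open_check(&dir, &child) == -EPERM ? 0 : 1;
    }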
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 8850254136ae..5c4372512ef7 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -106,9 +106,11 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
}
if (!journal) {
- ret = generic_file_fsync(file, start, end, datasync);
+ ret = __generic_file_fsync(file, start, end, datasync);
if (!ret && !hlist_empty(&inode->i_dentry))
ret = ext4_sync_parent(inode);
+ if (test_opt(inode->i_sb, BARRIER))
+ goto issue_flush;
goto out;
}
@@ -140,6 +142,7 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
needs_barrier = true;
ret = jbd2_complete_transaction(journal, commit_tid);
if (needs_barrier) {
+ issue_flush:
err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
if (!ret)
ret = err;
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 3da4cf8d18b6..9e66cd1d7b78 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -214,7 +214,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
trace_ext4_load_inode_bitmap(sb, block_group);
bh->b_end_io = ext4_end_bitmap_read;
get_bh(bh);
- submit_bh(READ | REQ_META | REQ_PRIO, bh);
+ submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
put_bh(bh);
@@ -767,10 +767,10 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
if ((ext4_encrypted_inode(dir) ||
DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb))) &&
(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
- err = ext4_get_encryption_info(dir);
+ err = fscrypt_get_encryption_info(dir);
if (err)
return ERR_PTR(err);
- if (ext4_encryption_info(dir) == NULL)
+ if (!fscrypt_has_encryption_key(dir))
return ERR_PTR(-EPERM);
if (!handle)
nblocks += EXT4_DATA_TRANS_BLOCKS(dir->i_sb);
@@ -1115,7 +1115,8 @@ got:
}
if (encrypt) {
- err = ext4_inherit_context(dir, inode);
+ /* pass a non-NULL fs_data so set_context skips starting its own journal handle. */
+ err = fscrypt_inherit_context(dir, inode, &encrypt, true);
if (err)
goto fail_free_drop;
}
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index ff7538c26992..f74d5ee2cdec 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1326,7 +1326,7 @@ int htree_inlinedir_to_tree(struct file *dir_file,
struct ext4_iloc iloc;
void *dir_buf = NULL;
struct ext4_dir_entry_2 fake;
- struct ext4_str tmp_str;
+ struct fscrypt_str tmp_str;
ret = ext4_get_inode_loc(inode, &iloc);
if (ret)
@@ -1739,20 +1739,20 @@ ext4_get_inline_entry(struct inode *inode,
return (struct ext4_dir_entry_2 *)(inline_pos + offset);
}
-int empty_inline_dir(struct inode *dir, int *has_inline_data)
+bool empty_inline_dir(struct inode *dir, int *has_inline_data)
{
int err, inline_size;
struct ext4_iloc iloc;
void *inline_pos;
unsigned int offset;
struct ext4_dir_entry_2 *de;
- int ret = 1;
+ bool ret = true;
err = ext4_get_inode_loc(dir, &iloc);
if (err) {
EXT4_ERROR_INODE(dir, "error %d getting inode %lu block",
err, dir->i_ino);
- return 1;
+ return true;
}
down_read(&EXT4_I(dir)->xattr_sem);
@@ -1766,7 +1766,7 @@ int empty_inline_dir(struct inode *dir, int *has_inline_data)
ext4_warning(dir->i_sb,
"bad inline directory (dir #%lu) - no `..'",
dir->i_ino);
- ret = 1;
+ ret = true;
goto out;
}
@@ -1784,11 +1784,11 @@ int empty_inline_dir(struct inode *dir, int *has_inline_data)
dir->i_ino, le32_to_cpu(de->inode),
le16_to_cpu(de->rec_len), de->name_len,
inline_size);
- ret = 1;
+ ret = true;
goto out;
}
if (le32_to_cpu(de->inode)) {
- ret = 0;
+ ret = false;
goto out;
}
offset += ext4_rec_len_from_disk(de->rec_len, inline_size);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index f7140ca66e3b..3131747199e1 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -51,25 +51,31 @@ static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
struct ext4_inode_info *ei)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- __u16 csum_lo;
- __u16 csum_hi = 0;
__u32 csum;
+ __u16 dummy_csum = 0;
+ int offset = offsetof(struct ext4_inode, i_checksum_lo);
+ unsigned int csum_size = sizeof(dummy_csum);
- csum_lo = le16_to_cpu(raw->i_checksum_lo);
- raw->i_checksum_lo = 0;
- if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
- EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
- csum_hi = le16_to_cpu(raw->i_checksum_hi);
- raw->i_checksum_hi = 0;
- }
-
- csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
- EXT4_INODE_SIZE(inode->i_sb));
+ csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw, offset);
+ csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, csum_size);
+ offset += csum_size;
+ csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
+ EXT4_GOOD_OLD_INODE_SIZE - offset);
- raw->i_checksum_lo = cpu_to_le16(csum_lo);
- if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
- EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
- raw->i_checksum_hi = cpu_to_le16(csum_hi);
+ if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
+ offset = offsetof(struct ext4_inode, i_checksum_hi);
+ csum = ext4_chksum(sbi, csum, (__u8 *)raw +
+ EXT4_GOOD_OLD_INODE_SIZE,
+ offset - EXT4_GOOD_OLD_INODE_SIZE);
+ if (EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
+ csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
+ csum_size);
+ offset += csum_size;
+ csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
+ EXT4_INODE_SIZE(inode->i_sb) -
+ offset);
+ }
+ }
return csum;
}
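The reworked ext4_inode_csum() no longer zeroes i_checksum_lo/hi in the buffer and restores them afterwards; it checksums up to each field, feeds an equal-width zero in its place, and resumes after it, so the shared buffer is never written to. The same dummy_csum trick reappears below in ext4_dx_csum(), ext4_group_desc_csum() and ext4_xattr_block_csum(). A compilable illustration with a toy hash standing in for ext4_chksum()/crc32c:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy running hash; not crc32c, just enough to show the shape. */
    static uint32_t chksum(uint32_t seed, const void *p, size_t n)
    {
        const uint8_t *b = p;
        while (n--)
            seed = seed * 31 + *b++;
        return seed;
    }

    struct record {
        uint32_t a;
        uint16_t checksum;
        uint16_t b;
    };

    static uint32_t record_csum(const struct record *r)
    {
        uint16_t dummy_csum = 0;
        size_t offset = offsetof(struct record, checksum);
        uint32_t csum;

        csum = chksum(0, r, offset);                          /* bytes before */
        csum = chksum(csum, &dummy_csum, sizeof(dummy_csum)); /* field as zero */
        offset += sizeof(dummy_csum);
        csum = chksum(csum, (const uint8_t *)r + offset,
                      sizeof(*r) - offset);                   /* bytes after */
        return csum;
    }

    int main(void)
    {
        struct record r1 = { 7, 0x1111, 9 };
        struct record r2 = { 7, 0x2222, 9 };
        /* prints 1: the stored checksum never enters the computation */
        printf("%d\n", record_csum(&r1) == record_csum(&r2));
        return 0;
    }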
@@ -205,9 +211,9 @@ void ext4_evict_inode(struct inode *inode)
* Note that directories do not have this problem because they
* don't use page cache.
*/
- if (ext4_should_journal_data(inode) &&
- (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
- inode->i_ino != EXT4_JOURNAL_INO) {
+ if (inode->i_ino != EXT4_JOURNAL_INO &&
+ ext4_should_journal_data(inode) &&
+ (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
@@ -386,7 +392,7 @@ int ext4_issue_zeroout(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk,
int ret;
if (ext4_encrypted_inode(inode))
- return ext4_encrypted_zeroout(inode, lblk, pblk, len);
+ return fscrypt_zeroout_range(inode, lblk, pblk, len);
ret = sb_issue_zeroout(inode->i_sb, pblk, len, GFP_NOFS);
if (ret > 0)
@@ -981,7 +987,7 @@ struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
return bh;
if (!bh || buffer_uptodate(bh))
return bh;
- ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
+ ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &bh);
wait_on_buffer(bh);
if (buffer_uptodate(bh))
return bh;
@@ -1135,7 +1141,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
!buffer_unwritten(bh) &&
(block_start < from || block_end > to)) {
- ll_rw_block(READ, 1, &bh);
+ ll_rw_block(REQ_OP_READ, 0, 1, &bh);
*wait_bh++ = bh;
decrypt = ext4_encrypted_inode(inode) &&
S_ISREG(inode->i_mode);
@@ -1152,7 +1158,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
if (unlikely(err))
page_zero_new_buffers(page, from, to);
else if (decrypt)
- err = ext4_decrypt(page);
+ err = fscrypt_decrypt_page(page);
return err;
}
#endif
@@ -2748,13 +2754,36 @@ retry:
done = true;
}
}
- ext4_journal_stop(handle);
+ /*
+ * Caution: If the handle is synchronous,
+ * ext4_journal_stop() can wait for transaction commit
+ * to finish which may depend on writeback of pages to
+ * complete or on page lock to be released. In that
+ * case, we have to wait until after we have
+ * submitted all the IO, released page locks we hold,
+ * and dropped io_end reference (for extent conversion
+ * to be able to complete) before stopping the handle.
+ */
+ if (!ext4_handle_valid(handle) || handle->h_sync == 0) {
+ ext4_journal_stop(handle);
+ handle = NULL;
+ }
/* Submit prepared bio */
ext4_io_submit(&mpd.io_submit);
/* Unlock pages we didn't use */
mpage_release_unused_pages(&mpd, give_up_on_write);
- /* Drop our io_end reference we got from init */
- ext4_put_io_end(mpd.io_submit.io_end);
+ /*
+ * Drop our io_end reference we got from init. We have
+ * to be careful and use deferred io_end finishing if
+ * we are still holding the transaction as we can
+ * release the last reference to io_end which may end
+ * up doing unwritten extent conversion.
+ */
+ if (handle) {
+ ext4_put_io_end_defer(mpd.io_submit.io_end);
+ ext4_journal_stop(handle);
+ } else
+ ext4_put_io_end(mpd.io_submit.io_end);
if (ret == -ENOSPC && sbi->s_journal) {
/*
@@ -3698,7 +3727,7 @@ static int __ext4_block_zero_page_range(handle_t *handle,
if (!buffer_uptodate(bh)) {
err = -EIO;
- ll_rw_block(READ, 1, &bh);
+ ll_rw_block(REQ_OP_READ, 0, 1, &bh);
wait_on_buffer(bh);
/* Uhhuh. Read error. Complain and punt. */
if (!buffer_uptodate(bh))
@@ -3706,9 +3735,9 @@ static int __ext4_block_zero_page_range(handle_t *handle,
if (S_ISREG(inode->i_mode) &&
ext4_encrypted_inode(inode)) {
/* We expect the key to be set. */
- BUG_ON(!ext4_has_encryption_key(inode));
+ BUG_ON(!fscrypt_has_encryption_key(inode));
BUG_ON(blocksize != PAGE_SIZE);
- WARN_ON_ONCE(ext4_decrypt(page));
+ WARN_ON_ONCE(fscrypt_decrypt_page(page));
}
}
if (ext4_should_journal_data(inode)) {
@@ -4281,7 +4310,7 @@ make_io:
trace_ext4_load_inode(inode);
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
- submit_bh(READ | REQ_META | REQ_PRIO, bh);
+ submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
EXT4_ERROR_INODE_BLOCK(inode, block,
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 28cc412852af..10686fd67fb4 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -308,6 +308,7 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
kprojid_t kprojid;
struct ext4_iloc iloc;
struct ext4_inode *raw_inode;
+ struct dquot *transfer_to[MAXQUOTAS] = { };
if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_PROJECT)) {
@@ -361,17 +362,14 @@ static int ext4_ioctl_setproject(struct file *filp, __u32 projid)
if (err)
goto out_stop;
- if (sb_has_quota_limits_enabled(sb, PRJQUOTA)) {
- struct dquot *transfer_to[MAXQUOTAS] = { };
-
- transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
- if (!IS_ERR(transfer_to[PRJQUOTA])) {
- err = __dquot_transfer(inode, transfer_to);
- dqput(transfer_to[PRJQUOTA]);
- if (err)
- goto out_dirty;
- }
+ transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid));
+ if (!IS_ERR(transfer_to[PRJQUOTA])) {
+ err = __dquot_transfer(inode, transfer_to);
+ dqput(transfer_to[PRJQUOTA]);
+ if (err)
+ goto out_dirty;
}
+
EXT4_I(inode)->i_projid = kprojid;
inode->i_ctime = ext4_current_time(inode);
out_dirty:
@@ -772,19 +770,13 @@ resizefs_out:
return ext4_ext_precache(inode);
case EXT4_IOC_SET_ENCRYPTION_POLICY: {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
- struct ext4_encryption_policy policy;
- int err = 0;
+ struct fscrypt_policy policy;
if (copy_from_user(&policy,
- (struct ext4_encryption_policy __user *)arg,
- sizeof(policy))) {
- err = -EFAULT;
- goto encryption_policy_out;
- }
-
- err = ext4_process_policy(&policy, inode);
-encryption_policy_out:
- return err;
+ (struct fscrypt_policy __user *)arg,
+ sizeof(policy)))
+ return -EFAULT;
+ return fscrypt_process_policy(inode, &policy);
#else
return -EOPNOTSUPP;
#endif
@@ -827,12 +819,12 @@ encryption_policy_out:
}
case EXT4_IOC_GET_ENCRYPTION_POLICY: {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
- struct ext4_encryption_policy policy;
+ struct fscrypt_policy policy;
int err = 0;
if (!ext4_encrypted_inode(inode))
return -ENOENT;
- err = ext4_get_policy(inode, &policy);
+ err = fscrypt_get_policy(inode, &policy);
if (err)
return err;
if (copy_to_user((void __user *)arg, &policy, sizeof(policy)))
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index c1ab3ec30423..f418f55c2bbe 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2350,7 +2350,6 @@ static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
}
const struct file_operations ext4_seq_mb_groups_fops = {
- .owner = THIS_MODULE,
.open = ext4_mb_seq_groups_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -2627,6 +2626,7 @@ int ext4_mb_init(struct super_block *sb)
spin_lock_init(&sbi->s_md_lock);
spin_lock_init(&sbi->s_bal_lock);
+ sbi->s_mb_free_pending = 0;
sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
@@ -2814,6 +2814,9 @@ static void ext4_free_data_callback(struct super_block *sb,
/* we expect to find existing buddy because it's pinned */
BUG_ON(err != 0);
+ spin_lock(&EXT4_SB(sb)->s_md_lock);
+ EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count;
+ spin_unlock(&EXT4_SB(sb)->s_md_lock);
db = e4b.bd_info;
/* there are blocks to put in buddy to make them really free */
@@ -2939,7 +2942,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
"fs metadata", block, block+len);
/* File system mounted not to panic on error
- * Fix the bitmap and repeat the block allocation
+ * Fix the bitmap and return EFSCORRUPTED
* We leak some of the blocks here.
*/
ext4_lock_group(sb, ac->ac_b_ex.fe_group);
@@ -2948,7 +2951,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
if (!err)
- err = -EAGAIN;
+ err = -EFSCORRUPTED;
goto out_err;
}
@@ -4513,18 +4516,7 @@ repeat:
}
if (likely(ac->ac_status == AC_STATUS_FOUND)) {
*errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs);
- if (*errp == -EAGAIN) {
- /*
- * drop the reference that we took
- * in ext4_mb_use_best_found
- */
- ext4_mb_release_context(ac);
- ac->ac_b_ex.fe_group = 0;
- ac->ac_b_ex.fe_start = 0;
- ac->ac_b_ex.fe_len = 0;
- ac->ac_status = AC_STATUS_CONTINUE;
- goto repeat;
- } else if (*errp) {
+ if (*errp) {
ext4_discard_allocated_blocks(ac);
goto errout;
} else {
@@ -4583,6 +4575,7 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
{
ext4_group_t group = e4b->bd_group;
ext4_grpblk_t cluster;
+ ext4_grpblk_t clusters = new_entry->efd_count;
struct ext4_free_data *entry;
struct ext4_group_info *db = e4b->bd_info;
struct super_block *sb = e4b->bd_sb;
@@ -4649,8 +4642,11 @@ ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
}
}
/* Add the extent to transaction's private list */
- ext4_journal_callback_add(handle, ext4_free_data_callback,
- &new_entry->efd_jce);
+ new_entry->efd_jce.jce_func = ext4_free_data_callback;
+ spin_lock(&sbi->s_md_lock);
+ _ext4_journal_callback_add(handle, &new_entry->efd_jce);
+ sbi->s_mb_free_pending += clusters;
+ spin_unlock(&sbi->s_md_lock);
return 0;
}
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 23d436d6f8b8..d89754ef1aab 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -52,7 +52,7 @@ static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
lock_buffer(bh);
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
- submit_bh(WRITE_SYNC | REQ_META | REQ_PRIO, bh);
+ submit_bh(REQ_OP_WRITE, WRITE_SYNC | REQ_META | REQ_PRIO, bh);
wait_on_buffer(bh);
sb_end_write(sb);
if (unlikely(!buffer_uptodate(bh)))
@@ -88,7 +88,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
get_bh(*bh);
lock_buffer(*bh);
(*bh)->b_end_io = end_buffer_read_sync;
- submit_bh(READ_SYNC | REQ_META | REQ_PRIO, *bh);
+ submit_bh(REQ_OP_READ, READ_SYNC | REQ_META | REQ_PRIO, *bh);
wait_on_buffer(*bh);
if (!buffer_uptodate(*bh)) {
ret = -EIO;
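These submit_bh()/ll_rw_block() changes, like the bio changes elsewhere in the series, come from the block-layer split of the old combined `rw` bitmask into an operation (`REQ_OP_READ`, `REQ_OP_WRITE`, ...) plus modifier flags (`REQ_META`, `REQ_PRIO`, `WRITE_SYNC`), so the helpers now take two arguments where they took one. A toy model of the new calling convention, with stand-in names rather than the block layer's:

    #include <stdio.h>

    enum req_op { OP_READ, OP_WRITE };      /* the operation is an enum */

    #define F_META (1u << 0)                /* the modifiers stay a bitmask */
    #define F_PRIO (1u << 1)

    static void submit(enum req_op op, unsigned flags)
    {
        printf("%s%s%s\n", op == OP_READ ? "read" : "write",
               flags & F_META ? " +meta" : "",
               flags & F_PRIO ? " +prio" : "");
    }

    int main(void)
    {
        /* old: submit_bh(READ | REQ_META | REQ_PRIO, bh)
         * new: submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh) */
        submit(OP_READ, F_META | F_PRIO);
        return 0;
    }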
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index ec4c39952e84..34c0142caf6a 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -420,15 +420,14 @@ static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent,
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
__u32 csum;
- __le32 save_csum;
int size;
+ __u32 dummy_csum = 0;
+ int offset = offsetof(struct dx_tail, dt_checksum);
size = count_offset + (count * sizeof(struct dx_entry));
- save_csum = t->dt_checksum;
- t->dt_checksum = 0;
csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
- csum = ext4_chksum(sbi, csum, (__u8 *)t, sizeof(struct dx_tail));
- t->dt_checksum = save_csum;
+ csum = ext4_chksum(sbi, csum, (__u8 *)t, offset);
+ csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
return cpu_to_le32(csum);
}
@@ -446,14 +445,14 @@ static int ext4_dx_csum_verify(struct inode *inode,
c = get_dx_countlimit(inode, dirent, &count_offset);
if (!c) {
EXT4_ERROR_INODE(inode, "dir seems corrupt? Run e2fsck -D.");
- return 1;
+ return 0;
}
limit = le16_to_cpu(c->limit);
count = le16_to_cpu(c->count);
if (count_offset + (limit * sizeof(struct dx_entry)) >
EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
warn_no_space_for_csum(inode);
- return 1;
+ return 0;
}
t = (struct dx_tail *)(((struct dx_entry *)c) + limit);
@@ -612,19 +611,19 @@ static struct stats dx_show_leaf(struct inode *dir,
#ifdef CONFIG_EXT4_FS_ENCRYPTION
int len;
char *name;
- struct ext4_str fname_crypto_str
- = {.name = NULL, .len = 0};
+ struct fscrypt_str fname_crypto_str =
+ FSTR_INIT(NULL, 0);
int res = 0;
name = de->name;
len = de->name_len;
- if (ext4_encrypted_inode(inode))
- res = ext4_get_encryption_info(dir);
+ if (ext4_encrypted_inode(dir))
+ res = fscrypt_get_encryption_info(dir);
if (res) {
printk(KERN_WARNING "Error setting up"
" fname crypto: %d\n", res);
}
- if (ctx == NULL) {
+ if (!fscrypt_has_encryption_key(dir)) {
/* Directory is not encrypted */
ext4fs_dirhash(de->name,
de->name_len, &h);
@@ -633,19 +632,21 @@ static struct stats dx_show_leaf(struct inode *dir,
(unsigned) ((char *) de
- base));
} else {
+ struct fscrypt_str de_name =
+ FSTR_INIT(name, len);
+
/* Directory is encrypted */
- res = ext4_fname_crypto_alloc_buffer(
- ctx, de->name_len,
+ res = fscrypt_fname_alloc_buffer(
+ dir, len,
&fname_crypto_str);
- if (res < 0) {
+ if (res < 0)
printk(KERN_WARNING "Error "
"allocating crypto "
"buffer--skipping "
"crypto\n");
- ctx = NULL;
- }
- res = ext4_fname_disk_to_usr(ctx, NULL, de,
- &fname_crypto_str);
+ res = fscrypt_fname_disk_to_usr(dir,
+ 0, 0, &de_name,
+ &fname_crypto_str);
if (res < 0) {
printk(KERN_WARNING "Error "
"converting filename "
@@ -662,8 +663,8 @@ static struct stats dx_show_leaf(struct inode *dir,
printk("%*.s:(E)%x.%u ", len, name,
h.hash, (unsigned) ((char *) de
- base));
- ext4_fname_crypto_free_buffer(
- &fname_crypto_str);
+ fscrypt_fname_free_buffer(
+ &fname_crypto_str);
}
#else
int len = de->name_len;
@@ -952,7 +953,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
struct buffer_head *bh;
struct ext4_dir_entry_2 *de, *top;
int err = 0, count = 0;
- struct ext4_str fname_crypto_str = {.name = NULL, .len = 0}, tmp_str;
+ struct fscrypt_str fname_crypto_str = FSTR_INIT(NULL, 0), tmp_str;
dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n",
(unsigned long)block));
@@ -967,12 +968,12 @@ static int htree_dirblock_to_tree(struct file *dir_file,
#ifdef CONFIG_EXT4_FS_ENCRYPTION
/* Check if the directory is encrypted */
if (ext4_encrypted_inode(dir)) {
- err = ext4_get_encryption_info(dir);
+ err = fscrypt_get_encryption_info(dir);
if (err < 0) {
brelse(bh);
return err;
}
- err = ext4_fname_crypto_alloc_buffer(dir, EXT4_NAME_LEN,
+ err = fscrypt_fname_alloc_buffer(dir, EXT4_NAME_LEN,
&fname_crypto_str);
if (err < 0) {
brelse(bh);
@@ -1003,10 +1004,13 @@ static int htree_dirblock_to_tree(struct file *dir_file,
&tmp_str);
} else {
int save_len = fname_crypto_str.len;
+ struct fscrypt_str de_name = FSTR_INIT(de->name,
+ de->name_len);
/* Directory is encrypted */
- err = ext4_fname_disk_to_usr(dir, hinfo, de,
- &fname_crypto_str);
+ err = fscrypt_fname_disk_to_usr(dir, hinfo->hash,
+ hinfo->minor_hash, &de_name,
+ &fname_crypto_str);
if (err < 0) {
count = err;
goto errout;
@@ -1025,7 +1029,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
errout:
brelse(bh);
#ifdef CONFIG_EXT4_FS_ENCRYPTION
- ext4_fname_crypto_free_buffer(&fname_crypto_str);
+ fscrypt_fname_free_buffer(&fname_crypto_str);
#endif
return count;
}
@@ -1050,7 +1054,7 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
int count = 0;
int ret, err;
__u32 hashval;
- struct ext4_str tmp_str;
+ struct fscrypt_str tmp_str;
dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n",
start_hash, start_minor_hash));
@@ -1443,7 +1447,8 @@ restart:
}
bh_use[ra_max] = bh;
if (bh)
- ll_rw_block(READ | REQ_META | REQ_PRIO,
+ ll_rw_block(REQ_OP_READ,
+ REQ_META | REQ_PRIO,
1, &bh);
}
}
@@ -1563,26 +1568,23 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
struct ext4_dir_entry_2 *de;
struct buffer_head *bh;
- if (ext4_encrypted_inode(dir)) {
- int res = ext4_get_encryption_info(dir);
+ if (ext4_encrypted_inode(dir)) {
+ int res = fscrypt_get_encryption_info(dir);
/*
- * This should be a properly defined flag for
- * dentry->d_flags when we uplift this to the VFS.
- * d_fsdata is set to (void *) 1 if if the dentry is
+ * DCACHE_ENCRYPTED_WITH_KEY is set if the dentry is
* created while the directory was encrypted and we
- * don't have access to the key.
+ * have access to the key.
*/
- dentry->d_fsdata = NULL;
- if (ext4_encryption_info(dir))
- dentry->d_fsdata = (void *) 1;
- d_set_d_op(dentry, &ext4_encrypted_d_ops);
- if (res && res != -ENOKEY)
- return ERR_PTR(res);
- }
+ if (fscrypt_has_encryption_key(dir))
+ fscrypt_set_encrypted_dentry(dentry);
+ fscrypt_set_d_op(dentry);
+ if (res && res != -ENOKEY)
+ return ERR_PTR(res);
+ }
- if (dentry->d_name.len > EXT4_NAME_LEN)
- return ERR_PTR(-ENAMETOOLONG);
+ if (dentry->d_name.len > EXT4_NAME_LEN)
+ return ERR_PTR(-ENAMETOOLONG);
bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
if (IS_ERR(bh))
@@ -1609,11 +1611,9 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
}
if (!IS_ERR(inode) && ext4_encrypted_inode(dir) &&
(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
- !ext4_is_child_context_consistent_with_parent(dir,
- inode)) {
+ !fscrypt_has_permitted_context(dir, inode)) {
int nokey = ext4_encrypted_inode(inode) &&
- !ext4_encryption_info(inode);
-
+ !fscrypt_has_encryption_key(inode);
iput(inode);
if (nokey)
return ERR_PTR(-ENOKEY);
@@ -2690,30 +2690,30 @@ out_stop:
/*
* routine to check that the specified directory is empty (for rmdir)
*/
-int ext4_empty_dir(struct inode *inode)
+bool ext4_empty_dir(struct inode *inode)
{
unsigned int offset;
struct buffer_head *bh;
struct ext4_dir_entry_2 *de, *de1;
struct super_block *sb;
- int err = 0;
if (ext4_has_inline_data(inode)) {
int has_inline_data = 1;
+ int ret;
- err = empty_inline_dir(inode, &has_inline_data);
+ ret = empty_inline_dir(inode, &has_inline_data);
if (has_inline_data)
- return err;
+ return ret;
}
sb = inode->i_sb;
if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2)) {
EXT4_ERROR_INODE(inode, "invalid size");
- return 1;
+ return true;
}
bh = ext4_read_dirblock(inode, 0, EITHER);
if (IS_ERR(bh))
- return 1;
+ return true;
de = (struct ext4_dir_entry_2 *) bh->b_data;
de1 = ext4_next_entry(de, sb->s_blocksize);
@@ -2722,7 +2722,7 @@ int ext4_empty_dir(struct inode *inode)
strcmp(".", de->name) || strcmp("..", de1->name)) {
ext4_warning_inode(inode, "directory missing '.' and/or '..'");
brelse(bh);
- return 1;
+ return true;
}
offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize) +
ext4_rec_len_from_disk(de1->rec_len, sb->s_blocksize);
@@ -2730,12 +2730,11 @@ int ext4_empty_dir(struct inode *inode)
while (offset < inode->i_size) {
if ((void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
unsigned int lblock;
- err = 0;
brelse(bh);
lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb);
bh = ext4_read_dirblock(inode, lblock, EITHER);
if (IS_ERR(bh))
- return 1;
+ return true;
de = (struct ext4_dir_entry_2 *) bh->b_data;
}
if (ext4_check_dir_entry(inode, NULL, de, bh,
@@ -2747,13 +2746,13 @@ int ext4_empty_dir(struct inode *inode)
}
if (le32_to_cpu(de->inode)) {
brelse(bh);
- return 0;
+ return false;
}
offset += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
de = ext4_next_entry(de, sb->s_blocksize);
}
brelse(bh);
- return 1;
+ return true;
}
/*
@@ -3076,8 +3075,8 @@ static int ext4_symlink(struct inode *dir,
int err, len = strlen(symname);
int credits;
bool encryption_required;
- struct ext4_str disk_link;
- struct ext4_encrypted_symlink_data *sd = NULL;
+ struct fscrypt_str disk_link;
+ struct fscrypt_symlink_data *sd = NULL;
disk_link.len = len + 1;
disk_link.name = (char *) symname;
@@ -3085,13 +3084,13 @@ static int ext4_symlink(struct inode *dir,
encryption_required = (ext4_encrypted_inode(dir) ||
DUMMY_ENCRYPTION_ENABLED(EXT4_SB(dir->i_sb)));
if (encryption_required) {
- err = ext4_get_encryption_info(dir);
+ err = fscrypt_get_encryption_info(dir);
if (err)
return err;
- if (ext4_encryption_info(dir) == NULL)
+ if (!fscrypt_has_encryption_key(dir))
return -EPERM;
- disk_link.len = (ext4_fname_encrypted_size(dir, len) +
- sizeof(struct ext4_encrypted_symlink_data));
+ disk_link.len = (fscrypt_fname_encrypted_size(dir, len) +
+ sizeof(struct fscrypt_symlink_data));
sd = kzalloc(disk_link.len, GFP_KERNEL);
if (!sd)
return -ENOMEM;
@@ -3139,13 +3138,12 @@ static int ext4_symlink(struct inode *dir,
if (encryption_required) {
struct qstr istr;
- struct ext4_str ostr;
+ struct fscrypt_str ostr =
+ FSTR_INIT(sd->encrypted_path, disk_link.len);
istr.name = (const unsigned char *) symname;
istr.len = len;
- ostr.name = sd->encrypted_path;
- ostr.len = disk_link.len;
- err = ext4_fname_usr_to_disk(inode, &istr, &ostr);
+ err = fscrypt_fname_usr_to_disk(inode, &istr, &ostr);
if (err < 0)
goto err_drop_inode;
sd->len = cpu_to_le16(ostr.len);
@@ -3234,7 +3232,7 @@ static int ext4_link(struct dentry *old_dentry,
if (inode->i_nlink >= EXT4_LINK_MAX)
return -EMLINK;
if (ext4_encrypted_inode(dir) &&
- !ext4_is_child_context_consistent_with_parent(dir, inode))
+ !fscrypt_has_permitted_context(dir, inode))
return -EPERM;
if ((ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT)) &&
@@ -3557,8 +3555,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
if ((old.dir != new.dir) &&
ext4_encrypted_inode(new.dir) &&
- !ext4_is_child_context_consistent_with_parent(new.dir,
- old.inode)) {
+ !fscrypt_has_permitted_context(new.dir, old.inode)) {
retval = -EPERM;
goto end_rename;
}
@@ -3730,10 +3727,8 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
if ((ext4_encrypted_inode(old_dir) ||
ext4_encrypted_inode(new_dir)) &&
(old_dir != new_dir) &&
- (!ext4_is_child_context_consistent_with_parent(new_dir,
- old.inode) ||
- !ext4_is_child_context_consistent_with_parent(old_dir,
- new.inode)))
+ (!fscrypt_has_permitted_context(new_dir, old.inode) ||
+ !fscrypt_has_permitted_context(old_dir, new.inode)))
return -EPERM;
if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT) &&
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 2a01df9cc1c3..a6132a730967 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -24,6 +24,7 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
+#include <linux/fscrypto.h>
#include "ext4_jbd2.h"
#include "xattr.h"
@@ -67,7 +68,6 @@ static void ext4_finish_bio(struct bio *bio)
struct page *page = bvec->bv_page;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
struct page *data_page = NULL;
- struct ext4_crypto_ctx *ctx = NULL;
#endif
struct buffer_head *bh, *head;
unsigned bio_start = bvec->bv_offset;
@@ -82,8 +82,7 @@ static void ext4_finish_bio(struct bio *bio)
if (!page->mapping) {
/* The bounce data pages are unmapped. */
data_page = page;
- ctx = (struct ext4_crypto_ctx *)page_private(data_page);
- page = ctx->w.control_page;
+ fscrypt_pullback_bio_page(&page, false);
}
#endif
@@ -113,8 +112,8 @@ static void ext4_finish_bio(struct bio *bio)
local_irq_restore(flags);
if (!under_io) {
#ifdef CONFIG_EXT4_FS_ENCRYPTION
- if (ctx)
- ext4_restore_control_page(data_page);
+ if (data_page)
+ fscrypt_restore_control_page(data_page);
#endif
end_page_writeback(page);
}
@@ -340,9 +339,10 @@ void ext4_io_submit(struct ext4_io_submit *io)
struct bio *bio = io->io_bio;
if (bio) {
- int io_op = io->io_wbc->sync_mode == WB_SYNC_ALL ?
- WRITE_SYNC : WRITE;
- submit_bio(io_op, io->io_bio);
+ int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ?
+ WRITE_SYNC : 0;
+ bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
+ submit_bio(io->io_bio);
}
io->io_bio = NULL;
}
@@ -472,7 +472,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
gfp_t gfp_flags = GFP_NOFS;
retry_encrypt:
- data_page = ext4_encrypt(inode, page, gfp_flags);
+ data_page = fscrypt_encrypt_page(inode, page, gfp_flags);
if (IS_ERR(data_page)) {
ret = PTR_ERR(data_page);
if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
@@ -510,7 +510,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
if (ret) {
out:
if (data_page)
- ext4_restore_control_page(data_page);
+ fscrypt_restore_control_page(data_page);
printk_ratelimited(KERN_ERR "%s: ret = %d\n", __func__, ret);
redirty_page_for_writepage(wbc, page);
do {
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index dc54a4b60eba..a81b829d56de 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -46,37 +46,6 @@
#include "ext4.h"
-/*
- * Call ext4_decrypt on every single page, reusing the encryption
- * context.
- */
-static void completion_pages(struct work_struct *work)
-{
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
- struct ext4_crypto_ctx *ctx =
- container_of(work, struct ext4_crypto_ctx, r.work);
- struct bio *bio = ctx->r.bio;
- struct bio_vec *bv;
- int i;
-
- bio_for_each_segment_all(bv, bio, i) {
- struct page *page = bv->bv_page;
-
- int ret = ext4_decrypt(page);
- if (ret) {
- WARN_ON_ONCE(1);
- SetPageError(page);
- } else
- SetPageUptodate(page);
- unlock_page(page);
- }
- ext4_release_crypto_ctx(ctx);
- bio_put(bio);
-#else
- BUG();
-#endif
-}
-
static inline bool ext4_bio_encrypted(struct bio *bio)
{
#ifdef CONFIG_EXT4_FS_ENCRYPTION
@@ -104,14 +73,10 @@ static void mpage_end_io(struct bio *bio)
int i;
if (ext4_bio_encrypted(bio)) {
- struct ext4_crypto_ctx *ctx = bio->bi_private;
-
if (bio->bi_error) {
- ext4_release_crypto_ctx(ctx);
+ fscrypt_release_ctx(bio->bi_private);
} else {
- INIT_WORK(&ctx->r.work, completion_pages);
- ctx->r.bio = bio;
- queue_work(ext4_read_workqueue, &ctx->r.work);
+ fscrypt_decrypt_bio_pages(bio->bi_private, bio);
return;
}
}
@@ -135,7 +100,6 @@ int ext4_mpage_readpages(struct address_space *mapping,
unsigned nr_pages)
{
struct bio *bio = NULL;
- unsigned page_idx;
sector_t last_block_in_bio = 0;
struct inode *inode = mapping->host;
@@ -157,7 +121,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
map.m_len = 0;
map.m_flags = 0;
- for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {
+ for (; nr_pages; nr_pages--) {
int fully_mapped = 1;
unsigned first_hole = blocks_per_page;
@@ -166,7 +130,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
page = list_entry(pages->prev, struct page, lru);
list_del(&page->lru);
if (add_to_page_cache_lru(page, mapping, page->index,
- mapping_gfp_constraint(mapping, GFP_KERNEL)))
+ readahead_gfp_mask(mapping)))
goto next_page;
}
@@ -271,15 +235,15 @@ int ext4_mpage_readpages(struct address_space *mapping,
*/
if (bio && (last_block_in_bio != blocks[0] - 1)) {
submit_and_realloc:
- submit_bio(READ, bio);
+ submit_bio(bio);
bio = NULL;
}
if (bio == NULL) {
- struct ext4_crypto_ctx *ctx = NULL;
+ struct fscrypt_ctx *ctx = NULL;
if (ext4_encrypted_inode(inode) &&
S_ISREG(inode->i_mode)) {
- ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
+ ctx = fscrypt_get_ctx(inode, GFP_NOFS);
if (IS_ERR(ctx))
goto set_error_page;
}
@@ -287,13 +251,14 @@ int ext4_mpage_readpages(struct address_space *mapping,
min_t(int, nr_pages, BIO_MAX_PAGES));
if (!bio) {
if (ctx)
- ext4_release_crypto_ctx(ctx);
+ fscrypt_release_ctx(ctx);
goto set_error_page;
}
bio->bi_bdev = bdev;
bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
bio->bi_end_io = mpage_end_io;
bio->bi_private = ctx;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
}
length = first_hole << blkbits;
@@ -303,14 +268,14 @@ int ext4_mpage_readpages(struct address_space *mapping,
if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
(relative_block == map.m_len)) ||
(first_hole != blocks_per_page)) {
- submit_bio(READ, bio);
+ submit_bio(bio);
bio = NULL;
} else
last_block_in_bio = blocks[blocks_per_page - 1];
goto next_page;
confused:
if (bio) {
- submit_bio(READ, bio);
+ submit_bio(bio);
bio = NULL;
}
if (!PageUptodate(page))
@@ -323,6 +288,6 @@ int ext4_mpage_readpages(struct address_space *mapping,
}
BUG_ON(pages && !list_empty(pages));
if (bio)
- submit_bio(READ, bio);
+ submit_bio(bio);
return 0;
}
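With the workqueue plumbing gone from ext4, the read-completion dispatch above reduces to: failed encrypted bios drop the crypto context at once, successful ones are handed to fscrypt's decryption worker (which finishes the pages later), and only plaintext bios are completed inline. A compilable caricature of that shape, with all names standing in for the bio/fscrypt machinery:

    #include <stdbool.h>
    #include <stdio.h>

    struct bio {
        bool encrypted;
        int  error;
        void *ctx;                      /* crypto context, if any */
    };

    static void release_ctx(void *ctx)        { printf("release ctx %p\n", ctx); }
    static void decrypt_async(struct bio *b)  { printf("queued decrypt\n"); }
    static void finish_pages(struct bio *b)   { printf("pages done\n"); }

    static void bio_done(struct bio *bio)
    {
        if (bio->encrypted) {
            if (bio->error) {
                release_ctx(bio->ctx);
            } else {
                decrypt_async(bio);
                return;                 /* the worker unlocks the pages */
            }
        }
        finish_pages(bio);
    }

    int main(void)
    {
        struct bio ok    = { true,  0, (void *)0 };
        struct bio plain = { false, 0, (void *)0 };
        bio_done(&ok);                  /* goes to the worker */
        bio_done(&plain);               /* finished inline */
        return 0;
    }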
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 3822a5aedc61..1c593aa0218e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -945,9 +945,6 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
ei->i_datasync_tid = 0;
atomic_set(&ei->i_unwritten, 0);
INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
- ei->i_crypt_info = NULL;
-#endif
return &ei->vfs_inode;
}
@@ -1026,8 +1023,7 @@ void ext4_clear_inode(struct inode *inode)
EXT4_I(inode)->jinode = NULL;
}
#ifdef CONFIG_EXT4_FS_ENCRYPTION
- if (EXT4_I(inode)->i_crypt_info)
- ext4_free_encryption_info(inode, EXT4_I(inode)->i_crypt_info);
+ fscrypt_put_encryption_info(inode, NULL);
#endif
}
@@ -1094,6 +1090,90 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
return try_to_free_buffers(page);
}
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+static int ext4_get_context(struct inode *inode, void *ctx, size_t len)
+{
+ return ext4_xattr_get(inode, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len);
+}
+
+static int ext4_key_prefix(struct inode *inode, u8 **key)
+{
+ *key = EXT4_SB(inode->i_sb)->key_prefix;
+ return EXT4_SB(inode->i_sb)->key_prefix_size;
+}
+
+static int ext4_prepare_context(struct inode *inode)
+{
+ return ext4_convert_inline_data(inode);
+}
+
+static int ext4_set_context(struct inode *inode, const void *ctx, size_t len,
+ void *fs_data)
+{
+ handle_t *handle;
+ int res, res2;
+
+ /* fs_data is NULL when this is called internally by fscrypt. */
+ if (fs_data) {
+ res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx,
+ len, 0);
+ if (!res) {
+ ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
+ ext4_clear_inode_state(inode,
+ EXT4_STATE_MAY_INLINE_DATA);
+ }
+ return res;
+ }
+
+ handle = ext4_journal_start(inode, EXT4_HT_MISC,
+ ext4_jbd2_credits_xattr(inode));
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
+ EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx,
+ len, 0);
+ if (!res) {
+ ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
+ res = ext4_mark_inode_dirty(handle, inode);
+ if (res)
+ EXT4_ERROR_INODE(inode, "Failed to mark inode dirty");
+ }
+ res2 = ext4_journal_stop(handle);
+ if (!res)
+ res = res2;
+ return res;
+}
+
+static int ext4_dummy_context(struct inode *inode)
+{
+ return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb));
+}
+
+static unsigned ext4_max_namelen(struct inode *inode)
+{
+ return S_ISLNK(inode->i_mode) ? inode->i_sb->s_blocksize :
+ EXT4_NAME_LEN;
+}
+
+static struct fscrypt_operations ext4_cryptops = {
+ .get_context = ext4_get_context,
+ .key_prefix = ext4_key_prefix,
+ .prepare_context = ext4_prepare_context,
+ .set_context = ext4_set_context,
+ .dummy_context = ext4_dummy_context,
+ .is_encrypted = ext4_encrypted_inode,
+ .empty_dir = ext4_empty_dir,
+ .max_namelen = ext4_max_namelen,
+};
+#else
+static struct fscrypt_operations ext4_cryptops = {
+ .is_encrypted = ext4_encrypted_inode,
+};
+#endif
+
#ifdef CONFIG_QUOTA
static char *quotatypes[] = INITQFNAMES;
#define QTYPE2NAME(t) (quotatypes[t])
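The table just added is the whole interface between ext4 and the shared fs/crypto layer: the filesystem supplies callbacks for context storage (xattrs here) and policy queries, hangs the vector off `sb->s_cop` further down, and the generic code only ever calls through it. A stripped-down sketch of the shape, with simplified stand-in types rather than the kernel's fscrypt_operations:

    #include <stddef.h>

    struct inode;                       /* opaque to the generic layer */

    struct crypt_ops {
        int (*get_context)(struct inode *inode, void *ctx, size_t len);
        int (*set_context)(struct inode *inode, const void *ctx,
                           size_t len, void *fs_data);
        int (*is_encrypted)(struct inode *inode);
    };

    struct super {
        const struct crypt_ops *s_cop;  /* filled in by the filesystem */
    };

    /* Generic layer: works unchanged for ext4, f2fs, ... */
    int generic_get_context(struct super *sb, struct inode *inode,
                            void *ctx, size_t len)
    {
        return sb->s_cop->get_context(inode, ctx, len);
    }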
@@ -2068,23 +2148,25 @@ failed:
static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
struct ext4_group_desc *gdp)
{
- int offset;
+ int offset = offsetof(struct ext4_group_desc, bg_checksum);
__u16 crc = 0;
__le32 le_group = cpu_to_le32(block_group);
struct ext4_sb_info *sbi = EXT4_SB(sb);
if (ext4_has_metadata_csum(sbi->s_sb)) {
/* Use new metadata_csum algorithm */
- __le16 save_csum;
__u32 csum32;
+ __u16 dummy_csum = 0;
- save_csum = gdp->bg_checksum;
- gdp->bg_checksum = 0;
csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
sizeof(le_group));
- csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp,
- sbi->s_desc_size);
- gdp->bg_checksum = save_csum;
+ csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp, offset);
+ csum32 = ext4_chksum(sbi, csum32, (__u8 *)&dummy_csum,
+ sizeof(dummy_csum));
+ offset += sizeof(dummy_csum);
+ if (offset < sbi->s_desc_size)
+ csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp + offset,
+ sbi->s_desc_size - offset);
crc = csum32 & 0xFFFF;
goto out;
@@ -2094,8 +2176,6 @@ static __le16 ext4_group_desc_csum(struct super_block *sb, __u32 block_group,
if (!ext4_has_feature_gdt_csum(sb))
return 0;
- offset = offsetof(struct ext4_group_desc, bg_checksum);
-
crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
crc = crc16(crc, (__u8 *)gdp, offset);
@@ -2278,6 +2358,16 @@ static void ext4_orphan_cleanup(struct super_block *sb,
while (es->s_last_orphan) {
struct inode *inode;
+ /*
+ * We may have encountered an error during cleanup; if
+ * so, skip the rest.
+ */
+ if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
+ jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
+ es->s_last_orphan = 0;
+ break;
+ }
+
inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
if (IS_ERR(inode)) {
es->s_last_orphan = 0;
@@ -3416,6 +3506,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount;
}
+ if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
+ ext4_msg(sb, KERN_ERR,
+ "Number of reserved GDT blocks insanely large: %d",
+ le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks));
+ goto failed_mount;
+ }
+
if (sbi->s_mount_opt & EXT4_MOUNT_DAX) {
err = bdev_dax_supported(sb, blocksize);
if (err)
@@ -3686,6 +3783,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
sb->s_op = &ext4_sops;
sb->s_export_op = &ext4_export_ops;
sb->s_xattr = ext4_xattr_handlers;
+ sb->s_cop = &ext4_cryptops;
#ifdef CONFIG_QUOTA
sb->dq_op = &ext4_quota_operations;
if (ext4_has_feature_quota(sb))
@@ -3996,6 +4094,11 @@ no_journal:
ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10);
kfree(orig_data);
+#ifdef CONFIG_EXT4_FS_ENCRYPTION
+ memcpy(sbi->key_prefix, EXT4_KEY_DESC_PREFIX,
+ EXT4_KEY_DESC_PREFIX_SIZE);
+ sbi->key_prefix_size = EXT4_KEY_DESC_PREFIX_SIZE;
+#endif
return 0;
cantfind_ext4:
@@ -4204,7 +4307,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
goto out_bdev;
}
journal->j_private = sb;
- ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
+ ll_rw_block(REQ_OP_READ, REQ_META | REQ_PRIO, 1, &journal->j_sb_buffer);
wait_on_buffer(journal->j_sb_buffer);
if (!buffer_uptodate(journal->j_sb_buffer)) {
ext4_msg(sb, KERN_ERR, "I/O error on journal device");
@@ -4327,20 +4430,6 @@ static int ext4_commit_super(struct super_block *sb, int sync)
if (!sbh || block_device_ejected(sb))
return error;
- if (buffer_write_io_error(sbh)) {
- /*
- * Oh, dear. A previous attempt to write the
- * superblock failed. This could happen because the
- * USB device was yanked out. Or it could happen to
- * be a transient write error and maybe the block will
- * be remapped. Nothing we can do but to retry the
- * write and hope for the best.
- */
- ext4_msg(sb, KERN_ERR, "previous I/O error to "
- "superblock detected");
- clear_buffer_write_io_error(sbh);
- set_buffer_uptodate(sbh);
- }
/*
* If the file system is mounted read-only, don't update the
* superblock write time. This avoids updating the superblock
@@ -4371,7 +4460,23 @@ static int ext4_commit_super(struct super_block *sb, int sync)
&EXT4_SB(sb)->s_freeinodes_counter));
BUFFER_TRACE(sbh, "marking dirty");
ext4_superblock_csum_set(sb);
+ lock_buffer(sbh);
+ if (buffer_write_io_error(sbh)) {
+ /*
+ * Oh, dear. A previous attempt to write the
+ * superblock failed. This could happen because the
+ * USB device was yanked out. Or it could happen to
+ * be a transient write error and maybe the block will
+ * be remapped. Nothing we can do but to retry the
+ * write and hope for the best.
+ */
+ ext4_msg(sb, KERN_ERR, "previous I/O error to "
+ "superblock detected");
+ clear_buffer_write_io_error(sbh);
+ set_buffer_uptodate(sbh);
+ }
mark_buffer_dirty(sbh);
+ unlock_buffer(sbh);
if (sync) {
error = __sync_dirty_buffer(sbh,
test_opt(sb, BARRIER) ? WRITE_FUA : WRITE_SYNC);
@@ -5422,7 +5527,6 @@ out5:
static void __exit ext4_exit_fs(void)
{
- ext4_exit_crypto();
ext4_destroy_lazyinit_thread();
unregister_as_ext2();
unregister_as_ext3();
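The ext4_commit_super() hunk above relocates the write-IO-error handling inside lock_buffer()/unlock_buffer(), so the test-and-clear can no longer race with an end_io handler setting the error bit between the check and mark_buffer_dirty(). A userspace sketch of test-and-clear under the object's lock, assuming simplified stand-in types:

    #include <pthread.h>
    #include <stdbool.h>

    struct buf {
        pthread_mutex_t lock;
        bool write_io_error;
        bool uptodate;
        bool dirty;
    };

    void mark_dirty_careful(struct buf *b)
    {
        pthread_mutex_lock(&b->lock);
        if (b->write_io_error) {
            /* a previous write failed; retry from a clean state */
            b->write_io_error = false;
            b->uptodate = true;
        }
        b->dirty = true;
        pthread_mutex_unlock(&b->lock);
    }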
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index 75ed5c2f0c16..4d83d9e05f2e 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -22,23 +22,22 @@
#include "ext4.h"
#include "xattr.h"
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
static const char *ext4_encrypted_get_link(struct dentry *dentry,
struct inode *inode,
struct delayed_call *done)
{
struct page *cpage = NULL;
char *caddr, *paddr = NULL;
- struct ext4_str cstr, pstr;
- struct ext4_encrypted_symlink_data *sd;
+ struct fscrypt_str cstr, pstr;
+ struct fscrypt_symlink_data *sd;
loff_t size = min_t(loff_t, i_size_read(inode), PAGE_SIZE - 1);
int res;
- u32 plen, max_size = inode->i_sb->s_blocksize;
+ u32 max_size = inode->i_sb->s_blocksize;
if (!dentry)
return ERR_PTR(-ECHILD);
- res = ext4_get_encryption_info(inode);
+ res = fscrypt_get_encryption_info(inode);
if (res)
return ERR_PTR(res);
@@ -54,30 +53,27 @@ static const char *ext4_encrypted_get_link(struct dentry *dentry,
}
/* Symlink is encrypted */
- sd = (struct ext4_encrypted_symlink_data *)caddr;
+ sd = (struct fscrypt_symlink_data *)caddr;
cstr.name = sd->encrypted_path;
cstr.len = le16_to_cpu(sd->len);
- if ((cstr.len +
- sizeof(struct ext4_encrypted_symlink_data) - 1) >
- max_size) {
+ if ((cstr.len + sizeof(struct fscrypt_symlink_data) - 1) > max_size) {
/* Symlink data on the disk is corrupted */
res = -EFSCORRUPTED;
goto errout;
}
- plen = (cstr.len < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2) ?
- EXT4_FNAME_CRYPTO_DIGEST_SIZE*2 : cstr.len;
- paddr = kmalloc(plen + 1, GFP_NOFS);
- if (!paddr) {
- res = -ENOMEM;
+
+ res = fscrypt_fname_alloc_buffer(inode, cstr.len, &pstr);
+ if (res)
goto errout;
- }
- pstr.name = paddr;
- pstr.len = plen;
- res = _ext4_fname_disk_to_usr(inode, NULL, &cstr, &pstr);
+
+ res = fscrypt_fname_disk_to_usr(inode, 0, 0, &cstr, &pstr);
if (res < 0)
goto errout;
+
+ paddr = pstr.name;
+
/* Null-terminate the name */
- if (res <= plen)
+ if (res <= pstr.len)
paddr[res] = '\0';
if (cpage)
put_page(cpage);
@@ -99,7 +95,6 @@ const struct inode_operations ext4_encrypted_symlink_inode_operations = {
.listxattr = ext4_listxattr,
.removexattr = generic_removexattr,
};
-#endif
const struct inode_operations ext4_symlink_inode_operations = {
.readlink = generic_readlink,
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 1420a3c614af..73bcfd41f5f2 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -359,7 +359,6 @@ static int name##_open(struct inode *inode, struct file *file) \
} \
\
static const struct file_operations ext4_seq_##name##_fops = { \
- .owner = THIS_MODULE, \
.open = name##_open, \
.read = seq_read, \
.llseek = seq_lseek, \
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index e79bd32b9b79..39e9cfb1b371 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -121,17 +121,18 @@ static __le32 ext4_xattr_block_csum(struct inode *inode,
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
__u32 csum;
- __le32 save_csum;
__le64 dsk_block_nr = cpu_to_le64(block_nr);
+ __u32 dummy_csum = 0;
+ int offset = offsetof(struct ext4_xattr_header, h_checksum);
- save_csum = hdr->h_checksum;
- hdr->h_checksum = 0;
csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr,
sizeof(dsk_block_nr));
- csum = ext4_chksum(sbi, csum, (__u8 *)hdr,
- EXT4_BLOCK_SIZE(inode->i_sb));
+ csum = ext4_chksum(sbi, csum, (__u8 *)hdr, offset);
+ csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
+ offset += sizeof(dummy_csum);
+ csum = ext4_chksum(sbi, csum, (__u8 *)hdr + offset,
+ EXT4_BLOCK_SIZE(inode->i_sb) - offset);
- hdr->h_checksum = save_csum;
return cpu_to_le32(csum);
}
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index a31c7e859af6..4dcc9e28dc5c 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -201,7 +201,6 @@ struct posix_acl *f2fs_get_acl(struct inode *inode, int type)
static int __f2fs_set_acl(struct inode *inode, int type,
struct posix_acl *acl, struct page *ipage)
{
- struct f2fs_inode_info *fi = F2FS_I(inode);
int name_index;
void *value = NULL;
size_t size = 0;
@@ -214,7 +213,7 @@ static int __f2fs_set_acl(struct inode *inode, int type,
error = posix_acl_equiv_mode(acl, &inode->i_mode);
if (error < 0)
return error;
- set_acl_inode(fi, inode->i_mode);
+ set_acl_inode(inode, inode->i_mode);
if (error == 0)
acl = NULL;
}
@@ -233,7 +232,7 @@ static int __f2fs_set_acl(struct inode *inode, int type,
if (acl) {
value = f2fs_acl_to_disk(acl, &size);
if (IS_ERR(value)) {
- clear_inode_flag(fi, FI_ACL_MODE);
+ clear_inode_flag(inode, FI_ACL_MODE);
return (int)PTR_ERR(value);
}
}
@@ -244,7 +243,7 @@ static int __f2fs_set_acl(struct inode *inode, int type,
if (!error)
set_cached_acl(inode, type, acl);
- clear_inode_flag(fi, FI_ACL_MODE);
+ clear_inode_flag(inode, FI_ACL_MODE);
return error;
}
@@ -385,6 +384,8 @@ int f2fs_init_acl(struct inode *inode, struct inode *dir, struct page *ipage,
if (error)
return error;
+ f2fs_mark_inode_dirty_sync(inode);
+
if (default_acl) {
error = __f2fs_set_acl(inode, ACL_TYPE_DEFAULT, default_acl,
ipage);
diff --git a/fs/f2fs/acl.h b/fs/f2fs/acl.h
index 997ca8edb6cb..b2334d11dae8 100644
--- a/fs/f2fs/acl.h
+++ b/fs/f2fs/acl.h
@@ -37,7 +37,7 @@ struct f2fs_acl_header {
#ifdef CONFIG_F2FS_FS_POSIX_ACL
extern struct posix_acl *f2fs_get_acl(struct inode *, int);
-extern int f2fs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
+extern int f2fs_set_acl(struct inode *, struct posix_acl *, int);
extern int f2fs_init_acl(struct inode *, struct inode *, struct page *,
struct page *);
#else
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 389160049993..f94d01e7d001 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -48,7 +48,8 @@ repeat:
goto repeat;
}
f2fs_wait_on_page_writeback(page, META, true);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
return page;
}
@@ -63,14 +64,15 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
struct f2fs_io_info fio = {
.sbi = sbi,
.type = META,
- .rw = READ_SYNC | REQ_META | REQ_PRIO,
+ .op = REQ_OP_READ,
+ .op_flags = READ_SYNC | REQ_META | REQ_PRIO,
.old_blkaddr = index,
.new_blkaddr = index,
.encrypted_page = NULL,
};
if (unlikely(!is_meta))
- fio.rw &= ~REQ_META;
+ fio.op_flags &= ~REQ_META;
repeat:
page = f2fs_grab_cache_page(mapping, index, false);
if (!page) {
@@ -157,13 +159,14 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
struct f2fs_io_info fio = {
.sbi = sbi,
.type = META,
- .rw = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : READA,
+ .op = REQ_OP_READ,
+ .op_flags = sync ? (READ_SYNC | REQ_META | REQ_PRIO) : REQ_RAHEAD,
.encrypted_page = NULL,
};
struct blk_plug plug;
if (unlikely(type == META_POR))
- fio.rw &= ~REQ_META;
+ fio.op_flags &= ~REQ_META;
blk_start_plug(&plug);
for (; nrpages-- > 0; blkno++) {
@@ -264,6 +267,7 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+ struct blk_plug plug;
long diff, written;
/* collect a number of dirty meta pages and write together */
@@ -276,7 +280,9 @@ static int f2fs_write_meta_pages(struct address_space *mapping,
/* if mounting is failed, skip writing node pages */
mutex_lock(&sbi->cp_mutex);
diff = nr_pages_to_write(sbi, META, wbc);
+ blk_start_plug(&plug);
written = sync_meta_pages(sbi, META, wbc->nr_to_write);
+ blk_finish_plug(&plug);
mutex_unlock(&sbi->cp_mutex);
wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff);
return 0;
@@ -364,9 +370,10 @@ static int f2fs_set_meta_page_dirty(struct page *page)
{
trace_f2fs_set_page_dirty(page, META);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
if (!PageDirty(page)) {
- __set_page_dirty_nobuffers(page);
+ f2fs_set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_META);
SetPagePrivate(page);
f2fs_trace_pid(page);
@@ -508,10 +515,11 @@ void release_orphan_inode(struct f2fs_sb_info *sbi)
spin_unlock(&im->ino_lock);
}
-void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
+void add_orphan_inode(struct inode *inode)
{
/* add new orphan ino entry into list */
- __add_ino_entry(sbi, ino, ORPHAN_INO);
+ __add_ino_entry(F2FS_I_SB(inode), inode->i_ino, ORPHAN_INO);
+ update_inode_page(inode);
}
void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
@@ -759,28 +767,25 @@ fail_no_cp:
static void __add_dirty_inode(struct inode *inode, enum inode_type type)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct f2fs_inode_info *fi = F2FS_I(inode);
int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;
- if (is_inode_flag_set(fi, flag))
+ if (is_inode_flag_set(inode, flag))
return;
- set_inode_flag(fi, flag);
- list_add_tail(&fi->dirty_list, &sbi->inode_list[type]);
+ set_inode_flag(inode, flag);
+ list_add_tail(&F2FS_I(inode)->dirty_list, &sbi->inode_list[type]);
stat_inc_dirty_inode(sbi, type);
}
static void __remove_dirty_inode(struct inode *inode, enum inode_type type)
{
- struct f2fs_inode_info *fi = F2FS_I(inode);
int flag = (type == DIR_INODE) ? FI_DIRTY_DIR : FI_DIRTY_FILE;
- if (get_dirty_pages(inode) ||
- !is_inode_flag_set(F2FS_I(inode), flag))
+ if (get_dirty_pages(inode) || !is_inode_flag_set(inode, flag))
return;
- list_del_init(&fi->dirty_list);
- clear_inode_flag(fi, flag);
+ list_del_init(&F2FS_I(inode)->dirty_list);
+ clear_inode_flag(inode, flag);
stat_dec_dirty_inode(F2FS_I_SB(inode), type);
}
@@ -793,13 +798,12 @@ void update_dirty_page(struct inode *inode, struct page *page)
!S_ISLNK(inode->i_mode))
return;
- if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH)) {
- spin_lock(&sbi->inode_lock[type]);
+ spin_lock(&sbi->inode_lock[type]);
+ if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH))
__add_dirty_inode(inode, type);
- spin_unlock(&sbi->inode_lock[type]);
- }
-
inode_inc_dirty_pages(inode);
+ spin_unlock(&sbi->inode_lock[type]);
+
SetPagePrivate(page);
f2fs_trace_pid(page);
}
@@ -862,6 +866,34 @@ retry:
goto retry;
}
+int f2fs_sync_inode_meta(struct f2fs_sb_info *sbi)
+{
+ struct list_head *head = &sbi->inode_list[DIRTY_META];
+ struct inode *inode;
+ struct f2fs_inode_info *fi;
+ s64 total = get_pages(sbi, F2FS_DIRTY_IMETA);
+
+ while (total--) {
+ if (unlikely(f2fs_cp_error(sbi)))
+ return -EIO;
+
+ spin_lock(&sbi->inode_lock[DIRTY_META]);
+ if (list_empty(head)) {
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+ return 0;
+ }
+ fi = list_entry(head->next, struct f2fs_inode_info,
+ gdirty_list);
+ inode = igrab(&fi->vfs_inode);
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+ if (inode) {
+ update_inode_page(inode);
+ iput(inode);
+ }
+ };
+ return 0;
+}
+
/*
* Freeze all the FS-operations for checkpoint.
*/
@@ -888,6 +920,14 @@ retry_flush_dents:
goto retry_flush_dents;
}
+ if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
+ f2fs_unlock_all(sbi);
+ err = f2fs_sync_inode_meta(sbi);
+ if (err)
+ goto out;
+ goto retry_flush_dents;
+ }
+
/*
* POR: we should ensure that there are no dirty node pages
* until finishing nat/sit flush.
@@ -912,6 +952,8 @@ out:
static void unblock_operations(struct f2fs_sb_info *sbi)
{
up_write(&sbi->node_write);
+
+ build_free_nids(sbi);
f2fs_unlock_all(sbi);
}
@@ -952,7 +994,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
* This avoids to conduct wrong roll-forward operations and uses
* metapages, so should be called prior to sync_meta_pages below.
*/
- if (discard_next_dnode(sbi, discard_blk))
+ if (!test_opt(sbi, LFS) && discard_next_dnode(sbi, discard_blk))
invalidate = true;
/* Flush all the NAT/SIT pages */
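The blk_start_plug()/blk_finish_plug() pair added around sync_meta_pages() keeps the queued metadata writes in a per-task list and releases them to the device as one batch instead of letting them trickle out. A userspace caricature of plugging, illustrative names only (the real plug lives in the block layer and merges adjacent requests too):

    #include <stdio.h>

    #define MAX_BATCH 16

    struct plug {
        int pending[MAX_BATCH];
        int n;
    };

    static struct plug *current_plug;

    void start_plug(struct plug *p) { p->n = 0; current_plug = p; }

    void submit(int blkno)
    {
        if (current_plug && current_plug->n < MAX_BATCH)
            current_plug->pending[current_plug->n++] = blkno;  /* hold back */
        else
            printf("dispatch %d\n", blkno);                    /* immediate */
    }

    void finish_plug(struct plug *p)
    {
        for (int i = 0; i < p->n; i++)
            printf("dispatch %d\n", p->pending[i]);            /* one batch */
        current_plug = NULL;
    }

    int main(void)
    {
        struct plug p;
        start_plug(&p);
        submit(1); submit(2); submit(3);    /* queued, not dispatched */
        finish_plug(&p);                    /* dispatched together */
        return 0;
    }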
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 9a8bbc1fb1fa..d64d2a515cb2 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -19,6 +19,8 @@
#include <linux/bio.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
+#include <linux/mm.h>
+#include <linux/memcontrol.h>
#include <linux/cleancache.h>
#include "f2fs.h"
@@ -45,7 +47,8 @@ static void f2fs_read_end_io(struct bio *bio)
struct page *page = bvec->bv_page;
if (!bio->bi_error) {
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
} else {
ClearPageUptodate(page);
SetPageError(page);
@@ -97,12 +100,16 @@ static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
return bio;
}
-static inline void __submit_bio(struct f2fs_sb_info *sbi, int rw,
- struct bio *bio)
+static inline void __submit_bio(struct f2fs_sb_info *sbi,
+ struct bio *bio, enum page_type type)
{
- if (!is_read_io(rw))
+ if (!is_read_io(bio_op(bio))) {
atomic_inc(&sbi->nr_wb_bios);
- submit_bio(rw, bio);
+ if (f2fs_sb_mounted_hmsmr(sbi->sb) &&
+ current->plug && (type == DATA || type == NODE))
+ blk_finish_plug(current->plug);
+ }
+ submit_bio(bio);
}
static void __submit_merged_bio(struct f2fs_bio_info *io)
@@ -112,12 +119,14 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
if (!io->bio)
return;
- if (is_read_io(fio->rw))
+ if (is_read_io(fio->op))
trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio);
else
trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio);
- __submit_bio(io->sbi, fio->rw, io->bio);
+ bio_set_op_attrs(io->bio, fio->op, fio->op_flags);
+
+ __submit_bio(io->sbi, io->bio, fio->type);
io->bio = NULL;
}
@@ -183,10 +192,12 @@ static void __f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
/* change META to META_FLUSH in the checkpoint procedure */
if (type >= META_FLUSH) {
io->fio.type = META_FLUSH;
+ io->fio.op = REQ_OP_WRITE;
if (test_opt(sbi, NOBARRIER))
- io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
+ io->fio.op_flags = WRITE_FLUSH | REQ_META | REQ_PRIO;
else
- io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
+ io->fio.op_flags = WRITE_FLUSH_FUA | REQ_META |
+ REQ_PRIO;
}
__submit_merged_bio(io);
out:
@@ -228,14 +239,15 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
f2fs_trace_ios(fio, 0);
/* Allocate a new bio */
- bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->rw));
+ bio = __bio_alloc(fio->sbi, fio->new_blkaddr, 1, is_read_io(fio->op));
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
return -EFAULT;
}
+ bio_set_op_attrs(bio, fio->op, fio->op_flags);
- __submit_bio(fio->sbi, fio->rw, bio);
+ __submit_bio(fio->sbi, bio, fio->type);
return 0;
}
@@ -244,7 +256,7 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
struct f2fs_sb_info *sbi = fio->sbi;
enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
struct f2fs_bio_info *io;
- bool is_read = is_read_io(fio->rw);
+ bool is_read = is_read_io(fio->op);
struct page *bio_page;
io = is_read ? &sbi->read_io : &sbi->write_io[btype];
@@ -256,7 +268,7 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio)
down_write(&io->io_rwsem);
if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 ||
- io->fio.rw != fio->rw))
+ (io->fio.op != fio->op || io->fio.op_flags != fio->op_flags)))
__submit_merged_bio(io);
alloc_new:
if (io->bio == NULL) {
@@ -321,7 +333,7 @@ int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
if (!count)
return 0;
- if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
+ if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count)))
return -ENOSPC;
@@ -343,9 +355,6 @@ int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
if (set_page_dirty(dn->node_page))
dn->node_changed = true;
-
- mark_inode_dirty(dn->inode);
- sync_inode_page(dn);
return 0;
}
@@ -390,7 +399,7 @@ int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index)
}
struct page *get_read_data_page(struct inode *inode, pgoff_t index,
- int rw, bool for_write)
+ int op_flags, bool for_write)
{
struct address_space *mapping = inode->i_mapping;
struct dnode_of_data dn;
@@ -400,7 +409,8 @@ struct page *get_read_data_page(struct inode *inode, pgoff_t index,
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(inode),
.type = DATA,
- .rw = rw,
+ .op = REQ_OP_READ,
+ .op_flags = op_flags,
.encrypted_page = NULL,
};
@@ -440,7 +450,8 @@ got_it:
*/
if (dn.data_blkaddr == NEW_ADDR) {
zero_user_segment(page, 0, PAGE_SIZE);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
unlock_page(page);
return page;
}
@@ -499,14 +510,14 @@ repeat:
/* wait for read completion */
lock_page(page);
- if (unlikely(!PageUptodate(page))) {
- f2fs_put_page(page, 1);
- return ERR_PTR(-EIO);
- }
if (unlikely(page->mapping != mapping)) {
f2fs_put_page(page, 1);
goto repeat;
}
+ if (unlikely(!PageUptodate(page))) {
+ f2fs_put_page(page, 1);
+ return ERR_PTR(-EIO);
+ }
return page;
}
@@ -551,7 +562,8 @@ struct page *get_new_data_page(struct inode *inode,
if (dn.data_blkaddr == NEW_ADDR) {
zero_user_segment(page, 0, PAGE_SIZE);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
} else {
f2fs_put_page(page, 1);
@@ -563,11 +575,8 @@ struct page *get_new_data_page(struct inode *inode,
}
got_it:
if (new_i_size && i_size_read(inode) <
- ((loff_t)(index + 1) << PAGE_SHIFT)) {
- i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
- /* Only the directory inode sets new_i_size */
- set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
- }
+ ((loff_t)(index + 1) << PAGE_SHIFT))
+ f2fs_i_size_write(inode, ((loff_t)(index + 1) << PAGE_SHIFT));
return page;
}
@@ -580,7 +589,7 @@ static int __allocate_data_block(struct dnode_of_data *dn)
pgoff_t fofs;
blkcnt_t count = 1;
- if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
+ if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node);
@@ -605,7 +614,7 @@ alloc:
fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
dn->ofs_in_node;
if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_SHIFT))
- i_size_write(dn->inode,
+ f2fs_i_size_write(dn->inode,
((loff_t)(fofs + 1) << PAGE_SHIFT));
return 0;
}
@@ -654,7 +663,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
unsigned int maxblocks = map->m_len;
struct dnode_of_data dn;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
+ int mode = create ? ALLOC_NODE : LOOKUP_NODE;
pgoff_t pgofs, end_offset, end;
int err = 0, ofs = 1;
unsigned int ofs_in_node, last_ofs_in_node;
@@ -717,8 +726,7 @@ next_block:
} else {
err = __allocate_data_block(&dn);
if (!err) {
- set_inode_flag(F2FS_I(inode),
- FI_APPEND_WRITE);
+ set_inode_flag(inode, FI_APPEND_WRITE);
allocated = true;
}
}
@@ -789,8 +797,6 @@ skip:
else if (dn.ofs_in_node < end_offset)
goto next_block;
- if (allocated)
- sync_inode_page(&dn);
f2fs_put_dnode(&dn);
if (create) {
@@ -801,8 +807,6 @@ skip:
goto next_dnode;
sync_out:
- if (allocated)
- sync_inode_page(&dn);
f2fs_put_dnode(&dn);
unlock_out:
if (create) {
@@ -962,6 +966,37 @@ out:
return ret;
}
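+/*
+ * Allocate a read bio targeting @blkaddr; for encrypted regular files a
+ * fscrypt context is attached as bi_private so that f2fs_read_end_io
+ * can decrypt the data on completion.
+ */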
+struct bio *f2fs_grab_bio(struct inode *inode, block_t blkaddr,
+ unsigned nr_pages)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct fscrypt_ctx *ctx = NULL;
+ struct block_device *bdev = sbi->sb->s_bdev;
+ struct bio *bio;
+
+ if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
+ ctx = fscrypt_get_ctx(inode, GFP_NOFS);
+ if (IS_ERR(ctx))
+ return ERR_CAST(ctx);
+
+ /* wait for the page to be moved by cleaning */
+ f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr);
+ }
+
+ bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES));
+ if (!bio) {
+ if (ctx)
+ fscrypt_release_ctx(ctx);
+ return ERR_PTR(-ENOMEM);
+ }
+ bio->bi_bdev = bdev;
+ bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blkaddr);
+ bio->bi_end_io = f2fs_read_end_io;
+ bio->bi_private = ctx;
+
+ return bio;
+}
+
/*
* This function was originally taken from fs/mpage.c, and customized for f2fs.
* Major change was from block_size == page_size in f2fs by default.
@@ -980,7 +1015,6 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
sector_t last_block;
sector_t last_block_in_file;
sector_t block_nr;
- struct block_device *bdev = inode->i_sb->s_bdev;
struct f2fs_map_blocks map;
map.m_pblk = 0;
@@ -996,7 +1030,8 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
page = list_entry(pages->prev, struct page, lru);
list_del(&page->lru);
if (add_to_page_cache_lru(page, mapping,
- page->index, GFP_KERNEL))
+ page->index,
+ readahead_gfp_mask(mapping)))
goto next_page;
}
@@ -1040,7 +1075,8 @@ got_it:
}
} else {
zero_user_segment(page, 0, PAGE_SIZE);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
unlock_page(page);
goto next_page;
}
@@ -1051,35 +1087,16 @@ got_it:
*/
if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
- __submit_bio(F2FS_I_SB(inode), READ, bio);
+ __submit_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
if (bio == NULL) {
- struct fscrypt_ctx *ctx = NULL;
-
- if (f2fs_encrypted_inode(inode) &&
- S_ISREG(inode->i_mode)) {
-
- ctx = fscrypt_get_ctx(inode, GFP_NOFS);
- if (IS_ERR(ctx))
- goto set_error_page;
-
- /* wait the page to be moved by cleaning */
- f2fs_wait_on_encrypted_page_writeback(
- F2FS_I_SB(inode), block_nr);
- }
-
- bio = bio_alloc(GFP_KERNEL,
- min_t(int, nr_pages, BIO_MAX_PAGES));
- if (!bio) {
- if (ctx)
- fscrypt_release_ctx(ctx);
+ bio = f2fs_grab_bio(inode, block_nr, nr_pages);
+ if (IS_ERR(bio)) {
+ bio = NULL;
goto set_error_page;
}
- bio->bi_bdev = bdev;
- bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
- bio->bi_end_io = f2fs_read_end_io;
- bio->bi_private = ctx;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
}
if (bio_add_page(bio, page, blocksize, 0) < blocksize)
@@ -1094,7 +1111,7 @@ set_error_page:
goto next_page;
confused:
if (bio) {
- __submit_bio(F2FS_I_SB(inode), READ, bio);
+ __submit_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
unlock_page(page);
@@ -1104,7 +1121,7 @@ next_page:
}
BUG_ON(pages && !list_empty(pages));
if (bio)
- __submit_bio(F2FS_I_SB(inode), READ, bio);
+ __submit_bio(F2FS_I_SB(inode), bio, DATA);
return 0;
}
@@ -1193,14 +1210,14 @@ retry_encrypt:
!IS_ATOMIC_WRITTEN_PAGE(page) &&
need_inplace_update(inode))) {
rewrite_data_page(fio);
- set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
+ set_inode_flag(inode, FI_UPDATE_WRITE);
trace_f2fs_do_write_data_page(page, IPU);
} else {
write_data_page(&dn, fio);
trace_f2fs_do_write_data_page(page, OPU);
- set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
+ set_inode_flag(inode, FI_APPEND_WRITE);
if (page->index == 0)
- set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
+ set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
}
out_writepage:
f2fs_put_dnode(&dn);
@@ -1215,13 +1232,15 @@ static int f2fs_write_data_page(struct page *page,
loff_t i_size = i_size_read(inode);
const pgoff_t end_index = ((unsigned long long) i_size)
>> PAGE_SHIFT;
+ loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
unsigned offset = 0;
bool need_balance_fs = false;
int err = 0;
struct f2fs_io_info fio = {
.sbi = sbi,
.type = DATA,
- .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
+ .op = REQ_OP_WRITE,
+ .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
.page = page,
.encrypted_page = NULL,
};
@@ -1251,20 +1270,18 @@ write:
available_free_memory(sbi, BASE_CHECK))))
goto redirty_out;
+ /* we should bypass data pages to let the kworker jobs proceed */
+ if (unlikely(f2fs_cp_error(sbi))) {
+ mapping_set_error(page->mapping, -EIO);
+ goto out;
+ }
+
/* Dentry blocks are controlled by checkpoint */
if (S_ISDIR(inode->i_mode)) {
- if (unlikely(f2fs_cp_error(sbi)))
- goto redirty_out;
err = do_write_data_page(&fio);
goto done;
}
- /* we should bypass data pages to proceed the kworkder jobs */
- if (unlikely(f2fs_cp_error(sbi))) {
- SetPageError(page);
- goto out;
- }
-
if (!wbc->for_reclaim)
need_balance_fs = true;
else if (has_not_enough_free_secs(sbi, 0))
@@ -1276,6 +1293,8 @@ write:
err = f2fs_write_inline_data(inode, page);
if (err == -EAGAIN)
err = do_write_data_page(&fio);
+ if (F2FS_I(inode)->last_disk_size < psize)
+ F2FS_I(inode)->last_disk_size = psize;
f2fs_unlock_op(sbi);
done:
if (err && err != -ENOENT)
@@ -1302,16 +1321,8 @@ out:
redirty_out:
redirty_page_for_writepage(wbc, page);
- return AOP_WRITEPAGE_ACTIVATE;
-}
-
-static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
- void *data)
-{
- struct address_space *mapping = data;
- int ret = mapping->a_ops->writepage(page, wbc);
- mapping_set_error(mapping, ret);
- return ret;
+ unlock_page(page);
+ return err;
}
/*
@@ -1320,8 +1331,7 @@ static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
* warm/hot data page.
*/
static int f2fs_write_cache_pages(struct address_space *mapping,
- struct writeback_control *wbc, writepage_t writepage,
- void *data)
+ struct writeback_control *wbc)
{
int ret = 0;
int done = 0;
@@ -1334,10 +1344,9 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
int cycled;
int range_whole = 0;
int tag;
- int step = 0;
pagevec_init(&pvec, 0);
-next:
+
if (wbc->range_cyclic) {
writeback_index = mapping->writeback_index; /* prev offset */
index = writeback_index;
@@ -1392,9 +1401,6 @@ continue_unlock:
goto continue_unlock;
}
- if (step == is_cold_data(page))
- goto continue_unlock;
-
if (PageWriteback(page)) {
if (wbc->sync_mode != WB_SYNC_NONE)
f2fs_wait_on_page_writeback(page,
@@ -1407,16 +1413,11 @@ continue_unlock:
if (!clear_page_dirty_for_io(page))
goto continue_unlock;
- ret = (*writepage)(page, wbc, data);
+ ret = mapping->a_ops->writepage(page, wbc);
if (unlikely(ret)) {
- if (ret == AOP_WRITEPAGE_ACTIVATE) {
- unlock_page(page);
- ret = 0;
- } else {
- done_index = page->index + 1;
- done = 1;
- break;
- }
+ done_index = page->index + 1;
+ done = 1;
+ break;
}
if (--wbc->nr_to_write <= 0 &&
@@ -1429,11 +1430,6 @@ continue_unlock:
cond_resched();
}
- if (step < 1) {
- step++;
- goto next;
- }
-
if (!cycled && !done) {
cycled = 1;
index = 0;
@@ -1451,9 +1447,8 @@ static int f2fs_write_data_pages(struct address_space *mapping,
{
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- bool locked = false;
+ struct blk_plug plug;
int ret;
- long diff;
/* deal with chardevs and other special file */
if (!mapping->a_ops->writepage)
@@ -1469,7 +1464,7 @@ static int f2fs_write_data_pages(struct address_space *mapping,
goto skip_write;
/* skip writing during file defragment */
- if (is_inode_flag_set(F2FS_I(inode), FI_DO_DEFRAG))
+ if (is_inode_flag_set(inode, FI_DO_DEFRAG))
goto skip_write;
/* during POR, we don't need to trigger writepage at all. */
@@ -1478,20 +1473,16 @@ static int f2fs_write_data_pages(struct address_space *mapping,
trace_f2fs_writepages(mapping->host, wbc, DATA);
- diff = nr_pages_to_write(sbi, DATA, wbc);
-
- if (!S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_ALL) {
- mutex_lock(&sbi->writepages);
- locked = true;
- }
- ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
- f2fs_submit_merged_bio_cond(sbi, inode, NULL, 0, DATA, WRITE);
- if (locked)
- mutex_unlock(&sbi->writepages);
+ blk_start_plug(&plug);
+ ret = f2fs_write_cache_pages(mapping, wbc);
+ blk_finish_plug(&plug);
+ /*
+ * if some pages were truncated, we cannot rely on mapping->host to
+ * detect the pending bios, so submit all merged DATA bios here.
+ */
+ f2fs_submit_merged_bio(sbi, DATA, WRITE);
remove_dirty_inode(inode);
-
- wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
return ret;
skip_write:
@@ -1549,7 +1540,7 @@ restart:
if (f2fs_has_inline_data(inode)) {
if (pos + len <= MAX_INLINE_DATA) {
read_inline_data(page, ipage);
- set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
+ set_inode_flag(inode, FI_DATA_EXIST);
if (inode->i_nlink)
set_inline_node(ipage);
} else {
@@ -1659,38 +1650,35 @@ repeat:
if (blkaddr == NEW_ADDR) {
zero_user_segment(page, 0, PAGE_SIZE);
} else {
- struct f2fs_io_info fio = {
- .sbi = sbi,
- .type = DATA,
- .rw = READ_SYNC,
- .old_blkaddr = blkaddr,
- .new_blkaddr = blkaddr,
- .page = page,
- .encrypted_page = NULL,
- };
- err = f2fs_submit_page_bio(&fio);
- if (err)
- goto fail;
+ struct bio *bio;
- lock_page(page);
- if (unlikely(!PageUptodate(page))) {
- err = -EIO;
+ bio = f2fs_grab_bio(inode, blkaddr, 1);
+ if (IS_ERR(bio)) {
+ err = PTR_ERR(bio);
+ goto fail;
+ }
+ bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC);
+ if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
+ bio_put(bio);
+ err = -EFAULT;
goto fail;
}
+
+ __submit_bio(sbi, bio, DATA);
+
+ lock_page(page);
if (unlikely(page->mapping != mapping)) {
f2fs_put_page(page, 1);
goto repeat;
}
-
- /* avoid symlink page */
- if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
- err = fscrypt_decrypt_page(page);
- if (err)
- goto fail;
+ if (unlikely(!PageUptodate(page))) {
+ err = -EIO;
+ goto fail;
}
}
out_update:
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
out_clear:
clear_cold_data(page);
return 0;
@@ -1711,13 +1699,11 @@ static int f2fs_write_end(struct file *file,
trace_f2fs_write_end(inode, pos, len, copied);
set_page_dirty(page);
+ f2fs_put_page(page, 1);
- if (pos + copied > i_size_read(inode)) {
- i_size_write(inode, pos + copied);
- mark_inode_dirty(inode);
- }
+ if (pos + copied > i_size_read(inode))
+ f2fs_i_size_write(inode, pos + copied);
- f2fs_put_page(page, 1);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return copied;
}
@@ -1742,6 +1728,7 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
struct inode *inode = mapping->host;
size_t count = iov_iter_count(iter);
loff_t offset = iocb->ki_pos;
+ int rw = iov_iter_rw(iter);
int err;
err = check_direct_IO(inode, iter, offset);
@@ -1750,18 +1737,23 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
return 0;
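+ /* LFS mode forbids in-place updates, so fall back to buffered I/O */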
+ if (test_opt(F2FS_I_SB(inode), LFS))
+ return 0;
- trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
+ trace_f2fs_direct_IO_enter(inode, offset, count, rw);
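+ /* keep GC from migrating this inode's blocks while the DIO is in flight */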
+ down_read(&F2FS_I(inode)->dio_rwsem[rw]);
err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
- if (iov_iter_rw(iter) == WRITE) {
+ up_read(&F2FS_I(inode)->dio_rwsem[rw]);
+
+ if (rw == WRITE) {
if (err > 0)
- set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
+ set_inode_flag(inode, FI_UPDATE_WRITE);
else if (err < 0)
f2fs_write_failed(mapping, offset + count);
}
- trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err);
+ trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
return err;
}
@@ -1808,6 +1800,35 @@ int f2fs_release_page(struct page *page, gfp_t wait)
return 1;
}
+/*
+ * This was copied from __set_page_dirty_buffers, which gives higher
+ * performance on very high-speed storage (e.g., pmem).
+ */
+void f2fs_set_page_dirty_nobuffers(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+ unsigned long flags;
+
+ if (unlikely(!mapping))
+ return;
+
+ spin_lock(&mapping->private_lock);
+ lock_page_memcg(page);
+ SetPageDirty(page);
+ spin_unlock(&mapping->private_lock);
+
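+ /* tag the page dirty in the radix tree so writeback can find it */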
+ spin_lock_irqsave(&mapping->tree_lock, flags);
+ WARN_ON_ONCE(!PageUptodate(page));
+ account_page_dirtied(page, mapping);
+ radix_tree_tag_set(&mapping->page_tree,
+ page_index(page), PAGECACHE_TAG_DIRTY);
+ spin_unlock_irqrestore(&mapping->tree_lock, flags);
+ unlock_page_memcg(page);
+
+ __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
+}
+
static int f2fs_set_data_page_dirty(struct page *page)
{
struct address_space *mapping = page->mapping;
@@ -1815,7 +1836,8 @@ static int f2fs_set_data_page_dirty(struct page *page)
trace_f2fs_set_page_dirty(page, DATA);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
if (f2fs_is_atomic_file(inode)) {
if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
@@ -1830,7 +1852,7 @@ static int f2fs_set_data_page_dirty(struct page *page)
}
if (!PageDirty(page)) {
- __set_page_dirty_nobuffers(page);
+ f2fs_set_page_dirty_nobuffers(page);
update_dirty_page(inode, page);
return 1;
}
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index d89a425055d0..badd407bb622 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -47,6 +47,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
si->ndirty_data = get_pages(sbi, F2FS_DIRTY_DATA);
si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE];
si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
+ si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES);
si->wb_bios = atomic_read(&sbi->nr_wb_bios);
si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg;
@@ -304,8 +305,8 @@ static int stat_show(struct seq_file *s, void *v)
si->inmem_pages, si->wb_bios);
seq_printf(s, " - nodes: %4lld in %4d\n",
si->ndirty_node, si->node_pages);
- seq_printf(s, " - dents: %4lld in dirs:%4d\n",
- si->ndirty_dent, si->ndirty_dirs);
+ seq_printf(s, " - dents: %4lld in dirs:%4d (%4d)\n",
+ si->ndirty_dent, si->ndirty_dirs, si->ndirty_all);
seq_printf(s, " - datas: %4lld in files:%4d\n",
si->ndirty_data, si->ndirty_files);
seq_printf(s, " - meta: %4lld in %4d\n",
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index f9313f684540..9054aeac8015 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -185,8 +185,13 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
/* no need to allocate new dentry pages to all the indices */
dentry_page = find_data_page(dir, bidx);
if (IS_ERR(dentry_page)) {
- room = true;
- continue;
+ if (PTR_ERR(dentry_page) == -ENOENT) {
+ room = true;
+ continue;
+ } else {
+ *res_page = dentry_page;
+ break;
+ }
}
de = find_in_block(dentry_page, fname, namehash, &max_slots,
@@ -214,7 +219,7 @@ static struct f2fs_dir_entry *find_in_level(struct inode *dir,
* Entry is guaranteed to be valid.
*/
struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
- struct qstr *child, struct page **res_page)
+ const struct qstr *child, struct page **res_page)
{
unsigned long npages = dir_blocks(dir);
struct f2fs_dir_entry *de = NULL;
@@ -223,19 +228,22 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
struct fscrypt_name fname;
int err;
- *res_page = NULL;
-
err = fscrypt_setup_filename(dir, child, 1, &fname);
- if (err)
+ if (err) {
+ *res_page = ERR_PTR(err);
return NULL;
+ }
if (f2fs_has_inline_dentry(dir)) {
+ *res_page = NULL;
de = find_in_inline_dir(dir, &fname, res_page);
goto out;
}
- if (npages == 0)
+ if (npages == 0) {
+ *res_page = NULL;
goto out;
+ }
max_depth = F2FS_I(dir)->i_current_depth;
if (unlikely(max_depth > MAX_DIR_HASH_DEPTH)) {
@@ -243,13 +251,13 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
"Corrupted max_depth of %lu: %u",
dir->i_ino, max_depth);
max_depth = MAX_DIR_HASH_DEPTH;
- F2FS_I(dir)->i_current_depth = max_depth;
- mark_inode_dirty(dir);
+ f2fs_i_depth_write(dir, max_depth);
}
for (level = 0; level < max_depth; level++) {
+ *res_page = NULL;
de = find_in_level(dir, level, &fname, res_page);
- if (de)
+ if (de || IS_ERR(*res_page))
break;
}
out:
@@ -259,35 +267,22 @@ out:
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *dir, struct page **p)
{
- struct page *page;
- struct f2fs_dir_entry *de;
- struct f2fs_dentry_block *dentry_blk;
-
- if (f2fs_has_inline_dentry(dir))
- return f2fs_parent_inline_dir(dir, p);
-
- page = get_lock_data_page(dir, 0, false);
- if (IS_ERR(page))
- return NULL;
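+ /* ".." is an ordinary dentry, so a plain lookup covers inline and regular dirs */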
+ struct qstr dotdot = QSTR_INIT("..", 2);
- dentry_blk = kmap(page);
- de = &dentry_blk->dentry[1];
- *p = page;
- unlock_page(page);
- return de;
+ return f2fs_find_entry(dir, &dotdot, p);
}
-ino_t f2fs_inode_by_name(struct inode *dir, struct qstr *qstr)
+ino_t f2fs_inode_by_name(struct inode *dir, const struct qstr *qstr,
+ struct page **page)
{
ino_t res = 0;
struct f2fs_dir_entry *de;
- struct page *page;
- de = f2fs_find_entry(dir, qstr, &page);
+ de = f2fs_find_entry(dir, qstr, page);
if (de) {
res = le32_to_cpu(de->ino);
- f2fs_dentry_kunmap(dir, page);
- f2fs_put_page(page, 0);
+ f2fs_dentry_kunmap(dir, *page);
+ f2fs_put_page(*page, 0);
}
return res;
@@ -303,9 +298,9 @@ void f2fs_set_link(struct inode *dir, struct f2fs_dir_entry *de,
set_de_type(de, inode->i_mode);
f2fs_dentry_kunmap(dir, page);
set_page_dirty(page);
- dir->i_mtime = dir->i_ctime = CURRENT_TIME;
- mark_inode_dirty(dir);
+ dir->i_mtime = dir->i_ctime = CURRENT_TIME;
+ f2fs_mark_inode_dirty_sync(dir);
f2fs_put_page(page, 1);
}
@@ -385,7 +380,7 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
struct page *page;
int err;
- if (is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
+ if (is_inode_flag_set(inode, FI_NEW_INODE)) {
page = new_inode_page(inode);
if (IS_ERR(page))
return page;
@@ -429,7 +424,7 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
* This file should be checkpointed during fsync.
* We lost i_pino from now on.
*/
- if (is_inode_flag_set(F2FS_I(inode), FI_INC_LINK)) {
+ if (is_inode_flag_set(inode, FI_INC_LINK)) {
file_lost_pino(inode);
/*
* If link the tmpfile to alias through linkat path,
@@ -437,14 +432,11 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir,
*/
if (inode->i_nlink == 0)
remove_orphan_inode(F2FS_I_SB(dir), inode->i_ino);
- inc_nlink(inode);
+ f2fs_i_links_write(inode, true);
}
return page;
put_error:
- /* truncate empty dir pages */
- truncate_inode_pages(&inode->i_data, 0);
-
clear_nlink(inode);
update_inode(inode, page);
f2fs_put_page(page, 1);
@@ -454,23 +446,19 @@ put_error:
void update_parent_metadata(struct inode *dir, struct inode *inode,
unsigned int current_depth)
{
- if (inode && is_inode_flag_set(F2FS_I(inode), FI_NEW_INODE)) {
- if (S_ISDIR(inode->i_mode)) {
- inc_nlink(dir);
- set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
- }
- clear_inode_flag(F2FS_I(inode), FI_NEW_INODE);
+ if (inode && is_inode_flag_set(inode, FI_NEW_INODE)) {
+ if (S_ISDIR(inode->i_mode))
+ f2fs_i_links_write(dir, true);
+ clear_inode_flag(inode, FI_NEW_INODE);
}
dir->i_mtime = dir->i_ctime = CURRENT_TIME;
- mark_inode_dirty(dir);
+ f2fs_mark_inode_dirty_sync(dir);
- if (F2FS_I(dir)->i_current_depth != current_depth) {
- F2FS_I(dir)->i_current_depth = current_depth;
- set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
- }
+ if (F2FS_I(dir)->i_current_depth != current_depth)
+ f2fs_i_depth_write(dir, current_depth);
- if (inode && is_inode_flag_set(F2FS_I(inode), FI_INC_LINK))
- clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ if (inode && is_inode_flag_set(inode, FI_INC_LINK))
+ clear_inode_flag(inode, FI_INC_LINK);
}
int room_for_filename(const void *bitmap, int slots, int max_slots)
@@ -596,9 +584,7 @@ add_dentry:
set_page_dirty(dentry_page);
if (inode) {
- /* we don't need to mark_inode_dirty now */
- F2FS_I(inode)->i_pino = dir->i_ino;
- update_inode(inode, page);
+ f2fs_i_pino_write(inode, dir->i_ino);
f2fs_put_page(page, 1);
}
@@ -607,10 +593,6 @@ fail:
if (inode)
up_write(&F2FS_I(inode)->i_sem);
- if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
- update_inode_page(dir);
- clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
- }
kunmap(dentry_page);
f2fs_put_page(dentry_page, 1);
@@ -657,42 +639,34 @@ int f2fs_do_tmpfile(struct inode *inode, struct inode *dir)
err = PTR_ERR(page);
goto fail;
}
- /* we don't need to mark_inode_dirty now */
- update_inode(inode, page);
f2fs_put_page(page, 1);
- clear_inode_flag(F2FS_I(inode), FI_NEW_INODE);
+ clear_inode_flag(inode, FI_NEW_INODE);
fail:
up_write(&F2FS_I(inode)->i_sem);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
return err;
}
-void f2fs_drop_nlink(struct inode *dir, struct inode *inode, struct page *page)
+void f2fs_drop_nlink(struct inode *dir, struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
down_write(&F2FS_I(inode)->i_sem);
- if (S_ISDIR(inode->i_mode)) {
- drop_nlink(dir);
- if (page)
- update_inode(dir, page);
- else
- update_inode_page(dir);
- }
+ if (S_ISDIR(inode->i_mode))
+ f2fs_i_links_write(dir, false);
inode->i_ctime = CURRENT_TIME;
- drop_nlink(inode);
+ f2fs_i_links_write(inode, false);
if (S_ISDIR(inode->i_mode)) {
- drop_nlink(inode);
- i_size_write(inode, 0);
+ f2fs_i_links_write(inode, false);
+ f2fs_i_size_write(inode, 0);
}
up_write(&F2FS_I(inode)->i_sem);
- update_inode_page(inode);
if (inode->i_nlink == 0)
- add_orphan_inode(sbi, inode->i_ino);
+ add_orphan_inode(inode);
else
release_orphan_inode(sbi);
}
@@ -730,9 +704,10 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
set_page_dirty(page);
dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+ f2fs_mark_inode_dirty_sync(dir);
if (inode)
- f2fs_drop_nlink(dir, inode, NULL);
+ f2fs_drop_nlink(dir, inode);
if (bit_pos == NR_DENTRY_IN_BLOCK &&
!truncate_hole(dir, page->index, page->index + 1)) {
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index 5bfcdb9b69f2..2b06d4fcd954 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -170,8 +170,10 @@ static void __drop_largest_extent(struct inode *inode,
{
struct extent_info *largest = &F2FS_I(inode)->extent_tree->largest;
- if (fofs < largest->fofs + largest->len && fofs + len > largest->fofs)
+ if (fofs < largest->fofs + largest->len && fofs + len > largest->fofs) {
largest->len = 0;
+ f2fs_mark_inode_dirty_sync(inode);
+ }
}
/* return true, if inode page is changed */
@@ -335,11 +337,12 @@ lookup_neighbors:
return en;
}
-static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
+static struct extent_node *__try_merge_extent_node(struct inode *inode,
struct extent_tree *et, struct extent_info *ei,
struct extent_node *prev_ex,
struct extent_node *next_ex)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct extent_node *en = NULL;
if (prev_ex && __is_back_mergeable(ei, &prev_ex->ei)) {
@@ -360,7 +363,7 @@ static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
if (!en)
return NULL;
- __try_update_largest_extent(et, en);
+ __try_update_largest_extent(inode, et, en);
spin_lock(&sbi->extent_lock);
if (!list_empty(&en->list)) {
@@ -371,11 +374,12 @@ static struct extent_node *__try_merge_extent_node(struct f2fs_sb_info *sbi,
return en;
}
-static struct extent_node *__insert_extent_tree(struct f2fs_sb_info *sbi,
+static struct extent_node *__insert_extent_tree(struct inode *inode,
struct extent_tree *et, struct extent_info *ei,
struct rb_node **insert_p,
struct rb_node *insert_parent)
{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct rb_node **p = &et->root.rb_node;
struct rb_node *parent = NULL;
struct extent_node *en = NULL;
@@ -402,7 +406,7 @@ do_insert:
if (!en)
return NULL;
- __try_update_largest_extent(et, en);
+ __try_update_largest_extent(inode, et, en);
/* update in global extent list */
spin_lock(&sbi->extent_lock);
@@ -431,7 +435,7 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
write_lock(&et->lock);
- if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT)) {
+ if (is_inode_flag_set(inode, FI_NO_EXTENT)) {
write_unlock(&et->lock);
return false;
}
@@ -473,7 +477,7 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
set_extent_info(&ei, end,
end - dei.fofs + dei.blk,
org_end - end);
- en1 = __insert_extent_tree(sbi, et, &ei,
+ en1 = __insert_extent_tree(inode, et, &ei,
NULL, NULL);
next_en = en1;
} else {
@@ -494,7 +498,7 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
}
if (parts)
- __try_update_largest_extent(et, en);
+ __try_update_largest_extent(inode, et, en);
else
__release_extent_node(sbi, et, en);
@@ -514,20 +518,20 @@ static unsigned int f2fs_update_extent_tree_range(struct inode *inode,
if (blkaddr) {
set_extent_info(&ei, fofs, blkaddr, len);
- if (!__try_merge_extent_node(sbi, et, &ei, prev_en, next_en))
- __insert_extent_tree(sbi, et, &ei,
+ if (!__try_merge_extent_node(inode, et, &ei, prev_en, next_en))
+ __insert_extent_tree(inode, et, &ei,
insert_p, insert_parent);
/* give up extent_cache, if split and small updates happen */
if (dei.len >= 1 &&
prev.len < F2FS_MIN_EXTENT_LEN &&
et->largest.len < F2FS_MIN_EXTENT_LEN) {
- et->largest.len = 0;
- set_inode_flag(F2FS_I(inode), FI_NO_EXTENT);
+ __drop_largest_extent(inode, 0, UINT_MAX);
+ set_inode_flag(inode, FI_NO_EXTENT);
}
}
- if (is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
+ if (is_inode_flag_set(inode, FI_NO_EXTENT))
__free_extent_tree(sbi, et);
write_unlock(&et->lock);
@@ -627,6 +631,19 @@ unsigned int f2fs_destroy_extent_node(struct inode *inode)
return node_cnt;
}
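+/*
+ * Drop every cached extent of this inode, including the largest-extent
+ * hint; FI_NO_EXTENT keeps new extents from being cached afterwards.
+ */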
+void f2fs_drop_extent_tree(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct extent_tree *et = F2FS_I(inode)->extent_tree;
+
+ set_inode_flag(inode, FI_NO_EXTENT);
+
+ write_lock(&et->lock);
+ __free_extent_tree(sbi, et);
+ __drop_largest_extent(inode, 0, UINT_MAX);
+ write_unlock(&et->lock);
+}
+
void f2fs_destroy_extent_tree(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -685,9 +702,7 @@ void f2fs_update_extent_cache(struct dnode_of_data *dn)
fofs = start_bidx_of_node(ofs_of_node(dn->node_page), dn->inode) +
dn->ofs_in_node;
-
- if (f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1))
- sync_inode_page(dn);
+ f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, 1);
}
void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
@@ -697,8 +712,7 @@ void f2fs_update_extent_cache_range(struct dnode_of_data *dn,
if (!f2fs_may_extent_tree(dn->inode))
return;
- if (f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len))
- sync_inode_page(dn);
+ f2fs_update_extent_tree_range(dn->inode, fofs, blkaddr, len);
}
void init_extent_cache_info(struct f2fs_sb_info *sbi)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 916e7c238e3d..675fa79d86f6 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -45,6 +45,7 @@ enum {
FAULT_ORPHAN,
FAULT_BLOCK,
FAULT_DIR_DEPTH,
+ FAULT_EVICT_INODE,
FAULT_MAX,
};
@@ -74,6 +75,8 @@ static inline bool time_to_inject(int type)
return false;
else if (type == FAULT_DIR_DEPTH && !IS_FAULT_SET(type))
return false;
+ else if (type == FAULT_EVICT_INODE && !IS_FAULT_SET(type))
+ return false;
atomic_inc(&f2fs_fault.inject_ops);
if (atomic_read(&f2fs_fault.inject_ops) >= f2fs_fault.inject_rate) {
@@ -108,6 +111,8 @@ static inline bool time_to_inject(int type)
#define F2FS_MOUNT_FORCE_FG_GC 0x00004000
#define F2FS_MOUNT_DATA_FLUSH 0x00008000
#define F2FS_MOUNT_FAULT_INJECTION 0x00010000
+#define F2FS_MOUNT_ADAPTIVE 0x00020000
+#define F2FS_MOUNT_LFS 0x00040000
#define clear_opt(sbi, option) (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option) (sbi->mount_opt.opt |= F2FS_MOUNT_##option)
@@ -128,6 +133,7 @@ struct f2fs_mount_info {
};
#define F2FS_FEATURE_ENCRYPT 0x0001
+#define F2FS_FEATURE_HMSMR 0x0002
#define F2FS_HAS_FEATURE(sb, mask) \
((F2FS_SB(sb)->raw_super->feature & cpu_to_le32(mask)) != 0)
@@ -158,7 +164,7 @@ enum {
#define BATCHED_TRIM_BLOCKS(sbi) \
(BATCHED_TRIM_SEGMENTS(sbi) << (sbi)->log_blocks_per_seg)
#define DEF_CP_INTERVAL 60 /* 60 secs */
-#define DEF_IDLE_INTERVAL 120 /* 2 mins */
+#define DEF_IDLE_INTERVAL 5 /* 5 secs */
struct cp_control {
int reason;
@@ -262,6 +268,8 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal,
#define F2FS_IOC_GARBAGE_COLLECT _IO(F2FS_IOCTL_MAGIC, 6)
#define F2FS_IOC_WRITE_CHECKPOINT _IO(F2FS_IOCTL_MAGIC, 7)
#define F2FS_IOC_DEFRAGMENT _IO(F2FS_IOCTL_MAGIC, 8)
+#define F2FS_IOC_MOVE_RANGE _IOWR(F2FS_IOCTL_MAGIC, 9, \
+ struct f2fs_move_range)
#define F2FS_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY
#define F2FS_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY
@@ -291,6 +299,13 @@ struct f2fs_defragment {
u64 len;
};
+struct f2fs_move_range {
+ u32 dst_fd; /* destination fd */
+ u64 pos_in; /* start position in src_fd */
+ u64 pos_out; /* start position in dst_fd */
+ u64 len; /* size to move */
+};
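+/*
+ * Hypothetical userspace sketch (fd variables and length are illustrative):
+ *
+ *	struct f2fs_move_range mr = {
+ *		.dst_fd = dst_fd, .pos_in = 0, .pos_out = 0, .len = 4096,
+ *	};
+ *	ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &mr);
+ */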
+
/*
* For INODE and NODE manager
*/
@@ -441,11 +456,14 @@ struct f2fs_inode_info {
unsigned int clevel; /* maximum level of given file name */
nid_t i_xattr_nid; /* node id that contains xattrs */
unsigned long long xattr_ver; /* cp version of xattr modification */
+ loff_t last_disk_size; /* lastly written file size */
- struct list_head dirty_list; /* linked in global dirty list */
+ struct list_head dirty_list; /* dirty list for dirs and files */
+ struct list_head gdirty_list; /* linked in global dirty list */
struct list_head inmem_pages; /* inmemory pages managed by f2fs */
struct mutex inmem_lock; /* lock for inmemory pages */
struct extent_tree *extent_tree; /* cached extent_tree entry */
+ struct rw_semaphore dio_rwsem[2]; /* avoid racing between dio and gc */
};
static inline void get_extent_info(struct extent_info *ext,
@@ -498,11 +516,14 @@ static inline bool __is_front_mergeable(struct extent_info *cur,
return __is_extent_mergeable(cur, front);
}
-static inline void __try_update_largest_extent(struct extent_tree *et,
- struct extent_node *en)
+extern void f2fs_mark_inode_dirty_sync(struct inode *);
+static inline void __try_update_largest_extent(struct inode *inode,
+ struct extent_tree *et, struct extent_node *en)
{
- if (en->ei.len > et->largest.len)
+ if (en->ei.len > et->largest.len) {
et->largest = en->ei;
+ f2fs_mark_inode_dirty_sync(inode);
+ }
}
struct f2fs_nm_info {
@@ -517,7 +538,7 @@ struct f2fs_nm_info {
/* NAT cache management */
struct radix_tree_root nat_root;/* root of the nat entry cache */
struct radix_tree_root nat_set_root;/* root of the nat set cache */
- struct rw_semaphore nat_tree_lock; /* protect nat_tree_lock */
+ struct percpu_rw_semaphore nat_tree_lock; /* protect the nat entry caches */
struct list_head nat_entries; /* cached nat entry list (clean) */
unsigned int nat_cnt; /* the # of cached nat entries */
unsigned int dirty_nat_cnt; /* total num of nat entries in set */
@@ -599,6 +620,7 @@ struct flush_cmd {
struct flush_cmd_control {
struct task_struct *f2fs_issue_flush; /* flush thread */
wait_queue_head_t flush_wait_queue; /* waiting queue for wake-up */
+ atomic_t submit_flush; /* # of issued flushes */
struct llist_head issue_list; /* list for command issue */
struct llist_node *dispatch_list; /* list for command dispatch */
};
@@ -655,6 +677,7 @@ enum count_type {
F2FS_DIRTY_NODES,
F2FS_DIRTY_META,
F2FS_INMEM_PAGES,
+ F2FS_DIRTY_IMETA,
NR_COUNT_TYPE,
};
@@ -686,14 +709,15 @@ enum page_type {
struct f2fs_io_info {
struct f2fs_sb_info *sbi; /* f2fs_sb_info pointer */
enum page_type type; /* contains DATA/NODE/META/META_FLUSH */
- int rw; /* contains R/RS/W/WS with REQ_META/REQ_PRIO */
+ int op; /* contains REQ_OP_ */
+ int op_flags; /* rq_flag_bits */
block_t new_blkaddr; /* new block address to be written */
block_t old_blkaddr; /* old block address before Cow */
struct page *page; /* page to be written */
struct page *encrypted_page; /* encrypted page */
};
-#define is_read_io(rw) (((rw) & 1) == READ)
+#define is_read_io(rw) ((rw) == READ)
struct f2fs_bio_info {
struct f2fs_sb_info *sbi; /* f2fs superblock */
struct bio *bio; /* bios to merge */
@@ -705,6 +729,7 @@ struct f2fs_bio_info {
enum inode_type {
DIR_INODE, /* for dirty dir inode */
FILE_INODE, /* for dirty regular/symlink inode */
+ DIRTY_META, /* for all dirtied inode metadata */
NR_INODE_TYPE,
};
@@ -756,14 +781,14 @@ struct f2fs_sb_info {
/* for bio operations */
struct f2fs_bio_info read_io; /* for read bios */
struct f2fs_bio_info write_io[NR_PAGE_TYPE]; /* for write bios */
+ struct mutex wio_mutex[NODE + 1]; /* bio ordering for NODE/DATA */
/* for checkpoint */
struct f2fs_checkpoint *ckpt; /* raw checkpoint pointer */
struct inode *meta_inode; /* cache meta blocks */
struct mutex cp_mutex; /* checkpoint procedure lock */
- struct rw_semaphore cp_rwsem; /* blocking FS operations */
+ struct percpu_rw_semaphore cp_rwsem; /* blocking FS operations */
struct rw_semaphore node_write; /* locking node writes */
- struct mutex writepages; /* mutex for writepages() */
wait_queue_head_t cp_wait;
unsigned long last_time[MAX_TIME]; /* to store time in jiffies */
long interval_time[MAX_TIME]; /* to store thresholds */
@@ -1049,22 +1074,22 @@ static inline void clear_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
{
- down_read(&sbi->cp_rwsem);
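+ /* percpu rwsem keeps this hot read path cheap; checkpoint takes the write side */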
+ percpu_down_read(&sbi->cp_rwsem);
}
static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi)
{
- up_read(&sbi->cp_rwsem);
+ percpu_up_read(&sbi->cp_rwsem);
}
static inline void f2fs_lock_all(struct f2fs_sb_info *sbi)
{
- down_write(&sbi->cp_rwsem);
+ percpu_down_write(&sbi->cp_rwsem);
}
static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
{
- up_write(&sbi->cp_rwsem);
+ percpu_up_write(&sbi->cp_rwsem);
}
static inline int __get_cp_reason(struct f2fs_sb_info *sbi)
@@ -1119,34 +1144,37 @@ static inline bool f2fs_has_xattr_block(unsigned int ofs)
return ofs == XATTR_NODE_OFFSET;
}
+static inline void f2fs_i_blocks_write(struct inode *, blkcnt_t, bool);
static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
struct inode *inode, blkcnt_t *count)
{
- block_t valid_block_count;
+ blkcnt_t diff;
- spin_lock(&sbi->stat_lock);
#ifdef CONFIG_F2FS_FAULT_INJECTION
- if (time_to_inject(FAULT_BLOCK)) {
- spin_unlock(&sbi->stat_lock);
+ if (time_to_inject(FAULT_BLOCK))
return false;
- }
#endif
- valid_block_count =
- sbi->total_valid_block_count + (block_t)(*count);
- if (unlikely(valid_block_count > sbi->user_block_count)) {
- *count = sbi->user_block_count - sbi->total_valid_block_count;
+ /*
+ * let's increase this prior to the actual block count change so that
+ * f2fs_sync_file can avoid data races when deciding on a checkpoint.
+ */
+ percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
+
+ spin_lock(&sbi->stat_lock);
+ sbi->total_valid_block_count += (block_t)(*count);
+ if (unlikely(sbi->total_valid_block_count > sbi->user_block_count)) {
+ diff = sbi->total_valid_block_count - sbi->user_block_count;
+ *count -= diff;
+ sbi->total_valid_block_count = sbi->user_block_count;
if (!*count) {
spin_unlock(&sbi->stat_lock);
+ percpu_counter_sub(&sbi->alloc_valid_block_count, diff);
return false;
}
}
- /* *count can be recalculated */
- inode->i_blocks += *count;
- sbi->total_valid_block_count =
- sbi->total_valid_block_count + (block_t)(*count);
spin_unlock(&sbi->stat_lock);
- percpu_counter_add(&sbi->alloc_valid_block_count, (*count));
+ f2fs_i_blocks_write(inode, *count, true);
return true;
}
@@ -1157,9 +1185,9 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
spin_lock(&sbi->stat_lock);
f2fs_bug_on(sbi, sbi->total_valid_block_count < (block_t) count);
f2fs_bug_on(sbi, inode->i_blocks < count);
- inode->i_blocks -= count;
sbi->total_valid_block_count -= (block_t)count;
spin_unlock(&sbi->stat_lock);
+ f2fs_i_blocks_write(inode, count, false);
}
static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type)
@@ -1294,7 +1322,7 @@ static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi,
}
if (inode)
- inode->i_blocks++;
+ f2fs_i_blocks_write(inode, 1, true);
sbi->total_valid_node_count++;
sbi->total_valid_block_count++;
@@ -1313,7 +1341,7 @@ static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
f2fs_bug_on(sbi, !sbi->total_valid_node_count);
f2fs_bug_on(sbi, !inode->i_blocks);
- inode->i_blocks--;
+ f2fs_i_blocks_write(inode, 1, false);
sbi->total_valid_node_count--;
sbi->total_valid_block_count--;
@@ -1510,12 +1538,12 @@ static inline void f2fs_change_bit(unsigned int nr, char *addr)
enum {
FI_NEW_INODE, /* indicate newly allocated inode */
FI_DIRTY_INODE, /* indicate inode is dirty or not */
+ FI_AUTO_RECOVER, /* indicate inode is recoverable */
FI_DIRTY_DIR, /* indicate directory has dirty pages */
FI_INC_LINK, /* need to increment i_nlink */
FI_ACL_MODE, /* indicate acl mode */
FI_NO_ALLOC, /* should not allocate any blocks */
FI_FREE_NID, /* free allocated nide */
- FI_UPDATE_DIR, /* should update inode block for consistency */
FI_NO_EXTENT, /* not to use the extent cache */
FI_INLINE_XATTR, /* used for inline xattr */
FI_INLINE_DATA, /* used for inline data*/
@@ -1533,64 +1561,143 @@ enum {
FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */
};
-static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag)
+static inline void __mark_inode_dirty_flag(struct inode *inode,
+ int flag, bool set)
+{
+ switch (flag) {
+ case FI_INLINE_XATTR:
+ case FI_INLINE_DATA:
+ case FI_INLINE_DENTRY:
+ if (set)
+ return;
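+ /* fall through */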
+ case FI_DATA_EXIST:
+ case FI_INLINE_DOTS:
+ f2fs_mark_inode_dirty_sync(inode);
+ }
+}
+
+static inline void set_inode_flag(struct inode *inode, int flag)
+{
+ if (!test_bit(flag, &F2FS_I(inode)->flags))
+ set_bit(flag, &F2FS_I(inode)->flags);
+ __mark_inode_dirty_flag(inode, flag, true);
+}
+
+static inline int is_inode_flag_set(struct inode *inode, int flag)
+{
+ return test_bit(flag, &F2FS_I(inode)->flags);
+}
+
+static inline void clear_inode_flag(struct inode *inode, int flag)
+{
+ if (test_bit(flag, &F2FS_I(inode)->flags))
+ clear_bit(flag, &F2FS_I(inode)->flags);
+ __mark_inode_dirty_flag(inode, flag, false);
+}
+
+static inline void set_acl_inode(struct inode *inode, umode_t mode)
{
- if (!test_bit(flag, &fi->flags))
- set_bit(flag, &fi->flags);
+ F2FS_I(inode)->i_acl_mode = mode;
+ set_inode_flag(inode, FI_ACL_MODE);
+ f2fs_mark_inode_dirty_sync(inode);
}
-static inline int is_inode_flag_set(struct f2fs_inode_info *fi, int flag)
+static inline void f2fs_i_links_write(struct inode *inode, bool inc)
{
- return test_bit(flag, &fi->flags);
+ if (inc)
+ inc_nlink(inode);
+ else
+ drop_nlink(inode);
+ f2fs_mark_inode_dirty_sync(inode);
+}
+
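+/*
+ * Block-count and size changes are recoverable by roll-forward, so tag
+ * the inode FI_AUTO_RECOVER; f2fs_skip_inode_update() then lets fsync
+ * skip a full inode write-out when the on-disk size already matches.
+ */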
+static inline void f2fs_i_blocks_write(struct inode *inode,
+ blkcnt_t diff, bool add)
+{
+ bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
+ bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
+
+ inode->i_blocks = add ? inode->i_blocks + diff :
+ inode->i_blocks - diff;
+ f2fs_mark_inode_dirty_sync(inode);
+ if (clean || recover)
+ set_inode_flag(inode, FI_AUTO_RECOVER);
+}
+
+static inline void f2fs_i_size_write(struct inode *inode, loff_t i_size)
+{
+ bool clean = !is_inode_flag_set(inode, FI_DIRTY_INODE);
+ bool recover = is_inode_flag_set(inode, FI_AUTO_RECOVER);
+
+ if (i_size_read(inode) == i_size)
+ return;
+
+ i_size_write(inode, i_size);
+ f2fs_mark_inode_dirty_sync(inode);
+ if (clean || recover)
+ set_inode_flag(inode, FI_AUTO_RECOVER);
}
-static inline void clear_inode_flag(struct f2fs_inode_info *fi, int flag)
+static inline bool f2fs_skip_inode_update(struct inode *inode)
{
- if (test_bit(flag, &fi->flags))
- clear_bit(flag, &fi->flags);
+ if (!is_inode_flag_set(inode, FI_AUTO_RECOVER))
+ return false;
+ return F2FS_I(inode)->last_disk_size == i_size_read(inode);
}
-static inline void set_acl_inode(struct f2fs_inode_info *fi, umode_t mode)
+static inline void f2fs_i_depth_write(struct inode *inode, unsigned int depth)
{
- fi->i_acl_mode = mode;
- set_inode_flag(fi, FI_ACL_MODE);
+ F2FS_I(inode)->i_current_depth = depth;
+ f2fs_mark_inode_dirty_sync(inode);
}
-static inline void get_inline_info(struct f2fs_inode_info *fi,
- struct f2fs_inode *ri)
+static inline void f2fs_i_xnid_write(struct inode *inode, nid_t xnid)
{
+ F2FS_I(inode)->i_xattr_nid = xnid;
+ f2fs_mark_inode_dirty_sync(inode);
+}
+
+static inline void f2fs_i_pino_write(struct inode *inode, nid_t pino)
+{
+ F2FS_I(inode)->i_pino = pino;
+ f2fs_mark_inode_dirty_sync(inode);
+}
+
+static inline void get_inline_info(struct inode *inode, struct f2fs_inode *ri)
+{
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+
if (ri->i_inline & F2FS_INLINE_XATTR)
- set_inode_flag(fi, FI_INLINE_XATTR);
+ set_bit(FI_INLINE_XATTR, &fi->flags);
if (ri->i_inline & F2FS_INLINE_DATA)
- set_inode_flag(fi, FI_INLINE_DATA);
+ set_bit(FI_INLINE_DATA, &fi->flags);
if (ri->i_inline & F2FS_INLINE_DENTRY)
- set_inode_flag(fi, FI_INLINE_DENTRY);
+ set_bit(FI_INLINE_DENTRY, &fi->flags);
if (ri->i_inline & F2FS_DATA_EXIST)
- set_inode_flag(fi, FI_DATA_EXIST);
+ set_bit(FI_DATA_EXIST, &fi->flags);
if (ri->i_inline & F2FS_INLINE_DOTS)
- set_inode_flag(fi, FI_INLINE_DOTS);
+ set_bit(FI_INLINE_DOTS, &fi->flags);
}
-static inline void set_raw_inline(struct f2fs_inode_info *fi,
- struct f2fs_inode *ri)
+static inline void set_raw_inline(struct inode *inode, struct f2fs_inode *ri)
{
ri->i_inline = 0;
- if (is_inode_flag_set(fi, FI_INLINE_XATTR))
+ if (is_inode_flag_set(inode, FI_INLINE_XATTR))
ri->i_inline |= F2FS_INLINE_XATTR;
- if (is_inode_flag_set(fi, FI_INLINE_DATA))
+ if (is_inode_flag_set(inode, FI_INLINE_DATA))
ri->i_inline |= F2FS_INLINE_DATA;
- if (is_inode_flag_set(fi, FI_INLINE_DENTRY))
+ if (is_inode_flag_set(inode, FI_INLINE_DENTRY))
ri->i_inline |= F2FS_INLINE_DENTRY;
- if (is_inode_flag_set(fi, FI_DATA_EXIST))
+ if (is_inode_flag_set(inode, FI_DATA_EXIST))
ri->i_inline |= F2FS_DATA_EXIST;
- if (is_inode_flag_set(fi, FI_INLINE_DOTS))
+ if (is_inode_flag_set(inode, FI_INLINE_DOTS))
ri->i_inline |= F2FS_INLINE_DOTS;
}
static inline int f2fs_has_inline_xattr(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_INLINE_XATTR);
+ return is_inode_flag_set(inode, FI_INLINE_XATTR);
}
static inline unsigned int addrs_per_inode(struct inode *inode)
@@ -1617,43 +1724,43 @@ static inline int inline_xattr_size(struct inode *inode)
static inline int f2fs_has_inline_data(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_INLINE_DATA);
+ return is_inode_flag_set(inode, FI_INLINE_DATA);
}
static inline void f2fs_clear_inline_inode(struct inode *inode)
{
- clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
- clear_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
+ clear_inode_flag(inode, FI_INLINE_DATA);
+ clear_inode_flag(inode, FI_DATA_EXIST);
}
static inline int f2fs_exist_data(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_DATA_EXIST);
+ return is_inode_flag_set(inode, FI_DATA_EXIST);
}
static inline int f2fs_has_inline_dots(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_INLINE_DOTS);
+ return is_inode_flag_set(inode, FI_INLINE_DOTS);
}
static inline bool f2fs_is_atomic_file(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_ATOMIC_FILE);
+ return is_inode_flag_set(inode, FI_ATOMIC_FILE);
}
static inline bool f2fs_is_volatile_file(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_VOLATILE_FILE);
+ return is_inode_flag_set(inode, FI_VOLATILE_FILE);
}
static inline bool f2fs_is_first_block_written(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
+ return is_inode_flag_set(inode, FI_FIRST_BLOCK_WRITTEN);
}
static inline bool f2fs_is_drop_cache(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_DROP_CACHE);
+ return is_inode_flag_set(inode, FI_DROP_CACHE);
}
static inline void *inline_data_addr(struct page *page)
@@ -1664,7 +1771,7 @@ static inline void *inline_data_addr(struct page *page)
static inline int f2fs_has_inline_dentry(struct inode *inode)
{
- return is_inode_flag_set(F2FS_I(inode), FI_INLINE_DENTRY);
+ return is_inode_flag_set(inode, FI_INLINE_DENTRY);
}
static inline void f2fs_dentry_kunmap(struct inode *dir, struct page *page)
@@ -1681,11 +1788,13 @@ static inline int is_file(struct inode *inode, int type)
static inline void set_file(struct inode *inode, int type)
{
F2FS_I(inode)->i_advise |= type;
+ f2fs_mark_inode_dirty_sync(inode);
}
static inline void clear_file(struct inode *inode, int type)
{
F2FS_I(inode)->i_advise &= ~type;
+ f2fs_mark_inode_dirty_sync(inode);
}
static inline int f2fs_readonly(struct super_block *sb)
@@ -1712,7 +1821,7 @@ static inline bool is_dot_dotdot(const struct qstr *str)
static inline bool f2fs_may_extent_tree(struct inode *inode)
{
if (!test_opt(F2FS_I_SB(inode), EXTENT_CACHE) ||
- is_inode_flag_set(F2FS_I(inode), FI_NO_EXTENT))
+ is_inode_flag_set(inode, FI_NO_EXTENT))
return false;
return S_ISREG(inode->i_mode);
@@ -1748,7 +1857,7 @@ static inline void *f2fs_kvzalloc(size_t size, gfp_t flags)
}
#define get_inode_mode(i) \
- ((is_inode_flag_set(F2FS_I(i), FI_ACL_MODE)) ? \
+ ((is_inode_flag_set(i, FI_ACL_MODE)) ? \
(F2FS_I(i)->i_acl_mode) : ((i)->i_mode))
/* get offset of first page in next direct node */
@@ -1763,7 +1872,7 @@ static inline void *f2fs_kvzalloc(size_t size, gfp_t flags)
int f2fs_sync_file(struct file *, loff_t, loff_t, int);
void truncate_data_blocks(struct dnode_of_data *);
int truncate_blocks(struct inode *, u64, bool);
-int f2fs_truncate(struct inode *, bool);
+int f2fs_truncate(struct inode *);
int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
int f2fs_setattr(struct dentry *, struct iattr *);
int truncate_hole(struct inode *, pgoff_t, pgoff_t);
@@ -1804,11 +1913,11 @@ struct page *init_inode_metadata(struct inode *, struct inode *,
const struct qstr *, struct page *);
void update_parent_metadata(struct inode *, struct inode *, unsigned int);
int room_for_filename(const void *, int, int);
-void f2fs_drop_nlink(struct inode *, struct inode *, struct page *);
-struct f2fs_dir_entry *f2fs_find_entry(struct inode *, struct qstr *,
+void f2fs_drop_nlink(struct inode *, struct inode *);
+struct f2fs_dir_entry *f2fs_find_entry(struct inode *, const struct qstr *,
struct page **);
struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
-ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
+ino_t f2fs_inode_by_name(struct inode *, const struct qstr *, struct page **);
void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
struct page *, struct inode *);
int update_dent_inode(struct inode *, struct inode *, const struct qstr *);
@@ -1832,6 +1941,8 @@ static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
/*
* super.c
*/
+int f2fs_inode_dirtied(struct inode *);
+void f2fs_inode_synced(struct inode *);
int f2fs_commit_super(struct f2fs_sb_info *, bool);
int f2fs_sync_fs(struct super_block *, int);
extern __printf(3, 4)
@@ -1865,11 +1976,11 @@ struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
void ra_node_page(struct f2fs_sb_info *, nid_t);
struct page *get_node_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_node_page_ra(struct page *, int);
-void sync_inode_page(struct dnode_of_data *);
void move_node_page(struct page *, int);
-int fsync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *,
- bool);
+int fsync_node_pages(struct f2fs_sb_info *, struct inode *,
+ struct writeback_control *, bool);
int sync_node_pages(struct f2fs_sb_info *, struct writeback_control *);
+void build_free_nids(struct f2fs_sb_info *);
bool alloc_nid(struct f2fs_sb_info *, nid_t *);
void alloc_nid_done(struct f2fs_sb_info *, nid_t);
void alloc_nid_failed(struct f2fs_sb_info *, nid_t);
@@ -1943,9 +2054,10 @@ void add_ino_entry(struct f2fs_sb_info *, nid_t, int type);
void remove_ino_entry(struct f2fs_sb_info *, nid_t, int type);
void release_ino_entry(struct f2fs_sb_info *, bool);
bool exist_written_data(struct f2fs_sb_info *, nid_t, int);
+int f2fs_sync_inode_meta(struct f2fs_sb_info *);
int acquire_orphan_inode(struct f2fs_sb_info *);
void release_orphan_inode(struct f2fs_sb_info *);
-void add_orphan_inode(struct f2fs_sb_info *, nid_t);
+void add_orphan_inode(struct inode *);
void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
int recover_orphan_inodes(struct f2fs_sb_info *);
int get_valid_checkpoint(struct f2fs_sb_info *);
@@ -1980,6 +2092,7 @@ struct page *get_new_data_page(struct inode *, struct page *, pgoff_t, bool);
int do_write_data_page(struct f2fs_io_info *);
int f2fs_map_blocks(struct inode *, struct f2fs_map_blocks *, int, int);
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *, u64, u64);
+void f2fs_set_page_dirty_nobuffers(struct page *);
void f2fs_invalidate_page(struct page *, unsigned int, unsigned int);
int f2fs_release_page(struct page *, gfp_t);
@@ -2011,7 +2124,7 @@ struct f2fs_stat_info {
unsigned long long hit_total, total_ext;
int ext_tree, zombie_tree, ext_node;
s64 ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, inmem_pages;
- unsigned int ndirty_dirs, ndirty_files;
+ unsigned int ndirty_dirs, ndirty_files, ndirty_all;
int nats, dirty_nats, sits, dirty_sits, fnids;
int total_count, utilization;
int bg_gc, wb_bios;
@@ -2180,7 +2293,6 @@ int f2fs_write_inline_data(struct inode *, struct page *);
bool recover_inline_data(struct inode *, struct page *);
struct f2fs_dir_entry *find_in_inline_dir(struct inode *,
struct fscrypt_name *, struct page **);
-struct f2fs_dir_entry *f2fs_parent_inline_dir(struct inode *, struct page **);
int make_empty_inline_dir(struct inode *inode, struct inode *, struct page *);
int f2fs_add_inline_entry(struct inode *, const struct qstr *, struct inode *,
nid_t, umode_t);
@@ -2205,6 +2317,7 @@ void f2fs_leave_shrinker(struct f2fs_sb_info *);
*/
unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *, int);
bool f2fs_init_extent_tree(struct inode *, struct f2fs_extent *);
+void f2fs_drop_extent_tree(struct inode *);
unsigned int f2fs_destroy_extent_node(struct inode *);
void f2fs_destroy_extent_tree(struct inode *);
bool f2fs_lookup_extent_cache(struct inode *, pgoff_t, struct extent_info *);
@@ -2240,6 +2353,26 @@ static inline int f2fs_sb_has_crypto(struct super_block *sb)
return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_ENCRYPT);
}
+static inline int f2fs_sb_mounted_hmsmr(struct super_block *sb)
+{
+ return F2FS_HAS_FEATURE(sb, F2FS_FEATURE_HMSMR);
+}
+
+static inline void set_opt_mode(struct f2fs_sb_info *sbi, unsigned int mt)
+{
+ clear_opt(sbi, ADAPTIVE);
+ clear_opt(sbi, LFS);
+
+ switch (mt) {
+ case F2FS_MOUNT_ADAPTIVE:
+ set_opt(sbi, ADAPTIVE);
+ break;
+ case F2FS_MOUNT_LFS:
+ set_opt(sbi, LFS);
+ break;
+ }
+}
+
static inline bool f2fs_may_encrypt(struct inode *inode)
{
#ifdef CONFIG_F2FS_FS_ENCRYPTION
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index f4c0086655c4..0e493f63ea41 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -21,6 +21,7 @@
#include <linux/mount.h>
#include <linux/pagevec.h>
#include <linux/uuid.h>
+#include <linux/file.h>
#include "f2fs.h"
#include "node.h"
@@ -81,7 +82,8 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma,
zero_user_segment(page, offset, PAGE_SIZE);
}
set_page_dirty(page);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
trace_f2fs_vm_page_mkwrite(page, DATA);
mapped:
@@ -171,22 +173,16 @@ static void try_to_fix_pino(struct inode *inode)
fi->xattr_ver = 0;
if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
get_parent_ino(inode, &pino)) {
- fi->i_pino = pino;
+ f2fs_i_pino_write(inode, pino);
file_got_pino(inode);
- up_write(&fi->i_sem);
-
- mark_inode_dirty_sync(inode);
- f2fs_write_inode(inode, NULL);
- } else {
- up_write(&fi->i_sem);
}
+ up_write(&fi->i_sem);
}
static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
int datasync, bool atomic)
{
struct inode *inode = file->f_mapping->host;
- struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
nid_t ino = inode->i_ino;
int ret = 0;
@@ -204,9 +200,9 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
/* if fdatasync is triggered, let's do in-place-update */
if (datasync || get_dirty_pages(inode) <= SM_I(sbi)->min_fsync_blocks)
- set_inode_flag(fi, FI_NEED_IPU);
+ set_inode_flag(inode, FI_NEED_IPU);
ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
- clear_inode_flag(fi, FI_NEED_IPU);
+ clear_inode_flag(inode, FI_NEED_IPU);
if (ret) {
trace_f2fs_sync_file_exit(inode, need_cp, datasync, ret);
@@ -214,7 +210,7 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
}
/* if the inode is dirty, let's recover all the time */
- if (!datasync) {
+ if (!datasync && !f2fs_skip_inode_update(inode)) {
f2fs_write_inode(inode, NULL);
goto go_write;
}
@@ -222,14 +218,14 @@ static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end,
/*
* if there is no written data, don't waste time writing recovery info.
*/
- if (!is_inode_flag_set(fi, FI_APPEND_WRITE) &&
+ if (!is_inode_flag_set(inode, FI_APPEND_WRITE) &&
!exist_written_data(sbi, ino, APPEND_INO)) {
/* it may call write_inode just prior to fsync */
if (need_inode_page_update(sbi, ino))
goto go_write;
- if (is_inode_flag_set(fi, FI_UPDATE_WRITE) ||
+ if (is_inode_flag_set(inode, FI_UPDATE_WRITE) ||
exist_written_data(sbi, ino, UPDATE_INO))
goto flush_out;
goto out;
@@ -239,9 +235,9 @@ go_write:
* Both fdatasync() and fsync() are recoverable after a
* sudden power-off.
*/
- down_read(&fi->i_sem);
+ down_read(&F2FS_I(inode)->i_sem);
need_cp = need_do_checkpoint(inode);
- up_read(&fi->i_sem);
+ up_read(&F2FS_I(inode)->i_sem);
if (need_cp) {
/* all the dirty node pages should be flushed for POR */
@@ -252,12 +248,12 @@ go_write:
* will be used only for fsynced inodes after checkpoint.
*/
try_to_fix_pino(inode);
- clear_inode_flag(fi, FI_APPEND_WRITE);
- clear_inode_flag(fi, FI_UPDATE_WRITE);
+ clear_inode_flag(inode, FI_APPEND_WRITE);
+ clear_inode_flag(inode, FI_UPDATE_WRITE);
goto out;
}
sync_nodes:
- ret = fsync_node_pages(sbi, ino, &wbc, atomic);
+ ret = fsync_node_pages(sbi, inode, &wbc, atomic);
if (ret)
goto out;
@@ -268,7 +264,7 @@ sync_nodes:
}
if (need_inode_block_update(sbi, ino)) {
- mark_inode_dirty_sync(inode);
+ f2fs_mark_inode_dirty_sync(inode);
f2fs_write_inode(inode, NULL);
goto sync_nodes;
}
@@ -279,10 +275,10 @@ sync_nodes:
/* once recovery info is written, don't need to track this */
remove_ino_entry(sbi, ino, APPEND_INO);
- clear_inode_flag(fi, FI_APPEND_WRITE);
+ clear_inode_flag(inode, FI_APPEND_WRITE);
flush_out:
remove_ino_entry(sbi, ino, UPDATE_INO);
- clear_inode_flag(fi, FI_UPDATE_WRITE);
+ clear_inode_flag(inode, FI_UPDATE_WRITE);
ret = f2fs_issue_flush(sbi);
f2fs_update_time(sbi, REQ_TIME);
out:
@@ -360,7 +356,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence)
for (; data_ofs < isize; data_ofs = (loff_t)pgofs << PAGE_SHIFT) {
set_new_dnode(&dn, inode, NULL, NULL, 0);
- err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
+ err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE);
if (err && err != -ENOENT) {
goto fail;
} else if (err == -ENOENT) {
@@ -487,8 +483,7 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
set_data_blkaddr(dn);
invalidate_blocks(sbi, blkaddr);
if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
- clear_inode_flag(F2FS_I(dn->inode),
- FI_FIRST_BLOCK_WRITTEN);
+ clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
nr_free++;
}
@@ -502,7 +497,6 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
dn->inode) + ofs;
f2fs_update_extent_cache_range(dn, fofs, 0, len);
dec_valid_block_count(sbi, dn->inode, nr_free);
- sync_inode_page(dn);
}
dn->ofs_in_node = ofs;
@@ -616,7 +610,7 @@ free_partial:
return err;
}
-int f2fs_truncate(struct inode *inode, bool lock)
+int f2fs_truncate(struct inode *inode)
{
int err;
@@ -633,12 +627,12 @@ int f2fs_truncate(struct inode *inode, bool lock)
return err;
}
- err = truncate_blocks(inode, i_size_read(inode), lock);
+ err = truncate_blocks(inode, i_size_read(inode), true);
if (err)
return err;
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- mark_inode_dirty(inode);
+ f2fs_mark_inode_dirty_sync(inode);
return 0;
}
@@ -654,7 +648,6 @@ int f2fs_getattr(struct vfsmount *mnt,
#ifdef CONFIG_F2FS_FS_POSIX_ACL
static void __setattr_copy(struct inode *inode, const struct iattr *attr)
{
- struct f2fs_inode_info *fi = F2FS_I(inode);
unsigned int ia_valid = attr->ia_valid;
if (ia_valid & ATTR_UID)
@@ -675,7 +668,7 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr)
if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
mode &= ~S_ISGID;
- set_acl_inode(fi, mode);
+ set_acl_inode(inode, mode);
}
}
#else
@@ -685,7 +678,6 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr)
int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
- struct f2fs_inode_info *fi = F2FS_I(inode);
int err;
err = inode_change_ok(inode, attr);
@@ -699,7 +691,7 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
if (attr->ia_size <= i_size_read(inode)) {
truncate_setsize(inode, attr->ia_size);
- err = f2fs_truncate(inode, true);
+ err = f2fs_truncate(inode);
if (err)
return err;
f2fs_balance_fs(F2FS_I_SB(inode), true);
@@ -724,13 +716,13 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr)
if (attr->ia_valid & ATTR_MODE) {
err = posix_acl_chmod(inode, get_inode_mode(inode));
- if (err || is_inode_flag_set(fi, FI_ACL_MODE)) {
- inode->i_mode = fi->i_acl_mode;
- clear_inode_flag(fi, FI_ACL_MODE);
+ if (err || is_inode_flag_set(inode, FI_ACL_MODE)) {
+ inode->i_mode = F2FS_I(inode)->i_acl_mode;
+ clear_inode_flag(inode, FI_ACL_MODE);
}
}
- mark_inode_dirty(inode);
+ f2fs_mark_inode_dirty_sync(inode);
return err;
}
@@ -859,79 +851,199 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len)
return ret;
}
-static int __exchange_data_block(struct inode *inode, pgoff_t src,
- pgoff_t dst, bool full)
+static int __read_out_blkaddrs(struct inode *inode, block_t *blkaddr,
+ int *do_replace, pgoff_t off, pgoff_t len)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct dnode_of_data dn;
- block_t new_addr;
- bool do_replace = false;
- int ret;
+ int ret, done, i;
+next_dnode:
set_new_dnode(&dn, inode, NULL, NULL, 0);
- ret = get_dnode_of_data(&dn, src, LOOKUP_NODE_RA);
+ ret = get_dnode_of_data(&dn, off, LOOKUP_NODE_RA);
if (ret && ret != -ENOENT) {
return ret;
} else if (ret == -ENOENT) {
- new_addr = NULL_ADDR;
- } else {
- new_addr = dn.data_blkaddr;
- if (!is_checkpointed_data(sbi, new_addr)) {
+ if (dn.max_level == 0)
+ return -ENOENT;
+ done = min((pgoff_t)ADDRS_PER_BLOCK - dn.ofs_in_node, len);
+ blkaddr += done;
+ do_replace += done;
+ goto next;
+ }
+
+ done = min((pgoff_t)ADDRS_PER_PAGE(dn.node_page, inode) -
+ dn.ofs_in_node, len);
+ for (i = 0; i < done; i++, blkaddr++, do_replace++, dn.ofs_in_node++) {
+ *blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
+ if (!is_checkpointed_data(sbi, *blkaddr)) {
+
+ if (test_opt(sbi, LFS)) {
+ f2fs_put_dnode(&dn);
+ return -ENOTSUPP;
+ }
+
/* do not invalidate this block address */
f2fs_update_data_blkaddr(&dn, NULL_ADDR);
- do_replace = true;
+ *do_replace = 1;
}
- f2fs_put_dnode(&dn);
}
+ f2fs_put_dnode(&dn);
+next:
+ len -= done;
+ off += done;
+ if (len)
+ goto next_dnode;
+ return 0;
+}
- if (new_addr == NULL_ADDR)
- return full ? truncate_hole(inode, dst, dst + 1) : 0;
+static int __roll_back_blkaddrs(struct inode *inode, block_t *blkaddr,
+ int *do_replace, pgoff_t off, int len)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct dnode_of_data dn;
+ int ret, i;
- if (do_replace) {
- struct page *ipage = get_node_page(sbi, inode->i_ino);
- struct node_info ni;
+ for (i = 0; i < len; i++, do_replace++, blkaddr++) {
+ if (*do_replace == 0)
+ continue;
- if (IS_ERR(ipage)) {
- ret = PTR_ERR(ipage);
- goto err_out;
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ ret = get_dnode_of_data(&dn, off + i, LOOKUP_NODE_RA);
+ if (ret) {
+ dec_valid_block_count(sbi, inode, 1);
+ invalidate_blocks(sbi, *blkaddr);
+ } else {
+ f2fs_update_data_blkaddr(&dn, *blkaddr);
}
+ f2fs_put_dnode(&dn);
+ }
+ return 0;
+}
- set_new_dnode(&dn, inode, ipage, NULL, 0);
- ret = f2fs_reserve_block(&dn, dst);
- if (ret)
- goto err_out;
+static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
+ block_t *blkaddr, int *do_replace,
+ pgoff_t src, pgoff_t dst, pgoff_t len, bool full)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(src_inode);
+ pgoff_t i = 0;
+ int ret;
+
+ while (i < len) {
+ if (blkaddr[i] == NULL_ADDR && !full) {
+ i++;
+ continue;
+ }
- truncate_data_blocks_range(&dn, 1);
+ if (do_replace[i] || blkaddr[i] == NULL_ADDR) {
+ struct dnode_of_data dn;
+ struct node_info ni;
+ size_t new_size;
+ pgoff_t ilen;
- get_node_info(sbi, dn.nid, &ni);
- f2fs_replace_block(sbi, &dn, dn.data_blkaddr, new_addr,
- ni.version, true, false);
- f2fs_put_dnode(&dn);
- } else {
- struct page *psrc, *pdst;
+ set_new_dnode(&dn, dst_inode, NULL, NULL, 0);
+ ret = get_dnode_of_data(&dn, dst + i, ALLOC_NODE);
+ if (ret)
+ return ret;
+
+ get_node_info(sbi, dn.nid, &ni);
+ ilen = min((pgoff_t)
+ ADDRS_PER_PAGE(dn.node_page, dst_inode) -
+ dn.ofs_in_node, len - i);
+ do {
+ dn.data_blkaddr = datablock_addr(dn.node_page,
+ dn.ofs_in_node);
+ truncate_data_blocks_range(&dn, 1);
+
+ if (do_replace[i]) {
+ f2fs_i_blocks_write(src_inode,
+ 1, false);
+ f2fs_i_blocks_write(dst_inode,
+ 1, true);
+ f2fs_replace_block(sbi, &dn, dn.data_blkaddr,
+ blkaddr[i], ni.version, true, false);
+
+ do_replace[i] = 0;
+ }
+ dn.ofs_in_node++;
+ i++;
+ new_size = (dst + i) << PAGE_SHIFT;
+ if (dst_inode->i_size < new_size)
+ f2fs_i_size_write(dst_inode, new_size);
+ } while ((do_replace[i] || blkaddr[i] == NULL_ADDR) && --ilen);
- psrc = get_lock_data_page(inode, src, true);
- if (IS_ERR(psrc))
- return PTR_ERR(psrc);
- pdst = get_new_data_page(inode, NULL, dst, true);
- if (IS_ERR(pdst)) {
+ f2fs_put_dnode(&dn);
+ } else {
+ struct page *psrc, *pdst;
+
+ psrc = get_lock_data_page(src_inode, src + i, true);
+ if (IS_ERR(psrc))
+ return PTR_ERR(psrc);
+ pdst = get_new_data_page(dst_inode, NULL, dst + i,
+ true);
+ if (IS_ERR(pdst)) {
+ f2fs_put_page(psrc, 1);
+ return PTR_ERR(pdst);
+ }
+ f2fs_copy_page(psrc, pdst);
+ set_page_dirty(pdst);
+ f2fs_put_page(pdst, 1);
f2fs_put_page(psrc, 1);
- return PTR_ERR(pdst);
- }
- f2fs_copy_page(psrc, pdst);
- set_page_dirty(pdst);
- f2fs_put_page(pdst, 1);
- f2fs_put_page(psrc, 1);
- return truncate_hole(inode, src, src + 1);
+ ret = truncate_hole(src_inode, src + i, src + i + 1);
+ if (ret)
+ return ret;
+ i++;
+ }
}
return 0;
+}
-err_out:
- if (!get_dnode_of_data(&dn, src, LOOKUP_NODE)) {
- f2fs_update_data_blkaddr(&dn, new_addr);
- f2fs_put_dnode(&dn);
+static int __exchange_data_block(struct inode *src_inode,
+ struct inode *dst_inode, pgoff_t src, pgoff_t dst,
+ pgoff_t len, bool full)
+{
+ block_t *src_blkaddr;
+ int *do_replace;
+ pgoff_t olen;
+ int ret;
+
+ while (len) {
+ olen = min((pgoff_t)4 * ADDRS_PER_BLOCK, len);
+
+ src_blkaddr = f2fs_kvzalloc(sizeof(block_t) * olen, GFP_KERNEL);
+ if (!src_blkaddr)
+ return -ENOMEM;
+
+ do_replace = f2fs_kvzalloc(sizeof(int) * olen, GFP_KERNEL);
+ if (!do_replace) {
+ kvfree(src_blkaddr);
+ return -ENOMEM;
+ }
+
+ ret = __read_out_blkaddrs(src_inode, src_blkaddr,
+ do_replace, src, olen);
+ if (ret)
+ goto roll_back;
+
+ ret = __clone_blkaddrs(src_inode, dst_inode, src_blkaddr,
+ do_replace, src, dst, olen, full);
+ if (ret)
+ goto roll_back;
+
+ src += olen;
+ dst += olen;
+ len -= olen;
+
+ kvfree(src_blkaddr);
+ kvfree(do_replace);
}
+ return 0;
+
+roll_back:
+ __roll_back_blkaddrs(src_inode, src_blkaddr, do_replace, src, len);
+ kvfree(src_blkaddr);
+ kvfree(do_replace);
return ret;
}
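The rewritten __exchange_data_block() now walks the range in bounded batches instead of taking one f2fs_lock_op() cycle per block. Back-of-envelope sizing for one batch, assuming the usual 4KB f2fs block and ADDRS_PER_BLOCK == 1018 direct pointers per node block (both are assumptions; check f2fs.h for the exact values):

	/*
	 * olen <= 4 * ADDRS_PER_BLOCK = 4072 blocks (~15.9 MB of file data),
	 * so the two scratch arrays cost at most about
	 *     4072 * (sizeof(block_t) + sizeof(int)) ~= 32 KB
	 * per iteration, regardless of how large the total range is.
	 */

On failure, the partially-applied batch is undone by __roll_back_blkaddrs() before the scratch buffers are freed.
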
@@ -939,16 +1051,15 @@ static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
- int ret = 0;
+ int ret;
- for (; end < nrpages; start++, end++) {
- f2fs_balance_fs(sbi, true);
- f2fs_lock_op(sbi);
- ret = __exchange_data_block(inode, end, start, true);
- f2fs_unlock_op(sbi);
- if (ret)
- break;
- }
+ f2fs_balance_fs(sbi, true);
+ f2fs_lock_op(sbi);
+
+ f2fs_drop_extent_tree(inode);
+
+ ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
+ f2fs_unlock_op(sbi);
return ret;
}
@@ -992,7 +1103,7 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
ret = truncate_blocks(inode, new_size, true);
if (!ret)
- i_size_write(inode, new_size);
+ f2fs_i_size_write(inode, new_size);
return ret;
}
@@ -1128,11 +1239,8 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len,
}
out:
- if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size) {
- i_size_write(inode, new_size);
- mark_inode_dirty(inode);
- update_inode_page(inode);
- }
+ if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
+ f2fs_i_size_write(inode, new_size);
return ret;
}
@@ -1140,7 +1248,7 @@ out:
static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- pgoff_t pg_start, pg_end, delta, nrpages, idx;
+ pgoff_t nr, pg_start, pg_end, delta, idx;
loff_t new_size;
int ret = 0;
@@ -1175,14 +1283,20 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
pg_start = offset >> PAGE_SHIFT;
pg_end = (offset + len) >> PAGE_SHIFT;
delta = pg_end - pg_start;
- nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+ idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+
+ while (!ret && idx > pg_start) {
+ nr = idx - pg_start;
+ if (nr > delta)
+ nr = delta;
+ idx -= nr;
- for (idx = nrpages - 1; idx >= pg_start && idx != -1; idx--) {
f2fs_lock_op(sbi);
- ret = __exchange_data_block(inode, idx, idx + delta, false);
+ f2fs_drop_extent_tree(inode);
+
+ ret = __exchange_data_block(inode, inode, idx,
+ idx + delta, nr, false);
f2fs_unlock_op(sbi);
- if (ret)
- break;
}
/* write out all moved pages, if possible */
@@ -1190,7 +1304,7 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
truncate_pagecache(inode, offset);
if (!ret)
- i_size_write(inode, new_size);
+ f2fs_i_size_write(inode, new_size);
return ret;
}
@@ -1238,11 +1352,8 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end;
}
- if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size) {
- i_size_write(inode, new_size);
- mark_inode_dirty(inode);
- update_inode_page(inode);
- }
+ if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size)
+ f2fs_i_size_write(inode, new_size);
return ret;
}
@@ -1285,7 +1396,7 @@ static long f2fs_fallocate(struct file *file, int mode,
if (!ret) {
inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- mark_inode_dirty(inode);
+ f2fs_mark_inode_dirty_sync(inode);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
}
@@ -1310,10 +1421,10 @@ static int f2fs_release_file(struct inode *inode, struct file *filp)
if (f2fs_is_atomic_file(inode))
drop_inmem_pages(inode);
if (f2fs_is_volatile_file(inode)) {
- clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
- set_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
+ clear_inode_flag(inode, FI_VOLATILE_FILE);
+ set_inode_flag(inode, FI_DROP_CACHE);
filemap_fdatawrite(inode->i_mapping);
- clear_inode_flag(F2FS_I(inode), FI_DROP_CACHE);
+ clear_inode_flag(inode, FI_DROP_CACHE);
}
return 0;
}
@@ -1376,9 +1487,8 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
fi->i_flags = flags;
inode_unlock(inode);
- f2fs_set_inode_flags(inode);
inode->i_ctime = CURRENT_TIME;
- mark_inode_dirty(inode);
+ f2fs_set_inode_flags(inode);
out:
mnt_drop_write_file(filp);
return ret;
@@ -1412,7 +1522,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
if (ret)
goto out;
- set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
+ set_inode_flag(inode, FI_ATOMIC_FILE);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
if (!get_dirty_pages(inode))
@@ -1423,7 +1533,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
inode->i_ino, get_dirty_pages(inode));
ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
if (ret)
- clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
+ clear_inode_flag(inode, FI_ATOMIC_FILE);
out:
inode_unlock(inode);
mnt_drop_write_file(filp);
@@ -1448,10 +1558,10 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
goto err_out;
if (f2fs_is_atomic_file(inode)) {
- clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
+ clear_inode_flag(inode, FI_ATOMIC_FILE);
ret = commit_inmem_pages(inode);
if (ret) {
- set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
+ set_inode_flag(inode, FI_ATOMIC_FILE);
goto err_out;
}
}
@@ -1484,7 +1594,7 @@ static int f2fs_ioc_start_volatile_write(struct file *filp)
if (ret)
goto out;
- set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
+ set_inode_flag(inode, FI_VOLATILE_FILE);
f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
out:
inode_unlock(inode);
@@ -1538,7 +1648,7 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
if (f2fs_is_atomic_file(inode))
drop_inmem_pages(inode);
if (f2fs_is_volatile_file(inode)) {
- clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE);
+ clear_inode_flag(inode, FI_VOLATILE_FILE);
ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
}
@@ -1871,7 +1981,7 @@ do_map:
continue;
}
- set_inode_flag(F2FS_I(inode), FI_DO_DEFRAG);
+ set_inode_flag(inode, FI_DO_DEFRAG);
idx = map.m_lblk;
while (idx < map.m_lblk + map.m_len && cnt < blk_per_seg) {
@@ -1896,14 +2006,14 @@ do_map:
if (idx < pg_end && cnt < blk_per_seg)
goto do_map;
- clear_inode_flag(F2FS_I(inode), FI_DO_DEFRAG);
+ clear_inode_flag(inode, FI_DO_DEFRAG);
err = filemap_fdatawrite(inode->i_mapping);
if (err)
goto out;
}
clear_out:
- clear_inode_flag(F2FS_I(inode), FI_DO_DEFRAG);
+ clear_inode_flag(inode, FI_DO_DEFRAG);
out:
inode_unlock(inode);
if (!err)
@@ -1959,6 +2069,133 @@ out:
return err;
}
+static int f2fs_move_file_range(struct file *file_in, loff_t pos_in,
+ struct file *file_out, loff_t pos_out, size_t len)
+{
+ struct inode *src = file_inode(file_in);
+ struct inode *dst = file_inode(file_out);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(src);
+ size_t olen = len, dst_max_i_size = 0;
+ size_t dst_osize;
+ int ret;
+
+ if (file_in->f_path.mnt != file_out->f_path.mnt ||
+ src->i_sb != dst->i_sb)
+ return -EXDEV;
+
+ if (unlikely(f2fs_readonly(src->i_sb)))
+ return -EROFS;
+
+ if (S_ISDIR(src->i_mode) || S_ISDIR(dst->i_mode))
+ return -EISDIR;
+
+ if (f2fs_encrypted_inode(src) || f2fs_encrypted_inode(dst))
+ return -EOPNOTSUPP;
+
+ inode_lock(src);
+ if (src != dst)
+ inode_lock(dst);
+
+ ret = -EINVAL;
+ if (pos_in + len > src->i_size || pos_in + len < pos_in)
+ goto out_unlock;
+ if (len == 0)
+ olen = len = src->i_size - pos_in;
+ if (pos_in + len == src->i_size)
+ len = ALIGN(src->i_size, F2FS_BLKSIZE) - pos_in;
+ if (len == 0) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ dst_osize = dst->i_size;
+ if (pos_out + olen > dst->i_size)
+ dst_max_i_size = pos_out + olen;
+
+ /* verify the end result is block aligned */
+ if (!IS_ALIGNED(pos_in, F2FS_BLKSIZE) ||
+ !IS_ALIGNED(pos_in + len, F2FS_BLKSIZE) ||
+ !IS_ALIGNED(pos_out, F2FS_BLKSIZE))
+ goto out_unlock;
+
+ ret = f2fs_convert_inline_inode(src);
+ if (ret)
+ goto out_unlock;
+
+ ret = f2fs_convert_inline_inode(dst);
+ if (ret)
+ goto out_unlock;
+
+ /* write out all dirty pages from offset */
+ ret = filemap_write_and_wait_range(src->i_mapping,
+ pos_in, pos_in + len);
+ if (ret)
+ goto out_unlock;
+
+ ret = filemap_write_and_wait_range(dst->i_mapping,
+ pos_out, pos_out + len);
+ if (ret)
+ goto out_unlock;
+
+ f2fs_balance_fs(sbi, true);
+ f2fs_lock_op(sbi);
+ ret = __exchange_data_block(src, dst, pos_in,
+ pos_out, len >> F2FS_BLKSIZE_BITS, false);
+
+ if (!ret) {
+ if (dst_max_i_size)
+ f2fs_i_size_write(dst, dst_max_i_size);
+ else if (dst_osize != dst->i_size)
+ f2fs_i_size_write(dst, dst_osize);
+ }
+ f2fs_unlock_op(sbi);
+out_unlock:
+ if (src != dst)
+ inode_unlock(dst);
+ inode_unlock(src);
+ return ret;
+}
+
+static int f2fs_ioc_move_range(struct file *filp, unsigned long arg)
+{
+ struct f2fs_move_range range;
+ struct fd dst;
+ int err;
+
+ if (!(filp->f_mode & FMODE_READ) ||
+ !(filp->f_mode & FMODE_WRITE))
+ return -EBADF;
+
+ if (copy_from_user(&range, (struct f2fs_move_range __user *)arg,
+ sizeof(range)))
+ return -EFAULT;
+
+ dst = fdget(range.dst_fd);
+ if (!dst.file)
+ return -EBADF;
+
+ if (!(dst.file->f_mode & FMODE_WRITE)) {
+ err = -EBADF;
+ goto err_out;
+ }
+
+ err = mnt_want_write_file(filp);
+ if (err)
+ goto err_out;
+
+ err = f2fs_move_file_range(filp, range.pos_in, dst.file,
+ range.pos_out, range.len);
+
+ mnt_drop_write_file(filp);
+
+ if (copy_to_user((struct f2fs_move_range __user *)arg,
+ &range, sizeof(range)))
+ err = -EFAULT;
+err_out:
+ fdput(dst);
+ return err;
+}
+
long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
@@ -1994,6 +2231,8 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return f2fs_ioc_write_checkpoint(filp, arg);
case F2FS_IOC_DEFRAGMENT:
return f2fs_ioc_defragment(filp, arg);
+ case F2FS_IOC_MOVE_RANGE:
+ return f2fs_ioc_move_range(filp, arg);
default:
return -ENOTTY;
}
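A userspace sketch of driving the new ioctl. The struct layout below is inferred from the fields f2fs_ioc_move_range() copies in and out above, and F2FS_IOC_MOVE_RANGE is assumed to come from the exported f2fs header -- verify both against your kernel before relying on them:

	#include <stdint.h>
	#include <sys/ioctl.h>

	/* inferred layout; must match the kernel's struct f2fs_move_range */
	struct f2fs_move_range {
		uint32_t dst_fd;	/* destination file descriptor */
		uint64_t pos_in;	/* byte offset in the source */
		uint64_t pos_out;	/* byte offset in the destination */
		uint64_t len;		/* bytes to move; 0 means "to EOF" */
	};

	static int move_range(int src_fd, int dst_fd,
			      uint64_t pos, uint64_t len)
	{
		struct f2fs_move_range r = {
			.dst_fd = dst_fd,
			.pos_in = pos,
			.pos_out = pos,
			.len = len,
		};
		return ioctl(src_fd, F2FS_IOC_MOVE_RANGE, &r);
	}

Per the checks in f2fs_move_file_range(), both descriptors must refer to regular, unencrypted files on the same f2fs mount, and pos_in, pos_out and pos_in + len must be block aligned (the length is padded automatically when the move runs to EOF).
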
@@ -2003,6 +2242,7 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
+ struct blk_plug plug;
ssize_t ret;
if (f2fs_encrypted_inode(inode) &&
@@ -2014,8 +2254,11 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
ret = generic_write_checks(iocb, from);
if (ret > 0) {
ret = f2fs_preallocate_blocks(iocb, from);
- if (!ret)
+ if (!ret) {
+ blk_start_plug(&plug);
ret = __generic_file_write_iter(iocb, from);
+ blk_finish_plug(&plug);
+ }
}
inode_unlock(inode);
@@ -2050,6 +2293,8 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case F2FS_IOC_WRITE_CHECKPOINT:
case F2FS_IOC_DEFRAGMENT:
break;
+ case F2FS_IOC_MOVE_RANGE:
+ break;
default:
return -ENOIOCTLCMD;
}
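The write path above now brackets __generic_file_write_iter() with a block plug. The pattern in isolation -- this is the stock block-layer API, nothing f2fs-specific:

	struct blk_plug plug;

	blk_start_plug(&plug);
	/* bios submitted here are held in a per-task list ... */
	blk_finish_plug(&plug);	/* ... and dispatched as one batch */

Holding the plug across a multi-page buffered write lets the bios generated for the preallocated range be merged before they reach the device.
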
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 38d56f678912..8f7fa326ce95 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -538,7 +538,8 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(inode),
.type = DATA,
- .rw = READ_SYNC,
+ .op = REQ_OP_READ,
+ .op_flags = READ_SYNC,
.encrypted_page = NULL,
};
struct dnode_of_data dn;
@@ -593,11 +594,11 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
/* write page */
lock_page(fio.encrypted_page);
- if (unlikely(!PageUptodate(fio.encrypted_page))) {
+ if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
err = -EIO;
goto put_page_out;
}
- if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi))) {
+ if (unlikely(!PageUptodate(fio.encrypted_page))) {
err = -EIO;
goto put_page_out;
}
@@ -612,14 +613,15 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
/* allocate block address */
f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
- fio.rw = WRITE_SYNC;
+ fio.op = REQ_OP_WRITE;
+ fio.op_flags = WRITE_SYNC;
fio.new_blkaddr = newaddr;
f2fs_submit_page_mbio(&fio);
f2fs_update_data_blkaddr(&dn, newaddr);
- set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
+ set_inode_flag(inode, FI_APPEND_WRITE);
if (page->index == 0)
- set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
+ set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
put_page_out:
f2fs_put_page(fio.encrypted_page, 1);
recover_block:
@@ -649,16 +651,28 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(inode),
.type = DATA,
- .rw = WRITE_SYNC,
+ .op = REQ_OP_WRITE,
+ .op_flags = WRITE_SYNC,
.page = page,
.encrypted_page = NULL,
};
+ bool is_dirty = PageDirty(page);
+ int err;
+
+retry:
set_page_dirty(page);
f2fs_wait_on_page_writeback(page, DATA, true);
if (clear_page_dirty_for_io(page))
inode_dec_dirty_pages(inode);
+
set_cold_data(page);
- do_write_data_page(&fio);
+
+ err = do_write_data_page(&fio);
+ if (err == -ENOMEM && is_dirty) {
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ goto retry;
+ }
+
clear_cold_data(page);
}
out:
@@ -730,7 +744,8 @@ next_step:
start_bidx = start_bidx_of_node(nofs, inode);
data_page = get_read_data_page(inode,
- start_bidx + ofs_in_node, READA, true);
+ start_bidx + ofs_in_node, REQ_RAHEAD,
+ true);
if (IS_ERR(data_page)) {
iput(inode);
continue;
@@ -744,12 +759,32 @@ next_step:
/* phase 3 */
inode = find_gc_inode(gc_list, dni.ino);
if (inode) {
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ bool locked = false;
+
+ if (S_ISREG(inode->i_mode)) {
+ if (!down_write_trylock(&fi->dio_rwsem[READ]))
+ continue;
+ if (!down_write_trylock(
+ &fi->dio_rwsem[WRITE])) {
+ up_write(&fi->dio_rwsem[READ]);
+ continue;
+ }
+ locked = true;
+ }
+
start_bidx = start_bidx_of_node(nofs, inode)
+ ofs_in_node;
if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
move_encrypted_block(inode, start_bidx);
else
move_data_page(inode, start_bidx, gc_type);
+
+ if (locked) {
+ up_write(&fi->dio_rwsem[WRITE]);
+ up_write(&fi->dio_rwsem[READ]);
+ }
+
stat_inc_data_blk_count(sbi, 1, gc_type);
}
}
@@ -798,6 +833,10 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
blk_start_plug(&plug);
for (segno = start_segno; segno < end_segno; segno++) {
+
+ if (get_valid_blocks(sbi, segno, 1) == 0)
+ continue;
+
/* find segment summary of victim */
sum_page = find_get_page(META_MAPPING(sbi),
GET_SUM_BLOCK(sbi, segno));
@@ -873,10 +912,13 @@ gc_more:
* enough free sections, we should flush dent/node blocks and do
* garbage collections.
*/
- if (__get_victim(sbi, &segno, gc_type) || prefree_segments(sbi))
+ if (__get_victim(sbi, &segno, gc_type) ||
+ prefree_segments(sbi)) {
write_checkpoint(sbi, &cpc);
- else if (has_not_enough_free_secs(sbi, 0))
+ segno = NULL_SEGNO;
+ } else if (has_not_enough_free_secs(sbi, 0)) {
write_checkpoint(sbi, &cpc);
+ }
}
if (segno == NULL_SEGNO && !__get_victim(sbi, &segno, gc_type))
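The GC data phase above now takes both direct-I/O semaphores with trylock before touching a regular file's blocks. Condensed, the idiom is:

	/* skip, rather than sleep on, inodes busy with direct I/O */
	if (!down_write_trylock(&fi->dio_rwsem[READ]))
		continue;
	if (!down_write_trylock(&fi->dio_rwsem[WRITE])) {
		up_write(&fi->dio_rwsem[READ]);
		continue;
	}

Blocking here would let one long-running direct I/O stall the whole GC pass, so a contended inode is simply left for a later round. The -ENOMEM retry in move_data_page() follows the same spirit: wait for write congestion to ease via congestion_wait(BLK_RW_ASYNC, HZ/50) and retry, but only if the page was dirty to begin with.
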
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index a4bb155dd00a..ccea8735de59 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -59,7 +59,8 @@ void read_inline_data(struct page *page, struct page *ipage)
memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
flush_dcache_page(page);
kunmap_atomic(dst_addr);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
}
bool truncate_inline_inode(struct page *ipage, u64 from)
@@ -73,7 +74,7 @@ bool truncate_inline_inode(struct page *ipage, u64 from)
f2fs_wait_on_page_writeback(ipage, NODE, true);
memset(addr + from, 0, MAX_INLINE_DATA - from);
-
+ set_page_dirty(ipage);
return true;
}
@@ -97,7 +98,8 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page)
else
read_inline_data(page, ipage);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
f2fs_put_page(ipage, 1);
unlock_page(page);
return 0;
@@ -108,7 +110,8 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
struct f2fs_io_info fio = {
.sbi = F2FS_I_SB(dn->inode),
.type = DATA,
- .rw = WRITE_SYNC | REQ_PRIO,
+ .op = REQ_OP_WRITE,
+ .op_flags = WRITE_SYNC | REQ_PRIO,
.page = page,
.encrypted_page = NULL,
};
@@ -138,7 +141,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
inode_dec_dirty_pages(dn->inode);
/* this converted inline_data should be recovered. */
- set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);
+ set_inode_flag(dn->inode, FI_APPEND_WRITE);
/* clear inline data and flag after data writeback */
truncate_inline_inode(dn->inode_page, 0);
@@ -146,7 +149,6 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
clear_out:
stat_dec_inline_inode(dn->inode);
f2fs_clear_inline_inode(dn->inode);
- sync_inode_page(dn);
f2fs_put_dnode(dn);
return 0;
}
@@ -212,11 +214,11 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
dst_addr = inline_data_addr(dn.inode_page);
memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
kunmap_atomic(src_addr);
+ set_page_dirty(dn.inode_page);
- set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
- set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
+ set_inode_flag(inode, FI_APPEND_WRITE);
+ set_inode_flag(inode, FI_DATA_EXIST);
- sync_inode_page(&dn);
clear_inline_node(dn.inode_page);
f2fs_put_dnode(&dn);
return 0;
@@ -252,10 +254,10 @@ process_inline:
dst_addr = inline_data_addr(ipage);
memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
- set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
- set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
+ set_inode_flag(inode, FI_INLINE_DATA);
+ set_inode_flag(inode, FI_DATA_EXIST);
- update_inode(inode, ipage);
+ set_page_dirty(ipage);
f2fs_put_page(ipage, 1);
return true;
}
@@ -266,7 +268,6 @@ process_inline:
if (!truncate_inline_inode(ipage, 0))
return false;
f2fs_clear_inline_inode(inode);
- update_inode(inode, ipage);
f2fs_put_page(ipage, 1);
} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
if (truncate_blocks(inode, 0, false))
@@ -288,8 +289,10 @@ struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
f2fs_hash_t namehash;
ipage = get_node_page(sbi, dir->i_ino);
- if (IS_ERR(ipage))
+ if (IS_ERR(ipage)) {
+ *res_page = ipage;
return NULL;
+ }
namehash = f2fs_dentry_hash(&name);
@@ -306,25 +309,6 @@ struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
return de;
}
-struct f2fs_dir_entry *f2fs_parent_inline_dir(struct inode *dir,
- struct page **p)
-{
- struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
- struct page *ipage;
- struct f2fs_dir_entry *de;
- struct f2fs_inline_dentry *dentry_blk;
-
- ipage = get_node_page(sbi, dir->i_ino);
- if (IS_ERR(ipage))
- return NULL;
-
- dentry_blk = inline_data_addr(ipage);
- de = &dentry_blk->dentry[1];
- *p = ipage;
- unlock_page(ipage);
- return de;
-}
-
int make_empty_inline_dir(struct inode *inode, struct inode *parent,
struct page *ipage)
{
@@ -339,10 +323,8 @@ int make_empty_inline_dir(struct inode *inode, struct inode *parent,
set_page_dirty(ipage);
/* update i_size to MAX_INLINE_DATA */
- if (i_size_read(inode) < MAX_INLINE_DATA) {
- i_size_write(inode, MAX_INLINE_DATA);
- set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
- }
+ if (i_size_read(inode) < MAX_INLINE_DATA)
+ f2fs_i_size_write(inode, MAX_INLINE_DATA);
return 0;
}
@@ -391,22 +373,19 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
NR_INLINE_DENTRY * F2FS_SLOT_LEN);
kunmap_atomic(dentry_blk);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
set_page_dirty(page);
/* clear inline dir and flag after data writeback */
truncate_inline_inode(ipage, 0);
stat_dec_inline_dir(dir);
- clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);
+ clear_inode_flag(dir, FI_INLINE_DENTRY);
- F2FS_I(dir)->i_current_depth = 1;
- if (i_size_read(dir) < PAGE_SIZE) {
- i_size_write(dir, PAGE_SIZE);
- set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
- }
-
- sync_inode_page(&dn);
+ f2fs_i_depth_write(dir, 1);
+ if (i_size_read(dir) < PAGE_SIZE)
+ f2fs_i_size_write(dir, PAGE_SIZE);
out:
f2fs_put_page(page, 1);
return err;
@@ -464,7 +443,6 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
struct f2fs_inline_dentry *inline_dentry)
{
struct f2fs_inline_dentry *backup_dentry;
- struct f2fs_inode_info *fi = F2FS_I(dir);
int err;
backup_dentry = f2fs_kmalloc(sizeof(struct f2fs_inline_dentry),
@@ -486,16 +464,15 @@ static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage,
lock_page(ipage);
stat_dec_inline_dir(dir);
- clear_inode_flag(fi, FI_INLINE_DENTRY);
- update_inode(dir, ipage);
+ clear_inode_flag(dir, FI_INLINE_DENTRY);
kfree(backup_dentry);
return 0;
recover:
lock_page(ipage);
memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA);
- fi->i_current_depth = 0;
- i_size_write(dir, MAX_INLINE_DATA);
- update_inode(dir, ipage);
+ f2fs_i_depth_write(dir, 0);
+ f2fs_i_size_write(dir, MAX_INLINE_DATA);
+ set_page_dirty(ipage);
f2fs_put_page(ipage, 1);
kfree(backup_dentry);
@@ -559,8 +536,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
/* we don't need to mark_inode_dirty now */
if (inode) {
- F2FS_I(inode)->i_pino = dir->i_ino;
- update_inode(inode, page);
+ f2fs_i_pino_write(inode, dir->i_ino);
f2fs_put_page(page, 1);
}
@@ -568,11 +544,6 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
fail:
if (inode)
up_write(&F2FS_I(inode)->i_sem);
-
- if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
- update_inode(dir, ipage);
- clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
- }
out:
f2fs_put_page(ipage, 1);
return err;
@@ -596,13 +567,13 @@ void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
&inline_dentry->dentry_bitmap);
set_page_dirty(page);
+ f2fs_put_page(page, 1);
dir->i_ctime = dir->i_mtime = CURRENT_TIME;
+ f2fs_mark_inode_dirty_sync(dir);
if (inode)
- f2fs_drop_nlink(dir, inode, page);
-
- f2fs_put_page(page, 1);
+ f2fs_drop_nlink(dir, inode);
}
bool f2fs_empty_inline_dir(struct inode *dir)
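Several hunks in this series replace bare SetPageUptodate(page) calls with a PageUptodate() check first. Factored out as a helper for illustration (the helper name is ours, not the kernel's):

	static inline void f2fs_mark_page_uptodate(struct page *page)
	{
		/* SetPageUptodate() implies a write barrier plus an atomic
		 * bit op; skip both when the flag is already set. */
		if (!PageUptodate(page))
			SetPageUptodate(page);
	}
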
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 2e68adab0d64..9ac5efc15347 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -18,6 +18,13 @@
#include <trace/events/f2fs.h>
+void f2fs_mark_inode_dirty_sync(struct inode *inode)
+{
+ if (f2fs_inode_dirtied(inode))
+ return;
+ mark_inode_dirty_sync(inode);
+}
+
void f2fs_set_inode_flags(struct inode *inode)
{
unsigned int flags = F2FS_I(inode)->i_flags;
@@ -35,6 +42,7 @@ void f2fs_set_inode_flags(struct inode *inode)
new_fl |= S_DIRSYNC;
inode_set_flags(inode, new_fl,
S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+ f2fs_mark_inode_dirty_sync(inode);
}
static void __get_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
@@ -85,8 +93,8 @@ static void __recover_inline_status(struct inode *inode, struct page *ipage)
if (*start++) {
f2fs_wait_on_page_writeback(ipage, NODE, true);
- set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
- set_raw_inline(F2FS_I(inode), F2FS_INODE(ipage));
+ set_inode_flag(inode, FI_DATA_EXIST);
+ set_raw_inline(inode, F2FS_INODE(ipage));
set_page_dirty(ipage);
return;
}
@@ -141,7 +149,7 @@ static int do_read_inode(struct inode *inode)
if (f2fs_init_extent_tree(inode, &ri->i_ext))
set_page_dirty(node_page);
- get_inline_info(fi, ri);
+ get_inline_info(inode, ri);
/* check data exist */
if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
@@ -151,7 +159,10 @@ static int do_read_inode(struct inode *inode)
__get_inode_rdev(inode, ri);
if (__written_first_block(ri))
- set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
+ set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+
+ if (!need_inode_block_update(sbi, inode->i_ino))
+ fi->last_disk_size = inode->i_size;
f2fs_put_page(node_page, 1);
@@ -227,6 +238,8 @@ int update_inode(struct inode *inode, struct page *node_page)
{
struct f2fs_inode *ri;
+ f2fs_inode_synced(inode);
+
f2fs_wait_on_page_writeback(node_page, NODE, true);
ri = F2FS_INODE(node_page);
@@ -244,7 +257,7 @@ int update_inode(struct inode *inode, struct page *node_page)
&ri->i_ext);
else
memset(&ri->i_ext, 0, sizeof(ri->i_ext));
- set_raw_inline(F2FS_I(inode), ri);
+ set_raw_inline(inode, ri);
ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
@@ -261,7 +274,6 @@ int update_inode(struct inode *inode, struct page *node_page)
__set_inode_rdev(inode, ri);
set_cold_node(inode, node_page);
- clear_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
/* deleted inode */
if (inode->i_nlink == 0)
@@ -285,6 +297,7 @@ retry:
} else if (err != -ENOENT) {
f2fs_stop_checkpoint(sbi, false);
}
+ f2fs_inode_synced(inode);
return 0;
}
ret = update_inode(inode, node_page);
@@ -300,7 +313,7 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
inode->i_ino == F2FS_META_INO(sbi))
return 0;
- if (!is_inode_flag_set(F2FS_I(inode), FI_DIRTY_INODE))
+ if (!is_inode_flag_set(inode, FI_DIRTY_INODE))
return 0;
/*
@@ -318,8 +331,7 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
void f2fs_evict_inode(struct inode *inode)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct f2fs_inode_info *fi = F2FS_I(inode);
- nid_t xnid = fi->i_xattr_nid;
+ nid_t xnid = F2FS_I(inode)->i_xattr_nid;
int err = 0;
/* some remained atomic pages should discarded */
@@ -341,12 +353,17 @@ void f2fs_evict_inode(struct inode *inode)
if (inode->i_nlink || is_bad_inode(inode))
goto no_delete;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ if (time_to_inject(FAULT_EVICT_INODE))
+ goto no_delete;
+#endif
+
sb_start_intwrite(inode->i_sb);
- set_inode_flag(fi, FI_NO_ALLOC);
+ set_inode_flag(inode, FI_NO_ALLOC);
i_size_write(inode, 0);
retry:
if (F2FS_HAS_BLOCKS(inode))
- err = f2fs_truncate(inode, true);
+ err = f2fs_truncate(inode);
if (!err) {
f2fs_lock_op(sbi);
@@ -360,6 +377,8 @@ retry:
goto retry;
}
+ if (err)
+ update_inode_page(inode);
sb_end_intwrite(inode->i_sb);
no_delete:
stat_dec_inline_xattr(inode);
@@ -369,13 +388,13 @@ no_delete:
invalidate_mapping_pages(NODE_MAPPING(sbi), inode->i_ino, inode->i_ino);
if (xnid)
invalidate_mapping_pages(NODE_MAPPING(sbi), xnid, xnid);
- if (is_inode_flag_set(fi, FI_APPEND_WRITE))
+ if (is_inode_flag_set(inode, FI_APPEND_WRITE))
add_ino_entry(sbi, inode->i_ino, APPEND_INO);
- if (is_inode_flag_set(fi, FI_UPDATE_WRITE))
+ if (is_inode_flag_set(inode, FI_UPDATE_WRITE))
add_ino_entry(sbi, inode->i_ino, UPDATE_INO);
- if (is_inode_flag_set(fi, FI_FREE_NID)) {
+ if (is_inode_flag_set(inode, FI_FREE_NID)) {
alloc_nid_failed(sbi, inode->i_ino);
- clear_inode_flag(fi, FI_FREE_NID);
+ clear_inode_flag(inode, FI_FREE_NID);
}
f2fs_bug_on(sbi, err &&
!exist_written_data(sbi, inode->i_ino, ORPHAN_INO));
@@ -407,11 +426,11 @@ void handle_failed_inode(struct inode *inode)
f2fs_msg(sbi->sb, KERN_WARNING,
"Too many orphan inodes, run fsck to fix.");
} else {
- add_orphan_inode(sbi, inode->i_ino);
+ add_orphan_inode(inode);
}
alloc_nid_done(sbi, inode->i_ino);
} else {
- set_inode_flag(F2FS_I(inode), FI_FREE_NID);
+ set_inode_flag(inode, FI_FREE_NID);
}
f2fs_unlock_op(sbi);
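f2fs_mark_inode_dirty_sync() above only falls through to the VFS mark_inode_dirty_sync() when the inode was not already being tracked. A rough sketch of the assumed contract of the new pair -- the real f2fs_inode_dirtied()/f2fs_inode_synced() live elsewhere in the series and also maintain per-superblock dirty-inode accounting under a spinlock:

	/* sketch only: returns true if the inode was already tracked dirty */
	static bool f2fs_inode_dirtied(struct inode *inode)
	{
		if (is_inode_flag_set(inode, FI_DIRTY_INODE))
			return true;
		set_inode_flag(inode, FI_DIRTY_INODE);
		return false;
	}

	/* sketch only: update_inode() calls this once the copy hits the page */
	static void f2fs_inode_synced(struct inode *inode)
	{
		clear_inode_flag(inode, FI_DIRTY_INODE);
	}

This replaces the sync_inode_page()/update_inode_page() calls scattered through the old code with a single FI_DIRTY_INODE-driven writeback point.
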
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 324ed3812f30..73fa356f8fbb 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -60,10 +60,14 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
if (f2fs_encrypted_inode(dir) && f2fs_may_encrypt(inode))
f2fs_set_encrypted_inode(inode);
+ set_inode_flag(inode, FI_NEW_INODE);
+
+ if (test_opt(sbi, INLINE_XATTR))
+ set_inode_flag(inode, FI_INLINE_XATTR);
if (test_opt(sbi, INLINE_DATA) && f2fs_may_inline_data(inode))
- set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
+ set_inode_flag(inode, FI_INLINE_DATA);
if (f2fs_may_inline_dentry(inode))
- set_inode_flag(F2FS_I(inode), FI_INLINE_DENTRY);
+ set_inode_flag(inode, FI_INLINE_DENTRY);
f2fs_init_extent_tree(inode, NULL);
@@ -72,14 +76,13 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
stat_inc_inline_dir(inode);
trace_f2fs_new_inode(inode, 0);
- mark_inode_dirty(inode);
return inode;
fail:
trace_f2fs_new_inode(inode, err);
make_bad_inode(inode);
if (nid_free)
- set_inode_flag(F2FS_I(inode), FI_FREE_NID);
+ set_inode_flag(inode, FI_FREE_NID);
iput(inode);
return ERR_PTR(err);
}
@@ -177,7 +180,7 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
inode->i_ctime = CURRENT_TIME;
ihold(inode);
- set_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ set_inode_flag(inode, FI_INC_LINK);
f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
if (err)
@@ -190,7 +193,7 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
f2fs_sync_fs(sbi->sb, 1);
return 0;
out:
- clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ clear_inode_flag(inode, FI_INC_LINK);
iput(inode);
f2fs_unlock_op(sbi);
return err;
@@ -199,9 +202,13 @@ out:
struct dentry *f2fs_get_parent(struct dentry *child)
{
struct qstr dotdot = QSTR_INIT("..", 2);
- unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot);
- if (!ino)
+ struct page *page;
+ unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot, &page);
+ if (!ino) {
+ if (IS_ERR(page))
+ return ERR_CAST(page);
return ERR_PTR(-ENOENT);
+ }
return d_obtain_alias(f2fs_iget(child->d_sb, ino));
}
@@ -229,6 +236,9 @@ static int __recover_dot_dentries(struct inode *dir, nid_t pino)
if (de) {
f2fs_dentry_kunmap(dir, page);
f2fs_put_page(page, 0);
+ } else if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto out;
} else {
err = __f2fs_add_link(dir, &dot, NULL, dir->i_ino, S_IFDIR);
if (err)
@@ -239,14 +249,14 @@ static int __recover_dot_dentries(struct inode *dir, nid_t pino)
if (de) {
f2fs_dentry_kunmap(dir, page);
f2fs_put_page(page, 0);
+ } else if (IS_ERR(page)) {
+ err = PTR_ERR(page);
} else {
err = __f2fs_add_link(dir, &dotdot, NULL, pino, S_IFDIR);
}
out:
- if (!err) {
- clear_inode_flag(F2FS_I(dir), FI_INLINE_DOTS);
- mark_inode_dirty(dir);
- }
+ if (!err)
+ clear_inode_flag(dir, FI_INLINE_DOTS);
f2fs_unlock_op(sbi);
return err;
@@ -281,8 +291,11 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
return ERR_PTR(-ENAMETOOLONG);
de = f2fs_find_entry(dir, &dentry->d_name, &page);
- if (!de)
+ if (!de) {
+ if (IS_ERR(page))
+ return (struct dentry *)page;
return d_splice_alias(inode, dentry);
+ }
ino = le32_to_cpu(de->ino);
f2fs_dentry_kunmap(dir, page);
@@ -329,8 +342,11 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
trace_f2fs_unlink_enter(dir, dentry);
de = f2fs_find_entry(dir, &dentry->d_name, &page);
- if (!de)
+ if (!de) {
+ if (IS_ERR(page))
+ err = PTR_ERR(page);
goto fail;
+ }
f2fs_balance_fs(sbi, true);
@@ -345,9 +361,6 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
f2fs_delete_entry(de, page, dir, inode);
f2fs_unlock_op(sbi);
- /* In order to evict this inode, we set it dirty */
- mark_inode_dirty(inode);
-
if (IS_DIRSYNC(dir))
f2fs_sync_fs(sbi->sb, 1);
fail:
@@ -492,7 +505,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
f2fs_balance_fs(sbi, true);
- set_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ set_inode_flag(inode, FI_INC_LINK);
f2fs_lock_op(sbi);
err = f2fs_add_link(dentry, inode);
if (err)
@@ -509,7 +522,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
return 0;
out_fail:
- clear_inode_flag(F2FS_I(inode), FI_INC_LINK);
+ clear_inode_flag(inode, FI_INC_LINK);
handle_failed_inode(inode);
return err;
}
@@ -592,17 +605,17 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
* add this non-linked tmpfile to the orphan list, so that all of its
* unused data can be removed after an abnormal power-off.
*/
- add_orphan_inode(sbi, inode->i_ino);
- f2fs_unlock_op(sbi);
-
+ add_orphan_inode(inode);
alloc_nid_done(sbi, inode->i_ino);
if (whiteout) {
- inode_dec_link_count(inode);
+ f2fs_i_links_write(inode, false);
*whiteout = inode;
} else {
d_tmpfile(dentry, inode);
}
+ /* link_count was changed by d_tmpfile as well. */
+ f2fs_unlock_op(sbi);
unlock_new_inode(inode);
return 0;
@@ -652,14 +665,19 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
- if (!old_entry)
+ if (!old_entry) {
+ if (IS_ERR(old_page))
+ err = PTR_ERR(old_page);
goto out;
+ }
if (S_ISDIR(old_inode->i_mode)) {
- err = -EIO;
old_dir_entry = f2fs_parent_dir(old_inode, &old_dir_page);
- if (!old_dir_entry)
+ if (!old_dir_entry) {
+ if (IS_ERR(old_dir_page))
+ err = PTR_ERR(old_dir_page);
goto out_old;
+ }
}
if (flags & RENAME_WHITEOUT) {
@@ -677,8 +695,11 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
err = -ENOENT;
new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name,
&new_page);
- if (!new_entry)
+ if (!new_entry) {
+ if (IS_ERR(new_page))
+ err = PTR_ERR(new_page);
goto out_whiteout;
+ }
f2fs_balance_fs(sbi, true);
@@ -700,19 +721,14 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
new_inode->i_ctime = CURRENT_TIME;
down_write(&F2FS_I(new_inode)->i_sem);
if (old_dir_entry)
- drop_nlink(new_inode);
- drop_nlink(new_inode);
+ f2fs_i_links_write(new_inode, false);
+ f2fs_i_links_write(new_inode, false);
up_write(&F2FS_I(new_inode)->i_sem);
- mark_inode_dirty(new_inode);
-
if (!new_inode->i_nlink)
- add_orphan_inode(sbi, new_inode->i_ino);
+ add_orphan_inode(new_inode);
else
release_orphan_inode(sbi);
-
- update_inode_page(old_inode);
- update_inode_page(new_inode);
} else {
f2fs_balance_fs(sbi, true);
@@ -724,10 +740,8 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
goto out_whiteout;
}
- if (old_dir_entry) {
- inc_nlink(new_dir);
- update_inode_page(new_dir);
- }
+ if (old_dir_entry)
+ f2fs_i_links_write(new_dir, true);
/*
* old entry and new entry can locate in the same inline
@@ -743,7 +757,9 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
old_entry = f2fs_find_entry(old_dir,
&old_dentry->d_name, &old_page);
if (!old_entry) {
- err = -EIO;
+ err = -ENOENT;
+ if (IS_ERR(old_page))
+ err = PTR_ERR(old_page);
f2fs_unlock_op(sbi);
goto out_whiteout;
}
@@ -757,13 +773,13 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
up_write(&F2FS_I(old_inode)->i_sem);
old_inode->i_ctime = CURRENT_TIME;
- mark_inode_dirty(old_inode);
+ f2fs_mark_inode_dirty_sync(old_inode);
f2fs_delete_entry(old_entry, old_page, old_dir, NULL);
if (whiteout) {
whiteout->i_state |= I_LINKABLE;
- set_inode_flag(F2FS_I(whiteout), FI_INC_LINK);
+ set_inode_flag(whiteout, FI_INC_LINK);
err = f2fs_add_link(old_dentry, whiteout);
if (err)
goto put_out_dir;
@@ -775,14 +791,11 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (old_dir != new_dir && !whiteout) {
f2fs_set_link(old_inode, old_dir_entry,
old_dir_page, new_dir);
- update_inode_page(old_inode);
} else {
f2fs_dentry_kunmap(old_inode, old_dir_page);
f2fs_put_page(old_dir_page, 0);
}
- drop_nlink(old_dir);
- mark_inode_dirty(old_dir);
- update_inode_page(old_dir);
+ f2fs_i_links_write(old_dir, false);
}
f2fs_unlock_op(sbi);
@@ -832,29 +845,39 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
return -EPERM;
old_entry = f2fs_find_entry(old_dir, &old_dentry->d_name, &old_page);
- if (!old_entry)
+ if (!old_entry) {
+ if (IS_ERR(old_page))
+ err = PTR_ERR(old_page);
goto out;
+ }
new_entry = f2fs_find_entry(new_dir, &new_dentry->d_name, &new_page);
- if (!new_entry)
+ if (!new_entry) {
+ if (IS_ERR(new_page))
+ err = PTR_ERR(new_page);
goto out_old;
+ }
/* prepare for updating ".." directory entry info later */
if (old_dir != new_dir) {
if (S_ISDIR(old_inode->i_mode)) {
- err = -EIO;
old_dir_entry = f2fs_parent_dir(old_inode,
&old_dir_page);
- if (!old_dir_entry)
+ if (!old_dir_entry) {
+ if (IS_ERR(old_dir_page))
+ err = PTR_ERR(old_dir_page);
goto out_new;
+ }
}
if (S_ISDIR(new_inode->i_mode)) {
- err = -EIO;
new_dir_entry = f2fs_parent_dir(new_inode,
&new_dir_page);
- if (!new_dir_entry)
+ if (!new_dir_entry) {
+ if (IS_ERR(new_dir_page))
+ err = PTR_ERR(new_dir_page);
goto out_old_dir;
+ }
}
}
@@ -904,19 +927,13 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
file_lost_pino(old_inode);
up_write(&F2FS_I(old_inode)->i_sem);
- update_inode_page(old_inode);
-
old_dir->i_ctime = CURRENT_TIME;
if (old_nlink) {
down_write(&F2FS_I(old_dir)->i_sem);
- if (old_nlink < 0)
- drop_nlink(old_dir);
- else
- inc_nlink(old_dir);
+ f2fs_i_links_write(old_dir, old_nlink > 0);
up_write(&F2FS_I(old_dir)->i_sem);
}
- mark_inode_dirty(old_dir);
- update_inode_page(old_dir);
+ f2fs_mark_inode_dirty_sync(old_dir);
/* update directory entry info of new dir inode */
f2fs_set_link(new_dir, new_entry, new_page, old_inode);
@@ -925,19 +942,13 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
file_lost_pino(new_inode);
up_write(&F2FS_I(new_inode)->i_sem);
- update_inode_page(new_inode);
-
new_dir->i_ctime = CURRENT_TIME;
if (new_nlink) {
down_write(&F2FS_I(new_dir)->i_sem);
- if (new_nlink < 0)
- drop_nlink(new_dir);
- else
- inc_nlink(new_dir);
+ f2fs_i_links_write(new_dir, new_nlink > 0);
up_write(&F2FS_I(new_dir)->i_sem);
}
- mark_inode_dirty(new_dir);
- update_inode_page(new_dir);
+ f2fs_mark_inode_dirty_sync(new_dir);
f2fs_unlock_op(sbi);
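Every f2fs_find_entry()/f2fs_parent_dir() caller in namei.c now distinguishes "not found" from "lookup failed": when the dentry result is NULL, the page pointer doubles as an errno carrier, so an -EIO from a failed node-page read is no longer folded into -ENOENT. The caller idiom, condensed:

	de = f2fs_find_entry(dir, &dentry->d_name, &page);
	if (!de) {
		err = IS_ERR(page) ? PTR_ERR(page) : -ENOENT;
		goto fail;
	}
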
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 1f21aae80c40..b2fa4b615925 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -52,6 +52,10 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
PAGE_SHIFT;
res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
+ if (excess_cached_nats(sbi))
+ res = false;
+ if (nm_i->nat_cnt > DEF_NAT_CACHE_THRESHOLD)
+ res = false;
} else if (type == DIRTY_DENTS) {
if (sbi->sb->s_bdi->wb.dirty_exceeded)
return false;
@@ -202,14 +206,14 @@ int need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
struct nat_entry *e;
bool need = false;
- down_read(&nm_i->nat_tree_lock);
+ percpu_down_read(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, nid);
if (e) {
if (!get_nat_flag(e, IS_CHECKPOINTED) &&
!get_nat_flag(e, HAS_FSYNCED_INODE))
need = true;
}
- up_read(&nm_i->nat_tree_lock);
+ percpu_up_read(&nm_i->nat_tree_lock);
return need;
}
@@ -219,11 +223,11 @@ bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
struct nat_entry *e;
bool is_cp = true;
- down_read(&nm_i->nat_tree_lock);
+ percpu_down_read(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, nid);
if (e && !get_nat_flag(e, IS_CHECKPOINTED))
is_cp = false;
- up_read(&nm_i->nat_tree_lock);
+ percpu_up_read(&nm_i->nat_tree_lock);
return is_cp;
}
@@ -233,13 +237,13 @@ bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
struct nat_entry *e;
bool need_update = true;
- down_read(&nm_i->nat_tree_lock);
+ percpu_down_read(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, ino);
if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
(get_nat_flag(e, IS_CHECKPOINTED) ||
get_nat_flag(e, HAS_FSYNCED_INODE)))
need_update = false;
- up_read(&nm_i->nat_tree_lock);
+ percpu_up_read(&nm_i->nat_tree_lock);
return need_update;
}
@@ -280,7 +284,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct nat_entry *e;
- down_write(&nm_i->nat_tree_lock);
+ percpu_down_write(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, ni->nid);
if (!e) {
e = grab_nat_entry(nm_i, ni->nid);
@@ -330,7 +334,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
set_nat_flag(e, HAS_FSYNCED_INODE, true);
set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
}
- up_write(&nm_i->nat_tree_lock);
+ percpu_up_write(&nm_i->nat_tree_lock);
}
int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
@@ -338,8 +342,7 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
struct f2fs_nm_info *nm_i = NM_I(sbi);
int nr = nr_shrink;
- if (!down_write_trylock(&nm_i->nat_tree_lock))
- return 0;
+ percpu_down_write(&nm_i->nat_tree_lock);
while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
struct nat_entry *ne;
@@ -348,7 +351,7 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
__del_from_nat_cache(nm_i, ne);
nr_shrink--;
}
- up_write(&nm_i->nat_tree_lock);
+ percpu_up_write(&nm_i->nat_tree_lock);
return nr - nr_shrink;
}
@@ -370,13 +373,13 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
ni->nid = nid;
/* Check nat cache */
- down_read(&nm_i->nat_tree_lock);
+ percpu_down_read(&nm_i->nat_tree_lock);
e = __lookup_nat_cache(nm_i, nid);
if (e) {
ni->ino = nat_get_ino(e);
ni->blk_addr = nat_get_blkaddr(e);
ni->version = nat_get_version(e);
- up_read(&nm_i->nat_tree_lock);
+ percpu_up_read(&nm_i->nat_tree_lock);
return;
}
@@ -400,11 +403,11 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
node_info_from_raw_nat(ni, &ne);
f2fs_put_page(page, 1);
cache:
- up_read(&nm_i->nat_tree_lock);
+ percpu_up_read(&nm_i->nat_tree_lock);
/* cache nat entry */
- down_write(&nm_i->nat_tree_lock);
+ percpu_down_write(&nm_i->nat_tree_lock);
cache_nat_entry(sbi, nid, &ne);
- up_write(&nm_i->nat_tree_lock);
+ percpu_up_write(&nm_i->nat_tree_lock);
}
/*
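nat_tree_lock is converted from an rwsem to a percpu rwsem throughout node.c. The general shape of the API used above:

	percpu_init_rwsem(&nm_i->nat_tree_lock);

	percpu_down_read(&nm_i->nat_tree_lock);	/* per-CPU fast path */
	/* ... NAT cache lookup ... */
	percpu_up_read(&nm_i->nat_tree_lock);

	percpu_down_write(&nm_i->nat_tree_lock);	/* waits out all readers */
	/* ... NAT cache update ... */
	percpu_up_write(&nm_i->nat_tree_lock);

The read-side fast path is roughly a per-CPU counter increment with preemption disabled, so concurrent NAT lookups stop bouncing a shared cacheline; the price is that writers must wait out an RCU grace period, which suits this mostly-read lock. Note that try_to_free_nats() loses its trylock in the process and now blocks -- presumably because the percpu variant offered no writer trylock at the time.
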
@@ -646,6 +649,7 @@ release_out:
if (err == -ENOENT) {
dn->cur_level = i;
dn->max_level = level;
+ dn->ofs_in_node = offset[level];
}
return err;
}
@@ -670,8 +674,7 @@ static void truncate_node(struct dnode_of_data *dn)
if (dn->nid == dn->inode->i_ino) {
remove_orphan_inode(sbi, dn->nid);
dec_valid_inode_count(sbi);
- } else {
- sync_inode_page(dn);
+ f2fs_inode_synced(dn->inode);
}
invalidate:
clear_node_page_dirty(dn->node_page);
@@ -953,7 +956,7 @@ int truncate_xattr_node(struct inode *inode, struct page *page)
if (IS_ERR(npage))
return PTR_ERR(npage);
- F2FS_I(inode)->i_xattr_nid = 0;
+ f2fs_i_xnid_write(inode, 0);
/* need to do checkpoint during fsync */
F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));
@@ -1019,7 +1022,7 @@ struct page *new_node_page(struct dnode_of_data *dn,
struct page *page;
int err;
- if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
+ if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return ERR_PTR(-EPERM);
page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false);
@@ -1042,21 +1045,16 @@ struct page *new_node_page(struct dnode_of_data *dn,
f2fs_wait_on_page_writeback(page, NODE, true);
fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
set_cold_node(dn->inode, page);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
if (set_page_dirty(page))
dn->node_changed = true;
if (f2fs_has_xattr_block(ofs))
- F2FS_I(dn->inode)->i_xattr_nid = dn->nid;
+ f2fs_i_xnid_write(dn->inode, dn->nid);
- dn->node_page = page;
- if (ipage)
- update_inode(dn->inode, ipage);
- else
- sync_inode_page(dn);
if (ofs == 0)
inc_valid_inode_count(sbi);
-
return page;
fail:
@@ -1070,18 +1068,22 @@ fail:
* 0: f2fs_put_page(page, 0)
* LOCKED_PAGE or error: f2fs_put_page(page, 1)
*/
-static int read_node_page(struct page *page, int rw)
+static int read_node_page(struct page *page, int op_flags)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
struct node_info ni;
struct f2fs_io_info fio = {
.sbi = sbi,
.type = NODE,
- .rw = rw,
+ .op = REQ_OP_READ,
+ .op_flags = op_flags,
.page = page,
.encrypted_page = NULL,
};
+ if (PageUptodate(page))
+ return LOCKED_PAGE;
+
get_node_info(sbi, page->index, &ni);
if (unlikely(ni.blk_addr == NULL_ADDR)) {
@@ -1089,9 +1091,6 @@ static int read_node_page(struct page *page, int rw)
return -ENOENT;
}
- if (PageUptodate(page))
- return LOCKED_PAGE;
-
fio.new_blkaddr = fio.old_blkaddr = ni.blk_addr;
return f2fs_submit_page_bio(&fio);
}
@@ -1118,7 +1117,7 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
if (!apage)
return;
- err = read_node_page(apage, READA);
+ err = read_node_page(apage, REQ_RAHEAD);
f2fs_put_page(apage, err ? 1 : 0);
}
@@ -1149,16 +1148,21 @@ repeat:
lock_page(page);
- if (unlikely(!PageUptodate(page))) {
- f2fs_put_page(page, 1);
- return ERR_PTR(-EIO);
- }
if (unlikely(page->mapping != NODE_MAPPING(sbi))) {
f2fs_put_page(page, 1);
goto repeat;
}
+
+ if (unlikely(!PageUptodate(page)))
+ goto out_err;
page_hit:
- f2fs_bug_on(sbi, nid != nid_of_node(page));
+ if (unlikely(nid != nid_of_node(page))) {
+ f2fs_bug_on(sbi, 1);
+ ClearPageUptodate(page);
+out_err:
+ f2fs_put_page(page, 1);
+ return ERR_PTR(-EIO);
+ }
return page;
}
@@ -1175,24 +1179,6 @@ struct page *get_node_page_ra(struct page *parent, int start)
return __get_node_page(sbi, nid, parent, start);
}
-void sync_inode_page(struct dnode_of_data *dn)
-{
- int ret = 0;
-
- if (IS_INODE(dn->node_page) || dn->inode_page == dn->node_page) {
- ret = update_inode(dn->inode, dn->node_page);
- } else if (dn->inode_page) {
- if (!dn->inode_page_locked)
- lock_page(dn->inode_page);
- ret = update_inode(dn->inode, dn->inode_page);
- if (!dn->inode_page_locked)
- unlock_page(dn->inode_page);
- } else {
- ret = update_inode_page(dn->inode);
- }
- dn->node_changed = ret ? true: false;
-}
-
static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino)
{
struct inode *inode;
@@ -1318,7 +1304,7 @@ continue_unlock:
return last_page;
}
-int fsync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
+int fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
struct writeback_control *wbc, bool atomic)
{
pgoff_t index, end;
@@ -1326,6 +1312,7 @@ int fsync_node_pages(struct f2fs_sb_info *sbi, nid_t ino,
int ret = 0;
struct page *last_page = NULL;
bool marked = false;
+ nid_t ino = inode->i_ino;
if (atomic) {
last_page = last_fsync_dnode(sbi, ino);
@@ -1379,9 +1366,13 @@ continue_unlock:
if (!atomic || page == last_page) {
set_fsync_mark(page, 1);
- if (IS_INODE(page))
+ if (IS_INODE(page)) {
+ if (is_inode_flag_set(inode,
+ FI_DIRTY_INODE))
+ update_inode(inode, page);
set_dentry_mark(page,
need_dentry_mark(sbi, ino));
+ }
/* may be written by another thread */
if (!PageDirty(page))
set_page_dirty(page);
@@ -1568,7 +1559,8 @@ static int f2fs_write_node_page(struct page *page,
struct f2fs_io_info fio = {
.sbi = sbi,
.type = NODE,
- .rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
+ .op = REQ_OP_WRITE,
+ .op_flags = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0,
.page = page,
.encrypted_page = NULL,
};
@@ -1628,6 +1620,7 @@ static int f2fs_write_node_pages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
+ struct blk_plug plug;
long diff;
/* balancing f2fs's metadata in background */
@@ -1641,7 +1634,9 @@ static int f2fs_write_node_pages(struct address_space *mapping,
diff = nr_pages_to_write(sbi, NODE, wbc);
wbc->sync_mode = WB_SYNC_NONE;
+ blk_start_plug(&plug);
sync_node_pages(sbi, wbc);
+ blk_finish_plug(&plug);
wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
return 0;
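
blk_start_plug()/blk_finish_plug() is the standard batching idiom: bios submitted while a task holds a plug accumulate on a per-task list and are only issued, with a chance to merge, once the plug is finished. A sketch of the pattern added above:

	struct blk_plug plug;

	blk_start_plug(&plug);
	sync_node_pages(sbi, wbc);	/* may emit many small node bios */
	blk_finish_plug(&plug);		/* issue them as one batch */
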
@@ -1655,9 +1650,10 @@ static int f2fs_set_node_page_dirty(struct page *page)
{
trace_f2fs_set_page_dirty(page, NODE);
- SetPageUptodate(page);
+ if (!PageUptodate(page))
+ SetPageUptodate(page);
if (!PageDirty(page)) {
- __set_page_dirty_nobuffers(page);
+ f2fs_set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
SetPagePrivate(page);
f2fs_trace_pid(page);
@@ -1776,7 +1772,7 @@ static void scan_nat_page(struct f2fs_sb_info *sbi,
}
}
-static void build_free_nids(struct f2fs_sb_info *sbi)
+void build_free_nids(struct f2fs_sb_info *sbi)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
@@ -1785,14 +1781,14 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
nid_t nid = nm_i->next_scan_nid;
/* Enough entries */
- if (nm_i->fcnt > NAT_ENTRY_PER_BLOCK)
+ if (nm_i->fcnt >= NAT_ENTRY_PER_BLOCK)
return;
/* readahead nat pages to be scanned */
ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES,
META_NAT, true);
- down_read(&nm_i->nat_tree_lock);
+ percpu_down_read(&nm_i->nat_tree_lock);
while (1) {
struct page *page = get_current_nat_page(sbi, nid);
@@ -1824,7 +1820,7 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
remove_free_nid(nm_i, nid);
}
up_read(&curseg->journal_rwsem);
- up_read(&nm_i->nat_tree_lock);
+ percpu_up_read(&nm_i->nat_tree_lock);
ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
nm_i->ra_nid_pages, META_NAT, false);
@@ -1923,12 +1919,15 @@ int try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
struct free_nid *i, *next;
int nr = nr_shrink;
+ if (nm_i->fcnt <= MAX_FREE_NIDS)
+ return 0;
+
if (!mutex_trylock(&nm_i->build_lock))
return 0;
spin_lock(&nm_i->free_nid_list_lock);
list_for_each_entry_safe(i, next, &nm_i->free_nid_list, list) {
- if (nr_shrink <= 0 || nm_i->fcnt <= NAT_ENTRY_PER_BLOCK)
+ if (nr_shrink <= 0 || nm_i->fcnt <= MAX_FREE_NIDS)
break;
if (i->state == NID_ALLOC)
continue;
@@ -1955,7 +1954,7 @@ void recover_inline_xattr(struct inode *inode, struct page *page)
ri = F2FS_INODE(page);
if (!(ri->i_inline & F2FS_INLINE_XATTR)) {
- clear_inode_flag(F2FS_I(inode), FI_INLINE_XATTR);
+ clear_inode_flag(inode, FI_INLINE_XATTR);
goto update_inode;
}
@@ -1997,13 +1996,11 @@ recover_xnid:
get_node_info(sbi, new_xnid, &ni);
ni.ino = inode->i_ino;
set_node_addr(sbi, &ni, NEW_ADDR, false);
- F2FS_I(inode)->i_xattr_nid = new_xnid;
+ f2fs_i_xnid_write(inode, new_xnid);
/* 3: update xattr blkaddr */
refresh_sit_entry(sbi, NEW_ADDR, blkaddr);
set_node_addr(sbi, &ni, blkaddr, false);
-
- update_inode_page(inode);
}
int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
@@ -2025,7 +2022,8 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
/* Should not use this inode from free nid list */
remove_free_nid(NM_I(sbi), ino);
- SetPageUptodate(ipage);
+ if (!PageUptodate(ipage))
+ SetPageUptodate(ipage);
fill_node_footer(ipage, ino, ino, 0, true);
src = F2FS_INODE(page);
@@ -2211,7 +2209,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
if (!nm_i->dirty_nat_cnt)
return;
- down_write(&nm_i->nat_tree_lock);
+ percpu_down_write(&nm_i->nat_tree_lock);
/*
* if there is not enough space in the journal to store dirty nat
@@ -2234,7 +2232,7 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
list_for_each_entry_safe(set, tmp, &sets, set_list)
__flush_nat_entry_set(sbi, set);
- up_write(&nm_i->nat_tree_lock);
+ percpu_up_write(&nm_i->nat_tree_lock);
f2fs_bug_on(sbi, nm_i->dirty_nat_cnt);
}
@@ -2270,7 +2268,8 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
mutex_init(&nm_i->build_lock);
spin_lock_init(&nm_i->free_nid_list_lock);
- init_rwsem(&nm_i->nat_tree_lock);
+ if (percpu_init_rwsem(&nm_i->nat_tree_lock))
+ return -ENOMEM;
nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
@@ -2327,7 +2326,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
spin_unlock(&nm_i->free_nid_list_lock);
/* destroy nat cache */
- down_write(&nm_i->nat_tree_lock);
+ percpu_down_write(&nm_i->nat_tree_lock);
while ((found = __gang_lookup_nat_cache(nm_i,
nid, NATVEC_SIZE, natvec))) {
unsigned idx;
@@ -2352,8 +2351,9 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
kmem_cache_free(nat_entry_set_slab, setvec[idx]);
}
}
- up_write(&nm_i->nat_tree_lock);
+ percpu_up_write(&nm_i->nat_tree_lock);
+ percpu_free_rwsem(&nm_i->nat_tree_lock);
kfree(nm_i->nat_bitmap);
sbi->nm_info = NULL;
kfree(nm_i);
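
nat_tree_lock is converted from a plain rw_semaphore to a percpu_rw_semaphore: the read side becomes a cheap per-cpu counter update with no shared-cacheline bouncing, while the write side becomes expensive (it must synchronize against all CPUs). That trade fits a lock taken for reading on every NAT lookup but for writing only at checkpoint and teardown. The full lifecycle used in this file, sketched with the <linux/percpu-rwsem.h> API:

	struct percpu_rw_semaphore sem;

	if (percpu_init_rwsem(&sem))	/* allocates per-cpu state */
		return -ENOMEM;

	percpu_down_read(&sem);		/* hot path: NAT lookups */
	/* ... read the nat cache ... */
	percpu_up_read(&sem);

	percpu_down_write(&sem);	/* cold path: flush/destroy */
	/* ... modify the nat cache wholesale ... */
	percpu_up_write(&sem);

	percpu_free_rwsem(&sem);	/* release per-cpu state */
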
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index 1f4f9d4569d9..fc7684554b1a 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -15,18 +15,21 @@
#define NAT_BLOCK_OFFSET(start_nid) (start_nid / NAT_ENTRY_PER_BLOCK)
/* # of pages to perform synchronous readahead before building free nids */
-#define FREE_NID_PAGES 4
+#define FREE_NID_PAGES 8
+#define MAX_FREE_NIDS (NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES)
-#define DEF_RA_NID_PAGES 4 /* # of nid pages to be readaheaded */
+#define DEF_RA_NID_PAGES 0 /* # of nid pages to be readaheaded */
/* maximum readahead size for node during getting data blocks */
#define MAX_RA_NODE 128
/* control the memory footprint threshold (10MB per 1GB ram) */
-#define DEF_RAM_THRESHOLD 10
+#define DEF_RAM_THRESHOLD 1
/* control dirty nats ratio threshold (default: 10% over max nid count) */
#define DEF_DIRTY_NAT_RATIO_THRESHOLD 10
+/* control total # of nats */
+#define DEF_NAT_CACHE_THRESHOLD 100000
/* vector size for gang look-up from nat cache that consists of radix tree */
#define NATVEC_SIZE 64
@@ -126,6 +129,11 @@ static inline bool excess_dirty_nats(struct f2fs_sb_info *sbi)
NM_I(sbi)->dirty_nats_ratio / 100;
}
+static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
+{
+ return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
+}
+
enum mem_type {
FREE_NIDS, /* indicates the free nid list */
NAT_ENTRIES, /* indicates the cached nat entry */
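
For scale, assuming 4KB blocks and the 9-byte packed struct f2fs_nat_entry from include/linux/f2fs_fs.h (a version byte plus 32-bit ino and block address):

	NAT_ENTRY_PER_BLOCK = 4096 / 9             = 455
	MAX_FREE_NIDS       = 455 * FREE_NID_PAGES = 455 * 8 = 3640

so the free-nid list may now grow to roughly 3.6k entries before try_to_free_nids() starts trimming, instead of being capped near a single block's worth.
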
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 3d7216d7a288..9e652d5a659b 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -153,9 +153,12 @@ retry:
f2fs_delete_entry(de, page, dir, einode);
iput(einode);
goto retry;
+ } else if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ } else {
+ err = __f2fs_add_link(dir, &name, inode,
+ inode->i_ino, inode->i_mode);
}
- err = __f2fs_add_link(dir, &name, inode, inode->i_ino, inode->i_mode);
-
goto out;
out_unmap_put:
@@ -175,7 +178,7 @@ static void recover_inode(struct inode *inode, struct page *page)
char *name;
inode->i_mode = le16_to_cpu(raw->i_mode);
- i_size_write(inode, le64_to_cpu(raw->i_size));
+ f2fs_i_size_write(inode, le64_to_cpu(raw->i_size));
inode->i_atime.tv_sec = le64_to_cpu(raw->i_mtime);
inode->i_ctime.tv_sec = le64_to_cpu(raw->i_ctime);
inode->i_mtime.tv_sec = le64_to_cpu(raw->i_mtime);
@@ -455,6 +458,9 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
continue;
}
+ if ((start + 1) << PAGE_SHIFT > i_size_read(inode))
+ f2fs_i_size_write(inode, (start + 1) << PAGE_SHIFT);
+
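Worked example for the size fix-up above, assuming 4KB pages (PAGE_SHIFT = 12): a recovered dnode entry at page index start = 3 means the file holds data through byte (3 + 1) << 12 = 16384, so i_size is raised to 16KB if the on-disk inode recorded less.
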
/*
* dest is reserved block, invalidate src block
* and then reserve one new block in dnode page.
@@ -476,6 +482,8 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
#endif
/* We should not get -ENOSPC */
f2fs_bug_on(sbi, err);
+ if (err)
+ goto err;
}
/* Check the previous node page having this index */
@@ -490,9 +498,6 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
}
}
- if (IS_INODE(dn.node_page))
- sync_inode_page(&dn);
-
copy_node_footer(dn.node_page, page);
fill_node_footer(dn.node_page, dn.nid, ni.ino,
ofs_of_node(page), false);
@@ -624,8 +629,12 @@ out:
if (err) {
bool invalidate = false;
- if (discard_next_dnode(sbi, blkaddr))
+ if (test_opt(sbi, LFS)) {
+ update_meta_page(sbi, NULL, blkaddr);
+ invalidate = true;
+ } else if (discard_next_dnode(sbi, blkaddr)) {
invalidate = true;
+ }
/* Flush all the NAT/SIT pages */
while (get_pages(sbi, F2FS_DIRTY_META))
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 2e6f537a0e7d..a46296f57b02 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -241,7 +241,7 @@ void drop_inmem_pages(struct inode *inode)
{
struct f2fs_inode_info *fi = F2FS_I(inode);
- clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
+ clear_inode_flag(inode, FI_ATOMIC_FILE);
mutex_lock(&fi->inmem_lock);
__revoke_inmem_pages(inode, &fi->inmem_pages, true, false);
@@ -257,7 +257,8 @@ static int __commit_inmem_pages(struct inode *inode,
struct f2fs_io_info fio = {
.sbi = sbi,
.type = DATA,
- .rw = WRITE_SYNC | REQ_PRIO,
+ .op = REQ_OP_WRITE,
+ .op_flags = WRITE_SYNC | REQ_PRIO,
.encrypted_page = NULL,
};
bool submit_bio = false;
@@ -345,6 +346,11 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
if (!need)
return;
+
+ /* background balancing may still be pending, so trigger it here */
+ if (excess_cached_nats(sbi))
+ f2fs_balance_fs_bg(sbi);
+
/*
* We should do GC or end up with checkpoint, if there are so many dirty
* dir/node pages without enough free segments.
@@ -366,7 +372,9 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
try_to_free_nats(sbi, NAT_ENTRY_PER_BLOCK);
if (!available_free_memory(sbi, FREE_NIDS))
- try_to_free_nids(sbi, NAT_ENTRY_PER_BLOCK * FREE_NID_PAGES);
+ try_to_free_nids(sbi, MAX_FREE_NIDS);
+ else
+ build_free_nids(sbi);
/* checkpoint is the only way to shrink partial cached entries */
if (!available_free_memory(sbi, NAT_ENTRIES) ||
@@ -406,7 +414,8 @@ repeat:
fcc->dispatch_list = llist_reverse_order(fcc->dispatch_list);
bio->bi_bdev = sbi->sb->s_bdev;
- ret = submit_bio_wait(WRITE_FLUSH, bio);
+ bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+ ret = submit_bio_wait(bio);
llist_for_each_entry_safe(cmd, next,
fcc->dispatch_list, llnode) {
@@ -433,24 +442,29 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi)
if (test_opt(sbi, NOBARRIER))
return 0;
- if (!test_opt(sbi, FLUSH_MERGE)) {
+ if (!test_opt(sbi, FLUSH_MERGE) || !atomic_read(&fcc->submit_flush)) {
struct bio *bio = f2fs_bio_alloc(0);
int ret;
+ atomic_inc(&fcc->submit_flush);
bio->bi_bdev = sbi->sb->s_bdev;
- ret = submit_bio_wait(WRITE_FLUSH, bio);
+ bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
+ ret = submit_bio_wait(bio);
+ atomic_dec(&fcc->submit_flush);
bio_put(bio);
return ret;
}
init_completion(&cmd.wait);
+ atomic_inc(&fcc->submit_flush);
llist_add(&cmd.llnode, &fcc->issue_list);
if (!fcc->dispatch_list)
wake_up(&fcc->flush_wait_queue);
wait_for_completion(&cmd.wait);
+ atomic_dec(&fcc->submit_flush);
return cmd.ret;
}
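
submit_bio_wait() lost its rw argument in 4.8, so the operation must now be set on the bio itself via bio_set_op_attrs(). The synchronous flush idiom both paths above share, as a standalone sketch:

	struct bio *bio = f2fs_bio_alloc(0);	/* no data payload */
	int ret;

	bio->bi_bdev = sbi->sb->s_bdev;
	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH);
	ret = submit_bio_wait(bio);		/* blocks until completion */
	bio_put(bio);

The new submit_flush counter lets a caller take this direct path whenever no other flush is in flight, falling back to the merge queue only under contention.
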
@@ -464,6 +478,7 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
fcc = kzalloc(sizeof(struct flush_cmd_control), GFP_KERNEL);
if (!fcc)
return -ENOMEM;
+ atomic_set(&fcc->submit_flush, 0);
init_waitqueue_head(&fcc->flush_wait_queue);
init_llist_head(&fcc->issue_list);
SM_I(sbi)->cmd_control_info = fcc;
@@ -665,6 +680,10 @@ static void add_discard_addrs(struct f2fs_sb_info *sbi, struct cp_control *cpc)
break;
end = __find_rev_next_zero_bit(dmap, max_blocks, start + 1);
+ if (force && start && end != max_blocks
+ && (end - start) < cpc->trim_minlen)
+ continue;
+
__add_discard_entry(sbi, cpc, se, start, end);
}
}
@@ -702,6 +721,8 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
unsigned long *prefree_map = dirty_i->dirty_segmap[PRE];
unsigned int start = 0, end = -1;
+ unsigned int secno, start_segno;
+ bool force = (cpc->reason == CP_DISCARD);
mutex_lock(&dirty_i->seglist_lock);
@@ -718,17 +739,31 @@ void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc)
dirty_i->nr_dirty[PRE] -= end - start;
- if (!test_opt(sbi, DISCARD))
+ if (force || !test_opt(sbi, DISCARD))
continue;
- f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
+ if (!test_opt(sbi, LFS) || sbi->segs_per_sec == 1) {
+ f2fs_issue_discard(sbi, START_BLOCK(sbi, start),
(end - start) << sbi->log_blocks_per_seg);
+ continue;
+ }
+next:
+ secno = GET_SECNO(sbi, start);
+ start_segno = secno * sbi->segs_per_sec;
+ if (!IS_CURSEC(sbi, secno) &&
+ !get_valid_blocks(sbi, start, sbi->segs_per_sec))
+ f2fs_issue_discard(sbi, START_BLOCK(sbi, start_segno),
+ sbi->segs_per_sec << sbi->log_blocks_per_seg);
+
+ start = start_segno + sbi->segs_per_sec;
+ if (start < end)
+ goto next;
}
mutex_unlock(&dirty_i->seglist_lock);
/* send small discards */
list_for_each_entry_safe(entry, this, head, list) {
- if (cpc->reason == CP_DISCARD && entry->len < cpc->trim_minlen)
+ if (force && entry->len < cpc->trim_minlen)
goto skip;
f2fs_issue_discard(sbi, entry->blkaddr, entry->len);
cpc->trimmed += entry->len;
@@ -1216,6 +1251,9 @@ void allocate_new_segments(struct f2fs_sb_info *sbi)
{
int i;
+ if (test_opt(sbi, LFS))
+ return;
+
for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++)
__allocate_new_segments(sbi, i);
}
@@ -1389,11 +1427,17 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio)
{
int type = __get_segment_type(fio->page, fio->type);
+ if (fio->type == NODE || fio->type == DATA)
+ mutex_lock(&fio->sbi->wio_mutex[fio->type]);
+
allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
&fio->new_blkaddr, sum, type);
/* writeout dirty page into bdev */
f2fs_submit_page_mbio(fio);
+
+ if (fio->type == NODE || fio->type == DATA)
+ mutex_unlock(&fio->sbi->wio_mutex[fio->type]);
}
void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
@@ -1401,7 +1445,8 @@ void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
struct f2fs_io_info fio = {
.sbi = sbi,
.type = META,
- .rw = WRITE_SYNC | REQ_META | REQ_PRIO,
+ .op = REQ_OP_WRITE,
+ .op_flags = WRITE_SYNC | REQ_META | REQ_PRIO,
.old_blkaddr = page->index,
.new_blkaddr = page->index,
.page = page,
@@ -1409,7 +1454,7 @@ void write_meta_page(struct f2fs_sb_info *sbi, struct page *page)
};
if (unlikely(page->index >= MAIN_BLKADDR(sbi)))
- fio.rw &= ~REQ_META;
+ fio.op_flags &= ~REQ_META;
set_page_writeback(page);
f2fs_submit_page_mbio(&fio);
@@ -2373,7 +2418,11 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
sm_info->ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
sm_info->rec_prefree_segments = sm_info->main_segments *
DEF_RECLAIM_PREFREE_SEGMENTS / 100;
- sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
+ if (sm_info->rec_prefree_segments > DEF_MAX_RECLAIM_PREFREE_SEGMENTS)
+ sm_info->rec_prefree_segments = DEF_MAX_RECLAIM_PREFREE_SEGMENTS;
+
+ if (!test_opt(sbi, LFS))
+ sm_info->ipu_policy = 1 << F2FS_IPU_FSYNC;
sm_info->min_ipu_util = DEF_MIN_IPU_UTIL;
sm_info->min_fsync_blocks = DEF_MIN_FSYNC_BLOCKS;
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 7a756ff5a36d..b33f73ec60a4 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -16,6 +16,7 @@
#define NULL_SECNO ((unsigned int)(~0))
#define DEF_RECLAIM_PREFREE_SEGMENTS 5 /* 5% over total segments */
+#define DEF_MAX_RECLAIM_PREFREE_SEGMENTS 4096 /* at most 8GB (4096 x 2MB segments) */
/* L: Logical segment # in volume, R: Relative segment # in main area */
#define GET_L2R_SEGNO(free_i, segno) (segno - free_i->start_segno)
@@ -470,6 +471,10 @@ static inline bool need_SSR(struct f2fs_sb_info *sbi)
{
int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
+
+ if (test_opt(sbi, LFS))
+ return false;
+
return free_sections(sbi) <= (node_secs + 2 * dent_secs +
reserved_sections(sbi) + 1);
}
@@ -479,6 +484,8 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
+ node_secs += get_blocktype_secs(sbi, F2FS_DIRTY_IMETA);
+
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
return false;
@@ -531,6 +538,9 @@ static inline bool need_inplace_update(struct inode *inode)
if (S_ISDIR(inode->i_mode) || f2fs_is_atomic_file(inode))
return false;
+ if (test_opt(sbi, LFS))
+ return false;
+
if (policy & (0x1 << F2FS_IPU_FORCE))
return true;
if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi))
@@ -544,7 +554,7 @@ static inline bool need_inplace_update(struct inode *inode)
/* this is only set during fdatasync */
if (policy & (0x1 << F2FS_IPU_FSYNC) &&
- is_inode_flag_set(F2FS_I(inode), FI_NEED_IPU))
+ is_inode_flag_set(inode, FI_NEED_IPU))
return true;
return false;
@@ -706,9 +716,9 @@ static inline int nr_pages_to_skip(struct f2fs_sb_info *sbi, int type)
if (type == DATA)
return sbi->blocks_per_seg;
else if (type == NODE)
- return 3 * sbi->blocks_per_seg;
+ return 8 * sbi->blocks_per_seg;
else if (type == META)
- return MAX_BIO_BLOCKS(sbi);
+ return 8 * MAX_BIO_BLOCKS(sbi);
else
return 0;
}
@@ -726,10 +736,8 @@ static inline long nr_pages_to_write(struct f2fs_sb_info *sbi, int type,
nr_to_write = wbc->nr_to_write;
- if (type == DATA)
- desired = 4096;
- else if (type == NODE)
- desired = 3 * max_hw_blocks(sbi);
+ if (type == NODE)
+ desired = 2 * max_hw_blocks(sbi);
else
desired = MAX_BIO_BLOCKS(sbi);
diff --git a/fs/f2fs/shrinker.c b/fs/f2fs/shrinker.c
index 93606f281bf9..46c915425923 100644
--- a/fs/f2fs/shrinker.c
+++ b/fs/f2fs/shrinker.c
@@ -13,6 +13,7 @@
#include <linux/f2fs_fs.h>
#include "f2fs.h"
+#include "node.h"
static LIST_HEAD(f2fs_list);
static DEFINE_SPINLOCK(f2fs_list_lock);
@@ -25,8 +26,8 @@ static unsigned long __count_nat_entries(struct f2fs_sb_info *sbi)
static unsigned long __count_free_nids(struct f2fs_sb_info *sbi)
{
- if (NM_I(sbi)->fcnt > NAT_ENTRY_PER_BLOCK)
- return NM_I(sbi)->fcnt - NAT_ENTRY_PER_BLOCK;
+ if (NM_I(sbi)->fcnt > MAX_FREE_NIDS)
+ return NM_I(sbi)->fcnt - MAX_FREE_NIDS;
return 0;
}
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 74cc8520b8b1..1b86d3f638ef 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -49,6 +49,7 @@ char *fault_name[FAULT_MAX] = {
[FAULT_ORPHAN] = "orphan",
[FAULT_BLOCK] = "no more block",
[FAULT_DIR_DEPTH] = "too big dir depth",
+ [FAULT_EVICT_INODE] = "evict_inode fail",
};
static void f2fs_build_fault_attr(unsigned int rate)
@@ -75,6 +76,7 @@ enum {
Opt_disable_roll_forward,
Opt_norecovery,
Opt_discard,
+ Opt_nodiscard,
Opt_noheap,
Opt_user_xattr,
Opt_nouser_xattr,
@@ -86,13 +88,17 @@ enum {
Opt_inline_data,
Opt_inline_dentry,
Opt_flush_merge,
+ Opt_noflush_merge,
Opt_nobarrier,
Opt_fastboot,
Opt_extent_cache,
Opt_noextent_cache,
Opt_noinline_data,
Opt_data_flush,
+ Opt_mode,
Opt_fault_injection,
+ Opt_lazytime,
+ Opt_nolazytime,
Opt_err,
};
@@ -101,6 +107,7 @@ static match_table_t f2fs_tokens = {
{Opt_disable_roll_forward, "disable_roll_forward"},
{Opt_norecovery, "norecovery"},
{Opt_discard, "discard"},
+ {Opt_nodiscard, "nodiscard"},
{Opt_noheap, "no_heap"},
{Opt_user_xattr, "user_xattr"},
{Opt_nouser_xattr, "nouser_xattr"},
@@ -112,13 +119,17 @@ static match_table_t f2fs_tokens = {
{Opt_inline_data, "inline_data"},
{Opt_inline_dentry, "inline_dentry"},
{Opt_flush_merge, "flush_merge"},
+ {Opt_noflush_merge, "noflush_merge"},
{Opt_nobarrier, "nobarrier"},
{Opt_fastboot, "fastboot"},
{Opt_extent_cache, "extent_cache"},
{Opt_noextent_cache, "noextent_cache"},
{Opt_noinline_data, "noinline_data"},
{Opt_data_flush, "data_flush"},
+ {Opt_mode, "mode=%s"},
{Opt_fault_injection, "fault_injection=%u"},
+ {Opt_lazytime, "lazytime"},
+ {Opt_nolazytime, "nolazytime"},
{Opt_err, NULL},
};
@@ -417,6 +428,8 @@ static int parse_options(struct super_block *sb, char *options)
"the device does not support discard");
}
break;
+ case Opt_nodiscard:
+ clear_opt(sbi, DISCARD);
+ break;
case Opt_noheap:
set_opt(sbi, NOHEAP);
break;
@@ -478,6 +491,9 @@ static int parse_options(struct super_block *sb, char *options)
case Opt_flush_merge:
set_opt(sbi, FLUSH_MERGE);
break;
+ case Opt_noflush_merge:
+ clear_opt(sbi, FLUSH_MERGE);
+ break;
case Opt_nobarrier:
set_opt(sbi, NOBARRIER);
break;
@@ -496,6 +512,23 @@ static int parse_options(struct super_block *sb, char *options)
case Opt_data_flush:
set_opt(sbi, DATA_FLUSH);
break;
+ case Opt_mode:
+ name = match_strdup(&args[0]);
+
+ if (!name)
+ return -ENOMEM;
+ if (strlen(name) == 8 &&
+ !strncmp(name, "adaptive", 8)) {
+ set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
+ } else if (strlen(name) == 3 &&
+ !strncmp(name, "lfs", 3)) {
+ set_opt_mode(sbi, F2FS_MOUNT_LFS);
+ } else {
+ kfree(name);
+ return -EINVAL;
+ }
+ kfree(name);
+ break;
case Opt_fault_injection:
if (args->from && match_int(args, &arg))
return -EINVAL;
@@ -506,6 +539,12 @@ static int parse_options(struct super_block *sb, char *options)
"FAULT_INJECTION was not selected");
#endif
break;
+ case Opt_lazytime:
+ sb->s_flags |= MS_LAZYTIME;
+ break;
+ case Opt_nolazytime:
+ sb->s_flags &= ~MS_LAZYTIME;
+ break;
default:
f2fs_msg(sb, KERN_ERR,
"Unrecognized mount option \"%s\" or missing value",
@@ -537,13 +576,11 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
fi->i_advise = 0;
init_rwsem(&fi->i_sem);
INIT_LIST_HEAD(&fi->dirty_list);
+ INIT_LIST_HEAD(&fi->gdirty_list);
INIT_LIST_HEAD(&fi->inmem_pages);
mutex_init(&fi->inmem_lock);
-
- set_inode_flag(fi, FI_NEW_INODE);
-
- if (test_opt(F2FS_SB(sb), INLINE_XATTR))
- set_inode_flag(fi, FI_INLINE_XATTR);
+ init_rwsem(&fi->dio_rwsem[READ]);
+ init_rwsem(&fi->dio_rwsem[WRITE]);
/* Will be used by directory only */
fi->i_dir_level = F2FS_SB(sb)->dir_level;
@@ -559,7 +596,7 @@ static int f2fs_drop_inode(struct inode *inode)
* - f2fs_gc -> iput -> evict
* - inode_wait_for_writeback(inode)
*/
- if (!inode_unhashed(inode) && inode->i_state & I_SYNC) {
+ if ((!inode_unhashed(inode) && inode->i_state & I_SYNC)) {
if (!inode->i_nlink && !is_bad_inode(inode)) {
/* to avoid evict_inode call simultaneously */
atomic_inc(&inode->i_count);
@@ -573,10 +610,10 @@ static int f2fs_drop_inode(struct inode *inode)
f2fs_destroy_extent_node(inode);
sb_start_intwrite(inode->i_sb);
- i_size_write(inode, 0);
+ f2fs_i_size_write(inode, 0);
if (F2FS_HAS_BLOCKS(inode))
- f2fs_truncate(inode, true);
+ f2fs_truncate(inode);
sb_end_intwrite(inode->i_sb);
@@ -586,9 +623,47 @@ static int f2fs_drop_inode(struct inode *inode)
}
return 0;
}
+
return generic_drop_inode(inode);
}
+int f2fs_inode_dirtied(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ spin_lock(&sbi->inode_lock[DIRTY_META]);
+ if (is_inode_flag_set(inode, FI_DIRTY_INODE)) {
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+ return 1;
+ }
+
+ set_inode_flag(inode, FI_DIRTY_INODE);
+ list_add_tail(&F2FS_I(inode)->gdirty_list,
+ &sbi->inode_list[DIRTY_META]);
+ inc_page_count(sbi, F2FS_DIRTY_IMETA);
+ stat_inc_dirty_inode(sbi, DIRTY_META);
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+
+ return 0;
+}
+
+void f2fs_inode_synced(struct inode *inode)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ spin_lock(&sbi->inode_lock[DIRTY_META]);
+ if (!is_inode_flag_set(inode, FI_DIRTY_INODE)) {
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+ return;
+ }
+ list_del_init(&F2FS_I(inode)->gdirty_list);
+ clear_inode_flag(inode, FI_DIRTY_INODE);
+ clear_inode_flag(inode, FI_AUTO_RECOVER);
+ dec_page_count(sbi, F2FS_DIRTY_IMETA);
+ stat_dec_dirty_inode(F2FS_I_SB(inode), DIRTY_META);
+ spin_unlock(&sbi->inode_lock[DIRTY_META]);
+}
+
/*
* f2fs_dirty_inode() is called from __mark_inode_dirty()
*
@@ -596,7 +671,19 @@ static int f2fs_drop_inode(struct inode *inode)
*/
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
- set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+
+ if (inode->i_ino == F2FS_NODE_INO(sbi) ||
+ inode->i_ino == F2FS_META_INO(sbi))
+ return;
+
+ if (flags == I_DIRTY_TIME)
+ return;
+
+ if (is_inode_flag_set(inode, FI_AUTO_RECOVER))
+ clear_inode_flag(inode, FI_AUTO_RECOVER);
+
+ f2fs_inode_dirtied(inode);
}
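
f2fs_inode_dirtied() and f2fs_inode_synced() are a matched pair around the new gdirty_list. A rough call flow; the update_inode() side is assumed from the rest of this series, not shown in this hunk:

	/* mark:
	 *   f2fs_mark_inode_dirty_sync(inode)
	 *     -> __mark_inode_dirty(inode, I_DIRTY_SYNC)
	 *     -> s_op->dirty_inode == f2fs_dirty_inode()
	 *     -> f2fs_inode_dirtied()   FI_DIRTY_INODE set, inode queued
	 *                               on gdirty_list, F2FS_DIRTY_IMETA++
	 *
	 * sync:
	 *   update_inode(inode, node_page)  copies fields to the node page
	 *     -> f2fs_inode_synced()        flag cleared, inode dequeued,
	 *                                   F2FS_DIRTY_IMETA--
	 */
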
static void f2fs_i_callback(struct rcu_head *head)
@@ -619,6 +706,8 @@ static void destroy_percpu_info(struct f2fs_sb_info *sbi)
percpu_counter_destroy(&sbi->nr_pages[i]);
percpu_counter_destroy(&sbi->alloc_valid_block_count);
percpu_counter_destroy(&sbi->total_valid_inode_count);
+
+ percpu_free_rwsem(&sbi->cp_rwsem);
}
static void f2fs_put_super(struct super_block *sb)
@@ -738,7 +827,7 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_bsize = sbi->blocksize;
buf->f_blocks = total_count - start_count;
- buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count;
+ buf->f_bfree = user_block_count - valid_user_blocks(sbi) + ovp_count;
buf->f_bavail = user_block_count - valid_user_blocks(sbi);
buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM;
@@ -803,6 +892,12 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",noextent_cache");
if (test_opt(sbi, DATA_FLUSH))
seq_puts(seq, ",data_flush");
+
+ seq_puts(seq, ",mode=");
+ if (test_opt(sbi, ADAPTIVE))
+ seq_puts(seq, "adaptive");
+ else if (test_opt(sbi, LFS))
+ seq_puts(seq, "lfs");
seq_printf(seq, ",active_logs=%u", sbi->active_logs);
return 0;
@@ -866,7 +961,6 @@ static int _name##_open_fs(struct inode *inode, struct file *file) \
} \
\
static const struct file_operations f2fs_seq_##_name##_fops = { \
- .owner = THIS_MODULE, \
.open = _name##_open_fs, \
.read = seq_read, \
.llseek = seq_lseek, \
@@ -884,6 +978,14 @@ static void default_options(struct f2fs_sb_info *sbi)
set_opt(sbi, BG_GC);
set_opt(sbi, INLINE_DATA);
set_opt(sbi, EXTENT_CACHE);
+ sbi->sb->s_flags |= MS_LAZYTIME;
+ set_opt(sbi, FLUSH_MERGE);
+ if (f2fs_sb_mounted_hmsmr(sbi->sb)) {
+ set_opt_mode(sbi, F2FS_MOUNT_LFS);
+ set_opt(sbi, DISCARD);
+ } else {
+ set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
+ }
#ifdef CONFIG_F2FS_FS_XATTR
set_opt(sbi, XATTR_USER);
@@ -1367,6 +1469,8 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
INIT_LIST_HEAD(&sbi->s_list);
mutex_init(&sbi->umount_mutex);
+ mutex_init(&sbi->wio_mutex[NODE]);
+ mutex_init(&sbi->wio_mutex[DATA]);
#ifdef CONFIG_F2FS_FS_ENCRYPTION
memcpy(sbi->key_prefix, F2FS_KEY_DESC_PREFIX,
@@ -1379,6 +1483,9 @@ static int init_percpu_info(struct f2fs_sb_info *sbi)
{
int i, err;
+ if (percpu_init_rwsem(&sbi->cp_rwsem))
+ return -ENOMEM;
+
for (i = 0; i < NR_COUNT_TYPE; i++) {
err = percpu_counter_init(&sbi->nr_pages[i], 0, GFP_KERNEL);
if (err)
@@ -1530,6 +1637,8 @@ try_onemore:
goto free_sbi;
sb->s_fs_info = sbi;
+ sbi->raw_super = raw_super;
+
default_options(sbi);
/* parse mount options */
options = kstrdup((const char *)data, GFP_KERNEL);
@@ -1559,10 +1668,8 @@ try_onemore:
memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
/* init f2fs-specific super block info */
- sbi->raw_super = raw_super;
sbi->valid_super_block = valid_super_block;
mutex_init(&sbi->gc_mutex);
- mutex_init(&sbi->writepages);
mutex_init(&sbi->cp_mutex);
init_rwsem(&sbi->node_write);
@@ -1579,7 +1686,6 @@ try_onemore:
sbi->write_io[i].bio = NULL;
}
- init_rwsem(&sbi->cp_rwsem);
init_waitqueue_head(&sbi->cp_wait);
init_sb_info(sbi);
@@ -1762,6 +1868,7 @@ try_onemore:
return 0;
free_kobj:
+ f2fs_sync_inode_meta(sbi);
kobject_del(&sbi->s_kobj);
kobject_put(&sbi->s_kobj);
wait_for_completion(&sbi->s_kobj_unregister);
diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c
index 562ce0821559..73b4e1d1912a 100644
--- a/fs/f2fs/trace.c
+++ b/fs/f2fs/trace.c
@@ -25,11 +25,11 @@ static inline void __print_last_io(void)
if (!last_io.len)
return;
- trace_printk("%3x:%3x %4x %-16s %2x %5x %12x %4x\n",
+ trace_printk("%3x:%3x %4x %-16s %2x %5x %5x %12x %4x\n",
last_io.major, last_io.minor,
last_io.pid, "----------------",
last_io.type,
- last_io.fio.rw,
+ last_io.fio.op, last_io.fio.op_flags,
last_io.fio.new_blkaddr,
last_io.len);
memset(&last_io, 0, sizeof(last_io));
@@ -101,7 +101,8 @@ void f2fs_trace_ios(struct f2fs_io_info *fio, int flush)
if (last_io.major == major && last_io.minor == minor &&
last_io.pid == pid &&
last_io.type == __file_type(inode, pid) &&
- last_io.fio.rw == fio->rw &&
+ last_io.fio.op == fio->op &&
+ last_io.fio.op_flags == fio->op_flags &&
last_io.fio.new_blkaddr + last_io.len ==
fio->new_blkaddr) {
last_io.len++;
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index e3decae3acfb..c8898b5148eb 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -106,7 +106,7 @@ static int f2fs_xattr_advise_set(const struct xattr_handler *handler,
return -EINVAL;
F2FS_I(inode)->i_advise |= *(char *)value;
- mark_inode_dirty(inode);
+ f2fs_mark_inode_dirty_sync(inode);
return 0;
}
@@ -299,6 +299,7 @@ static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
if (ipage) {
inline_addr = inline_xattr_addr(ipage);
f2fs_wait_on_page_writeback(ipage, NODE, true);
+ set_page_dirty(ipage);
} else {
page = get_node_page(sbi, inode->i_ino);
if (IS_ERR(page)) {
@@ -441,13 +442,12 @@ static int __f2fs_setxattr(struct inode *inode, int index,
const char *name, const void *value, size_t size,
struct page *ipage, int flags)
{
- struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_xattr_entry *here, *last;
void *base_addr;
int found, newsize;
size_t len;
__u32 new_hsize;
- int error = -ENOMEM;
+ int error = 0;
if (name == NULL)
return -EINVAL;
@@ -465,7 +465,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
base_addr = read_all_xattrs(inode, ipage);
if (!base_addr)
- goto exit;
+ return -ENOMEM;
/* find entry with wanted name. */
here = __find_xattr(base_addr, index, len, name);
@@ -539,19 +539,15 @@ static int __f2fs_setxattr(struct inode *inode, int index,
if (error)
goto exit;
- if (is_inode_flag_set(fi, FI_ACL_MODE)) {
- inode->i_mode = fi->i_acl_mode;
+ if (is_inode_flag_set(inode, FI_ACL_MODE)) {
+ inode->i_mode = F2FS_I(inode)->i_acl_mode;
inode->i_ctime = CURRENT_TIME;
- clear_inode_flag(fi, FI_ACL_MODE);
+ clear_inode_flag(inode, FI_ACL_MODE);
}
if (index == F2FS_XATTR_INDEX_ENCRYPTION &&
!strcmp(name, F2FS_XATTR_NAME_ENCRYPTION_CONTEXT))
f2fs_set_encrypted_inode(inode);
-
- if (ipage)
- update_inode(inode, ipage);
- else
- update_inode_page(inode);
+ f2fs_mark_inode_dirty_sync(inode);
exit:
kzfree(base_addr);
return error;
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 3bcf57925dca..da04c0298fab 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -1589,7 +1589,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
/*
* GFP_KERNEL is ok here, because while we do hold the
- * supeblock lock, memory pressure can't call back into
+ * superblock lock, memory pressure can't call back into
* the filesystem, since we're only just about to mount
* it and have no inodes etc active!
*/
@@ -1726,7 +1726,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
sbi->dir_entries = bpb.fat_dir_entries;
if (sbi->dir_entries & (sbi->dir_per_block - 1)) {
if (!silent)
- fat_msg(sb, KERN_ERR, "bogus directory-entries per block"
+ fat_msg(sb, KERN_ERR, "bogus number of directory entries"
" (%u)", sbi->dir_entries);
goto out_invalid;
}
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index c4589e981760..8a8698119ff7 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -267,7 +267,7 @@ int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
int i, err = 0;
for (i = 0; i < nr_bhs; i++)
- write_dirty_buffer(bhs[i], WRITE);
+ write_dirty_buffer(bhs[i], 0);
for (i = 0; i < nr_bhs; i++) {
wait_on_buffer(bhs[i]);
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c
index b7e2b33aa793..664655b2c55f 100644
--- a/fs/fat/namei_msdos.c
+++ b/fs/fat/namei_msdos.c
@@ -154,7 +154,7 @@ static int msdos_hash(const struct dentry *dentry, struct qstr *qstr)
error = msdos_format_name(qstr->name, qstr->len, msdos_name, options);
if (!error)
- qstr->hash = full_name_hash(msdos_name, MSDOS_NAME);
+ qstr->hash = full_name_hash(dentry, msdos_name, MSDOS_NAME);
return 0;
}
@@ -162,10 +162,10 @@ static int msdos_hash(const struct dentry *dentry, struct qstr *qstr)
* Compare two msdos names. If either of the names are invalid,
* we fall back to doing the standard name comparison.
*/
-static int msdos_cmp(const struct dentry *parent, const struct dentry *dentry,
+static int msdos_cmp(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
- struct fat_mount_options *options = &MSDOS_SB(parent->d_sb)->options;
+ struct fat_mount_options *options = &MSDOS_SB(dentry->d_sb)->options;
unsigned char a_msdos_name[MSDOS_NAME], b_msdos_name[MSDOS_NAME];
int error;
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 7092584f424a..92b7363dafa9 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -107,7 +107,7 @@ static unsigned int vfat_striptail_len(const struct qstr *qstr)
*/
static int vfat_hash(const struct dentry *dentry, struct qstr *qstr)
{
- qstr->hash = full_name_hash(qstr->name, vfat_striptail_len(qstr));
+ qstr->hash = full_name_hash(dentry, qstr->name, vfat_striptail_len(qstr));
return 0;
}
@@ -127,7 +127,7 @@ static int vfat_hashi(const struct dentry *dentry, struct qstr *qstr)
name = qstr->name;
len = vfat_striptail_len(qstr);
- hash = init_name_hash();
+ hash = init_name_hash(dentry);
while (len--)
hash = partial_name_hash(nls_tolower(t, *name++), hash);
qstr->hash = end_name_hash(hash);
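
full_name_hash() and init_name_hash() grew a salt parameter in 4.8: the parent dentry pointer is folded into the hash so identical names under different directories land in different hash chains. A minimal sketch of the case-insensitive variant under that API, where tolower() stands in for the NLS table lookup used above:

	#include <linux/stringhash.h>
	#include <linux/ctype.h>

	static int example_hashi(const struct dentry *dentry, struct qstr *qstr)
	{
		unsigned long hash = init_name_hash(dentry);	/* per-parent salt */
		const unsigned char *name = qstr->name;
		unsigned int len = qstr->len;

		while (len--)
			hash = partial_name_hash(tolower(*name++), hash);
		qstr->hash = end_name_hash(hash);
		return 0;
	}
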
@@ -138,10 +138,10 @@ static int vfat_hashi(const struct dentry *dentry, struct qstr *qstr)
/*
* Case insensitive compare of two vfat names.
*/
-static int vfat_cmpi(const struct dentry *parent, const struct dentry *dentry,
+static int vfat_cmpi(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
- struct nls_table *t = MSDOS_SB(parent->d_sb)->nls_io;
+ struct nls_table *t = MSDOS_SB(dentry->d_sb)->nls_io;
unsigned int alen, blen;
/* A filename cannot end in '.' or we treat it like it has none */
@@ -157,7 +157,7 @@ static int vfat_cmpi(const struct dentry *parent, const struct dentry *dentry,
/*
* Case sensitive compare of two vfat names.
*/
-static int vfat_cmp(const struct dentry *parent, const struct dentry *dentry,
+static int vfat_cmp(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
unsigned int alen, blen;
@@ -652,8 +652,8 @@ out_free:
return err;
}
-static int vfat_add_entry(struct inode *dir, struct qstr *qname, int is_dir,
- int cluster, struct timespec *ts,
+static int vfat_add_entry(struct inode *dir, const struct qstr *qname,
+ int is_dir, int cluster, struct timespec *ts,
struct fat_slot_info *sinfo)
{
struct msdos_dir_slot *slots;
@@ -688,7 +688,7 @@ cleanup:
return err;
}
-static int vfat_find(struct inode *dir, struct qstr *qname,
+static int vfat_find(struct inode *dir, const struct qstr *qname,
struct fat_slot_info *sinfo)
{
unsigned int len = vfat_striptail_len(qname);
diff --git a/fs/freevxfs/Kconfig b/fs/freevxfs/Kconfig
index 8dc1cd5c1efe..ce49df1020dd 100644
--- a/fs/freevxfs/Kconfig
+++ b/fs/freevxfs/Kconfig
@@ -5,12 +5,21 @@ config VXFS_FS
FreeVxFS is a file system driver that supports the VERITAS VxFS(TM)
file system format. VERITAS VxFS(TM) is the standard file system
of SCO UnixWare (and possibly others) and optionally available
- for Sunsoft Solaris, HP-UX and many other operating systems.
- Currently only readonly access is supported.
+ for Sunsoft Solaris, HP-UX and many other operating systems. However,
+ these particular OS implementations of VxFS may differ in on-disk
+ data endianness and/or superblock offset. The vxfs module has been
+ tested with SCO UnixWare and HP-UX B.10.20 (PA-RISC 1.1 arch.).
+ Currently only read-only access is supported, for VxFS versions
+ 2, 3 and 4. Tests were performed with HP-UX VxFS version 3.
NOTE: the file system type as used by mount(1), mount(2) and
fstab(5) is 'vxfs' as it describes the file system format, not
the actual driver.
+ There is a userspace utility for HP-UX logical volumes which makes
+ creating HP-UX logical volumes easy from HP-UX disk block device file
+ or regular file with image of the disk. See:
+ https://sourceforge.net/projects/linux-vxfs/
+
To compile this as a module, choose M here: the module will be
called freevxfs. If unsure, say N.
diff --git a/fs/freevxfs/vxfs.h b/fs/freevxfs/vxfs.h
index c8a92652612a..a41ea0ba6943 100644
--- a/fs/freevxfs/vxfs.h
+++ b/fs/freevxfs/vxfs.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
+ * Copyright (c) 2016 Krzysztof Blaszkowski
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -38,13 +39,6 @@
*/
#include <linux/types.h>
-
-/*
- * Data types for use with the VxFS ondisk format.
- */
-typedef int32_t vx_daddr_t;
-typedef int32_t vx_ino_t;
-
/*
* Superblock magic number (vxfs_super->vs_magic).
*/
@@ -60,6 +54,14 @@ typedef int32_t vx_ino_t;
*/
#define VXFS_NEFREE 32
+enum vxfs_byte_order {
+ VXFS_BO_LE,
+ VXFS_BO_BE,
+};
+
+typedef __u16 __bitwise __fs16;
+typedef __u32 __bitwise __fs32;
+typedef __u64 __bitwise __fs64;
/*
* VxFS superblock (disk).
@@ -71,83 +73,83 @@ struct vxfs_sb {
* Many of these fields are no longer used by version 2
* and newer filesystems.
*/
- u_int32_t vs_magic; /* Magic number */
- int32_t vs_version; /* VxFS version */
- u_int32_t vs_ctime; /* create time - secs */
- u_int32_t vs_cutime; /* create time - usecs */
- int32_t __unused1; /* unused */
- int32_t __unused2; /* unused */
- vx_daddr_t vs_old_logstart; /* obsolete */
- vx_daddr_t vs_old_logend; /* obsolete */
- int32_t vs_bsize; /* block size */
- int32_t vs_size; /* number of blocks */
- int32_t vs_dsize; /* number of data blocks */
- u_int32_t vs_old_ninode; /* obsolete */
- int32_t vs_old_nau; /* obsolete */
- int32_t __unused3; /* unused */
- int32_t vs_old_defiextsize; /* obsolete */
- int32_t vs_old_ilbsize; /* obsolete */
- int32_t vs_immedlen; /* size of immediate data area */
- int32_t vs_ndaddr; /* number of direct extentes */
- vx_daddr_t vs_firstau; /* address of first AU */
- vx_daddr_t vs_emap; /* offset of extent map in AU */
- vx_daddr_t vs_imap; /* offset of inode map in AU */
- vx_daddr_t vs_iextop; /* offset of ExtOp. map in AU */
- vx_daddr_t vs_istart; /* offset of inode list in AU */
- vx_daddr_t vs_bstart; /* offset of fdblock in AU */
- vx_daddr_t vs_femap; /* aufirst + emap */
- vx_daddr_t vs_fimap; /* aufirst + imap */
- vx_daddr_t vs_fiextop; /* aufirst + iextop */
- vx_daddr_t vs_fistart; /* aufirst + istart */
- vx_daddr_t vs_fbstart; /* aufirst + bstart */
- int32_t vs_nindir; /* number of entries in indir */
- int32_t vs_aulen; /* length of AU in blocks */
- int32_t vs_auimlen; /* length of imap in blocks */
- int32_t vs_auemlen; /* length of emap in blocks */
- int32_t vs_auilen; /* length of ilist in blocks */
- int32_t vs_aupad; /* length of pad in blocks */
- int32_t vs_aublocks; /* data blocks in AU */
- int32_t vs_maxtier; /* log base 2 of aublocks */
- int32_t vs_inopb; /* number of inodes per blk */
- int32_t vs_old_inopau; /* obsolete */
- int32_t vs_old_inopilb; /* obsolete */
- int32_t vs_old_ndiripau; /* obsolete */
- int32_t vs_iaddrlen; /* size of indirect addr ext. */
- int32_t vs_bshift; /* log base 2 of bsize */
- int32_t vs_inoshift; /* log base 2 of inobp */
- int32_t vs_bmask; /* ~( bsize - 1 ) */
- int32_t vs_boffmask; /* bsize - 1 */
- int32_t vs_old_inomask; /* old_inopilb - 1 */
- int32_t vs_checksum; /* checksum of V1 data */
+ __fs32 vs_magic; /* Magic number */
+ __fs32 vs_version; /* VxFS version */
+ __fs32 vs_ctime; /* create time - secs */
+ __fs32 vs_cutime; /* create time - usecs */
+ __fs32 __unused1; /* unused */
+ __fs32 __unused2; /* unused */
+ __fs32 vs_old_logstart; /* obsolete */
+ __fs32 vs_old_logend; /* obsolete */
+ __fs32 vs_bsize; /* block size */
+ __fs32 vs_size; /* number of blocks */
+ __fs32 vs_dsize; /* number of data blocks */
+ __fs32 vs_old_ninode; /* obsolete */
+ __fs32 vs_old_nau; /* obsolete */
+ __fs32 __unused3; /* unused */
+ __fs32 vs_old_defiextsize; /* obsolete */
+ __fs32 vs_old_ilbsize; /* obsolete */
+ __fs32 vs_immedlen; /* size of immediate data area */
+ __fs32 vs_ndaddr; /* number of direct extents */
+ __fs32 vs_firstau; /* address of first AU */
+ __fs32 vs_emap; /* offset of extent map in AU */
+ __fs32 vs_imap; /* offset of inode map in AU */
+ __fs32 vs_iextop; /* offset of ExtOp. map in AU */
+ __fs32 vs_istart; /* offset of inode list in AU */
+ __fs32 vs_bstart; /* offset of fdblock in AU */
+ __fs32 vs_femap; /* aufirst + emap */
+ __fs32 vs_fimap; /* aufirst + imap */
+ __fs32 vs_fiextop; /* aufirst + iextop */
+ __fs32 vs_fistart; /* aufirst + istart */
+ __fs32 vs_fbstart; /* aufirst + bstart */
+ __fs32 vs_nindir; /* number of entries in indir */
+ __fs32 vs_aulen; /* length of AU in blocks */
+ __fs32 vs_auimlen; /* length of imap in blocks */
+ __fs32 vs_auemlen; /* length of emap in blocks */
+ __fs32 vs_auilen; /* length of ilist in blocks */
+ __fs32 vs_aupad; /* length of pad in blocks */
+ __fs32 vs_aublocks; /* data blocks in AU */
+ __fs32 vs_maxtier; /* log base 2 of aublocks */
+ __fs32 vs_inopb; /* number of inodes per blk */
+ __fs32 vs_old_inopau; /* obsolete */
+ __fs32 vs_old_inopilb; /* obsolete */
+ __fs32 vs_old_ndiripau; /* obsolete */
+ __fs32 vs_iaddrlen; /* size of indirect addr ext. */
+ __fs32 vs_bshift; /* log base 2 of bsize */
+ __fs32 vs_inoshift; /* log base 2 of inobp */
+ __fs32 vs_bmask; /* ~( bsize - 1 ) */
+ __fs32 vs_boffmask; /* bsize - 1 */
+ __fs32 vs_old_inomask; /* old_inopilb - 1 */
+ __fs32 vs_checksum; /* checksum of V1 data */
/*
* Version 1, writable
*/
- int32_t vs_free; /* number of free blocks */
- int32_t vs_ifree; /* number of free inodes */
- int32_t vs_efree[VXFS_NEFREE]; /* number of free extents by size */
- int32_t vs_flags; /* flags ?!? */
- u_int8_t vs_mod; /* filesystem has been changed */
- u_int8_t vs_clean; /* clean FS */
- u_int16_t __unused4; /* unused */
- u_int32_t vs_firstlogid; /* mount time log ID */
- u_int32_t vs_wtime; /* last time written - sec */
- u_int32_t vs_wutime; /* last time written - usec */
- u_int8_t vs_fname[6]; /* FS name */
- u_int8_t vs_fpack[6]; /* FS pack name */
- int32_t vs_logversion; /* log format version */
- int32_t __unused5; /* unused */
+ __fs32 vs_free; /* number of free blocks */
+ __fs32 vs_ifree; /* number of free inodes */
+ __fs32 vs_efree[VXFS_NEFREE]; /* number of free extents by size */
+ __fs32 vs_flags; /* flags ?!? */
+ __u8 vs_mod; /* filesystem has been changed */
+ __u8 vs_clean; /* clean FS */
+ __fs16 __unused4; /* unused */
+ __fs32 vs_firstlogid; /* mount time log ID */
+ __fs32 vs_wtime; /* last time written - sec */
+ __fs32 vs_wutime; /* last time written - usec */
+ __u8 vs_fname[6]; /* FS name */
+ __u8 vs_fpack[6]; /* FS pack name */
+ __fs32 vs_logversion; /* log format version */
+ __u32 __unused5; /* unused */
/*
* Version 2, Read-only
*/
- vx_daddr_t vs_oltext[2]; /* OLT extent and replica */
- int32_t vs_oltsize; /* OLT extent size */
- int32_t vs_iauimlen; /* size of inode map */
- int32_t vs_iausize; /* size of IAU in blocks */
- int32_t vs_dinosize; /* size of inode in bytes */
- int32_t vs_old_dniaddr; /* indir levels per inode */
- int32_t vs_checksum2; /* checksum of V2 RO */
+ __fs32 vs_oltext[2]; /* OLT extent and replica */
+ __fs32 vs_oltsize; /* OLT extent size */
+ __fs32 vs_iauimlen; /* size of inode map */
+ __fs32 vs_iausize; /* size of IAU in blocks */
+ __fs32 vs_dinosize; /* size of inode in bytes */
+ __fs32 vs_old_dniaddr; /* indir levels per inode */
+ __fs32 vs_checksum2; /* checksum of V2 RO */
/*
* Actually much more...
@@ -168,8 +170,32 @@ struct vxfs_sb_info {
ino_t vsi_fshino; /* fileset header inode */
daddr_t vsi_oltext; /* OLT extent */
daddr_t vsi_oltsize; /* OLT size */
+ enum vxfs_byte_order byte_order;
};
+static inline u16 fs16_to_cpu(struct vxfs_sb_info *sbi, __fs16 a)
+{
+ if (sbi->byte_order == VXFS_BO_BE)
+ return be16_to_cpu((__force __be16)a);
+ else
+ return le16_to_cpu((__force __le16)a);
+}
+
+static inline u32 fs32_to_cpu(struct vxfs_sb_info *sbi, __fs32 a)
+{
+ if (sbi->byte_order == VXFS_BO_BE)
+ return be32_to_cpu((__force __be32)a);
+ else
+ return le32_to_cpu((__force __le32)a);
+}
+
+static inline u64 fs64_to_cpu(struct vxfs_sb_info *sbi, __fs64 a)
+{
+ if (sbi->byte_order == VXFS_BO_BE)
+ return be64_to_cpu((__force __be64)a);
+ else
+ return le64_to_cpu((__force __le64)a);
+}
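
With every on-disk field typed __fs16/__fs32/__fs64, all accesses funnel through these helpers and byte order becomes a per-mount decision. A sketch of how mount-time detection could use them; the actual probe lives in vxfs_super.c, and the function below is illustrative only:

	static int vxfs_try_byte_order(struct vxfs_sb_info *sbi,
				       struct vxfs_sb *rsbp)
	{
		/* UnixWare disks are little-endian, HP-UX disks big-endian */
		sbi->byte_order = VXFS_BO_LE;
		if (fs32_to_cpu(sbi, rsbp->vs_magic) == VXFS_SUPER_MAGIC)
			return 0;

		sbi->byte_order = VXFS_BO_BE;
		if (fs32_to_cpu(sbi, rsbp->vs_magic) == VXFS_SUPER_MAGIC)
			return 0;

		return -EINVAL;		/* not a VxFS superblock */
	}
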
/*
* File modes. File types above 0xf000 are vxfs internal only, they should
@@ -247,13 +273,6 @@ enum {
#define VXFS_ISIMMED(ip) VXFS_IS_ORG((ip), VXFS_ORG_IMMED)
#define VXFS_ISTYPED(ip) VXFS_IS_ORG((ip), VXFS_ORG_TYPED)
-
-/*
- * Get filesystem private data from VFS inode.
- */
-#define VXFS_INO(ip) \
- ((struct vxfs_inode_info *)(ip)->i_private)
-
/*
* Get filesystem private data from VFS superblock.
*/
diff --git a/fs/freevxfs/vxfs_bmap.c b/fs/freevxfs/vxfs_bmap.c
index f86fd3cacd5a..1fd41cf98b9f 100644
--- a/fs/freevxfs/vxfs_bmap.c
+++ b/fs/freevxfs/vxfs_bmap.c
@@ -68,8 +68,9 @@ vxfs_bmap_ext4(struct inode *ip, long bn)
{
struct super_block *sb = ip->i_sb;
struct vxfs_inode_info *vip = VXFS_INO(ip);
+ struct vxfs_sb_info *sbi = VXFS_SBI(sb);
unsigned long bsize = sb->s_blocksize;
- u32 indsize = vip->vii_ext4.ve4_indsize;
+ u32 indsize = fs32_to_cpu(sbi, vip->vii_ext4.ve4_indsize);
int i;
if (indsize > sb->s_blocksize)
@@ -77,22 +78,24 @@ vxfs_bmap_ext4(struct inode *ip, long bn)
for (i = 0; i < VXFS_NDADDR; i++) {
struct direct *d = vip->vii_ext4.ve4_direct + i;
- if (bn >= 0 && bn < d->size)
- return (bn + d->extent);
- bn -= d->size;
+ if (bn >= 0 && bn < fs32_to_cpu(sbi, d->size))
+ return (bn + fs32_to_cpu(sbi, d->extent));
+ bn -= fs32_to_cpu(sbi, d->size);
}
if ((bn / (indsize * indsize * bsize / 4)) == 0) {
struct buffer_head *buf;
daddr_t bno;
- u32 *indir;
+ __fs32 *indir;
- buf = sb_bread(sb, vip->vii_ext4.ve4_indir[0]);
+ buf = sb_bread(sb,
+ fs32_to_cpu(sbi, vip->vii_ext4.ve4_indir[0]));
if (!buf || !buffer_mapped(buf))
goto fail_buf;
- indir = (u32 *)buf->b_data;
- bno = indir[(bn/indsize) % (indsize*bn)] + (bn%indsize);
+ indir = (__fs32 *)buf->b_data;
+ bno = fs32_to_cpu(sbi, indir[(bn / indsize) % (indsize * bn)]) +
+ (bn % indsize);
brelse(buf);
return bno;
@@ -127,6 +130,7 @@ fail_buf:
static daddr_t
vxfs_bmap_indir(struct inode *ip, long indir, int size, long block)
{
+ struct vxfs_sb_info *sbi = VXFS_SBI(ip->i_sb);
struct buffer_head *bp = NULL;
daddr_t pblock = 0;
int i;
@@ -142,24 +146,27 @@ vxfs_bmap_indir(struct inode *ip, long indir, int size, long block)
typ = ((struct vxfs_typed *)bp->b_data) +
(i % VXFS_TYPED_PER_BLOCK(ip->i_sb));
- off = (typ->vt_hdr & VXFS_TYPED_OFFSETMASK);
+ off = fs64_to_cpu(sbi, typ->vt_hdr) & VXFS_TYPED_OFFSETMASK;
if (block < off) {
brelse(bp);
continue;
}
- switch ((u_int32_t)(typ->vt_hdr >> VXFS_TYPED_TYPESHIFT)) {
+ switch ((u_int32_t)(fs64_to_cpu(sbi, typ->vt_hdr) >>
+ VXFS_TYPED_TYPESHIFT)) {
case VXFS_TYPED_INDIRECT:
- pblock = vxfs_bmap_indir(ip, typ->vt_block,
- typ->vt_size, block - off);
+ pblock = vxfs_bmap_indir(ip,
+ fs32_to_cpu(sbi, typ->vt_block),
+ fs32_to_cpu(sbi, typ->vt_size),
+ block - off);
if (pblock == -2)
break;
goto out;
case VXFS_TYPED_DATA:
- if ((block - off) >= typ->vt_size)
+ if ((block - off) >= fs32_to_cpu(sbi, typ->vt_size))
break;
- pblock = (typ->vt_block + block - off);
+ pblock = fs32_to_cpu(sbi, typ->vt_block) + block - off;
goto out;
case VXFS_TYPED_INDIRECT_DEV4:
case VXFS_TYPED_DATA_DEV4: {
@@ -167,13 +174,15 @@ vxfs_bmap_indir(struct inode *ip, long indir, int size, long block)
(struct vxfs_typed_dev4 *)typ;
printk(KERN_INFO "\n\nTYPED_DEV4 detected!\n");
- printk(KERN_INFO "block: %Lu\tsize: %Ld\tdev: %d\n",
- (unsigned long long) typ4->vd4_block,
- (unsigned long long) typ4->vd4_size,
- typ4->vd4_dev);
+ printk(KERN_INFO "block: %llu\tsize: %lld\tdev: %d\n",
+ fs64_to_cpu(sbi, typ4->vd4_block),
+ fs64_to_cpu(sbi, typ4->vd4_size),
+ fs32_to_cpu(sbi, typ4->vd4_dev));
goto fail;
}
default:
+ printk(KERN_ERR "%s:%d vt_hdr %llu\n", __func__,
+ __LINE__, fs64_to_cpu(sbi, typ->vt_hdr));
BUG();
}
brelse(bp);
@@ -201,28 +210,33 @@ static daddr_t
vxfs_bmap_typed(struct inode *ip, long iblock)
{
struct vxfs_inode_info *vip = VXFS_INO(ip);
+ struct vxfs_sb_info *sbi = VXFS_SBI(ip->i_sb);
daddr_t pblock = 0;
int i;
for (i = 0; i < VXFS_NTYPED; i++) {
struct vxfs_typed *typ = vip->vii_org.typed + i;
- int64_t off = (typ->vt_hdr & VXFS_TYPED_OFFSETMASK);
+ u64 hdr = fs64_to_cpu(sbi, typ->vt_hdr);
+ int64_t off = (hdr & VXFS_TYPED_OFFSETMASK);
#ifdef DIAGNOSTIC
vxfs_typdump(typ);
#endif
if (iblock < off)
continue;
- switch ((u_int32_t)(typ->vt_hdr >> VXFS_TYPED_TYPESHIFT)) {
+ switch ((u32)(hdr >> VXFS_TYPED_TYPESHIFT)) {
case VXFS_TYPED_INDIRECT:
- pblock = vxfs_bmap_indir(ip, typ->vt_block,
- typ->vt_size, iblock - off);
+ pblock = vxfs_bmap_indir(ip,
+ fs32_to_cpu(sbi, typ->vt_block),
+ fs32_to_cpu(sbi, typ->vt_size),
+ iblock - off);
if (pblock == -2)
break;
return (pblock);
case VXFS_TYPED_DATA:
- if ((iblock - off) < typ->vt_size)
- return (typ->vt_block + iblock - off);
+ if ((iblock - off) < fs32_to_cpu(sbi, typ->vt_size))
+ return (fs32_to_cpu(sbi, typ->vt_block) +
+ iblock - off);
break;
case VXFS_TYPED_INDIRECT_DEV4:
case VXFS_TYPED_DATA_DEV4: {
@@ -230,10 +244,10 @@ vxfs_bmap_typed(struct inode *ip, long iblock)
(struct vxfs_typed_dev4 *)typ;
printk(KERN_INFO "\n\nTYPED_DEV4 detected!\n");
- printk(KERN_INFO "block: %Lu\tsize: %Ld\tdev: %d\n",
- (unsigned long long) typ4->vd4_block,
- (unsigned long long) typ4->vd4_size,
- typ4->vd4_dev);
+ printk(KERN_INFO "block: %llu\tsize: %lld\tdev: %d\n",
+ fs64_to_cpu(sbi, typ4->vd4_block),
+ fs64_to_cpu(sbi, typ4->vd4_size),
+ fs32_to_cpu(sbi, typ4->vd4_dev));
return 0;
}
default:
diff --git a/fs/freevxfs/vxfs_dir.h b/fs/freevxfs/vxfs_dir.h
index aaf1fb098639..acc5477b3f23 100644
--- a/fs/freevxfs/vxfs_dir.h
+++ b/fs/freevxfs/vxfs_dir.h
@@ -48,9 +48,9 @@
* Linux driver for now.
*/
struct vxfs_dirblk {
- u_int16_t d_free; /* free space in dirblock */
- u_int16_t d_nhash; /* no of hash chains */
- u_int16_t d_hash[1]; /* hash chain */
+ __fs16 d_free; /* free space in dirblock */
+ __fs16 d_nhash; /* no of hash chains */
+ __fs16 d_hash[1]; /* hash chain */
};
/*
@@ -63,10 +63,10 @@ struct vxfs_dirblk {
* VxFS directory entry.
*/
struct vxfs_direct {
- vx_ino_t d_ino; /* inode number */
- u_int16_t d_reclen; /* record length */
- u_int16_t d_namelen; /* d_name length */
- u_int16_t d_hashnext; /* next hash entry */
+ __fs32 d_ino; /* inode number */
+ __fs16 d_reclen; /* record length */
+ __fs16 d_namelen; /* d_name length */
+ __fs16 d_hashnext; /* next hash entry */
char d_name[VXFS_NAMELEN]; /* name */
};
@@ -87,6 +87,7 @@ struct vxfs_direct {
/*
* VXFS_DIRBLKOV is the overhead of a specific dirblock.
*/
-#define VXFS_DIRBLKOV(dbp) ((sizeof(short) * dbp->d_nhash) + 4)
+#define VXFS_DIRBLKOV(sbi, dbp) \
+ ((sizeof(short) * fs16_to_cpu(sbi, dbp->d_nhash)) + 4)
#endif /* _VXFS_DIR_H_ */
diff --git a/fs/freevxfs/vxfs_extern.h b/fs/freevxfs/vxfs_extern.h
index e3dcb4467d92..f5c428e21024 100644
--- a/fs/freevxfs/vxfs_extern.h
+++ b/fs/freevxfs/vxfs_extern.h
@@ -52,14 +52,10 @@ extern int vxfs_read_fshead(struct super_block *);
/* vxfs_inode.c */
extern const struct address_space_operations vxfs_immed_aops;
-extern struct kmem_cache *vxfs_inode_cachep;
extern void vxfs_dumpi(struct vxfs_inode_info *, ino_t);
-extern struct inode * vxfs_get_fake_inode(struct super_block *,
- struct vxfs_inode_info *);
-extern void vxfs_put_fake_inode(struct inode *);
-extern struct vxfs_inode_info * vxfs_blkiget(struct super_block *, u_long, ino_t);
-extern struct vxfs_inode_info * vxfs_stiget(struct super_block *, ino_t);
-extern struct inode * vxfs_iget(struct super_block *, ino_t);
+extern struct inode *vxfs_blkiget(struct super_block *, u_long, ino_t);
+extern struct inode *vxfs_stiget(struct super_block *, ino_t);
+extern struct inode *vxfs_iget(struct super_block *, ino_t);
extern void vxfs_evict_inode(struct inode *);
/* vxfs_lookup.c */
diff --git a/fs/freevxfs/vxfs_fshead.c b/fs/freevxfs/vxfs_fshead.c
index c9a6a94e58e9..a4610a77649e 100644
--- a/fs/freevxfs/vxfs_fshead.c
+++ b/fs/freevxfs/vxfs_fshead.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
+ * Copyright (c) 2016 Krzysztof Blaszkowski
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -108,31 +109,26 @@ vxfs_read_fshead(struct super_block *sbp)
{
struct vxfs_sb_info *infp = VXFS_SBI(sbp);
struct vxfs_fsh *pfp, *sfp;
- struct vxfs_inode_info *vip, *tip;
+ struct vxfs_inode_info *vip;
- vip = vxfs_blkiget(sbp, infp->vsi_iext, infp->vsi_fshino);
- if (!vip) {
+ infp->vsi_fship = vxfs_blkiget(sbp, infp->vsi_iext, infp->vsi_fshino);
+ if (!infp->vsi_fship) {
printk(KERN_ERR "vxfs: unable to read fsh inode\n");
return -EINVAL;
}
+
+ vip = VXFS_INO(infp->vsi_fship);
if (!VXFS_ISFSH(vip)) {
printk(KERN_ERR "vxfs: fsh list inode is of wrong type (%x)\n",
vip->vii_mode & VXFS_TYPE_MASK);
- goto out_free_fship;
+ goto out_iput_fship;
}
-
#ifdef DIAGNOSTIC
printk("vxfs: fsh inode dump:\n");
vxfs_dumpi(vip, infp->vsi_fshino);
#endif
- infp->vsi_fship = vxfs_get_fake_inode(sbp, vip);
- if (!infp->vsi_fship) {
- printk(KERN_ERR "vxfs: unable to get fsh inode\n");
- goto out_free_fship;
- }
-
sfp = vxfs_getfsh(infp->vsi_fship, 0);
if (!sfp) {
printk(KERN_ERR "vxfs: unable to get structural fsh\n");
@@ -153,14 +149,10 @@ vxfs_read_fshead(struct super_block *sbp)
vxfs_dumpfsh(pfp);
#endif
- tip = vxfs_blkiget(sbp, infp->vsi_iext, sfp->fsh_ilistino[0]);
- if (!tip)
- goto out_free_pfp;
-
- infp->vsi_stilist = vxfs_get_fake_inode(sbp, tip);
+ infp->vsi_stilist = vxfs_blkiget(sbp, infp->vsi_iext,
+ fs32_to_cpu(infp, sfp->fsh_ilistino[0]));
if (!infp->vsi_stilist) {
printk(KERN_ERR "vxfs: unable to get structural list inode\n");
- kfree(tip);
goto out_free_pfp;
}
if (!VXFS_ISILT(VXFS_INO(infp->vsi_stilist))) {
@@ -169,13 +161,9 @@ vxfs_read_fshead(struct super_block *sbp)
goto out_iput_stilist;
}
- tip = vxfs_stiget(sbp, pfp->fsh_ilistino[0]);
- if (!tip)
- goto out_iput_stilist;
- infp->vsi_ilist = vxfs_get_fake_inode(sbp, tip);
+ infp->vsi_ilist = vxfs_stiget(sbp, fs32_to_cpu(infp, pfp->fsh_ilistino[0]));
if (!infp->vsi_ilist) {
printk(KERN_ERR "vxfs: unable to get inode list inode\n");
- kfree(tip);
goto out_iput_stilist;
}
if (!VXFS_ISILT(VXFS_INO(infp->vsi_ilist))) {
@@ -184,6 +172,8 @@ vxfs_read_fshead(struct super_block *sbp)
goto out_iput_ilist;
}
+ kfree(pfp);
+ kfree(sfp);
return 0;
out_iput_ilist:
@@ -197,7 +187,4 @@ vxfs_read_fshead(struct super_block *sbp)
out_iput_fship:
iput(infp->vsi_fship);
return -EINVAL;
- out_free_fship:
- kfree(vip);
- return -EINVAL;
}
diff --git a/fs/freevxfs/vxfs_fshead.h b/fs/freevxfs/vxfs_fshead.h
index ead0d640c181..e026f0c49159 100644
--- a/fs/freevxfs/vxfs_fshead.h
+++ b/fs/freevxfs/vxfs_fshead.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
+ * Copyright (c) 2016 Krzysztof Blaszkowski
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -42,20 +43,20 @@
* Fileset header
*/
struct vxfs_fsh {
- u_int32_t fsh_version; /* fileset header version */
- u_int32_t fsh_fsindex; /* fileset index */
- u_int32_t fsh_time; /* modification time - sec */
- u_int32_t fsh_utime; /* modification time - usec */
- u_int32_t fsh_extop; /* extop flags */
- vx_ino_t fsh_ninodes; /* allocated inodes */
- u_int32_t fsh_nau; /* number of IAUs */
- u_int32_t fsh_old_ilesize; /* old size of ilist */
- u_int32_t fsh_dflags; /* flags */
- u_int32_t fsh_quota; /* quota limit */
- vx_ino_t fsh_maxinode; /* maximum inode number */
- vx_ino_t fsh_iauino; /* IAU inode */
- vx_ino_t fsh_ilistino[2]; /* ilist inodes */
- vx_ino_t fsh_lctino; /* link count table inode */
+ __fs32 fsh_version; /* fileset header version */
+ __fs32 fsh_fsindex; /* fileset index */
+ __fs32 fsh_time; /* modification time - sec */
+ __fs32 fsh_utime; /* modification time - usec */
+ __fs32 fsh_extop; /* extop flags */
+ __fs32 fsh_ninodes; /* allocated inodes */
+ __fs32 fsh_nau; /* number of IAUs */
+ __fs32 fsh_old_ilesize; /* old size of ilist */
+ __fs32 fsh_dflags; /* flags */
+ __fs32 fsh_quota; /* quota limit */
+ __fs32 fsh_maxinode; /* maximum inode number */
+ __fs32 fsh_iauino; /* IAU inode */
+ __fs32 fsh_ilistino[2]; /* ilist inodes */
+ __fs32 fsh_lctino; /* link count table inode */
/*
* Slightly more fields follow, but they
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index 3e2ccade61ed..1f41b25ef38b 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
+ * Copyright (c) 2016 Krzysztof Blaszkowski
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -42,9 +43,6 @@
#include "vxfs_extern.h"
-struct kmem_cache *vxfs_inode_cachep;
-
-
#ifdef DIAGNOSTIC
/*
* Dump inode contents (partially).
@@ -68,6 +66,83 @@ vxfs_dumpi(struct vxfs_inode_info *vip, ino_t ino)
}
#endif
+/**
+ * vxfs_transmod - mode for a VxFS inode
+ * @vip: VxFS inode
+ *
+ * Description:
+ * vxfs_transmod returns a Linux mode_t for a given
+ * VxFS inode structure.
+ */
+static __inline__ umode_t
+vxfs_transmod(struct vxfs_inode_info *vip)
+{
+ umode_t ret = vip->vii_mode & ~VXFS_TYPE_MASK;
+
+ if (VXFS_ISFIFO(vip))
+ ret |= S_IFIFO;
+ if (VXFS_ISCHR(vip))
+ ret |= S_IFCHR;
+ if (VXFS_ISDIR(vip))
+ ret |= S_IFDIR;
+ if (VXFS_ISBLK(vip))
+ ret |= S_IFBLK;
+ if (VXFS_ISLNK(vip))
+ ret |= S_IFLNK;
+ if (VXFS_ISREG(vip))
+ ret |= S_IFREG;
+ if (VXFS_ISSOC(vip))
+ ret |= S_IFSOCK;
+
+ return (ret);
+}
+
+static inline void dip2vip_cpy(struct vxfs_sb_info *sbi,
+ struct vxfs_inode_info *vip, struct vxfs_dinode *dip)
+{
+ struct inode *inode = &vip->vfs_inode;
+
+ vip->vii_mode = fs32_to_cpu(sbi, dip->vdi_mode);
+ vip->vii_nlink = fs32_to_cpu(sbi, dip->vdi_nlink);
+ vip->vii_uid = fs32_to_cpu(sbi, dip->vdi_uid);
+ vip->vii_gid = fs32_to_cpu(sbi, dip->vdi_gid);
+ vip->vii_size = fs64_to_cpu(sbi, dip->vdi_size);
+ vip->vii_atime = fs32_to_cpu(sbi, dip->vdi_atime);
+ vip->vii_autime = fs32_to_cpu(sbi, dip->vdi_autime);
+ vip->vii_mtime = fs32_to_cpu(sbi, dip->vdi_mtime);
+ vip->vii_mutime = fs32_to_cpu(sbi, dip->vdi_mutime);
+ vip->vii_ctime = fs32_to_cpu(sbi, dip->vdi_ctime);
+ vip->vii_cutime = fs32_to_cpu(sbi, dip->vdi_cutime);
+ vip->vii_orgtype = dip->vdi_orgtype;
+
+ vip->vii_blocks = fs32_to_cpu(sbi, dip->vdi_blocks);
+ vip->vii_gen = fs32_to_cpu(sbi, dip->vdi_gen);
+
+ if (VXFS_ISDIR(vip))
+ vip->vii_dotdot = fs32_to_cpu(sbi, dip->vdi_dotdot);
+ else if (!VXFS_ISREG(vip) && !VXFS_ISLNK(vip))
+ vip->vii_rdev = fs32_to_cpu(sbi, dip->vdi_rdev);
+
+ /* don't endian swap the fields that differ by orgtype */
+ memcpy(&vip->vii_org, &dip->vdi_org, sizeof(vip->vii_org));
+
+ inode->i_mode = vxfs_transmod(vip);
+ i_uid_write(inode, (uid_t)vip->vii_uid);
+ i_gid_write(inode, (gid_t)vip->vii_gid);
+
+ set_nlink(inode, vip->vii_nlink);
+ inode->i_size = vip->vii_size;
+
+ inode->i_atime.tv_sec = vip->vii_atime;
+ inode->i_ctime.tv_sec = vip->vii_ctime;
+ inode->i_mtime.tv_sec = vip->vii_mtime;
+ inode->i_atime.tv_nsec = 0;
+ inode->i_ctime.tv_nsec = 0;
+ inode->i_mtime.tv_nsec = 0;
+
+ inode->i_blocks = vip->vii_blocks;
+ inode->i_generation = vip->vii_gen;
+}
/**
* vxfs_blkiget - find inode based on extent #
@@ -85,50 +160,55 @@ vxfs_dumpi(struct vxfs_inode_info *vip, ino_t ino)
* buffercache. This function should not be used outside the
* read_super() method, otherwise the data may be incoherent.
*/
-struct vxfs_inode_info *
+struct inode *
vxfs_blkiget(struct super_block *sbp, u_long extent, ino_t ino)
{
struct buffer_head *bp;
+ struct inode *inode;
u_long block, offset;
+ inode = new_inode(sbp);
+ if (!inode)
+ return NULL;
+ inode->i_ino = get_next_ino();
+
block = extent + ((ino * VXFS_ISIZE) / sbp->s_blocksize);
offset = ((ino % (sbp->s_blocksize / VXFS_ISIZE)) * VXFS_ISIZE);
bp = sb_bread(sbp, block);
if (bp && buffer_mapped(bp)) {
- struct vxfs_inode_info *vip;
+ struct vxfs_inode_info *vip = VXFS_INO(inode);
struct vxfs_dinode *dip;
- if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, GFP_KERNEL)))
- goto fail;
dip = (struct vxfs_dinode *)(bp->b_data + offset);
- memcpy(vip, dip, sizeof(*vip));
+ dip2vip_cpy(VXFS_SBI(sbp), vip, dip);
+ vip->vfs_inode.i_mapping->a_ops = &vxfs_aops;
#ifdef DIAGNOSTIC
vxfs_dumpi(vip, ino);
#endif
brelse(bp);
- return (vip);
+ return inode;
}
-fail:
printk(KERN_WARNING "vxfs: unable to read block %ld\n", block);
brelse(bp);
+ iput(inode);
return NULL;
}
/**
* __vxfs_iget - generic find inode facility
- * @sbp: VFS superblock
- * @ino: inode number
* @ilistp: inode list
+ * @vip: VxFS inode to fill in
+ * @ino: inode number
*
* Description:
 * Search for inode number @ino in the inode table @ilistp,
 * filling in the VxFS inode @vip on success.
- * Returns the matching VxFS inode on success, else an error code.
+ * Returns 0 on success, else an error code.
*/
-static struct vxfs_inode_info *
-__vxfs_iget(ino_t ino, struct inode *ilistp)
+static int
+__vxfs_iget(struct inode *ilistp, struct vxfs_inode_info *vip, ino_t ino)
{
struct page *pp;
u_long offset;
@@ -137,28 +217,22 @@ __vxfs_iget(ino_t ino, struct inode *ilistp)
pp = vxfs_get_page(ilistp->i_mapping, ino * VXFS_ISIZE / PAGE_SIZE);
if (!IS_ERR(pp)) {
- struct vxfs_inode_info *vip;
struct vxfs_dinode *dip;
caddr_t kaddr = (char *)page_address(pp);
- if (!(vip = kmem_cache_alloc(vxfs_inode_cachep, GFP_KERNEL)))
- goto fail;
dip = (struct vxfs_dinode *)(kaddr + offset);
- memcpy(vip, dip, sizeof(*vip));
+ dip2vip_cpy(VXFS_SBI(ilistp->i_sb), vip, dip);
+ vip->vfs_inode.i_mapping->a_ops = &vxfs_aops;
#ifdef DIAGNOSTIC
vxfs_dumpi(vip, ino);
#endif
vxfs_put_page(pp);
- return (vip);
+ return 0;
}
- printk(KERN_WARNING "vxfs: error on page %p\n", pp);
- return ERR_CAST(pp);
-
-fail:
- printk(KERN_WARNING "vxfs: unable to read inode %ld\n", (unsigned long)ino);
- vxfs_put_page(pp);
- return ERR_PTR(-ENOMEM);
+ printk(KERN_WARNING "vxfs: error on page 0x%p for inode %ld\n",
+ pp, (unsigned long)ino);
+ return PTR_ERR(pp);
}
/**
@@ -169,116 +243,26 @@ fail:
* Description:
* Find inode @ino in the filesystem described by @sbp using
* the structural inode list.
- * Returns the matching VxFS inode on success, else a NULL pointer.
- */
-struct vxfs_inode_info *
-vxfs_stiget(struct super_block *sbp, ino_t ino)
-{
- struct vxfs_inode_info *vip;
-
- vip = __vxfs_iget(ino, VXFS_SBI(sbp)->vsi_stilist);
- return IS_ERR(vip) ? NULL : vip;
-}
-
-/**
- * vxfs_transmod - mode for a VxFS inode
- * @vip: VxFS inode
- *
- * Description:
- * vxfs_transmod returns a Linux mode_t for a given
- * VxFS inode structure.
- */
-static __inline__ umode_t
-vxfs_transmod(struct vxfs_inode_info *vip)
-{
- umode_t ret = vip->vii_mode & ~VXFS_TYPE_MASK;
-
- if (VXFS_ISFIFO(vip))
- ret |= S_IFIFO;
- if (VXFS_ISCHR(vip))
- ret |= S_IFCHR;
- if (VXFS_ISDIR(vip))
- ret |= S_IFDIR;
- if (VXFS_ISBLK(vip))
- ret |= S_IFBLK;
- if (VXFS_ISLNK(vip))
- ret |= S_IFLNK;
- if (VXFS_ISREG(vip))
- ret |= S_IFREG;
- if (VXFS_ISSOC(vip))
- ret |= S_IFSOCK;
-
- return (ret);
-}
-
-/**
- * vxfs_iinit- helper to fill inode fields
- * @ip: VFS inode
- * @vip: VxFS inode
- *
- * Description:
- * vxfs_instino is a helper function to fill in all relevant
- * fields in @ip from @vip.
- */
-static void
-vxfs_iinit(struct inode *ip, struct vxfs_inode_info *vip)
-{
-
- ip->i_mode = vxfs_transmod(vip);
- i_uid_write(ip, (uid_t)vip->vii_uid);
- i_gid_write(ip, (gid_t)vip->vii_gid);
-
- set_nlink(ip, vip->vii_nlink);
- ip->i_size = vip->vii_size;
-
- ip->i_atime.tv_sec = vip->vii_atime;
- ip->i_ctime.tv_sec = vip->vii_ctime;
- ip->i_mtime.tv_sec = vip->vii_mtime;
- ip->i_atime.tv_nsec = 0;
- ip->i_ctime.tv_nsec = 0;
- ip->i_mtime.tv_nsec = 0;
-
- ip->i_blocks = vip->vii_blocks;
- ip->i_generation = vip->vii_gen;
-
- ip->i_private = vip;
-
-}
-
-/**
- * vxfs_get_fake_inode - get fake inode structure
- * @sbp: filesystem superblock
- * @vip: fspriv inode
- *
- * Description:
- * vxfs_fake_inode gets a fake inode (not in the inode hash) for a
- * superblock, vxfs_inode pair.
- * Returns the filled VFS inode.
+ * Returns the matching inode on success, else a NULL pointer.
*/
struct inode *
-vxfs_get_fake_inode(struct super_block *sbp, struct vxfs_inode_info *vip)
+vxfs_stiget(struct super_block *sbp, ino_t ino)
{
- struct inode *ip = NULL;
-
- if ((ip = new_inode(sbp))) {
- ip->i_ino = get_next_ino();
- vxfs_iinit(ip, vip);
- ip->i_mapping->a_ops = &vxfs_aops;
+ struct inode *inode;
+ int error;
+
+ inode = new_inode(sbp);
+ if (!inode)
+ return NULL;
+ inode->i_ino = get_next_ino();
+
+ error = __vxfs_iget(VXFS_SBI(sbp)->vsi_stilist, VXFS_INO(inode), ino);
+ if (error) {
+ iput(inode);
+ return NULL;
}
- return (ip);
-}
-/**
- * vxfs_put_fake_inode - free faked inode
- * *ip: VFS inode
- *
- * Description:
- * vxfs_put_fake_inode frees all data associated with @ip.
- */
-void
-vxfs_put_fake_inode(struct inode *ip)
-{
- iput(ip);
+ return inode;
}
/**
@@ -296,6 +280,7 @@ vxfs_iget(struct super_block *sbp, ino_t ino)
struct vxfs_inode_info *vip;
const struct address_space_operations *aops;
struct inode *ip;
+ int error;
ip = iget_locked(sbp, ino);
if (!ip)
@@ -303,14 +288,13 @@ vxfs_iget(struct super_block *sbp, ino_t ino)
if (!(ip->i_state & I_NEW))
return ip;
- vip = __vxfs_iget(ino, VXFS_SBI(sbp)->vsi_ilist);
- if (IS_ERR(vip)) {
+ vip = VXFS_INO(ip);
+ error = __vxfs_iget(VXFS_SBI(sbp)->vsi_ilist, vip, ino);
+ if (error) {
iget_failed(ip);
- return ERR_CAST(vip);
+ return ERR_PTR(error);
}
- vxfs_iinit(ip, vip);
-
if (VXFS_ISIMMED(vip))
aops = &vxfs_immed_aops;
else
@@ -341,12 +325,6 @@ vxfs_iget(struct super_block *sbp, ino_t ino)
return ip;
}
-static void vxfs_i_callback(struct rcu_head *head)
-{
- struct inode *inode = container_of(head, struct inode, i_rcu);
- kmem_cache_free(vxfs_inode_cachep, inode->i_private);
-}
-
/**
* vxfs_evict_inode - remove inode from main memory
* @ip: inode to discard.
@@ -360,5 +338,4 @@ vxfs_evict_inode(struct inode *ip)
{
truncate_inode_pages_final(&ip->i_data);
clear_inode(ip);
- call_rcu(&ip->i_rcu, vxfs_i_callback);
}
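
With vxfs_get_fake_inode()/vxfs_put_fake_inode() gone, the internal metadata
inodes (fileset header, structural and primary inode lists) are ordinary VFS
inodes: new_inode() allocates them through the superblock's alloc_inode hook,
get_next_ino() gives them a private number that cannot clash with on-disk inode
numbers, and teardown is a plain iput(). A condensed sketch of the pattern
shared by vxfs_blkiget() and vxfs_stiget() above; fill_from_disk() is a
hypothetical stand-in for the dip2vip_cpy()-based fill:

        struct inode *inode = new_inode(sbp);
        if (!inode)
                return NULL;
        inode->i_ino = get_next_ino();  /* private, never an on-disk ino */
        if (fill_from_disk(inode) < 0) {        /* hypothetical helper */
                iput(inode);    /* unwind through the VFS, not kfree() */
                return NULL;
        }
        return inode;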
diff --git a/fs/freevxfs/vxfs_inode.h b/fs/freevxfs/vxfs_inode.h
index 240aeb11263f..f012abed125d 100644
--- a/fs/freevxfs/vxfs_inode.h
+++ b/fs/freevxfs/vxfs_inode.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
+ * Copyright (c) 2016 Krzysztof Blaszkowski
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -66,74 +67,74 @@ enum {
* Data stored immediately in the inode.
*/
struct vxfs_immed {
- u_int8_t vi_immed[VXFS_NIMMED];
+ __u8 vi_immed[VXFS_NIMMED];
};
struct vxfs_ext4 {
- u_int32_t ve4_spare; /* ?? */
- u_int32_t ve4_indsize; /* Indirect extent size */
- vx_daddr_t ve4_indir[VXFS_NIADDR]; /* Indirect extents */
+ __fs32 ve4_spare; /* ?? */
+ __fs32 ve4_indsize; /* Indirect extent size */
+ __fs32 ve4_indir[VXFS_NIADDR]; /* Indirect extents */
struct direct { /* Direct extents */
- vx_daddr_t extent; /* Extent number */
- int32_t size; /* Size of extent */
+ __fs32 extent; /* Extent number */
+ __fs32 size; /* Size of extent */
} ve4_direct[VXFS_NDADDR];
};
struct vxfs_typed {
- u_int64_t vt_hdr; /* Header, 0xTTOOOOOOOOOOOOOO; T=type,O=offs */
- vx_daddr_t vt_block; /* Extent block */
- int32_t vt_size; /* Size in blocks */
+ __fs64 vt_hdr; /* Header, 0xTTOOOOOOOOOOOOOO; T=type,O=offs */
+ __fs32 vt_block; /* Extent block */
+ __fs32 vt_size; /* Size in blocks */
};
struct vxfs_typed_dev4 {
- u_int64_t vd4_hdr; /* Header, 0xTTOOOOOOOOOOOOOO; T=type,O=offs */
- u_int64_t vd4_block; /* Extent block */
- u_int64_t vd4_size; /* Size in blocks */
- int32_t vd4_dev; /* Device ID */
- u_int32_t __pad1;
+ __fs64 vd4_hdr; /* Header, 0xTTOOOOOOOOOOOOOO; T=type,O=offs */
+ __fs64 vd4_block; /* Extent block */
+ __fs64 vd4_size; /* Size in blocks */
+ __fs32 vd4_dev; /* Device ID */
+ __u8 __pad1;
};
/*
* The inode as contained on the physical device.
*/
struct vxfs_dinode {
- int32_t vdi_mode;
- u_int32_t vdi_nlink; /* Link count */
- u_int32_t vdi_uid; /* UID */
- u_int32_t vdi_gid; /* GID */
- u_int64_t vdi_size; /* Inode size in bytes */
- u_int32_t vdi_atime; /* Last time accessed - sec */
- u_int32_t vdi_autime; /* Last time accessed - usec */
- u_int32_t vdi_mtime; /* Last modify time - sec */
- u_int32_t vdi_mutime; /* Last modify time - usec */
- u_int32_t vdi_ctime; /* Create time - sec */
- u_int32_t vdi_cutime; /* Create time - usec */
- u_int8_t vdi_aflags; /* Allocation flags */
- u_int8_t vdi_orgtype; /* Organisation type */
- u_int16_t vdi_eopflags;
- u_int32_t vdi_eopdata;
+ __fs32 vdi_mode;
+ __fs32 vdi_nlink; /* Link count */
+ __fs32 vdi_uid; /* UID */
+ __fs32 vdi_gid; /* GID */
+ __fs64 vdi_size; /* Inode size in bytes */
+ __fs32 vdi_atime; /* Last time accessed - sec */
+ __fs32 vdi_autime; /* Last time accessed - usec */
+ __fs32 vdi_mtime; /* Last modify time - sec */
+ __fs32 vdi_mutime; /* Last modify time - usec */
+ __fs32 vdi_ctime; /* Create time - sec */
+ __fs32 vdi_cutime; /* Create time - usec */
+ __u8 vdi_aflags; /* Allocation flags */
+ __u8 vdi_orgtype; /* Organisation type */
+ __fs16 vdi_eopflags;
+ __fs32 vdi_eopdata;
union {
- u_int32_t rdev;
- u_int32_t dotdot;
+ __fs32 rdev;
+ __fs32 dotdot;
struct {
- u_int32_t reserved;
- u_int32_t fixextsize;
+ __u32 reserved;
+ __fs32 fixextsize;
} i_regular;
struct {
- u_int32_t matchino;
- u_int32_t fsetindex;
+ __fs32 matchino;
+ __fs32 fsetindex;
} i_vxspec;
- u_int64_t align;
+ __u64 align;
} vdi_ftarea;
- u_int32_t vdi_blocks; /* How much blocks does inode occupy */
- u_int32_t vdi_gen; /* Inode generation */
- u_int64_t vdi_version; /* Version */
+ __fs32 vdi_blocks; /* How many blocks the inode occupies */
+ __fs32 vdi_gen; /* Inode generation */
+ __fs64 vdi_version; /* Version */
union {
struct vxfs_immed immed;
struct vxfs_ext4 ext4;
struct vxfs_typed typed[VXFS_NTYPED];
} vdi_org;
- u_int32_t vdi_iattrino;
+ __fs32 vdi_iattrino;
};
#define vdi_rdev vdi_ftarea.rdev
@@ -149,32 +150,45 @@ struct vxfs_dinode {
/*
* The inode as represented in the main memory.
- *
- * TBD: This should become a separate structure...
*/
-#define vxfs_inode_info vxfs_dinode
-
-#define vii_mode vdi_mode
-#define vii_uid vdi_uid
-#define vii_gid vdi_gid
-#define vii_nlink vdi_nlink
-#define vii_size vdi_size
-#define vii_atime vdi_atime
-#define vii_ctime vdi_ctime
-#define vii_mtime vdi_mtime
-#define vii_blocks vdi_blocks
-#define vii_org vdi_org
-#define vii_orgtype vdi_orgtype
-#define vii_gen vdi_gen
-
-#define vii_rdev vdi_ftarea.rdev
-#define vii_dotdot vdi_ftarea.dotdot
-#define vii_fixextsize vdi_ftarea.regular.fixextsize
-#define vii_matchino vdi_ftarea.vxspec.matchino
-#define vii_fsetindex vdi_ftarea.vxspec.fsetindex
-
-#define vii_immed vdi_org.immed
-#define vii_ext4 vdi_org.ext4
-#define vii_typed vdi_org.typed
+struct vxfs_inode_info {
+ struct inode vfs_inode;
+
+ __u32 vii_mode;
+ __u32 vii_nlink; /* Link count */
+ __u32 vii_uid; /* UID */
+ __u32 vii_gid; /* GID */
+ __u64 vii_size; /* Inode size in bytes */
+ __u32 vii_atime; /* Last time accessed - sec */
+ __u32 vii_autime; /* Last time accessed - usec */
+ __u32 vii_mtime; /* Last modify time - sec */
+ __u32 vii_mutime; /* Last modify time - usec */
+ __u32 vii_ctime; /* Create time - sec */
+ __u32 vii_cutime; /* Create time - usec */
+ __u8 vii_orgtype; /* Organisation type */
+ union {
+ __u32 rdev;
+ __u32 dotdot;
+ } vii_ftarea;
+ __u32 vii_blocks; /* How many blocks the inode occupies */
+ __u32 vii_gen; /* Inode generation */
+ union {
+ struct vxfs_immed immed;
+ struct vxfs_ext4 ext4;
+ struct vxfs_typed typed[VXFS_NTYPED];
+ } vii_org;
+};
+
+#define vii_rdev vii_ftarea.rdev
+#define vii_dotdot vii_ftarea.dotdot
+
+#define vii_immed vii_org.immed
+#define vii_ext4 vii_org.ext4
+#define vii_typed vii_org.typed
+
+static inline struct vxfs_inode_info *VXFS_INO(struct inode *inode)
+{
+ return container_of(inode, struct vxfs_inode_info, vfs_inode);
+}
#endif /* _VXFS_INODE_H_ */
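
vxfs_inode_info now embeds its struct inode, the standard container_of() idiom:
alloc_inode hands the VFS the embedded member, and VXFS_INO() recovers the
containing structure from any inode pointer at zero runtime cost. A purely
illustrative accessor, assuming nothing beyond the header above:

        static inline __u32 vxfs_inode_blocks(struct inode *inode)
        {
                /* one allocation holds both structures; container_of()
                 * is just pointer arithmetic */
                return VXFS_INO(inode)->vii_blocks;
        }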
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index 6d576b97f2c8..ce4785fd81c6 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
+ * Copyright (c) 2016 Krzysztof Blaszkowski
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -61,33 +62,6 @@ const struct file_operations vxfs_dir_operations = {
.iterate_shared = vxfs_readdir,
};
-static inline u_long
-dir_blocks(struct inode *ip)
-{
- u_long bsize = ip->i_sb->s_blocksize;
- return (ip->i_size + bsize - 1) & ~(bsize - 1);
-}
-
-/*
- * NOTE! unlike strncmp, vxfs_match returns 1 for success, 0 for failure.
- *
- * len <= VXFS_NAMELEN and de != NULL are guaranteed by caller.
- */
-static inline int
-vxfs_match(int len, const char * const name, struct vxfs_direct *de)
-{
- if (len != de->d_namelen)
- return 0;
- if (!de->d_ino)
- return 0;
- return !memcmp(name, de->d_name, len);
-}
-
-static inline struct vxfs_direct *
-vxfs_next_entry(struct vxfs_direct *de)
-{
- return ((struct vxfs_direct *)((char*)de + de->d_reclen));
-}
/**
 * vxfs_find_entry - find a matching directory entry for a dentry
@@ -106,50 +80,64 @@ vxfs_next_entry(struct vxfs_direct *de)
static struct vxfs_direct *
vxfs_find_entry(struct inode *ip, struct dentry *dp, struct page **ppp)
{
- u_long npages, page, nblocks, pblocks, block;
- u_long bsize = ip->i_sb->s_blocksize;
- const char *name = dp->d_name.name;
- int namelen = dp->d_name.len;
-
- npages = dir_pages(ip);
- nblocks = dir_blocks(ip);
- pblocks = VXFS_BLOCK_PER_PAGE(ip->i_sb);
-
- for (page = 0; page < npages; page++) {
- caddr_t kaddr;
- struct page *pp;
+ u_long bsize = ip->i_sb->s_blocksize;
+ const char *name = dp->d_name.name;
+ int namelen = dp->d_name.len;
+ loff_t limit = VXFS_DIRROUND(ip->i_size);
+ struct vxfs_direct *de_exit = NULL;
+ loff_t pos = 0;
+ struct vxfs_sb_info *sbi = VXFS_SBI(ip->i_sb);
- pp = vxfs_get_page(ip->i_mapping, page);
+ while (pos < limit) {
+ struct page *pp;
+ char *kaddr;
+ int pg_ofs = pos & ~PAGE_MASK;
+
+ pp = vxfs_get_page(ip->i_mapping, pos >> PAGE_SHIFT);
if (IS_ERR(pp))
- continue;
- kaddr = (caddr_t)page_address(pp);
-
- for (block = 0; block <= nblocks && block <= pblocks; block++) {
- caddr_t baddr, limit;
- struct vxfs_dirblk *dbp;
- struct vxfs_direct *de;
-
- baddr = kaddr + (block * bsize);
- limit = baddr + bsize - VXFS_DIRLEN(1);
-
- dbp = (struct vxfs_dirblk *)baddr;
- de = (struct vxfs_direct *)(baddr + VXFS_DIRBLKOV(dbp));
-
- for (; (caddr_t)de <= limit; de = vxfs_next_entry(de)) {
- if (!de->d_reclen)
- break;
- if (!de->d_ino)
- continue;
- if (vxfs_match(namelen, name, de)) {
- *ppp = pp;
- return (de);
- }
+ return NULL;
+ kaddr = (char *)page_address(pp);
+
+ while (pg_ofs < PAGE_SIZE && pos < limit) {
+ struct vxfs_direct *de;
+
+ if ((pos & (bsize - 1)) < 4) {
+ struct vxfs_dirblk *dbp =
+ (struct vxfs_dirblk *)
+ (kaddr + (pos & ~PAGE_MASK));
+ int overhead = VXFS_DIRBLKOV(sbi, dbp);
+
+ pos += overhead;
+ pg_ofs += overhead;
+ }
+ de = (struct vxfs_direct *)(kaddr + pg_ofs);
+
+ if (!de->d_reclen) {
+ pos += bsize - 1;
+ pos &= ~(bsize - 1);
+ break;
+ }
+
+ pg_ofs += fs16_to_cpu(sbi, de->d_reclen);
+ pos += fs16_to_cpu(sbi, de->d_reclen);
+ if (!de->d_ino)
+ continue;
+
+ if (namelen != fs16_to_cpu(sbi, de->d_namelen))
+ continue;
+ if (!memcmp(name, de->d_name, namelen)) {
+ *ppp = pp;
+ de_exit = de;
+ break;
}
}
- vxfs_put_page(pp);
+ if (!de_exit)
+ vxfs_put_page(pp);
+ else
+ break;
}
- return NULL;
+ return de_exit;
}
/**
@@ -173,7 +161,7 @@ vxfs_inode_by_name(struct inode *dip, struct dentry *dp)
de = vxfs_find_entry(dip, dp, &pp);
if (de) {
- ino = de->d_ino;
+ ino = fs32_to_cpu(VXFS_SBI(dip->i_sb), de->d_ino);
kunmap(pp);
put_page(pp);
}
@@ -233,74 +221,80 @@ vxfs_readdir(struct file *fp, struct dir_context *ctx)
struct inode *ip = file_inode(fp);
struct super_block *sbp = ip->i_sb;
u_long bsize = sbp->s_blocksize;
- u_long page, npages, block, pblocks, nblocks, offset;
- loff_t pos;
+ loff_t pos, limit;
+ struct vxfs_sb_info *sbi = VXFS_SBI(sbp);
if (ctx->pos == 0) {
if (!dir_emit_dot(fp, ctx))
- return 0;
- ctx->pos = 1;
+ goto out;
+ ctx->pos++;
}
if (ctx->pos == 1) {
if (!dir_emit(ctx, "..", 2, VXFS_INO(ip)->vii_dotdot, DT_DIR))
- return 0;
- ctx->pos = 2;
+ goto out;
+ ctx->pos++;
}
- pos = ctx->pos - 2;
-
- if (pos > VXFS_DIRROUND(ip->i_size))
- return 0;
- npages = dir_pages(ip);
- nblocks = dir_blocks(ip);
- pblocks = VXFS_BLOCK_PER_PAGE(sbp);
+ limit = VXFS_DIRROUND(ip->i_size);
+ if (ctx->pos > limit)
+ goto out;
- page = pos >> PAGE_SHIFT;
- offset = pos & ~PAGE_MASK;
- block = (u_long)(pos >> sbp->s_blocksize_bits) % pblocks;
+ pos = ctx->pos & ~3L;
- for (; page < npages; page++, block = 0) {
- char *kaddr;
- struct page *pp;
+ while (pos < limit) {
+ struct page *pp;
+ char *kaddr;
+ int pg_ofs = pos & ~PAGE_MASK;
+ int rc = 0;
- pp = vxfs_get_page(ip->i_mapping, page);
+ pp = vxfs_get_page(ip->i_mapping, pos >> PAGE_SHIFT);
if (IS_ERR(pp))
- continue;
+ return -ENOMEM;
+
kaddr = (char *)page_address(pp);
- for (; block <= nblocks && block <= pblocks; block++) {
- char *baddr, *limit;
- struct vxfs_dirblk *dbp;
- struct vxfs_direct *de;
+ while (pg_ofs < PAGE_SIZE && pos < limit) {
+ struct vxfs_direct *de;
- baddr = kaddr + (block * bsize);
- limit = baddr + bsize - VXFS_DIRLEN(1);
-
- dbp = (struct vxfs_dirblk *)baddr;
- de = (struct vxfs_direct *)
- (offset ?
- (kaddr + offset) :
- (baddr + VXFS_DIRBLKOV(dbp)));
-
- for (; (char *)de <= limit; de = vxfs_next_entry(de)) {
- if (!de->d_reclen)
- break;
- if (!de->d_ino)
- continue;
-
- offset = (char *)de - kaddr;
- ctx->pos = ((page << PAGE_SHIFT) | offset) + 2;
- if (!dir_emit(ctx, de->d_name, de->d_namelen,
- de->d_ino, DT_UNKNOWN)) {
- vxfs_put_page(pp);
- return 0;
- }
+ if ((pos & (bsize - 1)) < 4) {
+ struct vxfs_dirblk *dbp =
+ (struct vxfs_dirblk *)
+ (kaddr + (pos & ~PAGE_MASK));
+ int overhead = VXFS_DIRBLKOV(sbi, dbp);
+
+ pos += overhead;
+ pg_ofs += overhead;
+ }
+ de = (struct vxfs_direct *)(kaddr + pg_ofs);
+
+ if (!de->d_reclen) {
+ pos += bsize - 1;
+ pos &= ~(bsize - 1);
+ break;
+ }
+
+ pg_ofs += fs16_to_cpu(sbi, de->d_reclen);
+ pos += fs16_to_cpu(sbi, de->d_reclen);
+ if (!de->d_ino)
+ continue;
+
+ rc = dir_emit(ctx, de->d_name,
+ fs16_to_cpu(sbi, de->d_namelen),
+ fs32_to_cpu(sbi, de->d_ino),
+ DT_UNKNOWN);
+ if (!rc) {
+ /* the dir entry was not read, fix pos. */
+ pos -= fs16_to_cpu(sbi, de->d_reclen);
+ break;
}
- offset = 0;
}
vxfs_put_page(pp);
- offset = 0;
+ if (!rc)
+ break;
}
- ctx->pos = ((page << PAGE_SHIFT) | offset) + 2;
+
+ ctx->pos = pos | 2;
+
+out:
return 0;
}
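
Both vxfs_find_entry() and vxfs_readdir() now walk directories by byte position
instead of nested page/block indices. The invariants packed into pos, using the
names from the hunks above:

        /*
         * pos >> PAGE_SHIFT        page to fetch via vxfs_get_page()
         * pos & ~PAGE_MASK         entry offset within that page
         * (pos & (bsize - 1)) < 4  pos sits on a vxfs_dirblk header, so
         *                          VXFS_DIRBLKOV(sbi, dbp) bytes are skipped
         * ctx->pos | 2             positions 0 and 1 are reserved for the
         *                          synthetic "." and ".." entries
         */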
diff --git a/fs/freevxfs/vxfs_olt.c b/fs/freevxfs/vxfs_olt.c
index 049500847903..813da6685151 100644
--- a/fs/freevxfs/vxfs_olt.c
+++ b/fs/freevxfs/vxfs_olt.c
@@ -43,14 +43,14 @@ static inline void
vxfs_get_fshead(struct vxfs_oltfshead *fshp, struct vxfs_sb_info *infp)
{
BUG_ON(infp->vsi_fshino);
- infp->vsi_fshino = fshp->olt_fsino[0];
+ infp->vsi_fshino = fs32_to_cpu(infp, fshp->olt_fsino[0]);
}
static inline void
vxfs_get_ilist(struct vxfs_oltilist *ilistp, struct vxfs_sb_info *infp)
{
BUG_ON(infp->vsi_iext);
- infp->vsi_iext = ilistp->olt_iext[0];
+ infp->vsi_iext = fs32_to_cpu(infp, ilistp->olt_iext[0]);
}
static inline u_long
@@ -81,13 +81,12 @@ vxfs_read_olt(struct super_block *sbp, u_long bsize)
struct vxfs_olt *op;
char *oaddr, *eaddr;
-
bp = sb_bread(sbp, vxfs_oblock(sbp, infp->vsi_oltext, bsize));
if (!bp || !bp->b_data)
goto fail;
op = (struct vxfs_olt *)bp->b_data;
- if (op->olt_magic != VXFS_OLT_MAGIC) {
+ if (fs32_to_cpu(infp, op->olt_magic) != VXFS_OLT_MAGIC) {
 printk(KERN_NOTICE "vxfs: invalid olt magic number\n");
goto fail;
}
@@ -102,14 +101,14 @@ vxfs_read_olt(struct super_block *sbp, u_long bsize)
goto fail;
}
- oaddr = bp->b_data + op->olt_size;
+ oaddr = bp->b_data + fs32_to_cpu(infp, op->olt_size);
eaddr = bp->b_data + (infp->vsi_oltsize * sbp->s_blocksize);
while (oaddr < eaddr) {
struct vxfs_oltcommon *ocp =
(struct vxfs_oltcommon *)oaddr;
- switch (ocp->olt_type) {
+ switch (fs32_to_cpu(infp, ocp->olt_type)) {
case VXFS_OLT_FSHEAD:
vxfs_get_fshead((struct vxfs_oltfshead *)oaddr, infp);
break;
@@ -118,11 +117,11 @@ vxfs_read_olt(struct super_block *sbp, u_long bsize)
break;
}
- oaddr += ocp->olt_size;
+ oaddr += fs32_to_cpu(infp, ocp->olt_size);
}
brelse(bp);
- return 0;
+ return (infp->vsi_fshino && infp->vsi_iext) ? 0 : -EINVAL;
fail:
brelse(bp);
diff --git a/fs/freevxfs/vxfs_olt.h b/fs/freevxfs/vxfs_olt.h
index b7b3af502615..0c0b0c9fa557 100644
--- a/fs/freevxfs/vxfs_olt.h
+++ b/fs/freevxfs/vxfs_olt.h
@@ -63,83 +63,83 @@ enum {
* the initial inode list, the fileset header or the device configuration.
*/
struct vxfs_olt {
- u_int32_t olt_magic; /* magic number */
- u_int32_t olt_size; /* size of this entry */
- u_int32_t olt_checksum; /* checksum of extent */
- u_int32_t __unused1; /* ??? */
- u_int32_t olt_mtime; /* time of last mod. (sec) */
- u_int32_t olt_mutime; /* time of last mod. (usec) */
- u_int32_t olt_totfree; /* free space in OLT extent */
- vx_daddr_t olt_extents[2]; /* addr of this extent, replica */
- u_int32_t olt_esize; /* size of this extent */
- vx_daddr_t olt_next[2]; /* addr of next extent, replica */
- u_int32_t olt_nsize; /* size of next extent */
- u_int32_t __unused2; /* align to 8 byte boundary */
+ __fs32 olt_magic; /* magic number */
+ __fs32 olt_size; /* size of this entry */
+ __fs32 olt_checksum; /* checksum of extent */
+ __u32 __unused1; /* ??? */
+ __fs32 olt_mtime; /* time of last mod. (sec) */
+ __fs32 olt_mutime; /* time of last mod. (usec) */
+ __fs32 olt_totfree; /* free space in OLT extent */
+ __fs32 olt_extents[2]; /* addr of this extent, replica */
+ __fs32 olt_esize; /* size of this extent */
+ __fs32 olt_next[2]; /* addr of next extent, replica */
+ __fs32 olt_nsize; /* size of next extent */
+ __u32 __unused2; /* align to 8 byte boundary */
};
/*
* VxFS common OLT entry (on disk).
*/
struct vxfs_oltcommon {
- u_int32_t olt_type; /* type of this record */
- u_int32_t olt_size; /* size of this record */
+ __fs32 olt_type; /* type of this record */
+ __fs32 olt_size; /* size of this record */
};
/*
* VxFS free OLT entry (on disk).
*/
struct vxfs_oltfree {
- u_int32_t olt_type; /* type of this record */
- u_int32_t olt_fsize; /* size of this free record */
+ __fs32 olt_type; /* type of this record */
+ __fs32 olt_fsize; /* size of this free record */
};
/*
* VxFS initial-inode list (on disk).
*/
struct vxfs_oltilist {
- u_int32_t olt_type; /* type of this record */
- u_int32_t olt_size; /* size of this record */
- vx_ino_t olt_iext[2]; /* initial inode list, replica */
+ __fs32 olt_type; /* type of this record */
+ __fs32 olt_size; /* size of this record */
+ __fs32 olt_iext[2]; /* initial inode list, replica */
};
/*
* Current Usage Table
*/
struct vxfs_oltcut {
- u_int32_t olt_type; /* type of this record */
- u_int32_t olt_size; /* size of this record */
- vx_ino_t olt_cutino; /* inode of current usage table */
- u_int32_t __pad; /* unused, 8 byte align */
+ __fs32 olt_type; /* type of this record */
+ __fs32 olt_size; /* size of this record */
+ __fs32 olt_cutino; /* inode of current usage table */
+ __u8 __pad; /* unused, 8 byte align */
};
/*
* Inodes containing Superblock, Intent log and OLTs
*/
struct vxfs_oltsb {
- u_int32_t olt_type; /* type of this record */
- u_int32_t olt_size; /* size of this record */
- vx_ino_t olt_sbino; /* inode of superblock file */
- u_int32_t __unused1; /* ??? */
- vx_ino_t olt_logino[2]; /* inode of log file,replica */
- vx_ino_t olt_oltino[2]; /* inode of OLT, replica */
+ __fs32 olt_type; /* type of this record */
+ __fs32 olt_size; /* size of this record */
+ __fs32 olt_sbino; /* inode of superblock file */
+ __u32 __unused1; /* ??? */
+ __fs32 olt_logino[2]; /* inode of log file,replica */
+ __fs32 olt_oltino[2]; /* inode of OLT, replica */
};
/*
* Inode containing device configuration + it's replica
*/
struct vxfs_oltdev {
- u_int32_t olt_type; /* type of this record */
- u_int32_t olt_size; /* size of this record */
- vx_ino_t olt_devino[2]; /* inode of device config files */
+ __fs32 olt_type; /* type of this record */
+ __fs32 olt_size; /* size of this record */
+ __fs32 olt_devino[2]; /* inode of device config files */
};
/*
* Fileset header
*/
struct vxfs_oltfshead {
- u_int32_t olt_type; /* type number */
- u_int32_t olt_size; /* size of this record */
- vx_ino_t olt_fsino[2]; /* inodes of fileset header */
+ __fs32 olt_type; /* type number */
+ __fs32 olt_size; /* size of this record */
+ __fs32 olt_fsino[2]; /* inodes of fileset header */
};
#endif /* _VXFS_OLT_H_ */
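
The on-disk scalars are retyped from host-endian u_int32_t to __fs32 because one
driver now mounts both little-endian (Unixware/x86) and big-endian (HP-UX/PA-RISC)
volumes; the byte order is detected at mount time and every access funnels through
a conversion helper. A plausible shape for that helper, using the byte_order field
set up in vxfs_super.c below:

        static inline u32 fs32_to_cpu(struct vxfs_sb_info *sbi, __fs32 a)
        {
                if (sbi->byte_order == VXFS_BO_BE)
                        return be32_to_cpu((__force __be32)a);
                return le32_to_cpu((__force __le32)a);
        }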
diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c
index 7ca8c75d50d3..455ce5b77e9b 100644
--- a/fs/freevxfs/vxfs_super.c
+++ b/fs/freevxfs/vxfs_super.c
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2000-2001 Christoph Hellwig.
+ * Copyright (c) 2016 Krzysztof Blaszkowski
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -48,22 +49,11 @@
#include "vxfs_inode.h"
-MODULE_AUTHOR("Christoph Hellwig");
+MODULE_AUTHOR("Christoph Hellwig, Krzysztof Blaszkowski");
MODULE_DESCRIPTION("Veritas Filesystem (VxFS) driver");
MODULE_LICENSE("Dual BSD/GPL");
-
-
-static void vxfs_put_super(struct super_block *);
-static int vxfs_statfs(struct dentry *, struct kstatfs *);
-static int vxfs_remount(struct super_block *, int *, char *);
-
-static const struct super_operations vxfs_super_ops = {
- .evict_inode = vxfs_evict_inode,
- .put_super = vxfs_put_super,
- .statfs = vxfs_statfs,
- .remount_fs = vxfs_remount,
-};
+static struct kmem_cache *vxfs_inode_cachep;
/**
* vxfs_put_super - free superblock resources
@@ -79,9 +69,9 @@ vxfs_put_super(struct super_block *sbp)
{
struct vxfs_sb_info *infp = VXFS_SBI(sbp);
- vxfs_put_fake_inode(infp->vsi_fship);
- vxfs_put_fake_inode(infp->vsi_ilist);
- vxfs_put_fake_inode(infp->vsi_stilist);
+ iput(infp->vsi_fship);
+ iput(infp->vsi_ilist);
+ iput(infp->vsi_stilist);
brelse(infp->vsi_bp);
kfree(infp);
@@ -109,14 +99,15 @@ static int
vxfs_statfs(struct dentry *dentry, struct kstatfs *bufp)
{
struct vxfs_sb_info *infp = VXFS_SBI(dentry->d_sb);
+ struct vxfs_sb *raw_sb = infp->vsi_raw;
bufp->f_type = VXFS_SUPER_MAGIC;
bufp->f_bsize = dentry->d_sb->s_blocksize;
- bufp->f_blocks = infp->vsi_raw->vs_dsize;
- bufp->f_bfree = infp->vsi_raw->vs_free;
+ bufp->f_blocks = fs32_to_cpu(infp, raw_sb->vs_dsize);
+ bufp->f_bfree = fs32_to_cpu(infp, raw_sb->vs_free);
bufp->f_bavail = 0;
bufp->f_files = 0;
- bufp->f_ffree = infp->vsi_raw->vs_ifree;
+ bufp->f_ffree = fs32_to_cpu(infp, raw_sb->vs_ifree);
bufp->f_namelen = VXFS_NAMELEN;
return 0;
@@ -129,6 +120,81 @@ static int vxfs_remount(struct super_block *sb, int *flags, char *data)
return 0;
}
+static struct inode *vxfs_alloc_inode(struct super_block *sb)
+{
+ struct vxfs_inode_info *vi;
+
+ vi = kmem_cache_alloc(vxfs_inode_cachep, GFP_KERNEL);
+ if (!vi)
+ return NULL;
+ inode_init_once(&vi->vfs_inode);
+ return &vi->vfs_inode;
+}
+
+static void vxfs_i_callback(struct rcu_head *head)
+{
+ struct inode *inode = container_of(head, struct inode, i_rcu);
+
+ kmem_cache_free(vxfs_inode_cachep, VXFS_INO(inode));
+}
+
+static void vxfs_destroy_inode(struct inode *inode)
+{
+ call_rcu(&inode->i_rcu, vxfs_i_callback);
+}
+
+static const struct super_operations vxfs_super_ops = {
+ .alloc_inode = vxfs_alloc_inode,
+ .destroy_inode = vxfs_destroy_inode,
+ .evict_inode = vxfs_evict_inode,
+ .put_super = vxfs_put_super,
+ .statfs = vxfs_statfs,
+ .remount_fs = vxfs_remount,
+};
+
+static int vxfs_try_sb_magic(struct super_block *sbp, int silent,
+ unsigned blk, __fs32 magic)
+{
+ struct buffer_head *bp;
+ struct vxfs_sb *rsbp;
+ struct vxfs_sb_info *infp = VXFS_SBI(sbp);
+ int rc = -ENOMEM;
+
+ bp = sb_bread(sbp, blk);
+ do {
+ if (!bp || !buffer_mapped(bp)) {
+ if (!silent) {
+ printk(KERN_WARNING
+ "vxfs: unable to read disk superblock at %u\n",
+ blk);
+ }
+ break;
+ }
+
+ rc = -EINVAL;
+ rsbp = (struct vxfs_sb *)bp->b_data;
+ if (rsbp->vs_magic != magic) {
+ if (!silent)
+ printk(KERN_NOTICE
+ "vxfs: WRONG superblock magic %08x at %u\n",
+ rsbp->vs_magic, blk);
+ break;
+ }
+
+ rc = 0;
+ infp->vsi_raw = rsbp;
+ infp->vsi_bp = bp;
+ } while (0);
+
+ if (rc) {
+ infp->vsi_raw = NULL;
+ infp->vsi_bp = NULL;
+ brelse(bp);
+ }
+
+ return rc;
+}
+
/**
* vxfs_read_super - read superblock into memory and initialize filesystem
* @sbp: VFS superblock (to fill)
@@ -149,10 +215,10 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
{
struct vxfs_sb_info *infp;
struct vxfs_sb *rsbp;
- struct buffer_head *bp = NULL;
u_long bsize;
struct inode *root;
int ret = -EINVAL;
+ u32 j;
sbp->s_flags |= MS_RDONLY;
@@ -168,42 +234,43 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
goto out;
}
- bp = sb_bread(sbp, 1);
- if (!bp || !buffer_mapped(bp)) {
- if (!silent) {
- printk(KERN_WARNING
- "vxfs: unable to read disk superblock\n");
- }
- goto out;
- }
+ sbp->s_op = &vxfs_super_ops;
+ sbp->s_fs_info = infp;
- rsbp = (struct vxfs_sb *)bp->b_data;
- if (rsbp->vs_magic != VXFS_SUPER_MAGIC) {
+ if (!vxfs_try_sb_magic(sbp, silent, 1,
+ (__force __fs32)cpu_to_le32(VXFS_SUPER_MAGIC))) {
+ /* Unixware, x86 */
+ infp->byte_order = VXFS_BO_LE;
+ } else if (!vxfs_try_sb_magic(sbp, silent, 8,
+ (__force __fs32)cpu_to_be32(VXFS_SUPER_MAGIC))) {
+ /* HP-UX, parisc */
+ infp->byte_order = VXFS_BO_BE;
+ } else {
if (!silent)
- printk(KERN_NOTICE "vxfs: WRONG superblock magic\n");
+ printk(KERN_NOTICE "vxfs: can't find superblock.\n");
goto out;
}
- if ((rsbp->vs_version < 2 || rsbp->vs_version > 4) && !silent) {
- printk(KERN_NOTICE "vxfs: unsupported VxFS version (%d)\n",
- rsbp->vs_version);
+ rsbp = infp->vsi_raw;
+ j = fs32_to_cpu(infp, rsbp->vs_version);
+ if ((j < 2 || j > 4) && !silent) {
+ printk(KERN_NOTICE "vxfs: unsupported VxFS version (%d)\n", j);
goto out;
}
#ifdef DIAGNOSTIC
- printk(KERN_DEBUG "vxfs: supported VxFS version (%d)\n", rsbp->vs_version);
- printk(KERN_DEBUG "vxfs: blocksize: %d\n", rsbp->vs_bsize);
+ printk(KERN_DEBUG "vxfs: supported VxFS version (%d)\n", j);
+ printk(KERN_DEBUG "vxfs: blocksize: %d\n",
+ fs32_to_cpu(infp, rsbp->vs_bsize));
#endif
- sbp->s_magic = rsbp->vs_magic;
- sbp->s_fs_info = infp;
+ sbp->s_magic = fs32_to_cpu(infp, rsbp->vs_magic);
- infp->vsi_raw = rsbp;
- infp->vsi_bp = bp;
- infp->vsi_oltext = rsbp->vs_oltext[0];
- infp->vsi_oltsize = rsbp->vs_oltsize;
+ infp->vsi_oltext = fs32_to_cpu(infp, rsbp->vs_oltext[0]);
+ infp->vsi_oltsize = fs32_to_cpu(infp, rsbp->vs_oltsize);
- if (!sb_set_blocksize(sbp, rsbp->vs_bsize)) {
+ j = fs32_to_cpu(infp, rsbp->vs_bsize);
+ if (!sb_set_blocksize(sbp, j)) {
printk(KERN_WARNING "vxfs: unable to set final block size\n");
goto out;
}
@@ -218,7 +285,6 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
goto out;
}
- sbp->s_op = &vxfs_super_ops;
root = vxfs_iget(sbp, VXFS_ROOT_INO);
if (IS_ERR(root)) {
ret = PTR_ERR(root);
@@ -233,11 +299,11 @@ static int vxfs_fill_super(struct super_block *sbp, void *dp, int silent)
return 0;
out_free_ilist:
- vxfs_put_fake_inode(infp->vsi_fship);
- vxfs_put_fake_inode(infp->vsi_ilist);
- vxfs_put_fake_inode(infp->vsi_stilist);
+ iput(infp->vsi_fship);
+ iput(infp->vsi_ilist);
+ iput(infp->vsi_stilist);
out:
- brelse(bp);
+ brelse(infp->vsi_bp);
kfree(infp);
return ret;
}
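
Since vxfs_inode_cachep is now static to vxfs_super.c and paired with
alloc_inode/destroy_inode, the module init path (outside this excerpt) must size
the cache for the embedding structure. A sketch of the conventional shape;
vxfs_fs_type and the exact cache flags are assumptions:

        static int __init vxfs_init(void)
        {
                int rv;

                vxfs_inode_cachep = kmem_cache_create("vxfs_inode",
                                sizeof(struct vxfs_inode_info), 0,
                                SLAB_RECLAIM_ACCOUNT, NULL);
                if (!vxfs_inode_cachep)
                        return -ENOMEM;
                rv = register_filesystem(&vxfs_fs_type); /* assumed name */
                if (rv)
                        kmem_cache_destroy(vxfs_inode_cachep);
                return rv;
        }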
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index fe7e83a45eff..4d09d4441e3e 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -981,6 +981,42 @@ void inode_io_list_del(struct inode *inode)
}
/*
+ * mark an inode as under writeback on the sb
+ */
+void sb_mark_inode_writeback(struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+ unsigned long flags;
+
+ if (list_empty(&inode->i_wb_list)) {
+ spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
+ if (list_empty(&inode->i_wb_list)) {
+ list_add_tail(&inode->i_wb_list, &sb->s_inodes_wb);
+ trace_sb_mark_inode_writeback(inode);
+ }
+ spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
+ }
+}
+
+/*
+ * clear an inode as under writeback on the sb
+ */
+void sb_clear_inode_writeback(struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+ unsigned long flags;
+
+ if (!list_empty(&inode->i_wb_list)) {
+ spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
+ if (!list_empty(&inode->i_wb_list)) {
+ list_del_init(&inode->i_wb_list);
+ trace_sb_clear_inode_writeback(inode);
+ }
+ spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
+ }
+}
+
+/*
* Redirty an inode: set its when-it-was dirtied timestamp and move it to the
* furthest end of its superblock's dirty-inode list.
*
@@ -1291,6 +1327,7 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
dirty = inode->i_state & I_DIRTY;
if (inode->i_state & I_DIRTY_TIME) {
if ((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) ||
+ wbc->sync_mode == WB_SYNC_ALL ||
unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) ||
unlikely(time_after(jiffies,
(inode->dirtied_time_when +
@@ -1771,8 +1808,8 @@ static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
*/
static unsigned long get_nr_dirty_pages(void)
{
- return global_page_state(NR_FILE_DIRTY) +
- global_page_state(NR_UNSTABLE_NFS) +
+ return global_node_page_state(NR_FILE_DIRTY) +
+ global_node_page_state(NR_UNSTABLE_NFS) +
get_nr_dirty_inodes();
}
@@ -2154,7 +2191,7 @@ EXPORT_SYMBOL(__mark_inode_dirty);
*/
static void wait_sb_inodes(struct super_block *sb)
{
- struct inode *inode, *old_inode = NULL;
+ LIST_HEAD(sync_list);
/*
* We need to be protected against the filesystem going from
@@ -2163,38 +2200,60 @@ static void wait_sb_inodes(struct super_block *sb)
WARN_ON(!rwsem_is_locked(&sb->s_umount));
mutex_lock(&sb->s_sync_lock);
- spin_lock(&sb->s_inode_list_lock);
/*
- * Data integrity sync. Must wait for all pages under writeback,
- * because there may have been pages dirtied before our sync
- * call, but which had writeout started before we write it out.
- * In which case, the inode may not be on the dirty list, but
- * we still have to wait for that writeout.
+ * Splice the writeback list onto a temporary list to avoid waiting on
+ * inodes that have started writeback after this point.
+ *
+ * Use rcu_read_lock() to keep the inodes around until we have a
+ * reference. s_inode_wblist_lock protects sb->s_inodes_wb as well as
+ * the local list because inodes can be dropped from either by writeback
+ * completion.
+ */
+ rcu_read_lock();
+ spin_lock_irq(&sb->s_inode_wblist_lock);
+ list_splice_init(&sb->s_inodes_wb, &sync_list);
+
+ /*
+ * Data integrity sync. Must wait for all pages under writeback, because
+ * there may have been pages dirtied before our sync call, but which had
+ * writeout started before we write it out. In which case, the inode
+ * may not be on the dirty list, but we still have to wait for that
+ * writeout.
*/
- list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+ while (!list_empty(&sync_list)) {
+ struct inode *inode = list_first_entry(&sync_list, struct inode,
+ i_wb_list);
struct address_space *mapping = inode->i_mapping;
+ /*
+ * Move each inode back to the wb list before we drop the lock
+ * to preserve consistency between i_wb_list and the mapping
+ * writeback tag. Writeback completion is responsible for removing
+ * the inode from either list once the writeback tag is cleared.
+ */
+ list_move_tail(&inode->i_wb_list, &sb->s_inodes_wb);
+
+ /*
+ * The mapping can appear untagged while still on-list since we
+ * do not have the mapping lock. Skip it here, wb completion
+ * will remove it.
+ */
+ if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
+ continue;
+
+ spin_unlock_irq(&sb->s_inode_wblist_lock);
+
spin_lock(&inode->i_lock);
- if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
- (mapping->nrpages == 0)) {
+ if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
spin_unlock(&inode->i_lock);
+
+ spin_lock_irq(&sb->s_inode_wblist_lock);
continue;
}
__iget(inode);
spin_unlock(&inode->i_lock);
- spin_unlock(&sb->s_inode_list_lock);
-
- /*
- * We hold a reference to 'inode' so it couldn't have been
- * removed from s_inodes list while we dropped the
- * s_inode_list_lock. We cannot iput the inode now as we can
- * be holding the last reference and we cannot iput it under
- * s_inode_list_lock. So we keep the reference and iput it
- * later.
- */
- iput(old_inode);
- old_inode = inode;
+ rcu_read_unlock();
/*
* We keep the error status of individual mapping so that
@@ -2205,10 +2264,13 @@ static void wait_sb_inodes(struct super_block *sb)
cond_resched();
- spin_lock(&sb->s_inode_list_lock);
+ iput(inode);
+
+ rcu_read_lock();
+ spin_lock_irq(&sb->s_inode_wblist_lock);
}
- spin_unlock(&sb->s_inode_list_lock);
- iput(old_inode);
+ spin_unlock_irq(&sb->s_inode_wblist_lock);
+ rcu_read_unlock();
mutex_unlock(&sb->s_sync_lock);
}
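
The list is drained on the completion side, which is outside this fs/ diffstat:
once the last page of a mapping drops its writeback tag, the inode is expected to
come off sb->s_inodes_wb. A hedged sketch of that call site, assuming it lives in
the mm/ end-of-page-writeback path:

        /* sketch: end of page writeback (mm/, not part of this diff) */
        if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
                sb_clear_inode_writeback(mapping->host);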
diff --git a/fs/fscache/histogram.c b/fs/fscache/histogram.c
index 7d637e2335fd..15a3d042247e 100644
--- a/fs/fscache/histogram.c
+++ b/fs/fscache/histogram.c
@@ -99,7 +99,6 @@ static int fscache_histogram_open(struct inode *inode, struct file *file)
}
const struct file_operations fscache_histogram_fops = {
- .owner = THIS_MODULE,
.open = fscache_histogram_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index 6b028b7c4250..5d5ddaa84b21 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -404,7 +404,6 @@ static int fscache_objlist_release(struct inode *inode, struct file *file)
}
const struct file_operations fscache_objlist_fops = {
- .owner = THIS_MODULE,
.open = fscache_objlist_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
index 7cfa0aacdf6d..7ac6e839b065 100644
--- a/fs/fscache/stats.c
+++ b/fs/fscache/stats.c
@@ -295,7 +295,6 @@ static int fscache_stats_open(struct inode *inode, struct file *file)
}
const struct file_operations fscache_stats_fops = {
- .owner = THIS_MODULE,
.open = fscache_stats_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index cbece1221417..a94d2ed81ab4 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -99,19 +99,6 @@ void fuse_request_free(struct fuse_req *req)
kmem_cache_free(fuse_req_cachep, req);
}
-static void block_sigs(sigset_t *oldset)
-{
- sigset_t mask;
-
- siginitsetinv(&mask, sigmask(SIGKILL));
- sigprocmask(SIG_BLOCK, &mask, oldset);
-}
-
-static void restore_sigs(sigset_t *oldset)
-{
- sigprocmask(SIG_SETMASK, oldset, NULL);
-}
-
void __fuse_get_request(struct fuse_req *req)
{
atomic_inc(&req->count);
@@ -151,15 +138,9 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
atomic_inc(&fc->num_waiting);
if (fuse_block_alloc(fc, for_background)) {
- sigset_t oldset;
- int intr;
-
- block_sigs(&oldset);
- intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
- !fuse_block_alloc(fc, for_background));
- restore_sigs(&oldset);
err = -EINTR;
- if (intr)
+ if (wait_event_killable_exclusive(fc->blocked_waitq,
+ !fuse_block_alloc(fc, for_background)))
goto out;
}
/* Matches smp_wmb() in fuse_set_initialized() */
@@ -446,14 +427,9 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
}
if (!test_bit(FR_FORCE, &req->flags)) {
- sigset_t oldset;
-
/* Only fatal signals may interrupt this */
- block_sigs(&oldset);
- err = wait_event_interruptible(req->waitq,
+ err = wait_event_killable(req->waitq,
test_bit(FR_FINISHED, &req->flags));
- restore_sigs(&oldset);
-
if (!err)
return;
@@ -1525,7 +1501,6 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
goto err;
fuse_copy_finish(cs);
buf[outarg.namelen] = 0;
- name.hash = full_name_hash(name.name, name.len);
down_read(&fc->killsb);
err = -ENOENT;
@@ -1576,7 +1551,6 @@ static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
goto err;
fuse_copy_finish(cs);
buf[outarg.namelen] = 0;
- name.hash = full_name_hash(name.name, name.len);
down_read(&fc->killsb);
err = -ENOENT;
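
The deleted block_sigs()/restore_sigs() pair existed only so that SIGKILL (and
nothing else) could interrupt a wait; wait_event_killable() and
wait_event_killable_exclusive() express that directly. The equivalence, roughly:

        /* before: mask all but SIGKILL around an interruptible wait */
        block_sigs(&oldset);
        err = wait_event_interruptible(wq, condition);
        restore_sigs(&oldset);

        /* after: a single helper, interruptible only by fatal signals */
        err = wait_event_killable(wq, condition);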
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 264f07c7754e..c47b7780ce37 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -146,7 +146,7 @@ static void fuse_invalidate_entry(struct dentry *entry)
}
static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
- u64 nodeid, struct qstr *name,
+ u64 nodeid, const struct qstr *name,
struct fuse_entry_out *outarg)
{
memset(outarg, 0, sizeof(struct fuse_entry_out));
@@ -282,7 +282,7 @@ int fuse_valid_type(int m)
S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
}
-int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
+int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name,
struct fuse_entry_out *outarg, struct inode **inode)
{
struct fuse_conn *fc = get_fuse_conn_super(sb);
@@ -480,7 +480,7 @@ static int fuse_atomic_open(struct inode *dir, struct dentry *entry,
struct fuse_conn *fc = get_fuse_conn(dir);
struct dentry *res = NULL;
- if (d_unhashed(entry)) {
+ if (d_in_lookup(entry)) {
res = fuse_lookup(dir, entry, 0);
if (IS_ERR(res))
return PTR_ERR(res);
@@ -955,6 +955,7 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
if (!dir)
goto unlock;
+ name->hash = full_name_hash(dir, name->name, name->len);
entry = d_lookup(dir, name);
dput(dir);
if (!entry)
@@ -1204,7 +1205,7 @@ static int fuse_direntplus_link(struct file *file,
fc = get_fuse_conn(dir);
- name.hash = full_name_hash(name.name, name.len);
+ name.hash = full_name_hash(parent, name.name, name.len);
dentry = d_lookup(parent, &name);
if (!dentry) {
retry:
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 9154f8679024..f394aff59c36 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -417,6 +417,10 @@ static int fuse_flush(struct file *file, fl_owner_t id)
fuse_sync_writes(inode);
inode_unlock(inode);
+ err = filemap_check_errors(file->f_mapping);
+ if (err)
+ return err;
+
req = fuse_get_req_nofail_nopages(fc, file);
memset(&inarg, 0, sizeof(inarg));
inarg.fh = ff->fh;
@@ -462,6 +466,16 @@ int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
goto out;
fuse_sync_writes(inode);
+
+ /*
+ * Due to the implementation of fuse writeback,
+ * filemap_write_and_wait_range() does not catch errors.
+ * We have to check them directly after fuse_sync_writes().
+ */
+ err = filemap_check_errors(file->f_mapping);
+ if (err)
+ goto out;
+
err = sync_inode_metadata(inode, 1);
if (err)
goto out;
@@ -562,7 +576,6 @@ static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
*/
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
- bool is_sync = is_sync_kiocb(io->iocb);
int left;
spin_lock(&io->lock);
@@ -572,11 +585,11 @@ static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
io->bytes = pos;
left = --io->reqs;
- if (!left && is_sync)
+ if (!left && io->blocking)
complete(io->done);
spin_unlock(&io->lock);
- if (!left && !is_sync) {
+ if (!left && !io->blocking) {
ssize_t res = fuse_get_res_by_io(io);
if (res >= 0) {
@@ -1452,7 +1465,7 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
list_del(&req->writepages_entry);
for (i = 0; i < req->num_pages; i++) {
dec_wb_stat(&bdi->wb, WB_WRITEBACK);
- dec_zone_page_state(req->pages[i], NR_WRITEBACK_TEMP);
+ dec_node_page_state(req->pages[i], NR_WRITEBACK_TEMP);
wb_writeout_inc(&bdi->wb);
}
wake_up(&fi->page_waitq);
@@ -1642,7 +1655,7 @@ static int fuse_writepage_locked(struct page *page)
req->inode = inode;
inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
- inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
+ inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
spin_lock(&fc->lock);
list_add(&req->writepages_entry, &fi->writepages);
@@ -1756,7 +1769,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
spin_unlock(&fc->lock);
dec_wb_stat(&bdi->wb, WB_WRITEBACK);
- dec_zone_page_state(page, NR_WRITEBACK_TEMP);
+ dec_node_page_state(page, NR_WRITEBACK_TEMP);
wb_writeout_inc(&bdi->wb);
fuse_writepage_free(fc, new_req);
fuse_request_free(new_req);
@@ -1855,7 +1868,7 @@ static int fuse_writepages_fill(struct page *page,
req->page_descs[req->num_pages].length = PAGE_SIZE;
inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
- inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
+ inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP);
err = 0;
if (is_writeback && fuse_writepage_in_flight(req, page)) {
@@ -2850,7 +2863,6 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
size_t count = iov_iter_count(iter);
loff_t offset = iocb->ki_pos;
struct fuse_io_priv *io;
- bool is_sync = is_sync_kiocb(iocb);
pos = offset;
inode = file->f_mapping->host;
@@ -2885,17 +2897,16 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
*/
io->async = async_dio;
io->iocb = iocb;
+ io->blocking = is_sync_kiocb(iocb);
/*
- * We cannot asynchronously extend the size of a file. We have no method
- * to wait on real async I/O requests, so we must submit this request
- * synchronously.
+ * We cannot asynchronously extend the size of a file.
+ * In that case the aio will behave exactly like sync io.
*/
- if (!is_sync && (offset + count > i_size) &&
- iov_iter_rw(iter) == WRITE)
- io->async = false;
+ if ((offset + count > i_size) && iov_iter_rw(iter) == WRITE)
+ io->blocking = true;
- if (io->async && is_sync) {
+ if (io->async && io->blocking) {
/*
* Additional reference to keep io around after
* calling fuse_aio_complete()
@@ -2915,7 +2926,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
/* we have a non-extending, async request, so return */
- if (!is_sync)
+ if (!io->blocking)
return -EIOCBQUEUED;
wait_for_completion(&wait);
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 929c383432b0..d98d8cc84def 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -259,6 +259,7 @@ struct fuse_io_priv {
struct kiocb *iocb;
struct file *file;
struct completion *done;
+ bool blocking;
};
#define FUSE_IO_PRIV_SYNC(f) \
@@ -703,7 +704,7 @@ struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
int generation, struct fuse_attr *attr,
u64 attr_valid, u64 attr_version);
-int fuse_lookup_name(struct super_block *sb, u64 nodeid, struct qstr *name,
+int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name,
struct fuse_entry_out *outarg, struct inode **inode);
/**
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 9961d8432ce3..4e05b51120f4 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -673,13 +673,11 @@ static struct dentry *fuse_get_dentry(struct super_block *sb,
inode = ilookup5(sb, handle->nodeid, fuse_inode_eq, &handle->nodeid);
if (!inode) {
struct fuse_entry_out outarg;
- struct qstr name;
+ const struct qstr name = QSTR_INIT(".", 1);
if (!fc->export_support)
goto out_err;
- name.len = 1;
- name.name = ".";
err = fuse_lookup_name(sb, handle->nodeid, &name, &outarg,
&inode);
if (err && err != -ENOENT)
@@ -775,14 +773,12 @@ static struct dentry *fuse_get_parent(struct dentry *child)
struct inode *inode;
struct dentry *parent;
struct fuse_entry_out outarg;
- struct qstr name;
+ const struct qstr name = QSTR_INIT("..", 2);
int err;
if (!fc->export_support)
return ERR_PTR(-ESTALE);
- name.len = 2;
- name.name = "..";
err = fuse_lookup_name(child_inode->i_sb, get_node_id(child_inode),
&name, &outarg, &inode);
if (err) {
@@ -942,7 +938,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
- FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
+ FUSE_FLOCK_LOCKS | FUSE_HAS_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
FUSE_PARALLEL_DIROPS;
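
Two VFS interface changes surface in this file: QSTR_INIT() builds the qstr as a
designated initializer instead of assigning .name/.len by hand, and
full_name_hash() now takes the parent dentry (or directory inode) as a hashing
salt, so hashes are computed relative to where the lookup happens. In shorthand,
grounded in the hunks above (const is dropped so the hash can be filled in):

        struct qstr name = QSTR_INIT("..", 2);  /* sets .name and .len */
        name.hash = full_name_hash(parent, name.name, name.len);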
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 37b7bc14c8da..82df36886938 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -140,6 +140,32 @@ static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
}
+/* This is the same as calling block_write_full_page, but it also
+ * writes pages outside of i_size
+ */
+int gfs2_write_full_page(struct page *page, get_block_t *get_block,
+ struct writeback_control *wbc)
+{
+ struct inode * const inode = page->mapping->host;
+ loff_t i_size = i_size_read(inode);
+ const pgoff_t end_index = i_size >> PAGE_SHIFT;
+ unsigned offset;
+
+ /*
+ * The page straddles i_size. It must be zeroed out on each and every
+ * writepage invocation because it may be mmapped. "A file is mapped
+ * in multiples of the page size. For a file that is not a multiple of
+ * the page size, the remaining memory is zeroed when mapped, and
+ * writes to that region are not written out to the file."
+ */
+ offset = i_size & (PAGE_SIZE-1);
+ if (page->index == end_index && offset)
+ zero_user_segment(page, offset, PAGE_SIZE);
+
+ return __block_write_full_page(inode, page, get_block, wbc,
+ end_buffer_async_write);
+}
+
/**
* __gfs2_jdata_writepage - The core of jdata writepage
* @page: The page to write
@@ -165,7 +191,7 @@ static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *w
}
gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize-1);
}
- return block_write_full_page(page, gfs2_get_block_noalloc, wbc);
+ return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}
/**
@@ -180,27 +206,20 @@ static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *w
static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
struct inode *inode = page->mapping->host;
+ struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
int ret;
- int done_trans = 0;
- if (PageChecked(page)) {
- if (wbc->sync_mode != WB_SYNC_ALL)
- goto out_ignore;
- ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
- if (ret)
- goto out_ignore;
- done_trans = 1;
- }
- ret = gfs2_writepage_common(page, wbc);
- if (ret > 0)
- ret = __gfs2_jdata_writepage(page, wbc);
- if (done_trans)
- gfs2_trans_end(sdp);
+ if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
+ goto out;
+ if (PageChecked(page) || current->journal_info)
+ goto out_ignore;
+ ret = __gfs2_jdata_writepage(page, wbc);
return ret;
out_ignore:
redirty_page_for_writepage(wbc, page);
+out:
unlock_page(page);
return 0;
}
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 24ce1cdd434a..6e2bec1cd289 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -285,7 +285,8 @@ static void gfs2_metapath_ra(struct gfs2_glock *gl,
if (trylock_buffer(rabh)) {
if (!buffer_uptodate(rabh)) {
rabh->b_end_io = end_buffer_read_sync;
- submit_bh(READA | REQ_META, rabh);
+ submit_bh(REQ_OP_READ, REQ_RAHEAD | REQ_META,
+ rabh);
continue;
}
unlock_buffer(rabh);
@@ -974,7 +975,7 @@ static int gfs2_block_truncate_page(struct address_space *mapping, loff_t from)
if (!buffer_uptodate(bh)) {
err = -EIO;
- ll_rw_block(READ, 1, &bh);
+ ll_rw_block(REQ_OP_READ, 0, 1, &bh);
wait_on_buffer(bh);
/* Uhhuh. Read error. Complain and punt. */
if (!buffer_uptodate(bh))
diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c
index 30822b148f3e..5173b98ca036 100644
--- a/fs/gfs2/dentry.c
+++ b/fs/gfs2/dentry.c
@@ -117,7 +117,7 @@ static int gfs2_dentry_delete(const struct dentry *dentry)
return 0;
ginode = GFS2_I(d_inode(dentry));
- if (!ginode->i_iopen_gh.gh_gl)
+ if (!gfs2_holder_initialized(&ginode->i_iopen_gh))
return 0;
if (test_bit(GLF_DEMOTE, &ginode->i_iopen_gh.gh_gl->gl_flags))
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index 271d93905bac..fcb59b23f1e3 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1513,7 +1513,7 @@ static void gfs2_dir_readahead(struct inode *inode, unsigned hsize, u32 index,
continue;
}
bh->b_end_io = end_buffer_read_sync;
- submit_bh(READA | REQ_META, bh);
+ submit_bh(REQ_OP_READ, REQ_RAHEAD | REQ_META, bh);
continue;
}
brelse(bh);
@@ -1663,7 +1663,8 @@ struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name,
brelse(bh);
if (fail_on_exist)
return ERR_PTR(-EEXIST);
- inode = gfs2_inode_lookup(dir->i_sb, dtype, addr, formal_ino);
+ inode = gfs2_inode_lookup(dir->i_sb, dtype, addr, formal_ino,
+ GFS2_BLKST_FREE /* ignore */);
if (!IS_ERR(inode))
GFS2_I(inode)->i_rahead = rahead;
return inode;
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index d5bda8513457..a332f3cd925e 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -137,21 +137,10 @@ static struct dentry *gfs2_get_dentry(struct super_block *sb,
struct gfs2_sbd *sdp = sb->s_fs_info;
struct inode *inode;
- inode = gfs2_ilookup(sb, inum->no_addr);
- if (inode) {
- if (GFS2_I(inode)->i_no_formal_ino != inum->no_formal_ino) {
- iput(inode);
- return ERR_PTR(-ESTALE);
- }
- goto out_inode;
- }
-
inode = gfs2_lookup_by_inum(sdp, inum->no_addr, &inum->no_formal_ino,
GFS2_BLKST_DINODE);
if (IS_ERR(inode))
return ERR_CAST(inode);
-
-out_inode:
return d_obtain_alias(inode);
}
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index e0f98e483aec..320e65e61938 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -1098,7 +1098,7 @@ static void do_unflock(struct file *file, struct file_lock *fl)
mutex_lock(&fp->f_fl_mutex);
locks_lock_file_wait(file, fl);
- if (fl_gh->gh_gl) {
+ if (gfs2_holder_initialized(fl_gh)) {
gfs2_glock_dq(fl_gh);
gfs2_holder_uninit(fl_gh);
}
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 706fd9352f36..3a90b2b5b9bb 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -575,7 +575,6 @@ static void delete_work_func(struct work_struct *work)
{
struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- struct gfs2_inode *ip;
struct inode *inode;
u64 no_addr = gl->gl_name.ln_number;
@@ -585,13 +584,7 @@ static void delete_work_func(struct work_struct *work)
if (test_bit(GLF_INODE_CREATING, &gl->gl_flags))
goto out;
- ip = gl->gl_object;
- /* Note: Unsafe to dereference ip as we don't hold right refs/locks */
-
- if (ip)
- inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
- else
- inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
+ inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
if (inode && !IS_ERR(inode)) {
d_prune_aliases(inode);
iput(inode);
@@ -808,7 +801,7 @@ void gfs2_holder_uninit(struct gfs2_holder *gh)
{
put_pid(gh->gh_owner_pid);
gfs2_glock_put(gh->gh_gl);
- gh->gh_gl = NULL;
+ gfs2_holder_mark_uninitialized(gh);
gh->gh_ip = 0;
}
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 46ab67fc16da..ab1ef322f7a5 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -247,4 +247,14 @@ extern void gfs2_unregister_debugfs(void);
extern const struct lm_lockops gfs2_dlm_ops;
+static inline void gfs2_holder_mark_uninitialized(struct gfs2_holder *gh)
+{
+ gh->gh_gl = NULL;
+}
+
+static inline bool gfs2_holder_initialized(struct gfs2_holder *gh)
+{
+ return gh->gh_gl;
+}
+
#endif /* __GLOCK_DOT_H__ */
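
The two helpers above replace the open-coded "gh->gh_gl == NULL" tests that were scattered across the filesystem (dentry.c, file.c, rgrp.c, super.c and the inode paths below). A minimal userspace sketch of the sentinel-initialization pattern they encapsulate follows; the names (holder, resource) are illustrative stand-ins, not kernel API:

    /* Sentinel-initialization pattern: a NULL pointer marks "not held". */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct holder {
            void *resource; /* NULL means "not holding anything" */
    };

    static inline void holder_mark_uninitialized(struct holder *h)
    {
            h->resource = NULL;
    }

    static inline bool holder_initialized(const struct holder *h)
    {
            return h->resource != NULL;
    }

    int main(void)
    {
            struct holder h;
            int lock;       /* stands in for a real lock object */

            holder_mark_uninitialized(&h);
            if (!holder_initialized(&h))
                    h.resource = &lock;     /* "acquire" */
            if (holder_initialized(&h))
                    h.resource = NULL;      /* "release" exactly once */
            printf("released: %s\n", holder_initialized(&h) ? "no" : "yes");
            return 0;
    }
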
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 21dc784f66c2..e4da0ecd3285 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -37,9 +37,35 @@
#include "super.h"
#include "glops.h"
-struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr)
+static int iget_test(struct inode *inode, void *opaque)
{
- return ilookup(sb, (unsigned long)no_addr);
+ u64 no_addr = *(u64 *)opaque;
+
+ return GFS2_I(inode)->i_no_addr == no_addr;
+}
+
+static int iget_set(struct inode *inode, void *opaque)
+{
+ u64 no_addr = *(u64 *)opaque;
+
+ GFS2_I(inode)->i_no_addr = no_addr;
+ inode->i_ino = no_addr;
+ return 0;
+}
+
+static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr)
+{
+ struct inode *inode;
+
+repeat:
+ inode = iget5_locked(sb, no_addr, iget_test, iget_set, &no_addr);
+ if (!inode)
+ return inode;
+ if (is_bad_inode(inode)) {
+ iput(inode);
+ goto repeat;
+ }
+ return inode;
}
/**
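
gfs2_iget() above moves from iget_locked(), which keys the inode cache on an unsigned long i_ino and so truncates 64-bit block addresses on 32-bit hosts, to iget5_locked() with test/set callbacks that compare and install the full u64 no_addr; hash collisions are then disambiguated by iget_test(). A userspace sketch of that keyed test/set lookup shape (lookup5, test_cb, set_cb and the toy table are hypothetical names, not the real cache):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy cache entry; no_addr mirrors the full 64-bit key. */
    struct entry {
            uint64_t no_addr;
            bool in_use;
    };

    static bool test_cb(const struct entry *e, const void *opaque)
    {
            return e->no_addr == *(const uint64_t *)opaque;
    }

    static void set_cb(struct entry *e, const void *opaque)
    {
            e->no_addr = *(const uint64_t *)opaque;
    }

    /* Find an entry matching the key, or claim a free slot and key it. */
    static struct entry *lookup5(struct entry *tbl, size_t n, uint64_t key)
    {
            for (size_t i = 0; i < n; i++)
                    if (tbl[i].in_use && test_cb(&tbl[i], &key))
                            return &tbl[i];
            for (size_t i = 0; i < n; i++)
                    if (!tbl[i].in_use) {
                            tbl[i].in_use = true;
                            set_cb(&tbl[i], &key);
                            return &tbl[i];
                    }
            return NULL;
    }

    int main(void)
    {
            struct entry tbl[4] = { 0 };
            /* A key that does not fit in a 32-bit unsigned long. */
            struct entry *a = lookup5(tbl, 4, 0x100000000ULL);
            struct entry *b = lookup5(tbl, 4, 0x100000000ULL);
            printf("same entry: %s\n", a == b ? "yes" : "no");
            return 0;
    }
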
@@ -78,26 +104,37 @@ static void gfs2_set_iop(struct inode *inode)
/**
* gfs2_inode_lookup - Lookup an inode
* @sb: The super block
- * @no_addr: The inode number
* @type: The type of the inode
+ * @no_addr: The inode number
+ * @no_formal_ino: The inode generation number
+ * @blktype: Requested block type (GFS2_BLKST_DINODE or GFS2_BLKST_UNLINKED;
+ * GFS2_BLKST_FREE to indicate not to verify)

+ *
+ * If @type is DT_UNKNOWN, the inode type is fetched from disk.
+ *
+ * If @blktype is anything other than GFS2_BLKST_FREE (which is used as a
+ * placeholder because it doesn't otherwise make sense), the on-disk block type
+ * is verified to be @blktype.
*
* Returns: A VFS inode, or an error
*/
struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
- u64 no_addr, u64 no_formal_ino)
+ u64 no_addr, u64 no_formal_ino,
+ unsigned int blktype)
{
struct inode *inode;
struct gfs2_inode *ip;
struct gfs2_glock *io_gl = NULL;
+ struct gfs2_holder i_gh;
int error;
- inode = iget_locked(sb, (unsigned long)no_addr);
+ gfs2_holder_mark_uninitialized(&i_gh);
+ inode = gfs2_iget(sb, no_addr);
if (!inode)
return ERR_PTR(-ENOMEM);
ip = GFS2_I(inode);
- ip->i_no_addr = no_addr;
if (inode->i_state & I_NEW) {
struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -112,10 +149,29 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
if (unlikely(error))
goto fail_put;
+ if (type == DT_UNKNOWN || blktype != GFS2_BLKST_FREE) {
+ /*
+ * The GL_SKIP flag indicates to skip reading the inode
+ * block. We read the inode with gfs2_inode_refresh
+ * after possibly checking the block type.
+ */
+ error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE,
+ GL_SKIP, &i_gh);
+ if (error)
+ goto fail_put;
+
+ if (blktype != GFS2_BLKST_FREE) {
+ error = gfs2_check_blk_type(sdp, no_addr,
+ blktype);
+ if (error)
+ goto fail_put;
+ }
+ }
+
set_bit(GIF_INVALID, &ip->i_flags);
error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
if (unlikely(error))
- goto fail_iopen;
+ goto fail_put;
ip->i_iopen_gh.gh_gl->gl_object = ip;
gfs2_glock_put(io_gl);
@@ -134,6 +190,8 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
unlock_new_inode(inode);
}
+ if (gfs2_holder_initialized(&i_gh))
+ gfs2_glock_dq_uninit(&i_gh);
return inode;
fail_refresh:
@@ -141,10 +199,11 @@ fail_refresh:
ip->i_iopen_gh.gh_gl->gl_object = NULL;
gfs2_glock_dq_wait(&ip->i_iopen_gh);
gfs2_holder_uninit(&ip->i_iopen_gh);
-fail_iopen:
+fail_put:
if (io_gl)
gfs2_glock_put(io_gl);
-fail_put:
+ if (gfs2_holder_initialized(&i_gh))
+ gfs2_glock_dq_uninit(&i_gh);
ip->i_gl->gl_object = NULL;
fail:
iget_failed(inode);
@@ -155,23 +214,12 @@ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
u64 *no_formal_ino, unsigned int blktype)
{
struct super_block *sb = sdp->sd_vfs;
- struct gfs2_holder i_gh;
- struct inode *inode = NULL;
+ struct inode *inode;
int error;
- /* Must not read in block until block type is verified */
- error = gfs2_glock_nq_num(sdp, no_addr, &gfs2_inode_glops,
- LM_ST_EXCLUSIVE, GL_SKIP, &i_gh);
- if (error)
- return ERR_PTR(error);
-
- error = gfs2_check_blk_type(sdp, no_addr, blktype);
- if (error)
- goto fail;
-
- inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0);
+ inode = gfs2_inode_lookup(sb, DT_UNKNOWN, no_addr, 0, blktype);
if (IS_ERR(inode))
- goto fail;
+ return inode;
/* Two extra checks for NFS only */
if (no_formal_ino) {
@@ -182,16 +230,12 @@ struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
error = -EIO;
if (GFS2_I(inode)->i_diskflags & GFS2_DIF_SYSTEM)
goto fail_iput;
-
- error = 0;
}
+ return inode;
-fail:
- gfs2_glock_dq_uninit(&i_gh);
- return error ? ERR_PTR(error) : inode;
fail_iput:
iput(inode);
- goto fail;
+ return ERR_PTR(error);
}
@@ -236,8 +280,8 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
struct gfs2_holder d_gh;
int error = 0;
struct inode *inode = NULL;
- int unlock = 0;
+ gfs2_holder_mark_uninitialized(&d_gh);
if (!name->len || name->len > GFS2_FNAMESIZE)
return ERR_PTR(-ENAMETOOLONG);
@@ -252,7 +296,6 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
if (error)
return ERR_PTR(error);
- unlock = 1;
}
if (!is_root) {
@@ -265,7 +308,7 @@ struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name,
if (IS_ERR(inode))
error = PTR_ERR(inode);
out:
- if (unlock)
+ if (gfs2_holder_initialized(&d_gh))
gfs2_glock_dq_uninit(&d_gh);
if (error == -ENOENT)
return NULL;
@@ -1189,7 +1232,7 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
struct dentry *d;
bool excl = !!(flags & O_EXCL);
- if (!d_unhashed(dentry))
+ if (!d_in_lookup(dentry))
goto skip_lookup;
d = __gfs2_lookup(dir, dentry, file, opened);
@@ -1309,7 +1352,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
struct gfs2_inode *ip = GFS2_I(d_inode(odentry));
struct gfs2_inode *nip = NULL;
struct gfs2_sbd *sdp = GFS2_SB(odir);
- struct gfs2_holder ghs[5], r_gh = { .gh_gl = NULL, };
+ struct gfs2_holder ghs[5], r_gh;
struct gfs2_rgrpd *nrgd;
unsigned int num_gh;
int dir_rename = 0;
@@ -1317,6 +1360,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
unsigned int x;
int error;
+ gfs2_holder_mark_uninitialized(&r_gh);
if (d_really_is_positive(ndentry)) {
nip = GFS2_I(d_inode(ndentry));
if (ip == nip)
@@ -1506,7 +1550,7 @@ out_gunlock:
gfs2_holder_uninit(ghs + x);
}
out_gunlock_r:
- if (r_gh.gh_gl)
+ if (gfs2_holder_initialized(&r_gh))
gfs2_glock_dq_uninit(&r_gh);
out:
return error;
@@ -1532,13 +1576,14 @@ static int gfs2_exchange(struct inode *odir, struct dentry *odentry,
struct gfs2_inode *oip = GFS2_I(odentry->d_inode);
struct gfs2_inode *nip = GFS2_I(ndentry->d_inode);
struct gfs2_sbd *sdp = GFS2_SB(odir);
- struct gfs2_holder ghs[5], r_gh = { .gh_gl = NULL, };
+ struct gfs2_holder ghs[5], r_gh;
unsigned int num_gh;
unsigned int x;
umode_t old_mode = oip->i_inode.i_mode;
umode_t new_mode = nip->i_inode.i_mode;
int error;
+ gfs2_holder_mark_uninitialized(&r_gh);
error = gfs2_rindex_update(sdp);
if (error)
return error;
@@ -1646,7 +1691,7 @@ out_gunlock:
gfs2_holder_uninit(ghs + x);
}
out_gunlock_r:
- if (r_gh.gh_gl)
+ if (gfs2_holder_initialized(&r_gh))
gfs2_glock_dq_uninit(&r_gh);
out:
return error;
@@ -1743,9 +1788,8 @@ int gfs2_permission(struct inode *inode, int mask)
struct gfs2_inode *ip;
struct gfs2_holder i_gh;
int error;
- int unlock = 0;
-
+ gfs2_holder_mark_uninitialized(&i_gh);
ip = GFS2_I(inode);
if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
if (mask & MAY_NOT_BLOCK)
@@ -1753,14 +1797,13 @@ int gfs2_permission(struct inode *inode, int mask)
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
if (error)
return error;
- unlock = 1;
}
if ((mask & MAY_WRITE) && IS_IMMUTABLE(inode))
- error = -EACCES;
+ error = -EPERM;
else
error = generic_permission(inode, mask);
- if (unlock)
+ if (gfs2_holder_initialized(&i_gh))
gfs2_glock_dq_uninit(&i_gh);
return error;
@@ -1932,17 +1975,16 @@ static int gfs2_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder gh;
int error;
- int unlock = 0;
+ gfs2_holder_mark_uninitialized(&gh);
if (gfs2_glock_is_locked_by_me(ip->i_gl) == NULL) {
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
if (error)
return error;
- unlock = 1;
}
generic_fillattr(inode, stat);
- if (unlock)
+ if (gfs2_holder_initialized(&gh))
gfs2_glock_dq_uninit(&gh);
return 0;
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index e1af0d4aa308..7710dfd3af35 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -94,11 +94,11 @@ err:
}
extern struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned type,
- u64 no_addr, u64 no_formal_ino);
+ u64 no_addr, u64 no_formal_ino,
+ unsigned int blktype);
extern struct inode *gfs2_lookup_by_inum(struct gfs2_sbd *sdp, u64 no_addr,
u64 *no_formal_ino,
unsigned int blktype);
-extern struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr);
extern int gfs2_inode_refresh(struct gfs2_inode *ip);
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 0ff028c15199..e58ccef09c91 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -657,7 +657,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
struct gfs2_log_header *lh;
unsigned int tail;
u32 hash;
- int rw = WRITE_FLUSH_FUA | REQ_META;
+ int op_flags = WRITE_FLUSH_FUA | REQ_META;
struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);
lh = page_address(page);
@@ -682,12 +682,12 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags)
if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) {
gfs2_ordered_wait(sdp);
log_flush_wait(sdp);
- rw = WRITE_SYNC | REQ_META | REQ_PRIO;
+ op_flags = WRITE_SYNC | REQ_META | REQ_PRIO;
}
sdp->sd_log_idle = (tail == sdp->sd_log_flush_head);
gfs2_log_write_page(sdp, page);
- gfs2_log_flush_bio(sdp, rw);
+ gfs2_log_flush_bio(sdp, REQ_OP_WRITE, op_flags);
log_flush_wait(sdp);
if (sdp->sd_log_tail != tail)
@@ -738,7 +738,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
gfs2_ordered_write(sdp);
lops_before_commit(sdp, tr);
- gfs2_log_flush_bio(sdp, WRITE);
+ gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
if (sdp->sd_log_head != sdp->sd_log_flush_head) {
log_flush_wait(sdp);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index d5369a109781..49d5a1b61b06 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -230,17 +230,19 @@ static void gfs2_end_log_write(struct bio *bio)
/**
* gfs2_log_flush_bio - Submit any pending log bio
* @sdp: The superblock
- * @rw: The rw flags
+ * @op: REQ_OP
+ * @op_flags: rq_flag_bits
*
* Submit any pending part-built or full bio to the block device. If
* there is no pending bio, then this is a no-op.
*/
-void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw)
+void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int op, int op_flags)
{
if (sdp->sd_log_bio) {
atomic_inc(&sdp->sd_log_in_flight);
- submit_bio(rw, sdp->sd_log_bio);
+ bio_set_op_attrs(sdp->sd_log_bio, op, op_flags);
+ submit_bio(sdp->sd_log_bio);
sdp->sd_log_bio = NULL;
}
}
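
gfs2_log_flush_bio() now follows the block layer's split of the old combined rw integer into a request operation (REQ_OP_*) plus modifier flags, attached to the bio via bio_set_op_attrs() so that submit_bio() takes only the bio. A self-contained sketch of that op/flags separation, with toy names (request, set_op_attrs) rather than the real block-layer types:

    #include <stdio.h>

    /* Toy request: operation and modifier flags in separate fields,
     * mirroring the op/op_flags split in the interfaces above. */
    enum req_op { OP_READ, OP_WRITE };
    #define FLAG_SYNC  (1u << 0)
    #define FLAG_META  (1u << 1)

    struct request {
            enum req_op op;
            unsigned int flags;
    };

    static void set_op_attrs(struct request *rq, enum req_op op,
                             unsigned int flags)
    {
            rq->op = op;
            rq->flags = flags;
    }

    static void submit(const struct request *rq)
    {
            printf("%s%s%s\n", rq->op == OP_WRITE ? "write" : "read",
                   rq->flags & FLAG_SYNC ? " sync" : "",
                   rq->flags & FLAG_META ? " meta" : "");
    }

    int main(void)
    {
            struct request rq;

            set_op_attrs(&rq, OP_WRITE, FLAG_SYNC | FLAG_META);
            submit(&rq);    /* caller no longer passes rw at submit time */
            return 0;
    }
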
@@ -299,7 +301,7 @@ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno)
nblk >>= sdp->sd_fsb2bb_shift;
if (blkno == nblk)
return bio;
- gfs2_log_flush_bio(sdp, WRITE);
+ gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
}
return gfs2_log_alloc_bio(sdp, blkno);
@@ -328,7 +330,7 @@ static void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
bio = gfs2_log_get_bio(sdp, blkno);
ret = bio_add_page(bio, page, size, offset);
if (ret == 0) {
- gfs2_log_flush_bio(sdp, WRITE);
+ gfs2_log_flush_bio(sdp, REQ_OP_WRITE, 0);
bio = gfs2_log_alloc_bio(sdp, blkno);
ret = bio_add_page(bio, page, size, offset);
WARN_ON(ret == 0);
@@ -535,9 +537,9 @@ static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
return 0;
- gfs2_replay_incr_blk(sdp, &start);
+ gfs2_replay_incr_blk(jd, &start);
- for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
+ for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
blkno = be64_to_cpu(*ptr++);
jd->jd_found_blocks++;
@@ -693,7 +695,7 @@ static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
offset = sizeof(struct gfs2_log_descriptor);
- for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
+ for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
error = gfs2_replay_read_block(jd, start, &bh);
if (error)
return error;
@@ -762,7 +764,6 @@ static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
__be64 *ptr, int pass)
{
struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
- struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
struct gfs2_glock *gl = ip->i_gl;
unsigned int blks = be32_to_cpu(ld->ld_data1);
struct buffer_head *bh_log, *bh_ip;
@@ -773,8 +774,8 @@ static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
return 0;
- gfs2_replay_incr_blk(sdp, &start);
- for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
+ gfs2_replay_incr_blk(jd, &start);
+ for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
blkno = be64_to_cpu(*ptr++);
esc = be64_to_cpu(*ptr++);
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h
index a65a7ba32ffd..e529f536c117 100644
--- a/fs/gfs2/lops.h
+++ b/fs/gfs2/lops.h
@@ -27,7 +27,7 @@ extern const struct gfs2_log_operations gfs2_databuf_lops;
extern const struct gfs2_log_operations *gfs2_log_ops[];
extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page);
-extern void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw);
+extern void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int op, int op_flags);
extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh);
static inline unsigned int buf_limit(struct gfs2_sbd *sdp)
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index f99f8e94de3f..74fd0139e6c2 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -45,6 +45,7 @@ static void gfs2_init_inode_once(void *foo)
memset(&ip->i_res, 0, sizeof(ip->i_res));
RB_CLEAR_NODE(&ip->i_res.rs_node);
ip->i_hash_cache = NULL;
+ gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
}
static void gfs2_init_glock_once(void *foo)
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 8eaadabbc771..950b8be68e41 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -37,8 +37,8 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
{
struct buffer_head *bh, *head;
int nr_underway = 0;
- int write_op = REQ_META | REQ_PRIO |
- (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
+ int write_flags = REQ_META | REQ_PRIO |
+ (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
BUG_ON(!PageLocked(page));
BUG_ON(!page_has_buffers(page));
@@ -79,7 +79,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb
do {
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
- submit_bh(write_op, bh);
+ submit_bh(REQ_OP_WRITE, write_flags, bh);
nr_underway++;
}
bh = next;
@@ -213,7 +213,8 @@ static void gfs2_meta_read_endio(struct bio *bio)
* Submit several consecutive buffer head I/O requests as a single bio I/O
* request. (See submit_bh_wbc.)
*/
-static void gfs2_submit_bhs(int rw, struct buffer_head *bhs[], int num)
+static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[],
+ int num)
{
struct buffer_head *bh = bhs[0];
struct bio *bio;
@@ -230,7 +231,8 @@ static void gfs2_submit_bhs(int rw, struct buffer_head *bhs[], int num)
bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
}
bio->bi_end_io = gfs2_meta_read_endio;
- submit_bio(rw, bio);
+ bio_set_op_attrs(bio, op, op_flags);
+ submit_bio(bio);
}
/**
@@ -280,7 +282,7 @@ int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
}
}
- gfs2_submit_bhs(READ_SYNC | REQ_META | REQ_PRIO, bhs, num);
+ gfs2_submit_bhs(REQ_OP_READ, READ_SYNC | REQ_META | REQ_PRIO, bhs, num);
if (!(flags & DIO_WAIT))
return 0;
@@ -448,7 +450,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
if (buffer_uptodate(first_bh))
goto out;
if (!buffer_locked(first_bh))
- ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh);
+ ll_rw_block(REQ_OP_READ, READ_SYNC | REQ_META, 1, &first_bh);
dblock++;
extlen--;
@@ -457,7 +459,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
bh = gfs2_getbuf(gl, dblock, CREATE);
if (!buffer_uptodate(bh) && !buffer_locked(bh))
- ll_rw_block(READA | REQ_META, 1, &bh);
+ ll_rw_block(REQ_OP_READ, REQ_RAHEAD | REQ_META, 1, &bh);
brelse(bh);
dblock++;
extlen--;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 45463600fb81..ef1e1822977f 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -246,7 +246,8 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
bio->bi_end_io = end_bio_io_page;
bio->bi_private = page;
- submit_bio(READ_SYNC | REQ_META, bio);
+ bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC | REQ_META);
+ submit_bio(bio);
wait_on_page_locked(page);
bio_put(bio);
if (!PageUptodate(page)) {
@@ -454,7 +455,8 @@ static int gfs2_lookup_root(struct super_block *sb, struct dentry **dptr,
struct dentry *dentry;
struct inode *inode;
- inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0);
+ inode = gfs2_inode_lookup(sb, DT_DIR, no_addr, 0,
+ GFS2_BLKST_FREE /* ignore */);
if (IS_ERR(inode)) {
fs_err(sdp, "can't read in %s inode: %ld\n", name, PTR_ERR(inode));
return PTR_ERR(inode);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index ce7d69a2fdc0..77930ca25303 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -730,7 +730,7 @@ static int gfs2_write_buf_to_page(struct gfs2_inode *ip, unsigned long index,
if (PageUptodate(page))
set_buffer_uptodate(bh);
if (!buffer_uptodate(bh)) {
- ll_rw_block(READ | REQ_META, 1, &bh);
+ ll_rw_block(REQ_OP_READ, REQ_META, 1, &bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh))
goto unlock_out;
@@ -883,7 +883,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
&data_blocks, &ind_blocks);
- ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
+ ghs = kmalloc(num_qd * sizeof(struct gfs2_holder), GFP_NOFS);
if (!ghs)
return -ENOMEM;
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 1b645773c98e..113b6095a58d 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -338,7 +338,7 @@ static int foreach_descriptor(struct gfs2_jdesc *jd, unsigned int start,
struct gfs2_log_header_host lh;
error = get_log_header(jd, start, &lh);
if (!error) {
- gfs2_replay_incr_blk(sdp, &start);
+ gfs2_replay_incr_blk(jd, &start);
brelse(bh);
continue;
}
@@ -360,7 +360,7 @@ static int foreach_descriptor(struct gfs2_jdesc *jd, unsigned int start,
}
while (length--)
- gfs2_replay_incr_blk(sdp, &start);
+ gfs2_replay_incr_blk(jd, &start);
brelse(bh);
}
@@ -390,7 +390,7 @@ static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *hea
struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
lblock = head->lh_blkno;
- gfs2_replay_incr_blk(sdp, &lblock);
+ gfs2_replay_incr_blk(jd, &lblock);
bh_map.b_size = 1 << ip->i_inode.i_blkbits;
error = gfs2_block_map(&ip->i_inode, lblock, &bh_map, 0);
if (error)
diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h
index 6142836cce96..11fdfab4bf99 100644
--- a/fs/gfs2/recovery.h
+++ b/fs/gfs2/recovery.h
@@ -14,9 +14,9 @@
extern struct workqueue_struct *gfs_recovery_wq;
-static inline void gfs2_replay_incr_blk(struct gfs2_sbd *sdp, unsigned int *blk)
+static inline void gfs2_replay_incr_blk(struct gfs2_jdesc *jd, unsigned int *blk)
{
- if (++*blk == sdp->sd_jdesc->jd_blocks)
+ if (++*blk == jd->jd_blocks)
*blk = 0;
}
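
gfs2_replay_incr_blk() is a wrapping increment over the journal being replayed; taking the journal descriptor itself (jd->jd_blocks) instead of going through the mount's own sd_jdesc matters when recovering another node's journal, whose size may differ. A tiny runnable sketch of the wraparound:

    #include <stdio.h>

    /* Wrapping increment over a circular journal of jd_blocks blocks. */
    static void replay_incr_blk(unsigned int jd_blocks, unsigned int *blk)
    {
            if (++*blk == jd_blocks)
                    *blk = 0;
    }

    int main(void)
    {
            unsigned int blk = 6;

            for (int i = 0; i < 4; i++) {
                    replay_incr_blk(8, &blk); /* 8-block journal: 7 0 1 2 */
                    printf("%u ", blk);
            }
            printf("\n");
            return 0;
    }
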
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 5bd216901e89..86ccc0159393 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -658,6 +658,7 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
if (rgd) {
spin_lock(&rgd->rd_rsspin);
__rs_deltree(rs);
+ BUG_ON(rs->rs_free);
spin_unlock(&rgd->rd_rsspin);
}
}
@@ -671,10 +672,8 @@ void gfs2_rs_deltree(struct gfs2_blkreserv *rs)
void gfs2_rsqa_delete(struct gfs2_inode *ip, atomic_t *wcount)
{
down_write(&ip->i_rw_mutex);
- if ((wcount == NULL) || (atomic_read(wcount) <= 1)) {
+ if ((wcount == NULL) || (atomic_read(wcount) <= 1))
gfs2_rs_deltree(&ip->i_res);
- BUG_ON(ip->i_res.rs_free);
- }
up_write(&ip->i_rw_mutex);
gfs2_qa_delete(ip, wcount);
}
@@ -722,6 +721,7 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
gfs2_free_clones(rgd);
kfree(rgd->rd_bits);
+ rgd->rd_bits = NULL;
return_all_reservations(rgd);
kmem_cache_free(gfs2_rgrpd_cachep, rgd);
}
@@ -916,9 +916,6 @@ static int read_rindex_entry(struct gfs2_inode *ip)
if (error)
goto fail;
- rgd->rd_gl->gl_object = rgd;
- rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
- rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
if (rgd->rd_data > sdp->sd_max_rg_data)
@@ -926,14 +923,20 @@ static int read_rindex_entry(struct gfs2_inode *ip)
spin_lock(&sdp->sd_rindex_spin);
error = rgd_insert(rgd);
spin_unlock(&sdp->sd_rindex_spin);
- if (!error)
+ if (!error) {
+ rgd->rd_gl->gl_object = rgd;
+ rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
+ rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
+ rgd->rd_length) * bsize) - 1;
return 0;
+ }
error = 0; /* someone else read in the rgrp; free it and ignore it */
gfs2_glock_put(rgd->rd_gl);
fail:
kfree(rgd->rd_bits);
+ rgd->rd_bits = NULL;
kmem_cache_free(gfs2_rgrpd_cachep, rgd);
return error;
}
@@ -2096,7 +2099,7 @@ void gfs2_inplace_release(struct gfs2_inode *ip)
{
struct gfs2_blkreserv *rs = &ip->i_res;
- if (rs->rs_rgd_gh.gh_gl)
+ if (gfs2_holder_initialized(&rs->rs_rgd_gh))
gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
}
@@ -2596,7 +2599,7 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
{
unsigned int x;
- rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
+ rlist->rl_ghs = kmalloc(rlist->rl_rgrps * sizeof(struct gfs2_holder),
GFP_NOFS | __GFP_NOFAIL);
for (x = 0; x < rlist->rl_rgrps; x++)
gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 9b2ff353e45f..3a7e60bb39f8 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -855,7 +855,7 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
wait_event(sdp->sd_reserving_log_wait, atomic_read(&sdp->sd_reserving_log) == 0);
gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);
- if (freeze_gh.gh_gl)
+ if (gfs2_holder_initialized(&freeze_gh))
gfs2_glock_dq_uninit(&freeze_gh);
gfs2_quota_cleanup(sdp);
@@ -1033,7 +1033,7 @@ static int gfs2_unfreeze(struct super_block *sb)
mutex_lock(&sdp->sd_freeze_mutex);
if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
- sdp->sd_freeze_gh.gh_gl == NULL) {
+ !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
mutex_unlock(&sdp->sd_freeze_mutex);
return 0;
}
@@ -1084,9 +1084,11 @@ static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host
int error = 0, err;
memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
- gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
+ gha = kmalloc(slots * sizeof(struct gfs2_holder), GFP_KERNEL);
if (!gha)
return -ENOMEM;
+ for (x = 0; x < slots; x++)
+ gfs2_holder_mark_uninitialized(gha + x);
rgd_next = gfs2_rgrpd_get_first(sdp);
@@ -1096,7 +1098,7 @@ static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host
for (x = 0; x < slots; x++) {
gh = gha + x;
- if (gh->gh_gl && gfs2_glock_poll(gh)) {
+ if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
err = gfs2_glock_wait(gh);
if (err) {
gfs2_holder_uninit(gh);
@@ -1109,7 +1111,7 @@ static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host
}
}
- if (gh->gh_gl)
+ if (gfs2_holder_initialized(gh))
done = 0;
else if (rgd_next && !error) {
error = gfs2_glock_nq_init(rgd_next->rd_gl,
@@ -1304,9 +1306,11 @@ static int gfs2_drop_inode(struct inode *inode)
{
struct gfs2_inode *ip = GFS2_I(inode);
- if (!test_bit(GIF_FREE_VFS_INODE, &ip->i_flags) && inode->i_nlink) {
+ if (!test_bit(GIF_FREE_VFS_INODE, &ip->i_flags) &&
+ inode->i_nlink &&
+ gfs2_holder_initialized(&ip->i_iopen_gh)) {
struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
- if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags))
+ if (test_bit(GLF_DEMOTE, &gl->gl_flags))
clear_nlink(inode);
}
return generic_drop_inode(inode);
@@ -1551,7 +1555,7 @@ static void gfs2_evict_inode(struct inode *inode)
goto out_truncate;
}
- if (ip->i_iopen_gh.gh_gl &&
+ if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
gfs2_glock_dq_wait(&ip->i_iopen_gh);
@@ -1610,7 +1614,7 @@ out_unlock:
if (gfs2_rs_active(&ip->i_res))
gfs2_rs_deltree(&ip->i_res);
- if (ip->i_iopen_gh.gh_gl) {
+ if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
gfs2_glock_dq_wait(&ip->i_iopen_gh);
@@ -1632,7 +1636,7 @@ out:
gfs2_glock_add_to_lru(ip->i_gl);
gfs2_glock_put(ip->i_gl);
ip->i_gl = NULL;
- if (ip->i_iopen_gh.gh_gl) {
+ if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
ip->i_iopen_gh.gh_gl->gl_object = NULL;
ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
gfs2_glock_dq_wait(&ip->i_iopen_gh);
diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
index 98cde8ba5dc2..8f4afd3f5108 100644
--- a/fs/hfs/catalog.c
+++ b/fs/hfs/catalog.c
@@ -20,7 +20,7 @@
*
* Given the ID of the parent and the name, build a search key.
*/
-void hfs_cat_build_key(struct super_block *sb, btree_key *key, u32 parent, struct qstr *name)
+void hfs_cat_build_key(struct super_block *sb, btree_key *key, u32 parent, const struct qstr *name)
{
key->cat.reserved = 0;
key->cat.ParID = cpu_to_be32(parent);
@@ -64,7 +64,7 @@ static int hfs_cat_build_record(hfs_cat_rec *rec, u32 cnid, struct inode *inode)
static int hfs_cat_build_thread(struct super_block *sb,
hfs_cat_rec *rec, int type,
- u32 parentid, struct qstr *name)
+ u32 parentid, const struct qstr *name)
{
rec->type = type;
memset(rec->thread.reserved, 0, sizeof(rec->thread.reserved));
@@ -79,7 +79,7 @@ static int hfs_cat_build_thread(struct super_block *sb,
* Add a new file or directory to the catalog B-tree and
* return a (struct hfs_cat_entry) for it in '*result'.
*/
-int hfs_cat_create(u32 cnid, struct inode *dir, struct qstr *str, struct inode *inode)
+int hfs_cat_create(u32 cnid, struct inode *dir, const struct qstr *str, struct inode *inode)
{
struct hfs_find_data fd;
struct super_block *sb;
@@ -210,7 +210,7 @@ int hfs_cat_find_brec(struct super_block *sb, u32 cnid,
* Delete the indicated file or directory.
* The associated thread is also removed unless ('with_thread'==0).
*/
-int hfs_cat_delete(u32 cnid, struct inode *dir, struct qstr *str)
+int hfs_cat_delete(u32 cnid, struct inode *dir, const struct qstr *str)
{
struct super_block *sb;
struct hfs_find_data fd;
@@ -277,8 +277,8 @@ out:
* If the destination exists it is removed and a
* (struct hfs_cat_entry) for it is returned in '*result'.
*/
-int hfs_cat_move(u32 cnid, struct inode *src_dir, struct qstr *src_name,
- struct inode *dst_dir, struct qstr *dst_name)
+int hfs_cat_move(u32 cnid, struct inode *src_dir, const struct qstr *src_name,
+ struct inode *dst_dir, const struct qstr *dst_name)
{
struct super_block *sb;
struct hfs_find_data src_fd, dst_fd;
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h
index ee2f385811c8..16f5172ee40d 100644
--- a/fs/hfs/hfs_fs.h
+++ b/fs/hfs/hfs_fs.h
@@ -178,11 +178,11 @@ extern int hfs_clear_vbm_bits(struct super_block *, u16, u16);
extern int hfs_cat_keycmp(const btree_key *, const btree_key *);
struct hfs_find_data;
extern int hfs_cat_find_brec(struct super_block *, u32, struct hfs_find_data *);
-extern int hfs_cat_create(u32, struct inode *, struct qstr *, struct inode *);
-extern int hfs_cat_delete(u32, struct inode *, struct qstr *);
-extern int hfs_cat_move(u32, struct inode *, struct qstr *,
- struct inode *, struct qstr *);
-extern void hfs_cat_build_key(struct super_block *, btree_key *, u32, struct qstr *);
+extern int hfs_cat_create(u32, struct inode *, const struct qstr *, struct inode *);
+extern int hfs_cat_delete(u32, struct inode *, const struct qstr *);
+extern int hfs_cat_move(u32, struct inode *, const struct qstr *,
+ struct inode *, const struct qstr *);
+extern void hfs_cat_build_key(struct super_block *, btree_key *, u32, const struct qstr *);
/* dir.c */
extern const struct file_operations hfs_dir_operations;
@@ -201,7 +201,7 @@ extern int hfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
extern const struct address_space_operations hfs_aops;
extern const struct address_space_operations hfs_btree_aops;
-extern struct inode *hfs_new_inode(struct inode *, struct qstr *, umode_t);
+extern struct inode *hfs_new_inode(struct inode *, const struct qstr *, umode_t);
extern void hfs_inode_write_fork(struct inode *, struct hfs_extent *, __be32 *, __be32 *);
extern int hfs_write_inode(struct inode *, struct writeback_control *);
extern int hfs_inode_setattr(struct dentry *, struct iattr *);
@@ -233,11 +233,11 @@ extern const struct dentry_operations hfs_dentry_operations;
extern int hfs_hash_dentry(const struct dentry *, struct qstr *);
extern int hfs_strcmp(const unsigned char *, unsigned int,
const unsigned char *, unsigned int);
-extern int hfs_compare_dentry(const struct dentry *parent, const struct dentry *dentry,
+extern int hfs_compare_dentry(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name);
/* trans.c */
-extern void hfs_asc2mac(struct super_block *, struct hfs_name *, struct qstr *);
+extern void hfs_asc2mac(struct super_block *, struct hfs_name *, const struct qstr *);
extern int hfs_mac2asc(struct super_block *, char *, const struct hfs_name *);
/* super.c */
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 8eed66af5b82..c6a32415735b 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -128,7 +128,7 @@ static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
- struct inode *inode = file_inode(file)->i_mapping->host;
+ struct inode *inode = mapping->host;
size_t count = iov_iter_count(iter);
ssize_t ret;
@@ -177,7 +177,7 @@ const struct address_space_operations hfs_aops = {
/*
* hfs_new_inode
*/
-struct inode *hfs_new_inode(struct inode *dir, struct qstr *name, umode_t mode)
+struct inode *hfs_new_inode(struct inode *dir, const struct qstr *name, umode_t mode)
{
struct super_block *sb = dir->i_sb;
struct inode *inode = new_inode(sb);
diff --git a/fs/hfs/string.c b/fs/hfs/string.c
index 85b610c3909f..3912209153a8 100644
--- a/fs/hfs/string.c
+++ b/fs/hfs/string.c
@@ -59,7 +59,7 @@ int hfs_hash_dentry(const struct dentry *dentry, struct qstr *this)
if (len > HFS_NAMELEN)
len = HFS_NAMELEN;
- hash = init_name_hash();
+ hash = init_name_hash(dentry);
for (; len; len--)
hash = partial_name_hash(caseorder[*name++], hash);
this->hash = end_name_hash(hash);
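
init_name_hash() now takes the dentry so the name hash is salted per parent; the same file name then hashes differently under different directories. A userspace sketch of that salted loop (the mixing constants mirror the classic partial_name_hash() shape, but treat them as illustrative):

    #include <stdint.h>
    #include <stdio.h>

    static unsigned long partial_hash(unsigned long c, unsigned long prev)
    {
            return (prev + (c << 4) + (c >> 4)) * 11;
    }

    /* Seeding with the parent pointer plays the role of
     * init_name_hash(dentry); end_name_hash() is just the truncation. */
    static unsigned int name_hash(const void *salt, const char *name)
    {
            unsigned long hash = (unsigned long)(uintptr_t)salt;

            while (*name)
                    hash = partial_hash((unsigned char)*name++, hash);
            return (unsigned int)hash;
    }

    int main(void)
    {
            int dir_a, dir_b;       /* stand-ins for two parent dentries */

            printf("%u %u\n", name_hash(&dir_a, "readme"),
                   name_hash(&dir_b, "readme")); /* same name, new salt */
            return 0;
    }
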
@@ -92,7 +92,7 @@ int hfs_strcmp(const unsigned char *s1, unsigned int len1,
* Test for equality of two strings in the HFS filename character ordering.
* return 1 on failure and 0 on success
*/
-int hfs_compare_dentry(const struct dentry *parent, const struct dentry *dentry,
+int hfs_compare_dentry(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
const unsigned char *n1, *n2;
diff --git a/fs/hfs/trans.c b/fs/hfs/trans.c
index b1ce4c7ad3fb..39f5e343bf4d 100644
--- a/fs/hfs/trans.c
+++ b/fs/hfs/trans.c
@@ -94,7 +94,7 @@ out:
* This routine is an inverse to hfs_mac2triv().
* A ':' is replaced by a '/'.
*/
-void hfs_asc2mac(struct super_block *sb, struct hfs_name *out, struct qstr *in)
+void hfs_asc2mac(struct super_block *sb, struct hfs_name *out, const struct qstr *in)
{
struct nls_table *nls_disk = HFS_SB(sb)->nls_disk;
struct nls_table *nls_io = HFS_SB(sb)->nls_io;
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index fb707e8f423a..142534d3c2d5 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -40,7 +40,7 @@ int hfsplus_cat_bin_cmp_key(const hfsplus_btree_key *k1,
/* Generates key for catalog file/folders record. */
int hfsplus_cat_build_key(struct super_block *sb,
- hfsplus_btree_key *key, u32 parent, struct qstr *str)
+ hfsplus_btree_key *key, u32 parent, const struct qstr *str)
{
int len, err;
@@ -174,7 +174,7 @@ static int hfsplus_cat_build_record(hfsplus_cat_entry *entry,
static int hfsplus_fill_cat_thread(struct super_block *sb,
hfsplus_cat_entry *entry, int type,
- u32 parentid, struct qstr *str)
+ u32 parentid, const struct qstr *str)
{
int err;
@@ -250,7 +250,7 @@ static void hfsplus_subfolders_dec(struct inode *dir)
}
int hfsplus_create_cat(u32 cnid, struct inode *dir,
- struct qstr *str, struct inode *inode)
+ const struct qstr *str, struct inode *inode)
{
struct super_block *sb = dir->i_sb;
struct hfs_find_data fd;
@@ -318,7 +318,7 @@ err2:
return err;
}
-int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
+int hfsplus_delete_cat(u32 cnid, struct inode *dir, const struct qstr *str)
{
struct super_block *sb = dir->i_sb;
struct hfs_find_data fd;
@@ -415,8 +415,8 @@ out:
}
int hfsplus_rename_cat(u32 cnid,
- struct inode *src_dir, struct qstr *src_name,
- struct inode *dst_dir, struct qstr *dst_name)
+ struct inode *src_dir, const struct qstr *src_name,
+ struct inode *dst_dir, const struct qstr *dst_name)
{
struct super_block *sb = src_dir->i_sb;
struct hfs_find_data src_fd, dst_fd;
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index fdc3446d934a..a3f03b247463 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -445,17 +445,17 @@ int hfsplus_cat_case_cmp_key(const hfsplus_btree_key *k1,
int hfsplus_cat_bin_cmp_key(const hfsplus_btree_key *k1,
const hfsplus_btree_key *k2);
int hfsplus_cat_build_key(struct super_block *sb, hfsplus_btree_key *key,
- u32 parent, struct qstr *str);
+ u32 parent, const struct qstr *str);
void hfsplus_cat_build_key_with_cnid(struct super_block *sb,
hfsplus_btree_key *key, u32 parent);
void hfsplus_cat_set_perms(struct inode *inode, struct hfsplus_perm *perms);
int hfsplus_find_cat(struct super_block *sb, u32 cnid,
struct hfs_find_data *fd);
-int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str,
+int hfsplus_create_cat(u32 cnid, struct inode *dir, const struct qstr *str,
struct inode *inode);
-int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str);
-int hfsplus_rename_cat(u32 cnid, struct inode *src_dir, struct qstr *src_name,
- struct inode *dst_dir, struct qstr *dst_name);
+int hfsplus_delete_cat(u32 cnid, struct inode *dir, const struct qstr *str);
+int hfsplus_rename_cat(u32 cnid, struct inode *src_dir, const struct qstr *src_name,
+ struct inode *dst_dir, const struct qstr *dst_name);
/* dir.c */
extern const struct inode_operations hfsplus_dir_inode_operations;
@@ -520,13 +520,12 @@ int hfsplus_uni2asc(struct super_block *sb, const struct hfsplus_unistr *ustr,
int hfsplus_asc2uni(struct super_block *sb, struct hfsplus_unistr *ustr,
int max_unistr_len, const char *astr, int len);
int hfsplus_hash_dentry(const struct dentry *dentry, struct qstr *str);
-int hfsplus_compare_dentry(const struct dentry *parent,
- const struct dentry *dentry, unsigned int len,
+int hfsplus_compare_dentry(const struct dentry *dentry, unsigned int len,
const char *str, const struct qstr *name);
/* wrapper.c */
int hfsplus_submit_bio(struct super_block *sb, sector_t sector, void *buf,
- void **data, int rw);
+ void **data, int op, int op_flags);
int hfsplus_read_wrapper(struct super_block *sb);
/* time macros */
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index ef9fefe364a6..19462d773fe2 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -126,7 +126,7 @@ static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
- struct inode *inode = file_inode(file)->i_mapping->host;
+ struct inode *inode = mapping->host;
size_t count = iov_iter_count(iter);
ssize_t ret;
diff --git a/fs/hfsplus/part_tbl.c b/fs/hfsplus/part_tbl.c
index eb355d81e279..63164ebc52fa 100644
--- a/fs/hfsplus/part_tbl.c
+++ b/fs/hfsplus/part_tbl.c
@@ -112,7 +112,8 @@ static int hfs_parse_new_pmap(struct super_block *sb, void *buf,
if ((u8 *)pm - (u8 *)buf >= buf_size) {
res = hfsplus_submit_bio(sb,
*part_start + HFS_PMAP_BLK + i,
- buf, (void **)&pm, READ);
+ buf, (void **)&pm, REQ_OP_READ,
+ 0);
if (res)
return res;
}
@@ -136,7 +137,7 @@ int hfs_part_find(struct super_block *sb,
return -ENOMEM;
res = hfsplus_submit_bio(sb, *part_start + HFS_PMAP_BLK,
- buf, &data, READ);
+ buf, &data, REQ_OP_READ, 0);
if (res)
goto out;
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 755bf30ba1ce..11854dd84572 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -220,7 +220,8 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait)
error2 = hfsplus_submit_bio(sb,
sbi->part_start + HFSPLUS_VOLHEAD_SECTOR,
- sbi->s_vhdr_buf, NULL, WRITE_SYNC);
+ sbi->s_vhdr_buf, NULL, REQ_OP_WRITE,
+ WRITE_SYNC);
if (!error)
error = error2;
if (!write_backup)
@@ -228,7 +229,8 @@ static int hfsplus_sync_fs(struct super_block *sb, int wait)
error2 = hfsplus_submit_bio(sb,
sbi->part_start + sbi->sect_count - 2,
- sbi->s_backup_vhdr_buf, NULL, WRITE_SYNC);
+ sbi->s_backup_vhdr_buf, NULL, REQ_OP_WRITE,
+ WRITE_SYNC);
if (!error)
error2 = error;
out:
diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c
index e8ef121a4d8b..e563939882f3 100644
--- a/fs/hfsplus/unicode.c
+++ b/fs/hfsplus/unicode.c
@@ -346,7 +346,7 @@ int hfsplus_hash_dentry(const struct dentry *dentry, struct qstr *str)
casefold = test_bit(HFSPLUS_SB_CASEFOLD, &HFSPLUS_SB(sb)->flags);
decompose = !test_bit(HFSPLUS_SB_NODECOMPOSE, &HFSPLUS_SB(sb)->flags);
- hash = init_name_hash();
+ hash = init_name_hash(dentry);
astr = str->name;
len = str->len;
while (len > 0) {
@@ -385,10 +385,10 @@ int hfsplus_hash_dentry(const struct dentry *dentry, struct qstr *str)
* Composed unicode characters are decomposed and case-folding is performed
* if the appropriate bits are (un)set on the superblock.
*/
-int hfsplus_compare_dentry(const struct dentry *parent, const struct dentry *dentry,
+int hfsplus_compare_dentry(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
- struct super_block *sb = parent->d_sb;
+ struct super_block *sb = dentry->d_sb;
int casefold, decompose, size;
int dsize1, dsize2, len1, len2;
const u16 *dstr1, *dstr2;
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index cc6235671437..ebb85e5f6549 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -30,7 +30,8 @@ struct hfsplus_wd {
* @sector: block to read or write, for blocks of HFSPLUS_SECTOR_SIZE bytes
* @buf: buffer for I/O
* @data: output pointer for location of requested data
- * @rw: direction of I/O
+ * @op: direction of I/O
+ * @op_flags: request op flags
*
* The unit of I/O is hfsplus_min_io_size(sb), which may be bigger than
* HFSPLUS_SECTOR_SIZE, and @buf must be sized accordingly. On reads
@@ -44,7 +45,7 @@ struct hfsplus_wd {
* will work correctly.
*/
int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
- void *buf, void **data, int rw)
+ void *buf, void **data, int op, int op_flags)
{
struct bio *bio;
int ret = 0;
@@ -65,8 +66,9 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
bio = bio_alloc(GFP_NOIO, 1);
bio->bi_iter.bi_sector = sector;
bio->bi_bdev = sb->s_bdev;
+ bio_set_op_attrs(bio, op, op_flags);
- if (!(rw & WRITE) && data)
+ if (op != WRITE && data)
*data = (u8 *)buf + offset;
while (io_size > 0) {
@@ -83,7 +85,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
buf = (u8 *)buf + len;
}
- ret = submit_bio_wait(rw, bio);
+ ret = submit_bio_wait(bio);
out:
bio_put(bio);
return ret < 0 ? ret : 0;
@@ -181,7 +183,7 @@ int hfsplus_read_wrapper(struct super_block *sb)
reread:
error = hfsplus_submit_bio(sb, part_start + HFSPLUS_VOLHEAD_SECTOR,
sbi->s_vhdr_buf, (void **)&sbi->s_vhdr,
- READ);
+ REQ_OP_READ, 0);
if (error)
goto out_free_backup_vhdr;
@@ -213,7 +215,8 @@ reread:
error = hfsplus_submit_bio(sb, part_start + part_size - 2,
sbi->s_backup_vhdr_buf,
- (void **)&sbi->s_backup_vhdr, READ);
+ (void **)&sbi->s_backup_vhdr, REQ_OP_READ,
+ 0);
if (error)
goto out_free_backup_vhdr;
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 5c57654927a6..90e46cd752fe 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -959,10 +959,11 @@ static int hostfs_fill_sb_common(struct super_block *sb, void *d, int silent)
if (S_ISLNK(root_inode->i_mode)) {
char *name = follow_link(host_root_path);
- if (IS_ERR(name))
+ if (IS_ERR(name)) {
err = PTR_ERR(name);
- else
- err = read_name(root_inode, name);
+ goto out_put;
+ }
+ err = read_name(root_inode, name);
kfree(name);
if (err)
goto out_put;
diff --git a/fs/hpfs/dentry.c b/fs/hpfs/dentry.c
index fa27980f2229..bb87d65f0d97 100644
--- a/fs/hpfs/dentry.c
+++ b/fs/hpfs/dentry.c
@@ -26,7 +26,7 @@ static int hpfs_hash_dentry(const struct dentry *dentry, struct qstr *qstr)
/*return -ENOENT;*/
x:
- hash = init_name_hash();
+ hash = init_name_hash(dentry);
for (i = 0; i < l; i++)
hash = partial_name_hash(hpfs_upcase(hpfs_sb(dentry->d_sb)->sb_cp_table,qstr->name[i]), hash);
qstr->hash = end_name_hash(hash);
@@ -34,7 +34,7 @@ static int hpfs_hash_dentry(const struct dentry *dentry, struct qstr *qstr)
return 0;
}
-static int hpfs_compare_dentry(const struct dentry *parent, const struct dentry *dentry,
+static int hpfs_compare_dentry(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
unsigned al = len;
@@ -50,7 +50,7 @@ static int hpfs_compare_dentry(const struct dentry *parent, const struct dentry
if (hpfs_chk_name(name->name, &bl))
return 1;
- if (hpfs_compare_names(parent->d_sb, str, al, name->name, bl, 0))
+ if (hpfs_compare_names(dentry->d_sb, str, al, name->name, bl, 0))
return 1;
return 0;
}
diff --git a/fs/inode.c b/fs/inode.c
index 4ccbc21b30ce..7e3ef3af3db9 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -345,7 +345,7 @@ EXPORT_SYMBOL(inc_nlink);
void address_space_init_once(struct address_space *mapping)
{
memset(mapping, 0, sizeof(*mapping));
- INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
+ INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC | __GFP_ACCOUNT);
spin_lock_init(&mapping->tree_lock);
init_rwsem(&mapping->i_mmap_rwsem);
INIT_LIST_HEAD(&mapping->private_list);
@@ -365,6 +365,7 @@ void inode_init_once(struct inode *inode)
INIT_HLIST_NODE(&inode->i_hash);
INIT_LIST_HEAD(&inode->i_devices);
INIT_LIST_HEAD(&inode->i_io_list);
+ INIT_LIST_HEAD(&inode->i_wb_list);
INIT_LIST_HEAD(&inode->i_lru);
address_space_init_once(&inode->i_data);
i_size_ordered_init(inode);
@@ -507,6 +508,7 @@ void clear_inode(struct inode *inode)
BUG_ON(!list_empty(&inode->i_data.private_list));
BUG_ON(!(inode->i_state & I_FREEING));
BUG_ON(inode->i_state & I_CLEAR);
+ BUG_ON(!list_empty(&inode->i_wb_list));
/* don't need i_lock here, no concurrent mods to i_state */
inode->i_state = I_FREEING | I_CLEAR;
}
@@ -1617,6 +1619,13 @@ bool atime_needs_update(const struct path *path, struct inode *inode)
if (inode->i_flags & S_NOATIME)
return false;
+
+ /* Atime updates will likely cause i_uid and i_gid to be written
+ * back improperly if their true value is unknown to the vfs.
+ */
+ if (HAS_UNMAPPED_ID(inode))
+ return false;
+
if (IS_NOATIME(inode))
return false;
if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
@@ -1720,7 +1729,6 @@ int dentry_needs_remove_privs(struct dentry *dentry)
mask |= ATTR_KILL_PRIV;
return mask;
}
-EXPORT_SYMBOL(dentry_needs_remove_privs);
static int __remove_privs(struct dentry *dentry, int kill)
{
@@ -1740,8 +1748,8 @@ static int __remove_privs(struct dentry *dentry, int kill)
*/
int file_remove_privs(struct file *file)
{
- struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = d_inode(dentry);
+ struct dentry *dentry = file_dentry(file);
+ struct inode *inode = file_inode(file);
int kill;
int error = 0;
@@ -1749,7 +1757,7 @@ int file_remove_privs(struct file *file)
if (IS_NOSEC(inode))
return 0;
- kill = file_needs_remove_privs(file);
+ kill = dentry_needs_remove_privs(dentry);
if (kill < 0)
return kill;
if (kill)
diff --git a/fs/internal.h b/fs/internal.h
index f57ced528cde..ba0737649d4a 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -11,6 +11,7 @@
struct super_block;
struct file_system_type;
+struct iomap;
struct linux_binprm;
struct path;
struct mount;
@@ -39,6 +40,8 @@ static inline int __sync_blockdev(struct block_device *bdev, int wait)
* buffer.c
*/
extern void guard_bio_eod(int rw, struct bio *bio);
+extern int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
+ get_block_t *get_block, struct iomap *iomap);
/*
* char_dev.c
@@ -108,12 +111,14 @@ extern long do_handle_open(int mountdirfd,
struct file_handle __user *ufh, int open_flag);
extern int open_check_o_direct(struct file *f);
extern int vfs_open(const struct path *, struct file *, const struct cred *);
+extern struct file *filp_clone_open(struct file *);
/*
* inode.c
*/
extern long prune_icache_sb(struct super_block *sb, struct shrink_control *sc);
extern void inode_add_lru(struct inode *inode);
+extern int dentry_needs_remove_privs(struct dentry *dentry);
/*
* fs-writeback.c
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 116a333e9c77..0f56deb24ce6 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -590,6 +590,7 @@ static long ioctl_file_dedupe_range(struct file *file, void __user *arg)
goto out;
}
+ same->dest_count = count;
ret = vfs_dedupe_file_range(file, same);
if (ret)
goto out;
diff --git a/fs/iomap.c b/fs/iomap.c
new file mode 100644
index 000000000000..48141b8eff5f
--- /dev/null
+++ b/fs/iomap.c
@@ -0,0 +1,497 @@
+/*
+ * Copyright (C) 2010 Red Hat, Inc.
+ * Copyright (c) 2016 Christoph Hellwig.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include <linux/module.h>
+#include <linux/compiler.h>
+#include <linux/fs.h>
+#include <linux/iomap.h>
+#include <linux/uaccess.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+#include <linux/file.h>
+#include <linux/uio.h>
+#include <linux/backing-dev.h>
+#include <linux/buffer_head.h>
+#include <linux/dax.h>
+#include "internal.h"
+
+typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len,
+ void *data, struct iomap *iomap);
+
+/*
+ * Execute an iomap write on a segment of the mapping that spans a
+ * contiguous range of pages that have identical block mapping state.
+ *
+ * This avoids the need to map pages individually, do individual allocations
+ * for each page and most importantly avoid the need for filesystem specific
+ * locking per page. Instead, all the operations are amortised over the entire
+ * range of pages. It is assumed that the filesystems will lock whatever
+ * resources they require in the iomap_begin call, and release them in the
+ * iomap_end call.
+ */
+static loff_t
+iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
+ struct iomap_ops *ops, void *data, iomap_actor_t actor)
+{
+ struct iomap iomap = { 0 };
+ loff_t written = 0, ret;
+
+ /*
+ * Need to map a range from start position for length bytes. This can
+ * span multiple pages - it is only guaranteed to return a range of a
+ * single type of pages (e.g. all into a hole, all mapped or all
+ * unwritten). Failure at this point has nothing to undo.
+ *
+ * If allocation is required for this range, reserve the space now so
+ * that the allocation is guaranteed to succeed later on. Once we copy
+ * the data into the page cache pages, then we cannot fail otherwise we
+ * expose transient stale data. If the reserve fails, we can safely
+ * back out at this point as there is nothing to undo.
+ */
+ ret = ops->iomap_begin(inode, pos, length, flags, &iomap);
+ if (ret)
+ return ret;
+ if (WARN_ON(iomap.offset > pos))
+ return -EIO;
+
+ /*
+ * Cut down the length to the one actually provided by the filesystem,
+ * as it might not be able to give us the whole size that we requested.
+ */
+ if (iomap.offset + iomap.length < pos + length)
+ length = iomap.offset + iomap.length - pos;
+
+ /*
+ * Now that we have guaranteed that the space allocation will succeed,
+ * we can do the copy-in page by page without having to worry about
+ * failures exposing transient data.
+ */
+ written = actor(inode, pos, length, data, &iomap);
+
+ /*
+ * Now the data has been copied, commit the range we've copied. This
+ * should not fail unless the filesystem has had a fatal error.
+ */
+ ret = ops->iomap_end(inode, pos, length, written > 0 ? written : 0,
+ flags, &iomap);
+
+ return written ? written : ret;
+}
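
iomap_apply() separates the bracketing (iomap_begin/iomap_end, range trimming) from the per-range work done by the actor callback, so each operation below (write, zero, mkwrite, fiemap) only supplies an actor. A self-contained sketch of that apply/actor shape, with hypothetical names (apply, ops, actor_t):

    #include <stdio.h>

    typedef long (*actor_t)(long pos, long len, void *data);

    struct ops {
            long (*begin)(long pos, long len, long *mapped_len);
            void (*end)(long pos, long len, long written);
    };

    /* Bracket each call with begin/end and delegate work to the actor. */
    static long apply(const struct ops *ops, long pos, long len,
                      void *data, actor_t actor)
    {
            long mapped, written, ret;

            ret = ops->begin(pos, len, &mapped); /* reserve/map the range */
            if (ret)
                    return ret;
            if (mapped < len)
                    len = mapped;               /* trim to what we got */
            written = actor(pos, len, data);    /* per-range work */
            ops->end(pos, len, written > 0 ? written : 0);
            return written;
    }

    static long demo_begin(long pos, long len, long *mapped_len)
    {
            (void)pos;
            *mapped_len = len < 4096 ? len : 4096; /* pretend 4k extents */
            return 0;
    }

    static void demo_end(long pos, long len, long written)
    {
            printf("end: pos=%ld len=%ld written=%ld\n", pos, len, written);
    }

    static long demo_actor(long pos, long len, void *data)
    {
            (void)pos; (void)data;
            return len;     /* claim the whole range was processed */
    }

    int main(void)
    {
            const struct ops ops = { demo_begin, demo_end };

            apply(&ops, 0, 10000, NULL, demo_actor); /* trimmed to 4096 */
            return 0;
    }
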
+
+static void
+iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
+{
+ loff_t i_size = i_size_read(inode);
+
+ /*
+ * Only truncate newly allocated pages beyond EOF, even if the
+ * write started inside the existing inode size.
+ */
+ if (pos + len > i_size)
+ truncate_pagecache_range(inode, max(pos, i_size), pos + len);
+}
+
+static int
+iomap_write_begin(struct inode *inode, loff_t pos, unsigned len, unsigned flags,
+ struct page **pagep, struct iomap *iomap)
+{
+ pgoff_t index = pos >> PAGE_SHIFT;
+ struct page *page;
+ int status = 0;
+
+ BUG_ON(pos + len > iomap->offset + iomap->length);
+
+ page = grab_cache_page_write_begin(inode->i_mapping, index, flags);
+ if (!page)
+ return -ENOMEM;
+
+ status = __block_write_begin_int(page, pos, len, NULL, iomap);
+ if (unlikely(status)) {
+ unlock_page(page);
+ put_page(page);
+ page = NULL;
+
+ iomap_write_failed(inode, pos, len);
+ }
+
+ *pagep = page;
+ return status;
+}
+
+static int
+iomap_write_end(struct inode *inode, loff_t pos, unsigned len,
+ unsigned copied, struct page *page)
+{
+ int ret;
+
+ ret = generic_write_end(NULL, inode->i_mapping, pos, len,
+ copied, page, NULL);
+ if (ret < len)
+ iomap_write_failed(inode, pos, len);
+ return ret;
+}
+
+static loff_t
+iomap_write_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+ struct iomap *iomap)
+{
+ struct iov_iter *i = data;
+ long status = 0;
+ ssize_t written = 0;
+ unsigned int flags = AOP_FLAG_NOFS;
+
+ /*
+ * Copies from kernel address space cannot fail (NFSD is a big user).
+ */
+ if (!iter_is_iovec(i))
+ flags |= AOP_FLAG_UNINTERRUPTIBLE;
+
+ do {
+ struct page *page;
+ unsigned long offset; /* Offset into pagecache page */
+ unsigned long bytes; /* Bytes to write to page */
+ size_t copied; /* Bytes copied from user */
+
+ offset = (pos & (PAGE_SIZE - 1));
+ bytes = min_t(unsigned long, PAGE_SIZE - offset,
+ iov_iter_count(i));
+again:
+ if (bytes > length)
+ bytes = length;
+
+ /*
+ * Bring in the user page that we will copy from _first_.
+ * Otherwise there's a nasty deadlock on copying from the
+ * same page as we're writing to, without it being marked
+ * up-to-date.
+ *
+ * Not only is this an optimisation, but it is also required
+ * to check that the address is actually valid, when atomic
+ * usercopies are used, below.
+ */
+ if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
+ status = -EFAULT;
+ break;
+ }
+
+ status = iomap_write_begin(inode, pos, bytes, flags, &page,
+ iomap);
+ if (unlikely(status))
+ break;
+
+ if (mapping_writably_mapped(inode->i_mapping))
+ flush_dcache_page(page);
+
+ pagefault_disable();
+ copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
+ pagefault_enable();
+
+ flush_dcache_page(page);
+ mark_page_accessed(page);
+
+ status = iomap_write_end(inode, pos, bytes, copied, page);
+ if (unlikely(status < 0))
+ break;
+ copied = status;
+
+ cond_resched();
+
+ iov_iter_advance(i, copied);
+ if (unlikely(copied == 0)) {
+ /*
+ * If we were unable to copy any data at all, we must
+ * fall back to a single segment length write.
+ *
+ * If we didn't fallback here, we could livelock
+ * because not all segments in the iov can be copied at
+ * once without a pagefault.
+ */
+ bytes = min_t(unsigned long, PAGE_SIZE - offset,
+ iov_iter_single_seg_count(i));
+ goto again;
+ }
+ pos += copied;
+ written += copied;
+ length -= copied;
+
+ balance_dirty_pages_ratelimited(inode->i_mapping);
+ } while (iov_iter_count(i) && length);
+
+ return written ? written : status;
+}
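
One detail worth calling out in the loop above is the zero-progress fallback: if the atomic usercopy copies nothing, the chunk size is reduced to a single iovec segment and the copy retried, avoiding a livelock. A self-contained sketch of that fallback shape (copy_chunk is a stand-in for the atomic copy, with a simulated failure threshold):

    #include <stdio.h>
    #include <string.h>

    /* Simulate an atomic copy that fails (copies nothing) when asked to
     * move more than fail_over bytes in one go. */
    static size_t copy_chunk(char *dst, const char *src, size_t n,
                             size_t fail_over)
    {
            if (n > fail_over)
                    return 0;
            memcpy(dst, src, n);
            return n;
    }

    int main(void)
    {
            char dst[64];
            const char *src = "payload";
            size_t want = 64, done = 0, len = strlen(src);

            while (done < len) {
                    size_t bytes = want < len - done ? want : len - done;
                    size_t copied = copy_chunk(dst + done, src + done,
                                               bytes, 4);

                    if (copied == 0) {  /* no progress: shrink and retry */
                            want = 4;
                            continue;
                    }
                    done += copied;
            }
            printf("copied %zu bytes\n", done);
            return 0;
    }
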
+
+ssize_t
+iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *iter,
+ struct iomap_ops *ops)
+{
+ struct inode *inode = iocb->ki_filp->f_mapping->host;
+ loff_t pos = iocb->ki_pos, ret = 0, written = 0;
+
+ while (iov_iter_count(iter)) {
+ ret = iomap_apply(inode, pos, iov_iter_count(iter),
+ IOMAP_WRITE, ops, iter, iomap_write_actor);
+ if (ret <= 0)
+ break;
+ pos += ret;
+ written += ret;
+ }
+
+ return written ? written : ret;
+}
+EXPORT_SYMBOL_GPL(iomap_file_buffered_write);
+
+static int iomap_zero(struct inode *inode, loff_t pos, unsigned offset,
+ unsigned bytes, struct iomap *iomap)
+{
+ struct page *page;
+ int status;
+
+ status = iomap_write_begin(inode, pos, bytes,
+ AOP_FLAG_UNINTERRUPTIBLE | AOP_FLAG_NOFS, &page, iomap);
+ if (status)
+ return status;
+
+ zero_user(page, offset, bytes);
+ mark_page_accessed(page);
+
+ return iomap_write_end(inode, pos, bytes, bytes, page);
+}
+
+static int iomap_dax_zero(loff_t pos, unsigned offset, unsigned bytes,
+ struct iomap *iomap)
+{
+ sector_t sector = iomap->blkno +
+ (((pos & ~(PAGE_SIZE - 1)) - iomap->offset) >> 9);
+
+ return __dax_zero_page_range(iomap->bdev, sector, offset, bytes);
+}
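
Concretely, with illustrative numbers: a PAGE_SIZE of 4096 and pos = 13500 mask down to a page start of 12288; if iomap->offset is 8192, the byte delta is 4096, which is 8 sectors of 512 bytes, so the zeroing lands at iomap->blkno + 8.
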
+
+static loff_t
+iomap_zero_range_actor(struct inode *inode, loff_t pos, loff_t count,
+ void *data, struct iomap *iomap)
+{
+ bool *did_zero = data;
+ loff_t written = 0;
+ int status;
+
+ /* already zeroed? we're done. */
+ if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
+ return count;
+
+ do {
+ unsigned offset, bytes;
+
+ offset = pos & (PAGE_SIZE - 1); /* Within page */
+ bytes = min_t(unsigned, PAGE_SIZE - offset, count);
+
+ if (IS_DAX(inode))
+ status = iomap_dax_zero(pos, offset, bytes, iomap);
+ else
+ status = iomap_zero(inode, pos, offset, bytes, iomap);
+ if (status < 0)
+ return status;
+
+ pos += bytes;
+ count -= bytes;
+ written += bytes;
+ if (did_zero)
+ *did_zero = true;
+ } while (count > 0);
+
+ return written;
+}
+
+int
+iomap_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
+ struct iomap_ops *ops)
+{
+ loff_t ret;
+
+ while (len > 0) {
+ ret = iomap_apply(inode, pos, len, IOMAP_ZERO,
+ ops, did_zero, iomap_zero_range_actor);
+ if (ret <= 0)
+ return ret;
+
+ pos += ret;
+ len -= ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iomap_zero_range);
+
+int
+iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
+ struct iomap_ops *ops)
+{
+ unsigned blocksize = (1 << inode->i_blkbits);
+ unsigned off = pos & (blocksize - 1);
+
+ /* Block boundary? Nothing to do */
+ if (!off)
+ return 0;
+ return iomap_zero_range(inode, pos, blocksize - off, did_zero, ops);
+}
+EXPORT_SYMBOL_GPL(iomap_truncate_page);
+
+static loff_t
+iomap_page_mkwrite_actor(struct inode *inode, loff_t pos, loff_t length,
+ void *data, struct iomap *iomap)
+{
+ struct page *page = data;
+ int ret;
+
+ ret = __block_write_begin_int(page, pos & ~PAGE_MASK, length,
+ NULL, iomap);
+ if (ret)
+ return ret;
+
+ block_commit_write(page, 0, length);
+ return length;
+}
+
+int iomap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
+ struct iomap_ops *ops)
+{
+ struct page *page = vmf->page;
+ struct inode *inode = file_inode(vma->vm_file);
+ unsigned long length;
+ loff_t offset, size;
+ ssize_t ret;
+
+ lock_page(page);
+ size = i_size_read(inode);
+ if ((page->mapping != inode->i_mapping) ||
+ (page_offset(page) > size)) {
+ /* We overload EFAULT to mean page got truncated */
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ /* page is wholly or partially inside EOF */
+ if (((page->index + 1) << PAGE_SHIFT) > size)
+ length = size & ~PAGE_MASK;
+ else
+ length = PAGE_SIZE;
+
+ offset = page_offset(page);
+ while (length > 0) {
+ ret = iomap_apply(inode, offset, length, IOMAP_WRITE,
+ ops, page, iomap_page_mkwrite_actor);
+ if (unlikely(ret <= 0))
+ goto out_unlock;
+ offset += ret;
+ length -= ret;
+ }
+
+ set_page_dirty(page);
+ wait_for_stable_page(page);
+ return 0;
+out_unlock:
+ unlock_page(page);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
+
+struct fiemap_ctx {
+ struct fiemap_extent_info *fi;
+ struct iomap prev;
+};
+
+static int iomap_to_fiemap(struct fiemap_extent_info *fi,
+ struct iomap *iomap, u32 flags)
+{
+ switch (iomap->type) {
+ case IOMAP_HOLE:
+ /* skip holes */
+ return 0;
+ case IOMAP_DELALLOC:
+ flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
+ break;
+ case IOMAP_UNWRITTEN:
+ flags |= FIEMAP_EXTENT_UNWRITTEN;
+ break;
+ case IOMAP_MAPPED:
+ break;
+ }
+
+ return fiemap_fill_next_extent(fi, iomap->offset,
+ iomap->blkno != IOMAP_NULL_BLOCK ? iomap->blkno << 9 : 0,
+ iomap->length, flags | FIEMAP_EXTENT_MERGED);
+}
+
+static loff_t
+iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
+ struct iomap *iomap)
+{
+ struct fiemap_ctx *ctx = data;
+ loff_t ret = length;
+
+ if (iomap->type == IOMAP_HOLE)
+ return length;
+
+ ret = iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
+ ctx->prev = *iomap;
+ switch (ret) {
+ case 0: /* success */
+ return length;
+ case 1: /* extent array full */
+ return 0;
+ default:
+ return ret;
+ }
+}
+
+int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
+ loff_t start, loff_t len, struct iomap_ops *ops)
+{
+ struct fiemap_ctx ctx;
+ loff_t ret;
+
+ memset(&ctx, 0, sizeof(ctx));
+ ctx.fi = fi;
+ ctx.prev.type = IOMAP_HOLE;
+
+ ret = fiemap_check_flags(fi, FIEMAP_FLAG_SYNC);
+ if (ret)
+ return ret;
+
+ ret = filemap_write_and_wait(inode->i_mapping);
+ if (ret)
+ return ret;
+
+ while (len > 0) {
+ ret = iomap_apply(inode, start, len, 0, ops, &ctx,
+ iomap_fiemap_actor);
+ if (ret < 0)
+ return ret;
+ if (ret == 0)
+ break;
+
+ start += ret;
+ len -= ret;
+ }
+
+ if (ctx.prev.type != IOMAP_HOLE) {
+ ret = iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iomap_fiemap);
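All of the entry points above (buffered write, zeroing, truncate_page, page_mkwrite, fiemap) share one shape: loop over iomap_apply(), which obtains a single extent mapping from the filesystem, clamps the requested range to it, runs an actor over that range, and advances by the actor's return value. A self-contained userspace model of that apply/actor loop; every name in it is illustrative rather than the kernel API:

#include <stdio.h>

struct map {                    /* one contiguous extent */
        long long offset, length;
};

typedef long long (*actor_fn)(long long pos, long long count,
                              void *data, struct map *map);

/* Toy "filesystem": every extent is a fixed 4096-byte block. */
static int map_begin(long long pos, struct map *map)
{
        map->offset = pos & ~4095LL;
        map->length = 4096;
        return 0;
}

static long long apply(long long pos, long long length,
                       void *data, actor_fn actor)
{
        struct map map;
        long long end;

        if (map_begin(pos, &map))
                return -1;
        /* Clamp the request to the extent we were given. */
        end = map.offset + map.length;
        if (pos + length > end)
                length = end - pos;
        return actor(pos, length, data, &map);  /* bytes processed */
}

static long long count_actor(long long pos, long long count,
                             void *data, struct map *map)
{
        *(long long *)data += count;
        return count;
}

int main(void)
{
        long long pos = 100, len = 10000, done = 0, ret;

        while (len > 0) {
                ret = apply(pos, len, &done, count_actor);
                if (ret <= 0)
                        break;
                pos += ret;
                len -= ret;
        }
        printf("processed %lld bytes in extent-sized chunks\n", done);
        return 0;
}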
diff --git a/fs/isofs/compress.c b/fs/isofs/compress.c
index 2e4e834d1a98..44af14b2e916 100644
--- a/fs/isofs/compress.c
+++ b/fs/isofs/compress.c
@@ -81,7 +81,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
blocknum = block_start >> bufshift;
memset(bhs, 0, (needblocks + 1) * sizeof(struct buffer_head *));
haveblocks = isofs_get_blocks(inode, blocknum, bhs, needblocks);
- ll_rw_block(READ, haveblocks, bhs);
+ ll_rw_block(REQ_OP_READ, 0, haveblocks, bhs);
curbh = 0;
curpage = 0;
@@ -361,7 +361,6 @@ static int zisofs_readpage(struct file *file, struct page *page)
const struct address_space_operations zisofs_aops = {
.readpage = zisofs_readpage,
- /* No sync_page operation supported? */
/* No bmap operation supported */
};
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 131dedc920d8..ad0c745ebad7 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -29,18 +29,15 @@
#define BEQUIET
static int isofs_hashi(const struct dentry *parent, struct qstr *qstr);
-static int isofs_dentry_cmpi(const struct dentry *parent,
- const struct dentry *dentry,
+static int isofs_dentry_cmpi(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name);
#ifdef CONFIG_JOLIET
static int isofs_hashi_ms(const struct dentry *parent, struct qstr *qstr);
static int isofs_hash_ms(const struct dentry *parent, struct qstr *qstr);
-static int isofs_dentry_cmpi_ms(const struct dentry *parent,
- const struct dentry *dentry,
+static int isofs_dentry_cmpi_ms(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name);
-static int isofs_dentry_cmp_ms(const struct dentry *parent,
- const struct dentry *dentry,
+static int isofs_dentry_cmp_ms(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name);
#endif
@@ -174,7 +171,7 @@ struct iso9660_options{
* Compute the hash for the isofs name corresponding to the dentry.
*/
static int
-isofs_hashi_common(struct qstr *qstr, int ms)
+isofs_hashi_common(const struct dentry *dentry, struct qstr *qstr, int ms)
{
const char *name;
int len;
@@ -188,7 +185,7 @@ isofs_hashi_common(struct qstr *qstr, int ms)
len--;
}
- hash = init_name_hash();
+ hash = init_name_hash(dentry);
while (len--) {
c = tolower(*name++);
hash = partial_name_hash(c, hash);
@@ -231,11 +228,11 @@ static int isofs_dentry_cmp_common(
static int
isofs_hashi(const struct dentry *dentry, struct qstr *qstr)
{
- return isofs_hashi_common(qstr, 0);
+ return isofs_hashi_common(dentry, qstr, 0);
}
static int
-isofs_dentry_cmpi(const struct dentry *parent, const struct dentry *dentry,
+isofs_dentry_cmpi(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
return isofs_dentry_cmp_common(len, str, name, 0, 1);
@@ -246,7 +243,7 @@ isofs_dentry_cmpi(const struct dentry *parent, const struct dentry *dentry,
* Compute the hash for the isofs name corresponding to the dentry.
*/
static int
-isofs_hash_common(struct qstr *qstr, int ms)
+isofs_hash_common(const struct dentry *dentry, struct qstr *qstr, int ms)
{
const char *name;
int len;
@@ -258,7 +255,7 @@ isofs_hash_common(struct qstr *qstr, int ms)
len--;
}
- qstr->hash = full_name_hash(name, len);
+ qstr->hash = full_name_hash(dentry, name, len);
return 0;
}
@@ -266,24 +263,24 @@ isofs_hash_common(struct qstr *qstr, int ms)
static int
isofs_hash_ms(const struct dentry *dentry, struct qstr *qstr)
{
- return isofs_hash_common(qstr, 1);
+ return isofs_hash_common(dentry, qstr, 1);
}
static int
isofs_hashi_ms(const struct dentry *dentry, struct qstr *qstr)
{
- return isofs_hashi_common(qstr, 1);
+ return isofs_hashi_common(dentry, qstr, 1);
}
static int
-isofs_dentry_cmp_ms(const struct dentry *parent, const struct dentry *dentry,
+isofs_dentry_cmp_ms(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
return isofs_dentry_cmp_common(len, str, name, 1, 0);
}
static int
-isofs_dentry_cmpi_ms(const struct dentry *parent, const struct dentry *dentry,
+isofs_dentry_cmpi_ms(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
return isofs_dentry_cmp_common(len, str, name, 1, 1);
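The extra dentry parameter threaded through the isofs helpers above exists because init_name_hash() and full_name_hash() are now seeded with the parent dentry, so the same name hashes differently under different directories. A simplified userspace model (the mix step follows the kernel's partial_name_hash(); the final end_name_hash() fold is omitted):

#include <stdio.h>
#include <ctype.h>

static unsigned long init_hash(const void *salt)
{
        return (unsigned long)salt;     /* the parent acts as the seed */
}

/* Same mix as the kernel's partial_name_hash(). */
static unsigned long mix(unsigned char c, unsigned long prev)
{
        return (prev + (c << 4) + (c >> 4)) * 11;
}

static unsigned int name_hash(const void *salt, const char *name)
{
        unsigned long h = init_hash(salt);

        while (*name)
                h = mix(tolower((unsigned char)*name++), h);
        return (unsigned int)h;
}

int main(void)
{
        int dir_a, dir_b;       /* stand-ins for two parent dentries */

        /* The same name hashes differently under different parents. */
        printf("%u\n", name_hash(&dir_a, "readme"));
        printf("%u\n", name_hash(&dir_b, "readme"));
        return 0;
}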
diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c
index 7b543e6b6526..aee592767f1d 100644
--- a/fs/isofs/namei.c
+++ b/fs/isofs/namei.c
@@ -22,7 +22,7 @@ isofs_cmp(struct dentry *dentry, const char *compare, int dlen)
qstr.len = dlen;
if (likely(!dentry->d_op))
return dentry->d_name.len != dlen || memcmp(dentry->d_name.name, compare, dlen);
- return dentry->d_op->d_compare(NULL, NULL, dentry->d_name.len, dentry->d_name.name, &qstr);
+ return dentry->d_op->d_compare(NULL, dentry->d_name.len, dentry->d_name.name, &qstr);
}
/*
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 70078096117d..5bb565f9989c 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -124,7 +124,7 @@ static int journal_submit_commit_record(journal_t *journal,
struct commit_header *tmp;
struct buffer_head *bh;
int ret;
- struct timespec now = current_kernel_time();
+ struct timespec64 now = current_kernel_time64();
*cbh = NULL;
@@ -155,9 +155,9 @@ static int journal_submit_commit_record(journal_t *journal,
if (journal->j_flags & JBD2_BARRIER &&
!jbd2_has_feature_async_commit(journal))
- ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
+ ret = submit_bh(REQ_OP_WRITE, WRITE_SYNC | WRITE_FLUSH_FUA, bh);
else
- ret = submit_bh(WRITE_SYNC, bh);
+ ret = submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
*cbh = bh;
return ret;
@@ -718,7 +718,7 @@ start_journal_io:
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
bh->b_end_io = journal_end_buffer_io_sync;
- submit_bh(WRITE_SYNC, bh);
+ submit_bh(REQ_OP_WRITE, WRITE_SYNC, bh);
}
cond_resched();
stats.run.rs_blocks_logged += bufs;
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index e3ca4b4cac84..46261a6f902d 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -691,6 +691,7 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
{
int err = 0;
+ jbd2_might_wait_for_commit(journal);
read_lock(&journal->j_state_lock);
#ifdef CONFIG_JBD2_DEBUG
if (!tid_geq(journal->j_commit_request, tid)) {
@@ -1091,6 +1092,7 @@ static void jbd2_stats_proc_exit(journal_t *journal)
static journal_t * journal_init_common (void)
{
+ static struct lock_class_key jbd2_trans_commit_key;
journal_t *journal;
int err;
@@ -1126,6 +1128,9 @@ static journal_t * journal_init_common (void)
spin_lock_init(&journal->j_history_lock);
+ lockdep_init_map(&journal->j_trans_commit_map, "jbd2_handle",
+ &jbd2_trans_commit_key, 0);
+
return journal;
}
@@ -1346,15 +1351,15 @@ static int journal_reset(journal_t *journal)
return jbd2_journal_start_thread(journal);
}
-static int jbd2_write_superblock(journal_t *journal, int write_op)
+static int jbd2_write_superblock(journal_t *journal, int write_flags)
{
struct buffer_head *bh = journal->j_sb_buffer;
journal_superblock_t *sb = journal->j_superblock;
int ret;
- trace_jbd2_write_superblock(journal, write_op);
+ trace_jbd2_write_superblock(journal, write_flags);
if (!(journal->j_flags & JBD2_BARRIER))
- write_op &= ~(REQ_FUA | REQ_FLUSH);
+ write_flags &= ~(REQ_FUA | REQ_PREFLUSH);
lock_buffer(bh);
if (buffer_write_io_error(bh)) {
/*
@@ -1374,7 +1379,7 @@ static int jbd2_write_superblock(journal_t *journal, int write_op)
jbd2_superblock_csum_set(journal, sb);
get_bh(bh);
bh->b_end_io = end_buffer_write_sync;
- ret = submit_bh(write_op, bh);
+ ret = submit_bh(REQ_OP_WRITE, write_flags, bh);
wait_on_buffer(bh);
if (buffer_write_io_error(bh)) {
clear_buffer_write_io_error(bh);
@@ -1498,7 +1503,7 @@ static int journal_get_superblock(journal_t *journal)
J_ASSERT(bh != NULL);
if (!buffer_uptodate(bh)) {
- ll_rw_block(READ, 1, &bh);
+ ll_rw_block(REQ_OP_READ, 0, 1, &bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
printk(KERN_ERR
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 805bc6bcd8ab..02dd3360cb20 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -104,7 +104,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
if (!buffer_uptodate(bh) && !buffer_locked(bh)) {
bufs[nbufs++] = bh;
if (nbufs == MAXBUF) {
- ll_rw_block(READ, nbufs, bufs);
+ ll_rw_block(REQ_OP_READ, 0, nbufs, bufs);
journal_brelse_array(bufs, nbufs);
nbufs = 0;
}
@@ -113,7 +113,7 @@ static int do_readahead(journal_t *journal, unsigned int start)
}
if (nbufs)
- ll_rw_block(READ, nbufs, bufs);
+ ll_rw_block(REQ_OP_READ, 0, nbufs, bufs);
err = 0;
failed:
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 1749519b362f..b5bc3e249163 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -182,6 +182,8 @@ static int add_transaction_credits(journal_t *journal, int blocks,
int needed;
int total = blocks + rsv_blocks;
+ jbd2_might_wait_for_commit(journal);
+
/*
* If the current transaction is locked down for commit, wait
* for the lock to be released.
@@ -382,13 +384,11 @@ repeat:
read_unlock(&journal->j_state_lock);
current->journal_info = handle;
- lock_map_acquire(&handle->h_lockdep_map);
+ rwsem_acquire_read(&journal->j_trans_commit_map, 0, 0, _THIS_IP_);
jbd2_journal_free_transaction(new_transaction);
return 0;
}
-static struct lock_class_key jbd2_handle_key;
-
/* Allocate a new handle. This should probably be in a slab... */
static handle_t *new_handle(int nblocks)
{
@@ -398,9 +398,6 @@ static handle_t *new_handle(int nblocks)
handle->h_buffer_credits = nblocks;
handle->h_ref = 1;
- lockdep_init_map(&handle->h_lockdep_map, "jbd2_handle",
- &jbd2_handle_key, 0);
-
return handle;
}
@@ -672,7 +669,7 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
if (need_to_start)
jbd2_log_start_commit(journal, tid);
- lock_map_release(&handle->h_lockdep_map);
+ rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_);
handle->h_buffer_credits = nblocks;
ret = start_this_handle(journal, handle, gfp_mask);
return ret;
@@ -700,6 +697,8 @@ void jbd2_journal_lock_updates(journal_t *journal)
{
DEFINE_WAIT(wait);
+ jbd2_might_wait_for_commit(journal);
+
write_lock(&journal->j_state_lock);
++journal->j_barrier_count;
@@ -1750,11 +1749,11 @@ int jbd2_journal_stop(handle_t *handle)
wake_up(&journal->j_wait_transaction_locked);
}
+ rwsem_release(&journal->j_trans_commit_map, 1, _THIS_IP_);
+
if (wait_for_commit)
err = jbd2_log_wait_commit(journal, tid);
- lock_map_release(&handle->h_lockdep_map);
-
if (handle->h_rsv_handle)
jbd2_journal_free_reserved(handle->h_rsv_handle);
free_and_exit:
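The jbd2 hunks above drop the per-handle lockdep map in favour of a single rwsem-style map on the journal, held for read while a handle runs, and add jbd2_might_wait_for_commit() at every point that can block on a commit. The matching header change is not part of this hunk; judging from the call sites it presumably looks something like this sketch:

/*
 * Assumed shape of the companion header change (a sketch, not part of
 * this diff): under lockdep, "might wait for a commit" is expressed as
 * a momentary acquire/release of the same per-journal map that running
 * handles hold for read, so a handle held across such a wait shows up
 * as a self-deadlock.
 */
#define jbd2_might_wait_for_commit(journal) \
        do { \
                rwsem_acquire(&(journal)->j_trans_commit_map, 0, 0, _THIS_IP_); \
                rwsem_release(&(journal)->j_trans_commit_map, 1, _THIS_IP_); \
        } while (0)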
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
index 84c4bf3631a2..30eb33ff8189 100644
--- a/fs/jffs2/dir.c
+++ b/fs/jffs2/dir.c
@@ -81,6 +81,7 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
struct jffs2_full_dirent *fd = NULL, *fd_list;
uint32_t ino = 0;
struct inode *inode = NULL;
+ unsigned int nhash;
jffs2_dbg(1, "jffs2_lookup()\n");
@@ -89,11 +90,14 @@ static struct dentry *jffs2_lookup(struct inode *dir_i, struct dentry *target,
dir_f = JFFS2_INODE_INFO(dir_i);
+ /* The 'nhash' on the fd_list is not the same as the dentry hash */
+ nhash = full_name_hash(NULL, target->d_name.name, target->d_name.len);
+
mutex_lock(&dir_f->sem);
/* NB: The 2.2 backport will need to explicitly check for '.' and '..' here */
- for (fd_list = dir_f->dents; fd_list && fd_list->nhash <= target->d_name.hash; fd_list = fd_list->next) {
- if (fd_list->nhash == target->d_name.hash &&
+ for (fd_list = dir_f->dents; fd_list && fd_list->nhash <= nhash; fd_list = fd_list->next) {
+ if (fd_list->nhash == nhash &&
(!fd || fd_list->version > fd->version) &&
strlen(fd_list->name) == target->d_name.len &&
!strncmp(fd_list->name, target->d_name.name, target->d_name.len)) {
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index bfebbf13698c..06a71dbd4833 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -674,7 +674,7 @@ static inline int read_direntry(struct jffs2_sb_info *c, struct jffs2_raw_node_r
}
}
- fd->nhash = full_name_hash(fd->name, rd->nsize);
+ fd->nhash = full_name_hash(NULL, fd->name, rd->nsize);
fd->next = NULL;
fd->name[rd->nsize] = '\0';
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 9ad5ba4b299b..90431dd613b8 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -1100,7 +1100,7 @@ static int jffs2_scan_dirent_node(struct jffs2_sb_info *c, struct jffs2_eraseblo
fd->next = NULL;
fd->version = je32_to_cpu(rd->version);
fd->ino = je32_to_cpu(rd->ino);
- fd->nhash = full_name_hash(fd->name, checkedlen);
+ fd->nhash = full_name_hash(NULL, fd->name, checkedlen);
fd->type = rd->type;
jffs2_add_fd_to_list(c, fd, &ic->scan_dents);
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index bc5385471a6e..be7c8a6a5748 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -476,7 +476,7 @@ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eras
fd->next = NULL;
fd->version = je32_to_cpu(spd->version);
fd->ino = je32_to_cpu(spd->ino);
- fd->nhash = full_name_hash(fd->name, checkedlen);
+ fd->nhash = full_name_hash(NULL, fd->name, checkedlen);
fd->type = spd->type;
jffs2_add_fd_to_list(c, fd, &ic->scan_dents);
diff --git a/fs/jffs2/write.c b/fs/jffs2/write.c
index 7fb187ab2682..cda9a361368e 100644
--- a/fs/jffs2/write.c
+++ b/fs/jffs2/write.c
@@ -245,7 +245,7 @@ struct jffs2_full_dirent *jffs2_write_dirent(struct jffs2_sb_info *c, struct jff
fd->version = je32_to_cpu(rd->version);
fd->ino = je32_to_cpu(rd->ino);
- fd->nhash = full_name_hash(name, namelen);
+ fd->nhash = full_name_hash(NULL, name, namelen);
fd->type = rd->type;
memcpy(fd->name, name, namelen);
fd->name[namelen]=0;
@@ -598,7 +598,7 @@ int jffs2_do_unlink(struct jffs2_sb_info *c, struct jffs2_inode_info *dir_f,
jffs2_add_fd_to_list(c, fd, &dir_f->dents);
mutex_unlock(&dir_f->sem);
} else {
- uint32_t nhash = full_name_hash(name, namelen);
+ uint32_t nhash = full_name_hash(NULL, name, namelen);
fd = dir_f->dents;
/* We don't actually want to reserve any space, but we do
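jffs2 deliberately passes a NULL salt in the calls above: nhash values are compared against (and derived from) what was parsed off flash, so they must be reproducible across mounts, which a dentry-pointer salt is not. A toy program making the point (hash mixing simplified for illustration):

#include <stdio.h>
#include <stdlib.h>

/* Simplified salted name hash, as in the model further up. */
static unsigned int name_hash(const void *salt, const char *s)
{
        unsigned long h = (unsigned long)salt;

        while (*s)
                h = (h + ((unsigned char)*s++ << 4)) * 11;
        return (unsigned int)h;
}

int main(void)
{
        void *p = malloc(1);    /* stand-in for a dentry address */

        /* Varies per run under ASLR -- unusable as an on-media key. */
        printf("salted:   %u\n", name_hash(p, "file.txt"));
        /* Stable across boots -- what jffs2 keeps as nhash. */
        printf("unsalted: %u\n", name_hash(NULL, "file.txt"));
        free(p);
        return 0;
}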
diff --git a/fs/jfs/jfs_debug.c b/fs/jfs/jfs_debug.c
index dd824d9b0b1a..a37eb5f8cbc0 100644
--- a/fs/jfs/jfs_debug.c
+++ b/fs/jfs/jfs_debug.c
@@ -58,7 +58,6 @@ static ssize_t jfs_loglevel_proc_write(struct file *file,
}
static const struct file_operations jfs_loglevel_proc_fops = {
- .owner = THIS_MODULE,
.open = jfs_loglevel_proc_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 63759d723920..a21ea8b3e5fa 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -2002,12 +2002,13 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
bio->bi_end_io = lbmIODone;
bio->bi_private = bp;
+ bio_set_op_attrs(bio, REQ_OP_READ, READ_SYNC);
/*check if journaling to disk has been disabled*/
if (log->no_integrity) {
bio->bi_iter.bi_size = 0;
lbmIODone(bio);
} else {
- submit_bio(READ_SYNC, bio);
+ submit_bio(bio);
}
wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD));
@@ -2145,13 +2146,14 @@ static void lbmStartIO(struct lbuf * bp)
bio->bi_end_io = lbmIODone;
bio->bi_private = bp;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_SYNC);
/* check if journaling to disk has been disabled */
if (log->no_integrity) {
bio->bi_iter.bi_size = 0;
lbmIODone(bio);
} else {
- submit_bio(WRITE_SYNC, bio);
+ submit_bio(bio);
INCREMENT(lmStat.submitted);
}
}
@@ -2515,7 +2517,6 @@ static int jfs_lmstats_proc_open(struct inode *inode, struct file *file)
}
const struct file_operations jfs_lmstats_proc_fops = {
- .owner = THIS_MODULE,
.open = jfs_lmstats_proc_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index b60e015cc757..489aaa1403e5 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -411,7 +411,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
inc_io(page);
if (!bio->bi_iter.bi_size)
goto dump_bio;
- submit_bio(WRITE, bio);
+ submit_bio(bio);
nr_underway++;
bio = NULL;
} else
@@ -434,6 +434,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
bio->bi_end_io = metapage_write_end_io;
bio->bi_private = page;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
/* Don't call bio_add_page yet, we may add to this vec */
bio_offset = offset;
@@ -448,7 +449,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
if (!bio->bi_iter.bi_size)
goto dump_bio;
- submit_bio(WRITE, bio);
+ submit_bio(bio);
nr_underway++;
}
if (redirty)
@@ -506,7 +507,7 @@ static int metapage_readpage(struct file *fp, struct page *page)
insert_metapage(page, NULL);
inc_io(page);
if (bio)
- submit_bio(READ, bio);
+ submit_bio(bio);
bio = bio_alloc(GFP_NOFS, 1);
bio->bi_bdev = inode->i_sb->s_bdev;
@@ -514,6 +515,7 @@ static int metapage_readpage(struct file *fp, struct page *page)
pblock << (inode->i_blkbits - 9);
bio->bi_end_io = metapage_read_end_io;
bio->bi_private = page;
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
len = xlen << inode->i_blkbits;
offset = block_offset << inode->i_blkbits;
if (bio_add_page(bio, page, len, offset) < len)
@@ -523,7 +525,7 @@ static int metapage_readpage(struct file *fp, struct page *page)
block_offset++;
}
if (bio)
- submit_bio(READ, bio);
+ submit_bio(bio);
else
unlock_page(page);
@@ -828,7 +830,6 @@ static int jfs_mpstat_proc_open(struct inode *inode, struct file *file)
}
const struct file_operations jfs_mpstat_proc_fops = {
- .owner = THIS_MODULE,
.open = jfs_mpstat_proc_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index eddf2b6eda85..2e58978d6f45 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -3040,7 +3040,6 @@ static int jfs_txanchor_proc_open(struct inode *inode, struct file *file)
}
const struct file_operations jfs_txanchor_proc_fops = {
- .owner = THIS_MODULE,
.open = jfs_txanchor_proc_open,
.read = seq_read,
.llseek = seq_lseek,
@@ -3081,7 +3080,6 @@ static int jfs_txstats_proc_open(struct inode *inode, struct file *file)
}
const struct file_operations jfs_txstats_proc_fops = {
- .owner = THIS_MODULE,
.open = jfs_txstats_proc_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c
index 5ad7748860ce..5cde6d2fcfca 100644
--- a/fs/jfs/jfs_xtree.c
+++ b/fs/jfs/jfs_xtree.c
@@ -3894,7 +3894,6 @@ static int jfs_xtstat_proc_open(struct inode *inode, struct file *file)
}
const struct file_operations jfs_xtstat_proc_fops = {
- .owner = THIS_MODULE,
.open = jfs_xtstat_proc_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 539deddecbb0..814b0c58016c 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -1564,7 +1564,7 @@ static int jfs_ci_hash(const struct dentry *dir, struct qstr *this)
unsigned long hash;
int i;
- hash = init_name_hash();
+ hash = init_name_hash(dir);
for (i=0; i < this->len; i++)
hash = partial_name_hash(tolower(this->name[i]), hash);
this->hash = end_name_hash(hash);
@@ -1572,7 +1572,7 @@ static int jfs_ci_hash(const struct dentry *dir, struct qstr *this)
return 0;
}
-static int jfs_ci_compare(const struct dentry *parent, const struct dentry *dentry,
+static int jfs_ci_compare(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
int i, result = 1;
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 8a652404eef6..e57174d43683 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -336,11 +336,11 @@ struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn)
*/
static unsigned int kernfs_name_hash(const char *name, const void *ns)
{
- unsigned long hash = init_name_hash();
+ unsigned long hash = init_name_hash(ns);
unsigned int len = strlen(name);
while (len--)
hash = partial_name_hash(*name++, hash);
- hash = (end_name_hash(hash) ^ hash_ptr((void *)ns, 31));
+ hash = end_name_hash(hash);
hash &= 0x7fffffffU;
/* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */
if (hash < 2)
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index 63534f5f9073..b3d73ad52b22 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -152,6 +152,8 @@ static int kernfs_fill_super(struct super_block *sb, unsigned long magic)
struct dentry *root;
info->sb = sb;
+ /* Userspace would break if executables or devices appear on sysfs */
+ sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
sb->s_blocksize = PAGE_SIZE;
sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = magic;
@@ -241,7 +243,8 @@ struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
info->root = root;
info->ns = ns;
- sb = sget(fs_type, kernfs_test_super, kernfs_set_super, flags, info);
+ sb = sget_userns(fs_type, kernfs_test_super, kernfs_set_super, flags,
+ &init_user_ns, info);
if (IS_ERR(sb) || sb->s_fs_info != info)
kfree(info);
if (IS_ERR(sb))
diff --git a/fs/lockd/procfs.c b/fs/lockd/procfs.c
index 2a0a98480e39..8f72cb237ef3 100644
--- a/fs/lockd/procfs.c
+++ b/fs/lockd/procfs.c
@@ -64,7 +64,6 @@ static const struct file_operations lockd_end_grace_operations = {
.read = nlm_end_grace_read,
.llseek = default_llseek,
.release = simple_transaction_release,
- .owner = THIS_MODULE,
};
int __init
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index cc26f8f215f5..a8329cc47dec 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -14,7 +14,7 @@
#define PAGE_OFS(ofs) ((ofs) & (PAGE_SIZE-1))
-static int sync_request(struct page *page, struct block_device *bdev, int rw)
+static int sync_request(struct page *page, struct block_device *bdev, int op)
{
struct bio bio;
struct bio_vec bio_vec;
@@ -29,8 +29,9 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
bio.bi_bdev = bdev;
bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9);
bio.bi_iter.bi_size = PAGE_SIZE;
+ bio_set_op_attrs(&bio, op, 0);
- return submit_bio_wait(rw, &bio);
+ return submit_bio_wait(&bio);
}
static int bdev_readpage(void *_sb, struct page *page)
@@ -95,8 +96,9 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
bio->bi_iter.bi_sector = ofs >> 9;
bio->bi_private = sb;
bio->bi_end_io = writeseg_end_io;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
atomic_inc(&super->s_pending_writes);
- submit_bio(WRITE, bio);
+ submit_bio(bio);
ofs += i * PAGE_SIZE;
index += i;
@@ -122,8 +124,9 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
bio->bi_iter.bi_sector = ofs >> 9;
bio->bi_private = sb;
bio->bi_end_io = writeseg_end_io;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
atomic_inc(&super->s_pending_writes);
- submit_bio(WRITE, bio);
+ submit_bio(bio);
return 0;
}
@@ -185,8 +188,9 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
bio->bi_iter.bi_sector = ofs >> 9;
bio->bi_private = sb;
bio->bi_end_io = erase_end_io;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
atomic_inc(&super->s_pending_writes);
- submit_bio(WRITE, bio);
+ submit_bio(bio);
ofs += i * PAGE_SIZE;
index += i;
@@ -206,8 +210,9 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
bio->bi_iter.bi_sector = ofs >> 9;
bio->bi_private = sb;
bio->bi_end_io = erase_end_io;
+ bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
atomic_inc(&super->s_pending_writes);
- submit_bio(WRITE, bio);
+ submit_bio(bio);
return 0;
}
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 2d5336bd4efd..9568064ecadf 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -95,7 +95,7 @@ static int beyond_eof(struct inode *inode, loff_t bix)
* of each character and pick a prime nearby, preferably a bit-sparse
* one.
*/
-static u32 hash_32(const char *s, int len, u32 seed)
+static u32 logfs_hash_32(const char *s, int len, u32 seed)
{
u32 hash = seed;
int i;
@@ -156,10 +156,10 @@ static pgoff_t hash_index(u32 hash, int round)
static struct page *logfs_get_dd_page(struct inode *dir, struct dentry *dentry)
{
- struct qstr *name = &dentry->d_name;
+ const struct qstr *name = &dentry->d_name;
struct page *page;
struct logfs_disk_dentry *dd;
- u32 hash = hash_32(name->name, name->len, 0);
+ u32 hash = logfs_hash_32(name->name, name->len, 0);
pgoff_t index;
int round;
@@ -323,7 +323,7 @@ static int logfs_readdir(struct file *file, struct dir_context *ctx)
return 0;
}
-static void logfs_set_name(struct logfs_disk_dentry *dd, struct qstr *name)
+static void logfs_set_name(struct logfs_disk_dentry *dd, const struct qstr *name)
{
dd->namelen = cpu_to_be16(name->len);
memcpy(dd->name, name->name, name->len);
@@ -370,7 +370,7 @@ static int logfs_write_dir(struct inode *dir, struct dentry *dentry,
{
struct page *page;
struct logfs_disk_dentry *dd;
- u32 hash = hash_32(dentry->d_name.name, dentry->d_name.len, 0);
+ u32 hash = logfs_hash_32(dentry->d_name.name, dentry->d_name.len, 0);
pgoff_t index;
int round, err;
diff --git a/fs/mpage.c b/fs/mpage.c
index eedc644b78d7..d2413af0823a 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -50,17 +50,18 @@ static void mpage_end_io(struct bio *bio)
bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
- page_endio(page, bio_data_dir(bio), bio->bi_error);
+ page_endio(page, op_is_write(bio_op(bio)), bio->bi_error);
}
bio_put(bio);
}
-static struct bio *mpage_bio_submit(int rw, struct bio *bio)
+static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
{
bio->bi_end_io = mpage_end_io;
- guard_bio_eod(rw, bio);
- submit_bio(rw, bio);
+ bio_set_op_attrs(bio, op, op_flags);
+ guard_bio_eod(op, bio);
+ submit_bio(bio);
return NULL;
}
@@ -71,6 +72,8 @@ mpage_alloc(struct block_device *bdev,
{
struct bio *bio;
+ /* Restrict the given (page cache) mask for slab allocations */
+ gfp_flags &= GFP_KERNEL;
bio = bio_alloc(gfp_flags, nr_vecs);
if (bio == NULL && (current->flags & PF_MEMALLOC)) {
@@ -269,7 +272,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
* This page will go to BIO. Do we need to send this BIO off first?
*/
if (bio && (*last_block_in_bio != blocks[0] - 1))
- bio = mpage_bio_submit(READ, bio);
+ bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
alloc_new:
if (bio == NULL) {
@@ -286,7 +289,7 @@ alloc_new:
length = first_hole << blkbits;
if (bio_add_page(bio, page, length, 0) < length) {
- bio = mpage_bio_submit(READ, bio);
+ bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
goto alloc_new;
}
@@ -294,7 +297,7 @@ alloc_new:
nblocks = map_bh->b_size >> blkbits;
if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
(first_hole != blocks_per_page))
- bio = mpage_bio_submit(READ, bio);
+ bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
else
*last_block_in_bio = blocks[blocks_per_page - 1];
out:
@@ -302,7 +305,7 @@ out:
confused:
if (bio)
- bio = mpage_bio_submit(READ, bio);
+ bio = mpage_bio_submit(REQ_OP_READ, 0, bio);
if (!PageUptodate(page))
block_read_full_page(page, get_block);
else
@@ -362,7 +365,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
sector_t last_block_in_bio = 0;
struct buffer_head map_bh;
unsigned long first_logical_block = 0;
- gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
+ gfp_t gfp = readahead_gfp_mask(mapping);
map_bh.b_state = 0;
map_bh.b_size = 0;
@@ -384,7 +387,7 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages,
}
BUG_ON(!list_empty(pages));
if (bio)
- mpage_bio_submit(READ, bio);
+ mpage_bio_submit(REQ_OP_READ, 0, bio);
return 0;
}
EXPORT_SYMBOL(mpage_readpages);
@@ -405,7 +408,7 @@ int mpage_readpage(struct page *page, get_block_t get_block)
bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
&map_bh, &first_logical_block, get_block, gfp);
if (bio)
- mpage_bio_submit(READ, bio);
+ mpage_bio_submit(REQ_OP_READ, 0, bio);
return 0;
}
EXPORT_SYMBOL(mpage_readpage);
@@ -486,7 +489,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
struct buffer_head map_bh;
loff_t i_size = i_size_read(inode);
int ret = 0;
- int wr = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
+ int op_flags = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : 0);
if (page_has_buffers(page)) {
struct buffer_head *head = page_buffers(page);
@@ -595,7 +598,7 @@ page_is_mapped:
* This page will go to BIO. Do we need to send this BIO off first?
*/
if (bio && mpd->last_block_in_bio != blocks[0] - 1)
- bio = mpage_bio_submit(wr, bio);
+ bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
alloc_new:
if (bio == NULL) {
@@ -622,7 +625,7 @@ alloc_new:
wbc_account_io(wbc, page, PAGE_SIZE);
length = first_unmapped << blkbits;
if (bio_add_page(bio, page, length, 0) < length) {
- bio = mpage_bio_submit(wr, bio);
+ bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
goto alloc_new;
}
@@ -632,7 +635,7 @@ alloc_new:
set_page_writeback(page);
unlock_page(page);
if (boundary || (first_unmapped != blocks_per_page)) {
- bio = mpage_bio_submit(wr, bio);
+ bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
if (boundary_block) {
write_boundary_block(boundary_bdev,
boundary_block, 1 << blkbits);
@@ -644,7 +647,7 @@ alloc_new:
confused:
if (bio)
- bio = mpage_bio_submit(wr, bio);
+ bio = mpage_bio_submit(REQ_OP_WRITE, op_flags, bio);
if (mpd->use_writepage) {
ret = mapping->a_ops->writepage(page, wbc);
@@ -701,9 +704,9 @@ mpage_writepages(struct address_space *mapping,
ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
if (mpd.bio) {
- int wr = (wbc->sync_mode == WB_SYNC_ALL ?
- WRITE_SYNC : WRITE);
- mpage_bio_submit(wr, mpd.bio);
+ int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
+ WRITE_SYNC : 0);
+ mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
}
}
blk_finish_plug(&plug);
@@ -722,9 +725,9 @@ int mpage_writepage(struct page *page, get_block_t get_block,
};
int ret = __mpage_writepage(page, wbc, &mpd);
if (mpd.bio) {
- int wr = (wbc->sync_mode == WB_SYNC_ALL ?
- WRITE_SYNC : WRITE);
- mpage_bio_submit(wr, mpd.bio);
+ int op_flags = (wbc->sync_mode == WB_SYNC_ALL ?
+ WRITE_SYNC : 0);
+ mpage_bio_submit(REQ_OP_WRITE, op_flags, mpd.bio);
}
return ret;
}
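The recurring pattern in this mpage conversion, as in the jbd2 and logfs hunks earlier, is that the operation (read/write) and its flags are chosen separately and stamped onto the bio with bio_set_op_attrs() before a bare submit_bio(bio). The gist of packing an op and flags into one request word, modelled in plain C; the real bi_opf layout differs and all names here are illustrative:

#include <stdio.h>

enum req_op { REQ_OP_READ, REQ_OP_WRITE };      /* illustrative */
#define REQ_SYNC        (1u << 0)

#define OP_BITS         8
#define OP_MASK         ((1u << OP_BITS) - 1)

struct request {
        unsigned int cmd;       /* op in the low bits, flags above */
};

/* In the same spirit as bio_set_op_attrs(bio, op, flags). */
static void set_op_attrs(struct request *rq, enum req_op op,
                         unsigned int flags)
{
        rq->cmd = (flags << OP_BITS) | op;
}

static enum req_op rq_op(const struct request *rq)
{
        return rq->cmd & OP_MASK;
}

int main(void)
{
        struct request rq;

        set_op_attrs(&rq, REQ_OP_WRITE, REQ_SYNC);
        printf("op=%d sync=%u\n", rq_op(&rq),
               (rq.cmd >> OP_BITS) & REQ_SYNC);
        return 0;
}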
diff --git a/fs/namei.c b/fs/namei.c
index 70580ab1445c..adb04146df09 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -36,6 +36,7 @@
#include <linux/posix_acl.h>
#include <linux/hash.h>
#include <linux/bitops.h>
+#include <linux/init_task.h>
#include <asm/uaccess.h>
#include "internal.h"
@@ -409,6 +410,14 @@ int __inode_permission(struct inode *inode, int mask)
* Nobody gets write access to an immutable file.
*/
if (IS_IMMUTABLE(inode))
+ return -EPERM;
+
+ /*
+ * Updating mtime will likely cause i_uid and i_gid to be
+ * written back improperly if their true value is unknown
+ * to the vfs.
+ */
+ if (HAS_UNMAPPED_ID(inode))
return -EACCES;
}
@@ -901,6 +910,7 @@ static inline int may_follow_link(struct nameidata *nd)
{
const struct inode *inode;
const struct inode *parent;
+ kuid_t puid;
if (!sysctl_protected_symlinks)
return 0;
@@ -916,7 +926,8 @@ static inline int may_follow_link(struct nameidata *nd)
return 0;
/* Allowed if parent directory and link owner match. */
- if (uid_eq(parent->i_uid, inode->i_uid))
+ puid = parent->i_uid;
+ if (uid_valid(puid) && uid_eq(puid, inode->i_uid))
return 0;
if (nd->flags & LOOKUP_RCU)
@@ -1089,6 +1100,7 @@ static int follow_automount(struct path *path, struct nameidata *nd,
bool *need_mntput)
{
struct vfsmount *mnt;
+ const struct cred *old_cred;
int err;
if (!path->dentry->d_op || !path->dentry->d_op->d_automount)
@@ -1110,11 +1122,16 @@ static int follow_automount(struct path *path, struct nameidata *nd,
path->dentry->d_inode)
return -EISDIR;
+ if (path->dentry->d_sb->s_user_ns != &init_user_ns)
+ return -EACCES;
+
nd->total_link_count++;
if (nd->total_link_count >= 40)
return -ELOOP;
+ old_cred = override_creds(&init_cred);
mnt = path->dentry->d_op->d_automount(path);
+ revert_creds(old_cred);
if (IS_ERR(mnt)) {
/*
* The filesystem is allowed to return -EISDIR here to indicate
@@ -1449,9 +1466,8 @@ static int follow_dotdot(struct nameidata *nd)
}
/*
- * This looks up the name in dcache, possibly revalidates the old dentry and
- * allocates a new one if not found or not valid. In the need_lookup argument
- * returns whether i_op->lookup is necessary.
+ * This looks up the name in dcache and possibly revalidates the found dentry.
+ * NULL is returned if the dentry does not exist in the cache.
*/
static struct dentry *lookup_dcache(const struct qstr *name,
struct dentry *dir,
@@ -1890,9 +1906,9 @@ static inline unsigned int fold_hash(unsigned long x, unsigned long y)
* payload bytes, to match the way that hash_name() iterates until it
* finds the delimiter after the name.
*/
-unsigned int full_name_hash(const char *name, unsigned int len)
+unsigned int full_name_hash(const void *salt, const char *name, unsigned int len)
{
- unsigned long a, x = 0, y = 0;
+ unsigned long a, x = 0, y = (unsigned long)salt;
for (;;) {
if (!len)
@@ -1911,15 +1927,19 @@ done:
EXPORT_SYMBOL(full_name_hash);
/* Return the "hash_len" (hash and length) of a null-terminated string */
-u64 hashlen_string(const char *name)
+u64 hashlen_string(const void *salt, const char *name)
{
- unsigned long a = 0, x = 0, y = 0, adata, mask, len;
+ unsigned long a = 0, x = 0, y = (unsigned long)salt;
+ unsigned long adata, mask, len;
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
- len = -sizeof(unsigned long);
+ len = 0;
+ goto inside;
+
do {
HASH_MIX(x, y, a);
len += sizeof(unsigned long);
+inside:
a = load_unaligned_zeropad(name+len);
} while (!has_zero(a, &adata, &constants));
@@ -1935,15 +1955,19 @@ EXPORT_SYMBOL(hashlen_string);
* Calculate the length and hash of the path component, and
* return the "hash_len" as the result.
*/
-static inline u64 hash_name(const char *name)
+static inline u64 hash_name(const void *salt, const char *name)
{
- unsigned long a = 0, b, x = 0, y = 0, adata, bdata, mask, len;
+ unsigned long a = 0, b, x = 0, y = (unsigned long)salt;
+ unsigned long adata, bdata, mask, len;
const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
- len = -sizeof(unsigned long);
+ len = 0;
+ goto inside;
+
do {
HASH_MIX(x, y, a);
len += sizeof(unsigned long);
+inside:
a = load_unaligned_zeropad(name+len);
b = a ^ REPEAT_BYTE('/');
} while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants)));
@@ -1959,9 +1983,9 @@ static inline u64 hash_name(const char *name)
#else /* !CONFIG_DCACHE_WORD_ACCESS: Slow, byte-at-a-time version */
/* Return the hash of a string of known length */
-unsigned int full_name_hash(const char *name, unsigned int len)
+unsigned int full_name_hash(const void *salt, const char *name, unsigned int len)
{
- unsigned long hash = init_name_hash();
+ unsigned long hash = init_name_hash(salt);
while (len--)
hash = partial_name_hash((unsigned char)*name++, hash);
return end_name_hash(hash);
@@ -1969,9 +1993,9 @@ unsigned int full_name_hash(const char *name, unsigned int len)
EXPORT_SYMBOL(full_name_hash);
/* Return the "hash_len" (hash and length) of a null-terminated string */
-u64 hashlen_string(const char *name)
+u64 hashlen_string(const void *salt, const char *name)
{
- unsigned long hash = init_name_hash();
+ unsigned long hash = init_name_hash(salt);
unsigned long len = 0, c;
c = (unsigned char)*name;
@@ -1988,9 +2012,9 @@ EXPORT_SYMBOL(hashlen_string);
* We know there's a real path component here of at least
* one character.
*/
-static inline u64 hash_name(const char *name)
+static inline u64 hash_name(const void *salt, const char *name)
{
- unsigned long hash = init_name_hash();
+ unsigned long hash = init_name_hash(salt);
unsigned long len = 0, c;
c = (unsigned char)*name;
@@ -2030,7 +2054,7 @@ static int link_path_walk(const char *name, struct nameidata *nd)
if (err)
return err;
- hash_len = hash_name(name);
+ hash_len = hash_name(nd->path.dentry, name);
type = LAST_NORM;
if (name[0] == '.') switch (hashlen_len(hash_len)) {
@@ -2389,33 +2413,6 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
EXPORT_SYMBOL(vfs_path_lookup);
/**
- * lookup_hash - lookup single pathname component on already hashed name
- * @name: name and hash to lookup
- * @base: base directory to lookup from
- *
- * The name must have been verified and hashed (see lookup_one_len()). Using
- * this after just full_name_hash() is unsafe.
- *
- * This function also doesn't check for search permission on base directory.
- *
- * Use lookup_one_len_unlocked() instead, unless you really know what you are
- * doing.
- *
- * Do not hold i_mutex; this helper takes i_mutex if necessary.
- */
-struct dentry *lookup_hash(const struct qstr *name, struct dentry *base)
-{
- struct dentry *ret;
-
- ret = lookup_dcache(name, base, 0);
- if (!ret)
- ret = lookup_slow(name, base, 0);
-
- return ret;
-}
-EXPORT_SYMBOL(lookup_hash);
-
-/**
* lookup_one_len - filesystem helper to lookup single pathname component
* @name: pathname component to lookup
* @base: base directory to lookup from
@@ -2436,7 +2433,7 @@ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
this.name = name;
this.len = len;
- this.hash = full_name_hash(name, len);
+ this.hash = full_name_hash(base, name, len);
if (!len)
return ERR_PTR(-EACCES);
@@ -2486,10 +2483,11 @@ struct dentry *lookup_one_len_unlocked(const char *name,
struct qstr this;
unsigned int c;
int err;
+ struct dentry *ret;
this.name = name;
this.len = len;
- this.hash = full_name_hash(name, len);
+ this.hash = full_name_hash(base, name, len);
if (!len)
return ERR_PTR(-EACCES);
@@ -2517,7 +2515,10 @@ struct dentry *lookup_one_len_unlocked(const char *name,
if (err)
return ERR_PTR(err);
- return lookup_hash(&this, base);
+ ret = lookup_dcache(&this, base, 0);
+ if (!ret)
+ ret = lookup_slow(&this, base, 0);
+ return ret;
}
EXPORT_SYMBOL(lookup_one_len_unlocked);
@@ -2757,10 +2758,11 @@ EXPORT_SYMBOL(__check_sticky);
* c. have CAP_FOWNER capability
* 6. If the victim is append-only or immutable we can't do anything with
* links pointing to it.
- * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
- * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
- * 9. We can't remove a root or mountpoint.
- * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
+ * 7. If the victim has an unknown uid or gid we can't change the inode.
+ * 8. If we were asked to remove a directory and victim isn't one - ENOTDIR.
+ * 9. If we were asked to remove a non-directory and victim isn't one - EISDIR.
+ * 10. We can't remove a root or mountpoint.
+ * 11. We don't allow removal of NFS sillyrenamed files; it's handled by
* nfs_async_unlink().
*/
static int may_delete(struct inode *dir, struct dentry *victim, bool isdir)
@@ -2782,7 +2784,7 @@ static int may_delete(struct inode *dir, struct dentry *victim, bool isdir)
return -EPERM;
if (check_sticky(dir, inode) || IS_APPEND(inode) ||
- IS_IMMUTABLE(inode) || IS_SWAPFILE(inode))
+ IS_IMMUTABLE(inode) || IS_SWAPFILE(inode) || HAS_UNMAPPED_ID(inode))
return -EPERM;
if (isdir) {
if (!d_is_dir(victim))
@@ -2803,16 +2805,22 @@ static int may_delete(struct inode *dir, struct dentry *victim, bool isdir)
* 1. We can't do it if child already exists (open has special treatment for
* this case, but since we are inlined it's OK)
* 2. We can't do it if dir is read-only (done in permission())
- * 3. We should have write and exec permissions on dir
- * 4. We can't do it if dir is immutable (done in permission())
+ * 3. We can't do it if the fs can't represent the fsuid or fsgid.
+ * 4. We should have write and exec permissions on dir
+ * 5. We can't do it if dir is immutable (done in permission())
*/
static inline int may_create(struct inode *dir, struct dentry *child)
{
+ struct user_namespace *s_user_ns;
audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE);
if (child->d_inode)
return -EEXIST;
if (IS_DEADDIR(dir))
return -ENOENT;
+ s_user_ns = dir->i_sb->s_user_ns;
+ if (!kuid_has_mapping(s_user_ns, current_fsuid()) ||
+ !kgid_has_mapping(s_user_ns, current_fsgid()))
+ return -EOVERFLOW;
return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}
@@ -2881,6 +2889,12 @@ int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
}
EXPORT_SYMBOL(vfs_create);
+bool may_open_dev(const struct path *path)
+{
+ return !(path->mnt->mnt_flags & MNT_NODEV) &&
+ !(path->mnt->mnt_sb->s_iflags & SB_I_NODEV);
+}
+
static int may_open(struct path *path, int acc_mode, int flag)
{
struct dentry *dentry = path->dentry;
@@ -2899,7 +2913,7 @@ static int may_open(struct path *path, int acc_mode, int flag)
break;
case S_IFBLK:
case S_IFCHR:
- if (path->mnt->mnt_flags & MNT_NODEV)
+ if (!may_open_dev(path))
return -EACCES;
/*FALLTHRU*/
case S_IFIFO:
@@ -4151,6 +4165,13 @@ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_de
*/
if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
return -EPERM;
+ /*
+ * Updating the link count will likely cause i_uid and i_gid to
+ * be written back improperly if their true value is unknown to
+ * the vfs.
+ */
+ if (HAS_UNMAPPED_ID(inode))
+ return -EPERM;
if (!dir->i_op->link)
return -EPERM;
if (S_ISDIR(inode->i_mode))
@@ -4328,7 +4349,7 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
* Check source == target.
* On overlayfs need to look at underlying inodes.
*/
- if (vfs_select_inode(old_dentry, 0) == vfs_select_inode(new_dentry, 0))
+ if (d_real_inode(old_dentry) == d_real_inode(new_dentry))
return 0;
error = may_delete(old_dir, old_dentry, is_dir);
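The namei.c changes above thread a salt (normally the parent dentry) through hash_name(), full_name_hash() and hashlen_string() as the initial hash state, and restructure the word-at-a-time loops to start at len = 0 and jump into the loop body instead of priming a negative offset. The byte-at-a-time variant is easy to model in userspace (mixing as in partial_name_hash(); the kernel additionally folds the result through end_name_hash()):

#include <stdio.h>
#include <stdint.h>

/*
 * Walk one path component up to '/' or NUL, mixing the salt in as the
 * initial state, and return length and hash packed into one u64 (the
 * kernel's "hash_len").
 */
static uint64_t hash_name(const void *salt, const char *name)
{
        unsigned long hash = (unsigned long)salt;
        unsigned long len = 0;
        unsigned char c = *name;

        do {
                len++;
                hash = (hash + (c << 4) + (c >> 4)) * 11;
                c = name[len];
        } while (c && c != '/');
        return ((uint64_t)len << 32) | (uint32_t)hash;
}

int main(void)
{
        uint64_t hl = hash_name(NULL, "usr/lib");

        printf("len=%llu hash=%08llx\n",
               (unsigned long long)(hl >> 32),
               (unsigned long long)(hl & 0xffffffffu));
        return 0;
}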
diff --git a/fs/namespace.c b/fs/namespace.c
index 419f746d851d..7bb2cda3bfef 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -2186,13 +2186,7 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
}
if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
!(mnt_flags & MNT_NODEV)) {
- /* Was the nodev implicitly added in mount? */
- if ((mnt->mnt_ns->user_ns != &init_user_ns) &&
- !(sb->s_type->fs_flags & FS_USERNS_DEV_MOUNT)) {
- mnt_flags |= MNT_NODEV;
- } else {
- return -EPERM;
- }
+ return -EPERM;
}
if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
!(mnt_flags & MNT_NOSUID)) {
@@ -2376,7 +2370,7 @@ unlock:
return err;
}
-static bool fs_fully_visible(struct file_system_type *fs_type, int *new_mnt_flags);
+static bool mount_too_revealing(struct vfsmount *mnt, int *new_mnt_flags);
/*
* create a new mount for userspace and request it to be added into the
@@ -2386,7 +2380,6 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
int mnt_flags, const char *name, void *data)
{
struct file_system_type *type;
- struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
struct vfsmount *mnt;
int err;
@@ -2397,26 +2390,6 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
if (!type)
return -ENODEV;
- if (user_ns != &init_user_ns) {
- if (!(type->fs_flags & FS_USERNS_MOUNT)) {
- put_filesystem(type);
- return -EPERM;
- }
- /* Only in special cases allow devices from mounts
- * created outside the initial user namespace.
- */
- if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
- flags |= MS_NODEV;
- mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
- }
- if (type->fs_flags & FS_USERNS_VISIBLE) {
- if (!fs_fully_visible(type, &mnt_flags)) {
- put_filesystem(type);
- return -EPERM;
- }
- }
- }
-
mnt = vfs_kern_mount(type, flags, name, data);
if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
!mnt->mnt_sb->s_subtype)
@@ -2426,6 +2399,11 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
if (IS_ERR(mnt))
return PTR_ERR(mnt);
+ if (mount_too_revealing(mnt, &mnt_flags)) {
+ mntput(mnt);
+ return -EPERM;
+ }
+
err = do_add_mount(real_mount(mnt), path, mnt_flags);
if (err)
mntput(mnt);
@@ -3217,22 +3195,19 @@ bool current_chrooted(void)
return chrooted;
}
-static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
+static bool mnt_already_visible(struct mnt_namespace *ns, struct vfsmount *new,
+ int *new_mnt_flags)
{
- struct mnt_namespace *ns = current->nsproxy->mnt_ns;
int new_flags = *new_mnt_flags;
struct mount *mnt;
bool visible = false;
- if (unlikely(!ns))
- return false;
-
down_read(&namespace_sem);
list_for_each_entry(mnt, &ns->list, mnt_list) {
struct mount *child;
int mnt_flags;
- if (mnt->mnt.mnt_sb->s_type != type)
+ if (mnt->mnt.mnt_sb->s_type != new->mnt_sb->s_type)
continue;
/* This mount is not fully visible if it's root directory
@@ -3241,12 +3216,8 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
continue;
- /* Read the mount flags and filter out flags that
- * may safely be ignored.
- */
+ /* A local view of the mount flags */
mnt_flags = mnt->mnt.mnt_flags;
- if (mnt->mnt.mnt_sb->s_iflags & SB_I_NOEXEC)
- mnt_flags &= ~(MNT_LOCK_NOSUID | MNT_LOCK_NOEXEC);
/* Don't miss readonly hidden in the superblock flags */
if (mnt->mnt.mnt_sb->s_flags & MS_RDONLY)
@@ -3258,15 +3229,6 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
if ((mnt_flags & MNT_LOCK_READONLY) &&
!(new_flags & MNT_READONLY))
continue;
- if ((mnt_flags & MNT_LOCK_NODEV) &&
- !(new_flags & MNT_NODEV))
- continue;
- if ((mnt_flags & MNT_LOCK_NOSUID) &&
- !(new_flags & MNT_NOSUID))
- continue;
- if ((mnt_flags & MNT_LOCK_NOEXEC) &&
- !(new_flags & MNT_NOEXEC))
- continue;
if ((mnt_flags & MNT_LOCK_ATIME) &&
((mnt_flags & MNT_ATIME_MASK) != (new_flags & MNT_ATIME_MASK)))
continue;
@@ -3286,9 +3248,6 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
}
/* Preserve the locked attributes */
*new_mnt_flags |= mnt_flags & (MNT_LOCK_READONLY | \
- MNT_LOCK_NODEV | \
- MNT_LOCK_NOSUID | \
- MNT_LOCK_NOEXEC | \
MNT_LOCK_ATIME);
visible = true;
goto found;
@@ -3299,6 +3258,42 @@ found:
return visible;
}
+static bool mount_too_revealing(struct vfsmount *mnt, int *new_mnt_flags)
+{
+ const unsigned long required_iflags = SB_I_NOEXEC | SB_I_NODEV;
+ struct mnt_namespace *ns = current->nsproxy->mnt_ns;
+ unsigned long s_iflags;
+
+ if (ns->user_ns == &init_user_ns)
+ return false;
+
+ /* Can this filesystem be too revealing? */
+ s_iflags = mnt->mnt_sb->s_iflags;
+ if (!(s_iflags & SB_I_USERNS_VISIBLE))
+ return false;
+
+ if ((s_iflags & required_iflags) != required_iflags) {
+ WARN_ONCE(1, "Expected s_iflags to contain 0x%lx\n",
+ required_iflags);
+ return true;
+ }
+
+ return !mnt_already_visible(ns, mnt, new_mnt_flags);
+}
+
+bool mnt_may_suid(struct vfsmount *mnt)
+{
+ /*
+ * Foreign mounts (accessed via fchdir or through /proc
+ * symlinks) are always treated as if they are nosuid. This
+ * prevents namespaces from trusting potentially unsafe
+ * suid/sgid bits, file caps, or security labels that originate
+ * in other namespaces.
+ */
+ return !(mnt->mnt_flags & MNT_NOSUID) && check_mnt(real_mount(mnt)) &&
+ current_in_userns(mnt->mnt_sb->s_user_ns);
+}
+
static struct ns_common *mntns_get(struct task_struct *task)
{
struct ns_common *ns = NULL;
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index bfdad003ee56..17de5c13dfae 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -74,7 +74,7 @@ const struct inode_operations ncp_dir_inode_operations =
*/
static int ncp_lookup_validate(struct dentry *, unsigned int);
static int ncp_hash_dentry(const struct dentry *, struct qstr *);
-static int ncp_compare_dentry(const struct dentry *, const struct dentry *,
+static int ncp_compare_dentry(const struct dentry *,
unsigned int, const char *, const struct qstr *);
static int ncp_delete_dentry(const struct dentry *);
static void ncp_d_prune(struct dentry *dentry);
@@ -139,7 +139,7 @@ ncp_hash_dentry(const struct dentry *dentry, struct qstr *this)
int i;
t = NCP_IO_TABLE(sb);
- hash = init_name_hash();
+ hash = init_name_hash(dentry);
for (i=0; i<this->len ; i++)
hash = partial_name_hash(ncp_tolower(t, this->name[i]),
hash);
@@ -154,7 +154,7 @@ ncp_hash_dentry(const struct dentry *dentry, struct qstr *this)
* the callers will handle races.
*/
static int
-ncp_compare_dentry(const struct dentry *parent, const struct dentry *dentry,
+ncp_compare_dentry(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
struct inode *pinode;
@@ -162,7 +162,7 @@ ncp_compare_dentry(const struct dentry *parent, const struct dentry *dentry,
if (len != name->len)
return 1;
- pinode = d_inode_rcu(parent);
+ pinode = d_inode_rcu(dentry->d_parent);
if (!pinode)
return 1;
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 17a42e4eb872..f55a4e756047 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -102,14 +102,15 @@ static inline void put_parallel(struct parallel_io *p)
}
static struct bio *
-bl_submit_bio(int rw, struct bio *bio)
+bl_submit_bio(struct bio *bio)
{
if (bio) {
get_parallel(bio->bi_private);
dprintk("%s submitting %s bio %u@%llu\n", __func__,
- rw == READ ? "read" : "write", bio->bi_iter.bi_size,
+ bio_op(bio) == READ ? "read" : "write",
+ bio->bi_iter.bi_size,
(unsigned long long)bio->bi_iter.bi_sector);
- submit_bio(rw, bio);
+ submit_bio(bio);
}
return NULL;
}
@@ -158,7 +159,7 @@ do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,
if (disk_addr < map->start || disk_addr >= map->start + map->len) {
if (!dev->map(dev, disk_addr, map))
return ERR_PTR(-EIO);
- bio = bl_submit_bio(rw, bio);
+ bio = bl_submit_bio(bio);
}
disk_addr += map->disk_offset;
disk_addr -= map->start;
@@ -174,9 +175,10 @@ retry:
disk_addr >> SECTOR_SHIFT, end_io, par);
if (!bio)
return ERR_PTR(-ENOMEM);
+ bio_set_op_attrs(bio, rw, 0);
}
if (bio_add_page(bio, page, *len, offset) < *len) {
- bio = bl_submit_bio(rw, bio);
+ bio = bl_submit_bio(bio);
goto retry;
}
return bio;
@@ -252,7 +254,7 @@ bl_read_pagelist(struct nfs_pgio_header *header)
for (i = pg_index; i < header->page_array.npages; i++) {
if (extent_length <= 0) {
/* We've used up the previous extent */
- bio = bl_submit_bio(READ, bio);
+ bio = bl_submit_bio(bio);
/* Get the next one */
if (!ext_tree_lookup(bl, isect, &be, false)) {
@@ -273,7 +275,7 @@ bl_read_pagelist(struct nfs_pgio_header *header)
}
if (is_hole(&be)) {
- bio = bl_submit_bio(READ, bio);
+ bio = bl_submit_bio(bio);
/* Fill hole w/ zeroes w/o accessing device */
dprintk("%s Zeroing page for hole\n", __func__);
zero_user_segment(pages[i], pg_offset, pg_len);
@@ -306,7 +308,7 @@ bl_read_pagelist(struct nfs_pgio_header *header)
header->res.count = (isect << SECTOR_SHIFT) - header->args.offset;
}
out:
- bl_submit_bio(READ, bio);
+ bl_submit_bio(bio);
blk_finish_plug(&plug);
put_parallel(par);
return PNFS_ATTEMPTED;
@@ -398,7 +400,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync)
for (i = pg_index; i < header->page_array.npages; i++) {
if (extent_length <= 0) {
/* We've used up the previous extent */
- bio = bl_submit_bio(WRITE, bio);
+ bio = bl_submit_bio(bio);
/* Get the next one */
if (!ext_tree_lookup(bl, isect, &be, true)) {
header->pnfs_error = -EINVAL;
@@ -427,7 +429,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync)
header->res.count = header->args.count;
out:
- bl_submit_bio(WRITE, bio);
+ bl_submit_bio(bio);
blk_finish_plug(&plug);
put_parallel(par);
return PNFS_ATTEMPTED;
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 4849d0f778dc..003ebce4bbc4 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -1100,7 +1100,6 @@ static const struct file_operations nfs_server_list_fops = {
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
- .owner = THIS_MODULE,
};
static int nfs_volume_list_open(struct inode *inode, struct file *file);
@@ -1121,7 +1120,6 @@ static const struct file_operations nfs_volume_list_fops = {
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
- .owner = THIS_MODULE,
};
/*
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index e2d606abc9e8..177fefb26c18 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -232,7 +232,7 @@ int nfs_readdir_make_qstr(struct qstr *string, const char *name, unsigned int le
* in a page cache page which kmemleak does not scan.
*/
kmemleak_not_leak(string->name);
- string->hash = full_name_hash(name, len);
+ string->hash = full_name_hash(NULL, name, len);
return 0;
}
@@ -502,7 +502,7 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
if (filename.len == 2 && filename.name[1] == '.')
return;
}
- filename.hash = full_name_hash(filename.name, filename.len);
+ filename.hash = full_name_hash(parent, filename.name, filename.len);
dentry = d_lookup(parent, &filename);
again:
@@ -734,7 +734,7 @@ struct page *get_cache_page(nfs_readdir_descriptor_t *desc)
struct page *page;
for (;;) {
- page = read_cache_page(file_inode(desc->file)->i_mapping,
+ page = read_cache_page(desc->file->f_mapping,
desc->page_index, (filler_t *)nfs_readdir_filler, desc);
if (IS_ERR(page) || grab_page(page))
break;
@@ -1397,19 +1397,18 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in
if (IS_ERR(label))
goto out;
- /* Protect against concurrent sillydeletes */
trace_nfs_lookup_enter(dir, dentry, flags);
error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
if (error == -ENOENT)
goto no_entry;
if (error < 0) {
res = ERR_PTR(error);
- goto out_unblock_sillyrename;
+ goto out_label;
}
inode = nfs_fhget(dentry->d_sb, fhandle, fattr, label);
res = ERR_CAST(inode);
if (IS_ERR(res))
- goto out_unblock_sillyrename;
+ goto out_label;
/* Success: notify readdir to use READDIRPLUS */
nfs_advise_use_readdirplus(dir);
@@ -1418,11 +1417,11 @@ no_entry:
res = d_splice_alias(inode, dentry);
if (res != NULL) {
if (IS_ERR(res))
- goto out_unblock_sillyrename;
+ goto out_label;
dentry = res;
}
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
-out_unblock_sillyrename:
+out_label:
trace_nfs_lookup_exit(dir, dentry, flags, error);
nfs4_label_free(label);
out:
@@ -1485,11 +1484,13 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned open_flags,
umode_t mode, int *opened)
{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
struct nfs_open_context *ctx;
struct dentry *res;
struct iattr attr = { .ia_valid = ATTR_OPEN };
struct inode *inode;
unsigned int lookup_flags = 0;
+ bool switched = false;
int err;
/* Expect a negative dentry */
@@ -1504,7 +1505,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
/* NFS only supports OPEN on regular files */
if ((open_flags & O_DIRECTORY)) {
- if (!d_unhashed(dentry)) {
+ if (!d_in_lookup(dentry)) {
/*
* Hashed negative dentry with O_DIRECTORY: dentry was
* revalidated and is fine, no need to perform lookup
@@ -1528,6 +1529,17 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
attr.ia_size = 0;
}
+ if (!(open_flags & O_CREAT) && !d_in_lookup(dentry)) {
+ d_drop(dentry);
+ switched = true;
+ dentry = d_alloc_parallel(dentry->d_parent,
+ &dentry->d_name, &wq);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+ if (unlikely(!d_in_lookup(dentry)))
+ return finish_no_open(file, dentry);
+ }
+
ctx = create_nfs_open_context(dentry, open_flags);
err = PTR_ERR(ctx);
if (IS_ERR(ctx))
@@ -1563,14 +1575,23 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
put_nfs_open_context(ctx);
out:
+ if (unlikely(switched)) {
+ d_lookup_done(dentry);
+ dput(dentry);
+ }
return err;
no_open:
res = nfs_lookup(dir, dentry, lookup_flags);
- err = PTR_ERR(res);
+ if (switched) {
+ d_lookup_done(dentry);
+ if (!res)
+ res = dentry;
+ else
+ dput(dentry);
+ }
if (IS_ERR(res))
- goto out;
-
+ return PTR_ERR(res);
return finish_no_open(file, res);
}
EXPORT_SYMBOL_GPL(nfs_atomic_open);
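[Editor's note: the nfs_atomic_open() changes above follow 4.8's parallel-lookup protocol — an already-hashed dentry is traded for an in-lookup one so concurrent openers of the same name serialize on a per-name waitqueue rather than the parent's i_mutex. A condensed sketch of the pattern, using the same dcache API the patch does (parent/name/file are assumed locals):]

DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);

d_drop(dentry);				/* unhash the stale dentry */
dentry = d_alloc_parallel(parent, &name, &wq);
if (IS_ERR(dentry))
	return PTR_ERR(dentry);
if (!d_in_lookup(dentry))		/* raced: another opener won */
	return finish_no_open(file, dentry);

/* ... perform the open/lookup work ... */

d_lookup_done(dentry);			/* wake queued waiters */
dput(dentry);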
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index f0f4b8d3d83f..72b7d13ee3c6 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -250,9 +250,7 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
/**
* nfs_direct_IO - NFS address space operation for direct I/O
* @iocb: target I/O control block
- * @iov: array of vectors that define I/O buffer
- * @pos: offset in file to begin the operation
- * @nr_segs: size of iovec array
+ * @iter: I/O buffer
*
* The presence of this routine in the address space ops vector means
* the NFS client supports direct I/O. However, for most direct IO, we
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 8de509b65e8d..7ce5e023c3c3 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -663,7 +663,7 @@ void nfs_mark_page_unstable(struct page *page, struct nfs_commit_info *cinfo)
if (!cinfo->dreq) {
struct inode *inode = page_file_mapping(page)->host;
- inc_zone_page_state(page, NR_UNSTABLE_NFS);
+ inc_node_page_state(page, NR_UNSTABLE_NFS);
inc_wb_stat(&inode_to_bdi(inode)->wb, WB_RECLAIMABLE);
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
}
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index cb28cceefebe..698be9361280 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -144,7 +144,7 @@ nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
}
static int
-nfs3_proc_lookup(struct inode *dir, struct qstr *name,
+nfs3_proc_lookup(struct inode *dir, const struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr,
struct nfs4_label *label)
{
@@ -404,7 +404,7 @@ out:
}
static int
-nfs3_proc_remove(struct inode *dir, struct qstr *name)
+nfs3_proc_remove(struct inode *dir, const struct qstr *name)
{
struct nfs_removeargs arg = {
.fh = NFS_FH(dir),
@@ -480,7 +480,7 @@ nfs3_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
}
static int
-nfs3_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
+nfs3_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
{
struct nfs3_linkargs arg = {
.fromfh = NFS_FH(inode),
@@ -582,7 +582,7 @@ out:
}
static int
-nfs3_proc_rmdir(struct inode *dir, struct qstr *name)
+nfs3_proc_rmdir(struct inode *dir, const struct qstr *name)
{
struct nfs_fattr *dir_attr;
struct nfs3_diropargs arg = {
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 74a0e34e5ded..9bf64eacba5b 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -225,7 +225,8 @@ int nfs_atomic_open(struct inode *, struct dentry *, struct file *,
extern struct file_system_type nfs4_fs_type;
/* nfs4namespace.c */
-struct rpc_clnt *nfs4_negotiate_security(struct rpc_clnt *, struct inode *, struct qstr *);
+struct rpc_clnt *nfs4_negotiate_security(struct rpc_clnt *, struct inode *,
+ const struct qstr *);
struct vfsmount *nfs4_submount(struct nfs_server *, struct dentry *,
struct nfs_fh *, struct nfs_fattr *);
int nfs4_replace_transport(struct nfs_server *server,
@@ -252,7 +253,7 @@ extern int nfs4_proc_fs_locations(struct rpc_clnt *, struct inode *, const struc
extern int nfs4_proc_get_locations(struct inode *, struct nfs4_fs_locations *,
struct page *page, struct rpc_cred *);
extern int nfs4_proc_fsid_present(struct inode *, struct rpc_cred *);
-extern struct rpc_clnt *nfs4_proc_lookup_mountpoint(struct inode *, struct qstr *,
+extern struct rpc_clnt *nfs4_proc_lookup_mountpoint(struct inode *, const struct qstr *,
struct nfs_fh *, struct nfs_fattr *);
extern int nfs4_proc_secinfo(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *);
extern const struct xattr_handler *nfs4_xattr_handlers[];
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index f592672373cb..d21104912676 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -208,7 +208,7 @@ static struct rpc_clnt *nfs_find_best_sec(struct rpc_clnt *clnt,
*/
struct rpc_clnt *
nfs4_negotiate_security(struct rpc_clnt *clnt, struct inode *inode,
- struct qstr *name)
+ const struct qstr *name)
{
struct page *page;
struct nfs4_secinfo_flavors *flavors;
@@ -397,7 +397,7 @@ struct vfsmount *nfs4_submount(struct nfs_server *server, struct dentry *dentry,
rpc_authflavor_t flavor = server->client->cl_auth->au_flavor;
struct dentry *parent = dget_parent(dentry);
struct inode *dir = d_inode(parent);
- struct qstr *name = &dentry->d_name;
+ const struct qstr *name = &dentry->d_name;
struct rpc_clnt *client;
struct vfsmount *mnt;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index b9e18679af50..1949bbd806eb 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3538,7 +3538,7 @@ static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr)
}
static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
- struct qstr *name, struct nfs_fh *fhandle,
+ const struct qstr *name, struct nfs_fh *fhandle,
struct nfs_fattr *fattr, struct nfs4_label *label)
{
struct nfs4_exception exception = { };
@@ -3580,7 +3580,7 @@ out:
return err;
}
-static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
+static int nfs4_proc_lookup(struct inode *dir, const struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr,
struct nfs4_label *label)
{
@@ -3596,7 +3596,7 @@ static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
}
struct rpc_clnt *
-nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
+nfs4_proc_lookup_mountpoint(struct inode *dir, const struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
struct rpc_clnt *client = NFS_CLIENT(dir);
@@ -3755,7 +3755,7 @@ out:
return status;
}
-static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
+static int _nfs4_proc_remove(struct inode *dir, const struct qstr *name)
{
struct nfs_server *server = NFS_SERVER(dir);
struct nfs_removeargs args = {
@@ -3778,7 +3778,7 @@ static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
return status;
}
-static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
+static int nfs4_proc_remove(struct inode *dir, const struct qstr *name)
{
struct nfs4_exception exception = { };
int err;
@@ -3861,7 +3861,7 @@ static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
return 1;
}
-static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
+static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_link_arg arg = {
@@ -3908,7 +3908,7 @@ out:
return status;
}
-static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
+static int nfs4_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
{
struct nfs4_exception exception = { };
int err;
@@ -3930,7 +3930,7 @@ struct nfs4_createdata {
};
static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
- struct qstr *name, struct iattr *sattr, u32 ftype)
+ const struct qstr *name, struct iattr *sattr, u32 ftype)
{
struct nfs4_createdata *data;
diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
index 9c150b153782..cfb8f7ce5cf6 100644
--- a/fs/nfs/nfs4trace.h
+++ b/fs/nfs/nfs4trace.h
@@ -1235,8 +1235,8 @@ DECLARE_EVENT_CLASS(nfs4_idmap_event,
len = 0;
__entry->error = error < 0 ? error : 0;
__entry->id = id;
- memcpy(__get_dynamic_array(name), name, len);
- ((char *)__get_dynamic_array(name))[len] = 0;
+ memcpy(__get_str(name), name, len);
+ __get_str(name)[len] = 0;
),
TP_printk(
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index fe80a1c26340..2ca9167bc97d 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -706,9 +706,9 @@ TRACE_EVENT(nfs_sillyrename_unlink,
__entry->dev = dir->i_sb->s_dev;
__entry->dir = NFS_FILEID(dir);
__entry->error = error;
- memcpy(__get_dynamic_array(name),
+ memcpy(__get_str(name),
data->args.name.name, len);
- ((char *)__get_dynamic_array(name))[len] = 0;
+ __get_str(name)[len] = 0;
),
TP_printk(
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index b417bbcd9704..b7bca8303989 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -145,7 +145,7 @@ nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
}
static int
-nfs_proc_lookup(struct inode *dir, struct qstr *name,
+nfs_proc_lookup(struct inode *dir, const struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr,
struct nfs4_label *label)
{
@@ -299,7 +299,7 @@ out:
}
static int
-nfs_proc_remove(struct inode *dir, struct qstr *name)
+nfs_proc_remove(struct inode *dir, const struct qstr *name)
{
struct nfs_removeargs arg = {
.fh = NFS_FH(dir),
@@ -357,7 +357,7 @@ nfs_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
}
static int
-nfs_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
+nfs_proc_link(struct inode *inode, struct inode *dir, const struct qstr *name)
{
struct nfs_linkargs arg = {
.fromfh = NFS_FH(inode),
@@ -456,7 +456,7 @@ out:
}
static int
-nfs_proc_rmdir(struct inode *dir, struct qstr *name)
+nfs_proc_rmdir(struct inode *dir, const struct qstr *name)
{
struct nfs_diropargs arg = {
.fh = NFS_FH(dir),
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index 1868246f56e6..191aa577dd1f 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -162,7 +162,7 @@ static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data)
* @dentry: dentry to unlink
*/
static int
-nfs_async_unlink(struct dentry *dentry, struct qstr *name)
+nfs_async_unlink(struct dentry *dentry, const struct qstr *name)
{
struct nfs_unlinkdata *data;
int status = -ENOMEM;
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 510c5befdf9b..3a6724c6eb5f 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -887,7 +887,7 @@ nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
static void
nfs_clear_page_commit(struct page *page)
{
- dec_zone_page_state(page, NR_UNSTABLE_NFS);
+ dec_node_page_state(page, NR_UNSTABLE_NFS);
dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb,
WB_RECLAIMABLE);
}
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index c9f583d7bac8..47febcf99185 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -90,6 +90,7 @@ config NFSD_BLOCKLAYOUT
bool "NFSv4.1 server support for pNFS block layouts"
depends on NFSD_V4 && BLOCK
select NFSD_PNFS
+ select EXPORTFS_BLOCK_OPS
help
This option enables support for the exporting pNFS block layouts
in the kernel's NFS server. The pNFS block layout enables NFS
@@ -102,6 +103,7 @@ config NFSD_SCSILAYOUT
bool "NFSv4.1 server support for pNFS SCSI layouts"
depends on NFSD_V4 && BLOCK
select NFSD_PNFS
+ select EXPORTFS_BLOCK_OPS
help
This option enables support for the exporting pNFS SCSI layouts
in the kernel's NFS server. The pNFS SCSI layout enables NFS
@@ -111,6 +113,23 @@ config NFSD_SCSILAYOUT
If unsure, say N.
+config NFSD_FLEXFILELAYOUT
+ bool "NFSv4.1 server support for pNFS Flex File layouts"
+ depends on NFSD_V4
+ select NFSD_PNFS
+ help
+	  This option enables support for exporting pNFS Flex File
+	  layouts in the kernel's NFS server. The pNFS Flex File layout
+	  enables NFS clients to directly perform I/O to NFSv3 devices
+	  accessible to both the server and the clients. See
+	  draft-ietf-nfsv4-flex-files for more details.
+
+	  Warning: this server implements the bare minimum functionality
+ to be a flex file server - it is for testing the client,
+ not for use in production.
+
+ If unsure, say N.
+
config NFSD_V4_SECURITY_LABEL
bool "Provide Security Label support for NFSv4 server"
depends on NFSD_V4 && SECURITY
diff --git a/fs/nfsd/Makefile b/fs/nfsd/Makefile
index 3ae5f3c77e28..5f5d3a76980c 100644
--- a/fs/nfsd/Makefile
+++ b/fs/nfsd/Makefile
@@ -20,3 +20,4 @@ nfsd-$(CONFIG_NFSD_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4idmap.o \
nfsd-$(CONFIG_NFSD_PNFS) += nfs4layouts.o
nfsd-$(CONFIG_NFSD_BLOCKLAYOUT) += blocklayout.o blocklayoutxdr.o
nfsd-$(CONFIG_NFSD_SCSILAYOUT) += blocklayout.o blocklayoutxdr.o
+nfsd-$(CONFIG_NFSD_FLEXFILELAYOUT) += flexfilelayout.o flexfilelayoutxdr.o
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index 31f3df193bdb..5a1708441510 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -2,6 +2,7 @@
* Copyright (c) 2014-2016 Christoph Hellwig.
*/
#include <linux/exportfs.h>
+#include <linux/iomap.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/pr.h>
@@ -162,6 +163,7 @@ nfsd4_block_get_device_info_simple(struct super_block *sb,
static __be32
nfsd4_block_proc_getdeviceinfo(struct super_block *sb,
+ struct svc_rqst *rqstp,
struct nfs4_client *clp,
struct nfsd4_getdeviceinfo *gdp)
{
@@ -354,6 +356,7 @@ nfsd4_block_get_device_info_scsi(struct super_block *sb,
static __be32
nfsd4_scsi_proc_getdeviceinfo(struct super_block *sb,
+ struct svc_rqst *rqstp,
struct nfs4_client *clp,
struct nfsd4_getdeviceinfo *gdp)
{
diff --git a/fs/nfsd/blocklayoutxdr.c b/fs/nfsd/blocklayoutxdr.c
index 6c3b316f932e..ac6f54546fdd 100644
--- a/fs/nfsd/blocklayoutxdr.c
+++ b/fs/nfsd/blocklayoutxdr.c
@@ -3,6 +3,7 @@
*/
#include <linux/sunrpc/svc.h>
#include <linux/exportfs.h>
+#include <linux/iomap.h>
#include <linux/nfs4.h>
#include "nfsd.h"
@@ -43,7 +44,7 @@ nfsd4_block_encode_volume(struct xdr_stream *xdr, struct pnfs_block_volume *b)
switch (b->type) {
case PNFS_BLOCK_VOLUME_SIMPLE:
- len = 4 + 4 + 8 + 4 + b->simple.sig_len;
+ len = 4 + 4 + 8 + 4 + (XDR_QUADLEN(b->simple.sig_len) << 2);
p = xdr_reserve_space(xdr, len);
if (!p)
return -ETOOSMALL;
@@ -54,7 +55,7 @@ nfsd4_block_encode_volume(struct xdr_stream *xdr, struct pnfs_block_volume *b)
p = xdr_encode_opaque(p, b->simple.sig, b->simple.sig_len);
break;
case PNFS_BLOCK_VOLUME_SCSI:
- len = 4 + 4 + 4 + 4 + b->scsi.designator_len + 8;
+ len = 4 + 4 + 4 + 4 + (XDR_QUADLEN(b->scsi.designator_len) << 2) + 8;
p = xdr_reserve_space(xdr, len);
if (!p)
return -ETOOSMALL;
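[Editor's note: the two length fixes above account for XDR's 4-byte alignment — an opaque of n bytes occupies a 4-byte length word plus n rounded up to a quad boundary, which the old arithmetic under-reserved. For reference, the macro as defined in include/linux/sunrpc/xdr.h:]

#define XDR_QUADLEN(l)		(((l) + 3) >> 2)

/* e.g. a 5-byte signature costs 4 + (XDR_QUADLEN(5) << 2)
 * = 4 + 8 = 12 bytes on the wire, not 4 + 5 = 9 */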
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index b4d84b579f20..43e109cc0ccc 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -706,7 +706,7 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
new->ex_fslocs.locations = NULL;
new->ex_fslocs.locations_count = 0;
new->ex_fslocs.migrated = 0;
- new->ex_layout_type = 0;
+ new->ex_layout_types = 0;
new->ex_uuid = NULL;
new->cd = item->cd;
}
@@ -731,7 +731,7 @@ static void export_update(struct cache_head *cnew, struct cache_head *citem)
item->ex_fslocs.locations_count = 0;
new->ex_fslocs.migrated = item->ex_fslocs.migrated;
item->ex_fslocs.migrated = 0;
- new->ex_layout_type = item->ex_layout_type;
+ new->ex_layout_types = item->ex_layout_types;
new->ex_nflavors = item->ex_nflavors;
for (i = 0; i < MAX_SECINFO_LIST; i++) {
new->ex_flavors[i] = item->ex_flavors[i];
@@ -954,6 +954,16 @@ __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
rqstp->rq_cred.cr_flavor == RPC_AUTH_UNIX)
return 0;
}
+
+	/* If the compound contains a spo_must_allow op, it will
+	 * have been sent with integrity/protection, which must be
+	 * expressly allowed even on exports that don't otherwise
+	 * permit that flavor.
+	 */
+
+ if (nfsd4_spo_must_allow(rqstp))
+ return 0;
+
return nfserr_wrongsec;
}
diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
index 2e315072bf3f..730f15eeb7ed 100644
--- a/fs/nfsd/export.h
+++ b/fs/nfsd/export.h
@@ -57,7 +57,7 @@ struct svc_export {
struct nfsd4_fs_locations ex_fslocs;
uint32_t ex_nflavors;
struct exp_flavor_info ex_flavors[MAX_SECINFO_LIST];
- enum pnfs_layouttype ex_layout_type;
+ u32 ex_layout_types;
struct nfsd4_deviceid_map *ex_devid_map;
struct cache_detail *cd;
};
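[Editor's note: turning ex_layout_type into the ex_layout_types bitmask lets a single export advertise several pNFS layout types at once; the consumers further down (nfsd4_setup_layout_type(), nfsd4_layout_verify(), the XDR encoder) all switch from equality tests to bit tests. The shape of the check, in brief:]

/* advertise: one bit per enum pnfs_layouttype value */
exp->ex_layout_types |= 1 << LAYOUT_FLEX_FILES;
exp->ex_layout_types |= 1 << LAYOUT_BLOCK_VOLUME;

/* verify: is the client-requested type among them? */
if (!(exp->ex_layout_types & (1 << layout_type)))
	return NULL;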
diff --git a/fs/nfsd/flexfilelayout.c b/fs/nfsd/flexfilelayout.c
new file mode 100644
index 000000000000..df880e9fa71f
--- /dev/null
+++ b/fs/nfsd/flexfilelayout.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2016 Tom Haynes <loghyr@primarydata.com>
+ *
+ * The following implements a super-simple flex-file server
+ * where the NFSv4.1 MDS is also the DS and the storage is
+ * shared, i.e. a write to the MDS via an NFSv4.1 WRITE
+ * goes to the same location as an NFSv3 WRITE would.
+ */
+#include <linux/slab.h>
+
+#include <linux/nfsd/debug.h>
+
+#include <linux/sunrpc/addr.h>
+
+#include "flexfilelayoutxdr.h"
+#include "pnfs.h"
+
+#define NFSDDBG_FACILITY NFSDDBG_PNFS
+
+static __be32
+nfsd4_ff_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
+ struct nfsd4_layoutget *args)
+{
+ struct nfsd4_layout_seg *seg = &args->lg_seg;
+ u32 device_generation = 0;
+ int error;
+ uid_t u;
+
+ struct pnfs_ff_layout *fl;
+
+ /*
+ * The super simple flex file server has 1 mirror, 1 data server,
+ * and 1 file handle. So instead of 4 allocs, do 1 for now.
+ * Zero it out for the stateid - don't want junk in there!
+ */
+ error = -ENOMEM;
+ fl = kzalloc(sizeof(*fl), GFP_KERNEL);
+ if (!fl)
+ goto out_error;
+ args->lg_content = fl;
+
+ /*
+ * Avoid layout commit, try to force the I/O to the DS,
+ * and for fun, cause all IOMODE_RW layout segments to
+ * effectively be WRITE only.
+ */
+ fl->flags = FF_FLAGS_NO_LAYOUTCOMMIT | FF_FLAGS_NO_IO_THRU_MDS |
+ FF_FLAGS_NO_READ_IO;
+
+	/* Do not allow an IOMODE_READ segment to have write permissions */
+ if (seg->iomode == IOMODE_READ) {
+ u = from_kuid(&init_user_ns, inode->i_uid) + 1;
+ fl->uid = make_kuid(&init_user_ns, u);
+ } else
+ fl->uid = inode->i_uid;
+ fl->gid = inode->i_gid;
+
+ error = nfsd4_set_deviceid(&fl->deviceid, fhp, device_generation);
+ if (error)
+ goto out_error;
+
+ fl->fh.size = fhp->fh_handle.fh_size;
+ memcpy(fl->fh.data, &fhp->fh_handle.fh_base, fl->fh.size);
+
+ /* Give whole file layout segments */
+ seg->offset = 0;
+ seg->length = NFS4_MAX_UINT64;
+
+ dprintk("GET: 0x%llx:0x%llx %d\n", seg->offset, seg->length,
+ seg->iomode);
+ return 0;
+
+out_error:
+ seg->length = 0;
+ return nfserrno(error);
+}
+
+static __be32
+nfsd4_ff_proc_getdeviceinfo(struct super_block *sb, struct svc_rqst *rqstp,
+ struct nfs4_client *clp, struct nfsd4_getdeviceinfo *gdp)
+{
+ struct pnfs_ff_device_addr *da;
+
+ u16 port;
+ char addr[INET6_ADDRSTRLEN];
+
+ da = kzalloc(sizeof(struct pnfs_ff_device_addr), GFP_KERNEL);
+ if (!da)
+ return nfserrno(-ENOMEM);
+
+ gdp->gd_device = da;
+
+ da->version = 3;
+ da->minor_version = 0;
+
+ da->rsize = svc_max_payload(rqstp);
+ da->wsize = da->rsize;
+
+ rpc_ntop((struct sockaddr *)&rqstp->rq_daddr,
+ addr, INET6_ADDRSTRLEN);
+ if (rqstp->rq_daddr.ss_family == AF_INET) {
+ struct sockaddr_in *sin;
+
+ sin = (struct sockaddr_in *)&rqstp->rq_daddr;
+ port = ntohs(sin->sin_port);
+ snprintf(da->netaddr.netid, FF_NETID_LEN + 1, "tcp");
+ da->netaddr.netid_len = 3;
+ } else {
+ struct sockaddr_in6 *sin6;
+
+ sin6 = (struct sockaddr_in6 *)&rqstp->rq_daddr;
+ port = ntohs(sin6->sin6_port);
+ snprintf(da->netaddr.netid, FF_NETID_LEN + 1, "tcp6");
+ da->netaddr.netid_len = 4;
+ }
+
+ da->netaddr.addr_len =
+ snprintf(da->netaddr.addr, FF_ADDR_LEN + 1,
+ "%s.%hhu.%hhu", addr, port >> 8, port & 0xff);
+
+ da->tightly_coupled = false;
+
+ return 0;
+}
+
+const struct nfsd4_layout_ops ff_layout_ops = {
+ .notify_types =
+ NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
+ .proc_getdeviceinfo = nfsd4_ff_proc_getdeviceinfo,
+ .encode_getdeviceinfo = nfsd4_ff_encode_getdeviceinfo,
+ .proc_layoutget = nfsd4_ff_proc_layoutget,
+ .encode_layoutget = nfsd4_ff_encode_layoutget,
+};
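[Editor's note: the address string built in nfsd4_ff_proc_getdeviceinfo() above is an RFC 5665 "universal address" — the textual IP with the port appended as two decimal octets. A worked example with hypothetical values:]

u16 port = 2049;			/* 0x0801 */
char uaddr[INET6_ADDRSTRLEN + 8];

snprintf(uaddr, sizeof(uaddr), "%s.%hhu.%hhu",
	 "192.0.2.1", port >> 8, port & 0xff);
/* yields "192.0.2.1.8.1" (2049 == 8 * 256 + 1) */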
diff --git a/fs/nfsd/flexfilelayoutxdr.c b/fs/nfsd/flexfilelayoutxdr.c
new file mode 100644
index 000000000000..5e3fd7fc1a9f
--- /dev/null
+++ b/fs/nfsd/flexfilelayoutxdr.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2016 Tom Haynes <loghyr@primarydata.com>
+ */
+#include <linux/sunrpc/svc.h>
+#include <linux/nfs4.h>
+
+#include "nfsd.h"
+#include "flexfilelayoutxdr.h"
+
+#define NFSDDBG_FACILITY NFSDDBG_PNFS
+
+struct ff_idmap {
+ char buf[11];
+ int len;
+};
+
+__be32
+nfsd4_ff_encode_layoutget(struct xdr_stream *xdr,
+ struct nfsd4_layoutget *lgp)
+{
+ struct pnfs_ff_layout *fl = lgp->lg_content;
+ int len, mirror_len, ds_len, fh_len;
+ __be32 *p;
+
+ /*
+ * Unlike nfsd4_encode_user, we know these will
+ * always be stringified.
+ */
+ struct ff_idmap uid;
+ struct ff_idmap gid;
+
+ fh_len = 4 + fl->fh.size;
+
+ uid.len = sprintf(uid.buf, "%u", from_kuid(&init_user_ns, fl->uid));
+ gid.len = sprintf(gid.buf, "%u", from_kgid(&init_user_ns, fl->gid));
+
+	/* each string: 4 bytes of length + the name + worst-case padding = 8 + len */
+ ds_len = 20 + sizeof(stateid_opaque_t) + 4 + fh_len +
+ 8 + uid.len + 8 + gid.len;
+
+ mirror_len = 4 + ds_len;
+
+ /* The layout segment */
+ len = 20 + mirror_len;
+
+ p = xdr_reserve_space(xdr, sizeof(__be32) + len);
+ if (!p)
+ return nfserr_toosmall;
+
+ *p++ = cpu_to_be32(len);
+ p = xdr_encode_hyper(p, 0); /* stripe unit of 1 */
+
+ *p++ = cpu_to_be32(1); /* single mirror */
+ *p++ = cpu_to_be32(1); /* single data server */
+
+ p = xdr_encode_opaque_fixed(p, &fl->deviceid,
+ sizeof(struct nfsd4_deviceid));
+
+ *p++ = cpu_to_be32(1); /* efficiency */
+
+ *p++ = cpu_to_be32(fl->stateid.si_generation);
+ p = xdr_encode_opaque_fixed(p, &fl->stateid.si_opaque,
+ sizeof(stateid_opaque_t));
+
+ *p++ = cpu_to_be32(1); /* single file handle */
+ p = xdr_encode_opaque(p, fl->fh.data, fl->fh.size);
+
+ p = xdr_encode_opaque(p, uid.buf, uid.len);
+ p = xdr_encode_opaque(p, gid.buf, gid.len);
+
+ *p++ = cpu_to_be32(fl->flags);
+ *p++ = cpu_to_be32(0); /* No stats collect hint */
+
+ return 0;
+}
+
+__be32
+nfsd4_ff_encode_getdeviceinfo(struct xdr_stream *xdr,
+ struct nfsd4_getdeviceinfo *gdp)
+{
+ struct pnfs_ff_device_addr *da = gdp->gd_device;
+ int len;
+ int ver_len;
+ int addr_len;
+ __be32 *p;
+
+ /* len + padding for two strings */
+ addr_len = 16 + da->netaddr.netid_len + da->netaddr.addr_len;
+ ver_len = 20;
+
+ len = 4 + ver_len + 4 + addr_len;
+
+ p = xdr_reserve_space(xdr, len + sizeof(__be32));
+ if (!p)
+ return nfserr_resource;
+
+ /*
+ * Fill in the overall length and number of volumes at the beginning
+ * of the layout.
+ */
+ *p++ = cpu_to_be32(len);
+ *p++ = cpu_to_be32(1); /* 1 netaddr */
+ p = xdr_encode_opaque(p, da->netaddr.netid, da->netaddr.netid_len);
+ p = xdr_encode_opaque(p, da->netaddr.addr, da->netaddr.addr_len);
+
+	*p++ = cpu_to_be32(1);			/* 1 version */
+
+ *p++ = cpu_to_be32(da->version);
+ *p++ = cpu_to_be32(da->minor_version);
+ *p++ = cpu_to_be32(da->rsize);
+ *p++ = cpu_to_be32(da->wsize);
+ *p++ = cpu_to_be32(da->tightly_coupled);
+
+ return 0;
+}
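[Editor's note: both encoders above follow the usual NFSD XDR pattern — compute the full wire length first, reserve it in one shot, then fill. A hedged miniature of the same pattern; xdr, data and len are assumed locals:]

__be32 *p;
size_t want = 4 + (XDR_QUADLEN(len) << 2);  /* length word + padded data */

p = xdr_reserve_space(xdr, want);
if (!p)
	return nfserr_resource;
p = xdr_encode_opaque(p, data, len);	/* writes length, data, padding */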
diff --git a/fs/nfsd/flexfilelayoutxdr.h b/fs/nfsd/flexfilelayoutxdr.h
new file mode 100644
index 000000000000..467defd4e563
--- /dev/null
+++ b/fs/nfsd/flexfilelayoutxdr.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2016 Tom Haynes <loghyr@primarydata.com>
+ */
+#ifndef _NFSD_FLEXFILELAYOUTXDR_H
+#define _NFSD_FLEXFILELAYOUTXDR_H 1
+
+#include <linux/inet.h>
+#include "xdr4.h"
+
+#define FF_FLAGS_NO_LAYOUTCOMMIT 1
+#define FF_FLAGS_NO_IO_THRU_MDS 2
+#define FF_FLAGS_NO_READ_IO 4
+
+struct xdr_stream;
+
+#define FF_NETID_LEN (4)
+#define FF_ADDR_LEN (INET6_ADDRSTRLEN + 8)
+struct pnfs_ff_netaddr {
+ char netid[FF_NETID_LEN + 1];
+ char addr[FF_ADDR_LEN + 1];
+ u32 netid_len;
+ u32 addr_len;
+};
+
+struct pnfs_ff_device_addr {
+ struct pnfs_ff_netaddr netaddr;
+ u32 version;
+ u32 minor_version;
+ u32 rsize;
+ u32 wsize;
+ bool tightly_coupled;
+};
+
+struct pnfs_ff_layout {
+ u32 flags;
+ u32 stats_collect_hint;
+ kuid_t uid;
+ kgid_t gid;
+ struct nfsd4_deviceid deviceid;
+ stateid_t stateid;
+ struct nfs_fh fh;
+};
+
+__be32 nfsd4_ff_encode_getdeviceinfo(struct xdr_stream *xdr,
+ struct nfsd4_getdeviceinfo *gdp);
+__be32 nfsd4_ff_encode_layoutget(struct xdr_stream *xdr,
+ struct nfsd4_layoutget *lgp);
+
+#endif /* _NFSD_FLEXFILELAYOUTXDR_H */
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 953c0755cb37..2be9602b0221 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -27,6 +27,9 @@ static const struct nfsd4_callback_ops nfsd4_cb_layout_ops;
static const struct lock_manager_operations nfsd4_layouts_lm_ops;
const struct nfsd4_layout_ops *nfsd4_layout_ops[LAYOUT_TYPE_MAX] = {
+#ifdef CONFIG_NFSD_FLEXFILELAYOUT
+ [LAYOUT_FLEX_FILES] = &ff_layout_ops,
+#endif
#ifdef CONFIG_NFSD_BLOCKLAYOUT
[LAYOUT_BLOCK_VOLUME] = &bl_layout_ops,
#endif
@@ -122,28 +125,35 @@ nfsd4_set_deviceid(struct nfsd4_deviceid *id, const struct svc_fh *fhp,
void nfsd4_setup_layout_type(struct svc_export *exp)
{
+#if defined(CONFIG_NFSD_BLOCKLAYOUT) || defined(CONFIG_NFSD_SCSILAYOUT)
struct super_block *sb = exp->ex_path.mnt->mnt_sb;
+#endif
if (!(exp->ex_flags & NFSEXP_PNFS))
return;
/*
- * Check if the file system supports exporting a block-like layout.
+	 * If flex file is configured, advertise it first. Then also
+	 * check if the file system supports exporting a block-like layout.
* If the block device supports reservations prefer the SCSI layout,
* otherwise advertise the block layout.
*/
+#ifdef CONFIG_NFSD_FLEXFILELAYOUT
+ exp->ex_layout_types |= 1 << LAYOUT_FLEX_FILES;
+#endif
#ifdef CONFIG_NFSD_BLOCKLAYOUT
+	/* additionally advertise block layout if the export supports it */
if (sb->s_export_op->get_uuid &&
sb->s_export_op->map_blocks &&
sb->s_export_op->commit_blocks)
- exp->ex_layout_type = LAYOUT_BLOCK_VOLUME;
+ exp->ex_layout_types |= 1 << LAYOUT_BLOCK_VOLUME;
#endif
#ifdef CONFIG_NFSD_SCSILAYOUT
/* overwrite block layout selection if needed */
if (sb->s_export_op->map_blocks &&
sb->s_export_op->commit_blocks &&
sb->s_bdev && sb->s_bdev->bd_disk->fops->pr_ops)
- exp->ex_layout_type = LAYOUT_SCSI;
+ exp->ex_layout_types |= 1 << LAYOUT_SCSI;
#endif
}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index de1ff1d98bb1..1fb222752b2b 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -605,8 +605,7 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
fh_init(&resfh, NFS4_FHSIZE);
- status = fh_verify(rqstp, &cstate->current_fh, S_IFDIR,
- NFSD_MAY_CREATE);
+ status = fh_verify(rqstp, &cstate->current_fh, S_IFDIR, NFSD_MAY_NOP);
if (status)
return status;
@@ -1219,12 +1218,12 @@ nfsd4_verify(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
static const struct nfsd4_layout_ops *
nfsd4_layout_verify(struct svc_export *exp, unsigned int layout_type)
{
- if (!exp->ex_layout_type) {
+ if (!exp->ex_layout_types) {
dprintk("%s: export does not support pNFS\n", __func__);
return NULL;
}
- if (exp->ex_layout_type != layout_type) {
+ if (!(exp->ex_layout_types & (1 << layout_type))) {
dprintk("%s: layout type %d not supported\n",
__func__, layout_type);
return NULL;
@@ -1270,7 +1269,7 @@ nfsd4_getdeviceinfo(struct svc_rqst *rqstp,
nfserr = nfs_ok;
if (gdp->gd_maxcount != 0) {
nfserr = ops->proc_getdeviceinfo(exp->ex_path.mnt->mnt_sb,
- cstate->session->se_client, gdp);
+ rqstp, cstate->session->se_client, gdp);
}
gdp->gd_notify_types &= ops->notify_types;
@@ -2335,6 +2334,45 @@ static struct nfsd4_operation nfsd4_ops[] = {
},
};
+/**
+ * nfsd4_spo_must_allow - check whether the compound contains an
+ * operation that may be sent with machine credentials
+ *
+ * @rqstp: a pointer to the struct svc_rqst
+ *
+ * Checks to see if the compound contains a spo_must_allow op
+ * and confirms that it was sent with the proper machine creds.
+ */
+
+bool nfsd4_spo_must_allow(struct svc_rqst *rqstp)
+{
+ struct nfsd4_compoundres *resp = rqstp->rq_resp;
+ struct nfsd4_compoundargs *argp = rqstp->rq_argp;
+ struct nfsd4_op *this = &argp->ops[resp->opcnt - 1];
+ struct nfsd4_compound_state *cstate = &resp->cstate;
+ struct nfs4_op_map *allow = &cstate->clp->cl_spo_must_allow;
+ u32 opiter;
+
+ if (!cstate->minorversion)
+ return false;
+
+	if (cstate->spo_must_allowed)
+ return true;
+
+ opiter = resp->opcnt;
+ while (opiter < argp->opcnt) {
+ this = &argp->ops[opiter++];
+ if (test_bit(this->opnum, allow->u.longs) &&
+ cstate->clp->cl_mach_cred &&
+ nfsd4_mach_creds_match(cstate->clp, rqstp)) {
+ cstate->spo_must_allowed = true;
+ return true;
+ }
+ }
+ cstate->spo_must_allowed = false;
+ return false;
+}
+
int nfsd4_max_reply(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
struct nfsd4_operation *opdesc;
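[Editor's note: nfsd4_spo_must_allow() above scans the not-yet-executed ops of the compound against the client's stored spo_must_allow map. Assuming the 4.8 nfs4.h layout, struct nfs4_op_map is simply a bitmap wide enough for every NFSv4.x opcode, so membership is an ordinary bit test:]

static bool op_allowed(const struct nfs4_op_map *allow, u32 opnum)
{
	return test_bit(opnum, allow->u.longs);
}

/* populating the map (hypothetical caller): */
struct nfs4_op_map allow = { };
set_bit(OP_CLOSE, allow.u.longs);
set_bit(OP_FREE_STATEID, allow.u.longs);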
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 70d0b9b33031..8410ca275db1 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1200,27 +1200,6 @@ free_ol_stateid_reaplist(struct list_head *reaplist)
}
}
-static void release_lockowner(struct nfs4_lockowner *lo)
-{
- struct nfs4_client *clp = lo->lo_owner.so_client;
- struct nfs4_ol_stateid *stp;
- struct list_head reaplist;
-
- INIT_LIST_HEAD(&reaplist);
-
- spin_lock(&clp->cl_lock);
- unhash_lockowner_locked(lo);
- while (!list_empty(&lo->lo_owner.so_stateids)) {
- stp = list_first_entry(&lo->lo_owner.so_stateids,
- struct nfs4_ol_stateid, st_perstateowner);
- WARN_ON(!unhash_lock_stateid(stp));
- put_ol_stateid_locked(stp, &reaplist);
- }
- spin_unlock(&clp->cl_lock);
- free_ol_stateid_reaplist(&reaplist);
- nfs4_put_stateowner(&lo->lo_owner);
-}
-
static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
struct list_head *reaplist)
{
@@ -1972,7 +1951,7 @@ static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
service == RPC_GSS_SVC_PRIVACY;
}
-static bool mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
+bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
{
struct svc_cred *cr = &rqstp->rq_cred;
@@ -2388,6 +2367,22 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
switch (exid->spa_how) {
case SP4_MACH_CRED:
+ exid->spo_must_enforce[0] = 0;
+ exid->spo_must_enforce[1] = (
+ 1 << (OP_BIND_CONN_TO_SESSION - 32) |
+ 1 << (OP_EXCHANGE_ID - 32) |
+ 1 << (OP_CREATE_SESSION - 32) |
+ 1 << (OP_DESTROY_SESSION - 32) |
+ 1 << (OP_DESTROY_CLIENTID - 32));
+
+ exid->spo_must_allow[0] &= (1 << (OP_CLOSE) |
+ 1 << (OP_OPEN_DOWNGRADE) |
+ 1 << (OP_LOCKU) |
+ 1 << (OP_DELEGRETURN));
+
+ exid->spo_must_allow[1] &= (
+ 1 << (OP_TEST_STATEID - 32) |
+ 1 << (OP_FREE_STATEID - 32));
if (!svc_rqst_integrity_protected(rqstp)) {
status = nfserr_inval;
goto out_nolock;
@@ -2424,7 +2419,7 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
status = nfserr_inval;
goto out;
}
- if (!mach_creds_match(conf, rqstp)) {
+ if (!nfsd4_mach_creds_match(conf, rqstp)) {
status = nfserr_wrong_cred;
goto out;
}
@@ -2473,6 +2468,8 @@ out_new:
goto out;
}
new->cl_minorversion = cstate->minorversion;
+ new->cl_spo_must_allow.u.words[0] = exid->spo_must_allow[0];
+ new->cl_spo_must_allow.u.words[1] = exid->spo_must_allow[1];
gen_clid(new, nn);
add_to_unconfirmed(new);
@@ -2676,7 +2673,7 @@ nfsd4_create_session(struct svc_rqst *rqstp,
if (conf) {
status = nfserr_wrong_cred;
- if (!mach_creds_match(conf, rqstp))
+ if (!nfsd4_mach_creds_match(conf, rqstp))
goto out_free_conn;
cs_slot = &conf->cl_cs_slot;
status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
@@ -2692,7 +2689,7 @@ nfsd4_create_session(struct svc_rqst *rqstp,
goto out_free_conn;
}
status = nfserr_wrong_cred;
- if (!mach_creds_match(unconf, rqstp))
+ if (!nfsd4_mach_creds_match(unconf, rqstp))
goto out_free_conn;
cs_slot = &unconf->cl_cs_slot;
status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
@@ -2801,7 +2798,7 @@ __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
if (!session)
goto out_no_session;
status = nfserr_wrong_cred;
- if (!mach_creds_match(session->se_client, rqstp))
+ if (!nfsd4_mach_creds_match(session->se_client, rqstp))
goto out;
status = nfsd4_map_bcts_dir(&bcts->dir);
if (status)
@@ -2848,7 +2845,7 @@ nfsd4_destroy_session(struct svc_rqst *r,
if (!ses)
goto out_client_lock;
status = nfserr_wrong_cred;
- if (!mach_creds_match(ses->se_client, r))
+ if (!nfsd4_mach_creds_match(ses->se_client, r))
goto out_put_session;
status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
if (status)
@@ -3087,7 +3084,7 @@ nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta
status = nfserr_stale_clientid;
goto out;
}
- if (!mach_creds_match(clp, rqstp)) {
+ if (!nfsd4_mach_creds_match(clp, rqstp)) {
clp = NULL;
status = nfserr_wrong_cred;
goto out;
@@ -3112,7 +3109,7 @@ nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta
* We don't take advantage of the rca_one_fs case.
* That's OK, it's optional, we can safely ignore it.
*/
- return nfs_ok;
+ return nfs_ok;
}
status = nfserr_complete_already;
@@ -5945,6 +5942,7 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
__be32 status;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct nfs4_client *clp;
+	LIST_HEAD(reaplist);
dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
clid->cl_boot, clid->cl_id);
@@ -5975,9 +5973,23 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
nfs4_get_stateowner(sop);
break;
}
+ if (!lo) {
+ spin_unlock(&clp->cl_lock);
+ return status;
+ }
+
+ unhash_lockowner_locked(lo);
+ while (!list_empty(&lo->lo_owner.so_stateids)) {
+ stp = list_first_entry(&lo->lo_owner.so_stateids,
+ struct nfs4_ol_stateid,
+ st_perstateowner);
+ WARN_ON(!unhash_lock_stateid(stp));
+ put_ol_stateid_locked(stp, &reaplist);
+ }
spin_unlock(&clp->cl_lock);
- if (lo)
- release_lockowner(lo);
+ free_ol_stateid_reaplist(&reaplist);
+ nfs4_put_stateowner(&lo->lo_owner);
+
return status;
}
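[Editor's note: the spo_must_enforce/spo_must_allow initializers above hand-place each op in its bitmap word — word 0 holds ops 0..31, word 1 holds ops 32..63 at bit (op - 32). The generic form of the same arithmetic (OP_DESTROY_CLIENTID is opcode 57, so word 1, bit 25):]

u32 map[3] = { };

map[OP_DESTROY_CLIENTID / 32] |= 1 << (OP_DESTROY_CLIENTID % 32);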
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 9df898ba648f..0aa0236a1429 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1299,16 +1299,14 @@ nfsd4_decode_exchange_id(struct nfsd4_compoundargs *argp,
break;
case SP4_MACH_CRED:
/* spo_must_enforce */
- READ_BUF(4);
- dummy = be32_to_cpup(p++);
- READ_BUF(dummy * 4);
- p += dummy;
-
+ status = nfsd4_decode_bitmap(argp,
+ exid->spo_must_enforce);
+ if (status)
+ goto out;
/* spo_must_allow */
- READ_BUF(4);
- dummy = be32_to_cpup(p++);
- READ_BUF(dummy * 4);
- p += dummy;
+ status = nfsd4_decode_bitmap(argp, exid->spo_must_allow);
+ if (status)
+ goto out;
break;
case SP4_SSV:
/* ssp_ops */
@@ -2164,22 +2162,20 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
}
static inline __be32
-nfsd4_encode_layout_type(struct xdr_stream *xdr, enum pnfs_layouttype layout_type)
+nfsd4_encode_layout_types(struct xdr_stream *xdr, u32 layout_types)
{
- __be32 *p;
+ __be32 *p;
+ unsigned long i = hweight_long(layout_types);
- if (layout_type) {
- p = xdr_reserve_space(xdr, 8);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(1);
- *p++ = cpu_to_be32(layout_type);
- } else {
- p = xdr_reserve_space(xdr, 4);
- if (!p)
- return nfserr_resource;
- *p++ = cpu_to_be32(0);
- }
+ p = xdr_reserve_space(xdr, 4 + 4 * i);
+ if (!p)
+ return nfserr_resource;
+
+ *p++ = cpu_to_be32(i);
+
+ for (i = LAYOUT_NFSV4_1_FILES; i < LAYOUT_TYPE_MAX; ++i)
+ if (layout_types & (1 << i))
+ *p++ = cpu_to_be32(i);
return 0;
}
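[Editor's note: a quick worked example for the encoder above — with two bits set, the reply reserves 4 + 4*2 bytes and emits the count followed by each set type in ascending enum order:]

u32 types = (1 << LAYOUT_NFSV4_1_FILES) | (1 << LAYOUT_SCSI);
unsigned long n = hweight_long(types);		/* n == 2 */
/* wire form: { 2, LAYOUT_NFSV4_1_FILES, LAYOUT_SCSI } */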
@@ -2754,13 +2750,13 @@ out_acl:
}
#ifdef CONFIG_NFSD_PNFS
if (bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) {
- status = nfsd4_encode_layout_type(xdr, exp->ex_layout_type);
+ status = nfsd4_encode_layout_types(xdr, exp->ex_layout_types);
if (status)
goto out;
}
if (bmval2 & FATTR4_WORD2_LAYOUT_TYPES) {
- status = nfsd4_encode_layout_type(xdr, exp->ex_layout_type);
+ status = nfsd4_encode_layout_types(xdr, exp->ex_layout_types);
if (status)
goto out;
}
@@ -3867,14 +3863,6 @@ nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_w
return nfserr;
}
-static const u32 nfs4_minimal_spo_must_enforce[2] = {
- [1] = 1 << (OP_BIND_CONN_TO_SESSION - 32) |
- 1 << (OP_EXCHANGE_ID - 32) |
- 1 << (OP_CREATE_SESSION - 32) |
- 1 << (OP_DESTROY_SESSION - 32) |
- 1 << (OP_DESTROY_CLIENTID - 32)
-};
-
static __be32
nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
struct nfsd4_exchange_id *exid)
@@ -3885,6 +3873,7 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
char *server_scope;
int major_id_sz;
int server_scope_sz;
+ int status = 0;
uint64_t minor_id = 0;
if (nfserr)
@@ -3913,18 +3902,20 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
case SP4_NONE:
break;
case SP4_MACH_CRED:
- /* spo_must_enforce, spo_must_allow */
- p = xdr_reserve_space(xdr, 16);
- if (!p)
- return nfserr_resource;
-
/* spo_must_enforce bitmap: */
- *p++ = cpu_to_be32(2);
- *p++ = cpu_to_be32(nfs4_minimal_spo_must_enforce[0]);
- *p++ = cpu_to_be32(nfs4_minimal_spo_must_enforce[1]);
- /* empty spo_must_allow bitmap: */
- *p++ = cpu_to_be32(0);
-
+ status = nfsd4_encode_bitmap(xdr,
+ exid->spo_must_enforce[0],
+ exid->spo_must_enforce[1],
+ exid->spo_must_enforce[2]);
+ if (status)
+ goto out;
+ /* spo_must_allow bitmap: */
+ status = nfsd4_encode_bitmap(xdr,
+ exid->spo_must_allow[0],
+ exid->spo_must_allow[1],
+ exid->spo_must_allow[2]);
+ if (status)
+ goto out;
break;
default:
WARN_ON_ONCE(1);
@@ -3951,6 +3942,8 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
/* Implementation id */
*p++ = cpu_to_be32(0); /* zero length nfs_impl_id4 array */
return 0;
+out:
+ return status;
}
static __be32
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 9690cb4dd588..65ad0165a94f 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -158,7 +158,6 @@ static const struct file_operations exports_proc_operations = {
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
- .owner = THIS_MODULE,
};
static int exports_nfsd_open(struct inode *inode, struct file *file)
@@ -171,7 +170,6 @@ static const struct file_operations exports_nfsd_operations = {
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
- .owner = THIS_MODULE,
};
static int export_features_show(struct seq_file *m, void *v)
@@ -217,7 +215,6 @@ static const struct file_operations pool_stats_operations = {
.read = seq_read,
.llseek = seq_lseek,
.release = nfsd_pool_stats_release,
- .owner = THIS_MODULE,
};
static struct file_operations reply_cache_stats_operations = {
@@ -1154,20 +1151,15 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
#endif
/* last one */ {""}
};
- struct net *net = data;
- int ret;
-
- ret = simple_fill_super(sb, 0x6e667364, nfsd_files);
- if (ret)
- return ret;
- sb->s_fs_info = get_net(net);
- return 0;
+ get_net(sb->s_fs_info);
+ return simple_fill_super(sb, 0x6e667364, nfsd_files);
}
static struct dentry *nfsd_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
- return mount_ns(fs_type, flags, current->nsproxy->net_ns, nfsd_fill_super);
+ struct net *net = current->nsproxy->net_ns;
+ return mount_ns(fs_type, flags, data, net, net->user_ns, nfsd_fill_super);
}
static void nfsd_umount(struct super_block *sb)
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index cf980523898b..9446849888d5 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -124,6 +124,7 @@ void nfs4_state_shutdown_net(struct net *net);
void nfs4_reset_lease(time_t leasetime);
int nfs4_reset_recoverydir(char *recdir);
char * nfs4_recoverydir(void);
+bool nfsd4_spo_must_allow(struct svc_rqst *rqstp);
#else
static inline int nfsd4_init_slabs(void) { return 0; }
static inline void nfsd4_free_slabs(void) { }
@@ -134,6 +135,10 @@ static inline void nfs4_state_shutdown_net(struct net *net) { }
static inline void nfs4_reset_lease(time_t leasetime) { }
static inline int nfs4_reset_recoverydir(char *recdir) { return 0; }
static inline char * nfs4_recoverydir(void) {return NULL; }
+static inline bool nfsd4_spo_must_allow(struct svc_rqst *rqstp)
+{
+ return false;
+}
#endif
/*
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index a8919444c460..cfe7500d5847 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -59,14 +59,20 @@ static int nfsd_acceptable(void *expv, struct dentry *dentry)
* the write call).
*/
static inline __be32
-nfsd_mode_check(struct svc_rqst *rqstp, umode_t mode, umode_t requested)
+nfsd_mode_check(struct svc_rqst *rqstp, struct dentry *dentry,
+ umode_t requested)
{
- mode &= S_IFMT;
+ umode_t mode = d_inode(dentry)->i_mode & S_IFMT;
if (requested == 0) /* the caller doesn't care */
return nfs_ok;
- if (mode == requested)
+ if (mode == requested) {
+ if (mode == S_IFDIR && !d_can_lookup(dentry)) {
+ WARN_ON_ONCE(1);
+ return nfserr_notdir;
+ }
return nfs_ok;
+ }
/*
* v4 has an error more specific than err_notdir which we should
* return in preference to err_notdir:
@@ -298,7 +304,7 @@ out:
* that it expects something not of the given type.
*
* @access is formed from the NFSD_MAY_* constants defined in
- * include/linux/nfsd/nfsd.h.
+ * fs/nfsd/vfs.h.
*/
__be32
fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
@@ -340,7 +346,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
if (error)
goto out;
- error = nfsd_mode_check(rqstp, d_inode(dentry)->i_mode, type);
+ error = nfsd_mode_check(rqstp, dentry, type);
if (error)
goto out;
@@ -533,7 +539,7 @@ fh_compose(struct svc_fh *fhp, struct svc_export *exp, struct dentry *dentry,
* the reference filehandle (if it is in the same export)
* or the export options.
*/
- set_version_and_fsid_type(fhp, exp, ref_fh);
+ set_version_and_fsid_type(fhp, exp, ref_fh);
if (ref_fh == fhp)
fh_put(ref_fh);
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index 4cd78ef4c95c..e9214768cde9 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -251,9 +251,6 @@ nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp,
/* Check for NFSD_MAY_WRITE in nfsd_create if necessary */
- nfserr = nfserr_acces;
- if (!argp->len)
- goto done;
nfserr = nfserr_exist;
if (isdotent(argp->name, argp->len))
goto done;
@@ -362,8 +359,8 @@ nfsd_proc_create(struct svc_rqst *rqstp, struct nfsd_createargs *argp,
nfserr = 0;
if (!inode) {
/* File doesn't exist. Create it and set attrs */
- nfserr = nfsd_create(rqstp, dirfhp, argp->name, argp->len,
- attr, type, rdev, newfhp);
+ nfserr = nfsd_create_locked(rqstp, dirfhp, argp->name,
+ argp->len, attr, type, rdev, newfhp);
} else if (type == S_IFREG) {
dprintk("nfsd: existing %s, valid=%x, size=%ld\n",
argp->name, attr->ia_valid, (long) attr->ia_size);
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 79d964aa8079..41b468a6a90f 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -240,7 +240,7 @@ nfssvc_decode_diropargs(struct svc_rqst *rqstp, __be32 *p,
|| !(p = decode_filename(p, &args->name, &args->len)))
return 0;
- return xdr_argsize_check(rqstp, p);
+ return xdr_argsize_check(rqstp, p);
}
int
diff --git a/fs/nfsd/pnfs.h b/fs/nfsd/pnfs.h
index 7d073b9b1553..0c2a716e8741 100644
--- a/fs/nfsd/pnfs.h
+++ b/fs/nfsd/pnfs.h
@@ -21,6 +21,7 @@ struct nfsd4_layout_ops {
u32 notify_types;
__be32 (*proc_getdeviceinfo)(struct super_block *sb,
+ struct svc_rqst *rqstp,
struct nfs4_client *clp,
struct nfsd4_getdeviceinfo *gdevp);
__be32 (*encode_getdeviceinfo)(struct xdr_stream *xdr,
@@ -44,6 +45,9 @@ extern const struct nfsd4_layout_ops bl_layout_ops;
#ifdef CONFIG_NFSD_SCSILAYOUT
extern const struct nfsd4_layout_ops scsi_layout_ops;
#endif
+#ifdef CONFIG_NFSD_FLEXFILELAYOUT
+extern const struct nfsd4_layout_ops ff_layout_ops;
+#endif
__be32 nfsd4_preprocess_layout_stateid(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, stateid_t *stateid,
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 64053eadeb81..b95adf9a1595 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -345,6 +345,7 @@ struct nfs4_client {
u32 cl_exchange_flags;
/* number of rpc's in progress over an associated session: */
atomic_t cl_refcount;
+ struct nfs4_op_map cl_spo_must_allow;
/* for nfs41 callbacks */
/* We currently support a single back channel with a single slot */
diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c
index cd90878a76aa..d97338bb6a39 100644
--- a/fs/nfsd/stats.c
+++ b/fs/nfsd/stats.c
@@ -84,7 +84,6 @@ static int nfsd_proc_open(struct inode *inode, struct file *file)
}
static const struct file_operations nfsd_proc_fops = {
- .owner = THIS_MODULE,
.open = nfsd_proc_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 6fbd81ecb410..ba944123167b 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -1135,96 +1135,37 @@ nfsd_check_ignore_resizing(struct iattr *iap)
iap->ia_valid &= ~ATTR_SIZE;
}
-/*
- * Create a file (regular, directory, device, fifo); UNIX sockets
- * not yet implemented.
- * If the response fh has been verified, the parent directory should
- * already be locked. Note that the parent directory is left locked.
- *
- * N.B. Every call to nfsd_create needs an fh_put for _both_ fhp and resfhp
- */
+/* The parent directory should already be locked: */
__be32
-nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
+nfsd_create_locked(struct svc_rqst *rqstp, struct svc_fh *fhp,
char *fname, int flen, struct iattr *iap,
int type, dev_t rdev, struct svc_fh *resfhp)
{
- struct dentry *dentry, *dchild = NULL;
+ struct dentry *dentry, *dchild;
struct inode *dirp;
__be32 err;
__be32 err2;
int host_err;
- err = nfserr_perm;
- if (!flen)
- goto out;
- err = nfserr_exist;
- if (isdotent(fname, flen))
- goto out;
-
- err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
- if (err)
- goto out;
-
dentry = fhp->fh_dentry;
dirp = d_inode(dentry);
- err = nfserr_notdir;
- if (!dirp->i_op->lookup)
- goto out;
- /*
- * Check whether the response file handle has been verified yet.
- * If it has, the parent directory should already be locked.
- */
- if (!resfhp->fh_dentry) {
- host_err = fh_want_write(fhp);
- if (host_err)
- goto out_nfserr;
-
- /* called from nfsd_proc_mkdir, or possibly nfsd3_proc_create */
- fh_lock_nested(fhp, I_MUTEX_PARENT);
- dchild = lookup_one_len(fname, dentry, flen);
- host_err = PTR_ERR(dchild);
- if (IS_ERR(dchild))
- goto out_nfserr;
- err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
- if (err)
- goto out;
- } else {
- /* called from nfsd_proc_create */
- dchild = dget(resfhp->fh_dentry);
- if (!fhp->fh_locked) {
- /* not actually possible */
- printk(KERN_ERR
- "nfsd_create: parent %pd2 not locked!\n",
+ dchild = dget(resfhp->fh_dentry);
+ if (!fhp->fh_locked) {
+ WARN_ONCE(1, "nfsd_create: parent %pd2 not locked!\n",
dentry);
- err = nfserr_io;
- goto out;
- }
- }
- /*
- * Make sure the child dentry is still negative ...
- */
- err = nfserr_exist;
- if (d_really_is_positive(dchild)) {
- dprintk("nfsd_create: dentry %pd/%pd not negative!\n",
- dentry, dchild);
- goto out;
+ err = nfserr_io;
+ goto out;
}
+ err = nfsd_permission(rqstp, fhp->fh_export, dentry, NFSD_MAY_CREATE);
+ if (err)
+ goto out;
+
if (!(iap->ia_valid & ATTR_MODE))
iap->ia_mode = 0;
iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type;
- err = nfserr_inval;
- if (!S_ISREG(type) && !S_ISDIR(type) && !special_file(type)) {
- printk(KERN_WARNING "nfsd: bad file type %o in nfsd_create\n",
- type);
- goto out;
- }
-
- /*
- * Get the dir op function pointer.
- */
err = 0;
host_err = 0;
switch (type) {
@@ -1242,6 +1183,10 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
case S_IFSOCK:
host_err = vfs_mknod(dirp, dchild, iap->ia_mode, rdev);
break;
+ default:
+ printk(KERN_WARNING "nfsd: bad file type %o in nfsd_create\n",
+ type);
+ host_err = -EINVAL;
}
if (host_err < 0)
goto out_nfserr;
@@ -1251,7 +1196,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
/*
* nfsd_create_setattr already committed the child. Transactional
* filesystems had a chance to commit changes for both parent and
- * child * simultaneously making the following commit_metadata a
+	 * child simultaneously, making the following commit_metadata a
* noop.
*/
err2 = nfserrno(commit_metadata(fhp));
@@ -1263,8 +1208,7 @@ nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
if (!err)
err = fh_update(resfhp);
out:
- if (dchild && !IS_ERR(dchild))
- dput(dchild);
+ dput(dchild);
return err;
out_nfserr:
@@ -1272,6 +1216,50 @@ out_nfserr:
goto out;
}
+/*
+ * Create a filesystem object (regular, directory, special).
+ * Note that the parent directory is left locked.
+ *
+ * N.B. Every call to nfsd_create needs an fh_put for _both_ fhp and resfhp
+ */
+__be32
+nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ char *fname, int flen, struct iattr *iap,
+ int type, dev_t rdev, struct svc_fh *resfhp)
+{
+ struct dentry *dentry, *dchild = NULL;
+ struct inode *dirp;
+ __be32 err;
+ int host_err;
+
+ if (isdotent(fname, flen))
+ return nfserr_exist;
+
+ err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_NOP);
+ if (err)
+ return err;
+
+ dentry = fhp->fh_dentry;
+ dirp = d_inode(dentry);
+
+ host_err = fh_want_write(fhp);
+ if (host_err)
+ return nfserrno(host_err);
+
+ fh_lock_nested(fhp, I_MUTEX_PARENT);
+ dchild = lookup_one_len(fname, dentry, flen);
+ host_err = PTR_ERR(dchild);
+ if (IS_ERR(dchild))
+ return nfserrno(host_err);
+ err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
+ if (err) {
+ dput(dchild);
+ return err;
+ }
+ return nfsd_create_locked(rqstp, fhp, fname, flen, iap, type,
+ rdev, resfhp);
+}
+
#ifdef CONFIG_NFSD_V3
/*
@@ -1304,12 +1292,6 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
dentry = fhp->fh_dentry;
dirp = d_inode(dentry);
- /* Get all the sanity checks out of the way before
- * we lock the parent. */
- err = nfserr_notdir;
- if (!dirp->i_op->lookup)
- goto out;
-
host_err = fh_want_write(fhp);
if (host_err)
goto out_nfserr;
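[Editor's note: after the refactor above there are two entry points — nfsd_create() performs the fh_verify/lookup/lock preamble and then funnels into nfsd_create_locked(), while NFSv2's create path, which already holds the parent lock and has composed the result filehandle, calls nfsd_create_locked() directly. Roughly, with hypothetical callers:]

/* v3-style caller: parent not yet locked */
err = nfsd_create(rqstp, dirfhp, name, len, attrs, type, rdev, resfhp);

/* v2-style caller: dirfhp locked, resfhp already composed */
err = nfsd_create_locked(rqstp, dirfhp, name, len, attrs, type, rdev, resfhp);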
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index 2d573ec057f8..3cbb1b33777b 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -59,6 +59,9 @@ __be32 nfsd4_vfs_fallocate(struct svc_rqst *, struct svc_fh *,
__be32 nfsd4_clone_file_range(struct file *, u64, struct file *,
u64, u64);
#endif /* CONFIG_NFSD_V4 */
+__be32 nfsd_create_locked(struct svc_rqst *, struct svc_fh *,
+ char *name, int len, struct iattr *attrs,
+ int type, dev_t rdev, struct svc_fh *res);
__be32 nfsd_create(struct svc_rqst *, struct svc_fh *,
char *name, int len, struct iattr *attrs,
int type, dev_t rdev, struct svc_fh *res);
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index d9554813e58a..beea0c5edc51 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -59,6 +59,7 @@ struct nfsd4_compound_state {
struct nfsd4_session *session;
struct nfsd4_slot *slot;
int data_offset;
+ bool spo_must_allowed;
size_t iovlen;
u32 minorversion;
__be32 status;
@@ -403,6 +404,8 @@ struct nfsd4_exchange_id {
clientid_t clientid;
u32 seqid;
int spa_how;
+ u32 spo_must_enforce[3];
+ u32 spo_must_allow[3];
};
struct nfsd4_sequence {
@@ -654,6 +657,8 @@ set_change_info(struct nfsd4_change_info *cinfo, struct svc_fh *fhp)
}
+
+bool nfsd4_mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp);
int nfs4svc_encode_voidres(struct svc_rqst *, __be32 *, void *);
int nfs4svc_decode_compoundargs(struct svc_rqst *, __be32 *,
struct nfsd4_compoundargs *);
diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c
index 1a85d94f5b25..2c90e285d7c6 100644
--- a/fs/nilfs2/alloc.c
+++ b/fs/nilfs2/alloc.c
@@ -622,10 +622,10 @@ void nilfs_palloc_commit_free_entry(struct inode *inode,
lock = nilfs_mdt_bgl_lock(inode, group);
if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap))
- nilfs_warning(inode->i_sb, __func__,
- "entry number %llu already freed: ino=%lu",
- (unsigned long long)req->pr_entry_nr,
- (unsigned long)inode->i_ino);
+ nilfs_msg(inode->i_sb, KERN_WARNING,
+ "%s (ino=%lu): entry number %llu already freed",
+ __func__, inode->i_ino,
+ (unsigned long long)req->pr_entry_nr);
else
nilfs_palloc_group_desc_add_entries(desc, lock, 1);
@@ -663,10 +663,10 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
lock = nilfs_mdt_bgl_lock(inode, group);
if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap))
- nilfs_warning(inode->i_sb, __func__,
- "entry number %llu already freed: ino=%lu",
- (unsigned long long)req->pr_entry_nr,
- (unsigned long)inode->i_ino);
+ nilfs_msg(inode->i_sb, KERN_WARNING,
+ "%s (ino=%lu): entry number %llu already freed",
+ __func__, inode->i_ino,
+ (unsigned long long)req->pr_entry_nr);
else
nilfs_palloc_group_desc_add_entries(desc, lock, 1);
@@ -772,10 +772,10 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
do {
if (!nilfs_clear_bit_atomic(lock, group_offset,
bitmap)) {
- nilfs_warning(inode->i_sb, __func__,
- "entry number %llu already freed: ino=%lu",
- (unsigned long long)entry_nrs[j],
- (unsigned long)inode->i_ino);
+ nilfs_msg(inode->i_sb, KERN_WARNING,
+ "%s (ino=%lu): entry number %llu already freed",
+ __func__, inode->i_ino,
+ (unsigned long long)entry_nrs[j]);
} else {
n++;
}
@@ -816,12 +816,11 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
for (k = 0; k < nempties; k++) {
ret = nilfs_palloc_delete_entry_block(inode,
last_nrs[k]);
- if (ret && ret != -ENOENT) {
- nilfs_warning(inode->i_sb, __func__,
- "failed to delete block of entry %llu: ino=%lu, err=%d",
- (unsigned long long)last_nrs[k],
- (unsigned long)inode->i_ino, ret);
- }
+ if (ret && ret != -ENOENT)
+ nilfs_msg(inode->i_sb, KERN_WARNING,
+ "error %d deleting block that object (entry=%llu, ino=%lu) belongs to",
+ ret, (unsigned long long)last_nrs[k],
+ inode->i_ino);
}
desc_kaddr = kmap_atomic(desc_bh->b_page);
@@ -835,12 +834,10 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
if (nfree == nilfs_palloc_entries_per_group(inode)) {
ret = nilfs_palloc_delete_bitmap_block(inode, group);
- if (ret && ret != -ENOENT) {
- nilfs_warning(inode->i_sb, __func__,
- "failed to delete bitmap block of group %lu: ino=%lu, err=%d",
- group,
- (unsigned long)inode->i_ino, ret);
- }
+ if (ret && ret != -ENOENT)
+ nilfs_msg(inode->i_sb, KERN_WARNING,
+ "error %d deleting bitmap block of group=%lu, ino=%lu",
+ ret, group, inode->i_ino);
}
}
return 0;
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index f2a7877e0c8c..01fb1831ca25 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -41,8 +41,8 @@ static int nilfs_bmap_convert_error(struct nilfs_bmap *bmap,
struct inode *inode = bmap->b_inode;
if (err == -EINVAL) {
- nilfs_error(inode->i_sb, fname,
- "broken bmap (inode number=%lu)", inode->i_ino);
+ __nilfs_error(inode->i_sb, fname,
+ "broken bmap (inode number=%lu)", inode->i_ino);
err = -EIO;
}
return err;
diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h
index b6a4c8f93ac8..2b6ffbe5997a 100644
--- a/fs/nilfs2/bmap.h
+++ b/fs/nilfs2/bmap.h
@@ -22,7 +22,7 @@
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
-#include <linux/nilfs2_fs.h>
+#include <linux/nilfs2_ondisk.h> /* nilfs_binfo, nilfs_inode, etc */
#include "alloc.h"
#include "dat.h"
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 0576033699bc..d5c23da43513 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -41,7 +41,7 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
struct inode *inode = NILFS_BTNC_I(btnc);
struct buffer_head *bh;
- bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
+ bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
if (unlikely(!bh))
return NULL;
@@ -62,7 +62,7 @@ nilfs_btnode_create_block(struct address_space *btnc, __u64 blocknr)
}
int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
- sector_t pblocknr, int mode,
+ sector_t pblocknr, int mode, int mode_flags,
struct buffer_head **pbh, sector_t *submit_ptr)
{
struct buffer_head *bh;
@@ -70,7 +70,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
struct page *page;
int err;
- bh = nilfs_grab_buffer(inode, btnc, blocknr, 1 << BH_NILFS_Node);
+ bh = nilfs_grab_buffer(inode, btnc, blocknr, BIT(BH_NILFS_Node));
if (unlikely(!bh))
return -ENOMEM;
@@ -95,7 +95,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
}
}
- if (mode == READA) {
+ if (mode_flags & REQ_RAHEAD) {
if (pblocknr != *submit_ptr + 1 || !trylock_buffer(bh)) {
err = -EBUSY; /* internal code */
brelse(bh);
@@ -114,7 +114,7 @@ int nilfs_btnode_submit_block(struct address_space *btnc, __u64 blocknr,
bh->b_blocknr = pblocknr; /* set block address for read */
bh->b_end_io = end_buffer_read_sync;
get_bh(bh);
- submit_bh(mode, bh);
+ submit_bh(mode, mode_flags, bh);
bh->b_blocknr = blocknr; /* set back to the given block address */
*submit_ptr = pblocknr;
err = 0;
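
For readers following the block-layer API change here (not part of the patch): READ/READA used to travel as a single rw argument, whereas submit_bh() now takes the operation (REQ_OP_READ) and the flags (REQ_RAHEAD) separately. A condensed kernel-context sketch of the synchronous-read leg above — a fragment, not a standalone program; error handling and the readahead trylock path are omitted:

    /* Assumes a locked, !uptodate buffer, as at the submit point above. */
    bh->b_blocknr = pblocknr;          /* temporarily point at the disk block */
    bh->b_end_io = end_buffer_read_sync;
    get_bh(bh);                        /* the I/O holds its own reference */
    submit_bh(REQ_OP_READ, 0, bh);     /* op and flags are now separate */
    bh->b_blocknr = blocknr;           /* restore the cache key */
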
diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h
index 2cc1b80e18f7..4e8aaa1aeb65 100644
--- a/fs/nilfs2/btnode.h
+++ b/fs/nilfs2/btnode.h
@@ -43,7 +43,7 @@ void nilfs_btnode_cache_clear(struct address_space *);
struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
__u64 blocknr);
int nilfs_btnode_submit_block(struct address_space *, __u64, sector_t, int,
- struct buffer_head **, sector_t *);
+ int, struct buffer_head **, sector_t *);
void nilfs_btnode_delete(struct buffer_head *);
int nilfs_btnode_prepare_change_key(struct address_space *,
struct nilfs_btnode_chkey_ctxt *);
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index eccb1c89ccbb..2e315f9f2e51 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -339,12 +339,14 @@ static int nilfs_btree_node_lookup(const struct nilfs_btree_node *node,
* nilfs_btree_node_broken - verify consistency of btree node
* @node: btree node block to be examined
* @size: node size (in bytes)
+ * @inode: host inode of btree
* @blocknr: block number
*
* Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
*/
static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
- size_t size, sector_t blocknr)
+ size_t size, struct inode *inode,
+ sector_t blocknr)
{
int level, flags, nchildren;
int ret = 0;
@@ -358,9 +360,10 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
(flags & NILFS_BTREE_NODE_ROOT) ||
nchildren < 0 ||
nchildren > NILFS_BTREE_NODE_NCHILDREN_MAX(size))) {
- printk(KERN_CRIT "NILFS: bad btree node (blocknr=%llu): "
- "level = %d, flags = 0x%x, nchildren = %d\n",
- (unsigned long long)blocknr, level, flags, nchildren);
+ nilfs_msg(inode->i_sb, KERN_CRIT,
+ "bad btree node (ino=%lu, blocknr=%llu): level = %d, flags = 0x%x, nchildren = %d",
+ inode->i_ino, (unsigned long long)blocknr, level,
+ flags, nchildren);
ret = 1;
}
return ret;
@@ -369,12 +372,12 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
/**
* nilfs_btree_root_broken - verify consistency of btree root node
* @node: btree root node to be examined
- * @ino: inode number
+ * @inode: host inode of btree
*
* Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
*/
static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
- unsigned long ino)
+ struct inode *inode)
{
int level, flags, nchildren;
int ret = 0;
@@ -387,8 +390,9 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
level >= NILFS_BTREE_LEVEL_MAX ||
nchildren < 0 ||
nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
- pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
- ino, level, flags, nchildren);
+ nilfs_msg(inode->i_sb, KERN_CRIT,
+ "bad btree root (ino=%lu): level = %d, flags = 0x%x, nchildren = %d",
+ inode->i_ino, level, flags, nchildren);
ret = 1;
}
return ret;
@@ -396,13 +400,15 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
int nilfs_btree_broken_node_block(struct buffer_head *bh)
{
+ struct inode *inode;
int ret;
if (buffer_nilfs_checked(bh))
return 0;
+ inode = bh->b_page->mapping->host;
ret = nilfs_btree_node_broken((struct nilfs_btree_node *)bh->b_data,
- bh->b_size, bh->b_blocknr);
+ bh->b_size, inode, bh->b_blocknr);
if (likely(!ret))
set_buffer_nilfs_checked(bh);
return ret;
@@ -448,13 +454,15 @@ nilfs_btree_get_node(const struct nilfs_bmap *btree,
return node;
}
-static int
-nilfs_btree_bad_node(struct nilfs_btree_node *node, int level)
+static int nilfs_btree_bad_node(const struct nilfs_bmap *btree,
+ struct nilfs_btree_node *node, int level)
{
if (unlikely(nilfs_btree_node_get_level(node) != level)) {
dump_stack();
- printk(KERN_CRIT "NILFS: btree level mismatch: %d != %d\n",
- nilfs_btree_node_get_level(node), level);
+ nilfs_msg(btree->b_inode->i_sb, KERN_CRIT,
+ "btree level mismatch (ino=%lu): %d != %d",
+ btree->b_inode->i_ino,
+ nilfs_btree_node_get_level(node), level);
return 1;
}
return 0;
@@ -476,7 +484,8 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
sector_t submit_ptr = 0;
int ret;
- ret = nilfs_btnode_submit_block(btnc, ptr, 0, READ, &bh, &submit_ptr);
+ ret = nilfs_btnode_submit_block(btnc, ptr, 0, REQ_OP_READ, 0, &bh,
+ &submit_ptr);
if (ret) {
if (ret != -EEXIST)
return ret;
@@ -492,7 +501,8 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
n > 0 && i < ra->ncmax; n--, i++) {
ptr2 = nilfs_btree_node_get_ptr(ra->node, i, ra->ncmax);
- ret = nilfs_btnode_submit_block(btnc, ptr2, 0, READA,
+ ret = nilfs_btnode_submit_block(btnc, ptr2, 0,
+ REQ_OP_READ, REQ_RAHEAD,
&ra_bh, &submit_ptr);
if (likely(!ret || ret == -EEXIST))
brelse(ra_bh);
@@ -507,6 +517,9 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
out_no_wait:
if (!buffer_uptodate(bh)) {
+ nilfs_msg(btree->b_inode->i_sb, KERN_ERR,
+ "I/O error reading b-tree node block (ino=%lu, blocknr=%llu)",
+ btree->b_inode->i_ino, (unsigned long long)ptr);
brelse(bh);
return -EIO;
}
@@ -566,7 +579,7 @@ static int nilfs_btree_do_lookup(const struct nilfs_bmap *btree,
return ret;
node = nilfs_btree_get_nonroot_node(path, level);
- if (nilfs_btree_bad_node(node, level))
+ if (nilfs_btree_bad_node(btree, node, level))
return -EINVAL;
if (!found)
found = nilfs_btree_node_lookup(node, key, &index);
@@ -614,7 +627,7 @@ static int nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree,
if (ret < 0)
return ret;
node = nilfs_btree_get_nonroot_node(path, level);
- if (nilfs_btree_bad_node(node, level))
+ if (nilfs_btree_bad_node(btree, node, level))
return -EINVAL;
index = nilfs_btree_node_get_nchildren(node) - 1;
ptr = nilfs_btree_node_get_ptr(node, index, ncmax);
@@ -2070,8 +2083,10 @@ static int nilfs_btree_propagate(struct nilfs_bmap *btree,
ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0);
if (ret < 0) {
if (unlikely(ret == -ENOENT))
- printk(KERN_CRIT "%s: key = %llu, level == %d\n",
- __func__, (unsigned long long)key, level);
+ nilfs_msg(btree->b_inode->i_sb, KERN_CRIT,
+ "writing node/leaf block does not appear in b-tree (ino=%lu) at key=%llu, level=%d",
+ btree->b_inode->i_ino,
+ (unsigned long long)key, level);
goto out;
}
@@ -2108,12 +2123,11 @@ static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree,
if (level < NILFS_BTREE_LEVEL_NODE_MIN ||
level >= NILFS_BTREE_LEVEL_MAX) {
dump_stack();
- printk(KERN_WARNING
- "%s: invalid btree level: %d (key=%llu, ino=%lu, "
- "blocknr=%llu)\n",
- __func__, level, (unsigned long long)key,
- NILFS_BMAP_I(btree)->vfs_inode.i_ino,
- (unsigned long long)bh->b_blocknr);
+ nilfs_msg(btree->b_inode->i_sb, KERN_WARNING,
+ "invalid btree level: %d (key=%llu, ino=%lu, blocknr=%llu)",
+ level, (unsigned long long)key,
+ btree->b_inode->i_ino,
+ (unsigned long long)bh->b_blocknr);
return;
}
@@ -2392,8 +2406,7 @@ int nilfs_btree_init(struct nilfs_bmap *bmap)
__nilfs_btree_init(bmap);
- if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap),
- bmap->b_inode->i_ino))
+ if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap), bmap->b_inode))
ret = -EIO;
return ret;
}
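
One detail worth calling out (not part of the patch): nilfs_btree_broken_node_block() only receives a buffer_head, so the patch recovers the owning inode through the page cache instead of threading it through every caller. A sketch of that chain, assuming the buffer belongs to a btnode cache whose address_space host is the b-tree's inode, as set up by NILFS_BTNC_I(); bh_owner is a hypothetical helper name:

    /* buffer_head -> page -> address_space -> host inode */
    static struct inode *bh_owner(const struct buffer_head *bh)
    {
            return bh->b_page->mapping->host;
    }
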
diff --git a/fs/nilfs2/btree.h b/fs/nilfs2/btree.h
index df1a25faa83b..2184e47fa4bf 100644
--- a/fs/nilfs2/btree.h
+++ b/fs/nilfs2/btree.h
@@ -22,7 +22,7 @@
#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/list.h>
-#include <linux/nilfs2_fs.h>
+#include <linux/nilfs2_ondisk.h> /* nilfs_btree_node */
#include "btnode.h"
#include "bmap.h"
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index 8a3d3b65af3f..a15a1601e931 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -21,7 +21,6 @@
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
-#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "cpfile.h"
@@ -332,9 +331,9 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
int ret, ncps, nicps, nss, count, i;
if (unlikely(start == 0 || start > end)) {
- printk(KERN_ERR "%s: invalid range of checkpoint numbers: "
- "[%llu, %llu)\n", __func__,
- (unsigned long long)start, (unsigned long long)end);
+ nilfs_msg(cpfile->i_sb, KERN_ERR,
+ "cannot delete checkpoints: invalid range [%llu, %llu)",
+ (unsigned long long)start, (unsigned long long)end);
return -EINVAL;
}
@@ -386,9 +385,9 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
cpfile, cno);
if (ret == 0)
continue;
- printk(KERN_ERR
- "%s: cannot delete block\n",
- __func__);
+ nilfs_msg(cpfile->i_sb, KERN_ERR,
+ "error %d deleting checkpoint block",
+ ret);
break;
}
}
@@ -991,14 +990,12 @@ int nilfs_cpfile_read(struct super_block *sb, size_t cpsize,
int err;
if (cpsize > sb->s_blocksize) {
- printk(KERN_ERR
- "NILFS: too large checkpoint size: %zu bytes.\n",
- cpsize);
+ nilfs_msg(sb, KERN_ERR,
+ "too large checkpoint size: %zu bytes", cpsize);
return -EINVAL;
} else if (cpsize < NILFS_MIN_CHECKPOINT_SIZE) {
- printk(KERN_ERR
- "NILFS: too small checkpoint size: %zu bytes.\n",
- cpsize);
+ nilfs_msg(sb, KERN_ERR,
+ "too small checkpoint size: %zu bytes", cpsize);
return -EINVAL;
}
diff --git a/fs/nilfs2/cpfile.h b/fs/nilfs2/cpfile.h
index 0249744ae234..6eca972f9673 100644
--- a/fs/nilfs2/cpfile.h
+++ b/fs/nilfs2/cpfile.h
@@ -21,7 +21,8 @@
#include <linux/fs.h>
#include <linux/buffer_head.h>
-#include <linux/nilfs2_fs.h>
+#include <linux/nilfs2_api.h> /* nilfs_cpstat */
+#include <linux/nilfs2_ondisk.h> /* nilfs_inode, nilfs_checkpoint */
int nilfs_cpfile_get_checkpoint(struct inode *, __u64, int,
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index 7367610ea807..dffedb2f8817 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -349,10 +349,11 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
kaddr = kmap_atomic(entry_bh->b_page);
entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
- printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
- (unsigned long long)vblocknr,
- (unsigned long long)le64_to_cpu(entry->de_start),
- (unsigned long long)le64_to_cpu(entry->de_end));
+ nilfs_msg(dat->i_sb, KERN_CRIT,
+ "%s: invalid vblocknr = %llu, [%llu, %llu)",
+ __func__, (unsigned long long)vblocknr,
+ (unsigned long long)le64_to_cpu(entry->de_start),
+ (unsigned long long)le64_to_cpu(entry->de_end));
kunmap_atomic(kaddr);
brelse(entry_bh);
return -EINVAL;
@@ -479,14 +480,12 @@ int nilfs_dat_read(struct super_block *sb, size_t entry_size,
int err;
if (entry_size > sb->s_blocksize) {
- printk(KERN_ERR
- "NILFS: too large DAT entry size: %zu bytes.\n",
- entry_size);
+ nilfs_msg(sb, KERN_ERR, "too large DAT entry size: %zu bytes",
+ entry_size);
return -EINVAL;
} else if (entry_size < NILFS_MIN_DAT_ENTRY_SIZE) {
- printk(KERN_ERR
- "NILFS: too small DAT entry size: %zu bytes.\n",
- entry_size);
+ nilfs_msg(sb, KERN_ERR, "too small DAT entry size: %zu bytes",
+ entry_size);
return -EINVAL;
}
diff --git a/fs/nilfs2/dat.h b/fs/nilfs2/dat.h
index abbfdabcabea..57dc6cf466d0 100644
--- a/fs/nilfs2/dat.h
+++ b/fs/nilfs2/dat.h
@@ -22,6 +22,7 @@
#include <linux/types.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
+#include <linux/nilfs2_ondisk.h> /* nilfs_inode, nilfs_checkpoint */
struct nilfs_palloc_req;
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index e506f4f7120a..908ebbf0ac7e 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -42,6 +42,28 @@
#include "nilfs.h"
#include "page.h"
+static inline unsigned int nilfs_rec_len_from_disk(__le16 dlen)
+{
+ unsigned int len = le16_to_cpu(dlen);
+
+#if (PAGE_SIZE >= 65536)
+ if (len == NILFS_MAX_REC_LEN)
+ return 1 << 16;
+#endif
+ return len;
+}
+
+static inline __le16 nilfs_rec_len_to_disk(unsigned int len)
+{
+#if (PAGE_SIZE >= 65536)
+ if (len == (1 << 16))
+ return cpu_to_le16(NILFS_MAX_REC_LEN);
+
+ BUG_ON(len > (1 << 16));
+#endif
+ return cpu_to_le16(len);
+}
+
/*
* nilfs uses block-sized chunks. Arguably, sector-sized ones would be
* more robust, but we have what we have
@@ -140,10 +162,9 @@ out:
/* Too bad, we had an error */
Ebadsize:
- nilfs_error(sb, "nilfs_check_page",
+ nilfs_error(sb,
"size of directory #%lu is not a multiple of chunk size",
- dir->i_ino
- );
+ dir->i_ino);
goto fail;
Eshort:
error = "rec_len is smaller than minimal";
@@ -157,19 +178,18 @@ Enamelen:
Espan:
error = "directory entry across blocks";
bad_entry:
- nilfs_error(sb, "nilfs_check_page", "bad entry in directory #%lu: %s - "
- "offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
- dir->i_ino, error, (page->index<<PAGE_SHIFT)+offs,
- (unsigned long) le64_to_cpu(p->inode),
+ nilfs_error(sb,
+ "bad entry in directory #%lu: %s - offset=%lu, inode=%lu, rec_len=%d, name_len=%d",
+ dir->i_ino, error, (page->index << PAGE_SHIFT) + offs,
+ (unsigned long)le64_to_cpu(p->inode),
rec_len, p->name_len);
goto fail;
Eend:
p = (struct nilfs_dir_entry *)(kaddr + offs);
- nilfs_error(sb, "nilfs_check_page",
- "entry in directory #%lu spans the page boundary"
- "offset=%lu, inode=%lu",
- dir->i_ino, (page->index<<PAGE_SHIFT)+offs,
- (unsigned long) le64_to_cpu(p->inode));
+ nilfs_error(sb,
+ "entry in directory #%lu spans the page boundary offset=%lu, inode=%lu",
+ dir->i_ino, (page->index << PAGE_SHIFT) + offs,
+ (unsigned long)le64_to_cpu(p->inode));
fail:
SetPageError(page);
return false;
@@ -267,8 +287,7 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
struct page *page = nilfs_get_page(inode, n);
if (IS_ERR(page)) {
- nilfs_error(sb, __func__, "bad page in #%lu",
- inode->i_ino);
+ nilfs_error(sb, "bad page in #%lu", inode->i_ino);
ctx->pos += PAGE_SIZE - offset;
return -EIO;
}
@@ -278,8 +297,7 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx)
NILFS_DIR_REC_LEN(1);
for ( ; (char *)de <= limit; de = nilfs_next_entry(de)) {
if (de->rec_len == 0) {
- nilfs_error(sb, __func__,
- "zero-length directory entry");
+ nilfs_error(sb, "zero-length directory entry");
nilfs_put_page(page);
return -EIO;
}
@@ -345,7 +363,7 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
kaddr += nilfs_last_byte(dir, n) - reclen;
while ((char *) de <= kaddr) {
if (de->rec_len == 0) {
- nilfs_error(dir->i_sb, __func__,
+ nilfs_error(dir->i_sb,
"zero-length directory entry");
nilfs_put_page(page);
goto out;
@@ -360,7 +378,7 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr,
n = 0;
/* next page is past the blocks we've got */
if (unlikely(n > (dir->i_blocks >> (PAGE_SHIFT - 9)))) {
- nilfs_error(dir->i_sb, __func__,
+ nilfs_error(dir->i_sb,
"dir %lu size %lld exceeds block count %llu",
dir->i_ino, dir->i_size,
(unsigned long long)dir->i_blocks);
@@ -469,7 +487,7 @@ int nilfs_add_link(struct dentry *dentry, struct inode *inode)
goto got_it;
}
if (de->rec_len == 0) {
- nilfs_error(dir->i_sb, __func__,
+ nilfs_error(dir->i_sb,
"zero-length directory entry");
err = -EIO;
goto out_unlock;
@@ -541,7 +559,7 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
while ((char *)de < (char *)dir) {
if (de->rec_len == 0) {
- nilfs_error(inode->i_sb, __func__,
+ nilfs_error(inode->i_sb,
"zero-length directory entry");
err = -EIO;
goto out;
@@ -628,7 +646,7 @@ int nilfs_empty_dir(struct inode *inode)
while ((char *)de <= kaddr) {
if (de->rec_len == 0) {
- nilfs_error(inode->i_sb, __func__,
+ nilfs_error(inode->i_sb,
"zero-length directory entry (kaddr=%p, de=%p)",
kaddr, de);
goto not_empty;
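
The two helpers moved into dir.c at the top of this file's hunks exist because a directory chunk can be a full 64 KiB on large-page systems, and 65536 does not fit in the 16-bit on-disk rec_len. A runnable userspace sketch of the round-trip (not part of the patch), assuming NILFS_MAX_REC_LEN is the all-ones 16-bit value — mirroring ext2's equivalent scheme — and ignoring the little-endian conversion the real helpers do:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_MAX_REC_LEN 65535u    /* stand-in for NILFS_MAX_REC_LEN */

    static unsigned int rec_len_from_disk(uint16_t dlen)
    {
            return dlen == DEMO_MAX_REC_LEN ? 1u << 16 : dlen;
    }

    static uint16_t rec_len_to_disk(unsigned int len)
    {
            assert(len <= 1u << 16);   /* the real helper BUG()s past this */
            return len == 1u << 16 ? DEMO_MAX_REC_LEN : (uint16_t)len;
    }

    int main(void)
    {
            unsigned int chunk = 1u << 16;   /* one 64 KiB directory chunk */

            assert(rec_len_from_disk(rec_len_to_disk(chunk)) == chunk);
            printf("65536 stored on disk as %u\n", rec_len_to_disk(chunk));
            return 0;
    }
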
diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c
index 251a44928405..96e3ed0d9652 100644
--- a/fs/nilfs2/direct.c
+++ b/fs/nilfs2/direct.c
@@ -337,14 +337,16 @@ static int nilfs_direct_assign(struct nilfs_bmap *bmap,
key = nilfs_bmap_data_get_key(bmap, *bh);
if (unlikely(key > NILFS_DIRECT_KEY_MAX)) {
- printk(KERN_CRIT "%s: invalid key: %llu\n", __func__,
- (unsigned long long)key);
+ nilfs_msg(bmap->b_inode->i_sb, KERN_CRIT,
+ "%s (ino=%lu): invalid key: %llu", __func__,
+ bmap->b_inode->i_ino, (unsigned long long)key);
return -EINVAL;
}
ptr = nilfs_direct_get_ptr(bmap, key);
if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) {
- printk(KERN_CRIT "%s: invalid pointer: %llu\n", __func__,
- (unsigned long long)ptr);
+ nilfs_msg(bmap->b_inode->i_sb, KERN_CRIT,
+ "%s (ino=%lu): invalid pointer: %llu", __func__,
+ bmap->b_inode->i_ino, (unsigned long long)ptr);
return -EINVAL;
}
diff --git a/fs/nilfs2/direct.h b/fs/nilfs2/direct.h
index 3015a6e78724..cfe85e848bba 100644
--- a/fs/nilfs2/direct.h
+++ b/fs/nilfs2/direct.h
@@ -24,16 +24,6 @@
#include "bmap.h"
-/**
- * struct nilfs_direct_node - direct node
- * @dn_flags: flags
- * @dn_pad: padding
- */
-struct nilfs_direct_node {
- __u8 dn_flags;
- __u8 pad[7];
-};
-
#define NILFS_DIRECT_NBLOCKS (NILFS_BMAP_SIZE / sizeof(__le64) - 1)
#define NILFS_DIRECT_KEY_MIN 0
#define NILFS_DIRECT_KEY_MAX (NILFS_DIRECT_NBLOCKS - 1)
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index 693aded72498..853a831dcde0 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -101,7 +101,7 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
bh->b_blocknr = pbn;
bh->b_end_io = end_buffer_read_sync;
get_bh(bh);
- submit_bh(READ, bh);
+ submit_bh(REQ_OP_READ, 0, bh);
if (vbn)
bh->b_blocknr = vbn;
out:
@@ -138,7 +138,8 @@ int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
int ret;
ret = nilfs_btnode_submit_block(&NILFS_I(inode)->i_btnode_cache,
- vbn ? : pbn, pbn, READ, out_bh, &pbn);
+ vbn ? : pbn, pbn, REQ_OP_READ, 0,
+ out_bh, &pbn);
if (ret == -EEXIST) /* internal code (cache hit) */
ret = 0;
return ret;
@@ -147,8 +148,15 @@ int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh)
{
wait_on_buffer(bh);
- if (!buffer_uptodate(bh))
+ if (!buffer_uptodate(bh)) {
+ struct inode *inode = bh->b_page->mapping->host;
+
+ nilfs_msg(inode->i_sb, KERN_ERR,
+ "I/O error reading %s block for GC (ino=%lu, vblocknr=%llu)",
+ buffer_nilfs_node(bh) ? "node" : "data",
+ inode->i_ino, (unsigned long long)bh->b_blocknr);
return -EIO;
+ }
if (buffer_dirty(bh))
return -EEXIST;
diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
index 1d2b1805327a..b8fa45c20c63 100644
--- a/fs/nilfs2/ifile.c
+++ b/fs/nilfs2/ifile.c
@@ -145,15 +145,14 @@ int nilfs_ifile_get_inode_block(struct inode *ifile, ino_t ino,
int err;
if (unlikely(!NILFS_VALID_INODE(sb, ino))) {
- nilfs_error(sb, __func__, "bad inode number: %lu",
- (unsigned long) ino);
+ nilfs_error(sb, "bad inode number: %lu", (unsigned long)ino);
return -EINVAL;
}
err = nilfs_palloc_get_entry_block(ifile, ino, 0, out_bh);
if (unlikely(err))
- nilfs_warning(sb, __func__, "unable to read inode: %lu",
- (unsigned long) ino);
+ nilfs_msg(sb, KERN_WARNING, "error %d reading inode: ino=%lu",
+ err, (unsigned long)ino);
return err;
}
diff --git a/fs/nilfs2/ifile.h b/fs/nilfs2/ifile.h
index 23ad2f091e76..188b94fe0ec5 100644
--- a/fs/nilfs2/ifile.h
+++ b/fs/nilfs2/ifile.h
@@ -23,7 +23,6 @@
#include <linux/fs.h>
#include <linux/buffer_head.h>
-#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "alloc.h"
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index a0ebdb17e912..af04f553d7c9 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -112,13 +112,10 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff,
* However, the page having this block must
* be locked in this case.
*/
- printk(KERN_WARNING
- "nilfs_get_block: a race condition "
- "while inserting a data block. "
- "(inode number=%lu, file block "
- "offset=%llu)\n",
- inode->i_ino,
- (unsigned long long)blkoff);
+ nilfs_msg(inode->i_sb, KERN_WARNING,
+ "%s (ino=%lu): a race condition while inserting a data block at offset=%llu",
+ __func__, inode->i_ino,
+ (unsigned long long)blkoff);
err = 0;
}
nilfs_transaction_abort(inode->i_sb);
@@ -359,7 +356,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
root = NILFS_I(dir)->i_root;
ii = NILFS_I(inode);
- ii->i_state = 1 << NILFS_I_NEW;
+ ii->i_state = BIT(NILFS_I_NEW);
ii->i_root = root;
err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
@@ -558,7 +555,7 @@ static int nilfs_iget_set(struct inode *inode, void *opaque)
inode->i_ino = args->ino;
if (args->for_gc) {
- NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
+ NILFS_I(inode)->i_state = BIT(NILFS_I_GCINODE);
NILFS_I(inode)->i_cno = args->cno;
NILFS_I(inode)->i_root = NULL;
} else {
@@ -726,9 +723,9 @@ repeat:
goto repeat;
failed:
- nilfs_warning(ii->vfs_inode.i_sb, __func__,
- "failed to truncate bmap (ino=%lu, err=%d)",
- ii->vfs_inode.i_ino, ret);
+ nilfs_msg(ii->vfs_inode.i_sb, KERN_WARNING,
+ "error %d truncating bmap (ino=%lu)", ret,
+ ii->vfs_inode.i_ino);
}
void nilfs_truncate(struct inode *inode)
@@ -939,9 +936,9 @@ int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
* This will happen when somebody is freeing
* this inode.
*/
- nilfs_warning(inode->i_sb, __func__,
- "cannot get inode (ino=%lu)",
- inode->i_ino);
+ nilfs_msg(inode->i_sb, KERN_WARNING,
+ "cannot set file dirty (ino=%lu): the file is being freed",
+ inode->i_ino);
spin_unlock(&nilfs->ns_inode_lock);
return -EINVAL; /*
* NILFS_I_DIRTY may remain for
@@ -962,8 +959,9 @@ int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
err = nilfs_load_inode_block(inode, &ibh);
if (unlikely(err)) {
- nilfs_warning(inode->i_sb, __func__,
- "failed to reget inode block.");
+ nilfs_msg(inode->i_sb, KERN_WARNING,
+ "cannot mark inode dirty (ino=%lu): error %d loading inode block",
+ inode->i_ino, err);
return err;
}
nilfs_update_inode(inode, ibh, flags);
@@ -989,8 +987,8 @@ void nilfs_dirty_inode(struct inode *inode, int flags)
struct nilfs_mdt_info *mdi = NILFS_MDT(inode);
if (is_bad_inode(inode)) {
- nilfs_warning(inode->i_sb, __func__,
- "tried to mark bad_inode dirty. ignored.");
+ nilfs_msg(inode->i_sb, KERN_WARNING,
+ "tried to mark bad_inode dirty. ignored.");
dump_stack();
return;
}
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 358b57e2cdf9..f1d7989459fd 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -25,7 +25,6 @@
#include <linux/compat.h> /* compat_ptr() */
#include <linux/mount.h> /* mnt_want_write_file(), mnt_drop_write_file() */
#include <linux/buffer_head.h>
-#include <linux/nilfs2_fs.h>
#include "nilfs.h"
#include "segment.h"
#include "bmap.h"
@@ -584,27 +583,25 @@ static int nilfs_ioctl_move_inode_block(struct inode *inode,
if (unlikely(ret < 0)) {
if (ret == -ENOENT)
- printk(KERN_CRIT
- "%s: invalid virtual block address (%s): "
- "ino=%llu, cno=%llu, offset=%llu, "
- "blocknr=%llu, vblocknr=%llu\n",
- __func__, vdesc->vd_flags ? "node" : "data",
- (unsigned long long)vdesc->vd_ino,
- (unsigned long long)vdesc->vd_cno,
- (unsigned long long)vdesc->vd_offset,
- (unsigned long long)vdesc->vd_blocknr,
- (unsigned long long)vdesc->vd_vblocknr);
+ nilfs_msg(inode->i_sb, KERN_CRIT,
+ "%s: invalid virtual block address (%s): ino=%llu, cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu",
+ __func__, vdesc->vd_flags ? "node" : "data",
+ (unsigned long long)vdesc->vd_ino,
+ (unsigned long long)vdesc->vd_cno,
+ (unsigned long long)vdesc->vd_offset,
+ (unsigned long long)vdesc->vd_blocknr,
+ (unsigned long long)vdesc->vd_vblocknr);
return ret;
}
if (unlikely(!list_empty(&bh->b_assoc_buffers))) {
- printk(KERN_CRIT "%s: conflicting %s buffer: ino=%llu, "
- "cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu\n",
- __func__, vdesc->vd_flags ? "node" : "data",
- (unsigned long long)vdesc->vd_ino,
- (unsigned long long)vdesc->vd_cno,
- (unsigned long long)vdesc->vd_offset,
- (unsigned long long)vdesc->vd_blocknr,
- (unsigned long long)vdesc->vd_vblocknr);
+ nilfs_msg(inode->i_sb, KERN_CRIT,
+ "%s: conflicting %s buffer: ino=%llu, cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu",
+ __func__, vdesc->vd_flags ? "node" : "data",
+ (unsigned long long)vdesc->vd_ino,
+ (unsigned long long)vdesc->vd_cno,
+ (unsigned long long)vdesc->vd_offset,
+ (unsigned long long)vdesc->vd_blocknr,
+ (unsigned long long)vdesc->vd_vblocknr);
brelse(bh);
return -EEXIST;
}
@@ -854,8 +851,8 @@ int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs,
return 0;
failed:
- printk(KERN_ERR "NILFS: GC failed during preparation: %s: err=%d\n",
- msg, ret);
+ nilfs_msg(nilfs->ns_sb, KERN_ERR, "error %d preparing GC: %s", ret,
+ msg);
return ret;
}
@@ -963,10 +960,11 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
}
ret = nilfs_ioctl_move_blocks(inode->i_sb, &argv[0], kbufs[0]);
- if (ret < 0)
- printk(KERN_ERR "NILFS: GC failed during preparation: "
- "cannot read source blocks: err=%d\n", ret);
- else {
+ if (ret < 0) {
+ nilfs_msg(inode->i_sb, KERN_ERR,
+ "error %d preparing GC: cannot read source blocks",
+ ret);
+ } else {
if (nilfs_sb_need_update(nilfs))
set_nilfs_discontinued(nilfs);
ret = nilfs_clean_segments(inode->i_sb, argv, kbufs);
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 3417d859a03c..d56d3a5bea88 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -121,7 +121,7 @@ static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
static int
nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
- int mode, struct buffer_head **out_bh)
+ int mode, int mode_flags, struct buffer_head **out_bh)
{
struct buffer_head *bh;
__u64 blknum = 0;
@@ -135,7 +135,7 @@ nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
if (buffer_uptodate(bh))
goto out;
- if (mode == READA) {
+ if (mode_flags & REQ_RAHEAD) {
if (!trylock_buffer(bh)) {
ret = -EBUSY;
goto failed_bh;
@@ -157,7 +157,7 @@ nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
bh->b_end_io = end_buffer_read_sync;
get_bh(bh);
- submit_bh(mode, bh);
+ submit_bh(mode, mode_flags, bh);
ret = 0;
trace_nilfs2_mdt_submit_block(inode, inode->i_ino, blkoff, mode);
@@ -181,7 +181,7 @@ static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
int i, nr_ra_blocks = NILFS_MDT_MAX_RA_BLOCKS;
int err;
- err = nilfs_mdt_submit_block(inode, block, READ, &first_bh);
+ err = nilfs_mdt_submit_block(inode, block, REQ_OP_READ, 0, &first_bh);
if (err == -EEXIST) /* internal code */
goto out;
@@ -191,7 +191,8 @@ static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
if (readahead) {
blkoff = block + 1;
for (i = 0; i < nr_ra_blocks; i++, blkoff++) {
- err = nilfs_mdt_submit_block(inode, blkoff, READA, &bh);
+ err = nilfs_mdt_submit_block(inode, blkoff, REQ_OP_READ,
+ REQ_RAHEAD, &bh);
if (likely(!err || err == -EEXIST))
brelse(bh);
else if (err != -EBUSY)
@@ -206,8 +207,12 @@ static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
out_no_wait:
err = -EIO;
- if (!buffer_uptodate(first_bh))
+ if (!buffer_uptodate(first_bh)) {
+ nilfs_msg(inode->i_sb, KERN_ERR,
+ "I/O error reading meta-data file (ino=%lu, block-offset=%lu)",
+ inode->i_ino, block);
goto failed_bh;
+ }
out:
*out_bh = first_bh;
return 0;
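
The REQ_RAHEAD test replaces the old `mode == READA` check with the same semantics: readahead is opportunistic and must never block. A sketch of the contract (kernel fragment, condensed from the two branches above; the surrounding unmap/cleanup path is omitted):

    if (mode_flags & REQ_RAHEAD) {
            if (!trylock_buffer(bh)) {      /* contended: skip, don't wait */
                    brelse(bh);
                    return -EBUSY;          /* internal code, not an error */
            }
    } else {
            lock_buffer(bh);                /* ordinary reads may sleep */
    }
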
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 1ec8ae5995a5..dbcf1dc93a51 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -283,9 +283,9 @@ static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry)
goto out;
if (!inode->i_nlink) {
- nilfs_warning(inode->i_sb, __func__,
- "deleting nonexistent file (%lu), %d",
- inode->i_ino, inode->i_nlink);
+ nilfs_msg(inode->i_sb, KERN_WARNING,
+ "deleting nonexistent file (ino=%lu), %d",
+ inode->i_ino, inode->i_nlink);
set_nlink(inode, 1);
}
err = nilfs_delete_entry(de, page);
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index b1d48bc0532d..33f8c8fc96e8 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -23,7 +23,8 @@
#include <linux/buffer_head.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
-#include <linux/nilfs2_fs.h>
+#include <linux/nilfs2_api.h>
+#include <linux/nilfs2_ondisk.h>
#include "the_nilfs.h"
#include "bmap.h"
@@ -119,20 +120,19 @@ enum {
/*
* Macros to check inode numbers
*/
-#define NILFS_MDT_INO_BITS \
- ((unsigned int)(1 << NILFS_DAT_INO | 1 << NILFS_CPFILE_INO | \
- 1 << NILFS_SUFILE_INO | 1 << NILFS_IFILE_INO | \
- 1 << NILFS_ATIME_INO | 1 << NILFS_SKETCH_INO))
+#define NILFS_MDT_INO_BITS \
+ (BIT(NILFS_DAT_INO) | BIT(NILFS_CPFILE_INO) | \
+ BIT(NILFS_SUFILE_INO) | BIT(NILFS_IFILE_INO) | \
+ BIT(NILFS_ATIME_INO) | BIT(NILFS_SKETCH_INO))
-#define NILFS_SYS_INO_BITS \
- ((unsigned int)(1 << NILFS_ROOT_INO) | NILFS_MDT_INO_BITS)
+#define NILFS_SYS_INO_BITS (BIT(NILFS_ROOT_INO) | NILFS_MDT_INO_BITS)
#define NILFS_FIRST_INO(sb) (((struct the_nilfs *)sb->s_fs_info)->ns_first_ino)
#define NILFS_MDT_INODE(sb, ino) \
- ((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & (1 << (ino))))
+ ((ino) < NILFS_FIRST_INO(sb) && (NILFS_MDT_INO_BITS & BIT(ino)))
#define NILFS_VALID_INODE(sb, ino) \
- ((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & (1 << (ino))))
+ ((ino) >= NILFS_FIRST_INO(sb) || (NILFS_SYS_INO_BITS & BIT(ino)))
/**
* struct nilfs_transaction_info: context information for synchronization
@@ -299,10 +299,36 @@ static inline int nilfs_mark_inode_dirty_sync(struct inode *inode)
/* super.c */
extern struct inode *nilfs_alloc_inode(struct super_block *);
extern void nilfs_destroy_inode(struct inode *);
+
extern __printf(3, 4)
-void nilfs_error(struct super_block *, const char *, const char *, ...);
+void __nilfs_msg(struct super_block *sb, const char *level,
+ const char *fmt, ...);
extern __printf(3, 4)
-void nilfs_warning(struct super_block *, const char *, const char *, ...);
+void __nilfs_error(struct super_block *sb, const char *function,
+ const char *fmt, ...);
+
+#ifdef CONFIG_PRINTK
+
+#define nilfs_msg(sb, level, fmt, ...) \
+ __nilfs_msg(sb, level, fmt, ##__VA_ARGS__)
+#define nilfs_error(sb, fmt, ...) \
+ __nilfs_error(sb, __func__, fmt, ##__VA_ARGS__)
+
+#else
+
+#define nilfs_msg(sb, level, fmt, ...) \
+ do { \
+ no_printk(fmt, ##__VA_ARGS__); \
+ (void)(sb); \
+ } while (0)
+#define nilfs_error(sb, fmt, ...) \
+ do { \
+ no_printk(fmt, ##__VA_ARGS__); \
+ __nilfs_error(sb, "", " "); \
+ } while (0)
+
+#endif /* CONFIG_PRINTK */
+
extern struct nilfs_super_block *
nilfs_read_super_block(struct super_block *, u64, int, struct buffer_head **);
extern int nilfs_store_magic_and_option(struct super_block *,
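
Two things fall out of this macro block (aside, not part of the patch). First, nilfs_error() now splices in __func__ itself, which is why every call site in this patch drops the explicit function-name argument. Second, with CONFIG_PRINTK disabled, nilfs_msg() degrades to no_printk() — format-checked but emitting nothing — while nilfs_error() still calls __nilfs_error() with empty strings, so the ERROR_FS bookkeeping and the errors=remount-ro/panic behavior survive without console output. The two call shapes after the change, taken from elsewhere in this diff (kernel fragments):

    nilfs_error(sb, "bad inode number: %lu", (unsigned long)ino);
    nilfs_msg(sb, KERN_WARNING, "error %d reading inode: ino=%lu",
              err, (unsigned long)ino);
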
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index d97ba5f11b77..f11a3ad2df0c 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -30,9 +30,9 @@
#include "mdt.h"
-#define NILFS_BUFFER_INHERENT_BITS \
- ((1UL << BH_Uptodate) | (1UL << BH_Mapped) | (1UL << BH_NILFS_Node) | \
- (1UL << BH_NILFS_Volatile) | (1UL << BH_NILFS_Checked))
+#define NILFS_BUFFER_INHERENT_BITS \
+ (BIT(BH_Uptodate) | BIT(BH_Mapped) | BIT(BH_NILFS_Node) | \
+ BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked))
static struct buffer_head *
__nilfs_get_page_block(struct page *page, unsigned long block, pgoff_t index,
@@ -85,9 +85,9 @@ void nilfs_forget_buffer(struct buffer_head *bh)
{
struct page *page = bh->b_page;
const unsigned long clear_bits =
- (1 << BH_Uptodate | 1 << BH_Dirty | 1 << BH_Mapped |
- 1 << BH_Async_Write | 1 << BH_NILFS_Volatile |
- 1 << BH_NILFS_Checked | 1 << BH_NILFS_Redirected);
+ (BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
+ BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
+ BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));
lock_buffer(bh);
set_mask_bits(&bh->b_state, clear_bits, 0);
@@ -124,17 +124,17 @@ void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
dbh->b_bdev = sbh->b_bdev;
bh = dbh;
- bits = sbh->b_state & ((1UL << BH_Uptodate) | (1UL << BH_Mapped));
+ bits = sbh->b_state & (BIT(BH_Uptodate) | BIT(BH_Mapped));
while ((bh = bh->b_this_page) != dbh) {
lock_buffer(bh);
bits &= bh->b_state;
unlock_buffer(bh);
}
- if (bits & (1UL << BH_Uptodate))
+ if (bits & BIT(BH_Uptodate))
SetPageUptodate(dpage);
else
ClearPageUptodate(dpage);
- if (bits & (1UL << BH_Mapped))
+ if (bits & BIT(BH_Mapped))
SetPageMappedToDisk(dpage);
else
ClearPageMappedToDisk(dpage);
@@ -215,7 +215,7 @@ static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
create_empty_buffers(dst, sbh->b_size, 0);
if (copy_dirty)
- mask |= (1UL << BH_Dirty);
+ mask |= BIT(BH_Dirty);
dbh = dbufs = page_buffers(dst);
do {
@@ -403,11 +403,10 @@ void nilfs_clear_dirty_page(struct page *page, bool silent)
BUG_ON(!PageLocked(page));
- if (!silent) {
- nilfs_warning(sb, __func__,
- "discard page: offset %lld, ino %lu",
- page_offset(page), inode->i_ino);
- }
+ if (!silent)
+ nilfs_msg(sb, KERN_WARNING,
+ "discard dirty page: offset=%lld, ino=%lu",
+ page_offset(page), inode->i_ino);
ClearPageUptodate(page);
ClearPageMappedToDisk(page);
@@ -415,18 +414,18 @@ void nilfs_clear_dirty_page(struct page *page, bool silent)
if (page_has_buffers(page)) {
struct buffer_head *bh, *head;
const unsigned long clear_bits =
- (1 << BH_Uptodate | 1 << BH_Dirty | 1 << BH_Mapped |
- 1 << BH_Async_Write | 1 << BH_NILFS_Volatile |
- 1 << BH_NILFS_Checked | 1 << BH_NILFS_Redirected);
+ (BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
+ BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
+ BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));
bh = head = page_buffers(page);
do {
lock_buffer(bh);
- if (!silent) {
- nilfs_warning(sb, __func__,
- "discard block %llu, size %zu",
- (u64)bh->b_blocknr, bh->b_size);
- }
+ if (!silent)
+ nilfs_msg(sb, KERN_WARNING,
+ "discard dirty block: blocknr=%llu, size=%zu",
+ (u64)bh->b_blocknr, bh->b_size);
+
set_mask_bits(&bh->b_state, clear_bits, 0);
unlock_buffer(bh);
} while (bh = bh->b_this_page, bh != head);
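
BIT(nr) is simply 1UL << nr from <linux/bitops.h>, so the mask conversions in this file are purely cosmetic; the interesting call is set_mask_bits(), which clears and sets the requested bits in one atomic cmpxchg loop on bh->b_state. A condensed sketch of the discard path above (kernel fragment, not part of the patch):

    const unsigned long clear_bits =
            (BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped));

    lock_buffer(bh);
    set_mask_bits(&bh->b_state, clear_bits, 0);   /* clear mask, set none */
    unlock_buffer(bh);
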
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index d893dc912b62..5139efed1888 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -54,38 +54,37 @@ struct nilfs_recovery_block {
};
-static int nilfs_warn_segment_error(int err)
+static int nilfs_warn_segment_error(struct super_block *sb, int err)
{
+ const char *msg = NULL;
+
switch (err) {
case NILFS_SEG_FAIL_IO:
- printk(KERN_WARNING
- "NILFS warning: I/O error on loading last segment\n");
+ nilfs_msg(sb, KERN_ERR, "I/O error reading segment");
return -EIO;
case NILFS_SEG_FAIL_MAGIC:
- printk(KERN_WARNING
- "NILFS warning: Segment magic number invalid\n");
+ msg = "Magic number mismatch";
break;
case NILFS_SEG_FAIL_SEQ:
- printk(KERN_WARNING
- "NILFS warning: Sequence number mismatch\n");
+ msg = "Sequence number mismatch";
break;
case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT:
- printk(KERN_WARNING
- "NILFS warning: Checksum error in super root\n");
+ msg = "Checksum error in super root";
break;
case NILFS_SEG_FAIL_CHECKSUM_FULL:
- printk(KERN_WARNING
- "NILFS warning: Checksum error in segment payload\n");
+ msg = "Checksum error in segment payload";
break;
case NILFS_SEG_FAIL_CONSISTENCY:
- printk(KERN_WARNING
- "NILFS warning: Inconsistent segment\n");
+ msg = "Inconsistency found";
break;
case NILFS_SEG_NO_SUPER_ROOT:
- printk(KERN_WARNING
- "NILFS warning: No super root in the last segment\n");
+ msg = "No super root in the last segment";
break;
+ default:
+ nilfs_msg(sb, KERN_ERR, "unrecognized segment error %d", err);
+ return -EINVAL;
}
+ nilfs_msg(sb, KERN_WARNING, "invalid segment: %s", msg);
return -EINVAL;
}
@@ -178,7 +177,7 @@ int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block,
brelse(bh_sr);
failed:
- return nilfs_warn_segment_error(ret);
+ return nilfs_warn_segment_error(nilfs->ns_sb, ret);
}
/**
@@ -553,11 +552,10 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
put_page(page);
failed_inode:
- printk(KERN_WARNING
- "NILFS warning: error recovering data block "
- "(err=%d, ino=%lu, block-offset=%llu)\n",
- err, (unsigned long)rb->ino,
- (unsigned long long)rb->blkoff);
+ nilfs_msg(sb, KERN_WARNING,
+ "error %d recovering data block (ino=%lu, block-offset=%llu)",
+ err, (unsigned long)rb->ino,
+ (unsigned long long)rb->blkoff);
if (!err2)
err2 = err;
next:
@@ -680,8 +678,8 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
}
if (nsalvaged_blocks) {
- printk(KERN_INFO "NILFS (device %s): salvaged %lu blocks\n",
- sb->s_id, nsalvaged_blocks);
+ nilfs_msg(sb, KERN_INFO, "salvaged %lu blocks",
+ nsalvaged_blocks);
ri->ri_need_recovery = NILFS_RECOVERY_ROLLFORWARD_DONE;
}
out:
@@ -692,10 +690,9 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
confused:
err = -EINVAL;
failed:
- printk(KERN_ERR
- "NILFS (device %s): Error roll-forwarding "
- "(err=%d, pseg block=%llu). ",
- sb->s_id, err, (unsigned long long)pseg_start);
+ nilfs_msg(sb, KERN_ERR,
+ "error %d roll-forwarding partial segment at blocknr = %llu",
+ err, (unsigned long long)pseg_start);
goto out;
}
@@ -715,9 +712,8 @@ static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
set_buffer_dirty(bh);
err = sync_dirty_buffer(bh);
if (unlikely(err))
- printk(KERN_WARNING
- "NILFS warning: buffer sync write failed during "
- "post-cleaning of recovery.\n");
+ nilfs_msg(nilfs->ns_sb, KERN_WARNING,
+ "buffer sync write failed during post-cleaning of recovery.");
brelse(bh);
}
@@ -752,8 +748,8 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
err = nilfs_attach_checkpoint(sb, ri->ri_cno, true, &root);
if (unlikely(err)) {
- printk(KERN_ERR
- "NILFS: error loading the latest checkpoint.\n");
+ nilfs_msg(sb, KERN_ERR,
+ "error %d loading the latest checkpoint", err);
return err;
}
@@ -764,8 +760,9 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
if (ri->ri_need_recovery == NILFS_RECOVERY_ROLLFORWARD_DONE) {
err = nilfs_prepare_segment_for_recovery(nilfs, sb, ri);
if (unlikely(err)) {
- printk(KERN_ERR "NILFS: Error preparing segments for "
- "recovery.\n");
+ nilfs_msg(sb, KERN_ERR,
+ "error %d preparing segment for recovery",
+ err);
goto failed;
}
@@ -778,8 +775,9 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
nilfs_detach_log_writer(sb);
if (unlikely(err)) {
- printk(KERN_ERR "NILFS: Oops! recovery failed. "
- "(err=%d)\n", err);
+ nilfs_msg(sb, KERN_ERR,
+ "error %d writing segment for recovery",
+ err);
goto failed;
}
@@ -961,5 +959,5 @@ int nilfs_search_super_root(struct the_nilfs *nilfs,
failed:
brelse(bh_sum);
nilfs_dispose_segment_list(&segments);
- return (ret < 0) ? ret : nilfs_warn_segment_error(ret);
+ return ret < 0 ? ret : nilfs_warn_segment_error(nilfs->ns_sb, ret);
}
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index bf36df10540b..6f87b2ac1aeb 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -346,7 +346,8 @@ static void nilfs_end_bio_write(struct bio *bio)
}
static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
- struct nilfs_write_info *wi, int mode)
+ struct nilfs_write_info *wi, int mode,
+ int mode_flags)
{
struct bio *bio = wi->bio;
int err;
@@ -364,7 +365,8 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
bio->bi_end_io = nilfs_end_bio_write;
bio->bi_private = segbuf;
- submit_bio(mode, bio);
+ bio_set_op_attrs(bio, mode, mode_flags);
+ submit_bio(bio);
segbuf->sb_nbio++;
wi->bio = NULL;
@@ -437,7 +439,7 @@ static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
return 0;
}
/* bio is FULL */
- err = nilfs_segbuf_submit_bio(segbuf, wi, mode);
+ err = nilfs_segbuf_submit_bio(segbuf, wi, mode, 0);
/* never submit current bh */
if (likely(!err))
goto repeat;
@@ -461,19 +463,19 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
{
struct nilfs_write_info wi;
struct buffer_head *bh;
- int res = 0, rw = WRITE;
+ int res = 0;
wi.nilfs = nilfs;
nilfs_segbuf_prepare_write(segbuf, &wi);
list_for_each_entry(bh, &segbuf->sb_segsum_buffers, b_assoc_buffers) {
- res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
+ res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, REQ_OP_WRITE);
if (unlikely(res))
goto failed_bio;
}
list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
- res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, rw);
+ res = nilfs_segbuf_submit_bh(segbuf, &wi, bh, REQ_OP_WRITE);
if (unlikely(res))
goto failed_bio;
}
@@ -483,8 +485,8 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
* Last BIO is always sent through the following
* submission.
*/
- rw |= REQ_SYNC;
- res = nilfs_segbuf_submit_bio(segbuf, &wi, rw);
+ res = nilfs_segbuf_submit_bio(segbuf, &wi, REQ_OP_WRITE,
+ REQ_SYNC);
}
failed_bio:
@@ -512,7 +514,11 @@ static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf)
} while (--segbuf->sb_nbio > 0);
if (unlikely(atomic_read(&segbuf->sb_err) > 0)) {
- printk(KERN_ERR "NILFS: IO error writing segment\n");
+ nilfs_msg(segbuf->sb_super, KERN_ERR,
+ "I/O error writing log (start-blocknr=%llu, block-count=%lu) in segment %llu",
+ (unsigned long long)segbuf->sb_pseg_start,
+ segbuf->sb_sum.nblocks,
+ (unsigned long long)segbuf->sb_segnum);
err = -EIO;
}
return err;
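
The write path follows the same op/flags split as the read path earlier in this diff, but for bios: the operation and flags are attached to the bio itself with bio_set_op_attrs() (the helper available at the time of this change), and submit_bio() loses its rw argument. A sketch of the final submission (kernel fragment, not part of the patch), where `is_last` is a hypothetical flag standing in for the last-BIO condition above:

    bio->bi_end_io = nilfs_end_bio_write;
    bio->bi_private = segbuf;
    bio_set_op_attrs(bio, REQ_OP_WRITE, is_last ? REQ_SYNC : 0);
    submit_bio(bio);                    /* no rw argument any more */
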
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index e78b68a81aec..bedcae2c28e6 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -150,7 +150,8 @@ static void nilfs_dispose_list(struct the_nilfs *, struct list_head *, int);
#define nilfs_cnt32_lt(a, b) nilfs_cnt32_gt(b, a)
#define nilfs_cnt32_le(a, b) nilfs_cnt32_ge(b, a)
-static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti)
+static int nilfs_prepare_segment_lock(struct super_block *sb,
+ struct nilfs_transaction_info *ti)
{
struct nilfs_transaction_info *cur_ti = current->journal_info;
void *save = NULL;
@@ -164,8 +165,7 @@ static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti)
* it is saved and will be restored on
* nilfs_transaction_commit().
*/
- printk(KERN_WARNING
- "NILFS warning: journal info from a different FS\n");
+ nilfs_msg(sb, KERN_WARNING, "journal info from a different FS");
save = current->journal_info;
}
if (!ti) {
@@ -215,7 +215,7 @@ int nilfs_transaction_begin(struct super_block *sb,
int vacancy_check)
{
struct the_nilfs *nilfs;
- int ret = nilfs_prepare_segment_lock(ti);
+ int ret = nilfs_prepare_segment_lock(sb, ti);
struct nilfs_transaction_info *trace_ti;
if (unlikely(ret < 0))
@@ -373,7 +373,7 @@ static void nilfs_transaction_lock(struct super_block *sb,
nilfs_segctor_do_immediate_flush(sci);
up_write(&nilfs->ns_segctor_sem);
- yield();
+ cond_resched();
}
if (gcflag)
ti->ti_flags |= NILFS_TI_GC;
@@ -1858,11 +1858,11 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci)
*/
list_for_each_entry(bh, &segbuf->sb_payload_buffers,
b_assoc_buffers) {
- const unsigned long set_bits = (1 << BH_Uptodate);
+ const unsigned long set_bits = BIT(BH_Uptodate);
const unsigned long clear_bits =
- (1 << BH_Dirty | 1 << BH_Async_Write |
- 1 << BH_Delay | 1 << BH_NILFS_Volatile |
- 1 << BH_NILFS_Redirected);
+ (BIT(BH_Dirty) | BIT(BH_Async_Write) |
+ BIT(BH_Delay) | BIT(BH_NILFS_Volatile) |
+ BIT(BH_NILFS_Redirected));
set_mask_bits(&bh->b_state, clear_bits, set_bits);
if (bh == segbuf->sb_super_root) {
@@ -1951,8 +1951,9 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
err = nilfs_ifile_get_inode_block(
ifile, ii->vfs_inode.i_ino, &ibh);
if (unlikely(err)) {
- nilfs_warning(sci->sc_super, __func__,
- "failed to get inode block.");
+ nilfs_msg(sci->sc_super, KERN_WARNING,
+ "log writer: error %d getting inode block (ino=%lu)",
+ err, ii->vfs_inode.i_ino);
return err;
}
mark_buffer_dirty(ibh);
@@ -2131,10 +2132,10 @@ static void nilfs_segctor_start_timer(struct nilfs_sc_info *sci)
static void nilfs_segctor_do_flush(struct nilfs_sc_info *sci, int bn)
{
spin_lock(&sci->sc_state_lock);
- if (!(sci->sc_flush_request & (1 << bn))) {
+ if (!(sci->sc_flush_request & BIT(bn))) {
unsigned long prev_req = sci->sc_flush_request;
- sci->sc_flush_request |= (1 << bn);
+ sci->sc_flush_request |= BIT(bn);
if (!prev_req)
wake_up(&sci->sc_wait_daemon);
}
@@ -2318,7 +2319,7 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
}
#define FLUSH_FILE_BIT (0x1) /* data file only */
-#define FLUSH_DAT_BIT (1 << NILFS_DAT_INO) /* DAT only */
+#define FLUSH_DAT_BIT BIT(NILFS_DAT_INO) /* DAT only */
/**
* nilfs_segctor_accept - record accepted sequence count of log-write requests
@@ -2458,8 +2459,7 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
if (likely(!err))
break;
- nilfs_warning(sb, __func__,
- "segment construction failed. (err=%d)", err);
+ nilfs_msg(sb, KERN_WARNING, "error %d cleaning segments", err);
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(sci->sc_interval);
}
@@ -2467,9 +2467,9 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv,
int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs,
sci->sc_nfreesegs);
if (ret) {
- printk(KERN_WARNING
- "NILFS warning: error %d on discard request, "
- "turning discards off for the device\n", ret);
+ nilfs_msg(sb, KERN_WARNING,
+ "error %d on discard request, turning discards off for the device",
+ ret);
nilfs_clear_opt(nilfs, DISCARD);
}
}
@@ -2551,10 +2551,9 @@ static int nilfs_segctor_thread(void *arg)
/* start sync. */
sci->sc_task = current;
wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */
- printk(KERN_INFO
- "segctord starting. Construction interval = %lu seconds, "
- "CP frequency < %lu seconds\n",
- sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
+ nilfs_msg(sci->sc_super, KERN_INFO,
+ "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds",
+ sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ);
spin_lock(&sci->sc_state_lock);
loop:
@@ -2628,8 +2627,8 @@ static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci)
if (IS_ERR(t)) {
int err = PTR_ERR(t);
- printk(KERN_ERR "NILFS: error %d creating segctord thread\n",
- err);
+ nilfs_msg(sci->sc_super, KERN_ERR,
+ "error %d creating segctord thread", err);
return err;
}
wait_event(sci->sc_wait_task, sci->sc_task != NULL);
@@ -2739,14 +2738,14 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
nilfs_segctor_write_out(sci);
if (!list_empty(&sci->sc_dirty_files)) {
- nilfs_warning(sci->sc_super, __func__,
- "dirty file(s) after the final construction");
+ nilfs_msg(sci->sc_super, KERN_WARNING,
+ "disposed unprocessed dirty file(s) when stopping log writer");
nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1);
}
if (!list_empty(&sci->sc_iput_queue)) {
- nilfs_warning(sci->sc_super, __func__,
- "iput queue is not empty");
+ nilfs_msg(sci->sc_super, KERN_WARNING,
+ "disposed unprocessed inode(s) in iput queue when stopping log writer");
nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1);
}
@@ -2822,8 +2821,8 @@ void nilfs_detach_log_writer(struct super_block *sb)
spin_lock(&nilfs->ns_inode_lock);
if (!list_empty(&nilfs->ns_dirty_files)) {
list_splice_init(&nilfs->ns_dirty_files, &garbage_list);
- nilfs_warning(sb, __func__,
- "Hit dirty file after stopped log writer");
+ nilfs_msg(sb, KERN_WARNING,
+ "disposed unprocessed dirty file(s) when detaching log writer");
}
spin_unlock(&nilfs->ns_inode_lock);
up_write(&nilfs->ns_segctor_sem);
diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h
index 6565c10b7b76..1060949d7dd2 100644
--- a/fs/nilfs2/segment.h
+++ b/fs/nilfs2/segment.h
@@ -23,7 +23,6 @@
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/workqueue.h>
-#include <linux/nilfs2_fs.h>
#include "nilfs.h"
struct nilfs_root;
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index 1963595a1580..1541a1e9221a 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -22,7 +22,6 @@
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
-#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "sufile.h"
@@ -181,9 +180,9 @@ int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
down_write(&NILFS_MDT(sufile)->mi_sem);
for (seg = segnumv; seg < segnumv + nsegs; seg++) {
if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
- printk(KERN_WARNING
- "%s: invalid segment number: %llu\n", __func__,
- (unsigned long long)*seg);
+ nilfs_msg(sufile->i_sb, KERN_WARNING,
+ "%s: invalid segment number: %llu",
+ __func__, (unsigned long long)*seg);
nerr++;
}
}
@@ -240,8 +239,9 @@ int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
int ret;
if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
- printk(KERN_WARNING "%s: invalid segment number: %llu\n",
- __func__, (unsigned long long)segnum);
+ nilfs_msg(sufile->i_sb, KERN_WARNING,
+ "%s: invalid segment number: %llu",
+ __func__, (unsigned long long)segnum);
return -EINVAL;
}
down_write(&NILFS_MDT(sufile)->mi_sem);
@@ -419,8 +419,9 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
kaddr = kmap_atomic(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
if (unlikely(!nilfs_segment_usage_clean(su))) {
- printk(KERN_WARNING "%s: segment %llu must be clean\n",
- __func__, (unsigned long long)segnum);
+ nilfs_msg(sufile->i_sb, KERN_WARNING,
+ "%s: segment %llu must be clean", __func__,
+ (unsigned long long)segnum);
kunmap_atomic(kaddr);
return;
}
@@ -444,7 +445,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
kaddr = kmap_atomic(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
- if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
+ if (su->su_flags == cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY)) &&
su->su_nblocks == cpu_to_le32(0)) {
kunmap_atomic(kaddr);
return;
@@ -455,7 +456,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
/* make the segment garbage */
su->su_lastmod = cpu_to_le64(0);
su->su_nblocks = cpu_to_le32(0);
- su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
+ su->su_flags = cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY));
kunmap_atomic(kaddr);
nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
@@ -476,8 +477,9 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
kaddr = kmap_atomic(su_bh->b_page);
su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
if (nilfs_segment_usage_clean(su)) {
- printk(KERN_WARNING "%s: segment %llu is already clean\n",
- __func__, (unsigned long long)segnum);
+ nilfs_msg(sufile->i_sb, KERN_WARNING,
+ "%s: segment %llu is already clean",
+ __func__, (unsigned long long)segnum);
kunmap_atomic(kaddr);
return;
}
@@ -692,7 +694,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
su2 = su;
for (j = 0; j < n; j++, su = (void *)su + susz) {
if ((le32_to_cpu(su->su_flags) &
- ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
+ ~BIT(NILFS_SEGMENT_USAGE_ERROR)) ||
nilfs_segment_is_active(nilfs, segnum + j)) {
ret = -EBUSY;
kunmap_atomic(kaddr);
@@ -859,10 +861,10 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
si->sui_lastmod = le64_to_cpu(su->su_lastmod);
si->sui_nblocks = le32_to_cpu(su->su_nblocks);
si->sui_flags = le32_to_cpu(su->su_flags) &
- ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
+ ~BIT(NILFS_SEGMENT_USAGE_ACTIVE);
if (nilfs_segment_is_active(nilfs, segnum + j))
si->sui_flags |=
- (1UL << NILFS_SEGMENT_USAGE_ACTIVE);
+ BIT(NILFS_SEGMENT_USAGE_ACTIVE);
}
kunmap_atomic(kaddr);
brelse(su_bh);
@@ -950,7 +952,7 @@ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
* disk.
*/
sup->sup_sui.sui_flags &=
- ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
+ ~BIT(NILFS_SEGMENT_USAGE_ACTIVE);
cleansi = nilfs_suinfo_clean(&sup->sup_sui);
cleansu = nilfs_segment_usage_clean(su);
@@ -1175,14 +1177,12 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
int err;
if (susize > sb->s_blocksize) {
- printk(KERN_ERR
- "NILFS: too large segment usage size: %zu bytes.\n",
- susize);
+ nilfs_msg(sb, KERN_ERR,
+ "too large segment usage size: %zu bytes", susize);
return -EINVAL;
} else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) {
- printk(KERN_ERR
- "NILFS: too small segment usage size: %zu bytes.\n",
- susize);
+ nilfs_msg(sb, KERN_ERR,
+ "too small segment usage size: %zu bytes", susize);
return -EINVAL;
}
diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h
index 46e89872294c..158a9190c8ec 100644
--- a/fs/nilfs2/sufile.h
+++ b/fs/nilfs2/sufile.h
@@ -21,7 +21,6 @@
#include <linux/fs.h>
#include <linux/buffer_head.h>
-#include <linux/nilfs2_fs.h>
#include "mdt.h"
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 666107a18a22..c95d369e90aa 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -71,6 +71,22 @@ struct kmem_cache *nilfs_btree_path_cache;
static int nilfs_setup_super(struct super_block *sb, int is_mount);
static int nilfs_remount(struct super_block *sb, int *flags, char *data);
+void __nilfs_msg(struct super_block *sb, const char *level, const char *fmt,
+ ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ if (sb)
+ printk("%sNILFS (%s): %pV\n", level, sb->s_id, &vaf);
+ else
+ printk("%sNILFS: %pV\n", level, &vaf);
+ va_end(args);
+}
+
static void nilfs_set_error(struct super_block *sb)
{
struct the_nilfs *nilfs = sb->s_fs_info;
@@ -91,19 +107,20 @@ static void nilfs_set_error(struct super_block *sb)
}
/**
- * nilfs_error() - report failure condition on a filesystem
+ * __nilfs_error() - report failure condition on a filesystem
+ *
+ * __nilfs_error() sets an ERROR_FS flag on the superblock as well as
+ * reporting an error message. This function should be called when
+ * NILFS detects incoherences or defects of meta data on disk.
*
- * nilfs_error() sets an ERROR_FS flag on the superblock as well as
- * reporting an error message. It should be called when NILFS detects
- * incoherences or defects of meta data on disk. As for sustainable
- * errors such as a single-shot I/O error, nilfs_warning() or the printk()
- * function should be used instead.
+ * This implements the body of nilfs_error() macro. Normally,
+ * nilfs_error() should be used. As for sustainable errors such as a
+ * single-shot I/O error, nilfs_msg() should be used instead.
*
- * The segment constructor must not call this function because it can
- * kill itself.
+ * Callers should not add a trailing newline since this will do it.
*/
-void nilfs_error(struct super_block *sb, const char *function,
- const char *fmt, ...)
+void __nilfs_error(struct super_block *sb, const char *function,
+ const char *fmt, ...)
{
struct the_nilfs *nilfs = sb->s_fs_info;
struct va_format vaf;
@@ -133,24 +150,6 @@ void nilfs_error(struct super_block *sb, const char *function,
sb->s_id);
}
-void nilfs_warning(struct super_block *sb, const char *function,
- const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- printk(KERN_WARNING "NILFS warning (device %s): %s: %pV\n",
- sb->s_id, function, &vaf);
-
- va_end(args);
-}
-
-
struct inode *nilfs_alloc_inode(struct super_block *sb)
{
struct nilfs_inode_info *ii;
@@ -196,8 +195,8 @@ static int nilfs_sync_super(struct super_block *sb, int flag)
}
if (unlikely(err)) {
- printk(KERN_ERR
- "NILFS: unable to write superblock (err=%d)\n", err);
+ nilfs_msg(sb, KERN_ERR, "unable to write superblock: err=%d",
+ err);
if (err == -EIO && nilfs->ns_sbh[1]) {
/*
* sbp[0] points to newer log than sbp[1],
@@ -267,8 +266,7 @@ struct nilfs_super_block **nilfs_prepare_super(struct super_block *sb,
sbp[1]->s_magic == cpu_to_le16(NILFS_SUPER_MAGIC)) {
memcpy(sbp[0], sbp[1], nilfs->ns_sbsize);
} else {
- printk(KERN_CRIT "NILFS: superblock broke on dev %s\n",
- sb->s_id);
+ nilfs_msg(sb, KERN_CRIT, "superblock broke");
return NULL;
}
} else if (sbp[1] &&
@@ -378,9 +376,9 @@ static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off)
offset = sb2off & (nilfs->ns_blocksize - 1);
nsbh = sb_getblk(sb, newblocknr);
if (!nsbh) {
- printk(KERN_WARNING
- "NILFS warning: unable to move secondary superblock "
- "to block %llu\n", (unsigned long long)newblocknr);
+ nilfs_msg(sb, KERN_WARNING,
+ "unable to move secondary superblock to block %llu",
+ (unsigned long long)newblocknr);
ret = -EIO;
goto out;
}
@@ -543,10 +541,9 @@ int nilfs_attach_checkpoint(struct super_block *sb, __u64 cno, int curr_mnt,
up_read(&nilfs->ns_segctor_sem);
if (unlikely(err)) {
if (err == -ENOENT || err == -EINVAL) {
- printk(KERN_ERR
- "NILFS: Invalid checkpoint "
- "(checkpoint number=%llu)\n",
- (unsigned long long)cno);
+ nilfs_msg(sb, KERN_ERR,
+ "Invalid checkpoint (checkpoint number=%llu)",
+ (unsigned long long)cno);
err = -EINVAL;
}
goto failed;
@@ -642,9 +639,8 @@ static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf)
err = nilfs_ifile_count_free_inodes(root->ifile,
&nmaxinodes, &nfreeinodes);
if (unlikely(err)) {
- printk(KERN_WARNING
- "NILFS warning: fail to count free inodes: err %d.\n",
- err);
+ nilfs_msg(sb, KERN_WARNING,
+ "failed to count free inodes: err=%d", err);
if (err == -ERANGE) {
/*
* If nilfs_palloc_count_max_entries() returns
@@ -776,9 +772,9 @@ static int parse_options(char *options, struct super_block *sb, int is_remount)
break;
case Opt_snapshot:
if (is_remount) {
- printk(KERN_ERR
- "NILFS: \"%s\" option is invalid "
- "for remount.\n", p);
+ nilfs_msg(sb, KERN_ERR,
+ "\"%s\" option is invalid for remount",
+ p);
return 0;
}
break;
@@ -792,8 +788,8 @@ static int parse_options(char *options, struct super_block *sb, int is_remount)
nilfs_clear_opt(nilfs, DISCARD);
break;
default:
- printk(KERN_ERR
- "NILFS: Unrecognized mount option \"%s\"\n", p);
+ nilfs_msg(sb, KERN_ERR,
+ "unrecognized mount option \"%s\"", p);
return 0;
}
}
@@ -829,12 +825,10 @@ static int nilfs_setup_super(struct super_block *sb, int is_mount)
mnt_count = le16_to_cpu(sbp[0]->s_mnt_count);
if (nilfs->ns_mount_state & NILFS_ERROR_FS) {
- printk(KERN_WARNING
- "NILFS warning: mounting fs with errors\n");
+ nilfs_msg(sb, KERN_WARNING, "mounting fs with errors");
#if 0
} else if (max_mnt_count >= 0 && mnt_count >= max_mnt_count) {
- printk(KERN_WARNING
- "NILFS warning: maximal mount count reached\n");
+ nilfs_msg(sb, KERN_WARNING, "maximal mount count reached");
#endif
}
if (!max_mnt_count)
@@ -897,17 +891,17 @@ int nilfs_check_feature_compatibility(struct super_block *sb,
features = le64_to_cpu(sbp->s_feature_incompat) &
~NILFS_FEATURE_INCOMPAT_SUPP;
if (features) {
- printk(KERN_ERR "NILFS: couldn't mount because of unsupported "
- "optional features (%llx)\n",
- (unsigned long long)features);
+ nilfs_msg(sb, KERN_ERR,
+ "couldn't mount because of unsupported optional features (%llx)",
+ (unsigned long long)features);
return -EINVAL;
}
features = le64_to_cpu(sbp->s_feature_compat_ro) &
~NILFS_FEATURE_COMPAT_RO_SUPP;
if (!(sb->s_flags & MS_RDONLY) && features) {
- printk(KERN_ERR "NILFS: couldn't mount RDWR because of "
- "unsupported optional features (%llx)\n",
- (unsigned long long)features);
+ nilfs_msg(sb, KERN_ERR,
+ "couldn't mount RDWR because of unsupported optional features (%llx)",
+ (unsigned long long)features);
return -EINVAL;
}
return 0;
@@ -923,13 +917,13 @@ static int nilfs_get_root_dentry(struct super_block *sb,
inode = nilfs_iget(sb, root, NILFS_ROOT_INO);
if (IS_ERR(inode)) {
- printk(KERN_ERR "NILFS: get root inode failed\n");
ret = PTR_ERR(inode);
+ nilfs_msg(sb, KERN_ERR, "error %d getting root inode", ret);
goto out;
}
if (!S_ISDIR(inode->i_mode) || !inode->i_blocks || !inode->i_size) {
iput(inode);
- printk(KERN_ERR "NILFS: corrupt root inode.\n");
+ nilfs_msg(sb, KERN_ERR, "corrupt root inode");
ret = -EINVAL;
goto out;
}
@@ -957,7 +951,7 @@ static int nilfs_get_root_dentry(struct super_block *sb,
return ret;
failed_dentry:
- printk(KERN_ERR "NILFS: get root dentry failed\n");
+ nilfs_msg(sb, KERN_ERR, "error %d getting root dentry", ret);
goto out;
}
@@ -977,18 +971,18 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
ret = (ret == -ENOENT) ? -EINVAL : ret;
goto out;
} else if (!ret) {
- printk(KERN_ERR "NILFS: The specified checkpoint is "
- "not a snapshot (checkpoint number=%llu).\n",
- (unsigned long long)cno);
+ nilfs_msg(s, KERN_ERR,
+ "The specified checkpoint is not a snapshot (checkpoint number=%llu)",
+ (unsigned long long)cno);
ret = -EINVAL;
goto out;
}
ret = nilfs_attach_checkpoint(s, cno, false, &root);
if (ret) {
- printk(KERN_ERR "NILFS: error loading snapshot "
- "(checkpoint number=%llu).\n",
- (unsigned long long)cno);
+ nilfs_msg(s, KERN_ERR,
+ "error %d while loading snapshot (checkpoint number=%llu)",
+ ret, (unsigned long long)cno);
goto out;
}
ret = nilfs_get_root_dentry(s, root, root_dentry);
@@ -1058,7 +1052,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
__u64 cno;
int err;
- nilfs = alloc_nilfs(sb->s_bdev);
+ nilfs = alloc_nilfs(sb);
if (!nilfs)
return -ENOMEM;
@@ -1083,8 +1077,9 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent)
cno = nilfs_last_cno(nilfs);
err = nilfs_attach_checkpoint(sb, cno, true, &fsroot);
if (err) {
- printk(KERN_ERR "NILFS: error loading last checkpoint "
- "(checkpoint number=%llu).\n", (unsigned long long)cno);
+ nilfs_msg(sb, KERN_ERR,
+ "error %d while loading last checkpoint (checkpoint number=%llu)",
+ err, (unsigned long long)cno);
goto failed_unload;
}
@@ -1144,9 +1139,8 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
err = -EINVAL;
if (!nilfs_valid_fs(nilfs)) {
- printk(KERN_WARNING "NILFS (device %s): couldn't "
- "remount because the filesystem is in an "
- "incomplete recovery state.\n", sb->s_id);
+ nilfs_msg(sb, KERN_WARNING,
+ "couldn't remount because the filesystem is in an incomplete recovery state");
goto restore_opts;
}
@@ -1178,10 +1172,9 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data)
~NILFS_FEATURE_COMPAT_RO_SUPP;
up_read(&nilfs->ns_sem);
if (features) {
- printk(KERN_WARNING "NILFS (device %s): couldn't "
- "remount RDWR because of unsupported optional "
- "features (%llx)\n",
- sb->s_id, (unsigned long long)features);
+ nilfs_msg(sb, KERN_WARNING,
+ "couldn't remount RDWR because of unsupported optional features (%llx)",
+ (unsigned long long)features);
err = -EROFS;
goto restore_opts;
}
@@ -1212,6 +1205,38 @@ struct nilfs_super_data {
int flags;
};
+static int nilfs_parse_snapshot_option(const char *option,
+ const substring_t *arg,
+ struct nilfs_super_data *sd)
+{
+ unsigned long long val;
+ const char *msg = NULL;
+ int err;
+
+ if (!(sd->flags & MS_RDONLY)) {
+ msg = "read-only option is not specified";
+ goto parse_error;
+ }
+
+ err = kstrtoull(arg->from, 0, &val);
+ if (err) {
+ if (err == -ERANGE)
+ msg = "too large checkpoint number";
+ else
+ msg = "malformed argument";
+ goto parse_error;
+ } else if (val == 0) {
+ msg = "invalid checkpoint number 0";
+ goto parse_error;
+ }
+ sd->cno = val;
+ return 0;
+
+parse_error:
+ nilfs_msg(NULL, KERN_ERR, "invalid option \"%s\": %s", option, msg);
+ return 1;
+}
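
The new helper replaces the simple_strtoull() call removed further down. Unlike simple_strtoull(), kstrtoull() rejects trailing garbage and reports overflow, which is what makes the distinct "malformed argument" and "too large checkpoint number" messages possible. A self-contained sketch of that error contract (names are illustrative, not part of the patch):

	#include <linux/kernel.h>
	#include <linux/types.h>

	/* Illustrative only: mirrors the kstrtoull() handling above. */
	static int demo_parse_cno(const char *str, u64 *cno)
	{
		unsigned long long val;
		int err;

		err = kstrtoull(str, 0, &val);	/* base 0: decimal, 0x.. or 0.. */
		if (err)
			return err;	/* -EINVAL on junk, -ERANGE on overflow */
		if (val == 0)
			return -EINVAL;	/* checkpoint numbers start at 1 */
		*cno = val;
		return 0;
	}
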
+
/**
* nilfs_identify - pre-read mount options needed to identify mount instance
* @data: mount options
@@ -1228,24 +1253,9 @@ static int nilfs_identify(char *data, struct nilfs_super_data *sd)
p = strsep(&options, ",");
if (p != NULL && *p) {
token = match_token(p, tokens, args);
- if (token == Opt_snapshot) {
- if (!(sd->flags & MS_RDONLY)) {
- ret++;
- } else {
- sd->cno = simple_strtoull(args[0].from,
- NULL, 0);
- /*
- * No need to see the end pointer;
- * match_token() has done syntax
- * checking.
- */
- if (sd->cno == 0)
- ret++;
- }
- }
- if (ret)
- printk(KERN_ERR
- "NILFS: invalid mount option: %s\n", p);
+ if (token == Opt_snapshot)
+ ret = nilfs_parse_snapshot_option(p, &args[0],
+ sd);
}
if (!options)
break;
@@ -1326,10 +1336,10 @@ nilfs_mount(struct file_system_type *fs_type, int flags,
} else if (!sd.cno) {
if (nilfs_tree_is_busy(s->s_root)) {
if ((flags ^ s->s_flags) & MS_RDONLY) {
- printk(KERN_ERR "NILFS: the device already "
- "has a %s mount.\n",
- (s->s_flags & MS_RDONLY) ?
- "read-only" : "read/write");
+ nilfs_msg(s, KERN_ERR,
+ "the device already has a %s mount.",
+ (s->s_flags & MS_RDONLY) ?
+ "read-only" : "read/write");
err = -EBUSY;
goto failed_super;
}
diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
index 8ffa42b704d8..490303e3d517 100644
--- a/fs/nilfs2/sysfs.c
+++ b/fs/nilfs2/sysfs.c
@@ -272,8 +272,8 @@ nilfs_checkpoints_checkpoints_number_show(struct nilfs_checkpoints_attr *attr,
err = nilfs_cpfile_get_stat(nilfs->ns_cpfile, &cpstat);
up_read(&nilfs->ns_segctor_sem);
if (err < 0) {
- printk(KERN_ERR "NILFS: unable to get checkpoint stat: err=%d\n",
- err);
+ nilfs_msg(nilfs->ns_sb, KERN_ERR,
+ "unable to get checkpoint stat: err=%d", err);
return err;
}
@@ -295,8 +295,8 @@ nilfs_checkpoints_snapshots_number_show(struct nilfs_checkpoints_attr *attr,
err = nilfs_cpfile_get_stat(nilfs->ns_cpfile, &cpstat);
up_read(&nilfs->ns_segctor_sem);
if (err < 0) {
- printk(KERN_ERR "NILFS: unable to get checkpoint stat: err=%d\n",
- err);
+ nilfs_msg(nilfs->ns_sb, KERN_ERR,
+ "unable to get checkpoint stat: err=%d", err);
return err;
}
@@ -326,9 +326,9 @@ nilfs_checkpoints_next_checkpoint_show(struct nilfs_checkpoints_attr *attr,
{
__u64 cno;
- down_read(&nilfs->ns_sem);
+ down_read(&nilfs->ns_segctor_sem);
cno = nilfs->ns_cno;
- up_read(&nilfs->ns_sem);
+ up_read(&nilfs->ns_segctor_sem);
return snprintf(buf, PAGE_SIZE, "%llu\n", cno);
}
@@ -414,8 +414,8 @@ nilfs_segments_dirty_segments_show(struct nilfs_segments_attr *attr,
err = nilfs_sufile_get_stat(nilfs->ns_sufile, &sustat);
up_read(&nilfs->ns_segctor_sem);
if (err < 0) {
- printk(KERN_ERR "NILFS: unable to get segment stat: err=%d\n",
- err);
+ nilfs_msg(nilfs->ns_sb, KERN_ERR,
+ "unable to get segment stat: err=%d", err);
return err;
}
@@ -511,9 +511,9 @@ nilfs_segctor_current_seg_sequence_show(struct nilfs_segctor_attr *attr,
{
u64 seg_seq;
- down_read(&nilfs->ns_sem);
+ down_read(&nilfs->ns_segctor_sem);
seg_seq = nilfs->ns_seg_seq;
- up_read(&nilfs->ns_sem);
+ up_read(&nilfs->ns_segctor_sem);
return snprintf(buf, PAGE_SIZE, "%llu\n", seg_seq);
}
@@ -525,9 +525,9 @@ nilfs_segctor_current_last_full_seg_show(struct nilfs_segctor_attr *attr,
{
__u64 segnum;
- down_read(&nilfs->ns_sem);
+ down_read(&nilfs->ns_segctor_sem);
segnum = nilfs->ns_segnum;
- up_read(&nilfs->ns_sem);
+ up_read(&nilfs->ns_segctor_sem);
return snprintf(buf, PAGE_SIZE, "%llu\n", segnum);
}
@@ -539,9 +539,9 @@ nilfs_segctor_next_full_seg_show(struct nilfs_segctor_attr *attr,
{
__u64 nextnum;
- down_read(&nilfs->ns_sem);
+ down_read(&nilfs->ns_segctor_sem);
nextnum = nilfs->ns_nextnum;
- up_read(&nilfs->ns_sem);
+ up_read(&nilfs->ns_segctor_sem);
return snprintf(buf, PAGE_SIZE, "%llu\n", nextnum);
}
@@ -553,9 +553,9 @@ nilfs_segctor_next_pseg_offset_show(struct nilfs_segctor_attr *attr,
{
unsigned long pseg_offset;
- down_read(&nilfs->ns_sem);
+ down_read(&nilfs->ns_segctor_sem);
pseg_offset = nilfs->ns_pseg_offset;
- up_read(&nilfs->ns_sem);
+ up_read(&nilfs->ns_segctor_sem);
return snprintf(buf, PAGE_SIZE, "%lu\n", pseg_offset);
}
@@ -567,9 +567,9 @@ nilfs_segctor_next_checkpoint_show(struct nilfs_segctor_attr *attr,
{
__u64 cno;
- down_read(&nilfs->ns_sem);
+ down_read(&nilfs->ns_segctor_sem);
cno = nilfs->ns_cno;
- up_read(&nilfs->ns_sem);
+ up_read(&nilfs->ns_segctor_sem);
return snprintf(buf, PAGE_SIZE, "%llu\n", cno);
}
@@ -581,9 +581,9 @@ nilfs_segctor_last_seg_write_time_show(struct nilfs_segctor_attr *attr,
{
time_t ctime;
- down_read(&nilfs->ns_sem);
+ down_read(&nilfs->ns_segctor_sem);
ctime = nilfs->ns_ctime;
- up_read(&nilfs->ns_sem);
+ up_read(&nilfs->ns_segctor_sem);
return NILFS_SHOW_TIME(ctime, buf);
}
@@ -595,9 +595,9 @@ nilfs_segctor_last_seg_write_time_secs_show(struct nilfs_segctor_attr *attr,
{
time_t ctime;
- down_read(&nilfs->ns_sem);
+ down_read(&nilfs->ns_segctor_sem);
ctime = nilfs->ns_ctime;
- up_read(&nilfs->ns_sem);
+ up_read(&nilfs->ns_segctor_sem);
return snprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)ctime);
}
@@ -609,9 +609,9 @@ nilfs_segctor_last_nongc_write_time_show(struct nilfs_segctor_attr *attr,
{
time_t nongc_ctime;
- down_read(&nilfs->ns_sem);
+ down_read(&nilfs->ns_segctor_sem);
nongc_ctime = nilfs->ns_nongc_ctime;
- up_read(&nilfs->ns_sem);
+ up_read(&nilfs->ns_segctor_sem);
return NILFS_SHOW_TIME(nongc_ctime, buf);
}
@@ -623,9 +623,9 @@ nilfs_segctor_last_nongc_write_time_secs_show(struct nilfs_segctor_attr *attr,
{
time_t nongc_ctime;
- down_read(&nilfs->ns_sem);
+ down_read(&nilfs->ns_segctor_sem);
nongc_ctime = nilfs->ns_nongc_ctime;
- up_read(&nilfs->ns_sem);
+ up_read(&nilfs->ns_segctor_sem);
return snprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)nongc_ctime);
@@ -638,9 +638,9 @@ nilfs_segctor_dirty_data_blocks_count_show(struct nilfs_segctor_attr *attr,
{
u32 ndirtyblks;
- down_read(&nilfs->ns_sem);
+ down_read(&nilfs->ns_segctor_sem);
ndirtyblks = atomic_read(&nilfs->ns_ndirtyblks);
- up_read(&nilfs->ns_sem);
+ up_read(&nilfs->ns_segctor_sem);
return snprintf(buf, PAGE_SIZE, "%u\n", ndirtyblks);
}
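
All of the sysfs show handlers in this run of hunks switch from ns_sem to ns_segctor_sem, the lock that actually serializes updates to these segment-constructor fields (see the reworded comment in the_nilfs.h below). The common read-side shape, sketched in the context of this file:

	/* Snapshot a segctor-owned field under the reader half of the lock. */
	static u64 demo_read_seg_seq(struct the_nilfs *nilfs)
	{
		u64 seg_seq;

		down_read(&nilfs->ns_segctor_sem);
		seg_seq = nilfs->ns_seg_seq;	/* written under the write lock */
		up_read(&nilfs->ns_segctor_sem);
		return seg_seq;
	}
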
@@ -789,14 +789,15 @@ nilfs_superblock_sb_update_frequency_store(struct nilfs_superblock_attr *attr,
err = kstrtouint(skip_spaces(buf), 0, &val);
if (err) {
- printk(KERN_ERR "NILFS: unable to convert string: err=%d\n",
- err);
+ nilfs_msg(nilfs->ns_sb, KERN_ERR,
+ "unable to convert string: err=%d", err);
return err;
}
if (val < NILFS_SB_FREQ) {
val = NILFS_SB_FREQ;
- printk(KERN_WARNING "NILFS: superblock update frequency cannot be lesser than 10 seconds\n");
+ nilfs_msg(nilfs->ns_sb, KERN_WARNING,
+ "superblock update frequency cannot be less than 10 seconds");
}
down_write(&nilfs->ns_sem);
@@ -999,7 +1000,8 @@ int nilfs_sysfs_create_device_group(struct super_block *sb)
nilfs->ns_dev_subgroups = kzalloc(devgrp_size, GFP_KERNEL);
if (unlikely(!nilfs->ns_dev_subgroups)) {
err = -ENOMEM;
- printk(KERN_ERR "NILFS: unable to allocate memory for device group\n");
+ nilfs_msg(sb, KERN_ERR,
+ "unable to allocate memory for device group");
goto failed_create_device_group;
}
@@ -1109,15 +1111,15 @@ int __init nilfs_sysfs_init(void)
nilfs_kset = kset_create_and_add(NILFS_ROOT_GROUP_NAME, NULL, fs_kobj);
if (!nilfs_kset) {
err = -ENOMEM;
- printk(KERN_ERR "NILFS: unable to create sysfs entry: err %d\n",
- err);
+ nilfs_msg(NULL, KERN_ERR,
+ "unable to create sysfs entry: err=%d", err);
goto failed_sysfs_init;
}
err = sysfs_create_group(&nilfs_kset->kobj, &nilfs_feature_attr_group);
if (unlikely(err)) {
- printk(KERN_ERR "NILFS: unable to create feature group: err %d\n",
- err);
+ nilfs_msg(NULL, KERN_ERR,
+ "unable to create feature group: err=%d", err);
goto cleanup_sysfs_init;
}
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index e9fd241b9a0a..2dd75bf619ad 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -56,12 +56,12 @@ void nilfs_set_last_segment(struct the_nilfs *nilfs,
/**
* alloc_nilfs - allocate a nilfs object
- * @bdev: block device to which the_nilfs is related
+ * @sb: super block instance
*
* Return Value: On success, pointer to the_nilfs is returned.
* On error, NULL is returned.
*/
-struct the_nilfs *alloc_nilfs(struct block_device *bdev)
+struct the_nilfs *alloc_nilfs(struct super_block *sb)
{
struct the_nilfs *nilfs;
@@ -69,7 +69,8 @@ struct the_nilfs *alloc_nilfs(struct block_device *bdev)
if (!nilfs)
return NULL;
- nilfs->ns_bdev = bdev;
+ nilfs->ns_sb = sb;
+ nilfs->ns_bdev = sb->s_bdev;
atomic_set(&nilfs->ns_ndirtyblks, 0);
init_rwsem(&nilfs->ns_sem);
mutex_init(&nilfs->ns_snapshot_mount_mutex);
@@ -191,7 +192,10 @@ static int nilfs_store_log_cursor(struct the_nilfs *nilfs,
nilfs_get_segnum_of_block(nilfs, nilfs->ns_last_pseg);
nilfs->ns_cno = nilfs->ns_last_cno + 1;
if (nilfs->ns_segnum >= nilfs->ns_nsegments) {
- printk(KERN_ERR "NILFS invalid last segment number.\n");
+ nilfs_msg(nilfs->ns_sb, KERN_ERR,
+ "pointed segment number is out of range: segnum=%llu, nsegments=%lu",
+ (unsigned long long)nilfs->ns_segnum,
+ nilfs->ns_nsegments);
ret = -EINVAL;
}
return ret;
@@ -215,12 +219,12 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
int err;
if (!valid_fs) {
- printk(KERN_WARNING "NILFS warning: mounting unchecked fs\n");
+ nilfs_msg(sb, KERN_WARNING, "mounting unchecked fs");
if (s_flags & MS_RDONLY) {
- printk(KERN_INFO "NILFS: INFO: recovery "
- "required for readonly filesystem.\n");
- printk(KERN_INFO "NILFS: write access will "
- "be enabled during recovery.\n");
+ nilfs_msg(sb, KERN_INFO,
+ "recovery required for readonly filesystem");
+ nilfs_msg(sb, KERN_INFO,
+ "write access will be enabled during recovery");
}
}
@@ -235,13 +239,12 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
goto scan_error;
if (!nilfs_valid_sb(sbp[1])) {
- printk(KERN_WARNING
- "NILFS warning: unable to fall back to spare"
- "super block\n");
+ nilfs_msg(sb, KERN_WARNING,
+ "unable to fall back to spare super block");
goto scan_error;
}
- printk(KERN_INFO
- "NILFS: try rollback from an earlier position\n");
+ nilfs_msg(sb, KERN_INFO,
+ "trying rollback from an earlier position");
/*
* restore super block with its spare and reconfigure
@@ -254,10 +257,9 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
/* verify consistency between two super blocks */
blocksize = BLOCK_SIZE << le32_to_cpu(sbp[0]->s_log_block_size);
if (blocksize != nilfs->ns_blocksize) {
- printk(KERN_WARNING
- "NILFS warning: blocksize differs between "
- "two super blocks (%d != %d)\n",
- blocksize, nilfs->ns_blocksize);
+ nilfs_msg(sb, KERN_WARNING,
+ "blocksize differs between two super blocks (%d != %d)",
+ blocksize, nilfs->ns_blocksize);
goto scan_error;
}
@@ -276,7 +278,8 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
err = nilfs_load_super_root(nilfs, sb, ri.ri_super_root);
if (unlikely(err)) {
- printk(KERN_ERR "NILFS: error loading super root.\n");
+ nilfs_msg(sb, KERN_ERR, "error %d while loading super root",
+ err);
goto failed;
}
@@ -287,30 +290,29 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
__u64 features;
if (nilfs_test_opt(nilfs, NORECOVERY)) {
- printk(KERN_INFO "NILFS: norecovery option specified. "
- "skipping roll-forward recovery\n");
+ nilfs_msg(sb, KERN_INFO,
+ "norecovery option specified, skipping roll-forward recovery");
goto skip_recovery;
}
features = le64_to_cpu(nilfs->ns_sbp[0]->s_feature_compat_ro) &
~NILFS_FEATURE_COMPAT_RO_SUPP;
if (features) {
- printk(KERN_ERR "NILFS: couldn't proceed with "
- "recovery because of unsupported optional "
- "features (%llx)\n",
- (unsigned long long)features);
+ nilfs_msg(sb, KERN_ERR,
+ "couldn't proceed with recovery because of unsupported optional features (%llx)",
+ (unsigned long long)features);
err = -EROFS;
goto failed_unload;
}
if (really_read_only) {
- printk(KERN_ERR "NILFS: write access "
- "unavailable, cannot proceed.\n");
+ nilfs_msg(sb, KERN_ERR,
+ "write access unavailable, cannot proceed");
err = -EROFS;
goto failed_unload;
}
sb->s_flags &= ~MS_RDONLY;
} else if (nilfs_test_opt(nilfs, NORECOVERY)) {
- printk(KERN_ERR "NILFS: recovery cancelled because norecovery "
- "option was specified for a read/write mount\n");
+ nilfs_msg(sb, KERN_ERR,
+ "recovery cancelled because norecovery option was specified for a read/write mount");
err = -EINVAL;
goto failed_unload;
}
@@ -325,11 +327,12 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
up_write(&nilfs->ns_sem);
if (err) {
- printk(KERN_ERR "NILFS: failed to update super block. "
- "recovery unfinished.\n");
+ nilfs_msg(sb, KERN_ERR,
+ "error %d updating super block. recovery unfinished.",
+ err);
goto failed_unload;
}
- printk(KERN_INFO "NILFS: recovery complete.\n");
+ nilfs_msg(sb, KERN_INFO, "recovery complete");
skip_recovery:
nilfs_clear_recovery_info(&ri);
@@ -337,7 +340,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
return 0;
scan_error:
- printk(KERN_ERR "NILFS: error searching super root.\n");
+ nilfs_msg(sb, KERN_ERR, "error %d while searching super root", err);
goto failed;
failed_unload:
@@ -384,12 +387,11 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
struct nilfs_super_block *sbp)
{
if (le32_to_cpu(sbp->s_rev_level) < NILFS_MIN_SUPP_REV) {
- printk(KERN_ERR "NILFS: unsupported revision "
- "(superblock rev.=%d.%d, current rev.=%d.%d). "
- "Please check the version of mkfs.nilfs.\n",
- le32_to_cpu(sbp->s_rev_level),
- le16_to_cpu(sbp->s_minor_rev_level),
- NILFS_CURRENT_REV, NILFS_MINOR_REV);
+ nilfs_msg(nilfs->ns_sb, KERN_ERR,
+ "unsupported revision (superblock rev.=%d.%d, current rev.=%d.%d). Please check the version of mkfs.nilfs(2).",
+ le32_to_cpu(sbp->s_rev_level),
+ le16_to_cpu(sbp->s_minor_rev_level),
+ NILFS_CURRENT_REV, NILFS_MINOR_REV);
return -EINVAL;
}
nilfs->ns_sbsize = le16_to_cpu(sbp->s_bytes);
@@ -398,12 +400,14 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
nilfs->ns_inode_size = le16_to_cpu(sbp->s_inode_size);
if (nilfs->ns_inode_size > nilfs->ns_blocksize) {
- printk(KERN_ERR "NILFS: too large inode size: %d bytes.\n",
- nilfs->ns_inode_size);
+ nilfs_msg(nilfs->ns_sb, KERN_ERR,
+ "too large inode size: %d bytes",
+ nilfs->ns_inode_size);
return -EINVAL;
} else if (nilfs->ns_inode_size < NILFS_MIN_INODE_SIZE) {
- printk(KERN_ERR "NILFS: too small inode size: %d bytes.\n",
- nilfs->ns_inode_size);
+ nilfs_msg(nilfs->ns_sb, KERN_ERR,
+ "too small inode size: %d bytes",
+ nilfs->ns_inode_size);
return -EINVAL;
}
@@ -411,7 +415,9 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment);
if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) {
- printk(KERN_ERR "NILFS: too short segment.\n");
+ nilfs_msg(nilfs->ns_sb, KERN_ERR,
+ "too short segment: %lu blocks",
+ nilfs->ns_blocks_per_segment);
return -EINVAL;
}
@@ -420,7 +426,9 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
le32_to_cpu(sbp->s_r_segments_percentage);
if (nilfs->ns_r_segments_percentage < 1 ||
nilfs->ns_r_segments_percentage > 99) {
- printk(KERN_ERR "NILFS: invalid reserved segments percentage.\n");
+ nilfs_msg(nilfs->ns_sb, KERN_ERR,
+ "invalid reserved segments percentage: %lu",
+ nilfs->ns_r_segments_percentage);
return -EINVAL;
}
@@ -504,16 +512,16 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
if (!sbp[0]) {
if (!sbp[1]) {
- printk(KERN_ERR "NILFS: unable to read superblock\n");
+ nilfs_msg(sb, KERN_ERR, "unable to read superblock");
return -EIO;
}
- printk(KERN_WARNING
- "NILFS warning: unable to read primary superblock "
- "(blocksize = %d)\n", blocksize);
+ nilfs_msg(sb, KERN_WARNING,
+ "unable to read primary superblock (blocksize = %d)",
+ blocksize);
} else if (!sbp[1]) {
- printk(KERN_WARNING
- "NILFS warning: unable to read secondary superblock "
- "(blocksize = %d)\n", blocksize);
+ nilfs_msg(sb, KERN_WARNING,
+ "unable to read secondary superblock (blocksize = %d)",
+ blocksize);
}
/*
@@ -535,14 +543,14 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
}
if (!valid[swp]) {
nilfs_release_super_block(nilfs);
- printk(KERN_ERR "NILFS: Can't find nilfs on dev %s.\n",
- sb->s_id);
+ nilfs_msg(sb, KERN_ERR, "couldn't find nilfs on the device");
return -EINVAL;
}
if (!valid[!swp])
- printk(KERN_WARNING "NILFS warning: broken superblock. "
- "using spare superblock (blocksize = %d).\n", blocksize);
+ nilfs_msg(sb, KERN_WARNING,
+ "broken superblock, retrying with spare superblock (blocksize = %d)",
+ blocksize);
if (swp)
nilfs_swap_super_block(nilfs);
@@ -576,7 +584,7 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
blocksize = sb_min_blocksize(sb, NILFS_MIN_BLOCK_SIZE);
if (!blocksize) {
- printk(KERN_ERR "NILFS: unable to set blocksize\n");
+ nilfs_msg(sb, KERN_ERR, "unable to set blocksize");
err = -EINVAL;
goto out;
}
@@ -595,8 +603,9 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size);
if (blocksize < NILFS_MIN_BLOCK_SIZE ||
blocksize > NILFS_MAX_BLOCK_SIZE) {
- printk(KERN_ERR "NILFS: couldn't mount because of unsupported "
- "filesystem blocksize %d\n", blocksize);
+ nilfs_msg(sb, KERN_ERR,
+ "couldn't mount because of unsupported filesystem blocksize %d",
+ blocksize);
err = -EINVAL;
goto failed_sbh;
}
@@ -604,10 +613,9 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
int hw_blocksize = bdev_logical_block_size(sb->s_bdev);
if (blocksize < hw_blocksize) {
- printk(KERN_ERR
- "NILFS: blocksize %d too small for device "
- "(sector-size = %d).\n",
- blocksize, hw_blocksize);
+ nilfs_msg(sb, KERN_ERR,
+ "blocksize %d too small for device (sector-size = %d)",
+ blocksize, hw_blocksize);
err = -EINVAL;
goto failed_sbh;
}
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 79369fd6b13b..b305c6f033e7 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -43,6 +43,7 @@ enum {
* struct the_nilfs - struct to supervise multiple nilfs mount points
* @ns_flags: flags
* @ns_flushed_device: flag indicating if all volatile data was flushed
+ * @ns_sb: back pointer to super block instance
* @ns_bdev: block device
* @ns_sem: semaphore for shared states
* @ns_snapshot_mount_mutex: mutex to protect snapshot mounts
@@ -102,6 +103,7 @@ struct the_nilfs {
unsigned long ns_flags;
int ns_flushed_device;
+ struct super_block *ns_sb;
struct block_device *ns_bdev;
struct rw_semaphore ns_sem;
struct mutex ns_snapshot_mount_mutex;
@@ -120,11 +122,8 @@ struct the_nilfs {
unsigned int ns_sb_update_freq;
/*
- * Following fields are dedicated to a writable FS-instance.
- * Except for the period seeking checkpoint, code outside the segment
- * constructor must lock a segment semaphore while accessing these
- * fields.
- * The writable FS-instance is sole during a lifetime of the_nilfs.
+ * The following fields are updated by a writable FS-instance.
+ * These fields are protected by ns_segctor_sem outside load_nilfs().
*/
u64 ns_seg_seq;
__u64 ns_segnum;
@@ -281,7 +280,7 @@ static inline int nilfs_sb_will_flip(struct the_nilfs *nilfs)
}
void nilfs_set_last_segment(struct the_nilfs *, sector_t, u64, __u64);
-struct the_nilfs *alloc_nilfs(struct block_device *bdev);
+struct the_nilfs *alloc_nilfs(struct super_block *sb);
void destroy_nilfs(struct the_nilfs *nilfs);
int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data);
int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb);
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 97768a1379f2..fe251f187ff8 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -362,7 +362,7 @@ handle_zblock:
for (i = 0; i < nr; i++) {
tbh = arr[i];
if (likely(!buffer_uptodate(tbh)))
- submit_bh(READ, tbh);
+ submit_bh(REQ_OP_READ, 0, tbh);
else
ntfs_end_buffer_async_read(tbh, 1);
}
@@ -877,7 +877,7 @@ lock_retry_remap:
do {
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
- submit_bh(WRITE, bh);
+ submit_bh(REQ_OP_WRITE, 0, bh);
need_end_writeback = false;
}
bh = next;
@@ -1202,7 +1202,7 @@ lock_retry_remap:
BUG_ON(!buffer_mapped(tbh));
get_bh(tbh);
tbh->b_end_io = end_buffer_write_sync;
- submit_bh(WRITE, tbh);
+ submit_bh(REQ_OP_WRITE, 0, tbh);
}
/* Synchronize the mft mirror now if not @sync. */
if (is_mft && !sync)
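
From here on, the ntfs hunks track the block layer's split of the old rw argument into a request op plus separate op flags. The mechanical rule, sketched (ntfs_submit_bh_for_read() in the fs/ntfs/file.c hunk below is the in-tree instance of the read case):

	/* Old: submit_bh(READ, bh);   New: submit_bh(REQ_OP_READ, 0, bh);
	 * Old: submit_bh(WRITE, bh);  New: submit_bh(REQ_OP_WRITE, 0, bh);
	 * The middle argument carries op flags (e.g. WRITE_SYNC) that
	 * used to be OR-ed into the single rw word. */
	static int demo_read_bh(struct buffer_head *bh)
	{
		lock_buffer(bh);
		get_bh(bh);		/* dropped by the completion handler */
		bh->b_end_io = end_buffer_read_sync;
		return submit_bh(REQ_OP_READ, 0, bh);
	}
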
diff --git a/fs/ntfs/compress.c b/fs/ntfs/compress.c
index f2b5e746f49b..f8eb04387ca4 100644
--- a/fs/ntfs/compress.c
+++ b/fs/ntfs/compress.c
@@ -670,7 +670,7 @@ lock_retry_remap:
}
get_bh(tbh);
tbh->b_end_io = end_buffer_read_sync;
- submit_bh(READ, tbh);
+ submit_bh(REQ_OP_READ, 0, tbh);
}
/* Wait for io completion on all buffer heads. */
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 5622ed5a201e..f548629dfaac 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -553,7 +553,7 @@ static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
lock_buffer(bh);
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
- return submit_bh(READ, bh);
+ return submit_bh(REQ_OP_READ, 0, bh);
}
/**
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index f40972d6df90..e01287c964a8 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -1854,7 +1854,7 @@ int ntfs_read_inode_mount(struct inode *vi)
/* Need this to sanity check attribute list references to $MFT. */
vi->i_generation = ni->seq_no = le16_to_cpu(m->sequence_number);
- /* Provides readpage() and sync_page() for map_mft_record(). */
+ /* Provides readpage() for map_mft_record(). */
vi->i_mapping->a_ops = &ntfs_mst_aops;
ctx = ntfs_attr_get_search_ctx(ni, m);
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c
index 9d71213ca81e..761f12f7f3ef 100644
--- a/fs/ntfs/logfile.c
+++ b/fs/ntfs/logfile.c
@@ -821,7 +821,7 @@ map_vcn:
* completed ignore errors afterwards as we can assume
* that if one buffer worked all of them will work.
*/
- submit_bh(WRITE, bh);
+ submit_bh(REQ_OP_WRITE, 0, bh);
if (should_wait) {
should_wait = false;
wait_on_buffer(bh);
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c
index 37b2501caaa4..d15d492ce47b 100644
--- a/fs/ntfs/mft.c
+++ b/fs/ntfs/mft.c
@@ -592,7 +592,7 @@ int ntfs_sync_mft_mirror(ntfs_volume *vol, const unsigned long mft_no,
clear_buffer_dirty(tbh);
get_bh(tbh);
tbh->b_end_io = end_buffer_write_sync;
- submit_bh(WRITE, tbh);
+ submit_bh(REQ_OP_WRITE, 0, tbh);
}
/* Wait on i/o completion of buffers. */
for (i_bhs = 0; i_bhs < nr_bhs; i_bhs++) {
@@ -785,7 +785,7 @@ int write_mft_record_nolock(ntfs_inode *ni, MFT_RECORD *m, int sync)
clear_buffer_dirty(tbh);
get_bh(tbh);
tbh->b_end_io = end_buffer_write_sync;
- submit_bh(WRITE, tbh);
+ submit_bh(REQ_OP_WRITE, 0, tbh);
}
/* Synchronize the mft mirror now if not @sync. */
if (!sync && ni->mft_no < vol->mftmirr_size)
diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c
index 443abecf01b7..358258364616 100644
--- a/fs/ntfs/namei.c
+++ b/fs/ntfs/namei.c
@@ -253,7 +253,7 @@ handle_name:
err = (signed)nls_name.len;
goto err_out;
}
- nls_name.hash = full_name_hash(nls_name.name, nls_name.len);
+ nls_name.hash = full_name_hash(dent, nls_name.name, nls_name.len);
dent = d_add_ci(dent, dent_inode, &nls_name);
kfree(nls_name.name);
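
full_name_hash() now takes a salt as its first argument so that name hashes can be mixed per parent; ntfs passes the dentry being looked up, while the dlm change further down passes NULL because lock IDs live in one global namespace. A sketch of the two uses under the new signature:

	/* Salted vs. unsalted hashing with the new interface. */
	static unsigned int demo_hash(const struct dentry *parent,
				      const char *name, unsigned int len)
	{
		if (parent)	/* per-directory hash, as in the ntfs hunk */
			return full_name_hash(parent, name, len);
		return full_name_hash(NULL, name, len);	/* global, as in dlm */
	}
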
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 460c0cedab3a..7dabbc31060e 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -6106,6 +6106,43 @@ void ocfs2_schedule_truncate_log_flush(struct ocfs2_super *osb,
}
}
+/*
+ * Try to flush the truncate log if we can free enough clusters from it.
+ * As for the return value, "< 0" means error, "0" means no space, and "1"
+ * means we have freed enough space, so the caller can try to allocate again.
+ */
+int ocfs2_try_to_free_truncate_log(struct ocfs2_super *osb,
+ unsigned int needed)
+{
+ tid_t target;
+ int ret = 0;
+ unsigned int truncated_clusters;
+
+ inode_lock(osb->osb_tl_inode);
+ truncated_clusters = osb->truncated_clusters;
+ inode_unlock(osb->osb_tl_inode);
+
+ /*
+ * Check whether we can succeed in allocating if we free
+ * the truncate log.
+ */
+ if (truncated_clusters < needed)
+ goto out;
+
+ ret = ocfs2_flush_truncate_log(osb);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ if (jbd2_journal_start_commit(osb->journal->j_journal, &target)) {
+ jbd2_log_wait_commit(osb->journal->j_journal, target);
+ ret = 1;
+ }
+out:
+ return ret;
+}
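
The tri-state return is consumed by the retry path added to ocfs2_reserve_clusters_with_limit() in the fs/ocfs2/suballoc.c hunk later in this patch. A simplified sketch of that caller pattern; demo_reserve_bits() is a hypothetical stand-in for the real reservation call:

	static int demo_reserve_bits(struct ocfs2_super *osb,
				     unsigned int bits);	/* hypothetical */

	static int demo_alloc_with_tl_retry(struct ocfs2_super *osb,
					    unsigned int bits_wanted)
	{
		int retried = 0, status, ret;

	retry:
		status = demo_reserve_bits(osb, bits_wanted);
		if (status == -ENOSPC && !retried) {
			retried = 1;
			ret = ocfs2_try_to_free_truncate_log(osb, bits_wanted);
			if (ret == 1)
				goto retry;	/* enough clusters were freed */
			if (ret < 0)
				mlog_errno(ret);
		}
		return status;
	}
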
+
static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb,
int slot_num,
struct inode **tl_inode,
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h
index f3dc1b0dfffc..4a5152ec88a3 100644
--- a/fs/ocfs2/alloc.h
+++ b/fs/ocfs2/alloc.h
@@ -188,6 +188,8 @@ int ocfs2_truncate_log_append(struct ocfs2_super *osb,
u64 start_blk,
unsigned int num_clusters);
int __ocfs2_flush_truncate_log(struct ocfs2_super *osb);
+int ocfs2_try_to_free_truncate_log(struct ocfs2_super *osb,
+ unsigned int needed);
/*
* Process local structure which describes the block unlinks done
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index c034edf3ef38..98d36548153d 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -640,7 +640,7 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
!buffer_new(bh) &&
ocfs2_should_read_blk(inode, page, block_start) &&
(block_start < from || block_end > to)) {
- ll_rw_block(READ, 1, &bh);
+ ll_rw_block(REQ_OP_READ, 0, 1, &bh);
*wait_bh++=bh;
}
@@ -1645,43 +1645,6 @@ static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
return ret;
}
-/*
- * Try to flush truncate logs if we can free enough clusters from it.
- * As for return value, "< 0" means error, "0" no space and "1" means
- * we have freed enough spaces and let the caller try to allocate again.
- */
-static int ocfs2_try_to_free_truncate_log(struct ocfs2_super *osb,
- unsigned int needed)
-{
- tid_t target;
- int ret = 0;
- unsigned int truncated_clusters;
-
- inode_lock(osb->osb_tl_inode);
- truncated_clusters = osb->truncated_clusters;
- inode_unlock(osb->osb_tl_inode);
-
- /*
- * Check whether we can succeed in allocating if we free
- * the truncate log.
- */
- if (truncated_clusters < needed)
- goto out;
-
- ret = ocfs2_flush_truncate_log(osb);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
-
- if (jbd2_journal_start_commit(osb->journal->j_journal, &target)) {
- jbd2_log_wait_commit(osb->journal->j_journal, target);
- ret = 1;
- }
-out:
- return ret;
-}
-
int ocfs2_write_begin_nolock(struct address_space *mapping,
loff_t pos, unsigned len, ocfs2_write_type_t type,
struct page **pagep, void **fsdata,
@@ -2426,7 +2389,7 @@ static int ocfs2_dio_end_io(struct kiocb *iocb,
static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
struct file *file = iocb->ki_filp;
- struct inode *inode = file_inode(file)->i_mapping->host;
+ struct inode *inode = file->f_mapping->host;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
get_block_t *get_block;
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index 498641eed2db..8f040f88ade4 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -79,7 +79,7 @@ int ocfs2_write_block(struct ocfs2_super *osb, struct buffer_head *bh,
get_bh(bh); /* for end_buffer_write_sync() */
bh->b_end_io = end_buffer_write_sync;
- submit_bh(WRITE, bh);
+ submit_bh(REQ_OP_WRITE, 0, bh);
wait_on_buffer(bh);
@@ -154,7 +154,7 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
clear_buffer_uptodate(bh);
get_bh(bh); /* for end_buffer_read_sync() */
bh->b_end_io = end_buffer_read_sync;
- submit_bh(READ, bh);
+ submit_bh(REQ_OP_READ, 0, bh);
}
for (i = nr; i > 0; i--) {
@@ -310,7 +310,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
if (validate)
set_buffer_needs_validate(bh);
bh->b_end_io = end_buffer_read_sync;
- submit_bh(READ, bh);
+ submit_bh(REQ_OP_READ, 0, bh);
continue;
}
}
@@ -424,7 +424,7 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
get_bh(bh); /* for end_buffer_write_sync() */
bh->b_end_io = end_buffer_write_sync;
ocfs2_compute_meta_ecc(osb->sb, bh->b_data, &di->i_check);
- submit_bh(WRITE, bh);
+ submit_bh(REQ_OP_WRITE, 0, bh);
wait_on_buffer(bh);
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 6aaf3e351391..636abcbd4650 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -530,7 +530,8 @@ static void o2hb_bio_end_io(struct bio *bio)
static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
struct o2hb_bio_wait_ctxt *wc,
unsigned int *current_slot,
- unsigned int max_slots)
+ unsigned int max_slots, int op,
+ int op_flags)
{
int len, current_page;
unsigned int vec_len, vec_start;
@@ -556,6 +557,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
bio->bi_bdev = reg->hr_bdev;
bio->bi_private = wc;
bio->bi_end_io = o2hb_bio_end_io;
+ bio_set_op_attrs(bio, op, op_flags);
vec_start = (cs << bits) % PAGE_SIZE;
while(cs < max_slots) {
@@ -591,7 +593,8 @@ static int o2hb_read_slots(struct o2hb_region *reg,
o2hb_bio_wait_init(&wc);
while(current_slot < max_slots) {
- bio = o2hb_setup_one_bio(reg, &wc, &current_slot, max_slots);
+ bio = o2hb_setup_one_bio(reg, &wc, &current_slot, max_slots,
+ REQ_OP_READ, 0);
if (IS_ERR(bio)) {
status = PTR_ERR(bio);
mlog_errno(status);
@@ -599,7 +602,7 @@ static int o2hb_read_slots(struct o2hb_region *reg,
}
atomic_inc(&wc.wc_num_reqs);
- submit_bio(READ, bio);
+ submit_bio(bio);
}
status = 0;
@@ -623,7 +626,8 @@ static int o2hb_issue_node_write(struct o2hb_region *reg,
slot = o2nm_this_node();
- bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1);
+ bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1, REQ_OP_WRITE,
+ WRITE_SYNC);
if (IS_ERR(bio)) {
status = PTR_ERR(bio);
mlog_errno(status);
@@ -631,7 +635,7 @@ static int o2hb_issue_node_write(struct o2hb_region *reg,
}
atomic_inc(&write_wc->wc_num_reqs);
- submit_bio(WRITE_SYNC, bio);
+ submit_bio(bio);
status = 0;
bail:
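
This is the same block-layer transition as in the ntfs hunks, applied to bios: submit_bio() no longer takes an rw word, so the op and flags are attached to the bio itself before submission. Sketched:

	/* Attach op/flags to the bio, then submit with no rw argument. */
	static void demo_submit(struct bio *bio, int op, int op_flags)
	{
		bio_set_op_attrs(bio, op, op_flags); /* e.g. REQ_OP_WRITE, WRITE_SYNC */
		submit_bio(bio);
	}
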
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 4238eb28889f..1d67fcbf7160 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -1618,16 +1618,12 @@ static void o2net_start_connect(struct work_struct *work)
/* watch for racing with tearing a node down */
node = o2nm_get_node_by_num(o2net_num_from_nn(nn));
- if (node == NULL) {
- ret = 0;
+ if (node == NULL)
goto out;
- }
mynode = o2nm_get_node_by_num(o2nm_this_node());
- if (mynode == NULL) {
- ret = 0;
+ if (mynode == NULL)
goto out;
- }
spin_lock(&nn->nn_lock);
/*
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index 004f2cbe8f71..e9f3705c4c9f 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -47,7 +47,7 @@
#define DLM_HASH_BUCKETS (DLM_HASH_PAGES * DLM_BUCKETS_PER_PAGE)
/* Intended to make it easier for us to switch out hash functions */
-#define dlm_lockid_hash(_n, _l) full_name_hash(_n, _l)
+#define dlm_lockid_hash(_n, _l) full_name_hash(NULL, _n, _l)
enum dlm_mle_type {
DLM_MLE_BLOCK = 0,
@@ -1004,6 +1004,8 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
u8 nodenum, u8 *real_master);
+void __dlm_do_purge_lockres(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res);
int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res,
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 825136070d2c..e7b760deefae 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -347,26 +347,6 @@ static struct dentry *dlm_debugfs_root;
#define DLM_DEBUGFS_PURGE_LIST "purge_list"
/* begin - utils funcs */
-static void dlm_debug_free(struct kref *kref)
-{
- struct dlm_debug_ctxt *dc;
-
- dc = container_of(kref, struct dlm_debug_ctxt, debug_refcnt);
-
- kfree(dc);
-}
-
-static void dlm_debug_put(struct dlm_debug_ctxt *dc)
-{
- if (dc)
- kref_put(&dc->debug_refcnt, dlm_debug_free);
-}
-
-static void dlm_debug_get(struct dlm_debug_ctxt *dc)
-{
- kref_get(&dc->debug_refcnt);
-}
-
static int debug_release(struct inode *inode, struct file *file)
{
free_page((unsigned long)file->private_data);
@@ -932,11 +912,9 @@ int dlm_debug_init(struct dlm_ctxt *dlm)
goto bail;
}
- dlm_debug_get(dc);
return 0;
bail:
- dlm_debug_shutdown(dlm);
return -ENOMEM;
}
@@ -949,7 +927,8 @@ void dlm_debug_shutdown(struct dlm_ctxt *dlm)
debugfs_remove(dc->debug_mle_dentry);
debugfs_remove(dc->debug_lockres_dentry);
debugfs_remove(dc->debug_state_dentry);
- dlm_debug_put(dc);
+ kfree(dc);
+ dc = NULL;
}
}
@@ -969,7 +948,6 @@ int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm)
mlog_errno(-ENOMEM);
goto bail;
}
- kref_init(&dlm->dlm_debug_ctxt->debug_refcnt);
return 0;
bail:
diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h
index 1f27c4812d1a..5ced5482e7d3 100644
--- a/fs/ocfs2/dlm/dlmdebug.h
+++ b/fs/ocfs2/dlm/dlmdebug.h
@@ -30,7 +30,6 @@ void dlm_print_one_mle(struct dlm_master_list_entry *mle);
#ifdef CONFIG_DEBUG_FS
struct dlm_debug_ctxt {
- struct kref debug_refcnt;
struct dentry *debug_state_dentry;
struct dentry *debug_lockres_dentry;
struct dentry *debug_mle_dentry;
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 12e064b8be9a..533bd524e41e 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -172,12 +172,10 @@ void __dlm_unhash_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
void __dlm_insert_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
{
struct hlist_head *bucket;
- struct qstr *q;
assert_spin_locked(&dlm->spinlock);
- q = &res->lockname;
- bucket = dlm_lockres_hash(dlm, q->hash);
+ bucket = dlm_lockres_hash(dlm, res->lockname.hash);
/* get a reference for our hashtable */
dlm_lockres_get(res);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 13719d3f35f8..6ea06f8a7d29 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2276,9 +2276,12 @@ int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res)
mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n",
dlm->name, namelen, lockname, res->owner, r);
dlm_print_one_lock_resource(res);
- BUG();
- }
- return ret ? ret : r;
+ if (r == -ENOMEM)
+ BUG();
+ } else
+ ret = r;
+
+ return ret;
}
int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
@@ -2416,48 +2419,26 @@ int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data,
}
spin_lock(&res->spinlock);
- BUG_ON(!(res->state & DLM_LOCK_RES_DROPPING_REF));
- if (!list_empty(&res->purge)) {
- mlog(0, "%s: Removing res %.*s from purgelist\n",
- dlm->name, res->lockname.len, res->lockname.name);
- list_del_init(&res->purge);
- dlm_lockres_put(res);
- dlm->purge_count--;
- }
-
- if (!__dlm_lockres_unused(res)) {
- mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
- dlm->name, res->lockname.len, res->lockname.name);
- __dlm_print_one_lock_resource(res);
- BUG();
- }
-
- __dlm_unhash_lockres(dlm, res);
-
- spin_lock(&dlm->track_lock);
- if (!list_empty(&res->tracking))
- list_del_init(&res->tracking);
- else {
- mlog(ML_ERROR, "%s: Resource %.*s not on the Tracking list\n",
- dlm->name, res->lockname.len, res->lockname.name);
- __dlm_print_one_lock_resource(res);
+ if (!(res->state & DLM_LOCK_RES_DROPPING_REF)) {
+ spin_unlock(&res->spinlock);
+ spin_unlock(&dlm->spinlock);
+ mlog(ML_NOTICE, "%s:%.*s: node %u sends deref done "
+ "but it is already derefed!\n", dlm->name,
+ res->lockname.len, res->lockname.name, node);
+ ret = 0;
+ goto done;
}
- spin_unlock(&dlm->track_lock);
- /* lockres is not in the hash now. drop the flag and wake up
- * any processes waiting in dlm_get_lock_resource.
- */
- res->state &= ~DLM_LOCK_RES_DROPPING_REF;
+ __dlm_do_purge_lockres(dlm, res);
spin_unlock(&res->spinlock);
wake_up(&res->wq);
- dlm_lockres_put(res);
-
spin_unlock(&dlm->spinlock);
ret = 0;
-
done:
+ if (res)
+ dlm_lockres_put(res);
dlm_put(dlm);
return ret;
}
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index f6b313898763..dd5cb8bcefd1 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -2343,6 +2343,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
struct dlm_lock_resource *res;
int i;
struct hlist_head *bucket;
+ struct hlist_node *tmp;
struct dlm_lock *lock;
@@ -2365,7 +2366,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
*/
for (i = 0; i < DLM_HASH_BUCKETS; i++) {
bucket = dlm_lockres_hash(dlm, i);
- hlist_for_each_entry(res, bucket, hash_node) {
+ hlist_for_each_entry_safe(res, tmp, bucket, hash_node) {
/* always prune any $RECOVERY entries for dead nodes,
* otherwise hangs can occur during later recovery */
if (dlm_is_recovery_lock(res->lockname.name,
@@ -2386,8 +2387,17 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
break;
}
}
- dlm_lockres_clear_refmap_bit(dlm, res,
- dead_node);
+
+ if ((res->owner == dead_node) &&
+ (res->state & DLM_LOCK_RES_DROPPING_REF)) {
+ dlm_lockres_get(res);
+ __dlm_do_purge_lockres(dlm, res);
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+ dlm_lockres_put(res);
+ continue;
+ } else if (res->owner == dlm->node_num)
+ dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
spin_unlock(&res->spinlock);
continue;
}
@@ -2398,14 +2408,17 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
if (res->state & DLM_LOCK_RES_DROPPING_REF) {
mlog(0, "%s:%.*s: owned by "
"dead node %u, this node was "
- "dropping its ref when it died. "
- "continue, dropping the flag.\n",
+ "dropping its ref when master died. "
+ "continue, purging the lockres.\n",
dlm->name, res->lockname.len,
res->lockname.name, dead_node);
+ dlm_lockres_get(res);
+ __dlm_do_purge_lockres(dlm, res);
+ spin_unlock(&res->spinlock);
+ wake_up(&res->wq);
+ dlm_lockres_put(res);
+ continue;
}
- res->state &= ~DLM_LOCK_RES_DROPPING_REF;
- dlm_move_lockres_to_recovery_list(dlm,
- res);
} else if (res->owner == dlm->node_num) {
dlm_free_dead_locks(dlm, res, dead_node);
__dlm_lockres_calc_usage(dlm, res);
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 68d239ba0c63..838a06d4066a 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -160,6 +160,52 @@ void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
spin_unlock(&dlm->spinlock);
}
+/*
+ * Do the real purge work:
+ * unhash the lockres, and
+ * clear flag DLM_LOCK_RES_DROPPING_REF.
+ * It requires dlm and lockres spinlock to be taken.
+ */
+void __dlm_do_purge_lockres(struct dlm_ctxt *dlm,
+ struct dlm_lock_resource *res)
+{
+ assert_spin_locked(&dlm->spinlock);
+ assert_spin_locked(&res->spinlock);
+
+ if (!list_empty(&res->purge)) {
+ mlog(0, "%s: Removing res %.*s from purgelist\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ list_del_init(&res->purge);
+ dlm_lockres_put(res);
+ dlm->purge_count--;
+ }
+
+ if (!__dlm_lockres_unused(res)) {
+ mlog(ML_ERROR, "%s: res %.*s in use after deref\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ __dlm_print_one_lock_resource(res);
+ BUG();
+ }
+
+ __dlm_unhash_lockres(dlm, res);
+
+ spin_lock(&dlm->track_lock);
+ if (!list_empty(&res->tracking))
+ list_del_init(&res->tracking);
+ else {
+ mlog(ML_ERROR, "%s: Resource %.*s not on the Tracking list\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ __dlm_print_one_lock_resource(res);
+ }
+ spin_unlock(&dlm->track_lock);
+
+ /*
+ * lockres is not in the hash now. drop the flag and wake up
+ * any processes waiting in dlm_get_lock_resource.
+ */
+ res->state &= ~DLM_LOCK_RES_DROPPING_REF;
+}
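
Both call sites added earlier in this patch (the deref-done handler and the recovery cleanup path) follow the contract stated in the comment. Modeled on the recovery-path site, the calling shape is roughly:

	/* Sketch: dlm->spinlock is already held by the caller; take
	 * res->spinlock, purge, and drop the extra reference only after
	 * the wake_up() so the unhashed lockres stays alive. */
	static void demo_purge(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res)
	{
		assert_spin_locked(&dlm->spinlock);
		spin_lock(&res->spinlock);
		dlm_lockres_get(res);
		__dlm_do_purge_lockres(dlm, res);
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
		dlm_lockres_put(res);
	}
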
+
static void dlm_purge_lockres(struct dlm_ctxt *dlm,
struct dlm_lock_resource *res)
{
@@ -175,6 +221,13 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
res->lockname.len, res->lockname.name, master);
if (!master) {
+ if (res->state & DLM_LOCK_RES_DROPPING_REF) {
+ mlog(ML_NOTICE, "%s: res %.*s already in DLM_LOCK_RES_DROPPING_REF state\n",
+ dlm->name, res->lockname.len, res->lockname.name);
+ spin_unlock(&res->spinlock);
+ return;
+ }
+
res->state |= DLM_LOCK_RES_DROPPING_REF;
/* drop spinlock... retake below */
spin_unlock(&res->spinlock);
@@ -203,8 +256,8 @@ static void dlm_purge_lockres(struct dlm_ctxt *dlm,
dlm->purge_count--;
}
- if (!master && ret != 0) {
- mlog(0, "%s: deref %.*s in progress or master goes down\n",
+ if (!master && ret == DLM_DEREF_RESPONSE_INPROG) {
+ mlog(0, "%s: deref %.*s in progress\n",
dlm->name, res->lockname.len, res->lockname.name);
spin_unlock(&res->spinlock);
return;
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 47b3b2d4e775..ef474cdd6404 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -469,7 +469,7 @@ static int dlmfs_mkdir(struct inode * dir,
{
int status;
struct inode *inode = NULL;
- struct qstr *domain = &dentry->d_name;
+ const struct qstr *domain = &dentry->d_name;
struct dlmfs_inode_private *ip;
struct ocfs2_cluster_connection *conn;
@@ -518,7 +518,7 @@ static int dlmfs_create(struct inode *dir,
{
int status = 0;
struct inode *inode;
- struct qstr *name = &dentry->d_name;
+ const struct qstr *name = &dentry->d_name;
mlog(0, "create %.*s\n", name->len, name->name);
diff --git a/fs/ocfs2/dlmfs/userdlm.c b/fs/ocfs2/dlmfs/userdlm.c
index 0499e3fb7bdb..f70cda2f090d 100644
--- a/fs/ocfs2/dlmfs/userdlm.c
+++ b/fs/ocfs2/dlmfs/userdlm.c
@@ -667,7 +667,7 @@ void user_dlm_set_locking_protocol(void)
ocfs2_stack_glue_set_max_proto_version(&user_dlm_lproto.lp_max_version);
}
-struct ocfs2_cluster_connection *user_dlm_register(struct qstr *name)
+struct ocfs2_cluster_connection *user_dlm_register(const struct qstr *name)
{
int rc;
struct ocfs2_cluster_connection *conn;
diff --git a/fs/ocfs2/dlmfs/userdlm.h b/fs/ocfs2/dlmfs/userdlm.h
index 3b42d79531d7..ede94a6e7fd3 100644
--- a/fs/ocfs2/dlmfs/userdlm.h
+++ b/fs/ocfs2/dlmfs/userdlm.h
@@ -83,7 +83,7 @@ void user_dlm_write_lvb(struct inode *inode,
ssize_t user_dlm_read_lvb(struct inode *inode,
char *val,
unsigned int len);
-struct ocfs2_cluster_connection *user_dlm_register(struct qstr *name);
+struct ocfs2_cluster_connection *user_dlm_register(const struct qstr *name);
void user_dlm_unregister(struct ocfs2_cluster_connection *conn);
void user_dlm_set_locking_protocol(void);
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 1eaa9100c889..83d576f6a287 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -1635,7 +1635,6 @@ int ocfs2_create_new_inode_locks(struct inode *inode)
int ret;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- BUG_ON(!inode);
BUG_ON(!ocfs2_inode_is_new(inode));
mlog(0, "Inode %llu\n", (unsigned long long)OCFS2_I(inode)->ip_blkno);
@@ -1665,10 +1664,8 @@ int ocfs2_create_new_inode_locks(struct inode *inode)
}
ret = ocfs2_create_new_lock(osb, &OCFS2_I(inode)->ip_open_lockres, 0, 0);
- if (ret) {
+ if (ret)
mlog_errno(ret);
- goto bail;
- }
bail:
return ret;
@@ -1680,8 +1677,6 @@ int ocfs2_rw_lock(struct inode *inode, int write)
struct ocfs2_lock_res *lockres;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- BUG_ON(!inode);
-
mlog(0, "inode %llu take %s RW lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
write ? "EXMODE" : "PRMODE");
@@ -1724,8 +1719,6 @@ int ocfs2_open_lock(struct inode *inode)
struct ocfs2_lock_res *lockres;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- BUG_ON(!inode);
-
mlog(0, "inode %llu take PRMODE open lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno);
@@ -1749,8 +1742,6 @@ int ocfs2_try_open_lock(struct inode *inode, int write)
struct ocfs2_lock_res *lockres;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- BUG_ON(!inode);
-
mlog(0, "inode %llu try to take %s open lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
write ? "EXMODE" : "PRMODE");
@@ -2328,8 +2319,6 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct buffer_head *local_bh = NULL;
- BUG_ON(!inode);
-
mlog(0, "inode %llu, take %s META lock\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno,
ex ? "EXMODE" : "PRMODE");
diff --git a/fs/ocfs2/inode.h b/fs/ocfs2/inode.h
index d8f3fc8d2551..50cc55047443 100644
--- a/fs/ocfs2/inode.h
+++ b/fs/ocfs2/inode.h
@@ -145,22 +145,15 @@ int ocfs2_drop_inode(struct inode *inode);
struct inode *ocfs2_ilookup(struct super_block *sb, u64 feoff);
struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 feoff, unsigned flags,
int sysfile_type);
-int ocfs2_inode_init_private(struct inode *inode);
int ocfs2_inode_revalidate(struct dentry *dentry);
void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
int create_ino);
-void ocfs2_read_inode(struct inode *inode);
-void ocfs2_read_inode2(struct inode *inode, void *opaque);
-ssize_t ocfs2_rw_direct(int rw, struct file *filp, char *buf,
- size_t size, loff_t *offp);
void ocfs2_sync_blockdev(struct super_block *sb);
void ocfs2_refresh_inode(struct inode *inode,
struct ocfs2_dinode *fe);
int ocfs2_mark_inode_dirty(handle_t *handle,
struct inode *inode,
struct buffer_head *bh);
-struct buffer_head *ocfs2_bread(struct inode *inode,
- int block, int *err, int reada);
void ocfs2_set_inode_flags(struct inode *inode);
void ocfs2_get_inode_flags(struct ocfs2_inode_info *oi);
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index e607419cdfa4..a244f14c6b87 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -1159,10 +1159,8 @@ static int ocfs2_force_read_journal(struct inode *inode)
int status = 0;
int i;
u64 v_blkno, p_blkno, p_blocks, num_blocks;
-#define CONCURRENT_JOURNAL_FILL 32ULL
- struct buffer_head *bhs[CONCURRENT_JOURNAL_FILL];
-
- memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL);
+ struct buffer_head *bh = NULL;
+ struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
num_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
v_blkno = 0;
@@ -1174,29 +1172,32 @@ static int ocfs2_force_read_journal(struct inode *inode)
goto bail;
}
- if (p_blocks > CONCURRENT_JOURNAL_FILL)
- p_blocks = CONCURRENT_JOURNAL_FILL;
-
- /* We are reading journal data which should not
- * be put in the uptodate cache */
- status = ocfs2_read_blocks_sync(OCFS2_SB(inode->i_sb),
- p_blkno, p_blocks, bhs);
- if (status < 0) {
- mlog_errno(status);
- goto bail;
- }
+ for (i = 0; i < p_blocks; i++, p_blkno++) {
+ bh = __find_get_block(osb->sb->s_bdev, p_blkno,
+ osb->sb->s_blocksize);
+ /* block not cached. */
+ if (!bh)
+ continue;
+
+ brelse(bh);
+ bh = NULL;
+ /* We are reading journal data which should not
+ * be put in the uptodate cache.
+ */
+ status = ocfs2_read_blocks_sync(osb, p_blkno, 1, &bh);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
- for(i = 0; i < p_blocks; i++) {
- brelse(bhs[i]);
- bhs[i] = NULL;
+ brelse(bh);
+ bh = NULL;
}
v_blkno += p_blocks;
}
bail:
- for(i = 0; i < CONCURRENT_JOURNAL_FILL; i++)
- brelse(bhs[i]);
return status;
}
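
The rewritten loop replaces the fixed 32-buffer batch with a per-block probe: __find_get_block() only looks the block up in the cache without issuing I/O, so uncached blocks are skipped entirely, while a cached (possibly stale) copy is dropped and re-read synchronously from disk. The per-block step, isolated as a sketch:

	static int demo_refresh_journal_block(struct ocfs2_super *osb,
					      u64 p_blkno)
	{
		struct buffer_head *bh;
		int status;

		bh = __find_get_block(osb->sb->s_bdev, p_blkno,
				      osb->sb->s_blocksize);
		if (!bh)
			return 0;	/* not cached: nothing to refresh */

		brelse(bh);
		bh = NULL;
		/* bypass the uptodate cache; journal data must hit disk */
		status = ocfs2_read_blocks_sync(osb, p_blkno, 1, &bh);
		if (status < 0)
			return status;
		brelse(bh);
		return 0;
	}
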
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index ab6a6cdcf91c..87e577a49b0d 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -483,7 +483,7 @@ int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
struct ocfs2_global_disk_dqblk dqblk;
s64 spacechange, inodechange;
- time_t olditime, oldbtime;
+ time64_t olditime, oldbtime;
err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
sizeof(struct ocfs2_global_disk_dqblk),
diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c
index ced70c8139f7..c9e828ec3c8e 100644
--- a/fs/ocfs2/stack_user.c
+++ b/fs/ocfs2/stack_user.c
@@ -1007,10 +1007,17 @@ static int user_cluster_connect(struct ocfs2_cluster_connection *conn)
lc->oc_type = NO_CONTROLD;
rc = dlm_new_lockspace(conn->cc_name, conn->cc_cluster_name,
- DLM_LSFL_FS, DLM_LVB_LEN,
+ DLM_LSFL_FS | DLM_LSFL_NEWEXCL, DLM_LVB_LEN,
&ocfs2_ls_ops, conn, &ops_rv, &fsdlm);
- if (rc)
+ if (rc) {
+ if (rc == -EEXIST || rc == -EPROTO)
+ printk(KERN_ERR "ocfs2: Unable to create the "
+ "lockspace %s (%d), because an ocfs2-tools "
+ "program is running on this file system "
+ "with a lockspace of the same name\n",
+ conn->cc_name, rc);
goto out;
+ }
if (ops_rv == -EOPNOTSUPP) {
lc->oc_type = WITH_CONTROLD;
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index 13219ed73e1d..52c07346bea3 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -735,8 +735,6 @@ static void __exit ocfs2_stack_glue_exit(void)
{
memset(&locking_max_version, 0,
sizeof(struct ocfs2_protocol_version));
- locking_max_version.pv_major = 0;
- locking_max_version.pv_minor = 0;
ocfs2_sysfs_exit();
if (ocfs2_table_header)
unregister_sysctl_table(ocfs2_table_header);
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 2f19aeec5482..ea47120a85ff 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -1164,7 +1164,8 @@ static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb,
int flags,
struct ocfs2_alloc_context **ac)
{
- int status;
+ int status, ret = 0;
+ int retried = 0;
*ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
if (!(*ac)) {
@@ -1189,7 +1190,24 @@ static int ocfs2_reserve_clusters_with_limit(struct ocfs2_super *osb,
}
if (status == -ENOSPC) {
+retry:
status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
+ /* Retry if there is sufficient space cached in truncate log */
+ if (status == -ENOSPC && !retried) {
+ retried = 1;
+ ocfs2_inode_unlock((*ac)->ac_inode, 1);
+ inode_unlock((*ac)->ac_inode);
+
+ ret = ocfs2_try_to_free_truncate_log(osb, bits_wanted);
+ if (ret == 1)
+ goto retry;
+
+ if (ret < 0)
+ mlog_errno(ret);
+
+ inode_lock((*ac)->ac_inode);
+ ocfs2_inode_lock((*ac)->ac_inode, NULL, 1);
+ }
if (status < 0) {
if (status != -ENOSPC)
mlog_errno(status);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index d7cae3327de5..603b28d6f008 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1819,7 +1819,7 @@ static int ocfs2_get_sector(struct super_block *sb,
if (!buffer_dirty(*bh))
clear_buffer_uptodate(*bh);
unlock_buffer(*bh);
- ll_rw_block(READ, 1, bh);
+ ll_rw_block(REQ_OP_READ, 0, 1, bh);
wait_on_buffer(*bh);
if (!buffer_uptodate(*bh)) {
mlog_errno(-EIO);
@@ -2072,7 +2072,6 @@ static int ocfs2_initialize_super(struct super_block *sb,
osb->osb_dx_seed[3] = le32_to_cpu(di->id2.i_super.s_uuid_hash);
osb->sb = sb;
- /* Save off for ocfs2_rw_direct */
osb->s_sectsize_bits = blksize_bits(sector_size);
BUG_ON(!osb->s_sectsize_bits);
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index d2053853951e..5bb44f7a78ee 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -7344,7 +7344,7 @@ const struct xattr_handler ocfs2_xattr_trusted_handler = {
* 'user' attributes support
*/
static int ocfs2_xattr_user_get(const struct xattr_handler *handler,
- struct dentry *unusde, struct inode *inode,
+ struct dentry *unused, struct inode *inode,
const char *name, void *buffer, size_t size)
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
diff --git a/fs/open.c b/fs/open.c
index 93ae3cdee4ab..4fd6e256f4f4 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -840,13 +840,13 @@ EXPORT_SYMBOL(file_path);
int vfs_open(const struct path *path, struct file *file,
const struct cred *cred)
{
- struct inode *inode = vfs_select_inode(path->dentry, file->f_flags);
+ struct dentry *dentry = d_real(path->dentry, NULL, file->f_flags);
- if (IS_ERR(inode))
- return PTR_ERR(inode);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
file->f_path = *path;
- return do_dentry_open(file, inode, NULL, cred);
+ return do_dentry_open(file, d_backing_inode(dentry), NULL, cred);
}
struct file *dentry_open(const struct path *path, int flags,
@@ -998,6 +998,26 @@ struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
}
EXPORT_SYMBOL(file_open_root);
+struct file *filp_clone_open(struct file *oldfile)
+{
+ struct file *file;
+ int retval;
+
+ file = get_empty_filp();
+ if (IS_ERR(file))
+ return file;
+
+ file->f_flags = oldfile->f_flags;
+ retval = vfs_open(&oldfile->f_path, file, oldfile->f_cred);
+ if (retval) {
+ put_filp(file);
+ return ERR_PTR(retval);
+ }
+
+ return file;
+}
+EXPORT_SYMBOL(filp_clone_open);
+
long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
{
struct open_flags op;
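The new filp_clone_open() helper builds a fresh struct file referring to the same path as an existing one, opened with the original file's flags and credentials. A minimal, hypothetical use (the caller name and error handling are illustrative, not from the patch):

	/* Sketch: duplicate an open file into a private struct file. */
	static struct file *clone_for_private_use(struct file *orig)
	{
		struct file *copy = filp_clone_open(orig);

		if (IS_ERR(copy))
			return copy;	/* propagate the error */
		/* ... use 'copy' independently of 'orig' ... */
		return copy;		/* release later with fput() */
	}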
diff --git a/fs/orangefs/acl.c b/fs/orangefs/acl.c
index 03f89dbb2512..28f2195cd798 100644
--- a/fs/orangefs/acl.c
+++ b/fs/orangefs/acl.c
@@ -18,10 +18,10 @@ struct posix_acl *orangefs_get_acl(struct inode *inode, int type)
switch (type) {
case ACL_TYPE_ACCESS:
- key = ORANGEFS_XATTR_NAME_ACL_ACCESS;
+ key = XATTR_NAME_POSIX_ACL_ACCESS;
break;
case ACL_TYPE_DEFAULT:
- key = ORANGEFS_XATTR_NAME_ACL_DEFAULT;
+ key = XATTR_NAME_POSIX_ACL_DEFAULT;
break;
default:
gossip_err("orangefs_get_acl: bogus value of type %d\n", type);
@@ -43,11 +43,8 @@ struct posix_acl *orangefs_get_acl(struct inode *inode, int type)
get_khandle_from_ino(inode),
key,
type);
- ret = orangefs_inode_getxattr(inode,
- "",
- key,
- value,
- ORANGEFS_MAX_XATTR_VALUELEN);
+ ret = orangefs_inode_getxattr(inode, key, value,
+ ORANGEFS_MAX_XATTR_VALUELEN);
/* if the key exists, convert it to an in-memory rep */
if (ret > 0) {
acl = posix_acl_from_xattr(&init_user_ns, value, ret);
@@ -74,7 +71,7 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
switch (type) {
case ACL_TYPE_ACCESS:
- name = ORANGEFS_XATTR_NAME_ACL_ACCESS;
+ name = XATTR_NAME_POSIX_ACL_ACCESS;
if (acl) {
umode_t mode = inode->i_mode;
/*
@@ -98,7 +95,7 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
}
break;
case ACL_TYPE_DEFAULT:
- name = ORANGEFS_XATTR_NAME_ACL_DEFAULT;
+ name = XATTR_NAME_POSIX_ACL_DEFAULT;
break;
default:
gossip_err("%s: invalid type %d!\n", __func__, type);
@@ -131,7 +128,7 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
* will xlate to a removexattr. However, we don't want removexattr
* to complain if the attribute does not exist.
*/
- error = orangefs_inode_setxattr(inode, "", name, value, size, 0);
+ error = orangefs_inode_setxattr(inode, name, value, size, 0);
out:
kfree(value);
diff --git a/fs/orangefs/dcache.c b/fs/orangefs/dcache.c
index 5dfc4f3cfe68..00235bf644dc 100644
--- a/fs/orangefs/dcache.c
+++ b/fs/orangefs/dcache.c
@@ -73,6 +73,7 @@ static int orangefs_revalidate_lookup(struct dentry *dentry)
}
}
+ dentry->d_time = jiffies + dcache_timeout_msecs*HZ/1000;
ret = 1;
out_release_op:
op_release(new_op);
@@ -94,6 +95,9 @@ static int orangefs_d_revalidate(struct dentry *dentry, unsigned int flags)
{
int ret;
+ if (time_before(jiffies, dentry->d_time))
+ return 1;
+
if (flags & LOOKUP_RCU)
return -ECHILD;
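With these two hunks, a successful lookup or revalidation stamps the dentry with an expiry in d_time, and ->d_revalidate() short-circuits to "valid" until that stamp passes. The open-coded msecs*HZ/1000 conversion is equivalent to the usual helper; a sketch of the pattern with illustrative names:

	/* Sketch of the jiffies-based validity window used above. */
	static void mark_valid(struct dentry *dentry, unsigned int timeout_msecs)
	{
		dentry->d_time = jiffies + msecs_to_jiffies(timeout_msecs);
	}

	static bool still_valid(const struct dentry *dentry)
	{
		return time_before(jiffies, dentry->d_time);	/* wrap-safe */
	}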
diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
index db170beba797..a287a66d94e3 100644
--- a/fs/orangefs/devorangefs-req.c
+++ b/fs/orangefs/devorangefs-req.c
@@ -116,6 +116,13 @@ static int orangefs_devreq_open(struct inode *inode, struct file *file)
{
int ret = -EINVAL;
+ /* refuse opens from other user namespaces so that the filesystem driver sees correct UIDs */
+ if (file->f_cred->user_ns != &init_user_ns) {
+ gossip_err("%s: device cannot be opened outside init_user_ns\n",
+ __func__);
+ goto out;
+ }
+
if (!(file->f_flags & O_NONBLOCK)) {
gossip_err("%s: device cannot be opened in blocking mode\n",
__func__);
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
index 491e82c6f705..526040e09f78 100644
--- a/fs/orangefs/file.c
+++ b/fs/orangefs/file.c
@@ -516,7 +516,6 @@ static long orangefs_ioctl(struct file *file, unsigned int cmd, unsigned long ar
if (cmd == FS_IOC_GETFLAGS) {
val = 0;
ret = orangefs_inode_getxattr(file_inode(file),
- ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
"user.pvfs2.meta_hint",
&val, sizeof(val));
if (ret < 0 && ret != -ENODATA)
@@ -549,7 +548,6 @@ static long orangefs_ioctl(struct file *file, unsigned int cmd, unsigned long ar
"orangefs_ioctl: FS_IOC_SETFLAGS: %llu\n",
(unsigned long long)val);
ret = orangefs_inode_setxattr(file_inode(file),
- ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
"user.pvfs2.meta_hint",
&val, sizeof(val), 0);
}
diff --git a/fs/orangefs/inode.c b/fs/orangefs/inode.c
index 85640e955cde..28a0557a69be 100644
--- a/fs/orangefs/inode.c
+++ b/fs/orangefs/inode.c
@@ -80,7 +80,7 @@ static int orangefs_readpages(struct file *file,
if (!add_to_page_cache(page,
mapping,
page->index,
- GFP_KERNEL)) {
+ readahead_gfp_mask(mapping))) {
ret = read_one_page(page);
gossip_debug(GOSSIP_INODE_DEBUG,
"failure adding page to cache, read_one_page returned: %d\n",
@@ -124,19 +124,16 @@ static int orangefs_releasepage(struct page *page, gfp_t foo)
* will need to be able to use O_DIRECT on open in order to support
* AIO. Modeled after NFS, they do this too.
*/
-/*
- * static ssize_t orangefs_direct_IO(int rw,
- * struct kiocb *iocb,
- * struct iov_iter *iter,
- * loff_t offset)
- *{
- * gossip_debug(GOSSIP_INODE_DEBUG,
- * "orangefs_direct_IO: %s\n",
- * iocb->ki_filp->f_path.dentry->d_name.name);
- *
- * return -EINVAL;
- *}
- */
+
+static ssize_t orangefs_direct_IO(struct kiocb *iocb,
+ struct iov_iter *iter)
+{
+ gossip_debug(GOSSIP_INODE_DEBUG,
+ "orangefs_direct_IO: %s\n",
+ iocb->ki_filp->f_path.dentry->d_name.name);
+
+ return -EINVAL;
+}
struct backing_dev_info orangefs_backing_dev_info = {
.name = "orangefs",
@@ -150,7 +147,7 @@ const struct address_space_operations orangefs_address_operations = {
.readpages = orangefs_readpages,
.invalidatepage = orangefs_invalidatepage,
.releasepage = orangefs_releasepage,
-/* .direct_IO = orangefs_direct_IO */
+ .direct_IO = orangefs_direct_IO,
};
static int orangefs_setattr_size(struct inode *inode, struct iattr *iattr)
@@ -265,7 +262,7 @@ int orangefs_getattr(struct vfsmount *mnt,
"orangefs_getattr: called on %s\n",
dentry->d_name.name);
- ret = orangefs_inode_getattr(inode, 0, 1);
+ ret = orangefs_inode_getattr(inode, 0, 0);
if (ret == 0) {
generic_fillattr(inode, kstat);
@@ -294,7 +291,7 @@ int orangefs_permission(struct inode *inode, int mask)
}
/* ORANGEFS implementation of VFS inode operations for files */
-struct inode_operations orangefs_file_inode_operations = {
+const struct inode_operations orangefs_file_inode_operations = {
.get_acl = orangefs_get_acl,
.set_acl = orangefs_set_acl,
.setattr = orangefs_setattr,
@@ -387,7 +384,7 @@ struct inode *orangefs_iget(struct super_block *sb, struct orangefs_object_kref
if (!inode || !(inode->i_state & I_NEW))
return inode;
- error = orangefs_inode_getattr(inode, 1, 0);
+ error = orangefs_inode_getattr(inode, 1, 1);
if (error) {
iget_failed(inode);
return ERR_PTR(error);
@@ -432,7 +429,7 @@ struct inode *orangefs_new_inode(struct super_block *sb, struct inode *dir,
orangefs_set_inode(inode, ref);
inode->i_ino = hash; /* needed for stat etc */
- error = orangefs_inode_getattr(inode, 1, 0);
+ error = orangefs_inode_getattr(inode, 1, 1);
if (error)
goto out_iput;
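Wiring a stub ->direct_IO into the address_space_operations is what lets open(2) with O_DIRECT succeed: do_dentry_open() only checks that the method exists, and an actual direct I/O attempt then fails with -EINVAL. A sketch of the same idea for a hypothetical filesystem, using the signature shown in the hunk above (myfs_* names are illustrative):

	static ssize_t myfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
	{
		return -EINVAL;	/* advertise O_DIRECT at open, reject real DIO */
	}

	static const struct address_space_operations myfs_aops = {
		.direct_IO = myfs_direct_IO,
	};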
diff --git a/fs/orangefs/namei.c b/fs/orangefs/namei.c
index 5a60c508af4e..62c525936ee8 100644
--- a/fs/orangefs/namei.c
+++ b/fs/orangefs/namei.c
@@ -72,6 +72,8 @@ static int orangefs_create(struct inode *dir,
d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ dentry->d_time = jiffies + dcache_timeout_msecs*HZ/1000;
+ ORANGEFS_I(inode)->getattr_time = jiffies - 1;
gossip_debug(GOSSIP_NAME_DEBUG,
"%s: dentry instantiated for %s\n",
@@ -181,6 +183,8 @@ static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry,
goto out;
}
+ dentry->d_time = jiffies + dcache_timeout_msecs*HZ/1000;
+
inode = orangefs_iget(dir->i_sb, &new_op->downcall.resp.lookup.refn);
if (IS_ERR(inode)) {
gossip_debug(GOSSIP_NAME_DEBUG,
@@ -189,6 +193,8 @@ static struct dentry *orangefs_lookup(struct inode *dir, struct dentry *dentry,
goto out;
}
+ ORANGEFS_I(inode)->getattr_time = jiffies - 1;
+
gossip_debug(GOSSIP_NAME_DEBUG,
"%s:%s:%d "
"Found good inode [%lu] with count [%d]\n",
@@ -316,6 +322,8 @@ static int orangefs_symlink(struct inode *dir,
d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ dentry->d_time = jiffies + dcache_timeout_msecs*HZ/1000;
+ ORANGEFS_I(inode)->getattr_time = jiffies - 1;
gossip_debug(GOSSIP_NAME_DEBUG,
"Inode (Symlink) %pU -> %s\n",
@@ -378,6 +386,8 @@ static int orangefs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
d_instantiate(dentry, inode);
unlock_new_inode(inode);
+ dentry->d_time = jiffies + dcache_timeout_msecs*HZ/1000;
+ ORANGEFS_I(inode)->getattr_time = jiffies - 1;
gossip_debug(GOSSIP_NAME_DEBUG,
"Inode (Directory) %pU -> %s\n",
@@ -405,12 +415,10 @@ static int orangefs_rename(struct inode *old_dir,
int ret;
gossip_debug(GOSSIP_NAME_DEBUG,
- "orangefs_rename: called (%s/%s => %s/%s) ct=%d\n",
- old_dentry->d_parent->d_name.name,
- old_dentry->d_name.name,
- new_dentry->d_parent->d_name.name,
- new_dentry->d_name.name,
- d_count(new_dentry));
+ "orangefs_rename: called (%pd2 => %pd2) ct=%d\n",
+ old_dentry, new_dentry, d_count(new_dentry));
+
+ ORANGEFS_I(new_dentry->d_parent->d_inode)->getattr_time = jiffies - 1;
new_op = op_alloc(ORANGEFS_VFS_OP_RENAME);
if (!new_op)
@@ -442,7 +450,7 @@ static int orangefs_rename(struct inode *old_dir,
}
/* ORANGEFS implementation of VFS inode operations for directories */
-struct inode_operations orangefs_dir_inode_operations = {
+const struct inode_operations orangefs_dir_inode_operations = {
.lookup = orangefs_lookup,
.get_acl = orangefs_get_acl,
.set_acl = orangefs_set_acl,
diff --git a/fs/orangefs/orangefs-cache.c b/fs/orangefs/orangefs-cache.c
index 900a2e38e11b..b6edbe9fb309 100644
--- a/fs/orangefs/orangefs-cache.c
+++ b/fs/orangefs/orangefs-cache.c
@@ -136,10 +136,10 @@ struct orangefs_kernel_op_s *op_alloc(__s32 type)
llu(new_op->tag),
get_opname_string(new_op));
- new_op->upcall.uid = from_kuid(current_user_ns(),
+ new_op->upcall.uid = from_kuid(&init_user_ns,
current_fsuid());
- new_op->upcall.gid = from_kgid(current_user_ns(),
+ new_op->upcall.gid = from_kgid(&init_user_ns,
current_fsgid());
} else {
gossip_err("op_alloc: kmem_cache_zalloc failed!\n");
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index 2281882f718e..633c07a6e3d8 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -119,17 +119,6 @@ struct client_debug_mask {
#define ORANGEFS_CACHE_CREATE_FLAGS 0
#endif /* ((defined ORANGEFS_KERNEL_DEBUG) && (defined CONFIG_DEBUG_SLAB)) */
-/* orangefs xattr and acl related defines */
-#define ORANGEFS_XATTR_INDEX_POSIX_ACL_ACCESS 1
-#define ORANGEFS_XATTR_INDEX_POSIX_ACL_DEFAULT 2
-#define ORANGEFS_XATTR_INDEX_TRUSTED 3
-#define ORANGEFS_XATTR_INDEX_DEFAULT 4
-
-#define ORANGEFS_XATTR_NAME_ACL_ACCESS XATTR_NAME_POSIX_ACL_ACCESS
-#define ORANGEFS_XATTR_NAME_ACL_DEFAULT XATTR_NAME_POSIX_ACL_DEFAULT
-#define ORANGEFS_XATTR_NAME_TRUSTED_PREFIX "trusted."
-#define ORANGEFS_XATTR_NAME_DEFAULT_PREFIX ""
-
/* these functions are defined in orangefs-utils.c */
int orangefs_prepare_cdm_array(char *debug_array_string);
int orangefs_prepare_debugfs_help_string(int);
@@ -257,6 +246,8 @@ struct orangefs_inode_s {
* with this object
*/
unsigned long pinode_flags;
+
+ unsigned long getattr_time;
};
#define P_ATIME_FLAG 0
@@ -528,19 +519,17 @@ __s32 fsid_of_op(struct orangefs_kernel_op_s *op);
int orangefs_flush_inode(struct inode *inode);
ssize_t orangefs_inode_getxattr(struct inode *inode,
- const char *prefix,
const char *name,
void *buffer,
size_t size);
int orangefs_inode_setxattr(struct inode *inode,
- const char *prefix,
const char *name,
const void *value,
size_t size,
int flags);
-int orangefs_inode_getattr(struct inode *inode, int new, int size);
+int orangefs_inode_getattr(struct inode *inode, int new, int bypass);
int orangefs_inode_check_changed(struct inode *inode);
@@ -559,6 +548,8 @@ extern struct mutex request_mutex;
extern int debug;
extern int op_timeout_secs;
extern int slot_timeout_secs;
+extern int dcache_timeout_msecs;
+extern int getattr_timeout_msecs;
extern struct list_head orangefs_superblocks;
extern spinlock_t orangefs_superblocks_lock;
extern struct list_head orangefs_request_list;
@@ -570,10 +561,10 @@ extern int hash_table_size;
extern const struct address_space_operations orangefs_address_operations;
extern struct backing_dev_info orangefs_backing_dev_info;
-extern struct inode_operations orangefs_file_inode_operations;
+extern const struct inode_operations orangefs_file_inode_operations;
extern const struct file_operations orangefs_file_operations;
-extern struct inode_operations orangefs_symlink_inode_operations;
-extern struct inode_operations orangefs_dir_inode_operations;
+extern const struct inode_operations orangefs_symlink_inode_operations;
+extern const struct inode_operations orangefs_dir_inode_operations;
extern const struct file_operations orangefs_dir_operations;
extern const struct dentry_operations orangefs_dentry_operations;
extern const struct file_operations orangefs_devreq_file_operations;
@@ -600,8 +591,8 @@ int service_operation(struct orangefs_kernel_op_s *op,
#define fill_default_sys_attrs(sys_attr, type, mode) \
do { \
- sys_attr.owner = from_kuid(current_user_ns(), current_fsuid()); \
- sys_attr.group = from_kgid(current_user_ns(), current_fsgid()); \
+ sys_attr.owner = from_kuid(&init_user_ns, current_fsuid()); \
+ sys_attr.group = from_kgid(&init_user_ns, current_fsgid()); \
sys_attr.perms = ORANGEFS_util_translate_mode(mode); \
sys_attr.mtime = 0; \
sys_attr.atime = 0; \
diff --git a/fs/orangefs/orangefs-mod.c b/fs/orangefs/orangefs-mod.c
index 6f072a8c0de1..e9fd5755c05f 100644
--- a/fs/orangefs/orangefs-mod.c
+++ b/fs/orangefs/orangefs-mod.c
@@ -47,6 +47,8 @@ struct client_debug_mask client_debug_mask = { NULL, 0, 0 };
unsigned int kernel_mask_set_mod_init; /* implicitly false */
int op_timeout_secs = ORANGEFS_DEFAULT_OP_TIMEOUT_SECS;
int slot_timeout_secs = ORANGEFS_DEFAULT_SLOT_TIMEOUT_SECS;
+int dcache_timeout_msecs = 50;
+int getattr_timeout_msecs = 50;
MODULE_LICENSE("GPL");
MODULE_AUTHOR("ORANGEFS Development Team");
diff --git a/fs/orangefs/orangefs-sysfs.c b/fs/orangefs/orangefs-sysfs.c
index 5c03113e3ad2..375708c2db87 100644
--- a/fs/orangefs/orangefs-sysfs.c
+++ b/fs/orangefs/orangefs-sysfs.c
@@ -61,10 +61,21 @@
* Slots are requested and waited for,
* the wait times out after slot_timeout_secs.
*
+ * What: /sys/fs/orangefs/dcache_timeout_msecs
+ * Date: Jul 2016
+ * Contact: Martin Brandenburg <martin@omnibond.com>
+ * Description:
+ * Time (in milliseconds) that a lookup remains valid in the dcache.
+ *
+ * What: /sys/fs/orangefs/getattr_timeout_msecs
+ * Date: Jul 2016
+ * Contact: Martin Brandenburg <martin@omnibond.com>
+ * Description:
+ * Time (in milliseconds) that cached getattr results remain valid.
*
* What: /sys/fs/orangefs/acache/...
* Date: Jun 2015
- * Contact: Mike Marshall <hubcap@omnibond.com>
+ * Contact: Martin Brandenburg <martin@omnibond.com>
* Description:
* Attribute cache configurable settings.
*
@@ -117,6 +128,8 @@ struct orangefs_obj {
int perf_history_size;
int perf_time_interval_secs;
int slot_timeout_secs;
+ int dcache_timeout_msecs;
+ int getattr_timeout_msecs;
};
struct acache_orangefs_obj {
@@ -658,6 +671,20 @@ static ssize_t sysfs_int_show(char *kobj_id, char *buf, void *attr)
"%d\n",
slot_timeout_secs);
goto out;
+ } else if (!strcmp(orangefs_attr->attr.name,
+ "dcache_timeout_msecs")) {
+ rc = scnprintf(buf,
+ PAGE_SIZE,
+ "%d\n",
+ dcache_timeout_msecs);
+ goto out;
+ } else if (!strcmp(orangefs_attr->attr.name,
+ "getattr_timeout_msecs")) {
+ rc = scnprintf(buf,
+ PAGE_SIZE,
+ "%d\n",
+ getattr_timeout_msecs);
+ goto out;
} else {
goto out;
}
@@ -734,6 +761,12 @@ static ssize_t int_store(struct orangefs_obj *orangefs_obj,
} else if (!strcmp(attr->attr.name, "slot_timeout_secs")) {
rc = kstrtoint(buf, 0, &slot_timeout_secs);
goto out;
+ } else if (!strcmp(attr->attr.name, "dcache_timeout_msecs")) {
+ rc = kstrtoint(buf, 0, &dcache_timeout_msecs);
+ goto out;
+ } else if (!strcmp(attr->attr.name, "getattr_timeout_msecs")) {
+ rc = kstrtoint(buf, 0, &getattr_timeout_msecs);
+ goto out;
} else {
goto out;
}
@@ -1361,6 +1394,12 @@ static struct orangefs_attribute op_timeout_secs_attribute =
static struct orangefs_attribute slot_timeout_secs_attribute =
__ATTR(slot_timeout_secs, 0664, int_orangefs_show, int_store);
+static struct orangefs_attribute dcache_timeout_msecs_attribute =
+ __ATTR(dcache_timeout_msecs, 0664, int_orangefs_show, int_store);
+
+static struct orangefs_attribute getattr_timeout_msecs_attribute =
+ __ATTR(getattr_timeout_msecs, 0664, int_orangefs_show, int_store);
+
static struct orangefs_attribute perf_counter_reset_attribute =
__ATTR(perf_counter_reset,
0664,
@@ -1382,6 +1421,8 @@ static struct orangefs_attribute perf_time_interval_secs_attribute =
static struct attribute *orangefs_default_attrs[] = {
&op_timeout_secs_attribute.attr,
&slot_timeout_secs_attribute.attr,
+ &dcache_timeout_msecs_attribute.attr,
+ &getattr_timeout_msecs_attribute.attr,
&perf_counter_reset_attribute.attr,
&perf_history_size_attribute.attr,
&perf_time_interval_secs_attribute.attr,
diff --git a/fs/orangefs/orangefs-utils.c b/fs/orangefs/orangefs-utils.c
index 2d129b5886ee..d13c7291fd05 100644
--- a/fs/orangefs/orangefs-utils.c
+++ b/fs/orangefs/orangefs-utils.c
@@ -153,12 +153,12 @@ static inline int copy_attributes_from_inode(struct inode *inode,
*/
attrs->mask = 0;
if (iattr->ia_valid & ATTR_UID) {
- attrs->owner = from_kuid(current_user_ns(), iattr->ia_uid);
+ attrs->owner = from_kuid(&init_user_ns, iattr->ia_uid);
attrs->mask |= ORANGEFS_ATTR_SYS_UID;
gossip_debug(GOSSIP_UTILS_DEBUG, "(UID) %d\n", attrs->owner);
}
if (iattr->ia_valid & ATTR_GID) {
- attrs->group = from_kgid(current_user_ns(), iattr->ia_gid);
+ attrs->group = from_kgid(&init_user_ns, iattr->ia_gid);
attrs->mask |= ORANGEFS_ATTR_SYS_GID;
gossip_debug(GOSSIP_UTILS_DEBUG, "(GID) %d\n", attrs->group);
}
@@ -251,7 +251,7 @@ static int orangefs_inode_is_stale(struct inode *inode, int new,
return 0;
}
-int orangefs_inode_getattr(struct inode *inode, int new, int size)
+int orangefs_inode_getattr(struct inode *inode, int new, int bypass)
{
struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
struct orangefs_kernel_op_s *new_op;
@@ -261,12 +261,16 @@ int orangefs_inode_getattr(struct inode *inode, int new, int size)
gossip_debug(GOSSIP_UTILS_DEBUG, "%s: called on inode %pU\n", __func__,
get_khandle_from_ino(inode));
+ if (!new && !bypass) {
+ if (time_before(jiffies, orangefs_inode->getattr_time))
+ return 0;
+ }
+
new_op = op_alloc(ORANGEFS_VFS_OP_GETATTR);
if (!new_op)
return -ENOMEM;
new_op->upcall.req.getattr.refn = orangefs_inode->refn;
- new_op->upcall.req.getattr.mask = size ?
- ORANGEFS_ATTR_SYS_ALL_NOHINT : ORANGEFS_ATTR_SYS_ALL_NOHINT_NOSIZE;
+ new_op->upcall.req.getattr.mask = ORANGEFS_ATTR_SYS_ALL_NOHINT;
ret = service_operation(new_op, __func__,
get_interruptible_flag(inode));
@@ -287,20 +291,18 @@ int orangefs_inode_getattr(struct inode *inode, int new, int size)
case S_IFREG:
inode->i_flags = orangefs_inode_flags(&new_op->
downcall.resp.getattr.attributes);
- if (size) {
- inode_size = (loff_t)new_op->
- downcall.resp.getattr.attributes.size;
- rounded_up_size =
- (inode_size + (4096 - (inode_size % 4096)));
- inode->i_size = inode_size;
- orangefs_inode->blksize =
- new_op->downcall.resp.getattr.attributes.blksize;
- spin_lock(&inode->i_lock);
- inode->i_bytes = inode_size;
- inode->i_blocks =
- (unsigned long)(rounded_up_size / 512);
- spin_unlock(&inode->i_lock);
- }
+ inode_size = (loff_t)new_op->
+ downcall.resp.getattr.attributes.size;
+ rounded_up_size =
+ (inode_size + (4096 - (inode_size % 4096)));
+ inode->i_size = inode_size;
+ orangefs_inode->blksize =
+ new_op->downcall.resp.getattr.attributes.blksize;
+ spin_lock(&inode->i_lock);
+ inode->i_bytes = inode_size;
+ inode->i_blocks =
+ (unsigned long)(rounded_up_size / 512);
+ spin_unlock(&inode->i_lock);
break;
case S_IFDIR:
inode->i_size = PAGE_SIZE;
@@ -345,6 +347,7 @@ int orangefs_inode_getattr(struct inode *inode, int new, int size)
inode->i_mode = type | (is_root_handle(inode) ? S_ISVTX : 0) |
orangefs_inode_perms(&new_op->downcall.resp.getattr.attributes);
+ orangefs_inode->getattr_time = jiffies + getattr_timeout_msecs*HZ/1000;
ret = 0;
out:
op_release(new_op);
@@ -418,6 +421,7 @@ int orangefs_inode_setattr(struct inode *inode, struct iattr *iattr)
ClearMtimeFlag(orangefs_inode);
ClearCtimeFlag(orangefs_inode);
ClearModeFlag(orangefs_inode);
+ orangefs_inode->getattr_time = jiffies - 1;
}
return ret;
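Taken together, the getattr changes implement a small TTL cache: a timestamp check in front of the upcall, a refresh on every successful reply, and explicit invalidation (jiffies - 1) wherever attributes may have changed behind the cache. The skeleton, with illustrative names:

	/* Sketch of the getattr TTL-cache shape used above. */
	struct attr_cache {
		unsigned long expires;			/* in jiffies */
	};

	static bool attr_cache_valid(const struct attr_cache *c)
	{
		return time_before(jiffies, c->expires);
	}

	static void attr_cache_refresh(struct attr_cache *c, unsigned int ttl_ms)
	{
		c->expires = jiffies + msecs_to_jiffies(ttl_ms);
	}

	static void attr_cache_invalidate(struct attr_cache *c)
	{
		c->expires = jiffies - 1;		/* already in the past */
	}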
diff --git a/fs/orangefs/protocol.h b/fs/orangefs/protocol.h
index 1efc6f8a5224..3d7418c728f5 100644
--- a/fs/orangefs/protocol.h
+++ b/fs/orangefs/protocol.h
@@ -207,14 +207,6 @@ typedef __s64 ORANGEFS_offset;
ORANGEFS_ATTR_SYS_DIRENT_COUNT | \
ORANGEFS_ATTR_SYS_BLKSIZE)
-#define ORANGEFS_ATTR_SYS_ALL_NOHINT_NOSIZE \
- (ORANGEFS_ATTR_SYS_COMMON_ALL | \
- ORANGEFS_ATTR_SYS_LNK_TARGET | \
- ORANGEFS_ATTR_SYS_DFILE_COUNT | \
- ORANGEFS_ATTR_SYS_MIRROR_COPIES_COUNT | \
- ORANGEFS_ATTR_SYS_DIRENT_COUNT | \
- ORANGEFS_ATTR_SYS_BLKSIZE)
-
#define ORANGEFS_XATTR_REPLACE 0x2
#define ORANGEFS_XATTR_CREATE 0x1
#define ORANGEFS_MAX_SERVER_ADDR_LEN 256
diff --git a/fs/orangefs/symlink.c b/fs/orangefs/symlink.c
index 6418dd638680..8fecf823f5ba 100644
--- a/fs/orangefs/symlink.c
+++ b/fs/orangefs/symlink.c
@@ -8,7 +8,7 @@
#include "orangefs-kernel.h"
#include "orangefs-bufmap.h"
-struct inode_operations orangefs_symlink_inode_operations = {
+const struct inode_operations orangefs_symlink_inode_operations = {
.readlink = generic_readlink,
.get_link = simple_get_link,
.setattr = orangefs_setattr,
diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c
index 5893ddde0e4b..2a9f07f06d10 100644
--- a/fs/orangefs/xattr.c
+++ b/fs/orangefs/xattr.c
@@ -59,8 +59,8 @@ static inline int convert_to_internal_xattr_flags(int setxattr_flags)
* unless the key does not exist for the file and/or if
* there were errors in fetching the attribute value.
*/
-ssize_t orangefs_inode_getxattr(struct inode *inode, const char *prefix,
- const char *name, void *buffer, size_t size)
+ssize_t orangefs_inode_getxattr(struct inode *inode, const char *name,
+ void *buffer, size_t size)
{
struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
struct orangefs_kernel_op_s *new_op = NULL;
@@ -70,17 +70,17 @@ ssize_t orangefs_inode_getxattr(struct inode *inode, const char *prefix,
int fsgid;
gossip_debug(GOSSIP_XATTR_DEBUG,
- "%s: prefix %s name %s, buffer_size %zd\n",
- __func__, prefix, name, size);
+ "%s: name %s, buffer_size %zd\n",
+ __func__, name, size);
- if ((strlen(name) + strlen(prefix)) >= ORANGEFS_MAX_XATTR_NAMELEN) {
+ if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN) {
gossip_err("Invalid key length (%d)\n",
- (int)(strlen(name) + strlen(prefix)));
+ (int)strlen(name));
return -EINVAL;
}
- fsuid = from_kuid(current_user_ns(), current_fsuid());
- fsgid = from_kgid(current_user_ns(), current_fsgid());
+ fsuid = from_kuid(&init_user_ns, current_fsuid());
+ fsgid = from_kgid(&init_user_ns, current_fsgid());
gossip_debug(GOSSIP_XATTR_DEBUG,
"getxattr on inode %pU, name %s "
@@ -97,15 +97,14 @@ ssize_t orangefs_inode_getxattr(struct inode *inode, const char *prefix,
goto out_unlock;
new_op->upcall.req.getxattr.refn = orangefs_inode->refn;
- ret = snprintf((char *)new_op->upcall.req.getxattr.key,
- ORANGEFS_MAX_XATTR_NAMELEN, "%s%s", prefix, name);
+ strcpy(new_op->upcall.req.getxattr.key, name);
/*
* NOTE: Although keys are meant to be NULL terminated textual
* strings, I am going to explicitly pass the length just in case
* we change this later on...
*/
- new_op->upcall.req.getxattr.key_sz = ret + 1;
+ new_op->upcall.req.getxattr.key_sz = strlen(name) + 1;
ret = service_operation(new_op, "orangefs_inode_getxattr",
get_interruptible_flag(inode));
@@ -163,10 +162,8 @@ out_unlock:
return ret;
}
-static int orangefs_inode_removexattr(struct inode *inode,
- const char *prefix,
- const char *name,
- int flags)
+static int orangefs_inode_removexattr(struct inode *inode, const char *name,
+ int flags)
{
struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
struct orangefs_kernel_op_s *new_op = NULL;
@@ -183,12 +180,8 @@ static int orangefs_inode_removexattr(struct inode *inode,
* textual strings, I am going to explicitly pass the
* length just in case we change this later on...
*/
- ret = snprintf((char *)new_op->upcall.req.removexattr.key,
- ORANGEFS_MAX_XATTR_NAMELEN,
- "%s%s",
- (prefix ? prefix : ""),
- name);
- new_op->upcall.req.removexattr.key_sz = ret + 1;
+ strcpy(new_op->upcall.req.removexattr.key, name);
+ new_op->upcall.req.removexattr.key_sz = strlen(name) + 1;
gossip_debug(GOSSIP_XATTR_DEBUG,
"orangefs_inode_removexattr: key %s, key_sz %d\n",
@@ -223,8 +216,8 @@ out_unlock:
* Returns a negative number on error and 0 on success. Key is text, but value
* can be binary!
*/
-int orangefs_inode_setxattr(struct inode *inode, const char *prefix,
- const char *name, const void *value, size_t size, int flags)
+int orangefs_inode_setxattr(struct inode *inode, const char *name,
+ const void *value, size_t size, int flags)
{
struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
struct orangefs_kernel_op_s *new_op;
@@ -232,8 +225,8 @@ int orangefs_inode_setxattr(struct inode *inode, const char *prefix,
int ret = -ENOMEM;
gossip_debug(GOSSIP_XATTR_DEBUG,
- "%s: prefix %s, name %s, buffer_size %zd\n",
- __func__, prefix, name, size);
+ "%s: name %s, buffer_size %zd\n",
+ __func__, name, size);
if (size >= ORANGEFS_MAX_XATTR_VALUELEN ||
flags < 0) {
@@ -245,29 +238,19 @@ int orangefs_inode_setxattr(struct inode *inode, const char *prefix,
internal_flag = convert_to_internal_xattr_flags(flags);
- if (prefix) {
- if (strlen(name) + strlen(prefix) >= ORANGEFS_MAX_XATTR_NAMELEN) {
- gossip_err
- ("orangefs_inode_setxattr: bogus key size (%d)\n",
- (int)(strlen(name) + strlen(prefix)));
- return -EINVAL;
- }
- } else {
- if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN) {
- gossip_err
- ("orangefs_inode_setxattr: bogus key size (%d)\n",
- (int)(strlen(name)));
- return -EINVAL;
- }
+ if (strlen(name) >= ORANGEFS_MAX_XATTR_NAMELEN) {
+ gossip_err
+ ("orangefs_inode_setxattr: bogus key size (%d)\n",
+ (int)(strlen(name)));
+ return -EINVAL;
}
/* This is equivalent to a removexattr */
if (size == 0 && value == NULL) {
gossip_debug(GOSSIP_XATTR_DEBUG,
- "removing xattr (%s%s)\n",
- prefix,
+ "removing xattr (%s)\n",
name);
- return orangefs_inode_removexattr(inode, prefix, name, flags);
+ return orangefs_inode_removexattr(inode, name, flags);
}
gossip_debug(GOSSIP_XATTR_DEBUG,
@@ -288,11 +271,8 @@ int orangefs_inode_setxattr(struct inode *inode, const char *prefix,
* strings, I am going to explicitly pass the length just in
* case we change this later on...
*/
- ret = snprintf((char *)new_op->upcall.req.setxattr.keyval.key,
- ORANGEFS_MAX_XATTR_NAMELEN,
- "%s%s",
- prefix, name);
- new_op->upcall.req.setxattr.keyval.key_sz = ret + 1;
+ strcpy(new_op->upcall.req.setxattr.keyval.key, name);
+ new_op->upcall.req.setxattr.keyval.key_sz = strlen(name) + 1;
memcpy(new_op->upcall.req.setxattr.keyval.val, value, size);
new_op->upcall.req.setxattr.keyval.val_sz = size;
@@ -455,12 +435,7 @@ static int orangefs_xattr_set_default(const struct xattr_handler *handler,
size_t size,
int flags)
{
- return orangefs_inode_setxattr(inode,
- ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
- name,
- buffer,
- size,
- flags);
+ return orangefs_inode_setxattr(inode, name, buffer, size, flags);
}
static int orangefs_xattr_get_default(const struct xattr_handler *handler,
@@ -470,57 +445,12 @@ static int orangefs_xattr_get_default(const struct xattr_handler *handler,
void *buffer,
size_t size)
{
- return orangefs_inode_getxattr(inode,
- ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
- name,
- buffer,
- size);
-
-}
+ return orangefs_inode_getxattr(inode, name, buffer, size);
-static int orangefs_xattr_set_trusted(const struct xattr_handler *handler,
- struct dentry *unused,
- struct inode *inode,
- const char *name,
- const void *buffer,
- size_t size,
- int flags)
-{
- return orangefs_inode_setxattr(inode,
- ORANGEFS_XATTR_NAME_TRUSTED_PREFIX,
- name,
- buffer,
- size,
- flags);
}
-static int orangefs_xattr_get_trusted(const struct xattr_handler *handler,
- struct dentry *unused,
- struct inode *inode,
- const char *name,
- void *buffer,
- size_t size)
-{
- return orangefs_inode_getxattr(inode,
- ORANGEFS_XATTR_NAME_TRUSTED_PREFIX,
- name,
- buffer,
- size);
-}
-
-static struct xattr_handler orangefs_xattr_trusted_handler = {
- .prefix = ORANGEFS_XATTR_NAME_TRUSTED_PREFIX,
- .get = orangefs_xattr_get_trusted,
- .set = orangefs_xattr_set_trusted,
-};
-
static struct xattr_handler orangefs_xattr_default_handler = {
- /*
- * NOTE: this is set to be the empty string.
- * so that all un-prefixed xattrs keys get caught
- * here!
- */
- .prefix = ORANGEFS_XATTR_NAME_DEFAULT_PREFIX,
+ .prefix = "", /* match any name => handlers called with full name */
.get = orangefs_xattr_get_default,
.set = orangefs_xattr_set_default,
};
@@ -528,7 +458,6 @@ static struct xattr_handler orangefs_xattr_default_handler = {
const struct xattr_handler *orangefs_xattr_handlers[] = {
&posix_acl_access_xattr_handler,
&posix_acl_default_xattr_handler,
- &orangefs_xattr_trusted_handler,
&orangefs_xattr_default_handler,
NULL
};
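Dropping the per-handler prefixes works because a handler registered with .prefix = "" matches every attribute name, and its ->get/->set callbacks receive the full, unstripped name (as the comment on the default handler above notes). A sketch for a hypothetical filesystem (myfs_* names, including myfs_getxattr, are illustrative):

	static int myfs_xattr_get(const struct xattr_handler *handler,
				  struct dentry *unused, struct inode *inode,
				  const char *name, void *buffer, size_t size)
	{
		/* 'name' arrives unstripped because the prefix is empty */
		return myfs_getxattr(inode, name, buffer, size);
	}

	static const struct xattr_handler myfs_xattr_handler = {
		.prefix	= "",		/* match any name */
		.get	= myfs_xattr_get,
	};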
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 80aa6f1eb336..54e5d6681786 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -292,6 +292,7 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
goto out_cleanup;
ovl_dentry_update(dentry, newdentry);
+ ovl_inode_update(d_inode(dentry), d_inode(newdentry));
newdentry = NULL;
/*
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index c2a6b0894022..12bcd07b9e32 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -138,9 +138,12 @@ static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry,
int err;
enum ovl_path_type type;
struct path realpath;
+ const struct cred *old_cred;
type = ovl_path_real(dentry, &realpath);
+ old_cred = ovl_override_creds(dentry->d_sb);
err = vfs_getattr(&realpath, stat);
+ revert_creds(old_cred);
if (err)
return err;
@@ -158,6 +161,22 @@ static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry,
return 0;
}
+/* Common operations required after creating a file on the upper layer */
+static void ovl_instantiate(struct dentry *dentry, struct inode *inode,
+ struct dentry *newdentry, bool hardlink)
+{
+ ovl_dentry_version_inc(dentry->d_parent);
+ ovl_dentry_update(dentry, newdentry);
+ if (!hardlink) {
+ ovl_inode_update(inode, d_inode(newdentry));
+ ovl_copyattr(newdentry->d_inode, inode);
+ } else {
+ WARN_ON(ovl_inode_real(inode, NULL) != d_inode(newdentry));
+ inc_nlink(inode);
+ }
+ d_instantiate(dentry, inode);
+}
+
static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
struct kstat *stat, const char *link,
struct dentry *hardlink)
@@ -177,10 +196,7 @@ static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
if (err)
goto out_dput;
- ovl_dentry_version_inc(dentry->d_parent);
- ovl_dentry_update(dentry, newdentry);
- ovl_copyattr(newdentry->d_inode, inode);
- d_instantiate(dentry, inode);
+ ovl_instantiate(dentry, inode, newdentry, !!hardlink);
newdentry = NULL;
out_dput:
dput(newdentry);
@@ -291,23 +307,29 @@ static struct dentry *ovl_check_empty_and_clear(struct dentry *dentry)
{
int err;
struct dentry *ret = NULL;
+ enum ovl_path_type type = ovl_path_type(dentry);
LIST_HEAD(list);
err = ovl_check_empty_dir(dentry, &list);
- if (err)
+ if (err) {
ret = ERR_PTR(err);
- else {
- /*
- * If no upperdentry then skip clearing whiteouts.
- *
- * Can race with copy-up, since we don't hold the upperdir
- * mutex. Doesn't matter, since copy-up can't create a
- * non-empty directory from an empty one.
- */
- if (ovl_dentry_upper(dentry))
- ret = ovl_clear_empty(dentry, &list);
+ goto out_free;
}
+ /*
+ * When removing an empty opaque directory, it makes no sense to
+ * replace it with an exact replica of itself.
+ *
+ * If no upperdentry then skip clearing whiteouts.
+ *
+ * Can race with copy-up, since we don't hold the upperdir mutex.
+ * Doesn't matter, since copy-up can't create a non-empty directory
+ * from an empty one.
+ */
+ if (OVL_TYPE_UPPER(type) && OVL_TYPE_MERGE(type))
+ ret = ovl_clear_empty(dentry, &list);
+
+out_free:
ovl_cache_free(&list);
return ret;
@@ -347,7 +369,23 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
if (err)
goto out_dput2;
- if (S_ISDIR(stat->mode)) {
+ /*
+ * mode could have been mutilated due to umask (e.g. sgid directory)
+ */
+ if (!hardlink &&
+ !S_ISLNK(stat->mode) && newdentry->d_inode->i_mode != stat->mode) {
+ struct iattr attr = {
+ .ia_valid = ATTR_MODE,
+ .ia_mode = stat->mode,
+ };
+ inode_lock(newdentry->d_inode);
+ err = notify_change(newdentry, &attr, NULL);
+ inode_unlock(newdentry->d_inode);
+ if (err)
+ goto out_cleanup;
+ }
+
+ if (!hardlink && S_ISDIR(stat->mode)) {
err = ovl_set_opaque(newdentry);
if (err)
goto out_cleanup;
@@ -363,10 +401,7 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
if (err)
goto out_cleanup;
}
- ovl_dentry_version_inc(dentry->d_parent);
- ovl_dentry_update(dentry, newdentry);
- ovl_copyattr(newdentry->d_inode, inode);
- d_instantiate(dentry, inode);
+ ovl_instantiate(dentry, inode, newdentry, !!hardlink);
newdentry = NULL;
out_dput2:
dput(upper);
@@ -382,52 +417,42 @@ out_cleanup:
goto out_dput2;
}
-static int ovl_create_or_link(struct dentry *dentry, int mode, dev_t rdev,
- const char *link, struct dentry *hardlink)
+static int ovl_create_or_link(struct dentry *dentry, struct inode *inode,
+ struct kstat *stat, const char *link,
+ struct dentry *hardlink)
{
int err;
- struct inode *inode;
- struct kstat stat = {
- .mode = mode,
- .rdev = rdev,
- };
-
- err = -ENOMEM;
- inode = ovl_new_inode(dentry->d_sb, mode, dentry->d_fsdata);
- if (!inode)
- goto out;
+ const struct cred *old_cred;
+ struct cred *override_cred;
err = ovl_copy_up(dentry->d_parent);
if (err)
- goto out_iput;
-
- if (!ovl_dentry_is_opaque(dentry)) {
- err = ovl_create_upper(dentry, inode, &stat, link, hardlink);
- } else {
- const struct cred *old_cred;
- struct cred *override_cred;
-
- old_cred = ovl_override_creds(dentry->d_sb);
-
- err = -ENOMEM;
- override_cred = prepare_creds();
- if (override_cred) {
- override_cred->fsuid = old_cred->fsuid;
- override_cred->fsgid = old_cred->fsgid;
- put_cred(override_creds(override_cred));
- put_cred(override_cred);
+ return err;
- err = ovl_create_over_whiteout(dentry, inode, &stat,
- link, hardlink);
- }
- revert_creds(old_cred);
+ old_cred = ovl_override_creds(dentry->d_sb);
+ err = -ENOMEM;
+ override_cred = prepare_creds();
+ if (override_cred) {
+ override_cred->fsuid = inode->i_uid;
+ override_cred->fsgid = inode->i_gid;
+ put_cred(override_creds(override_cred));
+ put_cred(override_cred);
+
+ if (!ovl_dentry_is_opaque(dentry))
+ err = ovl_create_upper(dentry, inode, stat, link,
+ hardlink);
+ else
+ err = ovl_create_over_whiteout(dentry, inode, stat,
+ link, hardlink);
}
+ revert_creds(old_cred);
+ if (!err) {
+ struct inode *realinode = d_inode(ovl_dentry_upper(dentry));
- if (!err)
- inode = NULL;
-out_iput:
- iput(inode);
-out:
+ WARN_ON(inode->i_mode != realinode->i_mode);
+ WARN_ON(!uid_eq(inode->i_uid, realinode->i_uid));
+ WARN_ON(!gid_eq(inode->i_gid, realinode->i_gid));
+ }
return err;
}
@@ -435,13 +460,30 @@ static int ovl_create_object(struct dentry *dentry, int mode, dev_t rdev,
const char *link)
{
int err;
+ struct inode *inode;
+ struct kstat stat = {
+ .rdev = rdev,
+ };
err = ovl_want_write(dentry);
- if (!err) {
- err = ovl_create_or_link(dentry, mode, rdev, link, NULL);
- ovl_drop_write(dentry);
- }
+ if (err)
+ goto out;
+
+ err = -ENOMEM;
+ inode = ovl_new_inode(dentry->d_sb, mode);
+ if (!inode)
+ goto out_drop_write;
+
+ inode_init_owner(inode, dentry->d_parent->d_inode, mode);
+ stat.mode = inode->i_mode;
+
+ err = ovl_create_or_link(dentry, inode, &stat, link, NULL);
+ if (err)
+ iput(inode);
+out_drop_write:
+ ovl_drop_write(dentry);
+out:
return err;
}
@@ -476,7 +518,7 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
struct dentry *new)
{
int err;
- struct dentry *upper;
+ struct inode *inode;
err = ovl_want_write(old);
if (err)
@@ -486,8 +528,12 @@ static int ovl_link(struct dentry *old, struct inode *newdir,
if (err)
goto out_drop_write;
- upper = ovl_dentry_upper(old);
- err = ovl_create_or_link(new, upper->d_inode->i_mode, 0, NULL, upper);
+ inode = d_inode(old);
+ ihold(inode);
+
+ err = ovl_create_or_link(new, inode, NULL, NULL, ovl_dentry_upper(old));
+ if (err)
+ iput(inode);
out_drop_write:
ovl_drop_write(old);
@@ -505,75 +551,55 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
struct dentry *upper;
struct dentry *opaquedir = NULL;
int err;
+ int flags = 0;
if (WARN_ON(!workdir))
return -EROFS;
if (is_dir) {
- if (OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) {
- opaquedir = ovl_check_empty_and_clear(dentry);
- err = PTR_ERR(opaquedir);
- if (IS_ERR(opaquedir))
- goto out;
- } else {
- LIST_HEAD(list);
-
- /*
- * When removing an empty opaque directory, then it
- * makes no sense to replace it with an exact replica of
- * itself. But emptiness still needs to be checked.
- */
- err = ovl_check_empty_dir(dentry, &list);
- ovl_cache_free(&list);
- if (err)
- goto out;
- }
+ opaquedir = ovl_check_empty_and_clear(dentry);
+ err = PTR_ERR(opaquedir);
+ if (IS_ERR(opaquedir))
+ goto out;
}
err = ovl_lock_rename_workdir(workdir, upperdir);
if (err)
goto out_dput;
- whiteout = ovl_whiteout(workdir, dentry);
- err = PTR_ERR(whiteout);
- if (IS_ERR(whiteout))
+ upper = lookup_one_len(dentry->d_name.name, upperdir,
+ dentry->d_name.len);
+ err = PTR_ERR(upper);
+ if (IS_ERR(upper))
goto out_unlock;
- upper = ovl_dentry_upper(dentry);
- if (!upper) {
- upper = lookup_one_len(dentry->d_name.name, upperdir,
- dentry->d_name.len);
- err = PTR_ERR(upper);
- if (IS_ERR(upper))
- goto kill_whiteout;
-
- err = ovl_do_rename(wdir, whiteout, udir, upper, 0);
- dput(upper);
- if (err)
- goto kill_whiteout;
- } else {
- int flags = 0;
+ err = -ESTALE;
+ if ((opaquedir && upper != opaquedir) ||
+ (!opaquedir && ovl_dentry_upper(dentry) &&
+ upper != ovl_dentry_upper(dentry))) {
+ goto out_dput_upper;
+ }
- if (opaquedir)
- upper = opaquedir;
- err = -ESTALE;
- if (upper->d_parent != upperdir)
- goto kill_whiteout;
+ whiteout = ovl_whiteout(workdir, dentry);
+ err = PTR_ERR(whiteout);
+ if (IS_ERR(whiteout))
+ goto out_dput_upper;
- if (is_dir)
- flags |= RENAME_EXCHANGE;
+ if (d_is_dir(upper))
+ flags = RENAME_EXCHANGE;
- err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
- if (err)
- goto kill_whiteout;
+ err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
+ if (err)
+ goto kill_whiteout;
+ if (flags)
+ ovl_cleanup(wdir, upper);
- if (is_dir)
- ovl_cleanup(wdir, upper);
- }
ovl_dentry_version_inc(dentry->d_parent);
out_d_drop:
d_drop(dentry);
dput(whiteout);
+out_dput_upper:
+ dput(upper);
out_unlock:
unlock_rename(workdir, upperdir);
out_dput:
@@ -639,6 +665,8 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
{
enum ovl_path_type type;
int err;
+ const struct cred *old_cred;
+
err = ovl_check_sticky(dentry);
if (err)
@@ -653,14 +681,18 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
goto out_drop_write;
type = ovl_path_type(dentry);
- if (OVL_TYPE_PURE_UPPER(type)) {
- err = ovl_remove_upper(dentry, is_dir);
- } else {
- const struct cred *old_cred = ovl_override_creds(dentry->d_sb);
+ old_cred = ovl_override_creds(dentry->d_sb);
+ if (OVL_TYPE_PURE_UPPER(type))
+ err = ovl_remove_upper(dentry, is_dir);
+ else
err = ovl_remove_and_whiteout(dentry, is_dir);
-
- revert_creds(old_cred);
+ revert_creds(old_cred);
+ if (!err) {
+ if (is_dir)
+ clear_nlink(dentry->d_inode);
+ else
+ drop_nlink(dentry->d_inode);
}
out_drop_write:
ovl_drop_write(dentry);
@@ -766,8 +798,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
old_opaque = !OVL_TYPE_PURE_UPPER(old_type);
new_opaque = !OVL_TYPE_PURE_UPPER(new_type);
- if (old_opaque || new_opaque)
- old_cred = ovl_override_creds(old->d_sb);
+ old_cred = ovl_override_creds(old->d_sb);
if (overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) {
opaquedir = ovl_check_empty_and_clear(new);
@@ -897,8 +928,7 @@ out_dput_old:
out_unlock:
unlock_rename(new_upperdir, old_upperdir);
out_revert_creds:
- if (old_opaque || new_opaque)
- revert_creds(old_cred);
+ revert_creds(old_cred);
out_drop_write:
ovl_drop_write(old);
out:
@@ -919,8 +949,10 @@ const struct inode_operations ovl_dir_inode_operations = {
.mknod = ovl_mknod,
.permission = ovl_permission,
.getattr = ovl_dir_getattr,
- .setxattr = ovl_setxattr,
+ .setxattr = generic_setxattr,
.getxattr = ovl_getxattr,
.listxattr = ovl_listxattr,
.removexattr = ovl_removexattr,
+ .get_acl = ovl_get_acl,
+ .update_time = ovl_update_time,
};
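This is the recurring shape of the overlayfs credential changes: every operation on the underlying layer is bracketed by ovl_override_creds()/revert_creds(), so permission checks on the real filesystem run as the mounter while checks on the overlay inode keep the caller's creds. A condensed sketch of the bracket (the callback indirection is illustrative):

	/* Sketch: run one underlying-fs operation with the mounter's creds. */
	static int with_mounter_creds(struct super_block *sb,
				      int (*op)(void *), void *arg)
	{
		const struct cred *old_cred = ovl_override_creds(sb);
		int err = op(arg);

		revert_creds(old_cred);
		return err;
	}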
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index c831c2e5f803..1b885c156028 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -41,6 +41,7 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
{
int err;
struct dentry *upperdentry;
+ const struct cred *old_cred;
/*
* Check for permissions before trying to copy-up. This is redundant
@@ -80,8 +81,13 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
goto out_drop_write;
}
+ if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
+ attr->ia_valid &= ~ATTR_MODE;
+
inode_lock(upperdentry->d_inode);
+ old_cred = ovl_override_creds(dentry->d_sb);
err = notify_change(upperdentry, attr, NULL);
+ revert_creds(old_cred);
if (!err)
ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
inode_unlock(upperdentry->d_inode);
@@ -99,96 +105,46 @@ static int ovl_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat)
{
struct path realpath;
+ const struct cred *old_cred;
+ int err;
ovl_path_real(dentry, &realpath);
- return vfs_getattr(&realpath, stat);
+ old_cred = ovl_override_creds(dentry->d_sb);
+ err = vfs_getattr(&realpath, stat);
+ revert_creds(old_cred);
+ return err;
}
int ovl_permission(struct inode *inode, int mask)
{
- struct ovl_entry *oe;
- struct dentry *alias = NULL;
- struct inode *realinode;
- struct dentry *realdentry;
bool is_upper;
+ struct inode *realinode = ovl_inode_real(inode, &is_upper);
+ const struct cred *old_cred;
int err;
- if (S_ISDIR(inode->i_mode)) {
- oe = inode->i_private;
- } else if (mask & MAY_NOT_BLOCK) {
- return -ECHILD;
- } else {
- /*
- * For non-directories find an alias and get the info
- * from there.
- */
- alias = d_find_any_alias(inode);
- if (WARN_ON(!alias))
- return -ENOENT;
-
- oe = alias->d_fsdata;
- }
-
- realdentry = ovl_entry_real(oe, &is_upper);
-
- if (ovl_is_default_permissions(inode)) {
- struct kstat stat;
- struct path realpath = { .dentry = realdentry };
-
- if (mask & MAY_NOT_BLOCK)
- return -ECHILD;
-
- realpath.mnt = ovl_entry_mnt_real(oe, inode, is_upper);
-
- err = vfs_getattr(&realpath, &stat);
- if (err)
- goto out_dput;
-
- err = -ESTALE;
- if ((stat.mode ^ inode->i_mode) & S_IFMT)
- goto out_dput;
-
- inode->i_mode = stat.mode;
- inode->i_uid = stat.uid;
- inode->i_gid = stat.gid;
-
- err = generic_permission(inode, mask);
- goto out_dput;
- }
-
/* Careful in RCU walk mode */
- realinode = ACCESS_ONCE(realdentry->d_inode);
if (!realinode) {
WARN_ON(!(mask & MAY_NOT_BLOCK));
- err = -ENOENT;
- goto out_dput;
+ return -ECHILD;
}
- if (mask & MAY_WRITE) {
- umode_t mode = realinode->i_mode;
-
- /*
- * Writes will always be redirected to upper layer, so
- * ignore lower layer being read-only.
- *
- * If the overlay itself is read-only then proceed
- * with the permission check, don't return EROFS.
- * This will only happen if this is the lower layer of
- * another overlayfs.
- *
- * If upper fs becomes read-only after the overlay was
- * constructed return EROFS to prevent modification of
- * upper layer.
- */
- err = -EROFS;
- if (is_upper && !IS_RDONLY(inode) && IS_RDONLY(realinode) &&
- (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
- goto out_dput;
+ /*
+ * Check the overlay inode with the creds of the task, and the
+ * underlying inode with the creds of the mounter
+ */
+ err = generic_permission(inode, mask);
+ if (err)
+ return err;
+
+ old_cred = ovl_override_creds(inode->i_sb);
+ if (!is_upper && !special_file(realinode->i_mode) && mask & MAY_WRITE) {
+ mask &= ~(MAY_WRITE | MAY_APPEND);
+ /* Make sure mounter can read file for copy up later */
+ mask |= MAY_READ;
}
+ err = inode_permission(realinode, mask);
+ revert_creds(old_cred);
- err = __inode_permission(realinode, mask);
-out_dput:
- dput(alias);
return err;
}
@@ -198,6 +154,8 @@ static const char *ovl_get_link(struct dentry *dentry,
{
struct dentry *realdentry;
struct inode *realinode;
+ const struct cred *old_cred;
+ const char *p;
if (!dentry)
return ERR_PTR(-ECHILD);
@@ -208,13 +166,18 @@ static const char *ovl_get_link(struct dentry *dentry,
if (WARN_ON(!realinode->i_op->get_link))
return ERR_PTR(-EPERM);
- return realinode->i_op->get_link(realdentry, realinode, done);
+ old_cred = ovl_override_creds(dentry->d_sb);
+ p = realinode->i_op->get_link(realdentry, realinode, done);
+ revert_creds(old_cred);
+ return p;
}
static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
{
struct path realpath;
struct inode *realinode;
+ const struct cred *old_cred;
+ int err;
ovl_path_real(dentry, &realpath);
realinode = realpath.dentry->d_inode;
@@ -222,15 +185,17 @@ static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
if (!realinode->i_op->readlink)
return -EINVAL;
- touch_atime(&realpath);
-
- return realinode->i_op->readlink(realpath.dentry, buf, bufsiz);
+ old_cred = ovl_override_creds(dentry->d_sb);
+ err = realinode->i_op->readlink(realpath.dentry, buf, bufsiz);
+ revert_creds(old_cred);
+ return err;
}
-
static bool ovl_is_private_xattr(const char *name)
{
- return strncmp(name, OVL_XATTR_PRE_NAME, OVL_XATTR_PRE_LEN) == 0;
+#define OVL_XATTR_PRE_NAME OVL_XATTR_PREFIX "."
+ return strncmp(name, OVL_XATTR_PRE_NAME,
+ sizeof(OVL_XATTR_PRE_NAME) - 1) == 0;
}
int ovl_setxattr(struct dentry *dentry, struct inode *inode,
@@ -239,21 +204,20 @@ int ovl_setxattr(struct dentry *dentry, struct inode *inode,
{
int err;
struct dentry *upperdentry;
+ const struct cred *old_cred;
err = ovl_want_write(dentry);
if (err)
goto out;
- err = -EPERM;
- if (ovl_is_private_xattr(name))
- goto out_drop_write;
-
err = ovl_copy_up(dentry);
if (err)
goto out_drop_write;
upperdentry = ovl_dentry_upper(dentry);
+ old_cred = ovl_override_creds(dentry->d_sb);
err = vfs_setxattr(upperdentry, name, value, size, flags);
+ revert_creds(old_cred);
out_drop_write:
ovl_drop_write(dentry);
@@ -265,11 +229,16 @@ ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode,
const char *name, void *value, size_t size)
{
struct dentry *realdentry = ovl_dentry_real(dentry);
+ ssize_t res;
+ const struct cred *old_cred;
if (ovl_is_private_xattr(name))
return -ENODATA;
- return vfs_getxattr(realdentry, name, value, size);
+ old_cred = ovl_override_creds(dentry->d_sb);
+ res = vfs_getxattr(realdentry, name, value, size);
+ revert_creds(old_cred);
+ return res;
}
ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
@@ -277,8 +246,11 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
struct dentry *realdentry = ovl_dentry_real(dentry);
ssize_t res;
int off;
+ const struct cred *old_cred;
+ old_cred = ovl_override_creds(dentry->d_sb);
res = vfs_listxattr(realdentry, list, size);
+ revert_creds(old_cred);
if (res <= 0 || size == 0)
return res;
@@ -305,6 +277,7 @@ int ovl_removexattr(struct dentry *dentry, const char *name)
int err;
struct path realpath;
enum ovl_path_type type = ovl_path_real(dentry, &realpath);
+ const struct cred *old_cred;
err = ovl_want_write(dentry);
if (err)
@@ -326,13 +299,34 @@ int ovl_removexattr(struct dentry *dentry, const char *name)
ovl_path_upper(dentry, &realpath);
}
+ old_cred = ovl_override_creds(dentry->d_sb);
err = vfs_removexattr(realpath.dentry, name);
+ revert_creds(old_cred);
out_drop_write:
ovl_drop_write(dentry);
out:
return err;
}
+struct posix_acl *ovl_get_acl(struct inode *inode, int type)
+{
+ struct inode *realinode = ovl_inode_real(inode, NULL);
+ const struct cred *old_cred;
+ struct posix_acl *acl;
+
+ if (!IS_POSIXACL(realinode))
+ return NULL;
+
+ if (!realinode->i_op->get_acl)
+ return NULL;
+
+ old_cred = ovl_override_creds(inode->i_sb);
+ acl = realinode->i_op->get_acl(realinode, type);
+ revert_creds(old_cred);
+
+ return acl;
+}
+
static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type,
struct dentry *realdentry)
{
@@ -348,46 +342,60 @@ static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type,
return true;
}
-struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags)
+int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags)
{
- int err;
+ int err = 0;
struct path realpath;
enum ovl_path_type type;
- if (d_is_dir(dentry))
- return d_backing_inode(dentry);
-
type = ovl_path_real(dentry, &realpath);
if (ovl_open_need_copy_up(file_flags, type, realpath.dentry)) {
err = ovl_want_write(dentry);
- if (err)
- return ERR_PTR(err);
+ if (!err) {
+ if (file_flags & O_TRUNC)
+ err = ovl_copy_up_truncate(dentry);
+ else
+ err = ovl_copy_up(dentry);
+ ovl_drop_write(dentry);
+ }
+ }
- if (file_flags & O_TRUNC)
- err = ovl_copy_up_truncate(dentry);
- else
- err = ovl_copy_up(dentry);
- ovl_drop_write(dentry);
- if (err)
- return ERR_PTR(err);
+ return err;
+}
- ovl_path_upper(dentry, &realpath);
+int ovl_update_time(struct inode *inode, struct timespec *ts, int flags)
+{
+ struct dentry *alias;
+ struct path upperpath;
+
+ if (!(flags & S_ATIME))
+ return 0;
+
+ alias = d_find_any_alias(inode);
+ if (!alias)
+ return 0;
+
+ ovl_path_upper(alias, &upperpath);
+ if (upperpath.dentry) {
+ touch_atime(&upperpath);
+ inode->i_atime = d_inode(upperpath.dentry)->i_atime;
}
- if (realpath.dentry->d_flags & DCACHE_OP_SELECT_INODE)
- return realpath.dentry->d_op->d_select_inode(realpath.dentry, file_flags);
+ dput(alias);
- return d_backing_inode(realpath.dentry);
+ return 0;
}
static const struct inode_operations ovl_file_inode_operations = {
.setattr = ovl_setattr,
.permission = ovl_permission,
.getattr = ovl_getattr,
- .setxattr = ovl_setxattr,
+ .setxattr = generic_setxattr,
.getxattr = ovl_getxattr,
.listxattr = ovl_listxattr,
.removexattr = ovl_removexattr,
+ .get_acl = ovl_get_acl,
+ .update_time = ovl_update_time,
};
static const struct inode_operations ovl_symlink_inode_operations = {
@@ -395,30 +403,22 @@ static const struct inode_operations ovl_symlink_inode_operations = {
.get_link = ovl_get_link,
.readlink = ovl_readlink,
.getattr = ovl_getattr,
- .setxattr = ovl_setxattr,
+ .setxattr = generic_setxattr,
.getxattr = ovl_getxattr,
.listxattr = ovl_listxattr,
.removexattr = ovl_removexattr,
+ .update_time = ovl_update_time,
};
-struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
- struct ovl_entry *oe)
+static void ovl_fill_inode(struct inode *inode, umode_t mode)
{
- struct inode *inode;
-
- inode = new_inode(sb);
- if (!inode)
- return NULL;
-
- mode &= S_IFMT;
-
inode->i_ino = get_next_ino();
inode->i_mode = mode;
- inode->i_flags |= S_NOATIME | S_NOCMTIME;
+ inode->i_flags |= S_NOCMTIME;
+ mode &= S_IFMT;
switch (mode) {
case S_IFDIR:
- inode->i_private = oe;
inode->i_op = &ovl_dir_inode_operations;
inode->i_fop = &ovl_dir_operations;
break;
@@ -427,6 +427,10 @@ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
inode->i_op = &ovl_symlink_inode_operations;
break;
+ default:
+ WARN(1, "illegal file type: %i\n", mode);
+ /* Fall through */
+
case S_IFREG:
case S_IFSOCK:
case S_IFBLK:
@@ -434,11 +438,42 @@ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
case S_IFIFO:
inode->i_op = &ovl_file_inode_operations;
break;
+ }
+}
- default:
- WARN(1, "illegal file type: %i\n", mode);
- iput(inode);
- inode = NULL;
+struct inode *ovl_new_inode(struct super_block *sb, umode_t mode)
+{
+ struct inode *inode;
+
+ inode = new_inode(sb);
+ if (inode)
+ ovl_fill_inode(inode, mode);
+
+ return inode;
+}
+
+static int ovl_inode_test(struct inode *inode, void *data)
+{
+ return ovl_inode_real(inode, NULL) == data;
+}
+
+static int ovl_inode_set(struct inode *inode, void *data)
+{
+ inode->i_private = (void *) (((unsigned long) data) | OVL_ISUPPER_MASK);
+ return 0;
+}
+
+struct inode *ovl_get_inode(struct super_block *sb, struct inode *realinode)
+{
+ struct inode *inode;
+
+ inode = iget5_locked(sb, (unsigned long) realinode,
+ ovl_inode_test, ovl_inode_set, realinode);
+ if (inode && inode->i_state & I_NEW) {
+ ovl_fill_inode(inode, realinode->i_mode);
+ set_nlink(inode, realinode->i_nlink);
+ unlock_new_inode(inode);
}
return inode;
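ovl_get_inode() keys the inode cache by the address of the real inode, so hard links that resolve to the same upper inode share one overlay inode. The iget5_locked() pattern in simplified sketch form (my_* names are illustrative, and the test/set pair here uses plain i_private equality rather than the tagged-pointer form above):

	static int my_test(struct inode *inode, void *data)
	{
		return inode->i_private == data;	/* same backing inode? */
	}

	static int my_set(struct inode *inode, void *data)
	{
		inode->i_private = data;
		return 0;
	}

	static struct inode *my_get_inode(struct super_block *sb, struct inode *real)
	{
		struct inode *inode = iget5_locked(sb, (unsigned long)real,
						   my_test, my_set, real);

		if (inode && (inode->i_state & I_NEW)) {
			/* first instantiation: fill fields, then publish */
			unlock_new_inode(inode);
		}
		return inode;
	}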
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index 4bd9b5ba8f42..e4f5c9536bfe 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -23,9 +23,11 @@ enum ovl_path_type {
#define OVL_TYPE_MERGE_OR_LOWER(type) \
(OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type))
-#define OVL_XATTR_PRE_NAME "trusted.overlay."
-#define OVL_XATTR_PRE_LEN 16
-#define OVL_XATTR_OPAQUE OVL_XATTR_PRE_NAME"opaque"
+
+#define OVL_XATTR_PREFIX XATTR_TRUSTED_PREFIX "overlay"
+#define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX ".opaque"
+
+#define OVL_ISUPPER_MASK 1UL
static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry)
{
@@ -131,6 +133,16 @@ static inline int ovl_do_whiteout(struct inode *dir, struct dentry *dentry)
return err;
}
+static inline struct inode *ovl_inode_real(struct inode *inode, bool *is_upper)
+{
+ unsigned long x = (unsigned long) READ_ONCE(inode->i_private);
+
+ if (is_upper)
+ *is_upper = x & OVL_ISUPPER_MASK;
+
+ return (struct inode *) (x & ~OVL_ISUPPER_MASK);
+}
+
enum ovl_path_type ovl_path_type(struct dentry *dentry);
u64 ovl_dentry_version_get(struct dentry *dentry);
void ovl_dentry_version_inc(struct dentry *dentry);
@@ -141,11 +153,9 @@ int ovl_path_next(int idx, struct dentry *dentry, struct path *path);
struct dentry *ovl_dentry_upper(struct dentry *dentry);
struct dentry *ovl_dentry_lower(struct dentry *dentry);
struct dentry *ovl_dentry_real(struct dentry *dentry);
-struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper);
struct vfsmount *ovl_entry_mnt_real(struct ovl_entry *oe, struct inode *inode,
bool is_upper);
struct ovl_dir_cache *ovl_dir_cache(struct dentry *dentry);
-bool ovl_is_default_permissions(struct inode *inode);
void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache);
struct dentry *ovl_workdir(struct dentry *dentry);
int ovl_want_write(struct dentry *dentry);
@@ -155,6 +165,7 @@ void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque);
bool ovl_is_whiteout(struct dentry *dentry);
const struct cred *ovl_override_creds(struct super_block *sb);
void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry);
+void ovl_inode_update(struct inode *inode, struct inode *upperinode);
struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags);
struct file *ovl_path_open(struct path *path, int flags);
@@ -179,14 +190,20 @@ ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode,
const char *name, void *value, size_t size);
ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
int ovl_removexattr(struct dentry *dentry, const char *name);
-struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags);
+struct posix_acl *ovl_get_acl(struct inode *inode, int type);
+int ovl_open_maybe_copy_up(struct dentry *dentry, unsigned int file_flags);
+int ovl_update_time(struct inode *inode, struct timespec *ts, int flags);
-struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
- struct ovl_entry *oe);
+struct inode *ovl_new_inode(struct super_block *sb, umode_t mode);
+struct inode *ovl_get_inode(struct super_block *sb, struct inode *realinode);
static inline void ovl_copyattr(struct inode *from, struct inode *to)
{
to->i_uid = from->i_uid;
to->i_gid = from->i_gid;
+ to->i_mode = from->i_mode;
+ to->i_atime = from->i_atime;
+ to->i_mtime = from->i_mtime;
+ to->i_ctime = from->i_ctime;
}
/* dir.c */
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 9a7693d5f8ff..4036132842b5 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -16,10 +16,10 @@
#include <linux/slab.h>
#include <linux/parser.h>
#include <linux/module.h>
-#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
+#include <linux/posix_acl_xattr.h>
#include "overlayfs.h"
MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
@@ -145,18 +145,11 @@ struct dentry *ovl_dentry_real(struct dentry *dentry)
return realdentry;
}
-struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper)
+static void ovl_inode_init(struct inode *inode, struct inode *realinode,
+ bool is_upper)
{
- struct dentry *realdentry;
-
- realdentry = ovl_upperdentry_dereference(oe);
- if (realdentry) {
- *is_upper = true;
- } else {
- realdentry = __ovl_dentry_lower(oe);
- *is_upper = false;
- }
- return realdentry;
+ WRITE_ONCE(inode->i_private, (unsigned long) realinode |
+ (is_upper ? OVL_ISUPPER_MASK : 0));
}
struct vfsmount *ovl_entry_mnt_real(struct ovl_entry *oe, struct inode *inode,
@@ -178,13 +171,6 @@ struct ovl_dir_cache *ovl_dir_cache(struct dentry *dentry)
return oe->cache;
}
-bool ovl_is_default_permissions(struct inode *inode)
-{
- struct ovl_fs *ofs = inode->i_sb->s_fs_info;
-
- return ofs->config.default_permissions;
-}
-
void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache)
{
struct ovl_entry *oe = dentry->d_fsdata;
@@ -235,7 +221,6 @@ void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry)
WARN_ON(!inode_is_locked(upperdentry->d_parent->d_inode));
WARN_ON(oe->__upperdentry);
- BUG_ON(!upperdentry->d_inode);
/*
* Make sure upperdentry is consistent before making it visible to
* ovl_upperdentry_dereference().
@@ -244,6 +229,16 @@ void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry)
oe->__upperdentry = upperdentry;
}
+void ovl_inode_update(struct inode *inode, struct inode *upperinode)
+{
+ WARN_ON(!upperinode);
+ WARN_ON(!inode_unhashed(inode));
+ WRITE_ONCE(inode->i_private,
+ (unsigned long) upperinode | OVL_ISUPPER_MASK);
+ if (!S_ISDIR(upperinode->i_mode))
+ __insert_inode_hash(inode, (unsigned long) upperinode);
+}
+
void ovl_dentry_version_inc(struct dentry *dentry)
{
struct ovl_entry *oe = dentry->d_fsdata;
@@ -304,7 +299,9 @@ static void ovl_dentry_release(struct dentry *dentry)
}
}
-static struct dentry *ovl_d_real(struct dentry *dentry, struct inode *inode)
+static struct dentry *ovl_d_real(struct dentry *dentry,
+ const struct inode *inode,
+ unsigned int open_flags)
{
struct dentry *real;
@@ -314,6 +311,16 @@ static struct dentry *ovl_d_real(struct dentry *dentry, struct inode *inode)
goto bug;
}
+ if (d_is_negative(dentry))
+ return dentry;
+
+ if (open_flags) {
+ int err = ovl_open_maybe_copy_up(dentry, open_flags);
+
+ if (err)
+ return ERR_PTR(err);
+ }
+
real = ovl_dentry_upper(dentry);
if (real && (!inode || inode == d_inode(real)))
return real;
@@ -326,11 +333,9 @@ static struct dentry *ovl_d_real(struct dentry *dentry, struct inode *inode)
return real;
/* Handle recursion */
- if (real->d_flags & DCACHE_OP_REAL)
- return real->d_op->d_real(real, inode);
-
+ return d_real(real, inode, open_flags);
bug:
- WARN(1, "ovl_d_real(%pd4, %s:%lu\n): real dentry not found\n", dentry,
+ WARN(1, "ovl_d_real(%pd4, %s:%lu): real dentry not found\n", dentry,
inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
return dentry;
}
@@ -378,13 +383,11 @@ static int ovl_dentry_weak_revalidate(struct dentry *dentry, unsigned int flags)
static const struct dentry_operations ovl_dentry_operations = {
.d_release = ovl_dentry_release,
- .d_select_inode = ovl_d_select_inode,
.d_real = ovl_d_real,
};
static const struct dentry_operations ovl_reval_dentry_operations = {
.d_release = ovl_dentry_release,
- .d_select_inode = ovl_d_select_inode,
.d_real = ovl_d_real,
.d_revalidate = ovl_dentry_revalidate,
.d_weak_revalidate = ovl_dentry_weak_revalidate,
@@ -404,7 +407,8 @@ static struct ovl_entry *ovl_alloc_entry(unsigned int numlower)
static bool ovl_dentry_remote(struct dentry *dentry)
{
return dentry->d_flags &
- (DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE);
+ (DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE |
+ DCACHE_OP_REAL);
}
static bool ovl_dentry_weird(struct dentry *dentry)
@@ -415,12 +419,16 @@ static bool ovl_dentry_weird(struct dentry *dentry)
DCACHE_OP_COMPARE);
}
-static inline struct dentry *ovl_lookup_real(struct dentry *dir,
- struct qstr *name)
+static inline struct dentry *ovl_lookup_real(struct super_block *ovl_sb,
+ struct dentry *dir,
+ const struct qstr *name)
{
+ const struct cred *old_cred;
struct dentry *dentry;
- dentry = lookup_hash(name, dir);
+ old_cred = ovl_override_creds(ovl_sb);
+ dentry = lookup_one_len_unlocked(name->name, dir, name->len);
+ revert_creds(old_cred);
if (IS_ERR(dentry)) {
if (PTR_ERR(dentry) == -ENOENT)
@@ -473,7 +481,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
upperdir = ovl_upperdentry_dereference(poe);
if (upperdir) {
- this = ovl_lookup_real(upperdir, &dentry->d_name);
+ this = ovl_lookup_real(dentry->d_sb, upperdir, &dentry->d_name);
err = PTR_ERR(this);
if (IS_ERR(this))
goto out;
@@ -506,7 +514,8 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
bool opaque = false;
struct path lowerpath = poe->lowerstack[i];
- this = ovl_lookup_real(lowerpath.dentry, &dentry->d_name);
+ this = ovl_lookup_real(dentry->d_sb,
+ lowerpath.dentry, &dentry->d_name);
err = PTR_ERR(this);
if (IS_ERR(this)) {
/*
@@ -561,12 +570,19 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
if (upperdentry || ctr) {
struct dentry *realdentry;
+ struct inode *realinode;
realdentry = upperdentry ? upperdentry : stack[0].dentry;
+ realinode = d_inode(realdentry);
err = -ENOMEM;
- inode = ovl_new_inode(dentry->d_sb, realdentry->d_inode->i_mode,
- oe);
+ if (upperdentry && !d_is_dir(upperdentry)) {
+ inode = ovl_get_inode(dentry->d_sb, realinode);
+ } else {
+ inode = ovl_new_inode(dentry->d_sb, realinode->i_mode);
+ if (inode)
+ ovl_inode_init(inode, realinode, !!upperdentry);
+ }
if (!inode)
goto out_free_oe;
ovl_copyattr(realdentry->d_inode, inode);
@@ -595,7 +611,7 @@ out:
struct file *ovl_path_open(struct path *path, int flags)
{
- return dentry_open(path, flags, current_cred());
+ return dentry_open(path, flags | O_NOATIME, current_cred());
}
static void ovl_put_super(struct super_block *sb)
@@ -678,6 +694,7 @@ static const struct super_operations ovl_super_operations = {
.statfs = ovl_statfs,
.show_options = ovl_show_options,
.remount_fs = ovl_remount,
+ .drop_inode = generic_delete_inode,
};
enum {
@@ -950,11 +967,102 @@ static unsigned int ovl_split_lowerdirs(char *str)
return ctr;
}
+static int ovl_posix_acl_xattr_set(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
+{
+ struct dentry *workdir = ovl_workdir(dentry);
+ struct inode *realinode = ovl_inode_real(inode, NULL);
+ struct posix_acl *acl = NULL;
+ int err;
+
+ /* Check that everything is OK before copy-up */
+ if (value) {
+ acl = posix_acl_from_xattr(&init_user_ns, value, size);
+ if (IS_ERR(acl))
+ return PTR_ERR(acl);
+ }
+ err = -EOPNOTSUPP;
+ if (!IS_POSIXACL(d_inode(workdir)))
+ goto out_acl_release;
+ if (!realinode->i_op->set_acl)
+ goto out_acl_release;
+ if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode)) {
+ err = acl ? -EACCES : 0;
+ goto out_acl_release;
+ }
+ err = -EPERM;
+ if (!inode_owner_or_capable(inode))
+ goto out_acl_release;
+
+ posix_acl_release(acl);
+
+ return ovl_setxattr(dentry, inode, handler->name, value, size, flags);
+
+out_acl_release:
+ posix_acl_release(acl);
+ return err;
+}
+
+static int ovl_other_xattr_set(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
+{
+ return ovl_setxattr(dentry, inode, name, value, size, flags);
+}
+
+static int ovl_own_xattr_set(const struct xattr_handler *handler,
+ struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value,
+ size_t size, int flags)
+{
+ return -EPERM;
+}
+
+static const struct xattr_handler ovl_posix_acl_access_xattr_handler = {
+ .name = XATTR_NAME_POSIX_ACL_ACCESS,
+ .flags = ACL_TYPE_ACCESS,
+ .set = ovl_posix_acl_xattr_set,
+};
+
+static const struct xattr_handler ovl_posix_acl_default_xattr_handler = {
+ .name = XATTR_NAME_POSIX_ACL_DEFAULT,
+ .flags = ACL_TYPE_DEFAULT,
+ .set = ovl_posix_acl_xattr_set,
+};
+
+static const struct xattr_handler ovl_own_xattr_handler = {
+ .prefix = OVL_XATTR_PREFIX,
+ .set = ovl_own_xattr_set,
+};
+
+static const struct xattr_handler ovl_other_xattr_handler = {
+ .prefix = "", /* catch all */
+ .set = ovl_other_xattr_set,
+};
+
+static const struct xattr_handler *ovl_xattr_handlers[] = {
+ &ovl_posix_acl_access_xattr_handler,
+ &ovl_posix_acl_default_xattr_handler,
+ &ovl_own_xattr_handler,
+ &ovl_other_xattr_handler,
+ NULL
+};
+
+static const struct xattr_handler *ovl_xattr_noacl_handlers[] = {
+ &ovl_own_xattr_handler,
+ &ovl_other_xattr_handler,
+ NULL,
+};
+
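
Array order matters here: the VFS scans a superblock's handler array front to back, matching ->name exactly or ->prefix as a prefix, so the empty catch-all prefix must come last. A simplified sketch of that lookup (assumption: condensed from the generic fs/xattr.c logic, not the verbatim code):

static const struct xattr_handler *
xattr_find_handler(const struct xattr_handler **handlers, const char *name)
{
	const struct xattr_handler *h;

	for (; (h = *handlers) != NULL; handlers++) {
		const char *n = h->name ? h->name : h->prefix;

		if (h->name ? !strcmp(name, n) : !strncmp(name, n, strlen(n)))
			return h;	/* first match wins */
	}
	return NULL;	/* no handler: the real code returns -EOPNOTSUPP */
}
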
static int ovl_fill_super(struct super_block *sb, void *data, int silent)
{
struct path upperpath = { NULL, NULL };
struct path workpath = { NULL, NULL };
struct dentry *root_dentry;
+ struct inode *realinode;
struct ovl_entry *oe;
struct ovl_fs *ufs;
struct path *stack = NULL;
@@ -1061,6 +1169,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
pr_err("overlayfs: failed to clone upperpath\n");
goto out_put_lowerpath;
}
+ /* Don't inherit atime flags */
+ ufs->upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME);
+
+ sb->s_time_gran = ufs->upper_mnt->mnt_sb->s_time_gran;
ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry);
err = PTR_ERR(ufs->workdir);
@@ -1108,7 +1220,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
* Make lower_mnt R/O. That way fchmod/fchown on lower file
* will fail instead of modifying lower fs.
*/
- mnt->mnt_flags |= MNT_READONLY;
+ mnt->mnt_flags |= MNT_READONLY | MNT_NOATIME;
ufs->lower_mnt[ufs->numlower] = mnt;
ufs->numlower++;
@@ -1132,7 +1244,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
if (!oe)
goto out_put_cred;
- root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, oe));
+ root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR));
if (!root_dentry)
goto out_free_oe;
@@ -1151,13 +1263,19 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
root_dentry->d_fsdata = oe;
- ovl_copyattr(ovl_dentry_real(root_dentry)->d_inode,
- root_dentry->d_inode);
+ realinode = d_inode(ovl_dentry_real(root_dentry));
+ ovl_inode_init(d_inode(root_dentry), realinode, !!upperpath.dentry);
+ ovl_copyattr(realinode, d_inode(root_dentry));
sb->s_magic = OVERLAYFS_SUPER_MAGIC;
sb->s_op = &ovl_super_operations;
+ if (IS_ENABLED(CONFIG_FS_POSIX_ACL))
+ sb->s_xattr = ovl_xattr_handlers;
+ else
+ sb->s_xattr = ovl_xattr_noacl_handlers;
sb->s_root = root_dentry;
sb->s_fs_info = ufs;
+ sb->s_flags |= MS_POSIXACL;
return 0;
diff --git a/fs/pipe.c b/fs/pipe.c
index 0d3f5165cb0b..4ebe6b2e5217 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -21,6 +21,7 @@
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
+#include <linux/memcontrol.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
@@ -137,6 +138,20 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
put_page(page);
}
+static int anon_pipe_buf_steal(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+{
+ struct page *page = buf->page;
+
+ if (page_count(page) == 1) {
+ if (memcg_kmem_enabled())
+ memcg_kmem_uncharge(page, 0);
+ __SetPageLocked(page);
+ return 0;
+ }
+ return 1;
+}
+
/**
* generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
* @pipe: the pipe that the buffer belongs to
@@ -219,7 +234,7 @@ static const struct pipe_buf_operations anon_pipe_buf_ops = {
.can_merge = 1,
.confirm = generic_pipe_buf_confirm,
.release = anon_pipe_buf_release,
- .steal = generic_pipe_buf_steal,
+ .steal = anon_pipe_buf_steal,
.get = generic_pipe_buf_get,
};
@@ -227,7 +242,7 @@ static const struct pipe_buf_operations packet_pipe_buf_ops = {
.can_merge = 0,
.confirm = generic_pipe_buf_confirm,
.release = anon_pipe_buf_release,
- .steal = generic_pipe_buf_steal,
+ .steal = anon_pipe_buf_steal,
.get = generic_pipe_buf_get,
};
@@ -405,7 +420,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
int copied;
if (!page) {
- page = alloc_page(GFP_HIGHUSER);
+ page = alloc_page(GFP_HIGHUSER | __GFP_ACCOUNT);
if (unlikely(!page)) {
ret = ret ? : -ENOMEM;
break;
@@ -611,7 +626,7 @@ struct pipe_inode_info *alloc_pipe_info(void)
{
struct pipe_inode_info *pipe;
- pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
+ pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
if (pipe) {
unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
struct user_struct *user = get_current_user();
@@ -619,7 +634,9 @@ struct pipe_inode_info *alloc_pipe_info(void)
if (!too_many_pipe_buffers_hard(user)) {
if (too_many_pipe_buffers_soft(user))
pipe_bufs = 1;
- pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * pipe_bufs, GFP_KERNEL);
+ pipe->bufs = kcalloc(pipe_bufs,
+ sizeof(struct pipe_buffer),
+ GFP_KERNEL_ACCOUNT);
}
if (pipe->bufs) {
@@ -1010,7 +1027,8 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
if (nr_pages < pipe->nrbufs)
return -EBUSY;
- bufs = kcalloc(nr_pages, sizeof(*bufs), GFP_KERNEL | __GFP_NOWARN);
+ bufs = kcalloc(nr_pages, sizeof(*bufs),
+ GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (unlikely(!bufs))
return -ENOMEM;
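
All three pipe allocations are now charged to the owner's memory cgroup. For reference, the accounted GFP variant is simply the normal mask plus __GFP_ACCOUNT (assumption: include/linux/gfp.h of this series):

#define GFP_KERNEL_ACCOUNT (GFP_KERNEL | __GFP_ACCOUNT)
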
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index edc452c2a563..59d47ab0791a 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -205,7 +205,7 @@ posix_acl_clone(const struct posix_acl *acl, gfp_t flags)
* Check if an acl is valid. Returns 0 if it is, or -E... otherwise.
*/
int
-posix_acl_valid(const struct posix_acl *acl)
+posix_acl_valid(struct user_namespace *user_ns, const struct posix_acl *acl)
{
const struct posix_acl_entry *pa, *pe;
int state = ACL_USER_OBJ;
@@ -225,7 +225,7 @@ posix_acl_valid(const struct posix_acl *acl)
case ACL_USER:
if (state != ACL_USER)
return -EINVAL;
- if (!uid_valid(pa->e_uid))
+ if (!kuid_has_mapping(user_ns, pa->e_uid))
return -EINVAL;
needs_mask = 1;
break;
@@ -240,7 +240,7 @@ posix_acl_valid(const struct posix_acl *acl)
case ACL_GROUP:
if (state != ACL_GROUP)
return -EINVAL;
- if (!gid_valid(pa->e_gid))
+ if (!kgid_has_mapping(user_ns, pa->e_gid))
return -EINVAL;
needs_mask = 1;
break;
@@ -834,7 +834,7 @@ set_posix_acl(struct inode *inode, int type, struct posix_acl *acl)
return -EPERM;
if (acl) {
- int ret = posix_acl_valid(acl);
+ int ret = posix_acl_valid(inode->i_sb->s_user_ns, acl);
if (ret)
return ret;
}
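
The new check is stricter than plain validity: the id must also be representable in the filesystem's user namespace. A sketch of the difference (assumption: simplified from include/linux/uidgid.h):

/* old: merely not INVALID_UID */
static inline bool uid_valid_sketch(kuid_t uid)
{
	return __kuid_val(uid) != (uid_t) -1;
}

/* new: translatable into the given namespace */
static inline bool kuid_has_mapping_sketch(struct user_namespace *ns, kuid_t uid)
{
	return from_kuid(ns, uid) != (uid_t) -1;
}
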
diff --git a/fs/proc/Makefile b/fs/proc/Makefile
index 7151ea428041..12c6922c913c 100644
--- a/fs/proc/Makefile
+++ b/fs/proc/Makefile
@@ -4,6 +4,7 @@
obj-y += proc.o
+CFLAGS_task_mmu.o += $(call cc-option,-Wno-override-init,)
proc-y := nommu.o task_nommu.o
proc-$(CONFIG_MMU) := task_mmu.o
diff --git a/fs/proc/base.c b/fs/proc/base.c
index a11eb7196ec8..54e270262979 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -579,11 +579,8 @@ static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns,
unsigned long totalpages = totalram_pages + total_swap_pages;
unsigned long points = 0;
- read_lock(&tasklist_lock);
- if (pid_alive(task))
- points = oom_badness(task, NULL, NULL, totalpages) *
- 1000 / totalpages;
- read_unlock(&tasklist_lock);
+ points = oom_badness(task, NULL, NULL, totalpages) *
+ 1000 / totalpages;
seq_printf(m, "%lu\n", points);
return 0;
@@ -1024,23 +1021,107 @@ static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
char buffer[PROC_NUMBUF];
int oom_adj = OOM_ADJUST_MIN;
size_t len;
- unsigned long flags;
if (!task)
return -ESRCH;
- if (lock_task_sighand(task, &flags)) {
- if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MAX)
- oom_adj = OOM_ADJUST_MAX;
- else
- oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) /
- OOM_SCORE_ADJ_MAX;
- unlock_task_sighand(task, &flags);
- }
+ if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MAX)
+ oom_adj = OOM_ADJUST_MAX;
+ else
+ oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) /
+ OOM_SCORE_ADJ_MAX;
put_task_struct(task);
len = snprintf(buffer, sizeof(buffer), "%d\n", oom_adj);
return simple_read_from_buffer(buf, count, ppos, buffer, len);
}
+static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
+{
+ static DEFINE_MUTEX(oom_adj_mutex);
+ struct mm_struct *mm = NULL;
+ struct task_struct *task;
+ int err = 0;
+
+ task = get_proc_task(file_inode(file));
+ if (!task)
+ return -ESRCH;
+
+ mutex_lock(&oom_adj_mutex);
+ if (legacy) {
+ if (oom_adj < task->signal->oom_score_adj &&
+ !capable(CAP_SYS_RESOURCE)) {
+ err = -EACCES;
+ goto err_unlock;
+ }
+ /*
+ * /proc/pid/oom_adj is provided for legacy purposes, ask users to use
+ * /proc/pid/oom_score_adj instead.
+ */
+ pr_warn_once("%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
+ current->comm, task_pid_nr(current), task_pid_nr(task),
+ task_pid_nr(task));
+ } else {
+ if ((short)oom_adj < task->signal->oom_score_adj_min &&
+ !capable(CAP_SYS_RESOURCE)) {
+ err = -EACCES;
+ goto err_unlock;
+ }
+ }
+
+ /*
+ * Make sure we check other processes sharing the mm if this is
+ * not a vfork child, which wants its own oom_score_adj.
+ * Pin the mm so it doesn't go away and get reused after task_unlock().
+ */
+ if (!task->vfork_done) {
+ struct task_struct *p = find_lock_task_mm(task);
+
+ if (p) {
+ if (atomic_read(&p->mm->mm_users) > 1) {
+ mm = p->mm;
+ atomic_inc(&mm->mm_count);
+ }
+ task_unlock(p);
+ }
+ }
+
+ task->signal->oom_score_adj = oom_adj;
+ if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
+ task->signal->oom_score_adj_min = (short)oom_adj;
+ trace_oom_score_adj_update(task);
+
+ if (mm) {
+ struct task_struct *p;
+
+ rcu_read_lock();
+ for_each_process(p) {
+ if (same_thread_group(task, p))
+ continue;
+
+ /* do not touch kernel threads or the global init */
+ if (p->flags & PF_KTHREAD || is_global_init(p))
+ continue;
+
+ task_lock(p);
+ if (!p->vfork_done && process_shares_mm(p, mm)) {
+ pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). Report if this is unexpected.\n",
+ task_pid_nr(p), p->comm,
+ p->signal->oom_score_adj, oom_adj,
+ task_pid_nr(task), task->comm);
+ p->signal->oom_score_adj = oom_adj;
+ if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
+ p->signal->oom_score_adj_min = (short)oom_adj;
+ }
+ task_unlock(p);
+ }
+ rcu_read_unlock();
+ mmdrop(mm);
+ }
+err_unlock:
+ mutex_unlock(&oom_adj_mutex);
+ put_task_struct(task);
+ return err;
+}
+
/*
* /proc/pid/oom_adj exists solely for backwards compatibility with previous
* kernels. The effective policy is defined by oom_score_adj, which has a
@@ -1054,10 +1135,8 @@ static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
static ssize_t oom_adj_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct task_struct *task;
char buffer[PROC_NUMBUF];
int oom_adj;
- unsigned long flags;
int err;
memset(buffer, 0, sizeof(buffer));
@@ -1077,23 +1156,6 @@ static ssize_t oom_adj_write(struct file *file, const char __user *buf,
goto out;
}
- task = get_proc_task(file_inode(file));
- if (!task) {
- err = -ESRCH;
- goto out;
- }
-
- task_lock(task);
- if (!task->mm) {
- err = -EINVAL;
- goto err_task_lock;
- }
-
- if (!lock_task_sighand(task, &flags)) {
- err = -ESRCH;
- goto err_task_lock;
- }
-
/*
* Scale /proc/pid/oom_score_adj appropriately ensuring that a maximum
* value is always attainable.
@@ -1103,27 +1165,7 @@ static ssize_t oom_adj_write(struct file *file, const char __user *buf,
else
oom_adj = (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
- if (oom_adj < task->signal->oom_score_adj &&
- !capable(CAP_SYS_RESOURCE)) {
- err = -EACCES;
- goto err_sighand;
- }
-
- /*
- * /proc/pid/oom_adj is provided for legacy purposes, ask users to use
- * /proc/pid/oom_score_adj instead.
- */
- pr_warn_once("%s (%d): /proc/%d/oom_adj is deprecated, please use /proc/%d/oom_score_adj instead.\n",
- current->comm, task_pid_nr(current), task_pid_nr(task),
- task_pid_nr(task));
-
- task->signal->oom_score_adj = oom_adj;
- trace_oom_score_adj_update(task);
-err_sighand:
- unlock_task_sighand(task, &flags);
-err_task_lock:
- task_unlock(task);
- put_task_struct(task);
+ err = __set_oom_adj(file, oom_adj, true);
out:
return err < 0 ? err : count;
}
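
As a worked example of the scaling above, with OOM_DISABLE == -17 and OOM_SCORE_ADJ_MAX == 1000: writing 8 to /proc/pid/oom_adj yields 8 * 1000 / 17 = 470 in oom_score_adj, 15 (OOM_ADJUST_MAX) maps straight to 1000, and OOM_DISABLE itself maps to -1000.
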
@@ -1140,15 +1182,11 @@ static ssize_t oom_score_adj_read(struct file *file, char __user *buf,
struct task_struct *task = get_proc_task(file_inode(file));
char buffer[PROC_NUMBUF];
short oom_score_adj = OOM_SCORE_ADJ_MIN;
- unsigned long flags;
size_t len;
if (!task)
return -ESRCH;
- if (lock_task_sighand(task, &flags)) {
- oom_score_adj = task->signal->oom_score_adj;
- unlock_task_sighand(task, &flags);
- }
+ oom_score_adj = task->signal->oom_score_adj;
put_task_struct(task);
len = snprintf(buffer, sizeof(buffer), "%hd\n", oom_score_adj);
return simple_read_from_buffer(buf, count, ppos, buffer, len);
@@ -1157,9 +1195,7 @@ static ssize_t oom_score_adj_read(struct file *file, char __user *buf,
static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct task_struct *task;
char buffer[PROC_NUMBUF];
- unsigned long flags;
int oom_score_adj;
int err;
@@ -1180,39 +1216,7 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf,
goto out;
}
- task = get_proc_task(file_inode(file));
- if (!task) {
- err = -ESRCH;
- goto out;
- }
-
- task_lock(task);
- if (!task->mm) {
- err = -EINVAL;
- goto err_task_lock;
- }
-
- if (!lock_task_sighand(task, &flags)) {
- err = -ESRCH;
- goto err_task_lock;
- }
-
- if ((short)oom_score_adj < task->signal->oom_score_adj_min &&
- !capable(CAP_SYS_RESOURCE)) {
- err = -EACCES;
- goto err_sighand;
- }
-
- task->signal->oom_score_adj = (short)oom_score_adj;
- if (has_capability_noaudit(current, CAP_SYS_RESOURCE))
- task->signal->oom_score_adj_min = (short)oom_score_adj;
- trace_oom_score_adj_update(task);
-
-err_sighand:
- unlock_task_sighand(task, &flags);
-err_task_lock:
- task_unlock(task);
- put_task_struct(task);
+ err = __set_oom_adj(file, oom_score_adj, false);
out:
return err < 0 ? err : count;
}
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 42305ddcbaa0..c1b72388e571 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -457,17 +457,30 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
return inode;
}
-int proc_fill_super(struct super_block *s)
+int proc_fill_super(struct super_block *s, void *data, int silent)
{
+ struct pid_namespace *ns = get_pid_ns(s->s_fs_info);
struct inode *root_inode;
int ret;
+ if (!proc_parse_options(data, ns))
+ return -EINVAL;
+
+ /* User space would break if executables or devices appear on proc */
+ s->s_iflags |= SB_I_USERNS_VISIBLE | SB_I_NOEXEC | SB_I_NODEV;
s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
s->s_blocksize = 1024;
s->s_blocksize_bits = 10;
s->s_magic = PROC_SUPER_MAGIC;
s->s_op = &proc_sops;
s->s_time_gran = 1;
+
+ /*
+ * procfs isn't actually a stacking filesystem; however, there is
+ * too much magic going on inside it to permit stacking things on
+ * top of it
+ */
+ s->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
pde_get(&proc_root);
root_inode = proc_get_inode(s, &proc_root);
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index aa2781095bd1..7931c558c192 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -212,7 +212,7 @@ extern const struct inode_operations proc_pid_link_inode_operations;
extern void proc_init_inodecache(void);
extern struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
-extern int proc_fill_super(struct super_block *);
+extern int proc_fill_super(struct super_block *, void *data, int flags);
extern void proc_entry_rundown(struct proc_dir_entry *);
/*
@@ -268,6 +268,7 @@ static inline void proc_tty_init(void) {}
* root.c
*/
extern struct proc_dir_entry proc_root;
+extern int proc_parse_options(char *options, struct pid_namespace *pid);
extern void proc_self_init(void);
extern int proc_remount(struct super_block *, int *, char *);
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 83720460c5bc..b9a8c813e5e6 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -40,13 +40,13 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
si_swapinfo(&i);
committed = percpu_counter_read_positive(&vm_committed_as);
- cached = global_page_state(NR_FILE_PAGES) -
+ cached = global_node_page_state(NR_FILE_PAGES) -
total_swapcache_pages() - i.bufferram;
if (cached < 0)
cached = 0;
for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
- pages[lru] = global_page_state(NR_LRU_BASE + lru);
+ pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
available = si_mem_available();
@@ -105,6 +105,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
"AnonHugePages: %8lu kB\n"
+ "ShmemHugePages: %8lu kB\n"
+ "ShmemPmdMapped: %8lu kB\n"
#endif
#ifdef CONFIG_CMA
"CmaTotal: %8lu kB\n"
@@ -136,23 +138,23 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
#endif
K(i.totalswap),
K(i.freeswap),
- K(global_page_state(NR_FILE_DIRTY)),
- K(global_page_state(NR_WRITEBACK)),
- K(global_page_state(NR_ANON_PAGES)),
- K(global_page_state(NR_FILE_MAPPED)),
+ K(global_node_page_state(NR_FILE_DIRTY)),
+ K(global_node_page_state(NR_WRITEBACK)),
+ K(global_node_page_state(NR_ANON_MAPPED)),
+ K(global_node_page_state(NR_FILE_MAPPED)),
K(i.sharedram),
K(global_page_state(NR_SLAB_RECLAIMABLE) +
global_page_state(NR_SLAB_UNRECLAIMABLE)),
K(global_page_state(NR_SLAB_RECLAIMABLE)),
K(global_page_state(NR_SLAB_UNRECLAIMABLE)),
- global_page_state(NR_KERNEL_STACK) * THREAD_SIZE / 1024,
+ global_page_state(NR_KERNEL_STACK_KB),
K(global_page_state(NR_PAGETABLE)),
#ifdef CONFIG_QUICKLIST
K(quicklist_total_size()),
#endif
- K(global_page_state(NR_UNSTABLE_NFS)),
+ K(global_node_page_state(NR_UNSTABLE_NFS)),
K(global_page_state(NR_BOUNCE)),
- K(global_page_state(NR_WRITEBACK_TEMP)),
+ K(global_node_page_state(NR_WRITEBACK_TEMP)),
K(vm_commit_limit()),
K(committed),
(unsigned long)VMALLOC_TOTAL >> 10,
@@ -162,8 +164,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
, atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- , K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
- HPAGE_PMD_NR)
+ , K(global_node_page_state(NR_ANON_THPS) * HPAGE_PMD_NR)
+ , K(global_node_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR)
+ , K(global_node_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR)
#endif
#ifdef CONFIG_CMA
, K(totalcma_pages)
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 5e57c3e46e1d..1b93650dda2f 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -474,7 +474,7 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
{
struct ctl_table_header *head = grab_header(dir);
struct ctl_table_header *h = NULL;
- struct qstr *name = &dentry->d_name;
+ const struct qstr *name = &dentry->d_name;
struct ctl_table *p;
struct inode *inode;
struct dentry *err = ERR_PTR(-ENOENT);
@@ -623,7 +623,7 @@ static bool proc_sys_fill_cache(struct file *file,
qname.name = table->procname;
qname.len = strlen(table->procname);
- qname.hash = full_name_hash(qname.name, qname.len);
+ qname.hash = full_name_hash(dir, qname.name, qname.len);
child = d_lookup(dir, &qname);
if (!child) {
@@ -834,7 +834,7 @@ static int sysctl_is_seen(struct ctl_table_header *p)
return res;
}
-static int proc_sys_compare(const struct dentry *parent, const struct dentry *dentry,
+static int proc_sys_compare(const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
struct ctl_table_header *head;
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 06702783bf40..8d3e484055a6 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -23,21 +23,6 @@
#include "internal.h"
-static int proc_test_super(struct super_block *sb, void *data)
-{
- return sb->s_fs_info == data;
-}
-
-static int proc_set_super(struct super_block *sb, void *data)
-{
- int err = set_anon_super(sb, NULL);
- if (!err) {
- struct pid_namespace *ns = (struct pid_namespace *)data;
- sb->s_fs_info = get_pid_ns(ns);
- }
- return err;
-}
-
enum {
Opt_gid, Opt_hidepid, Opt_err,
};
@@ -48,7 +33,7 @@ static const match_table_t tokens = {
{Opt_err, NULL},
};
-static int proc_parse_options(char *options, struct pid_namespace *pid)
+int proc_parse_options(char *options, struct pid_namespace *pid)
{
char *p;
substring_t args[MAX_OPT_ARGS];
@@ -100,52 +85,16 @@ int proc_remount(struct super_block *sb, int *flags, char *data)
static struct dentry *proc_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
- int err;
- struct super_block *sb;
struct pid_namespace *ns;
- char *options;
if (flags & MS_KERNMOUNT) {
- ns = (struct pid_namespace *)data;
- options = NULL;
+ ns = data;
+ data = NULL;
} else {
ns = task_active_pid_ns(current);
- options = data;
-
- /* Does the mounter have privilege over the pid namespace? */
- if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
- return ERR_PTR(-EPERM);
- }
-
- sb = sget(fs_type, proc_test_super, proc_set_super, flags, ns);
- if (IS_ERR(sb))
- return ERR_CAST(sb);
-
- /*
- * procfs isn't actually a stacking filesystem; however, there is
- * too much magic going on inside it to permit stacking things on
- * top of it
- */
- sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
-
- if (!proc_parse_options(options, ns)) {
- deactivate_locked_super(sb);
- return ERR_PTR(-EINVAL);
- }
-
- if (!sb->s_root) {
- err = proc_fill_super(sb);
- if (err) {
- deactivate_locked_super(sb);
- return ERR_PTR(err);
- }
-
- sb->s_flags |= MS_ACTIVE;
- /* User space would break if executables appear on proc */
- sb->s_iflags |= SB_I_NOEXEC;
}
- return dget(sb->s_root);
+ return mount_ns(fs_type, flags, data, ns, ns->user_ns, proc_fill_super);
}
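
The sget()/parse/fill boilerplate removed above now lives in the generic mount_ns() helper, which keys the superblock on the namespace pointer. Its signature in this series is (assumption, from fs/super.c):

struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
			void *data, void *ns, struct user_namespace *user_ns,
			int (*fill_super)(struct super_block *, void *, int));
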
static void proc_kill_sb(struct super_block *sb)
@@ -165,7 +114,7 @@ static struct file_system_type proc_fs_type = {
.name = "proc",
.mount = proc_mount,
.kill_sb = proc_kill_sb,
- .fs_flags = FS_USERNS_VISIBLE | FS_USERNS_MOUNT,
+ .fs_flags = FS_USERNS_MOUNT,
};
void __init proc_root_init(void)
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 510413eb25b8..7907e456ac4f 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -80,19 +80,17 @@ static u64 get_iowait_time(int cpu)
static int show_stat(struct seq_file *p, void *v)
{
int i, j;
- unsigned long jif;
u64 user, nice, system, idle, iowait, irq, softirq, steal;
u64 guest, guest_nice;
u64 sum = 0;
u64 sum_softirq = 0;
unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
- struct timespec boottime;
+ struct timespec64 boottime;
user = nice = system = idle = iowait =
irq = softirq = steal = 0;
guest = guest_nice = 0;
- getboottime(&boottime);
- jif = boottime.tv_sec;
+ getboottime64(&boottime);
for_each_possible_cpu(i) {
user += kcpustat_cpu(i).cpustat[CPUTIME_USER];
@@ -163,12 +161,12 @@ static int show_stat(struct seq_file *p, void *v)
seq_printf(p,
"\nctxt %llu\n"
- "btime %lu\n"
+ "btime %llu\n"
"processes %lu\n"
"procs_running %lu\n"
"procs_blocked %lu\n",
nr_context_switches(),
- (unsigned long)jif,
+ (unsigned long long)boottime.tv_sec,
total_forks,
nr_running(),
nr_iowait());
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 4648c7f63ae2..187d84ef9de9 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -448,6 +448,7 @@ struct mem_size_stats {
unsigned long referenced;
unsigned long anonymous;
unsigned long anonymous_thp;
+ unsigned long shmem_thp;
unsigned long swap;
unsigned long shared_hugetlb;
unsigned long private_hugetlb;
@@ -576,7 +577,12 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
if (IS_ERR_OR_NULL(page))
return;
- mss->anonymous_thp += HPAGE_PMD_SIZE;
+ if (PageAnon(page))
+ mss->anonymous_thp += HPAGE_PMD_SIZE;
+ else if (PageSwapBacked(page))
+ mss->shmem_thp += HPAGE_PMD_SIZE;
+ else
+ VM_BUG_ON_PAGE(1, page);
smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
}
#else
@@ -770,6 +776,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
"Referenced: %8lu kB\n"
"Anonymous: %8lu kB\n"
"AnonHugePages: %8lu kB\n"
+ "ShmemPmdMapped: %8lu kB\n"
"Shared_Hugetlb: %8lu kB\n"
"Private_Hugetlb: %7lu kB\n"
"Swap: %8lu kB\n"
@@ -787,6 +794,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
mss.referenced >> 10,
mss.anonymous >> 10,
mss.anonymous_thp >> 10,
+ mss.shmem_thp >> 10,
mss.shared_hugetlb >> 10,
mss.private_hugetlb >> 10,
mss.swap >> 10,
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
index 360ae43f590c..be40813eff52 100644
--- a/fs/pstore/Kconfig
+++ b/fs/pstore/Kconfig
@@ -1,8 +1,6 @@
config PSTORE
tristate "Persistent store support"
default n
- select ZLIB_DEFLATE
- select ZLIB_INFLATE
help
This option enables generic access to platform level
persistent storage via "pstore" filesystem that can
@@ -14,6 +12,35 @@ config PSTORE
If you don't have a platform persistent store driver,
say N.
+choice
+ prompt "Choose compression algorithm"
+ depends on PSTORE
+ default PSTORE_ZLIB_COMPRESS
+ help
+ This option chooses compression algorithm.
+
+config PSTORE_ZLIB_COMPRESS
+ bool "ZLIB"
+ select ZLIB_DEFLATE
+ select ZLIB_INFLATE
+ help
+ This option enables ZLIB compression algorithm support.
+
+config PSTORE_LZO_COMPRESS
+ bool "LZO"
+ select LZO_COMPRESS
+ select LZO_DECOMPRESS
+ help
+ This option enables LZO compression algorithm support.
+
+config PSTORE_LZ4_COMPRESS
+ bool "LZ4"
+ select LZ4_COMPRESS
+ select LZ4_DECOMPRESS
+ help
+ This option enables LZ4 compression algorithm support.
+endchoice
+
config PSTORE_CONSOLE
bool "Log kernel console messages"
depends on PSTORE
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 45d6110744cb..ec9ddef5ae75 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -178,7 +178,6 @@ static loff_t pstore_file_llseek(struct file *file, loff_t off, int whence)
}
static const struct file_operations pstore_file_operations = {
- .owner = THIS_MODULE,
.open = pstore_file_open,
.read = pstore_file_read,
.llseek = pstore_file_llseek,
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 588461bb2dd4..16ecca5b72d8 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -28,7 +28,15 @@
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pstore.h>
+#ifdef CONFIG_PSTORE_ZLIB_COMPRESS
#include <linux/zlib.h>
+#endif
+#ifdef CONFIG_PSTORE_LZO_COMPRESS
+#include <linux/lzo.h>
+#endif
+#ifdef CONFIG_PSTORE_LZ4_COMPRESS
+#include <linux/lz4.h>
+#endif
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
@@ -69,10 +77,23 @@ struct pstore_info *psinfo;
static char *backend;
/* Compression parameters */
+#ifdef CONFIG_PSTORE_ZLIB_COMPRESS
#define COMPR_LEVEL 6
#define WINDOW_BITS 12
#define MEM_LEVEL 4
static struct z_stream_s stream;
+#else
+static unsigned char *workspace;
+#endif
+
+struct pstore_zbackend {
+ int (*compress)(const void *in, void *out, size_t inlen, size_t outlen);
+ int (*decompress)(void *in, void *out, size_t inlen, size_t outlen);
+ void (*allocate)(void);
+ void (*free)(void);
+
+ const char *name;
+};
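
Each compression algorithm fills in one of these vtables, and exactly one is selected at build time via the Kconfig choice above. A new backend would plug in the same way (hypothetical names):

static struct pstore_zbackend backend_foo = {
	.compress	= compress_foo,
	.decompress	= decompress_foo,
	.allocate	= allocate_foo,
	.free		= free_foo,
	.name		= "foo",
};
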
static char *big_oops_buf;
static size_t big_oops_buf_sz;
@@ -129,9 +150,9 @@ bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
}
EXPORT_SYMBOL_GPL(pstore_cannot_block_path);
+#ifdef CONFIG_PSTORE_ZLIB_COMPRESS
/* Derived from logfs_compress() */
-static int pstore_compress(const void *in, void *out, size_t inlen,
- size_t outlen)
+static int compress_zlib(const void *in, void *out, size_t inlen, size_t outlen)
{
int err, ret;
@@ -165,7 +186,7 @@ error:
}
/* Derived from logfs_uncompress */
-static int pstore_decompress(void *in, void *out, size_t inlen, size_t outlen)
+static int decompress_zlib(void *in, void *out, size_t inlen, size_t outlen)
{
int err, ret;
@@ -194,7 +215,7 @@ error:
return ret;
}
-static void allocate_buf_for_compression(void)
+static void allocate_zlib(void)
{
size_t size;
size_t cmpr;
@@ -237,12 +258,190 @@ static void allocate_buf_for_compression(void)
}
-static void free_buf_for_compression(void)
+static void free_zlib(void)
{
kfree(stream.workspace);
stream.workspace = NULL;
kfree(big_oops_buf);
big_oops_buf = NULL;
+ big_oops_buf_sz = 0;
+}
+
+static struct pstore_zbackend backend_zlib = {
+ .compress = compress_zlib,
+ .decompress = decompress_zlib,
+ .allocate = allocate_zlib,
+ .free = free_zlib,
+ .name = "zlib",
+};
+#endif
+
+#ifdef CONFIG_PSTORE_LZO_COMPRESS
+static int compress_lzo(const void *in, void *out, size_t inlen, size_t outlen)
+{
+ int ret;
+
+ ret = lzo1x_1_compress(in, inlen, out, &outlen, workspace);
+ if (ret != LZO_E_OK) {
+ pr_err("lzo_compress error, ret = %d!\n", ret);
+ return -EIO;
+ }
+
+ return outlen;
+}
+
+static int decompress_lzo(void *in, void *out, size_t inlen, size_t outlen)
+{
+ int ret;
+
+ ret = lzo1x_decompress_safe(in, inlen, out, &outlen);
+ if (ret != LZO_E_OK) {
+ pr_err("lzo_decompress error, ret = %d!\n", ret);
+ return -EIO;
+ }
+
+ return outlen;
+}
+
+static void allocate_lzo(void)
+{
+ big_oops_buf_sz = lzo1x_worst_compress(psinfo->bufsize);
+ big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
+ if (big_oops_buf) {
+ workspace = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
+ if (!workspace) {
+ pr_err("No memory for compression workspace; skipping compression\n");
+ kfree(big_oops_buf);
+ big_oops_buf = NULL;
+ }
+ } else {
+ pr_err("No memory for uncompressed data; skipping compression\n");
+ workspace = NULL;
+ }
+}
+
+static void free_lzo(void)
+{
+ kfree(workspace);
+ kfree(big_oops_buf);
+ big_oops_buf = NULL;
+ big_oops_buf_sz = 0;
+}
+
+static struct pstore_zbackend backend_lzo = {
+ .compress = compress_lzo,
+ .decompress = decompress_lzo,
+ .allocate = allocate_lzo,
+ .free = free_lzo,
+ .name = "lzo",
+};
+#endif
+
+#ifdef CONFIG_PSTORE_LZ4_COMPRESS
+static int compress_lz4(const void *in, void *out, size_t inlen, size_t outlen)
+{
+ int ret;
+
+ ret = lz4_compress(in, inlen, out, &outlen, workspace);
+ if (ret) {
+ pr_err("lz4_compress error, ret = %d!\n", ret);
+ return -EIO;
+ }
+
+ return outlen;
+}
+
+static int decompress_lz4(void *in, void *out, size_t inlen, size_t outlen)
+{
+ int ret;
+
+ ret = lz4_decompress_unknownoutputsize(in, inlen, out, &outlen);
+ if (ret) {
+ pr_err("lz4_decompress error, ret = %d!\n", ret);
+ return -EIO;
+ }
+
+ return outlen;
+}
+
+static void allocate_lz4(void)
+{
+ big_oops_buf_sz = lz4_compressbound(psinfo->bufsize);
+ big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
+ if (big_oops_buf) {
+ workspace = kmalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
+ if (!workspace) {
+ pr_err("No memory for compression workspace; skipping compression\n");
+ kfree(big_oops_buf);
+ big_oops_buf = NULL;
+ }
+ } else {
+ pr_err("No memory for uncompressed data; skipping compression\n");
+ workspace = NULL;
+ }
+}
+
+static void free_lz4(void)
+{
+ kfree(workspace);
+ kfree(big_oops_buf);
+ big_oops_buf = NULL;
+ big_oops_buf_sz = 0;
+}
+
+static struct pstore_zbackend backend_lz4 = {
+ .compress = compress_lz4,
+ .decompress = decompress_lz4,
+ .allocate = allocate_lz4,
+ .free = free_lz4,
+ .name = "lz4",
+};
+#endif
+
+static struct pstore_zbackend *zbackend =
+#if defined(CONFIG_PSTORE_ZLIB_COMPRESS)
+ &backend_zlib;
+#elif defined(CONFIG_PSTORE_LZO_COMPRESS)
+ &backend_lzo;
+#elif defined(CONFIG_PSTORE_LZ4_COMPRESS)
+ &backend_lz4;
+#else
+ NULL;
+#endif
+
+static int pstore_compress(const void *in, void *out,
+ size_t inlen, size_t outlen)
+{
+ if (zbackend)
+ return zbackend->compress(in, out, inlen, outlen);
+ else
+ return -EIO;
+}
+
+static int pstore_decompress(void *in, void *out, size_t inlen, size_t outlen)
+{
+ if (zbackend)
+ return zbackend->decompress(in, out, inlen, outlen);
+ else
+ return -EIO;
+}
+
+static void allocate_buf_for_compression(void)
+{
+ if (zbackend) {
+ pr_info("using %s compression\n", zbackend->name);
+ zbackend->allocate();
+ } else {
+ pr_err("allocate compression buffer error!\n");
+ }
+}
+
+static void free_buf_for_compression(void)
+{
+ if (zbackend)
+ zbackend->free();
+ else
+ pr_err("free compression buffer error!\n");
}
/*
@@ -284,7 +483,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
u64 id;
unsigned int part = 1;
unsigned long flags = 0;
- int is_locked = 0;
+ int is_locked;
int ret;
why = get_reason_str(reason);
@@ -295,8 +494,10 @@ static void pstore_dump(struct kmsg_dumper *dumper,
pr_err("pstore dump routine blocked in %s path, may corrupt error record\n"
, in_nmi() ? "NMI" : why);
}
- } else
+ } else {
spin_lock_irqsave(&psinfo->buf_lock, flags);
+ is_locked = 1;
+ }
oopscount++;
while (total < kmsg_bytes) {
char *dst;
@@ -304,19 +505,25 @@ static void pstore_dump(struct kmsg_dumper *dumper,
int hsize;
int zipped_len = -1;
size_t len;
- bool compressed;
+ bool compressed = false;
size_t total_len;
if (big_oops_buf && is_locked) {
dst = big_oops_buf;
- hsize = sprintf(dst, "%s#%d Part%u\n", why,
- oopscount, part);
- size = big_oops_buf_sz - hsize;
+ size = big_oops_buf_sz;
+ } else {
+ dst = psinfo->buf;
+ size = psinfo->bufsize;
+ }
- if (!kmsg_dump_get_buffer(dumper, true, dst + hsize,
- size, &len))
- break;
+ hsize = sprintf(dst, "%s#%d Part%u\n", why, oopscount, part);
+ size -= hsize;
+
+ if (!kmsg_dump_get_buffer(dumper, true, dst + hsize,
+ size, &len))
+ break;
+ if (big_oops_buf && is_locked) {
zipped_len = pstore_compress(dst, psinfo->buf,
hsize + len, psinfo->bufsize);
@@ -324,21 +531,9 @@ static void pstore_dump(struct kmsg_dumper *dumper,
compressed = true;
total_len = zipped_len;
} else {
- compressed = false;
total_len = copy_kmsg_to_buffer(hsize, len);
}
} else {
- dst = psinfo->buf;
- hsize = sprintf(dst, "%s#%d Part%u\n", why, oopscount,
- part);
- size = psinfo->bufsize - hsize;
- dst += hsize;
-
- if (!kmsg_dump_get_buffer(dumper, true, dst,
- size, &len))
- break;
-
- compressed = false;
total_len = hsize + len;
}
@@ -350,10 +545,7 @@ static void pstore_dump(struct kmsg_dumper *dumper,
total += total_len;
part++;
}
- if (pstore_cannot_block_path(reason)) {
- if (is_locked)
- spin_unlock_irqrestore(&psinfo->buf_lock, flags);
- } else
+ if (is_locked)
spin_unlock_irqrestore(&psinfo->buf_lock, flags);
}
@@ -497,9 +689,11 @@ EXPORT_SYMBOL_GPL(pstore_register);
void pstore_unregister(struct pstore_info *psi)
{
- pstore_unregister_pmsg();
- pstore_unregister_ftrace();
- pstore_unregister_console();
+ if ((psi->flags & PSTORE_FLAGS_FRAGILE) == 0) {
+ pstore_unregister_pmsg();
+ pstore_unregister_ftrace();
+ pstore_unregister_console();
+ }
pstore_unregister_kmsg();
free_buf_for_compression();
@@ -527,6 +721,7 @@ void pstore_get_records(int quiet)
int failed = 0, rc;
bool compressed;
int unzipped_len = -1;
+ ssize_t ecc_notice_size = 0;
if (!psi)
return;
@@ -536,7 +731,7 @@ void pstore_get_records(int quiet)
goto out;
while ((size = psi->read(&id, &type, &count, &time, &buf, &compressed,
- psi)) > 0) {
+ &ecc_notice_size, psi)) > 0) {
if (compressed && (type == PSTORE_TYPE_DMESG)) {
if (big_oops_buf)
unzipped_len = pstore_decompress(buf,
@@ -544,6 +739,9 @@ void pstore_get_records(int quiet)
big_oops_buf_sz);
if (unzipped_len > 0) {
+ if (ecc_notice_size)
+ memcpy(big_oops_buf + unzipped_len,
+ buf + size, ecc_notice_size);
kfree(buf);
buf = big_oops_buf;
size = unzipped_len;
@@ -555,7 +753,8 @@ void pstore_get_records(int quiet)
}
}
rc = pstore_mkfile(type, psi->name, id, count, buf,
- compressed, (size_t)size, time, psi);
+ compressed, size + ecc_notice_size,
+ time, psi);
if (unzipped_len < 0) {
/* Free buffer other than big oops */
kfree(buf);
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index bd9812e83461..7a034d62cf8c 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -34,6 +34,8 @@
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/pstore_ram.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#define RAMOOPS_KERNMSG_HDR "===="
#define MIN_MEM_SIZE 4096UL
@@ -181,10 +183,10 @@ static bool prz_ok(struct persistent_ram_zone *prz)
static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
int *count, struct timespec *time,
char **buf, bool *compressed,
+ ssize_t *ecc_notice_size,
struct pstore_info *psi)
{
ssize_t size;
- ssize_t ecc_notice_size;
struct ramoops_context *cxt = psi->data;
struct persistent_ram_zone *prz = NULL;
int header_length = 0;
@@ -229,16 +231,16 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
size = persistent_ram_old_size(prz) - header_length;
/* ECC correction notice */
- ecc_notice_size = persistent_ram_ecc_string(prz, NULL, 0);
+ *ecc_notice_size = persistent_ram_ecc_string(prz, NULL, 0);
- *buf = kmalloc(size + ecc_notice_size + 1, GFP_KERNEL);
+ *buf = kmalloc(size + *ecc_notice_size + 1, GFP_KERNEL);
if (*buf == NULL)
return -ENOMEM;
memcpy(*buf, (char *)persistent_ram_old(prz) + header_length, size);
- persistent_ram_ecc_string(prz, *buf + size, ecc_notice_size + 1);
+ persistent_ram_ecc_string(prz, *buf + size, *ecc_notice_size + 1);
- return size + ecc_notice_size;
+ return size;
}
static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz,
@@ -458,15 +460,89 @@ static int ramoops_init_prz(struct device *dev, struct ramoops_context *cxt,
return 0;
}
+static int ramoops_parse_dt_size(struct platform_device *pdev,
+ const char *propname, u32 *value)
+{
+ u32 val32 = 0;
+ int ret;
+
+ ret = of_property_read_u32(pdev->dev.of_node, propname, &val32);
+ if (ret < 0 && ret != -EINVAL) {
+ dev_err(&pdev->dev, "failed to parse property %s: %d\n",
+ propname, ret);
+ return ret;
+ }
+
+ if (val32 > INT_MAX) {
+ dev_err(&pdev->dev, "%s %u > INT_MAX\n", propname, val32);
+ return -EOVERFLOW;
+ }
+
+ *value = val32;
+ return 0;
+}
+
+static int ramoops_parse_dt(struct platform_device *pdev,
+ struct ramoops_platform_data *pdata)
+{
+ struct device_node *of_node = pdev->dev.of_node;
+ struct resource *res;
+ u32 value;
+ int ret;
+
+ dev_dbg(&pdev->dev, "using Device Tree\n");
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev,
+ "failed to locate DT /reserved-memory resource\n");
+ return -EINVAL;
+ }
+
+ pdata->mem_size = resource_size(res);
+ pdata->mem_address = res->start;
+ pdata->mem_type = of_property_read_bool(of_node, "unbuffered");
+ pdata->dump_oops = !of_property_read_bool(of_node, "no-dump-oops");
+
+#define parse_size(name, field) { \
+ ret = ramoops_parse_dt_size(pdev, name, &value); \
+ if (ret < 0) \
+ return ret; \
+ field = value; \
+ }
+
+ parse_size("record-size", pdata->record_size);
+ parse_size("console-size", pdata->console_size);
+ parse_size("ftrace-size", pdata->ftrace_size);
+ parse_size("pmsg-size", pdata->pmsg_size);
+ parse_size("ecc-size", pdata->ecc_info.ecc_size);
+
+#undef parse_size
+
+ return 0;
+}
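
For a single property, the parse_size() macro above expands to roughly:

ret = ramoops_parse_dt_size(pdev, "record-size", &value);
if (ret < 0)
	return ret;
pdata->record_size = value;
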
+
static int ramoops_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct ramoops_platform_data *pdata = pdev->dev.platform_data;
+ struct ramoops_platform_data *pdata = dev->platform_data;
struct ramoops_context *cxt = &oops_cxt;
size_t dump_mem_sz;
phys_addr_t paddr;
int err = -EINVAL;
+ if (dev_of_node(dev) && !pdata) {
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ err = -ENOMEM;
+ goto fail_out;
+ }
+
+ err = ramoops_parse_dt(pdev, pdata);
+ if (err < 0)
+ goto fail_out;
+ }
+
/* Only a single ramoops area allowed at a time, so fail extra
* probes.
*/
@@ -567,11 +643,11 @@ fail_buf:
kfree(cxt->pstore.buf);
fail_clear:
cxt->pstore.bufsize = 0;
- kfree(cxt->mprz);
+ persistent_ram_free(cxt->mprz);
fail_init_mprz:
- kfree(cxt->fprz);
+ persistent_ram_free(cxt->fprz);
fail_init_fprz:
- kfree(cxt->cprz);
+ persistent_ram_free(cxt->cprz);
fail_init_cprz:
ramoops_free_przs(cxt);
fail_out:
@@ -596,11 +672,17 @@ static int ramoops_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id dt_match[] = {
+ { .compatible = "ramoops" },
+ {}
+};
+
static struct platform_driver ramoops_driver = {
.probe = ramoops_probe,
.remove = ramoops_remove,
.driver = {
- .name = "ramoops",
+ .name = "ramoops",
+ .of_match_table = dt_match,
},
};
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index ff21980d0119..1bfac28b7e7d 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -841,6 +841,9 @@ struct dquot *dqget(struct super_block *sb, struct kqid qid)
unsigned int hashent = hashfn(sb, qid);
struct dquot *dquot, *empty = NULL;
+ if (!qid_has_mapping(sb->s_user_ns, qid))
+ return ERR_PTR(-EINVAL);
+
if (!sb_has_quota_active(sb, qid.type))
return ERR_PTR(-ESRCH);
we_slept:
@@ -1133,7 +1136,7 @@ static void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
else
dquot->dq_dqb.dqb_curinodes = 0;
if (dquot->dq_dqb.dqb_curinodes <= dquot->dq_dqb.dqb_isoftlimit)
- dquot->dq_dqb.dqb_itime = (time_t) 0;
+ dquot->dq_dqb.dqb_itime = (time64_t) 0;
clear_bit(DQ_INODES_B, &dquot->dq_flags);
}
@@ -1145,7 +1148,7 @@ static void dquot_decr_space(struct dquot *dquot, qsize_t number)
else
dquot->dq_dqb.dqb_curspace = 0;
if (dquot->dq_dqb.dqb_curspace <= dquot->dq_dqb.dqb_bsoftlimit)
- dquot->dq_dqb.dqb_btime = (time_t) 0;
+ dquot->dq_dqb.dqb_btime = (time64_t) 0;
clear_bit(DQ_BLKS_B, &dquot->dq_flags);
}
@@ -1292,7 +1295,7 @@ static int check_idq(struct dquot *dquot, qsize_t inodes,
if (dquot->dq_dqb.dqb_isoftlimit &&
newinodes > dquot->dq_dqb.dqb_isoftlimit &&
dquot->dq_dqb.dqb_itime &&
- get_seconds() >= dquot->dq_dqb.dqb_itime &&
+ ktime_get_real_seconds() >= dquot->dq_dqb.dqb_itime &&
!ignore_hardlimit(dquot)) {
prepare_warning(warn, dquot, QUOTA_NL_ISOFTLONGWARN);
return -EDQUOT;
@@ -1302,7 +1305,7 @@ static int check_idq(struct dquot *dquot, qsize_t inodes,
newinodes > dquot->dq_dqb.dqb_isoftlimit &&
dquot->dq_dqb.dqb_itime == 0) {
prepare_warning(warn, dquot, QUOTA_NL_ISOFTWARN);
- dquot->dq_dqb.dqb_itime = get_seconds() +
+ dquot->dq_dqb.dqb_itime = ktime_get_real_seconds() +
sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type].dqi_igrace;
}
@@ -1334,7 +1337,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc,
if (dquot->dq_dqb.dqb_bsoftlimit &&
tspace > dquot->dq_dqb.dqb_bsoftlimit &&
dquot->dq_dqb.dqb_btime &&
- get_seconds() >= dquot->dq_dqb.dqb_btime &&
+ ktime_get_real_seconds() >= dquot->dq_dqb.dqb_btime &&
!ignore_hardlimit(dquot)) {
if (!prealloc)
prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN);
@@ -1346,7 +1349,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc,
dquot->dq_dqb.dqb_btime == 0) {
if (!prealloc) {
prepare_warning(warn, dquot, QUOTA_NL_BSOFTWARN);
- dquot->dq_dqb.dqb_btime = get_seconds() +
+ dquot->dq_dqb.dqb_btime = ktime_get_real_seconds() +
sb_dqopt(sb)->info[dquot->dq_id.type].dqi_bgrace;
}
else
@@ -2268,6 +2271,11 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
error = -EINVAL;
goto out_fmt;
}
+ /* Filesystems outside of init_user_ns not yet supported */
+ if (sb->s_user_ns != &init_user_ns) {
+ error = -EINVAL;
+ goto out_fmt;
+ }
/* Usage always has to be set... */
if (!(flags & DQUOT_USAGE_ENABLED)) {
error = -EINVAL;
@@ -2695,7 +2703,7 @@ static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
clear_bit(DQ_BLKS_B, &dquot->dq_flags);
} else if (!(di->d_fieldmask & QC_SPC_TIMER))
/* Set grace only if user hasn't provided his own... */
- dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
+ dm->dqb_btime = ktime_get_real_seconds() + dqi->dqi_bgrace;
}
if (check_ilim) {
if (!dm->dqb_isoftlimit ||
@@ -2704,7 +2712,7 @@ static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
clear_bit(DQ_INODES_B, &dquot->dq_flags);
} else if (!(di->d_fieldmask & QC_INO_TIMER))
/* Set grace only if user hasn't provided his own... */
- dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
+ dm->dqb_itime = ktime_get_real_seconds() + dqi->dqi_igrace;
}
if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit ||
dm->dqb_isoftlimit)
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 0f10ee9892ce..35df08ee9c97 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -211,7 +211,7 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
if (!sb->s_qcop->get_dqblk)
return -ENOSYS;
qid = make_kqid(current_user_ns(), type, id);
- if (!qid_valid(qid))
+ if (!qid_has_mapping(sb->s_user_ns, qid))
return -EINVAL;
ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
if (ret)
@@ -237,7 +237,7 @@ static int quota_getnextquota(struct super_block *sb, int type, qid_t id,
if (!sb->s_qcop->get_nextdqblk)
return -ENOSYS;
qid = make_kqid(current_user_ns(), type, id);
- if (!qid_valid(qid))
+ if (!qid_has_mapping(sb->s_user_ns, qid))
return -EINVAL;
ret = sb->s_qcop->get_nextdqblk(sb, &qid, &fdq);
if (ret)
@@ -288,7 +288,7 @@ static int quota_setquota(struct super_block *sb, int type, qid_t id,
if (!sb->s_qcop->set_dqblk)
return -ENOSYS;
qid = make_kqid(current_user_ns(), type, id);
- if (!qid_valid(qid))
+ if (!qid_has_mapping(sb->s_user_ns, qid))
return -EINVAL;
copy_from_if_dqblk(&fdq, &idq);
return sb->s_qcop->set_dqblk(sb, qid, &fdq);
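
As with the posix_acl changes earlier, a quota id is now usable only if it maps into the superblock's user namespace. A sketch of the helper (assumption: simplified from include/linux/quota.h):

static inline bool qid_has_mapping_sketch(struct user_namespace *ns, struct kqid qid)
{
	return from_kqid(ns, qid) != (qid_t) -1;
}
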
@@ -581,10 +581,10 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id,
if (!sb->s_qcop->set_dqblk)
return -ENOSYS;
qid = make_kqid(current_user_ns(), type, id);
- if (!qid_valid(qid))
+ if (!qid_has_mapping(sb->s_user_ns, qid))
return -EINVAL;
/* Are we actually setting timer / warning limits for all users? */
- if (from_kqid(&init_user_ns, qid) == 0 &&
+ if (from_kqid(sb->s_user_ns, qid) == 0 &&
fdq.d_fieldmask & (FS_DQ_WARNS_MASK | FS_DQ_TIMER_MASK)) {
struct qc_info qinfo;
int ret;
@@ -642,7 +642,7 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
if (!sb->s_qcop->get_dqblk)
return -ENOSYS;
qid = make_kqid(current_user_ns(), type, id);
- if (!qid_valid(qid))
+ if (!qid_has_mapping(sb->s_user_ns, qid))
return -EINVAL;
ret = sb->s_qcop->get_dqblk(sb, qid, &qdq);
if (ret)
@@ -669,7 +669,7 @@ static int quota_getnextxquota(struct super_block *sb, int type, qid_t id,
if (!sb->s_qcop->get_nextdqblk)
return -ENOSYS;
qid = make_kqid(current_user_ns(), type, id);
- if (!qid_valid(qid))
+ if (!qid_has_mapping(sb->s_user_ns, qid))
return -EINVAL;
ret = sb->s_qcop->get_nextdqblk(sb, &qid, &qdq);
if (ret)
diff --git a/fs/read_write.c b/fs/read_write.c
index 933b53a375b4..66215a7b17cf 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1168,6 +1168,15 @@ COMPAT_SYSCALL_DEFINE5(preadv, compat_ulong_t, fd,
return do_compat_preadv64(fd, vec, vlen, pos, 0);
}
+#ifdef __ARCH_WANT_COMPAT_SYS_PREADV64V2
+COMPAT_SYSCALL_DEFINE5(preadv64v2, unsigned long, fd,
+ const struct compat_iovec __user *,vec,
+ unsigned long, vlen, loff_t, pos, int, flags)
+{
+ return do_compat_preadv64(fd, vec, vlen, pos, flags);
+}
+#endif
+
COMPAT_SYSCALL_DEFINE6(preadv2, compat_ulong_t, fd,
const struct compat_iovec __user *,vec,
compat_ulong_t, vlen, u32, pos_low, u32, pos_high,
@@ -1265,6 +1274,15 @@ COMPAT_SYSCALL_DEFINE5(pwritev, compat_ulong_t, fd,
return do_compat_pwritev64(fd, vec, vlen, pos, 0);
}
+#ifdef __ARCH_WANT_COMPAT_SYS_PWRITEV64V2
+COMPAT_SYSCALL_DEFINE5(pwritev64v2, unsigned long, fd,
+ const struct compat_iovec __user *,vec,
+ unsigned long, vlen, loff_t, pos, int, flags)
+{
+ return do_compat_pwritev64(fd, vec, vlen, pos, flags);
+}
+#endif
+
COMPAT_SYSCALL_DEFINE6(pwritev2, compat_ulong_t, fd,
const struct compat_iovec __user *,vec,
compat_ulong_t, vlen, u32, pos_low, u32, pos_high, int, flags)
diff --git a/fs/reiserfs/ibalance.c b/fs/reiserfs/ibalance.c
index b751eea32e20..5db6f45b3fed 100644
--- a/fs/reiserfs/ibalance.c
+++ b/fs/reiserfs/ibalance.c
@@ -1153,8 +1153,9 @@ int balance_internal(struct tree_balance *tb,
insert_ptr);
}
- memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE);
insert_ptr[0] = new_insert_ptr;
+ if (new_insert_ptr)
+ memcpy(new_insert_key_addr, &new_insert_key, KEY_SIZE);
return order;
}
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 825455d3e4ba..c2c59f9ff04b 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -2668,7 +2668,7 @@ static int reiserfs_write_full_page(struct page *page,
do {
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
- submit_bh(WRITE, bh);
+ submit_bh(REQ_OP_WRITE, 0, bh);
nr++;
}
put_bh(bh);
@@ -2728,7 +2728,7 @@ fail:
struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) {
clear_buffer_dirty(bh);
- submit_bh(WRITE, bh);
+ submit_bh(REQ_OP_WRITE, 0, bh);
nr++;
}
put_bh(bh);
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 2ace90e981f0..bc2dde2423c2 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -652,7 +652,7 @@ static void submit_logged_buffer(struct buffer_head *bh)
BUG();
if (!buffer_uptodate(bh))
BUG();
- submit_bh(WRITE, bh);
+ submit_bh(REQ_OP_WRITE, 0, bh);
}
static void submit_ordered_buffer(struct buffer_head *bh)
@@ -662,7 +662,7 @@ static void submit_ordered_buffer(struct buffer_head *bh)
clear_buffer_dirty(bh);
if (!buffer_uptodate(bh))
BUG();
- submit_bh(WRITE, bh);
+ submit_bh(REQ_OP_WRITE, 0, bh);
}
#define CHUNK_SIZE 32
@@ -870,7 +870,7 @@ loop_next:
*/
if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) {
spin_unlock(lock);
- ll_rw_block(WRITE, 1, &bh);
+ ll_rw_block(REQ_OP_WRITE, 0, 1, &bh);
spin_lock(lock);
}
put_bh(bh);
@@ -1057,7 +1057,7 @@ static int flush_commit_list(struct super_block *s,
if (tbh) {
if (buffer_dirty(tbh)) {
depth = reiserfs_write_unlock_nested(s);
- ll_rw_block(WRITE, 1, &tbh);
+ ll_rw_block(REQ_OP_WRITE, 0, 1, &tbh);
reiserfs_write_lock_nested(s, depth);
}
put_bh(tbh);
@@ -2244,7 +2244,7 @@ abort_replay:
}
}
/* read in the log blocks, memcpy to the corresponding real block */
- ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
+ ll_rw_block(REQ_OP_READ, 0, get_desc_trans_len(desc), log_blocks);
for (i = 0; i < get_desc_trans_len(desc); i++) {
wait_on_buffer(log_blocks[i]);
@@ -2269,7 +2269,7 @@ abort_replay:
/* flush out the real blocks */
for (i = 0; i < get_desc_trans_len(desc); i++) {
set_buffer_dirty(real_blocks[i]);
- write_dirty_buffer(real_blocks[i], WRITE);
+ write_dirty_buffer(real_blocks[i], 0);
}
for (i = 0; i < get_desc_trans_len(desc); i++) {
wait_on_buffer(real_blocks[i]);
@@ -2346,7 +2346,7 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
} else
bhlist[j++] = bh;
}
- ll_rw_block(READ, j, bhlist);
+ ll_rw_block(REQ_OP_READ, 0, j, bhlist);
for (i = 1; i < j; i++)
brelse(bhlist[i]);
bh = bhlist[0];
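
The submit_bh()/ll_rw_block() churn in this file and the ones that follow all
stems from one interface change: the request operation (REQ_OP_READ or
REQ_OP_WRITE) is now passed separately from the op flags, and READA becomes
REQ_OP_READ plus REQ_RAHEAD. A minimal sketch of the new calling convention:

#include <linux/blk_types.h>
#include <linux/buffer_head.h>

/* Sketch: synchronously read one buffer with the split op/flags API. */
static void read_block_sync(struct buffer_head *bh)
{
	if (!buffer_uptodate(bh)) {
		/* old: ll_rw_block(READ, 1, &bh); */
		ll_rw_block(REQ_OP_READ, 0, 1, &bh);
		wait_on_buffer(bh);
	}
}

/* Readahead is now an op flag rather than a distinct op: */
static void read_block_ahead(struct buffer_head *bh)
{
	/* old: ll_rw_block(READA, 1, &bh); */
	ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, &bh);
}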
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 5feacd689241..4032d1e87c8f 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -551,7 +551,7 @@ static int search_by_key_reada(struct super_block *s,
if (!buffer_uptodate(bh[j])) {
if (depth == -1)
depth = reiserfs_write_unlock_nested(s);
- ll_rw_block(READA, 1, bh + j);
+ ll_rw_block(REQ_OP_READ, REQ_RAHEAD, 1, bh + j);
}
brelse(bh[j]);
}
@@ -660,7 +660,7 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key,
if (!buffer_uptodate(bh) && depth == -1)
depth = reiserfs_write_unlock_nested(sb);
- ll_rw_block(READ, 1, &bh);
+ ll_rw_block(REQ_OP_READ, 0, 1, &bh);
wait_on_buffer(bh);
if (depth != -1)
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index c72c16c5a60f..7a4a85a6821e 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -1666,7 +1666,7 @@ static int read_super_block(struct super_block *s, int offset)
/* after journal replay, reread all bitmap and super blocks */
static int reread_meta_blocks(struct super_block *s)
{
- ll_rw_block(READ, 1, &SB_BUFFER_WITH_SB(s));
+ ll_rw_block(REQ_OP_READ, 0, 1, &SB_BUFFER_WITH_SB(s));
wait_on_buffer(SB_BUFFER_WITH_SB(s));
if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) {
reiserfs_warning(s, "reiserfs-2504", "error reading the super");
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
index 2c2618410d51..ce62a380314f 100644
--- a/fs/squashfs/block.c
+++ b/fs/squashfs/block.c
@@ -124,7 +124,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
goto block_release;
bytes += msblk->devblksize;
}
- ll_rw_block(READ, b, bh);
+ ll_rw_block(REQ_OP_READ, 0, b, bh);
} else {
/*
* Metadata block.
@@ -156,7 +156,7 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
goto block_release;
bytes += msblk->devblksize;
}
- ll_rw_block(READ, b - 1, bh + 1);
+ ll_rw_block(REQ_OP_READ, 0, b - 1, bh + 1);
}
for (i = 0; i < b; i++) {
diff --git a/fs/super.c b/fs/super.c
index d78b9847e6cb..c2ff475c1711 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -33,6 +33,7 @@
#include <linux/cleancache.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
+#include <linux/user_namespace.h>
#include "internal.h"
@@ -165,6 +166,7 @@ static void destroy_super(struct super_block *s)
list_lru_destroy(&s->s_inode_lru);
security_sb_free(s);
WARN_ON(!list_empty(&s->s_mounts));
+ put_user_ns(s->s_user_ns);
kfree(s->s_subtype);
kfree(s->s_options);
call_rcu(&s->rcu, destroy_super_rcu);
@@ -174,11 +176,13 @@ static void destroy_super(struct super_block *s)
* alloc_super - create new superblock
* @type: filesystem type superblock should belong to
* @flags: the mount flags
+ * @user_ns: User namespace for the super_block
*
* Allocates and initializes a new &struct super_block. alloc_super()
* returns a pointer to a new superblock or %NULL if allocation fails.
*/
-static struct super_block *alloc_super(struct file_system_type *type, int flags)
+static struct super_block *alloc_super(struct file_system_type *type, int flags,
+ struct user_namespace *user_ns)
{
struct super_block *s = kzalloc(sizeof(struct super_block), GFP_USER);
static const struct super_operations default_op;
@@ -188,6 +192,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
return NULL;
INIT_LIST_HEAD(&s->s_mounts);
+ s->s_user_ns = get_user_ns(user_ns);
if (security_sb_alloc(s))
goto fail;
@@ -201,11 +206,15 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
init_waitqueue_head(&s->s_writers.wait_unfrozen);
s->s_bdi = &noop_backing_dev_info;
s->s_flags = flags;
+ if (s->s_user_ns != &init_user_ns)
+ s->s_iflags |= SB_I_NODEV;
INIT_HLIST_NODE(&s->s_instances);
INIT_HLIST_BL_HEAD(&s->s_anon);
mutex_init(&s->s_sync_lock);
INIT_LIST_HEAD(&s->s_inodes);
spin_lock_init(&s->s_inode_list_lock);
+ INIT_LIST_HEAD(&s->s_inodes_wb);
+ spin_lock_init(&s->s_inode_wblist_lock);
if (list_lru_init_memcg(&s->s_dentry_lru))
goto fail;
@@ -443,29 +452,42 @@ void generic_shutdown_super(struct super_block *sb)
EXPORT_SYMBOL(generic_shutdown_super);
/**
- * sget - find or create a superblock
+ * sget_userns - find or create a superblock
* @type: filesystem type superblock should belong to
* @test: comparison callback
* @set: setup callback
* @flags: mount flags
+ * @user_ns: User namespace for the super_block
* @data: argument to each of them
*/
-struct super_block *sget(struct file_system_type *type,
+struct super_block *sget_userns(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
- int flags,
+ int flags, struct user_namespace *user_ns,
void *data)
{
struct super_block *s = NULL;
struct super_block *old;
int err;
+ if (!(flags & MS_KERNMOUNT) &&
+ !(type->fs_flags & FS_USERNS_MOUNT) &&
+ !capable(CAP_SYS_ADMIN))
+ return ERR_PTR(-EPERM);
retry:
spin_lock(&sb_lock);
if (test) {
hlist_for_each_entry(old, &type->fs_supers, s_instances) {
if (!test(old, data))
continue;
+ if (user_ns != old->s_user_ns) {
+ spin_unlock(&sb_lock);
+ if (s) {
+ up_write(&s->s_umount);
+ destroy_super(s);
+ }
+ return ERR_PTR(-EBUSY);
+ }
if (!grab_super(old))
goto retry;
if (s) {
@@ -478,7 +500,7 @@ retry:
}
if (!s) {
spin_unlock(&sb_lock);
- s = alloc_super(type, flags);
+ s = alloc_super(type, flags, user_ns);
if (!s)
return ERR_PTR(-ENOMEM);
goto retry;
@@ -501,6 +523,31 @@ retry:
return s;
}
+EXPORT_SYMBOL(sget_userns);
+
+/**
+ * sget - find or create a superblock
+ * @type: filesystem type superblock should belong to
+ * @test: comparison callback
+ * @set: setup callback
+ * @flags: mount flags
+ * @data: argument to each of them
+ */
+struct super_block *sget(struct file_system_type *type,
+ int (*test)(struct super_block *,void *),
+ int (*set)(struct super_block *,void *),
+ int flags,
+ void *data)
+{
+ struct user_namespace *user_ns = current_user_ns();
+
+ /* Ensure the requestor has permissions over the target filesystem */
+ if (!(flags & MS_KERNMOUNT) && !ns_capable(user_ns, CAP_SYS_ADMIN))
+ return ERR_PTR(-EPERM);
+
+ return sget_userns(type, test, set, flags, user_ns, data);
+}
+
EXPORT_SYMBOL(sget);
void drop_super(struct super_block *sb)
@@ -918,12 +965,20 @@ static int ns_set_super(struct super_block *sb, void *data)
return set_anon_super(sb, NULL);
}
-struct dentry *mount_ns(struct file_system_type *fs_type, int flags,
- void *data, int (*fill_super)(struct super_block *, void *, int))
+struct dentry *mount_ns(struct file_system_type *fs_type,
+ int flags, void *data, void *ns, struct user_namespace *user_ns,
+ int (*fill_super)(struct super_block *, void *, int))
{
struct super_block *sb;
- sb = sget(fs_type, ns_test_super, ns_set_super, flags, data);
+ /*
+ * Don't allow mounting unless the caller has CAP_SYS_ADMIN
+ * over the namespace.
+ */
+ if (!(flags & MS_KERNMOUNT) && !ns_capable(user_ns, CAP_SYS_ADMIN))
+ return ERR_PTR(-EPERM);
+
+ sb = sget_userns(fs_type, ns_test_super, ns_set_super, flags,
+ user_ns, ns);
if (IS_ERR(sb))
return ERR_CAST(sb);
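
A sketch of how a filesystem might use the new entry point; example_set_super()
and example_mount_sb() are hypothetical, while sget_userns() and
set_anon_super() are the functions shown above:

static int example_set_super(struct super_block *sb, void *data)
{
	return set_anon_super(sb, NULL);
}

/* Pin the caller's user namespace into the superblock.  If a matching
 * superblock already exists under a different namespace, the -EBUSY
 * path above refuses to share it. */
static struct super_block *example_mount_sb(struct file_system_type *type,
					    int flags)
{
	return sget_userns(type, NULL, example_set_super, flags,
			   current_user_ns(), NULL);
}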
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index f3db82071cfb..20b8f82e115b 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -41,8 +41,7 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
if (IS_ERR(root) || !new_sb)
kobj_ns_drop(KOBJ_NS_TYPE_NET, ns);
else if (new_sb)
- /* Userspace would break if executables appear on sysfs */
- root->d_sb->s_iflags |= SB_I_NOEXEC;
+ root->d_sb->s_iflags |= SB_I_USERNS_VISIBLE;
return root;
}
@@ -59,7 +58,7 @@ static struct file_system_type sysfs_fs_type = {
.name = "sysfs",
.mount = sysfs_mount,
.kill_sb = sysfs_kill_sb,
- .fs_flags = FS_USERNS_VISIBLE | FS_USERNS_MOUNT,
+ .fs_flags = FS_USERNS_MOUNT,
};
int __init sysfs_init(void)
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index 90b60c03b588..a42de45ce40d 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -33,7 +33,7 @@ static int sysv_hash(const struct dentry *dentry, struct qstr *qstr)
function. */
if (qstr->len > SYSV_NAMELEN) {
qstr->len = SYSV_NAMELEN;
- qstr->hash = full_name_hash(qstr->name, qstr->len);
+ qstr->hash = full_name_hash(dentry, qstr->name, qstr->len);
}
return 0;
}
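
The extra argument comes from the salted string-hash rework: full_name_hash()
now takes a salt, conventionally the dentry, so identical names hash
differently under different parents. A short sketch (hash_for() is
hypothetical):

#include <linux/dcache.h>
#include <linux/stringhash.h>

/* old: full_name_hash(name, len);
 * new: full_name_hash(salt, name, len); */
static unsigned int hash_for(const struct dentry *dir,
			     const char *name, unsigned int len)
{
	return full_name_hash(dir, name, len);
}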
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 053818dd6c18..9ae4abb4110b 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -390,6 +390,11 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags)
clockid != CLOCK_BOOTTIME_ALARM))
return -EINVAL;
+ if (!capable(CAP_WAKE_ALARM) &&
+ (clockid == CLOCK_REALTIME_ALARM ||
+ clockid == CLOCK_BOOTTIME_ALARM))
+ return -EPERM;
+
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
@@ -433,6 +438,11 @@ static int do_timerfd_settime(int ufd, int flags,
return ret;
ctx = f.file->private_data;
+ if (!capable(CAP_WAKE_ALARM) && isalarm(ctx)) {
+ fdput(f);
+ return -EPERM;
+ }
+
timerfd_setup_cancel(ctx, flags);
/*
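
From userspace the effect is that unprivileged callers now fail at creation
time rather than receiving an alarm timer they cannot arm. A small sketch
(error handling trimmed):

#include <errno.h>
#include <stdio.h>
#include <sys/timerfd.h>
#include <time.h>

int make_alarm_timer(void)
{
	/* Requires CAP_WAKE_ALARM after this change. */
	int fd = timerfd_create(CLOCK_REALTIME_ALARM, TFD_CLOEXEC);

	if (fd < 0 && errno == EPERM)
		fprintf(stderr, "CAP_WAKE_ALARM required for alarm clocks\n");
	return fd;
}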
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 4a0e48f92104..ad40b64c5e2f 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -541,9 +541,6 @@ void tracefs_remove(struct dentry *dentry)
return;
parent = dentry->d_parent;
- if (!parent || !parent->d_inode)
- return;
-
inode_lock(parent->d_inode);
ret = __tracefs_remove(dentry, parent);
inode_unlock(parent->d_inode);
@@ -566,10 +563,6 @@ void tracefs_remove_recursive(struct dentry *dentry)
if (IS_ERR_OR_NULL(dentry))
return;
- parent = dentry->d_parent;
- if (!parent || !parent->d_inode)
- return;
-
parent = dentry;
down:
inode_lock(parent->d_inode);
diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c
index 9718da86ad01..821b34816976 100644
--- a/fs/ubifs/gc.c
+++ b/fs/ubifs/gc.c
@@ -100,10 +100,6 @@ static int switch_gc_head(struct ubifs_info *c)
if (err)
return err;
- err = ubifs_wbuf_sync_nolock(wbuf);
- if (err)
- return err;
-
err = ubifs_add_bud_to_log(c, GCHD, gc_lnum, 0);
if (err)
return err;
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 70349954e78b..4ec051089186 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -520,19 +520,19 @@ static int init_constants_early(struct ubifs_info *c)
c->max_write_shift = fls(c->max_write_size) - 1;
if (c->leb_size < UBIFS_MIN_LEB_SZ) {
- ubifs_err(c, "too small LEBs (%d bytes), min. is %d bytes",
- c->leb_size, UBIFS_MIN_LEB_SZ);
+ ubifs_errc(c, "too small LEBs (%d bytes), min. is %d bytes",
+ c->leb_size, UBIFS_MIN_LEB_SZ);
return -EINVAL;
}
if (c->leb_cnt < UBIFS_MIN_LEB_CNT) {
- ubifs_err(c, "too few LEBs (%d), min. is %d",
- c->leb_cnt, UBIFS_MIN_LEB_CNT);
+ ubifs_errc(c, "too few LEBs (%d), min. is %d",
+ c->leb_cnt, UBIFS_MIN_LEB_CNT);
return -EINVAL;
}
if (!is_power_of_2(c->min_io_size)) {
- ubifs_err(c, "bad min. I/O size %d", c->min_io_size);
+ ubifs_errc(c, "bad min. I/O size %d", c->min_io_size);
return -EINVAL;
}
@@ -543,8 +543,8 @@ static int init_constants_early(struct ubifs_info *c)
if (c->max_write_size < c->min_io_size ||
c->max_write_size % c->min_io_size ||
!is_power_of_2(c->max_write_size)) {
- ubifs_err(c, "bad write buffer size %d for %d min. I/O unit",
- c->max_write_size, c->min_io_size);
+ ubifs_errc(c, "bad write buffer size %d for %d min. I/O unit",
+ c->max_write_size, c->min_io_size);
return -EINVAL;
}
@@ -2108,8 +2108,9 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
*/
ubi = open_ubi(name, UBI_READONLY);
if (IS_ERR(ubi)) {
- pr_err("UBIFS error (pid: %d): cannot open \"%s\", error %d",
- current->pid, name, (int)PTR_ERR(ubi));
+ if (!(flags & MS_SILENT))
+ pr_err("UBIFS error (pid: %d): cannot open \"%s\", error %d",
+ current->pid, name, (int)PTR_ERR(ubi));
return ERR_CAST(ubi);
}
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index ddf9f6b9eee2..4617d459022a 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -1783,8 +1783,8 @@ void ubifs_err(const struct ubifs_info *c, const char *fmt, ...);
__printf(2, 3)
void ubifs_warn(const struct ubifs_info *c, const char *fmt, ...);
/*
- * A variant of 'ubifs_err()' which takes the UBIFS file-sytem description
- * object as an argument.
+ * A conditional variant of 'ubifs_err()' which doesn't output anything
+ * if probing (i.e., MS_SILENT is set).
*/
#define ubifs_errc(c, fmt, ...) \
do { \
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index b5fc27969e9d..e237811f09ce 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -592,19 +592,19 @@ static int ubifs_xattr_set(const struct xattr_handler *handler,
return __ubifs_removexattr(inode, name);
}
-const struct xattr_handler ubifs_user_xattr_handler = {
+static const struct xattr_handler ubifs_user_xattr_handler = {
.prefix = XATTR_USER_PREFIX,
.get = ubifs_xattr_get,
.set = ubifs_xattr_set,
};
-const struct xattr_handler ubifs_trusted_xattr_handler = {
+static const struct xattr_handler ubifs_trusted_xattr_handler = {
.prefix = XATTR_TRUSTED_PREFIX,
.get = ubifs_xattr_get,
.set = ubifs_xattr_set,
};
-const struct xattr_handler ubifs_security_xattr_handler = {
+static const struct xattr_handler ubifs_security_xattr_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.get = ubifs_xattr_get,
.set = ubifs_xattr_set,
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 4c5593abc553..aaec13c95253 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -113,7 +113,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
brelse(tmp);
}
if (num) {
- ll_rw_block(READA, num, bha);
+ ll_rw_block(REQ_OP_READ, REQ_RAHEAD, num, bha);
for (i = 0; i < num; i++)
brelse(bha[i]);
}
diff --git a/fs/udf/directory.c b/fs/udf/directory.c
index c763fda257bf..988d5352bdb8 100644
--- a/fs/udf/directory.c
+++ b/fs/udf/directory.c
@@ -87,7 +87,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
brelse(tmp);
}
if (num) {
- ll_rw_block(READA, num, bha);
+ ll_rw_block(REQ_OP_READ, REQ_RAHEAD, num, bha);
for (i = 0; i < num; i++)
brelse(bha[i]);
}
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index f323aff740ef..55aa587bbc38 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1199,7 +1199,7 @@ struct buffer_head *udf_bread(struct inode *inode, int block,
if (buffer_uptodate(bh))
return bh;
- ll_rw_block(READ, 1, &bh);
+ ll_rw_block(REQ_OP_READ, 0, 1, &bh);
wait_on_buffer(bh);
if (buffer_uptodate(bh))
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index 0447b949c7f5..67e085d591d8 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -292,7 +292,7 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
if (!buffer_mapped(bh))
map_bh(bh, inode->i_sb, oldb + pos);
if (!buffer_uptodate(bh)) {
- ll_rw_block(READ, 1, &bh);
+ ll_rw_block(REQ_OP_READ, 0, 1, &bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
ufs_error(inode->i_sb, __func__,
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 57dcceda17d6..fa3bda1a860f 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -279,12 +279,6 @@ struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr,
de = (struct ufs_dir_entry *) kaddr;
kaddr += ufs_last_byte(dir, n) - reclen;
while ((char *) de <= kaddr) {
- if (de->d_reclen == 0) {
- ufs_error(dir->i_sb, __func__,
- "zero-length directory entry");
- ufs_put_page(page);
- goto out;
- }
if (ufs_match(sb, namelen, name, de))
goto found;
de = ufs_next_entry(sb, de);
@@ -414,11 +408,8 @@ ufs_validate_entry(struct super_block *sb, char *base,
{
struct ufs_dir_entry *de = (struct ufs_dir_entry*)(base + offset);
struct ufs_dir_entry *p = (struct ufs_dir_entry*)(base + (offset&mask));
- while ((char*)p < (char*)de) {
- if (p->d_reclen == 0)
- break;
+ while ((char*)p < (char*)de)
p = ufs_next_entry(sb, p);
- }
return (char *)p - base;
}
@@ -469,12 +460,6 @@ ufs_readdir(struct file *file, struct dir_context *ctx)
de = (struct ufs_dir_entry *)(kaddr+offset);
limit = kaddr + ufs_last_byte(inode, n) - UFS_DIR_REC_LEN(1);
for ( ;(char*)de <= limit; de = ufs_next_entry(sb, de)) {
- if (de->d_reclen == 0) {
- ufs_error(sb, __func__,
- "zero-length directory entry");
- ufs_put_page(page);
- return -EIO;
- }
if (de->d_ino) {
unsigned char d_type = DT_UNKNOWN;
diff --git a/fs/ufs/util.c b/fs/ufs/util.c
index a409e3e7827a..f41ad0a6106f 100644
--- a/fs/ufs/util.c
+++ b/fs/ufs/util.c
@@ -118,7 +118,7 @@ void ubh_sync_block(struct ufs_buffer_head *ubh)
unsigned i;
for (i = 0; i < ubh->count; i++)
- write_dirty_buffer(ubh->bh[i], WRITE);
+ write_dirty_buffer(ubh->bh[i], 0);
for (i = 0; i < ubh->count; i++)
wait_on_buffer(ubh->bh[i]);
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 2d97952e341a..85959d8324df 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -257,10 +257,9 @@ out:
* fatal_signal_pending()s, and the mmap_sem must be released before
* returning it.
*/
-int handle_userfault(struct vm_area_struct *vma, unsigned long address,
- unsigned int flags, unsigned long reason)
+int handle_userfault(struct fault_env *fe, unsigned long reason)
{
- struct mm_struct *mm = vma->vm_mm;
+ struct mm_struct *mm = fe->vma->vm_mm;
struct userfaultfd_ctx *ctx;
struct userfaultfd_wait_queue uwq;
int ret;
@@ -269,7 +268,7 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
ret = VM_FAULT_SIGBUS;
- ctx = vma->vm_userfaultfd_ctx.ctx;
+ ctx = fe->vma->vm_userfaultfd_ctx.ctx;
if (!ctx)
goto out;
@@ -302,17 +301,17 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
* without first stopping userland access to the memory. For
* VM_UFFD_MISSING userfaults this is enough for now.
*/
- if (unlikely(!(flags & FAULT_FLAG_ALLOW_RETRY))) {
+ if (unlikely(!(fe->flags & FAULT_FLAG_ALLOW_RETRY))) {
/*
* Validate the invariant that nowait must allow retry
* to be sure not to return SIGBUS erroneously on
* nowait invocations.
*/
- BUG_ON(flags & FAULT_FLAG_RETRY_NOWAIT);
+ BUG_ON(fe->flags & FAULT_FLAG_RETRY_NOWAIT);
#ifdef CONFIG_DEBUG_VM
if (printk_ratelimit()) {
printk(KERN_WARNING
- "FAULT_FLAG_ALLOW_RETRY missing %x\n", flags);
+ "FAULT_FLAG_ALLOW_RETRY missing %x\n", fe->flags);
dump_stack();
}
#endif
@@ -324,7 +323,7 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
* and wait.
*/
ret = VM_FAULT_RETRY;
- if (flags & FAULT_FLAG_RETRY_NOWAIT)
+ if (fe->flags & FAULT_FLAG_RETRY_NOWAIT)
goto out;
/* take the reference before dropping the mmap_sem */
@@ -332,10 +331,11 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
uwq.wq.private = current;
- uwq.msg = userfault_msg(address, flags, reason);
+ uwq.msg = userfault_msg(fe->address, fe->flags, reason);
uwq.ctx = ctx;
- return_to_userland = (flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
+ return_to_userland =
+ (fe->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
spin_lock(&ctx->fault_pending_wqh.lock);
@@ -353,7 +353,7 @@ int handle_userfault(struct vm_area_struct *vma, unsigned long address,
TASK_KILLABLE);
spin_unlock(&ctx->fault_pending_wqh.lock);
- must_wait = userfaultfd_must_wait(ctx, address, flags, reason);
+ must_wait = userfaultfd_must_wait(ctx, fe->address, fe->flags, reason);
up_read(&mm->mmap_sem);
if (likely(must_wait && !ACCESS_ONCE(ctx->released) &&
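
The conversion packs the loose (vma, address, flags) arguments into a single
struct fault_env. The sketch below lists only the fields this function
touches; it is an assumption-based subset, not the full definition:

#include <linux/mm_types.h>

struct fault_env_sketch {
	struct vm_area_struct *vma;	/* replaces the 'vma' argument */
	unsigned long address;		/* replaces 'address' */
	unsigned int flags;		/* replaces 'flags' (FAULT_FLAG_*) */
};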
diff --git a/fs/utimes.c b/fs/utimes.c
index 85c40f4f373d..794f5f5b1fb5 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -92,10 +92,11 @@ static int utimes_common(struct path *path, struct timespec *times)
* then we need to check permissions, because
* inode_change_ok() won't do it.
*/
- error = -EACCES;
+ error = -EPERM;
if (IS_IMMUTABLE(inode))
goto mnt_drop_write_and_out;
+ error = -EACCES;
if (!inode_owner_or_capable(inode)) {
error = inode_permission(inode, MAY_WRITE);
if (error)
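
The errno split is deliberate: an immutable inode is a hard policy denial,
while an ordinary permission failure stays EACCES. From userspace (sketch;
touch_now() is hypothetical):

#include <fcntl.h>
#include <sys/stat.h>

/* utimensat(2) with a NULL times array sets both timestamps to now.
 * On an immutable file this now fails with EPERM; lacking write
 * permission on someone else's file still yields EACCES. */
static int touch_now(const char *path)
{
	return utimensat(AT_FDCWD, path, NULL, 0);
}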
diff --git a/fs/xattr.c b/fs/xattr.c
index 4beafc43daa5..c243905835ab 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -38,6 +38,13 @@ xattr_permission(struct inode *inode, const char *name, int mask)
if (mask & MAY_WRITE) {
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
return -EPERM;
+ /*
+ * Updating an xattr will likely cause i_uid and i_gid
+ * to be written back improperly if their true values are
+ * unknown to the vfs.
+ */
+ if (HAS_UNMAPPED_ID(inode))
+ return -EPERM;
}
/*
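
HAS_UNMAPPED_ID() is the predicate introduced elsewhere in this series; to a
first approximation it reduces to the sketch below (the inline form is an
assumption based on the uid/gid validity helpers):

#include <linux/fs.h>
#include <linux/uidgid.h>

static inline bool inode_has_unmapped_id(const struct inode *inode)
{
	/* True when the vfs cannot represent the real owner ids. */
	return !uid_valid(inode->i_uid) || !gid_valid(inode->i_gid);
}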
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig
index 5d47b4df61ea..35faf128f36d 100644
--- a/fs/xfs/Kconfig
+++ b/fs/xfs/Kconfig
@@ -4,6 +4,7 @@ config XFS_FS
depends on (64BIT || LBDAF)
select EXPORTFS
select LIBCRC32C
+ select FS_IOMAP
help
XFS is a high performance journaling filesystem which originated
on the SGI IRIX platform. It is completely multi-threaded, can
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 3542d94fddce..fc593c869493 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -39,6 +39,7 @@ xfs-y += $(addprefix libxfs/, \
xfs_btree.o \
xfs_da_btree.o \
xfs_da_format.o \
+ xfs_defer.o \
xfs_dir2.o \
xfs_dir2_block.o \
xfs_dir2_data.o \
@@ -51,6 +52,8 @@ xfs-y += $(addprefix libxfs/, \
xfs_inode_fork.o \
xfs_inode_buf.o \
xfs_log_rlimit.o \
+ xfs_rmap.o \
+ xfs_rmap_btree.o \
xfs_sb.o \
xfs_symlink_remote.o \
xfs_trans_resv.o \
@@ -100,11 +103,13 @@ xfs-y += xfs_log.o \
xfs_extfree_item.o \
xfs_icreate_item.o \
xfs_inode_item.o \
+ xfs_rmap_item.o \
xfs_log_recover.o \
xfs_trans_ail.o \
xfs_trans_buf.o \
xfs_trans_extfree.o \
xfs_trans_inode.o \
+ xfs_trans_rmap.o \
# optional features
xfs-$(CONFIG_XFS_QUOTA) += xfs_dquot.o \
@@ -121,5 +126,4 @@ xfs-$(CONFIG_XFS_RT) += xfs_rtalloc.o
xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o
xfs-$(CONFIG_SYSCTL) += xfs_sysctl.o
xfs-$(CONFIG_COMPAT) += xfs_ioctl32.o
-xfs-$(CONFIG_NFSD_BLOCKLAYOUT) += xfs_pnfs.o
-xfs-$(CONFIG_NFSD_SCSILAYOUT) += xfs_pnfs.o
+xfs-$(CONFIG_EXPORTFS_BLOCK_OPS) += xfs_pnfs.o
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index a708e38b494c..776ae2f325d1 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -24,8 +24,10 @@
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
+#include "xfs_rmap.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_extent_busy.h"
@@ -49,6 +51,81 @@ STATIC int xfs_alloc_ag_vextent_size(xfs_alloc_arg_t *);
STATIC int xfs_alloc_ag_vextent_small(xfs_alloc_arg_t *,
xfs_btree_cur_t *, xfs_agblock_t *, xfs_extlen_t *, int *);
+xfs_extlen_t
+xfs_prealloc_blocks(
+ struct xfs_mount *mp)
+{
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ return XFS_RMAP_BLOCK(mp) + 1;
+ if (xfs_sb_version_hasfinobt(&mp->m_sb))
+ return XFS_FIBT_BLOCK(mp) + 1;
+ return XFS_IBT_BLOCK(mp) + 1;
+}
+
+/*
+ * In order to avoid ENOSPC-related deadlock caused by out-of-order locking of
+ * AGF buffer (PV 947395), we place constraints on the relationship among
+ * actual allocations for data blocks, freelist blocks, and potential file data
+ * bmap btree blocks. However, these restrictions may result in no actual space
+ * allocated for a delayed extent; for example, a data block in a certain AG is
+ * allocated, but there is no block left for the additional bmap btree block
+ * needed when the file's bmap btree splits. The result of this may
+ * lead to an infinite loop when the file gets flushed to disk and all delayed
+ * extents need to be actually allocated. To get around this, we explicitly set
+ * aside a few blocks which will not be reserved in delayed allocation.
+ *
+ * When rmap is disabled, we need to reserve 4 fsbs _per AG_ for the freelist
+ * and 4 more to handle a potential split of the file's bmap btree.
+ *
+ * When rmap is enabled, we must also be able to handle two rmap btree inserts
+ * to record both the file data extent and a new bmbt block. The bmbt block
+ * might not be in the same AG as the file data extent. In the worst case
+ * the bmap btree splits multiple levels and all the new blocks come from
+ * different AGs, so set aside enough to handle rmap btree splits in all AGs.
+ */
+unsigned int
+xfs_alloc_set_aside(
+ struct xfs_mount *mp)
+{
+ unsigned int blocks;
+
+ blocks = 4 + (mp->m_sb.sb_agcount * XFS_ALLOC_AGFL_RESERVE);
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ blocks += mp->m_sb.sb_agcount * mp->m_rmap_maxlevels;
+ return blocks;
+}
+
+/*
+ * When deciding how much space to allocate out of an AG, we limit the
+ * allocation maximum size to the size of the AG. However, we cannot use all the
+ * blocks in the AG - some are permanently used by metadata. These
+ * blocks are generally:
+ * - the AG superblock, AGF, AGI and AGFL
+ * - the AGF (bno and cnt) and AGI btree root blocks, and optionally
+ * the AGI free inode btree root block
+ * - blocks on the AGFL according to xfs_alloc_set_aside() limits
+ * - the rmapbt root block, if the rmap feature is enabled
+ *
+ * The AG headers are sector sized, so the amount of space they take up is
+ * dependent on filesystem geometry. The others are all single blocks.
+ */
+unsigned int
+xfs_alloc_ag_max_usable(
+ struct xfs_mount *mp)
+{
+ unsigned int blocks;
+
+ blocks = XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)); /* ag headers */
+ blocks += XFS_ALLOC_AGFL_RESERVE;
+ blocks += 3; /* AGF, AGI btree root blocks */
+ if (xfs_sb_version_hasfinobt(&mp->m_sb))
+ blocks++; /* finobt root block */
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ blocks++; /* rmap root block */
+
+ return mp->m_sb.sb_agblocks - blocks;
+}
+
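
A worked example may make the reservations concrete (the geometry numbers are
assumptions, not from this diff):

/* With agcount = 4 and rmap disabled:
 *	xfs_alloc_set_aside() = 4 + 4 * XFS_ALLOC_AGFL_RESERVE
 *	                      = 4 + 4 * 4 = 20 blocks.
 * With rmap enabled and, say, m_rmap_maxlevels = 9, a further
 * 4 * 9 = 36 blocks are withheld.  For xfs_alloc_ag_max_usable()
 * with 512-byte sectors and 4 KiB blocks, the four sector-sized
 * headers round up to one block, so each AG loses
 *	1 + 4 + 3 + 1 + 1 = 10 blocks (with finobt and rmapbt).
 */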
/*
* Lookup the record equal to [bno, len] in the btree given by cur.
*/
@@ -84,7 +161,7 @@ xfs_alloc_lookup_ge(
* Lookup the first record less than or equal to [bno, len]
* in the btree given by cur.
*/
-int /* error */
+static int /* error */
xfs_alloc_lookup_le(
struct xfs_btree_cur *cur, /* btree cursor */
xfs_agblock_t bno, /* starting block of extent */
@@ -636,6 +713,14 @@ xfs_alloc_ag_vextent(
ASSERT(!args->wasfromfl || !args->isfl);
ASSERT(args->agbno % args->alignment == 0);
+ /* if not file data, insert new block into the reverse map btree */
+ if (args->oinfo.oi_owner != XFS_RMAP_OWN_UNKNOWN) {
+ error = xfs_rmap_alloc(args->tp, args->agbp, args->agno,
+ args->agbno, args->len, &args->oinfo);
+ if (error)
+ return error;
+ }
+
if (!args->wasfromfl) {
error = xfs_alloc_update_counters(args->tp, args->pag,
args->agbp,
@@ -1577,14 +1662,15 @@ error0:
/*
* Free the extent starting at agno/bno for length.
*/
-STATIC int /* error */
+STATIC int
xfs_free_ag_extent(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_buf_t *agbp, /* buffer for a.g. freelist header */
- xfs_agnumber_t agno, /* allocation group number */
- xfs_agblock_t bno, /* starting block number */
- xfs_extlen_t len, /* length of extent */
- int isfl) /* set if is freelist blocks - no sb acctg */
+ xfs_trans_t *tp,
+ xfs_buf_t *agbp,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ struct xfs_owner_info *oinfo,
+ int isfl)
{
xfs_btree_cur_t *bno_cur; /* cursor for by-block btree */
xfs_btree_cur_t *cnt_cur; /* cursor for by-size btree */
@@ -1601,12 +1687,19 @@ xfs_free_ag_extent(
xfs_extlen_t nlen; /* new length of freespace */
xfs_perag_t *pag; /* per allocation group data */
+ bno_cur = cnt_cur = NULL;
mp = tp->t_mountp;
+
+ if (oinfo->oi_owner != XFS_RMAP_OWN_UNKNOWN) {
+ error = xfs_rmap_free(tp, agbp, agno, bno, len, oinfo);
+ if (error)
+ goto error0;
+ }
+
/*
* Allocate and initialize a cursor for the by-block btree.
*/
bno_cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
- cnt_cur = NULL;
/*
* Look for a neighboring block on the left (lower block numbers)
* that is contiguous with this space.
@@ -1839,19 +1932,8 @@ void
xfs_alloc_compute_maxlevels(
xfs_mount_t *mp) /* file system mount structure */
{
- int level;
- uint maxblocks;
- uint maxleafents;
- int minleafrecs;
- int minnoderecs;
-
- maxleafents = (mp->m_sb.sb_agblocks + 1) / 2;
- minleafrecs = mp->m_alloc_mnr[0];
- minnoderecs = mp->m_alloc_mnr[1];
- maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
- for (level = 1; maxblocks > 1; level++)
- maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
- mp->m_ag_maxlevels = level;
+ mp->m_ag_maxlevels = xfs_btree_compute_maxlevels(mp, mp->m_alloc_mnr,
+ (mp->m_sb.sb_agblocks + 1) / 2);
}
/*
@@ -1886,6 +1968,11 @@ xfs_alloc_min_freelist(
/* space needed by-size freespace btree */
min_free += min_t(unsigned int, pag->pagf_levels[XFS_BTNUM_CNTi] + 1,
mp->m_ag_maxlevels);
+ /* space needed by the reverse mapping btree */
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ min_free += min_t(unsigned int,
+ pag->pagf_levels[XFS_BTNUM_RMAPi] + 1,
+ mp->m_rmap_maxlevels);
return min_free;
}
@@ -2003,21 +2090,34 @@ xfs_alloc_fix_freelist(
* anything other than extra overhead when we need to put more blocks
* back on the free list? Maybe we should only do this when space is
* getting low or the AGFL is more than half full?
+ *
+ * The NOSHRINK flag prevents the AGFL from being shrunk if it's too
+ * big; the NORMAP flag prevents AGFL expand/shrink operations from
+ * updating the rmapbt. Both flags are used in xfs_repair while we're
+ * rebuilding the rmapbt, and neither is used by the kernel. They're
+ * both required to ensure that rmaps are correctly recorded for the
+ * regenerated AGFL, bnobt, and cntbt. See repair/phase5.c and
+ * repair/rmap.c in xfsprogs for details.
*/
- while (pag->pagf_flcount > need) {
+ memset(&targs, 0, sizeof(targs));
+ if (flags & XFS_ALLOC_FLAG_NORMAP)
+ xfs_rmap_skip_owner_update(&targs.oinfo);
+ else
+ xfs_rmap_ag_owner(&targs.oinfo, XFS_RMAP_OWN_AG);
+ while (!(flags & XFS_ALLOC_FLAG_NOSHRINK) && pag->pagf_flcount > need) {
struct xfs_buf *bp;
error = xfs_alloc_get_freelist(tp, agbp, &bno, 0);
if (error)
goto out_agbp_relse;
- error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1, 1);
+ error = xfs_free_ag_extent(tp, agbp, args->agno, bno, 1,
+ &targs.oinfo, 1);
if (error)
goto out_agbp_relse;
bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0);
xfs_trans_binval(tp, bp);
}
- memset(&targs, 0, sizeof(targs));
targs.tp = tp;
targs.mp = mp;
targs.agbp = agbp;
@@ -2282,6 +2382,10 @@ xfs_agf_verify(
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]) > XFS_BTREE_MAXLEVELS)
return false;
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb) &&
+ be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]) > XFS_BTREE_MAXLEVELS)
+ return false;
+
/*
* during growfs operations, the perag is not fully initialised,
* so we can't use it for any useful checking. growfs ensures we can't
@@ -2413,6 +2517,8 @@ xfs_alloc_read_agf(
be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNOi]);
pag->pagf_levels[XFS_BTNUM_CNTi] =
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
+ pag->pagf_levels[XFS_BTNUM_RMAPi] =
+ be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAPi]);
spin_lock_init(&pag->pagb_lock);
pag->pagb_count = 0;
pag->pagb_tree = RB_ROOT;
@@ -2658,55 +2764,85 @@ error0:
return error;
}
-/*
- * Free an extent.
- * Just break up the extent address and hand off to xfs_free_ag_extent
- * after fixing up the freelist.
- */
-int /* error */
-xfs_free_extent(
- xfs_trans_t *tp, /* transaction pointer */
- xfs_fsblock_t bno, /* starting block number of extent */
- xfs_extlen_t len) /* length of extent */
+/* Ensure that the freelist is at full capacity. */
+int
+xfs_free_extent_fix_freelist(
+ struct xfs_trans *tp,
+ xfs_agnumber_t agno,
+ struct xfs_buf **agbp)
{
- xfs_alloc_arg_t args;
- int error;
+ struct xfs_alloc_arg args;
+ int error;
- ASSERT(len != 0);
- memset(&args, 0, sizeof(xfs_alloc_arg_t));
+ memset(&args, 0, sizeof(struct xfs_alloc_arg));
args.tp = tp;
args.mp = tp->t_mountp;
+ args.agno = agno;
/*
* validate that the block number is legal - this enables us to detect
* and handle a silent filesystem corruption rather than crashing.
*/
- args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
if (args.agno >= args.mp->m_sb.sb_agcount)
return -EFSCORRUPTED;
- args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
- if (args.agbno >= args.mp->m_sb.sb_agblocks)
- return -EFSCORRUPTED;
-
args.pag = xfs_perag_get(args.mp, args.agno);
ASSERT(args.pag);
error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING);
if (error)
- goto error0;
+ goto out;
+
+ *agbp = args.agbp;
+out:
+ xfs_perag_put(args.pag);
+ return error;
+}
+
+/*
+ * Free an extent.
+ * Just break up the extent address and hand off to xfs_free_ag_extent
+ * after fixing up the freelist.
+ */
+int /* error */
+xfs_free_extent(
+ struct xfs_trans *tp, /* transaction pointer */
+ xfs_fsblock_t bno, /* starting block number of extent */
+ xfs_extlen_t len, /* length of extent */
+ struct xfs_owner_info *oinfo) /* extent owner */
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_buf *agbp;
+ xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, bno);
+ xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, bno);
+ int error;
+
+ ASSERT(len != 0);
+
+ if (XFS_TEST_ERROR(false, mp,
+ XFS_ERRTAG_FREE_EXTENT,
+ XFS_RANDOM_FREE_EXTENT))
+ return -EIO;
+
+ error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
+ if (error)
+ return error;
+
+ XFS_WANT_CORRUPTED_GOTO(mp, agbno < mp->m_sb.sb_agblocks, err);
/* validate the extent size is legal now we have the agf locked */
- if (args.agbno + len >
- be32_to_cpu(XFS_BUF_TO_AGF(args.agbp)->agf_length)) {
- error = -EFSCORRUPTED;
- goto error0;
- }
+ XFS_WANT_CORRUPTED_GOTO(mp,
+ agbno + len <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_length),
+ err);
- error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
- if (!error)
- xfs_extent_busy_insert(tp, args.agno, args.agbno, len, 0);
-error0:
- xfs_perag_put(args.pag);
+ error = xfs_free_ag_extent(tp, agbp, agno, agbno, len, oinfo, 0);
+ if (error)
+ goto err;
+
+ xfs_extent_busy_insert(tp, agno, agbno, len, 0);
+ return 0;
+
+err:
+ xfs_trans_brelse(tp, agbp);
return error;
}
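
Callers of xfs_free_extent() now say who owned the blocks so the rmap btree can
be updated. A minimal sketch using the AG-metadata owner seen in the AGFL hunk
above (free_ag_metadata_extent() is hypothetical):

static int free_ag_metadata_extent(struct xfs_trans *tp,
				   xfs_fsblock_t bno, xfs_extlen_t len)
{
	struct xfs_owner_info oinfo;

	/* Record the extent as generic AG metadata in the rmapbt. */
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_AG);
	return xfs_free_extent(tp, bno, len, &oinfo);
}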
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index 135eb3d24db7..6fe2d6b7cfe9 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -54,41 +54,8 @@ typedef unsigned int xfs_alloctype_t;
*/
#define XFS_ALLOC_FLAG_TRYLOCK 0x00000001 /* use trylock for buffer locking */
#define XFS_ALLOC_FLAG_FREEING 0x00000002 /* indicate caller is freeing extents*/
-
-/*
- * In order to avoid ENOSPC-related deadlock caused by
- * out-of-order locking of AGF buffer (PV 947395), we place
- * constraints on the relationship among actual allocations for
- * data blocks, freelist blocks, and potential file data bmap
- * btree blocks. However, these restrictions may result in no
- * actual space allocated for a delayed extent, for example, a data
- * block in a certain AG is allocated but there is no additional
- * block for the additional bmap btree block due to a split of the
- * bmap btree of the file. The result of this may lead to an
- * infinite loop in xfssyncd when the file gets flushed to disk and
- * all delayed extents need to be actually allocated. To get around
- * this, we explicitly set aside a few blocks which will not be
- * reserved in delayed allocation. Considering the minimum number of
- * needed freelist blocks is 4 fsbs _per AG_, a potential split of file's bmap
- * btree requires 1 fsb, so we set the number of set-aside blocks
- * to 4 + 4*agcount.
- */
-#define XFS_ALLOC_SET_ASIDE(mp) (4 + ((mp)->m_sb.sb_agcount * 4))
-
-/*
- * When deciding how much space to allocate out of an AG, we limit the
- * allocation maximum size to the size the AG. However, we cannot use all the
- * blocks in the AG - some are permanently used by metadata. These
- * blocks are generally:
- * - the AG superblock, AGF, AGI and AGFL
- * - the AGF (bno and cnt) and AGI btree root blocks
- * - 4 blocks on the AGFL according to XFS_ALLOC_SET_ASIDE() limits
- *
- * The AG headers are sector sized, so the amount of space they take up is
- * dependent on filesystem geometry. The others are all single blocks.
- */
-#define XFS_ALLOC_AG_MAX_USABLE(mp) \
- ((mp)->m_sb.sb_agblocks - XFS_BB_TO_FSB(mp, XFS_FSS_TO_BB(mp, 4)) - 7)
+#define XFS_ALLOC_FLAG_NORMAP 0x00000004 /* don't modify the rmapbt */
+#define XFS_ALLOC_FLAG_NOSHRINK 0x00000008 /* don't shrink the freelist */
/*
@@ -123,6 +90,7 @@ typedef struct xfs_alloc_arg {
char isfl; /* set if is freelist blocks - !acctg */
char userdata; /* mask defining userdata treatment */
xfs_fsblock_t firstblock; /* io first block allocated */
+ struct xfs_owner_info oinfo; /* owner of blocks being allocated */
} xfs_alloc_arg_t;
/*
@@ -132,6 +100,11 @@ typedef struct xfs_alloc_arg {
#define XFS_ALLOC_INITIAL_USER_DATA (1 << 1)/* special case start of file */
#define XFS_ALLOC_USERDATA_ZERO (1 << 2)/* zero extent on allocation */
+/* freespace limit calculations */
+#define XFS_ALLOC_AGFL_RESERVE 4
+unsigned int xfs_alloc_set_aside(struct xfs_mount *mp);
+unsigned int xfs_alloc_ag_max_usable(struct xfs_mount *mp);
+
xfs_extlen_t xfs_alloc_longest_free_extent(struct xfs_mount *mp,
struct xfs_perag *pag, xfs_extlen_t need);
unsigned int xfs_alloc_min_freelist(struct xfs_mount *mp,
@@ -208,16 +181,10 @@ xfs_alloc_vextent(
*/
int /* error */
xfs_free_extent(
- struct xfs_trans *tp, /* transaction pointer */
- xfs_fsblock_t bno, /* starting block number of extent */
- xfs_extlen_t len); /* length of extent */
-
-int /* error */
-xfs_alloc_lookup_le(
- struct xfs_btree_cur *cur, /* btree cursor */
- xfs_agblock_t bno, /* starting block of extent */
+ struct xfs_trans *tp, /* transaction pointer */
+ xfs_fsblock_t bno, /* starting block number of extent */
xfs_extlen_t len, /* length of extent */
- int *stat); /* success/failure */
+ struct xfs_owner_info *oinfo);/* extent owner */
int /* error */
xfs_alloc_lookup_ge(
@@ -236,5 +203,9 @@ xfs_alloc_get_rec(
int xfs_read_agf(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_agnumber_t agno, int flags, struct xfs_buf **bpp);
int xfs_alloc_fix_freelist(struct xfs_alloc_arg *args, int flags);
+int xfs_free_extent_fix_freelist(struct xfs_trans *tp, xfs_agnumber_t agno,
+ struct xfs_buf **agbp);
+
+xfs_extlen_t xfs_prealloc_blocks(struct xfs_mount *mp);
#endif /* __XFS_ALLOC_H__ */
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
index d9b42425291e..5ba2dac5e67c 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -212,17 +212,6 @@ xfs_allocbt_init_key_from_rec(
}
STATIC void
-xfs_allocbt_init_rec_from_key(
- union xfs_btree_key *key,
- union xfs_btree_rec *rec)
-{
- ASSERT(key->alloc.ar_startblock != 0);
-
- rec->alloc.ar_startblock = key->alloc.ar_startblock;
- rec->alloc.ar_blockcount = key->alloc.ar_blockcount;
-}
-
-STATIC void
xfs_allocbt_init_rec_from_cur(
struct xfs_btree_cur *cur,
union xfs_btree_rec *rec)
@@ -406,7 +395,6 @@ static const struct xfs_btree_ops xfs_allocbt_ops = {
.get_minrecs = xfs_allocbt_get_minrecs,
.get_maxrecs = xfs_allocbt_get_maxrecs,
.init_key_from_rec = xfs_allocbt_init_key_from_rec,
- .init_rec_from_key = xfs_allocbt_init_rec_from_key,
.init_rec_from_cur = xfs_allocbt_init_rec_from_cur,
.init_ptr_from_cur = xfs_allocbt_init_ptr_from_cur,
.key_diff = xfs_allocbt_key_diff,
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 4e126f41a0aa..af1ecb19121e 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -23,6 +23,7 @@
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_attr_sf.h"
@@ -203,7 +204,7 @@ xfs_attr_set(
{
struct xfs_mount *mp = dp->i_mount;
struct xfs_da_args args;
- struct xfs_bmap_free flist;
+ struct xfs_defer_ops dfops;
struct xfs_trans_res tres;
xfs_fsblock_t firstblock;
int rsvd = (flags & ATTR_ROOT) != 0;
@@ -221,7 +222,7 @@ xfs_attr_set(
args.value = value;
args.valuelen = valuelen;
args.firstblock = &firstblock;
- args.flist = &flist;
+ args.dfops = &dfops;
args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT;
args.total = xfs_attr_calc_size(&args, &local);
@@ -316,13 +317,13 @@ xfs_attr_set(
* It won't fit in the shortform, transform to a leaf block.
* GROT: another possible req'mt for a double-split btree op.
*/
- xfs_bmap_init(args.flist, args.firstblock);
+ xfs_defer_init(args.dfops, args.firstblock);
error = xfs_attr_shortform_to_leaf(&args);
if (!error)
- error = xfs_bmap_finish(&args.trans, args.flist, dp);
+ error = xfs_defer_finish(&args.trans, args.dfops, dp);
if (error) {
args.trans = NULL;
- xfs_bmap_cancel(&flist);
+ xfs_defer_cancel(&dfops);
goto out;
}
@@ -382,7 +383,7 @@ xfs_attr_remove(
{
struct xfs_mount *mp = dp->i_mount;
struct xfs_da_args args;
- struct xfs_bmap_free flist;
+ struct xfs_defer_ops dfops;
xfs_fsblock_t firstblock;
int error;
@@ -399,7 +400,7 @@ xfs_attr_remove(
return error;
args.firstblock = &firstblock;
- args.flist = &flist;
+ args.dfops = &dfops;
/*
* we have no control over the attribute names that userspace passes us
@@ -584,13 +585,13 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* Commit that transaction so that the node_addname() call
* can manage its own transactions.
*/
- xfs_bmap_init(args->flist, args->firstblock);
+ xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_node(args);
if (!error)
- error = xfs_bmap_finish(&args->trans, args->flist, dp);
+ error = xfs_defer_finish(&args->trans, args->dfops, dp);
if (error) {
args->trans = NULL;
- xfs_bmap_cancel(args->flist);
+ xfs_defer_cancel(args->dfops);
return error;
}
@@ -674,15 +675,15 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* If the result is small enough, shrink it all into the inode.
*/
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
- xfs_bmap_init(args->flist, args->firstblock);
+ xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (!error)
- error = xfs_bmap_finish(&args->trans,
- args->flist, dp);
+ error = xfs_defer_finish(&args->trans,
+ args->dfops, dp);
if (error) {
args->trans = NULL;
- xfs_bmap_cancel(args->flist);
+ xfs_defer_cancel(args->dfops);
return error;
}
}
@@ -737,14 +738,14 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
* If the result is small enough, shrink it all into the inode.
*/
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
- xfs_bmap_init(args->flist, args->firstblock);
+ xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (!error)
- error = xfs_bmap_finish(&args->trans, args->flist, dp);
+ error = xfs_defer_finish(&args->trans, args->dfops, dp);
if (error) {
args->trans = NULL;
- xfs_bmap_cancel(args->flist);
+ xfs_defer_cancel(args->dfops);
return error;
}
}
@@ -863,14 +864,14 @@ restart:
*/
xfs_da_state_free(state);
state = NULL;
- xfs_bmap_init(args->flist, args->firstblock);
+ xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_node(args);
if (!error)
- error = xfs_bmap_finish(&args->trans,
- args->flist, dp);
+ error = xfs_defer_finish(&args->trans,
+ args->dfops, dp);
if (error) {
args->trans = NULL;
- xfs_bmap_cancel(args->flist);
+ xfs_defer_cancel(args->dfops);
goto out;
}
@@ -891,13 +892,13 @@ restart:
* in the index/blkno/rmtblkno/rmtblkcnt fields and
* in the index2/blkno2/rmtblkno2/rmtblkcnt2 fields.
*/
- xfs_bmap_init(args->flist, args->firstblock);
+ xfs_defer_init(args->dfops, args->firstblock);
error = xfs_da3_split(state);
if (!error)
- error = xfs_bmap_finish(&args->trans, args->flist, dp);
+ error = xfs_defer_finish(&args->trans, args->dfops, dp);
if (error) {
args->trans = NULL;
- xfs_bmap_cancel(args->flist);
+ xfs_defer_cancel(args->dfops);
goto out;
}
} else {
@@ -990,14 +991,14 @@ restart:
* Check to see if the tree needs to be collapsed.
*/
if (retval && (state->path.active > 1)) {
- xfs_bmap_init(args->flist, args->firstblock);
+ xfs_defer_init(args->dfops, args->firstblock);
error = xfs_da3_join(state);
if (!error)
- error = xfs_bmap_finish(&args->trans,
- args->flist, dp);
+ error = xfs_defer_finish(&args->trans,
+ args->dfops, dp);
if (error) {
args->trans = NULL;
- xfs_bmap_cancel(args->flist);
+ xfs_defer_cancel(args->dfops);
goto out;
}
}
@@ -1113,13 +1114,13 @@ xfs_attr_node_removename(xfs_da_args_t *args)
* Check to see if the tree needs to be collapsed.
*/
if (retval && (state->path.active > 1)) {
- xfs_bmap_init(args->flist, args->firstblock);
+ xfs_defer_init(args->dfops, args->firstblock);
error = xfs_da3_join(state);
if (!error)
- error = xfs_bmap_finish(&args->trans, args->flist, dp);
+ error = xfs_defer_finish(&args->trans, args->dfops, dp);
if (error) {
args->trans = NULL;
- xfs_bmap_cancel(args->flist);
+ xfs_defer_cancel(args->dfops);
goto out;
}
/*
@@ -1146,15 +1147,15 @@ xfs_attr_node_removename(xfs_da_args_t *args)
goto out;
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
- xfs_bmap_init(args->flist, args->firstblock);
+ xfs_defer_init(args->dfops, args->firstblock);
error = xfs_attr3_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (!error)
- error = xfs_bmap_finish(&args->trans,
- args->flist, dp);
+ error = xfs_defer_finish(&args->trans,
+ args->dfops, dp);
if (error) {
args->trans = NULL;
- xfs_bmap_cancel(args->flist);
+ xfs_defer_cancel(args->dfops);
goto out;
}
} else
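
Every hunk in this file follows one template: stage the operation on the
deferred-ops list, then finish or cancel it atomically with the transaction. A
condensed sketch of that template (example_deferred_step() and the step
callback are hypothetical):

static int example_deferred_step(struct xfs_da_args *args,
				 struct xfs_inode *dp,
				 int (*step)(struct xfs_da_args *))
{
	int error;

	xfs_defer_init(args->dfops, args->firstblock);
	error = step(args);			/* e.g. xfs_attr3_leaf_to_node */
	if (!error)
		error = xfs_defer_finish(&args->trans, args->dfops, dp);
	if (error) {
		args->trans = NULL;		/* finish may have freed it */
		xfs_defer_cancel(args->dfops);
	}
	return error;
}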
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 01a5ecfedfcf..8ea91f363093 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -792,7 +792,7 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
nargs.dp = dp;
nargs.geo = args->geo;
nargs.firstblock = args->firstblock;
- nargs.flist = args->flist;
+ nargs.dfops = args->dfops;
nargs.total = args->total;
nargs.whichfork = XFS_ATTR_FORK;
nargs.trans = args->trans;
@@ -922,7 +922,7 @@ xfs_attr3_leaf_to_shortform(
nargs.geo = args->geo;
nargs.dp = dp;
nargs.firstblock = args->firstblock;
- nargs.flist = args->flist;
+ nargs.dfops = args->dfops;
nargs.total = args->total;
nargs.whichfork = XFS_ATTR_FORK;
nargs.trans = args->trans;
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h
index 882c8d338891..4f2aed04f827 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.h
+++ b/fs/xfs/libxfs/xfs_attr_leaf.h
@@ -50,7 +50,6 @@ int xfs_attr_shortform_lookup(struct xfs_da_args *args);
int xfs_attr_shortform_getvalue(struct xfs_da_args *args);
int xfs_attr_shortform_to_leaf(struct xfs_da_args *args);
int xfs_attr_shortform_remove(struct xfs_da_args *args);
-int xfs_attr_shortform_list(struct xfs_attr_list_context *context);
int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp);
int xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes);
void xfs_attr_fork_remove(struct xfs_inode *ip, struct xfs_trans *tp);
@@ -88,8 +87,6 @@ int xfs_attr3_leaf_toosmall(struct xfs_da_state *state, int *retval);
void xfs_attr3_leaf_unbalance(struct xfs_da_state *state,
struct xfs_da_state_blk *drop_blk,
struct xfs_da_state_blk *save_blk);
-int xfs_attr3_root_inactive(struct xfs_trans **trans, struct xfs_inode *dp);
-
/*
* Utility routines.
*/
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index a572532a55cd..d52f525f5b2d 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -24,6 +24,7 @@
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
@@ -460,16 +461,16 @@ xfs_attr_rmtval_set(
* extent and then crash then the block may not contain the
* correct metadata after log recovery occurs.
*/
- xfs_bmap_init(args->flist, args->firstblock);
+ xfs_defer_init(args->dfops, args->firstblock);
nmap = 1;
error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno,
blkcnt, XFS_BMAPI_ATTRFORK, args->firstblock,
- args->total, &map, &nmap, args->flist);
+ args->total, &map, &nmap, args->dfops);
if (!error)
- error = xfs_bmap_finish(&args->trans, args->flist, dp);
+ error = xfs_defer_finish(&args->trans, args->dfops, dp);
if (error) {
args->trans = NULL;
- xfs_bmap_cancel(args->flist);
+ xfs_defer_cancel(args->dfops);
return error;
}
@@ -503,7 +504,7 @@ xfs_attr_rmtval_set(
ASSERT(blkcnt > 0);
- xfs_bmap_init(args->flist, args->firstblock);
+ xfs_defer_init(args->dfops, args->firstblock);
nmap = 1;
error = xfs_bmapi_read(dp, (xfs_fileoff_t)lblkno,
blkcnt, &map, &nmap,
@@ -603,16 +604,16 @@ xfs_attr_rmtval_remove(
blkcnt = args->rmtblkcnt;
done = 0;
while (!done) {
- xfs_bmap_init(args->flist, args->firstblock);
+ xfs_defer_init(args->dfops, args->firstblock);
error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
XFS_BMAPI_ATTRFORK, 1, args->firstblock,
- args->flist, &done);
+ args->dfops, &done);
if (!error)
- error = xfs_bmap_finish(&args->trans, args->flist,
+ error = xfs_defer_finish(&args->trans, args->dfops,
args->dp);
if (error) {
args->trans = NULL;
- xfs_bmap_cancel(args->flist);
+ xfs_defer_cancel(args->dfops);
return error;
}
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 932381caef1b..b060bca93402 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -24,6 +24,7 @@
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
@@ -45,6 +46,7 @@
#include "xfs_symlink.h"
#include "xfs_attr_leaf.h"
#include "xfs_filestream.h"
+#include "xfs_rmap.h"
kmem_zone_t *xfs_bmap_free_item_zone;
@@ -570,14 +572,13 @@ xfs_bmap_validate_ret(
*/
void
xfs_bmap_add_free(
- xfs_fsblock_t bno, /* fs block number of extent */
- xfs_filblks_t len, /* length of extent */
- xfs_bmap_free_t *flist, /* list of extents */
- xfs_mount_t *mp) /* mount point structure */
+ struct xfs_mount *mp,
+ struct xfs_defer_ops *dfops,
+ xfs_fsblock_t bno,
+ xfs_filblks_t len,
+ struct xfs_owner_info *oinfo)
{
- xfs_bmap_free_item_t *cur; /* current (next) element */
- xfs_bmap_free_item_t *new; /* new element */
- xfs_bmap_free_item_t *prev; /* previous element */
+ struct xfs_extent_free_item *new; /* new element */
#ifdef DEBUG
xfs_agnumber_t agno;
xfs_agblock_t agbno;
@@ -594,59 +595,17 @@ xfs_bmap_add_free(
ASSERT(agbno + len <= mp->m_sb.sb_agblocks);
#endif
ASSERT(xfs_bmap_free_item_zone != NULL);
- new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
- new->xbfi_startblock = bno;
- new->xbfi_blockcount = (xfs_extlen_t)len;
- for (prev = NULL, cur = flist->xbf_first;
- cur != NULL;
- prev = cur, cur = cur->xbfi_next) {
- if (cur->xbfi_startblock >= bno)
- break;
- }
- if (prev)
- prev->xbfi_next = new;
- else
- flist->xbf_first = new;
- new->xbfi_next = cur;
- flist->xbf_count++;
-}
-/*
- * Remove the entry "free" from the free item list. Prev points to the
- * previous entry, unless "free" is the head of the list.
- */
-void
-xfs_bmap_del_free(
- xfs_bmap_free_t *flist, /* free item list header */
- xfs_bmap_free_item_t *prev, /* previous item on list, if any */
- xfs_bmap_free_item_t *free) /* list item to be freed */
-{
- if (prev)
- prev->xbfi_next = free->xbfi_next;
+ new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
+ new->xefi_startblock = bno;
+ new->xefi_blockcount = (xfs_extlen_t)len;
+ if (oinfo)
+ new->xefi_oinfo = *oinfo;
else
- flist->xbf_first = free->xbfi_next;
- flist->xbf_count--;
- kmem_zone_free(xfs_bmap_free_item_zone, free);
-}
-
-/*
- * Free up any items left in the list.
- */
-void
-xfs_bmap_cancel(
- xfs_bmap_free_t *flist) /* list of bmap_free_items */
-{
- xfs_bmap_free_item_t *free; /* free list item */
- xfs_bmap_free_item_t *next;
-
- if (flist->xbf_count == 0)
- return;
- ASSERT(flist->xbf_first != NULL);
- for (free = flist->xbf_first; free; free = next) {
- next = free->xbfi_next;
- xfs_bmap_del_free(flist, NULL, free);
- }
- ASSERT(flist->xbf_count == 0);
+ xfs_rmap_skip_owner_update(&new->xefi_oinfo);
+ trace_xfs_bmap_free_defer(mp, XFS_FSB_TO_AGNO(mp, bno), 0,
+ XFS_FSB_TO_AGBNO(mp, bno), len);
+ xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}
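
With the open-coded sorted list gone, freeing a block becomes "fill an item,
hand it to the defer machinery", paired with an explicit owner. A sketch of a
typical caller, using the bmbt-owner helper from the hunk just below (the
wrapper function itself is hypothetical):

static void defer_free_bmbt_block(struct xfs_mount *mp,
				  struct xfs_defer_ops *dfops,
				  struct xfs_inode *ip, int whichfork,
				  xfs_fsblock_t cbno)
{
	struct xfs_owner_info oinfo;

	/* The freed block was part of this inode's bmap btree. */
	xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
	xfs_bmap_add_free(mp, dfops, cbno, 1, &oinfo);
}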
/*
@@ -676,6 +635,7 @@ xfs_bmap_btree_to_extents(
xfs_mount_t *mp; /* mount point structure */
__be64 *pp; /* ptr to block address */
struct xfs_btree_block *rblock;/* root btree block */
+ struct xfs_owner_info oinfo;
mp = ip->i_mount;
ifp = XFS_IFORK_PTR(ip, whichfork);
@@ -699,7 +659,8 @@ xfs_bmap_btree_to_extents(
cblock = XFS_BUF_TO_BLOCK(cbp);
if ((error = xfs_btree_check_block(cur, cblock, 0, cbp)))
return error;
- xfs_bmap_add_free(cbno, 1, cur->bc_private.b.flist, mp);
+ xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
+ xfs_bmap_add_free(mp, cur->bc_private.b.dfops, cbno, 1, &oinfo);
ip->i_d.di_nblocks--;
xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, -1L);
xfs_trans_binval(tp, cbp);
@@ -722,7 +683,7 @@ xfs_bmap_extents_to_btree(
xfs_trans_t *tp, /* transaction pointer */
xfs_inode_t *ip, /* incore inode pointer */
xfs_fsblock_t *firstblock, /* first-block-allocated */
- xfs_bmap_free_t *flist, /* blocks freed in xaction */
+ struct xfs_defer_ops *dfops, /* blocks freed in xaction */
xfs_btree_cur_t **curp, /* cursor returned to caller */
int wasdel, /* converting a delayed alloc */
int *logflagsp, /* inode logging flags */
@@ -771,7 +732,7 @@ xfs_bmap_extents_to_btree(
*/
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_private.b.firstblock = *firstblock;
- cur->bc_private.b.flist = flist;
+ cur->bc_private.b.dfops = dfops;
cur->bc_private.b.flags = wasdel ? XFS_BTCUR_BPRV_WASDEL : 0;
/*
* Convert to a btree with two levels, one record in root.
@@ -780,11 +741,12 @@ xfs_bmap_extents_to_btree(
memset(&args, 0, sizeof(args));
args.tp = tp;
args.mp = mp;
+ xfs_rmap_ino_bmbt_owner(&args.oinfo, ip->i_ino, whichfork);
args.firstblock = *firstblock;
if (*firstblock == NULLFSBLOCK) {
args.type = XFS_ALLOCTYPE_START_BNO;
args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
- } else if (flist->xbf_low) {
+ } else if (dfops->dop_low) {
args.type = XFS_ALLOCTYPE_START_BNO;
args.fsbno = *firstblock;
} else {
@@ -805,7 +767,7 @@ xfs_bmap_extents_to_btree(
ASSERT(args.fsbno != NULLFSBLOCK);
ASSERT(*firstblock == NULLFSBLOCK ||
args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) ||
- (flist->xbf_low &&
+ (dfops->dop_low &&
args.agno > XFS_FSB_TO_AGNO(mp, *firstblock)));
*firstblock = cur->bc_private.b.firstblock = args.fsbno;
cur->bc_private.b.allocated++;
@@ -926,6 +888,7 @@ xfs_bmap_local_to_extents(
memset(&args, 0, sizeof(args));
args.tp = tp;
args.mp = ip->i_mount;
+ xfs_rmap_ino_owner(&args.oinfo, ip->i_ino, whichfork, 0);
args.firstblock = *firstblock;
/*
* Allocate a block. We know we need only one, since the
@@ -990,7 +953,7 @@ xfs_bmap_add_attrfork_btree(
xfs_trans_t *tp, /* transaction pointer */
xfs_inode_t *ip, /* incore inode pointer */
xfs_fsblock_t *firstblock, /* first block allocated */
- xfs_bmap_free_t *flist, /* blocks to free at commit */
+ struct xfs_defer_ops *dfops, /* blocks to free at commit */
int *flags) /* inode logging flags */
{
xfs_btree_cur_t *cur; /* btree cursor */
@@ -1003,7 +966,7 @@ xfs_bmap_add_attrfork_btree(
*flags |= XFS_ILOG_DBROOT;
else {
cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
- cur->bc_private.b.flist = flist;
+ cur->bc_private.b.dfops = dfops;
cur->bc_private.b.firstblock = *firstblock;
if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
goto error0;
@@ -1033,7 +996,7 @@ xfs_bmap_add_attrfork_extents(
xfs_trans_t *tp, /* transaction pointer */
xfs_inode_t *ip, /* incore inode pointer */
xfs_fsblock_t *firstblock, /* first block allocated */
- xfs_bmap_free_t *flist, /* blocks to free at commit */
+ struct xfs_defer_ops *dfops, /* blocks to free at commit */
int *flags) /* inode logging flags */
{
xfs_btree_cur_t *cur; /* bmap btree cursor */
@@ -1042,7 +1005,7 @@ xfs_bmap_add_attrfork_extents(
if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
return 0;
cur = NULL;
- error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0,
+ error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops, &cur, 0,
flags, XFS_DATA_FORK);
if (cur) {
cur->bc_private.b.allocated = 0;
@@ -1068,7 +1031,7 @@ xfs_bmap_add_attrfork_local(
xfs_trans_t *tp, /* transaction pointer */
xfs_inode_t *ip, /* incore inode pointer */
xfs_fsblock_t *firstblock, /* first block allocated */
- xfs_bmap_free_t *flist, /* blocks to free at commit */
+ struct xfs_defer_ops *dfops, /* blocks to free at commit */
int *flags) /* inode logging flags */
{
xfs_da_args_t dargs; /* args for dir/attr code */
@@ -1081,7 +1044,7 @@ xfs_bmap_add_attrfork_local(
dargs.geo = ip->i_mount->m_dir_geo;
dargs.dp = ip;
dargs.firstblock = firstblock;
- dargs.flist = flist;
+ dargs.dfops = dfops;
dargs.total = dargs.geo->fsbcount;
dargs.whichfork = XFS_DATA_FORK;
dargs.trans = tp;
@@ -1109,7 +1072,7 @@ xfs_bmap_add_attrfork(
int rsvd) /* xact may use reserved blks */
{
xfs_fsblock_t firstblock; /* 1st block/ag allocated */
- xfs_bmap_free_t flist; /* freed extent records */
+ struct xfs_defer_ops dfops; /* freed extent records */
xfs_mount_t *mp; /* mount structure */
xfs_trans_t *tp; /* transaction pointer */
int blks; /* space reservation */
@@ -1175,18 +1138,18 @@ xfs_bmap_add_attrfork(
ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
ip->i_afp->if_flags = XFS_IFEXTENTS;
logflags = 0;
- xfs_bmap_init(&flist, &firstblock);
+ xfs_defer_init(&dfops, &firstblock);
switch (ip->i_d.di_format) {
case XFS_DINODE_FMT_LOCAL:
- error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist,
+ error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &dfops,
&logflags);
break;
case XFS_DINODE_FMT_EXTENTS:
error = xfs_bmap_add_attrfork_extents(tp, ip, &firstblock,
- &flist, &logflags);
+ &dfops, &logflags);
break;
case XFS_DINODE_FMT_BTREE:
- error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &flist,
+ error = xfs_bmap_add_attrfork_btree(tp, ip, &firstblock, &dfops,
&logflags);
break;
default:
@@ -1215,7 +1178,7 @@ xfs_bmap_add_attrfork(
xfs_log_sb(tp);
}
- error = xfs_bmap_finish(&tp, &flist, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto bmap_cancel;
error = xfs_trans_commit(tp);
@@ -1223,7 +1186,7 @@ xfs_bmap_add_attrfork(
return error;
bmap_cancel:
- xfs_bmap_cancel(&flist);
+ xfs_defer_cancel(&dfops);
trans_cancel:
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -2020,7 +1983,7 @@ xfs_bmap_add_extent_delay_real(
if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
- bma->firstblock, bma->flist,
+ bma->firstblock, bma->dfops,
&bma->cur, 1, &tmp_rval, whichfork);
rval |= tmp_rval;
if (error)
@@ -2104,7 +2067,7 @@ xfs_bmap_add_extent_delay_real(
if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
- bma->firstblock, bma->flist, &bma->cur, 1,
+ bma->firstblock, bma->dfops, &bma->cur, 1,
&tmp_rval, whichfork);
rval |= tmp_rval;
if (error)
@@ -2173,7 +2136,7 @@ xfs_bmap_add_extent_delay_real(
if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
- bma->firstblock, bma->flist, &bma->cur,
+ bma->firstblock, bma->dfops, &bma->cur,
1, &tmp_rval, whichfork);
rval |= tmp_rval;
if (error)
@@ -2216,13 +2179,18 @@ xfs_bmap_add_extent_delay_real(
ASSERT(0);
}
+ /* add reverse mapping */
+ error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new);
+ if (error)
+ goto done;
+
/* convert to a btree if necessary */
if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
int tmp_logflags; /* partial log flag return val */
ASSERT(bma->cur == NULL);
error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
- bma->firstblock, bma->flist, &bma->cur,
+ bma->firstblock, bma->dfops, &bma->cur,
da_old > 0, &tmp_logflags, whichfork);
bma->logflags |= tmp_logflags;
if (error)
@@ -2264,7 +2232,7 @@ xfs_bmap_add_extent_unwritten_real(
xfs_btree_cur_t **curp, /* if *curp is null, not a btree */
xfs_bmbt_irec_t *new, /* new data to add to file extents */
xfs_fsblock_t *first, /* pointer to firstblock variable */
- xfs_bmap_free_t *flist, /* list of extents to be freed */
+ struct xfs_defer_ops *dfops, /* list of extents to be freed */
int *logflagsp) /* inode logging flags */
{
xfs_btree_cur_t *cur; /* btree cursor */
@@ -2752,12 +2720,17 @@ xfs_bmap_add_extent_unwritten_real(
ASSERT(0);
}
+ /* update reverse mappings */
+ error = xfs_rmap_convert_extent(mp, dfops, ip, XFS_DATA_FORK, new);
+ if (error)
+ goto done;
+
/* convert to a btree if necessary */
if (xfs_bmap_needs_btree(ip, XFS_DATA_FORK)) {
int tmp_logflags; /* partial log flag return val */
ASSERT(cur == NULL);
- error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur,
+ error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, &cur,
0, &tmp_logflags, XFS_DATA_FORK);
*logflagsp |= tmp_logflags;
if (error)
@@ -3144,13 +3117,18 @@ xfs_bmap_add_extent_hole_real(
break;
}
+ /* add reverse mapping */
+ error = xfs_rmap_map_extent(mp, bma->dfops, bma->ip, whichfork, new);
+ if (error)
+ goto done;
+
/* convert to a btree if necessary */
if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
int tmp_logflags; /* partial log flag return val */
ASSERT(bma->cur == NULL);
error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
- bma->firstblock, bma->flist, &bma->cur,
+ bma->firstblock, bma->dfops, &bma->cur,
0, &tmp_logflags, whichfork);
bma->logflags |= tmp_logflags;
if (error)
@@ -3708,9 +3686,10 @@ xfs_bmap_btalloc(
args.tp = ap->tp;
args.mp = mp;
args.fsbno = ap->blkno;
+ xfs_rmap_skip_owner_update(&args.oinfo);
/* Trim the allocation back to the maximum an AG can fit. */
- args.maxlen = MIN(ap->length, XFS_ALLOC_AG_MAX_USABLE(mp));
+ args.maxlen = MIN(ap->length, mp->m_ag_max_usable);
args.firstblock = *ap->firstblock;
blen = 0;
if (nullfb) {
@@ -3725,7 +3704,7 @@ xfs_bmap_btalloc(
error = xfs_bmap_btalloc_nullfb(ap, &args, &blen);
if (error)
return error;
- } else if (ap->flist->xbf_low) {
+ } else if (ap->dfops->dop_low) {
if (xfs_inode_is_filestream(ap->ip))
args.type = XFS_ALLOCTYPE_FIRST_AG;
else
@@ -3758,7 +3737,7 @@ xfs_bmap_btalloc(
* is >= the stripe unit and the allocation offset is
* at the end of file.
*/
- if (!ap->flist->xbf_low && ap->aeof) {
+ if (!ap->dfops->dop_low && ap->aeof) {
if (!ap->offset) {
args.alignment = stripe_align;
atype = args.type;
@@ -3851,7 +3830,7 @@ xfs_bmap_btalloc(
args.minleft = 0;
if ((error = xfs_alloc_vextent(&args)))
return error;
- ap->flist->xbf_low = 1;
+ ap->dfops->dop_low = true;
}
if (args.fsbno != NULLFSBLOCK) {
/*
@@ -3861,7 +3840,7 @@ xfs_bmap_btalloc(
ASSERT(*ap->firstblock == NULLFSBLOCK ||
XFS_FSB_TO_AGNO(mp, *ap->firstblock) ==
XFS_FSB_TO_AGNO(mp, args.fsbno) ||
- (ap->flist->xbf_low &&
+ (ap->dfops->dop_low &&
XFS_FSB_TO_AGNO(mp, *ap->firstblock) <
XFS_FSB_TO_AGNO(mp, args.fsbno)));
@@ -3869,7 +3848,7 @@ xfs_bmap_btalloc(
if (*ap->firstblock == NULLFSBLOCK)
*ap->firstblock = args.fsbno;
ASSERT(nullfb || fb_agno == args.agno ||
- (ap->flist->xbf_low && fb_agno < args.agno));
+ (ap->dfops->dop_low && fb_agno < args.agno));
ap->length = args.len;
ap->ip->i_d.di_nblocks += args.len;
xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
@@ -4336,7 +4315,7 @@ xfs_bmapi_allocate(
if (error)
return error;
- if (bma->flist->xbf_low)
+ if (bma->dfops->dop_low)
bma->minleft = 0;
if (bma->cur)
bma->cur->bc_private.b.firstblock = *bma->firstblock;
@@ -4345,7 +4324,7 @@ xfs_bmapi_allocate(
if ((ifp->if_flags & XFS_IFBROOT) && !bma->cur) {
bma->cur = xfs_bmbt_init_cursor(mp, bma->tp, bma->ip, whichfork);
bma->cur->bc_private.b.firstblock = *bma->firstblock;
- bma->cur->bc_private.b.flist = bma->flist;
+ bma->cur->bc_private.b.dfops = bma->dfops;
}
/*
* Bump the number of extents we've allocated
@@ -4426,7 +4405,7 @@ xfs_bmapi_convert_unwritten(
bma->cur = xfs_bmbt_init_cursor(bma->ip->i_mount, bma->tp,
bma->ip, whichfork);
bma->cur->bc_private.b.firstblock = *bma->firstblock;
- bma->cur->bc_private.b.flist = bma->flist;
+ bma->cur->bc_private.b.dfops = bma->dfops;
}
mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN)
? XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
@@ -4443,7 +4422,7 @@ xfs_bmapi_convert_unwritten(
}
error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, &bma->idx,
- &bma->cur, mval, bma->firstblock, bma->flist,
+ &bma->cur, mval, bma->firstblock, bma->dfops,
&tmp_logflags);
/*
* Log the inode core unconditionally in the unwritten extent conversion
@@ -4497,7 +4476,7 @@ xfs_bmapi_write(
xfs_extlen_t total, /* total blocks needed */
struct xfs_bmbt_irec *mval, /* output: map values */
int *nmap, /* i/o: mval size/count */
- struct xfs_bmap_free *flist) /* i/o: list extents to free */
+ struct xfs_defer_ops *dfops) /* i/o: list extents to free */
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_ifork *ifp;
@@ -4587,7 +4566,7 @@ xfs_bmapi_write(
bma.ip = ip;
bma.total = total;
bma.userdata = 0;
- bma.flist = flist;
+ bma.dfops = dfops;
bma.firstblock = firstblock;
while (bno < end && n < *nmap) {
@@ -4701,7 +4680,7 @@ error0:
XFS_FSB_TO_AGNO(mp, *firstblock) ==
XFS_FSB_TO_AGNO(mp,
bma.cur->bc_private.b.firstblock) ||
- (flist->xbf_low &&
+ (dfops->dop_low &&
XFS_FSB_TO_AGNO(mp, *firstblock) <
XFS_FSB_TO_AGNO(mp,
bma.cur->bc_private.b.firstblock)));
@@ -4785,7 +4764,7 @@ xfs_bmap_del_extent(
xfs_inode_t *ip, /* incore inode pointer */
xfs_trans_t *tp, /* current transaction pointer */
xfs_extnum_t *idx, /* extent number to update/delete */
- xfs_bmap_free_t *flist, /* list of extents to be freed */
+ struct xfs_defer_ops *dfops, /* list of extents to be freed */
xfs_btree_cur_t *cur, /* if null, not a btree */
xfs_bmbt_irec_t *del, /* data to remove from extents */
int *logflagsp, /* inode logging flags */
@@ -4887,6 +4866,7 @@ xfs_bmap_del_extent(
nblks = 0;
do_fx = 0;
}
+
/*
* Set flag value to use in switch statement.
* Left-contig is 2, right-contig is 1.
@@ -5069,12 +5049,20 @@ xfs_bmap_del_extent(
++*idx;
break;
}
+
+ /* remove reverse mapping */
+ if (!delay) {
+ error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, del);
+ if (error)
+ goto done;
+ }
+
/*
* If we need to, add to list of extents to delete.
*/
if (do_fx)
- xfs_bmap_add_free(del->br_startblock, del->br_blockcount, flist,
- mp);
+ xfs_bmap_add_free(mp, dfops, del->br_startblock,
+ del->br_blockcount, NULL);
/*
* Adjust inode # blocks in the file.
*/
@@ -5114,7 +5102,7 @@ xfs_bunmapi(
xfs_extnum_t nexts, /* number of extents max */
xfs_fsblock_t *firstblock, /* first allocated block
controls a.g. for allocs */
- xfs_bmap_free_t *flist, /* i/o: list extents to free */
+ struct xfs_defer_ops *dfops, /* i/o: list extents to free */
int *done) /* set if not done yet */
{
xfs_btree_cur_t *cur; /* bmap btree cursor */
@@ -5187,7 +5175,7 @@ xfs_bunmapi(
ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE);
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_private.b.firstblock = *firstblock;
- cur->bc_private.b.flist = flist;
+ cur->bc_private.b.dfops = dfops;
cur->bc_private.b.flags = 0;
} else
cur = NULL;
@@ -5196,8 +5184,10 @@ xfs_bunmapi(
/*
* Synchronize by locking the bitmap inode.
*/
- xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
+ xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
xfs_trans_ijoin(tp, mp->m_rbmip, XFS_ILOCK_EXCL);
+ xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
+ xfs_trans_ijoin(tp, mp->m_rsumip, XFS_ILOCK_EXCL);
}
extno = 0;
@@ -5279,7 +5269,7 @@ xfs_bunmapi(
}
del.br_state = XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent_unwritten_real(tp, ip,
- &lastx, &cur, &del, firstblock, flist,
+ &lastx, &cur, &del, firstblock, dfops,
&logflags);
if (error)
goto error0;
@@ -5338,7 +5328,7 @@ xfs_bunmapi(
lastx--;
error = xfs_bmap_add_extent_unwritten_real(tp,
ip, &lastx, &cur, &prev,
- firstblock, flist, &logflags);
+ firstblock, dfops, &logflags);
if (error)
goto error0;
goto nodelete;
@@ -5347,7 +5337,7 @@ xfs_bunmapi(
del.br_state = XFS_EXT_UNWRITTEN;
error = xfs_bmap_add_extent_unwritten_real(tp,
ip, &lastx, &cur, &del,
- firstblock, flist, &logflags);
+ firstblock, dfops, &logflags);
if (error)
goto error0;
goto nodelete;
@@ -5405,7 +5395,7 @@ xfs_bunmapi(
} else if (cur)
cur->bc_private.b.flags &= ~XFS_BTCUR_BPRV_WASDEL;
- error = xfs_bmap_del_extent(ip, tp, &lastx, flist, cur, &del,
+ error = xfs_bmap_del_extent(ip, tp, &lastx, dfops, cur, &del,
&tmp_logflags, whichfork);
logflags |= tmp_logflags;
if (error)
@@ -5439,7 +5429,7 @@ nodelete:
*/
if (xfs_bmap_needs_btree(ip, whichfork)) {
ASSERT(cur == NULL);
- error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist,
+ error = xfs_bmap_extents_to_btree(tp, ip, firstblock, dfops,
&cur, 0, &tmp_logflags, whichfork);
logflags |= tmp_logflags;
if (error)
@@ -5606,7 +5596,8 @@ xfs_bmse_shift_one(
struct xfs_bmbt_rec_host *gotp,
struct xfs_btree_cur *cur,
int *logflags,
- enum shift_direction direction)
+ enum shift_direction direction,
+ struct xfs_defer_ops *dfops)
{
struct xfs_ifork *ifp;
struct xfs_mount *mp;
@@ -5654,9 +5645,13 @@ xfs_bmse_shift_one(
/* check whether to merge the extent or shift it down */
if (xfs_bmse_can_merge(&adj_irec, &got,
offset_shift_fsb)) {
- return xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
- *current_ext, gotp, adj_irecp,
- cur, logflags);
+ error = xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
+ *current_ext, gotp, adj_irecp,
+ cur, logflags);
+ if (error)
+ return error;
+ adj_irec = got;
+ goto update_rmap;
}
} else {
startoff = got.br_startoff + offset_shift_fsb;
@@ -5693,9 +5688,10 @@ update_current_ext:
(*current_ext)--;
xfs_bmbt_set_startoff(gotp, startoff);
*logflags |= XFS_ILOG_CORE;
+ adj_irec = got;
if (!cur) {
*logflags |= XFS_ILOG_DEXT;
- return 0;
+ goto update_rmap;
}
error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
@@ -5705,8 +5701,18 @@ update_current_ext:
XFS_WANT_CORRUPTED_RETURN(mp, i == 1);
got.br_startoff = startoff;
- return xfs_bmbt_update(cur, got.br_startoff, got.br_startblock,
- got.br_blockcount, got.br_state);
+ error = xfs_bmbt_update(cur, got.br_startoff, got.br_startblock,
+ got.br_blockcount, got.br_state);
+ if (error)
+ return error;
+
+update_rmap:
+ /* update reverse mapping */
+ error = xfs_rmap_unmap_extent(mp, dfops, ip, whichfork, &adj_irec);
+ if (error)
+ return error;
+ adj_irec.br_startoff = startoff;
+ return xfs_rmap_map_extent(mp, dfops, ip, whichfork, &adj_irec);
}
/*
@@ -5728,7 +5734,7 @@ xfs_bmap_shift_extents(
int *done,
xfs_fileoff_t stop_fsb,
xfs_fsblock_t *firstblock,
- struct xfs_bmap_free *flist,
+ struct xfs_defer_ops *dfops,
enum shift_direction direction,
int num_exts)
{
@@ -5773,7 +5779,7 @@ xfs_bmap_shift_extents(
if (ifp->if_flags & XFS_IFBROOT) {
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_private.b.firstblock = *firstblock;
- cur->bc_private.b.flist = flist;
+ cur->bc_private.b.dfops = dfops;
cur->bc_private.b.flags = 0;
}
@@ -5834,7 +5840,7 @@ xfs_bmap_shift_extents(
while (nexts++ < num_exts) {
error = xfs_bmse_shift_one(ip, whichfork, offset_shift_fsb,
&current_ext, gotp, cur, &logflags,
- direction);
+ direction, dfops);
if (error)
goto del_cursor;
/*
@@ -5882,7 +5888,7 @@ xfs_bmap_split_extent_at(
struct xfs_inode *ip,
xfs_fileoff_t split_fsb,
xfs_fsblock_t *firstfsb,
- struct xfs_bmap_free *free_list)
+ struct xfs_defer_ops *dfops)
{
int whichfork = XFS_DATA_FORK;
struct xfs_btree_cur *cur = NULL;
@@ -5944,7 +5950,7 @@ xfs_bmap_split_extent_at(
if (ifp->if_flags & XFS_IFBROOT) {
cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
cur->bc_private.b.firstblock = *firstfsb;
- cur->bc_private.b.flist = free_list;
+ cur->bc_private.b.dfops = dfops;
cur->bc_private.b.flags = 0;
error = xfs_bmbt_lookup_eq(cur, got.br_startoff,
got.br_startblock,
@@ -5997,7 +6003,7 @@ xfs_bmap_split_extent_at(
int tmp_logflags; /* partial log flag return val */
ASSERT(cur == NULL);
- error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, free_list,
+ error = xfs_bmap_extents_to_btree(tp, ip, firstfsb, dfops,
&cur, 0, &tmp_logflags, whichfork);
logflags |= tmp_logflags;
}
@@ -6021,7 +6027,7 @@ xfs_bmap_split_extent(
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
- struct xfs_bmap_free free_list;
+ struct xfs_defer_ops dfops;
xfs_fsblock_t firstfsb;
int error;
@@ -6033,21 +6039,21 @@ xfs_bmap_split_extent(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_bmap_init(&free_list, &firstfsb);
+ xfs_defer_init(&dfops, &firstfsb);
error = xfs_bmap_split_extent_at(tp, ip, split_fsb,
- &firstfsb, &free_list);
+ &firstfsb, &dfops);
if (error)
goto out;
- error = xfs_bmap_finish(&tp, &free_list, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto out;
return xfs_trans_commit(tp);
out:
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
xfs_trans_cancel(tp);
return error;
}
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 423a34e832bd..254034f96941 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -32,7 +32,7 @@ extern kmem_zone_t *xfs_bmap_free_item_zone;
*/
struct xfs_bmalloca {
xfs_fsblock_t *firstblock; /* i/o first block allocated */
- struct xfs_bmap_free *flist; /* bmap freelist */
+ struct xfs_defer_ops *dfops; /* bmap freelist */
struct xfs_trans *tp; /* transaction pointer */
struct xfs_inode *ip; /* incore inode pointer */
struct xfs_bmbt_irec prev; /* extent before the new one */
@@ -62,33 +62,13 @@ struct xfs_bmalloca {
* List of extents to be freed "later".
* The list is kept sorted on xbf_startblock.
*/
-typedef struct xfs_bmap_free_item
+struct xfs_extent_free_item
{
- xfs_fsblock_t xbfi_startblock;/* starting fs block number */
- xfs_extlen_t xbfi_blockcount;/* number of blocks in extent */
- struct xfs_bmap_free_item *xbfi_next; /* link to next entry */
-} xfs_bmap_free_item_t;
-
-/*
- * Header for free extent list.
- *
- * xbf_low is used by the allocator to activate the lowspace algorithm -
- * when free space is running low the extent allocator may choose to
- * allocate an extent from an AG without leaving sufficient space for
- * a btree split when inserting the new extent. In this case the allocator
- * will enable the lowspace algorithm which is supposed to allow further
- * allocations (such as btree splits and newroots) to allocate from
- * sequential AGs. In order to avoid locking AGs out of order the lowspace
- * algorithm will start searching for free space from AG 0. If the correct
- * transaction reservations have been made then this algorithm will eventually
- * find all the space it needs.
- */
-typedef struct xfs_bmap_free
-{
- xfs_bmap_free_item_t *xbf_first; /* list of to-be-free extents */
- int xbf_count; /* count of items on list */
- int xbf_low; /* alloc in low mode */
-} xfs_bmap_free_t;
+ xfs_fsblock_t xefi_startblock;/* starting fs block number */
+ xfs_extlen_t xefi_blockcount;/* number of blocks in extent */
+ struct list_head xefi_list;
+ struct xfs_owner_info xefi_oinfo; /* extent owner */
+};
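/*
 * Editorial sketch, not part of the patch: with the list_head-based item
 * above, queueing a to-be-freed extent reduces to filling in the fields and
 * handing xefi_list to the generic deferred-ops code, as the reworked
 * xfs_bmap_add_free() does.  The helper name is made up; the zone,
 * xfs_defer_add() and xfs_rmap_skip_owner_update() are the ones used
 * elsewhere in this series.
 */
static void
sketch_defer_free_extent(
	struct xfs_defer_ops		*dfops,
	xfs_fsblock_t			bno,
	xfs_filblks_t			len,
	struct xfs_owner_info		*oinfo)
{
	struct xfs_extent_free_item	*new;

	new = kmem_zone_alloc(xfs_bmap_free_item_zone, KM_SLEEP);
	new->xefi_startblock = bno;
	new->xefi_blockcount = (xfs_extlen_t)len;
	if (oinfo)
		new->xefi_oinfo = *oinfo;	/* record the extent's owner */
	else
		xfs_rmap_skip_owner_update(&new->xefi_oinfo);
	xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_FREE, &new->xefi_list);
}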
#define XFS_BMAP_MAX_NMAP 4
@@ -139,12 +119,6 @@ static inline int xfs_bmapi_aflag(int w)
#define DELAYSTARTBLOCK ((xfs_fsblock_t)-1LL)
#define HOLESTARTBLOCK ((xfs_fsblock_t)-2LL)
-static inline void xfs_bmap_init(xfs_bmap_free_t *flp, xfs_fsblock_t *fbp)
-{
- ((flp)->xbf_first = NULL, (flp)->xbf_count = 0, \
- (flp)->xbf_low = 0, *(fbp) = NULLFSBLOCK);
-}
-
/*
* Flags for xfs_bmap_add_extent*.
*/
@@ -191,11 +165,9 @@ void xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt,
int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
-void xfs_bmap_add_free(xfs_fsblock_t bno, xfs_filblks_t len,
- struct xfs_bmap_free *flist, struct xfs_mount *mp);
-void xfs_bmap_cancel(struct xfs_bmap_free *flist);
-int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
- struct xfs_inode *ip);
+void xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
+ xfs_fsblock_t bno, xfs_filblks_t len,
+ struct xfs_owner_info *oinfo);
void xfs_bmap_compute_maxlevels(struct xfs_mount *mp, int whichfork);
int xfs_bmap_first_unused(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_extlen_t len, xfs_fileoff_t *unused, int whichfork);
@@ -216,18 +188,18 @@ int xfs_bmapi_write(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t bno, xfs_filblks_t len, int flags,
xfs_fsblock_t *firstblock, xfs_extlen_t total,
struct xfs_bmbt_irec *mval, int *nmap,
- struct xfs_bmap_free *flist);
+ struct xfs_defer_ops *dfops);
int xfs_bunmapi(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t bno, xfs_filblks_t len, int flags,
xfs_extnum_t nexts, xfs_fsblock_t *firstblock,
- struct xfs_bmap_free *flist, int *done);
+ struct xfs_defer_ops *dfops, int *done);
int xfs_check_nostate_extents(struct xfs_ifork *ifp, xfs_extnum_t idx,
xfs_extnum_t num);
uint xfs_default_attroffset(struct xfs_inode *ip);
int xfs_bmap_shift_extents(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
int *done, xfs_fileoff_t stop_fsb, xfs_fsblock_t *firstblock,
- struct xfs_bmap_free *flist, enum shift_direction direction,
+ struct xfs_defer_ops *dfops, enum shift_direction direction,
int num_exts);
int xfs_bmap_split_extent(struct xfs_inode *ip, xfs_fileoff_t split_offset);
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index 6282f6e708af..cd85274e810c 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -23,6 +23,7 @@
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
@@ -34,6 +35,7 @@
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
+#include "xfs_rmap.h"
/*
* Determine the extent state.
@@ -406,11 +408,11 @@ xfs_bmbt_dup_cursor(
cur->bc_private.b.ip, cur->bc_private.b.whichfork);
/*
- * Copy the firstblock, flist, and flags values,
+ * Copy the firstblock, dfops, and flags values,
* since init cursor doesn't get them.
*/
new->bc_private.b.firstblock = cur->bc_private.b.firstblock;
- new->bc_private.b.flist = cur->bc_private.b.flist;
+ new->bc_private.b.dfops = cur->bc_private.b.dfops;
new->bc_private.b.flags = cur->bc_private.b.flags;
return new;
@@ -423,7 +425,7 @@ xfs_bmbt_update_cursor(
{
ASSERT((dst->bc_private.b.firstblock != NULLFSBLOCK) ||
(dst->bc_private.b.ip->i_d.di_flags & XFS_DIFLAG_REALTIME));
- ASSERT(dst->bc_private.b.flist == src->bc_private.b.flist);
+ ASSERT(dst->bc_private.b.dfops == src->bc_private.b.dfops);
dst->bc_private.b.allocated += src->bc_private.b.allocated;
dst->bc_private.b.firstblock = src->bc_private.b.firstblock;
@@ -446,6 +448,8 @@ xfs_bmbt_alloc_block(
args.mp = cur->bc_mp;
args.fsbno = cur->bc_private.b.firstblock;
args.firstblock = args.fsbno;
+ xfs_rmap_ino_bmbt_owner(&args.oinfo, cur->bc_private.b.ip->i_ino,
+ cur->bc_private.b.whichfork);
if (args.fsbno == NULLFSBLOCK) {
args.fsbno = be64_to_cpu(start->l);
@@ -462,7 +466,7 @@ xfs_bmbt_alloc_block(
* block allocation here and corrupt the filesystem.
*/
args.minleft = args.tp->t_blk_res;
- } else if (cur->bc_private.b.flist->xbf_low) {
+ } else if (cur->bc_private.b.dfops->dop_low) {
args.type = XFS_ALLOCTYPE_START_BNO;
} else {
args.type = XFS_ALLOCTYPE_NEAR_BNO;
@@ -490,7 +494,7 @@ xfs_bmbt_alloc_block(
error = xfs_alloc_vextent(&args);
if (error)
goto error0;
- cur->bc_private.b.flist->xbf_low = 1;
+ cur->bc_private.b.dfops->dop_low = true;
}
if (args.fsbno == NULLFSBLOCK) {
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
@@ -525,8 +529,10 @@ xfs_bmbt_free_block(
struct xfs_inode *ip = cur->bc_private.b.ip;
struct xfs_trans *tp = cur->bc_tp;
xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
+ struct xfs_owner_info oinfo;
- xfs_bmap_add_free(fsbno, 1, cur->bc_private.b.flist, mp);
+ xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, cur->bc_private.b.whichfork);
+ xfs_bmap_add_free(mp, cur->bc_private.b.dfops, fsbno, 1, &oinfo);
ip->i_d.di_nblocks--;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
@@ -600,17 +606,6 @@ xfs_bmbt_init_key_from_rec(
}
STATIC void
-xfs_bmbt_init_rec_from_key(
- union xfs_btree_key *key,
- union xfs_btree_rec *rec)
-{
- ASSERT(key->bmbt.br_startoff != 0);
-
- xfs_bmbt_disk_set_allf(&rec->bmbt, be64_to_cpu(key->bmbt.br_startoff),
- 0, 0, XFS_EXT_NORM);
-}
-
-STATIC void
xfs_bmbt_init_rec_from_cur(
struct xfs_btree_cur *cur,
union xfs_btree_rec *rec)
@@ -760,7 +755,6 @@ static const struct xfs_btree_ops xfs_bmbt_ops = {
.get_minrecs = xfs_bmbt_get_minrecs,
.get_dmaxrecs = xfs_bmbt_get_dmaxrecs,
.init_key_from_rec = xfs_bmbt_init_key_from_rec,
- .init_rec_from_key = xfs_bmbt_init_rec_from_key,
.init_rec_from_cur = xfs_bmbt_init_rec_from_cur,
.init_ptr_from_cur = xfs_bmbt_init_ptr_from_cur,
.key_diff = xfs_bmbt_key_diff,
@@ -800,7 +794,7 @@ xfs_bmbt_init_cursor(
cur->bc_private.b.forksize = XFS_IFORK_SIZE(ip, whichfork);
cur->bc_private.b.ip = ip;
cur->bc_private.b.firstblock = NULLFSBLOCK;
- cur->bc_private.b.flist = NULL;
+ cur->bc_private.b.dfops = NULL;
cur->bc_private.b.allocated = 0;
cur->bc_private.b.flags = 0;
cur->bc_private.b.whichfork = whichfork;
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 1f88e1ce770f..b5c213a051cd 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -23,6 +23,7 @@
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
@@ -43,15 +44,14 @@ kmem_zone_t *xfs_btree_cur_zone;
* Btree magic numbers.
*/
static const __uint32_t xfs_magics[2][XFS_BTNUM_MAX] = {
- { XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, XFS_BMAP_MAGIC, XFS_IBT_MAGIC,
+ { XFS_ABTB_MAGIC, XFS_ABTC_MAGIC, 0, XFS_BMAP_MAGIC, XFS_IBT_MAGIC,
XFS_FIBT_MAGIC },
- { XFS_ABTB_CRC_MAGIC, XFS_ABTC_CRC_MAGIC,
+ { XFS_ABTB_CRC_MAGIC, XFS_ABTC_CRC_MAGIC, XFS_RMAP_CRC_MAGIC,
XFS_BMAP_CRC_MAGIC, XFS_IBT_CRC_MAGIC, XFS_FIBT_CRC_MAGIC }
};
#define xfs_btree_magic(cur) \
xfs_magics[!!((cur)->bc_flags & XFS_BTREE_CRC_BLOCKS)][cur->bc_btnum]
-
STATIC int /* error (0 or EFSCORRUPTED) */
xfs_btree_check_lblock(
struct xfs_btree_cur *cur, /* btree cursor */
@@ -428,6 +428,50 @@ xfs_btree_dup_cursor(
* into a btree block (xfs_btree_*_offset) or return a pointer to the given
* record, key or pointer (xfs_btree_*_addr). Note that all addressing
* inside the btree block is done using indices starting at one, not zero!
+ *
+ * If XFS_BTREE_OVERLAPPING is set, then this btree supports keys containing
+ * overlapping intervals. In such a tree, records are still sorted lowest to
+ * highest and indexed by the smallest key value that refers to the record.
+ * However, nodes are different: each pointer has two associated keys -- one
+ * indexing the lowest key available in the block(s) below (the same behavior
+ * as the key in a regular btree) and another indexing the highest key
+ * available in the block(s) below. Because records are /not/ sorted by the
+ * highest key, all leaf block updates require us to compute the highest key
+ * that matches any record in the leaf and to recursively update the high keys
+ * in the nodes going further up in the tree, if necessary. Nodes look like
+ * this:
+ *
+ * +--------+-----+-----+-----+-----+-----+-------+-------+-----+
+ * Non-Leaf: | header | lo1 | hi1 | lo2 | hi2 | ... | ptr 1 | ptr 2 | ... |
+ * +--------+-----+-----+-----+-----+-----+-------+-------+-----+
+ *
+ * To perform an interval query on an overlapped tree, perform the usual
+ * depth-first search and use the low and high keys to decide if we can skip
+ * that particular node. If a leaf node is reached, return the records that
+ * intersect the interval. Note that an interval query may return numerous
+ * entries. For a non-overlapped tree, simply search for the record associated
+ * with the lowest key and iterate forward until a non-matching record is
+ * found. Section 14.3 ("Interval Trees") of _Introduction to Algorithms_ by
+ * Cormen, Leiserson, Rivest, and Stein (2nd or 3rd ed. only) discusses this in
+ * more detail.
+ *
+ * Why do we care about overlapping intervals? Let's say you have a bunch of
+ * reverse mapping records on a reflink filesystem:
+ *
+ * 1: +- file A startblock B offset C length D -----------+
+ * 2: +- file E startblock F offset G length H --------------+
+ * 3: +- file I startblock F offset J length K --+
+ * 4: +- file L... --+
+ *
+ * Now say we want to map block (B+D) into file A at offset (C+D). Ideally,
+ * we'd simply increment the length of record 1. But how do we find the record
+ * that ends at (B+D-1) (i.e. record 1)? A LE lookup of (B+D-1) would return
+ * record 3 because the keys are ordered first by startblock. An interval
+ * query would return records 1 and 2 because they both overlap (B+D-1), and
+ * from that we can pick out record 1 as the appropriate left neighbor.
+ *
+ * In the non-overlapped case you can do a LE lookup and decrement the cursor
+ * because a record's interval must end before the next record.
*/
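/*
 * Editorial sketch, not part of the patch: the pruning rule described above,
 * written out.  A subtree whose pointer carries the key pair [lo, hi] can
 * contain a record overlapping the query interval [qlo, qhi] only if
 * lo <= qhi and qlo <= hi.  The helper name is hypothetical; it assumes only
 * the diff_two_keys() contract (sign of key1 - key2) relied on by the new
 * code below.
 */
static bool
sketch_subtree_may_overlap(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*lo,	/* subtree low key */
	union xfs_btree_key	*hi,	/* subtree high key */
	union xfs_btree_key	*qlo,	/* query low key */
	union xfs_btree_key	*qhi)	/* query high key */
{
	/* lo <= qhi && qlo <= hi */
	return cur->bc_ops->diff_two_keys(cur, lo, qhi) <= 0 &&
	       cur->bc_ops->diff_two_keys(cur, qlo, hi) <= 0;
}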
/*
@@ -479,6 +523,18 @@ xfs_btree_key_offset(
}
/*
+ * Calculate offset of the n-th high key in a btree block.
+ */
+STATIC size_t
+xfs_btree_high_key_offset(
+ struct xfs_btree_cur *cur,
+ int n)
+{
+ return xfs_btree_block_len(cur) +
+ (n - 1) * cur->bc_ops->key_len + (cur->bc_ops->key_len / 2);
+}
+
+/*
* Calculate offset of the n-th block pointer in a btree block.
*/
STATIC size_t
@@ -519,6 +575,19 @@ xfs_btree_key_addr(
}
/*
+ * Return a pointer to the n-th high key in the btree block.
+ */
+STATIC union xfs_btree_key *
+xfs_btree_high_key_addr(
+ struct xfs_btree_cur *cur,
+ int n,
+ struct xfs_btree_block *block)
+{
+ return (union xfs_btree_key *)
+ ((char *)block + xfs_btree_high_key_offset(cur, n));
+}
+
+/*
* Return a pointer to the n-th block pointer in the btree block.
*/
STATIC union xfs_btree_ptr *
@@ -543,12 +612,12 @@ xfs_btree_ptr_addr(
*/
STATIC struct xfs_btree_block *
xfs_btree_get_iroot(
- struct xfs_btree_cur *cur)
+ struct xfs_btree_cur *cur)
{
- struct xfs_ifork *ifp;
+ struct xfs_ifork *ifp;
- ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, cur->bc_private.b.whichfork);
- return (struct xfs_btree_block *)ifp->if_broot;
+ ifp = XFS_IFORK_PTR(cur->bc_private.b.ip, cur->bc_private.b.whichfork);
+ return (struct xfs_btree_block *)ifp->if_broot;
}
/*
@@ -1144,6 +1213,9 @@ xfs_btree_set_refs(
case XFS_BTNUM_BMAP:
xfs_buf_set_ref(bp, XFS_BMAP_BTREE_REF);
break;
+ case XFS_BTNUM_RMAP:
+ xfs_buf_set_ref(bp, XFS_RMAP_BTREE_REF);
+ break;
default:
ASSERT(0);
}
@@ -1879,32 +1951,214 @@ error0:
return error;
}
+/* Find the high key storage area from a regular key. */
+STATIC union xfs_btree_key *
+xfs_btree_high_key_from_key(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *key)
+{
+ ASSERT(cur->bc_flags & XFS_BTREE_OVERLAPPING);
+ return (union xfs_btree_key *)((char *)key +
+ (cur->bc_ops->key_len / 2));
+}
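/*
 * Editorial note, not part of the patch: for an overlapping btree, key_len
 * covers a low/high pair -- bytes [0, key_len/2) hold the low key and bytes
 * [key_len/2, key_len) the high key, the layout that both
 * xfs_btree_high_key_offset() and the helper above encode.  A hypothetical
 * sketch of pulling the two halves apart:
 */
static void
sketch_split_key_pair(
	struct xfs_btree_cur	*cur,
	union xfs_btree_key	*pair,	/* low/high pair, key_len bytes */
	union xfs_btree_key	*lo,	/* out: low key */
	union xfs_btree_key	*hi)	/* out: high key */
{
	memcpy(lo, pair, cur->bc_ops->key_len / 2);
	memcpy(hi, xfs_btree_high_key_from_key(cur, pair),
	       cur->bc_ops->key_len / 2);
}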
+
+/* Determine the low (and high if overlapped) keys of a leaf block */
+STATIC void
+xfs_btree_get_leaf_keys(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_block *block,
+ union xfs_btree_key *key)
+{
+ union xfs_btree_key max_hkey;
+ union xfs_btree_key hkey;
+ union xfs_btree_rec *rec;
+ union xfs_btree_key *high;
+ int n;
+
+ rec = xfs_btree_rec_addr(cur, 1, block);
+ cur->bc_ops->init_key_from_rec(key, rec);
+
+ if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
+ cur->bc_ops->init_high_key_from_rec(&max_hkey, rec);
+ for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
+ rec = xfs_btree_rec_addr(cur, n, block);
+ cur->bc_ops->init_high_key_from_rec(&hkey, rec);
+ if (cur->bc_ops->diff_two_keys(cur, &hkey, &max_hkey)
+ > 0)
+ max_hkey = hkey;
+ }
+
+ high = xfs_btree_high_key_from_key(cur, key);
+ memcpy(high, &max_hkey, cur->bc_ops->key_len / 2);
+ }
+}
+
+/* Determine the low (and high if overlapped) keys of a node block */
+STATIC void
+xfs_btree_get_node_keys(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_block *block,
+ union xfs_btree_key *key)
+{
+ union xfs_btree_key *hkey;
+ union xfs_btree_key *max_hkey;
+ union xfs_btree_key *high;
+ int n;
+
+ if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
+ memcpy(key, xfs_btree_key_addr(cur, 1, block),
+ cur->bc_ops->key_len / 2);
+
+ max_hkey = xfs_btree_high_key_addr(cur, 1, block);
+ for (n = 2; n <= xfs_btree_get_numrecs(block); n++) {
+ hkey = xfs_btree_high_key_addr(cur, n, block);
+ if (cur->bc_ops->diff_two_keys(cur, hkey, max_hkey) > 0)
+ max_hkey = hkey;
+ }
+
+ high = xfs_btree_high_key_from_key(cur, key);
+ memcpy(high, max_hkey, cur->bc_ops->key_len / 2);
+ } else {
+ memcpy(key, xfs_btree_key_addr(cur, 1, block),
+ cur->bc_ops->key_len);
+ }
+}
+
+/* Derive the keys for any btree block. */
+STATIC void
+xfs_btree_get_keys(
+ struct xfs_btree_cur *cur,
+ struct xfs_btree_block *block,
+ union xfs_btree_key *key)
+{
+ if (be16_to_cpu(block->bb_level) == 0)
+ xfs_btree_get_leaf_keys(cur, block, key);
+ else
+ xfs_btree_get_node_keys(cur, block, key);
+}
+
/*
- * Update keys at all levels from here to the root along the cursor's path.
+ * Decide if we need to update the parent keys of a btree block. For
+ * a standard btree this is only necessary if we're updating the first
+ * record/key. For an overlapping btree, we must always update the
+ * keys because the highest key can be in any of the records or keys
+ * in the block.
+ */
+static inline bool
+xfs_btree_needs_key_update(
+ struct xfs_btree_cur *cur,
+ int ptr)
+{
+ return (cur->bc_flags & XFS_BTREE_OVERLAPPING) || ptr == 1;
+}
+
+/*
+ * Update the low and high parent keys of the given level, progressing
+ * towards the root. If force_all is false, stop if the keys for a given
+ * level do not need updating.
*/
STATIC int
-xfs_btree_updkey(
+__xfs_btree_updkeys(
+ struct xfs_btree_cur *cur,
+ int level,
+ struct xfs_btree_block *block,
+ struct xfs_buf *bp0,
+ bool force_all)
+{
+ union xfs_btree_bigkey key; /* keys from current level */
+ union xfs_btree_key *lkey; /* keys from the current level */
+ union xfs_btree_key *hkey;
+ union xfs_btree_key *nlkey; /* keys from the next level up */
+ union xfs_btree_key *nhkey;
+ struct xfs_buf *bp;
+ int ptr;
+
+ ASSERT(cur->bc_flags & XFS_BTREE_OVERLAPPING);
+
+ /* Exit if there aren't any parent levels to update. */
+ if (level + 1 >= cur->bc_nlevels)
+ return 0;
+
+ trace_xfs_btree_updkeys(cur, level, bp0);
+
+ lkey = (union xfs_btree_key *)&key;
+ hkey = xfs_btree_high_key_from_key(cur, lkey);
+ xfs_btree_get_keys(cur, block, lkey);
+ for (level++; level < cur->bc_nlevels; level++) {
+#ifdef DEBUG
+ int error;
+#endif
+ block = xfs_btree_get_block(cur, level, &bp);
+ trace_xfs_btree_updkeys(cur, level, bp);
+#ifdef DEBUG
+ error = xfs_btree_check_block(cur, block, level, bp);
+ if (error) {
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+ return error;
+ }
+#endif
+ ptr = cur->bc_ptrs[level];
+ nlkey = xfs_btree_key_addr(cur, ptr, block);
+ nhkey = xfs_btree_high_key_addr(cur, ptr, block);
+ if (!force_all &&
+ !(cur->bc_ops->diff_two_keys(cur, nlkey, lkey) != 0 ||
+ cur->bc_ops->diff_two_keys(cur, nhkey, hkey) != 0))
+ break;
+ xfs_btree_copy_keys(cur, nlkey, lkey, 1);
+ xfs_btree_log_keys(cur, bp, ptr, ptr);
+ if (level + 1 >= cur->bc_nlevels)
+ break;
+ xfs_btree_get_node_keys(cur, block, lkey);
+ }
+
+ return 0;
+}
+
+/* Update all the keys from some level in cursor back to the root. */
+STATIC int
+xfs_btree_updkeys_force(
+ struct xfs_btree_cur *cur,
+ int level)
+{
+ struct xfs_buf *bp;
+ struct xfs_btree_block *block;
+
+ block = xfs_btree_get_block(cur, level, &bp);
+ return __xfs_btree_updkeys(cur, level, block, bp, true);
+}
+
+/*
+ * Update the parent keys of the given level, progressing towards the root.
+ */
+STATIC int
+xfs_btree_update_keys(
struct xfs_btree_cur *cur,
- union xfs_btree_key *keyp,
int level)
{
struct xfs_btree_block *block;
struct xfs_buf *bp;
union xfs_btree_key *kp;
+ union xfs_btree_key key;
int ptr;
+ ASSERT(level >= 0);
+
+ block = xfs_btree_get_block(cur, level, &bp);
+ if (cur->bc_flags & XFS_BTREE_OVERLAPPING)
+ return __xfs_btree_updkeys(cur, level, block, bp, false);
+
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_TRACE_ARGIK(cur, level, keyp);
- ASSERT(!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) || level >= 1);
-
/*
* Go up the tree from this level toward the root.
* At each level, update the key value to the value input.
* Stop when we reach a level where the cursor isn't pointing
* at the first entry in the block.
*/
- for (ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
+ xfs_btree_get_keys(cur, block, &key);
+ for (level++, ptr = 1; ptr == 1 && level < cur->bc_nlevels; level++) {
#ifdef DEBUG
int error;
#endif
@@ -1918,7 +2172,7 @@ xfs_btree_updkey(
#endif
ptr = cur->bc_ptrs[level];
kp = xfs_btree_key_addr(cur, ptr, block);
- xfs_btree_copy_keys(cur, kp, keyp, 1);
+ xfs_btree_copy_keys(cur, kp, &key, 1);
xfs_btree_log_keys(cur, bp, ptr, ptr);
}
@@ -1970,12 +2224,9 @@ xfs_btree_update(
ptr, LASTREC_UPDATE);
}
- /* Updating first rec in leaf. Pass new key value up to our parent. */
- if (ptr == 1) {
- union xfs_btree_key key;
-
- cur->bc_ops->init_key_from_rec(&key, rec);
- error = xfs_btree_updkey(cur, &key, 1);
+ /* Pass new key value up to our parent. */
+ if (xfs_btree_needs_key_update(cur, ptr)) {
+ error = xfs_btree_update_keys(cur, 0);
if (error)
goto error0;
}
@@ -1998,18 +2249,19 @@ xfs_btree_lshift(
int level,
int *stat) /* success/failure */
{
- union xfs_btree_key key; /* btree key */
struct xfs_buf *lbp; /* left buffer pointer */
struct xfs_btree_block *left; /* left btree block */
int lrecs; /* left record count */
struct xfs_buf *rbp; /* right buffer pointer */
struct xfs_btree_block *right; /* right btree block */
+ struct xfs_btree_cur *tcur; /* temporary btree cursor */
int rrecs; /* right record count */
union xfs_btree_ptr lptr; /* left btree pointer */
union xfs_btree_key *rkp = NULL; /* right btree key */
union xfs_btree_ptr *rpp = NULL; /* right address pointer */
union xfs_btree_rec *rrp = NULL; /* right record pointer */
int error; /* error return value */
+ int i;
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
XFS_BTREE_TRACE_ARGI(cur, level);
@@ -2139,18 +2391,33 @@ xfs_btree_lshift(
xfs_btree_rec_addr(cur, 2, right),
-1, rrecs);
xfs_btree_log_recs(cur, rbp, 1, rrecs);
+ }
- /*
- * If it's the first record in the block, we'll need a key
- * structure to pass up to the next level (updkey).
- */
- cur->bc_ops->init_key_from_rec(&key,
- xfs_btree_rec_addr(cur, 1, right));
- rkp = &key;
+ /*
+ * Using a temporary cursor, update the parent key values of the
+ * block on the left.
+ */
+ if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
+ error = xfs_btree_dup_cursor(cur, &tcur);
+ if (error)
+ goto error0;
+ i = xfs_btree_firstrec(tcur, level);
+ XFS_WANT_CORRUPTED_GOTO(tcur->bc_mp, i == 1, error0);
+
+ error = xfs_btree_decrement(tcur, level, &i);
+ if (error)
+ goto error1;
+
+ /* Update the parent high keys of the left block, if needed. */
+ error = xfs_btree_update_keys(tcur, level);
+ if (error)
+ goto error1;
+
+ xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
}
- /* Update the parent key values of right. */
- error = xfs_btree_updkey(cur, rkp, level + 1);
+ /* Update the parent keys of the right block. */
+ error = xfs_btree_update_keys(cur, level);
if (error)
goto error0;
@@ -2169,6 +2436,11 @@ out0:
error0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
return error;
+
+error1:
+ XFS_BTREE_TRACE_CURSOR(tcur, XBT_ERROR);
+ xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
+ return error;
}
/*
@@ -2181,7 +2453,6 @@ xfs_btree_rshift(
int level,
int *stat) /* success/failure */
{
- union xfs_btree_key key; /* btree key */
struct xfs_buf *lbp; /* left buffer pointer */
struct xfs_btree_block *left; /* left btree block */
struct xfs_buf *rbp; /* right buffer pointer */
@@ -2290,12 +2561,6 @@ xfs_btree_rshift(
/* Now put the new data in, and log it. */
xfs_btree_copy_recs(cur, rrp, lrp, 1);
xfs_btree_log_recs(cur, rbp, 1, rrecs + 1);
-
- cur->bc_ops->init_key_from_rec(&key, rrp);
- rkp = &key;
-
- ASSERT(cur->bc_ops->recs_inorder(cur, rrp,
- xfs_btree_rec_addr(cur, 2, right)));
}
/*
@@ -2315,13 +2580,21 @@ xfs_btree_rshift(
if (error)
goto error0;
i = xfs_btree_lastrec(tcur, level);
- XFS_WANT_CORRUPTED_GOTO(cur->bc_mp, i == 1, error0);
+ XFS_WANT_CORRUPTED_GOTO(tcur->bc_mp, i == 1, error0);
error = xfs_btree_increment(tcur, level, &i);
if (error)
goto error1;
- error = xfs_btree_updkey(tcur, rkp, level + 1);
+ /* Update the parent high keys of the left block, if needed. */
+ if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
+ error = xfs_btree_update_keys(cur, level);
+ if (error)
+ goto error1;
+ }
+
+ /* Update the parent keys of the right block. */
+ error = xfs_btree_update_keys(tcur, level);
if (error)
goto error1;
@@ -2422,6 +2695,11 @@ __xfs_btree_split(
XFS_BTREE_STATS_ADD(cur, moves, rrecs);
+ /* Adjust numrecs for the later get_*_keys() calls. */
+ lrecs -= rrecs;
+ xfs_btree_set_numrecs(left, lrecs);
+ xfs_btree_set_numrecs(right, xfs_btree_get_numrecs(right) + rrecs);
+
/*
* Copy btree block entries from the left block over to the
* new block, the right. Update the right block and log the
@@ -2447,14 +2725,15 @@ __xfs_btree_split(
}
#endif
+ /* Copy the keys & pointers to the new block. */
xfs_btree_copy_keys(cur, rkp, lkp, rrecs);
xfs_btree_copy_ptrs(cur, rpp, lpp, rrecs);
xfs_btree_log_keys(cur, rbp, 1, rrecs);
xfs_btree_log_ptrs(cur, rbp, 1, rrecs);
- /* Grab the keys to the entries moved to the right block */
- xfs_btree_copy_keys(cur, key, rkp, 1);
+ /* Stash the keys of the new block for later insertion. */
+ xfs_btree_get_node_keys(cur, right, key);
} else {
/* It's a leaf. Move records. */
union xfs_btree_rec *lrp; /* left record pointer */
@@ -2463,27 +2742,23 @@ __xfs_btree_split(
lrp = xfs_btree_rec_addr(cur, src_index, left);
rrp = xfs_btree_rec_addr(cur, 1, right);
+ /* Copy records to the new block. */
xfs_btree_copy_recs(cur, rrp, lrp, rrecs);
xfs_btree_log_recs(cur, rbp, 1, rrecs);
- cur->bc_ops->init_key_from_rec(key,
- xfs_btree_rec_addr(cur, 1, right));
+ /* Stash the keys of the new block for later insertion. */
+ xfs_btree_get_leaf_keys(cur, right, key);
}
-
/*
* Find the left block number by looking in the buffer.
- * Adjust numrecs, sibling pointers.
+ * Adjust sibling pointers.
*/
xfs_btree_get_sibling(cur, left, &rrptr, XFS_BB_RIGHTSIB);
xfs_btree_set_sibling(cur, right, &rrptr, XFS_BB_RIGHTSIB);
xfs_btree_set_sibling(cur, right, &lptr, XFS_BB_LEFTSIB);
xfs_btree_set_sibling(cur, left, &rptr, XFS_BB_RIGHTSIB);
- lrecs -= rrecs;
- xfs_btree_set_numrecs(left, lrecs);
- xfs_btree_set_numrecs(right, xfs_btree_get_numrecs(right) + rrecs);
-
xfs_btree_log_block(cur, rbp, XFS_BB_ALL_BITS);
xfs_btree_log_block(cur, lbp, XFS_BB_NUMRECS | XFS_BB_RIGHTSIB);
@@ -2499,6 +2774,14 @@ __xfs_btree_split(
xfs_btree_set_sibling(cur, rrblock, &rptr, XFS_BB_LEFTSIB);
xfs_btree_log_block(cur, rrbp, XFS_BB_LEFTSIB);
}
+
+ /* Update the parent high keys of the left block, if needed. */
+ if (cur->bc_flags & XFS_BTREE_OVERLAPPING) {
+ error = xfs_btree_update_keys(cur, level);
+ if (error)
+ goto error0;
+ }
+
/*
* If the cursor is really in the right block, move it there.
* If it's just pointing past the last entry in left, then we'll
@@ -2802,6 +3085,7 @@ xfs_btree_new_root(
bp = lbp;
nptr = 2;
}
+
/* Fill in the new block's btree header and log it. */
xfs_btree_init_block_cur(cur, nbp, cur->bc_nlevels, 2);
xfs_btree_log_block(cur, nbp, XFS_BB_ALL_BITS);
@@ -2810,19 +3094,24 @@ xfs_btree_new_root(
/* Fill in the key data in the new root. */
if (xfs_btree_get_level(left) > 0) {
- xfs_btree_copy_keys(cur,
- xfs_btree_key_addr(cur, 1, new),
- xfs_btree_key_addr(cur, 1, left), 1);
- xfs_btree_copy_keys(cur,
- xfs_btree_key_addr(cur, 2, new),
- xfs_btree_key_addr(cur, 1, right), 1);
+ /*
+ * Get the keys for the left block's keys and put them directly
+ * in the parent block. Do the same for the right block.
+ */
+ xfs_btree_get_node_keys(cur, left,
+ xfs_btree_key_addr(cur, 1, new));
+ xfs_btree_get_node_keys(cur, right,
+ xfs_btree_key_addr(cur, 2, new));
} else {
- cur->bc_ops->init_key_from_rec(
- xfs_btree_key_addr(cur, 1, new),
- xfs_btree_rec_addr(cur, 1, left));
- cur->bc_ops->init_key_from_rec(
- xfs_btree_key_addr(cur, 2, new),
- xfs_btree_rec_addr(cur, 1, right));
+ /*
+ * Get the keys for the left block's records and put them
+ * directly in the parent block. Do the same for the right
+ * block.
+ */
+ xfs_btree_get_leaf_keys(cur, left,
+ xfs_btree_key_addr(cur, 1, new));
+ xfs_btree_get_leaf_keys(cur, right,
+ xfs_btree_key_addr(cur, 2, new));
}
xfs_btree_log_keys(cur, nbp, 1, 2);
@@ -2858,10 +3147,9 @@ xfs_btree_make_block_unfull(
int *index, /* new tree index */
union xfs_btree_ptr *nptr, /* new btree ptr */
struct xfs_btree_cur **ncur, /* new btree cursor */
- union xfs_btree_rec *nrec, /* new record */
+ union xfs_btree_key *key, /* key of new block */
int *stat)
{
- union xfs_btree_key key; /* new btree key value */
int error = 0;
if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) &&
@@ -2871,6 +3159,7 @@ xfs_btree_make_block_unfull(
if (numrecs < cur->bc_ops->get_dmaxrecs(cur, level)) {
/* A root block that can be made bigger. */
xfs_iroot_realloc(ip, 1, cur->bc_private.b.whichfork);
+ *stat = 1;
} else {
/* A root block that needs replacing */
int logflags = 0;
@@ -2906,13 +3195,12 @@ xfs_btree_make_block_unfull(
* If this works we have to re-set our variables because we
* could be in a different block now.
*/
- error = xfs_btree_split(cur, level, nptr, &key, ncur, stat);
+ error = xfs_btree_split(cur, level, nptr, key, ncur, stat);
if (error || *stat == 0)
return error;
*index = cur->bc_ptrs[level];
- cur->bc_ops->init_rec_from_key(&key, nrec);
return 0;
}
@@ -2925,16 +3213,17 @@ xfs_btree_insrec(
struct xfs_btree_cur *cur, /* btree cursor */
int level, /* level to insert record at */
union xfs_btree_ptr *ptrp, /* i/o: block number inserted */
- union xfs_btree_rec *recp, /* i/o: record data inserted */
+ union xfs_btree_rec *rec, /* record to insert */
+ union xfs_btree_key *key, /* i/o: block key for ptrp */
struct xfs_btree_cur **curp, /* output: new cursor replacing cur */
int *stat) /* success/failure */
{
struct xfs_btree_block *block; /* btree block */
struct xfs_buf *bp; /* buffer for block */
- union xfs_btree_key key; /* btree key */
union xfs_btree_ptr nptr; /* new block ptr */
struct xfs_btree_cur *ncur; /* new btree cursor */
- union xfs_btree_rec nrec; /* new record count */
+ union xfs_btree_bigkey nkey; /* new block key */
+ union xfs_btree_key *lkey;
int optr; /* old key/record index */
int ptr; /* key/record index */
int numrecs;/* number of records */
@@ -2942,11 +3231,13 @@ xfs_btree_insrec(
#ifdef DEBUG
int i;
#endif
+ xfs_daddr_t old_bn;
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
- XFS_BTREE_TRACE_ARGIPR(cur, level, *ptrp, recp);
+ XFS_BTREE_TRACE_ARGIPR(cur, level, *ptrp, &rec);
ncur = NULL;
+ lkey = (union xfs_btree_key *)&nkey;
/*
* If we have an external root pointer, and we've made it to the
@@ -2969,15 +3260,13 @@ xfs_btree_insrec(
return 0;
}
- /* Make a key out of the record data to be inserted, and save it. */
- cur->bc_ops->init_key_from_rec(&key, recp);
-
optr = ptr;
XFS_BTREE_STATS_INC(cur, insrec);
/* Get pointers to the btree buffer and block. */
block = xfs_btree_get_block(cur, level, &bp);
+ old_bn = bp ? bp->b_bn : XFS_BUF_DADDR_NULL;
numrecs = xfs_btree_get_numrecs(block);
#ifdef DEBUG
@@ -2988,10 +3277,10 @@ xfs_btree_insrec(
/* Check that the new entry is being inserted in the right place. */
if (ptr <= numrecs) {
if (level == 0) {
- ASSERT(cur->bc_ops->recs_inorder(cur, recp,
+ ASSERT(cur->bc_ops->recs_inorder(cur, rec,
xfs_btree_rec_addr(cur, ptr, block)));
} else {
- ASSERT(cur->bc_ops->keys_inorder(cur, &key,
+ ASSERT(cur->bc_ops->keys_inorder(cur, key,
xfs_btree_key_addr(cur, ptr, block)));
}
}
@@ -3004,7 +3293,7 @@ xfs_btree_insrec(
xfs_btree_set_ptr_null(cur, &nptr);
if (numrecs == cur->bc_ops->get_maxrecs(cur, level)) {
error = xfs_btree_make_block_unfull(cur, level, numrecs,
- &optr, &ptr, &nptr, &ncur, &nrec, stat);
+ &optr, &ptr, &nptr, &ncur, lkey, stat);
if (error || *stat == 0)
goto error0;
}
@@ -3054,7 +3343,7 @@ xfs_btree_insrec(
#endif
/* Now put the new data in, bump numrecs and log it. */
- xfs_btree_copy_keys(cur, kp, &key, 1);
+ xfs_btree_copy_keys(cur, kp, key, 1);
xfs_btree_copy_ptrs(cur, pp, ptrp, 1);
numrecs++;
xfs_btree_set_numrecs(block, numrecs);
@@ -3075,7 +3364,7 @@ xfs_btree_insrec(
xfs_btree_shift_recs(cur, rp, 1, numrecs - ptr + 1);
/* Now put the new data in, bump numrecs and log it. */
- xfs_btree_copy_recs(cur, rp, recp, 1);
+ xfs_btree_copy_recs(cur, rp, rec, 1);
xfs_btree_set_numrecs(block, ++numrecs);
xfs_btree_log_recs(cur, bp, ptr, numrecs);
#ifdef DEBUG
@@ -3089,9 +3378,18 @@ xfs_btree_insrec(
/* Log the new number of records in the btree header. */
xfs_btree_log_block(cur, bp, XFS_BB_NUMRECS);
- /* If we inserted at the start of a block, update the parents' keys. */
- if (optr == 1) {
- error = xfs_btree_updkey(cur, &key, level + 1);
+ /*
+ * If we just inserted into a new tree block, we have to
+ * recalculate nkey here because nkey is out of date.
+ *
+ * Otherwise we're just updating an existing block (having shoved
+ * some records into the new tree block), so use the regular key
+ * update mechanism.
+ */
+ if (bp && bp->b_bn != old_bn) {
+ xfs_btree_get_keys(cur, block, lkey);
+ } else if (xfs_btree_needs_key_update(cur, optr)) {
+ error = xfs_btree_update_keys(cur, level);
if (error)
goto error0;
}
@@ -3101,7 +3399,7 @@ xfs_btree_insrec(
* we are at the far right edge of the tree, update it.
*/
if (xfs_btree_is_lastrec(cur, block, level)) {
- cur->bc_ops->update_lastrec(cur, block, recp,
+ cur->bc_ops->update_lastrec(cur, block, rec,
ptr, LASTREC_INSREC);
}
@@ -3111,7 +3409,7 @@ xfs_btree_insrec(
*/
*ptrp = nptr;
if (!xfs_btree_ptr_is_null(cur, &nptr)) {
- *recp = nrec;
+ xfs_btree_copy_keys(cur, key, lkey, 1);
*curp = ncur;
}
@@ -3142,14 +3440,20 @@ xfs_btree_insert(
union xfs_btree_ptr nptr; /* new block number (split result) */
struct xfs_btree_cur *ncur; /* new cursor (split result) */
struct xfs_btree_cur *pcur; /* previous level's cursor */
+ union xfs_btree_bigkey bkey; /* key of block to insert */
+ union xfs_btree_key *key;
union xfs_btree_rec rec; /* record to insert */
level = 0;
ncur = NULL;
pcur = cur;
+ key = (union xfs_btree_key *)&bkey;
xfs_btree_set_ptr_null(cur, &nptr);
+
+ /* Make a key out of the record data to be inserted, and save it. */
cur->bc_ops->init_rec_from_cur(cur, &rec);
+ cur->bc_ops->init_key_from_rec(key, &rec);
/*
* Loop going up the tree, starting at the leaf level.
@@ -3161,7 +3465,8 @@ xfs_btree_insert(
* Insert nrec/nptr into this level of the tree.
* Note if we fail, nptr will be null.
*/
- error = xfs_btree_insrec(pcur, level, &nptr, &rec, &ncur, &i);
+ error = xfs_btree_insrec(pcur, level, &nptr, &rec, key,
+ &ncur, &i);
if (error) {
if (pcur != cur)
xfs_btree_del_cursor(pcur, XFS_BTREE_ERROR);
@@ -3385,8 +3690,6 @@ xfs_btree_delrec(
struct xfs_buf *bp; /* buffer for block */
int error; /* error return value */
int i; /* loop counter */
- union xfs_btree_key key; /* storage for keyp */
- union xfs_btree_key *keyp = &key; /* passed to the next level */
union xfs_btree_ptr lptr; /* left sibling block ptr */
struct xfs_buf *lbp; /* left buffer pointer */
struct xfs_btree_block *left; /* left btree block */
@@ -3457,13 +3760,6 @@ xfs_btree_delrec(
xfs_btree_log_keys(cur, bp, ptr, numrecs - 1);
xfs_btree_log_ptrs(cur, bp, ptr, numrecs - 1);
}
-
- /*
- * If it's the first record in the block, we'll need to pass a
- * key up to the next level (updkey).
- */
- if (ptr == 1)
- keyp = xfs_btree_key_addr(cur, 1, block);
} else {
/* It's a leaf. operate on records */
if (ptr < numrecs) {
@@ -3472,16 +3768,6 @@ xfs_btree_delrec(
-1, numrecs - ptr);
xfs_btree_log_recs(cur, bp, ptr, numrecs - 1);
}
-
- /*
- * If it's the first record in the block, we'll need a key
- * structure to pass up to the next level (updkey).
- */
- if (ptr == 1) {
- cur->bc_ops->init_key_from_rec(&key,
- xfs_btree_rec_addr(cur, 1, block));
- keyp = &key;
- }
}
/*
@@ -3548,8 +3834,8 @@ xfs_btree_delrec(
* If we deleted the leftmost entry in the block, update the
* key values above us in the tree.
*/
- if (ptr == 1) {
- error = xfs_btree_updkey(cur, keyp, level + 1);
+ if (xfs_btree_needs_key_update(cur, ptr)) {
+ error = xfs_btree_update_keys(cur, level);
if (error)
goto error0;
}
@@ -3878,6 +4164,16 @@ xfs_btree_delrec(
if (level > 0)
cur->bc_ptrs[level]--;
+ /*
+ * We combined blocks, so we have to update the parent keys if the
+ * btree supports overlapped intervals. However, bc_ptrs[level + 1]
+ * points to the old block so that the caller knows which record to
+ * delete. Therefore, the caller must be savvy enough to call updkeys
+ * for us if we return stat == 2. The other exit points from this
+ * function don't require deletions further up the tree, so they can
+ * call updkeys directly.
+ */
+
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
/* Return value means the next level up has something to do. */
*stat = 2;
@@ -3903,6 +4199,7 @@ xfs_btree_delete(
int error; /* error return value */
int level;
int i;
+ bool joined = false;
XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
@@ -3916,6 +4213,18 @@ xfs_btree_delete(
error = xfs_btree_delrec(cur, level, &i);
if (error)
goto error0;
+ if (i == 2)
+ joined = true;
+ }
+
+ /*
+ * If we combined blocks as part of deleting the record, delrec won't
+ * have updated the parent high keys so we have to do that here.
+ */
+ if (joined && (cur->bc_flags & XFS_BTREE_OVERLAPPING)) {
+ error = xfs_btree_updkeys_force(cur, 0);
+ if (error)
+ goto error0;
}
if (i == 0) {
@@ -3978,6 +4287,81 @@ xfs_btree_get_rec(
return 0;
}
+/* Visit a block in a btree. */
+STATIC int
+xfs_btree_visit_block(
+ struct xfs_btree_cur *cur,
+ int level,
+ xfs_btree_visit_blocks_fn fn,
+ void *data)
+{
+ struct xfs_btree_block *block;
+ struct xfs_buf *bp;
+ union xfs_btree_ptr rptr;
+ int error;
+
+ /* do right sibling readahead */
+ xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
+ block = xfs_btree_get_block(cur, level, &bp);
+
+ /* process the block */
+ error = fn(cur, level, data);
+ if (error)
+ return error;
+
+ /* now read rh sibling block for next iteration */
+ xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
+ if (xfs_btree_ptr_is_null(cur, &rptr))
+ return -ENOENT;
+
+ return xfs_btree_lookup_get_block(cur, level, &rptr, &block);
+}
+
+/* Visit every block in a btree. */
+int
+xfs_btree_visit_blocks(
+ struct xfs_btree_cur *cur,
+ xfs_btree_visit_blocks_fn fn,
+ void *data)
+{
+ union xfs_btree_ptr lptr;
+ int level;
+ struct xfs_btree_block *block = NULL;
+ int error = 0;
+
+ cur->bc_ops->init_ptr_from_cur(cur, &lptr);
+
+ /* for each level */
+ for (level = cur->bc_nlevels - 1; level >= 0; level--) {
+ /* grab the left hand block */
+ error = xfs_btree_lookup_get_block(cur, level, &lptr, &block);
+ if (error)
+ return error;
+
+ /* readahead the left most block for the next level down */
+ if (level > 0) {
+ union xfs_btree_ptr *ptr;
+
+ ptr = xfs_btree_ptr_addr(cur, 1, block);
+ xfs_btree_readahead_ptr(cur, ptr, 1);
+
+ /* save for the next iteration of the loop */
+ lptr = *ptr;
+ }
+
+ /* for each buffer in the level */
+ do {
+ error = xfs_btree_visit_block(cur, level, fn, data);
+ } while (!error);
+
+ if (error != -ENOENT)
+ return error;
+ }
+
+ return 0;
+}
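/*
 * Editorial sketch, not part of the patch: a hypothetical caller of the new
 * iterator.  The callback matches the fn(cur, level, data) call made by
 * xfs_btree_visit_block() above; this one simply counts the blocks in the
 * tree.
 */
static int
sketch_count_btree_block(
	struct xfs_btree_cur	*cur,
	int			level,
	void			*data)
{
	xfs_extlen_t		*count = data;

	(*count)++;
	return 0;
}

/* usage: error = xfs_btree_visit_blocks(cur, sketch_count_btree_block, &nblocks); */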
+
/*
* Change the owner of a btree.
*
@@ -4002,26 +4386,27 @@ xfs_btree_get_rec(
* just queue the modified buffer as delayed write buffer so the transaction
* recovery completion writes the changes to disk.
*/
+struct xfs_btree_block_change_owner_info {
+ __uint64_t new_owner;
+ struct list_head *buffer_list;
+};
+
static int
xfs_btree_block_change_owner(
struct xfs_btree_cur *cur,
int level,
- __uint64_t new_owner,
- struct list_head *buffer_list)
+ void *data)
{
+ struct xfs_btree_block_change_owner_info *bbcoi = data;
struct xfs_btree_block *block;
struct xfs_buf *bp;
- union xfs_btree_ptr rptr;
-
- /* do right sibling readahead */
- xfs_btree_readahead(cur, level, XFS_BTCUR_RIGHTRA);
/* modify the owner */
block = xfs_btree_get_block(cur, level, &bp);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
- block->bb_u.l.bb_owner = cpu_to_be64(new_owner);
+ block->bb_u.l.bb_owner = cpu_to_be64(bbcoi->new_owner);
else
- block->bb_u.s.bb_owner = cpu_to_be32(new_owner);
+ block->bb_u.s.bb_owner = cpu_to_be32(bbcoi->new_owner);
/*
* If the block is a root block hosted in an inode, we might not have a
@@ -4035,19 +4420,14 @@ xfs_btree_block_change_owner(
xfs_trans_ordered_buf(cur->bc_tp, bp);
xfs_btree_log_block(cur, bp, XFS_BB_OWNER);
} else {
- xfs_buf_delwri_queue(bp, buffer_list);
+ xfs_buf_delwri_queue(bp, bbcoi->buffer_list);
}
} else {
ASSERT(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE);
ASSERT(level == cur->bc_nlevels - 1);
}
- /* now read rh sibling block for next iteration */
- xfs_btree_get_sibling(cur, block, &rptr, XFS_BB_RIGHTSIB);
- if (xfs_btree_ptr_is_null(cur, &rptr))
- return -ENOENT;
-
- return xfs_btree_lookup_get_block(cur, level, &rptr, &block);
+ return 0;
}
int
@@ -4056,43 +4436,13 @@ xfs_btree_change_owner(
__uint64_t new_owner,
struct list_head *buffer_list)
{
- union xfs_btree_ptr lptr;
- int level;
- struct xfs_btree_block *block = NULL;
- int error = 0;
+ struct xfs_btree_block_change_owner_info bbcoi;
- cur->bc_ops->init_ptr_from_cur(cur, &lptr);
+ bbcoi.new_owner = new_owner;
+ bbcoi.buffer_list = buffer_list;
- /* for each level */
- for (level = cur->bc_nlevels - 1; level >= 0; level--) {
- /* grab the left hand block */
- error = xfs_btree_lookup_get_block(cur, level, &lptr, &block);
- if (error)
- return error;
-
- /* readahead the left most block for the next level down */
- if (level > 0) {
- union xfs_btree_ptr *ptr;
-
- ptr = xfs_btree_ptr_addr(cur, 1, block);
- xfs_btree_readahead_ptr(cur, ptr, 1);
-
- /* save for the next iteration of the loop */
- lptr = *ptr;
- }
-
- /* for each buffer in the level */
- do {
- error = xfs_btree_block_change_owner(cur, level,
- new_owner,
- buffer_list);
- } while (!error);
-
- if (error != -ENOENT)
- return error;
- }
-
- return 0;
+ return xfs_btree_visit_blocks(cur, xfs_btree_block_change_owner,
+ &bbcoi);
}
/**
@@ -4152,3 +4502,286 @@ xfs_btree_sblock_verify(
return true;
}
+
+/*
+ * Calculate the number of btree levels needed to store a given number of
+ * records in a short-format btree.
+ */
+uint
+xfs_btree_compute_maxlevels(
+ struct xfs_mount *mp,
+ uint *limits,
+ unsigned long len)
+{
+ uint level;
+ unsigned long maxblocks;
+
+ maxblocks = (len + limits[0] - 1) / limits[0];
+ for (level = 1; maxblocks > 1; level++)
+ maxblocks = (maxblocks + limits[1] - 1) / limits[1];
+ return level;
+}
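+
+/*
+ * Worked example (hypothetical limits): with minrecs limits[0] = 4 for
+ * leaf blocks and limits[1] = 4 for node blocks, len = 100 records
+ * needs ceil(100/4) = 25 leaves, then ceil(25/4) = 7, ceil(7/4) = 2 and
+ * ceil(2/4) = 1 nodes walking up, so the function returns 4 levels.
+ */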
+
+/*
+ * Query a regular btree for all records overlapping a given interval.
+ * Start with a LE lookup of the key of low_rec and return all records
+ * until we find a record with a key greater than the key of high_rec.
+ */
+STATIC int
+xfs_btree_simple_query_range(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *low_key,
+ union xfs_btree_key *high_key,
+ xfs_btree_query_range_fn fn,
+ void *priv)
+{
+ union xfs_btree_rec *recp;
+ union xfs_btree_key rec_key;
+ __int64_t diff;
+ int stat;
+ bool firstrec = true;
+ int error;
+
+ ASSERT(cur->bc_ops->init_high_key_from_rec);
+ ASSERT(cur->bc_ops->diff_two_keys);
+
+ /*
+ * Find the leftmost record. The btree cursor must be set
+ * to the low record used to generate low_key.
+ */
+ stat = 0;
+ error = xfs_btree_lookup(cur, XFS_LOOKUP_LE, &stat);
+ if (error)
+ goto out;
+
+ while (stat) {
+ /* Find the record. */
+ error = xfs_btree_get_rec(cur, &recp, &stat);
+ if (error || !stat)
+ break;
+ cur->bc_ops->init_high_key_from_rec(&rec_key, recp);
+
+ /* Skip if high_key(rec) < low_key. */
+ if (firstrec) {
+ firstrec = false;
+ diff = cur->bc_ops->diff_two_keys(cur, low_key,
+ &rec_key);
+ if (diff > 0)
+ goto advloop;
+ }
+
+ /* Stop if high_key < low_key(rec). */
+ diff = cur->bc_ops->diff_two_keys(cur, &rec_key, high_key);
+ if (diff > 0)
+ break;
+
+ /* Callback */
+ error = fn(cur, recp, priv);
+ if (error < 0 || error == XFS_BTREE_QUERY_RANGE_ABORT)
+ break;
+
+advloop:
+ /* Move on to the next record. */
+ error = xfs_btree_increment(cur, 0, &stat);
+ if (error)
+ break;
+ }
+
+out:
+ return error;
+}
+
+/*
+ * Query an overlapped interval btree for all records overlapping a given
+ * interval. This function roughly follows the algorithm given in
+ * "Interval Trees" of _Introduction to Algorithms_, which is section
+ * 14.3 in the 2nd and 3rd editions.
+ *
+ * First, generate keys for the low and high records passed in.
+ *
+ * For any leaf node, generate the high and low keys for the record.
+ * If the record keys overlap with the query low/high keys, pass the
+ * record to the function iterator.
+ *
+ * For any internal node, compare the low and high keys of each
+ * pointer against the query low/high keys. If there's an overlap,
+ * follow the pointer.
+ *
+ * As an optimization, we stop scanning a block when we find a low key
+ * that is greater than the query's high key.
+ */
+STATIC int
+xfs_btree_overlapped_query_range(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *low_key,
+ union xfs_btree_key *high_key,
+ xfs_btree_query_range_fn fn,
+ void *priv)
+{
+ union xfs_btree_ptr ptr;
+ union xfs_btree_ptr *pp;
+ union xfs_btree_key rec_key;
+ union xfs_btree_key rec_hkey;
+ union xfs_btree_key *lkp;
+ union xfs_btree_key *hkp;
+ union xfs_btree_rec *recp;
+ struct xfs_btree_block *block;
+ __int64_t ldiff;
+ __int64_t hdiff;
+ int level;
+ struct xfs_buf *bp;
+ int i;
+ int error;
+
+ /* Load the root of the btree. */
+ level = cur->bc_nlevels - 1;
+ cur->bc_ops->init_ptr_from_cur(cur, &ptr);
+ error = xfs_btree_lookup_get_block(cur, level, &ptr, &block);
+ if (error)
+ return error;
+ xfs_btree_get_block(cur, level, &bp);
+ trace_xfs_btree_overlapped_query_range(cur, level, bp);
+#ifdef DEBUG
+ error = xfs_btree_check_block(cur, block, level, bp);
+ if (error)
+ goto out;
+#endif
+ cur->bc_ptrs[level] = 1;
+
+ while (level < cur->bc_nlevels) {
+ block = xfs_btree_get_block(cur, level, &bp);
+
+ /* End of node, pop back towards the root. */
+ if (cur->bc_ptrs[level] > be16_to_cpu(block->bb_numrecs)) {
+pop_up:
+ if (level < cur->bc_nlevels - 1)
+ cur->bc_ptrs[level + 1]++;
+ level++;
+ continue;
+ }
+
+ if (level == 0) {
+ /* Handle a leaf node. */
+ recp = xfs_btree_rec_addr(cur, cur->bc_ptrs[0], block);
+
+ cur->bc_ops->init_high_key_from_rec(&rec_hkey, recp);
+ ldiff = cur->bc_ops->diff_two_keys(cur, &rec_hkey,
+ low_key);
+
+ cur->bc_ops->init_key_from_rec(&rec_key, recp);
+ hdiff = cur->bc_ops->diff_two_keys(cur, high_key,
+ &rec_key);
+
+ /*
+ * If (record's high key >= query's low key) and
+ * (query's high key >= record's low key), then
+ * this record overlaps the query range; callback.
+ */
+ if (ldiff >= 0 && hdiff >= 0) {
+ error = fn(cur, recp, priv);
+ if (error < 0 ||
+ error == XFS_BTREE_QUERY_RANGE_ABORT)
+ break;
+ } else if (hdiff < 0) {
+ /* Record is larger than high key; pop. */
+ goto pop_up;
+ }
+ cur->bc_ptrs[level]++;
+ continue;
+ }
+
+ /* Handle an internal node. */
+ lkp = xfs_btree_key_addr(cur, cur->bc_ptrs[level], block);
+ hkp = xfs_btree_high_key_addr(cur, cur->bc_ptrs[level], block);
+ pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block);
+
+ ldiff = cur->bc_ops->diff_two_keys(cur, hkp, low_key);
+ hdiff = cur->bc_ops->diff_two_keys(cur, high_key, lkp);
+
+ /*
+ * If (pointer's high key >= query's low key) and
+ * (query's high key >= pointer's low key), then
+ * this record overlaps the query range; follow pointer.
+ */
+ if (ldiff >= 0 && hdiff >= 0) {
+ level--;
+ error = xfs_btree_lookup_get_block(cur, level, pp,
+ &block);
+ if (error)
+ goto out;
+ xfs_btree_get_block(cur, level, &bp);
+ trace_xfs_btree_overlapped_query_range(cur, level, bp);
+#ifdef DEBUG
+ error = xfs_btree_check_block(cur, block, level, bp);
+ if (error)
+ goto out;
+#endif
+ cur->bc_ptrs[level] = 1;
+ continue;
+ } else if (hdiff < 0) {
+ /* The low key is larger than the upper range; pop. */
+ goto pop_up;
+ }
+ cur->bc_ptrs[level]++;
+ }
+
+out:
+ /*
+ * If we don't end this function with the cursor pointing at a record
+ * block, a subsequent non-error cursor deletion will not release
+ * node-level buffers, causing a buffer leak. This is quite possible
+ * with a zero-results range query, so release the buffers if we
+ * failed to return any results.
+ */
+ if (cur->bc_bufs[0] == NULL) {
+ for (i = 0; i < cur->bc_nlevels; i++) {
+ if (cur->bc_bufs[i]) {
+ xfs_trans_brelse(cur->bc_tp, cur->bc_bufs[i]);
+ cur->bc_bufs[i] = NULL;
+ cur->bc_ptrs[i] = 0;
+ cur->bc_ra[i] = 0;
+ }
+ }
+ }
+
+ return error;
+}
+
+/*
+ * Query a btree for all records overlapping a given interval of keys. The
+ * supplied function will be called with each record found; return one of the
+ * XFS_BTREE_QUERY_RANGE_{CONTINUE,ABORT} values or the usual negative error
+ * code. This function returns XFS_BTREE_QUERY_RANGE_ABORT, zero, or a
+ * negative error code.
+ */
+int
+xfs_btree_query_range(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_irec *low_rec,
+ union xfs_btree_irec *high_rec,
+ xfs_btree_query_range_fn fn,
+ void *priv)
+{
+ union xfs_btree_rec rec;
+ union xfs_btree_key low_key;
+ union xfs_btree_key high_key;
+
+ /* Find the keys of both ends of the interval. */
+ cur->bc_rec = *high_rec;
+ cur->bc_ops->init_rec_from_cur(cur, &rec);
+ cur->bc_ops->init_key_from_rec(&high_key, &rec);
+
+ cur->bc_rec = *low_rec;
+ cur->bc_ops->init_rec_from_cur(cur, &rec);
+ cur->bc_ops->init_key_from_rec(&low_key, &rec);
+
+ /* Enforce low key < high key. */
+ if (cur->bc_ops->diff_two_keys(cur, &low_key, &high_key) > 0)
+ return -EINVAL;
+
+ if (!(cur->bc_flags & XFS_BTREE_OVERLAPPING))
+ return xfs_btree_simple_query_range(cur, &low_key,
+ &high_key, fn, priv);
+ return xfs_btree_overlapped_query_range(cur, &low_key, &high_key,
+ fn, priv);
+}
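+
+/*
+ * Example usage (hypothetical callback; a sketch, not part of this
+ * patch): count the records overlapping an interval.
+ *
+ *	static int
+ *	xfs_query_count_fn(struct xfs_btree_cur *cur,
+ *			   union xfs_btree_rec *rec, void *priv)
+ *	{
+ *		unsigned int	*count = priv;
+ *
+ *		(*count)++;
+ *		return XFS_BTREE_QUERY_RANGE_CONTINUE;
+ *	}
+ *
+ * The caller fills two irecs (here assuming an allocbt cursor) and runs:
+ *
+ *	low.a.ar_startblock = bno;
+ *	high.a.ar_startblock = bno + len - 1;
+ *	error = xfs_btree_query_range(cur, &low, &high,
+ *			xfs_query_count_fn, &count);
+ */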
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index 2e874be70209..04d0865e5e6d 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -19,7 +19,7 @@
#define __XFS_BTREE_H__
struct xfs_buf;
-struct xfs_bmap_free;
+struct xfs_defer_ops;
struct xfs_inode;
struct xfs_mount;
struct xfs_trans;
@@ -38,17 +38,37 @@ union xfs_btree_ptr {
};
union xfs_btree_key {
- xfs_bmbt_key_t bmbt;
- xfs_bmdr_key_t bmbr; /* bmbt root block */
- xfs_alloc_key_t alloc;
- xfs_inobt_key_t inobt;
+ struct xfs_bmbt_key bmbt;
+ xfs_bmdr_key_t bmbr; /* bmbt root block */
+ xfs_alloc_key_t alloc;
+ struct xfs_inobt_key inobt;
+ struct xfs_rmap_key rmap;
+};
+
+/*
+ * In-core key that holds both low and high keys for overlapped btrees.
+ * The two keys are packed next to each other on disk, so do the same
+ * in memory. Preserve the existing xfs_btree_key as a single key to
+ * avoid the mental model breakage that would happen if we passed a
+ * bigkey into a function that operates on a single key.
+ */
+union xfs_btree_bigkey {
+ struct xfs_bmbt_key bmbt;
+ xfs_bmdr_key_t bmbr; /* bmbt root block */
+ xfs_alloc_key_t alloc;
+ struct xfs_inobt_key inobt;
+ struct {
+ struct xfs_rmap_key rmap;
+ struct xfs_rmap_key rmap_hi;
+ };
};
union xfs_btree_rec {
- xfs_bmbt_rec_t bmbt;
- xfs_bmdr_rec_t bmbr; /* bmbt root block */
- xfs_alloc_rec_t alloc;
- xfs_inobt_rec_t inobt;
+ struct xfs_bmbt_rec bmbt;
+ xfs_bmdr_rec_t bmbr; /* bmbt root block */
+ struct xfs_alloc_rec alloc;
+ struct xfs_inobt_rec inobt;
+ struct xfs_rmap_rec rmap;
};
/*
@@ -63,6 +83,7 @@ union xfs_btree_rec {
#define XFS_BTNUM_BMAP ((xfs_btnum_t)XFS_BTNUM_BMAPi)
#define XFS_BTNUM_INO ((xfs_btnum_t)XFS_BTNUM_INOi)
#define XFS_BTNUM_FINO ((xfs_btnum_t)XFS_BTNUM_FINOi)
+#define XFS_BTNUM_RMAP ((xfs_btnum_t)XFS_BTNUM_RMAPi)
/*
* For logging record fields.
@@ -95,6 +116,7 @@ do { \
case XFS_BTNUM_BMAP: __XFS_BTREE_STATS_INC(__mp, bmbt, stat); break; \
case XFS_BTNUM_INO: __XFS_BTREE_STATS_INC(__mp, ibt, stat); break; \
case XFS_BTNUM_FINO: __XFS_BTREE_STATS_INC(__mp, fibt, stat); break; \
+ case XFS_BTNUM_RMAP: __XFS_BTREE_STATS_INC(__mp, rmap, stat); break; \
case XFS_BTNUM_MAX: ASSERT(0); /* fucking gcc */ ; break; \
} \
} while (0)
@@ -115,11 +137,13 @@ do { \
__XFS_BTREE_STATS_ADD(__mp, ibt, stat, val); break; \
case XFS_BTNUM_FINO: \
__XFS_BTREE_STATS_ADD(__mp, fibt, stat, val); break; \
+ case XFS_BTNUM_RMAP: \
+ __XFS_BTREE_STATS_ADD(__mp, rmap, stat, val); break; \
case XFS_BTNUM_MAX: ASSERT(0); /* fucking gcc */ ; break; \
} \
} while (0)
-#define XFS_BTREE_MAXLEVELS 8 /* max of all btrees */
+#define XFS_BTREE_MAXLEVELS 9 /* max of all btrees */
struct xfs_btree_ops {
/* size of the key and record structures */
@@ -158,17 +182,25 @@ struct xfs_btree_ops {
/* init values of btree structures */
void (*init_key_from_rec)(union xfs_btree_key *key,
union xfs_btree_rec *rec);
- void (*init_rec_from_key)(union xfs_btree_key *key,
- union xfs_btree_rec *rec);
void (*init_rec_from_cur)(struct xfs_btree_cur *cur,
union xfs_btree_rec *rec);
void (*init_ptr_from_cur)(struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr);
+ void (*init_high_key_from_rec)(union xfs_btree_key *key,
+ union xfs_btree_rec *rec);
/* difference between key value and cursor value */
__int64_t (*key_diff)(struct xfs_btree_cur *cur,
union xfs_btree_key *key);
+ /*
+ * Difference between key1 and key2 -- positive if key1 > key2,
+ * negative if key1 < key2, and zero if equal.
+ */
+ __int64_t (*diff_two_keys)(struct xfs_btree_cur *cur,
+ union xfs_btree_key *key1,
+ union xfs_btree_key *key2);
+
const struct xfs_buf_ops *buf_ops;
#if defined(DEBUG) || defined(XFS_WARN)
@@ -192,6 +224,13 @@ struct xfs_btree_ops {
#define LASTREC_DELREC 2
+union xfs_btree_irec {
+ struct xfs_alloc_rec_incore a;
+ struct xfs_bmbt_irec b;
+ struct xfs_inobt_rec_incore i;
+ struct xfs_rmap_irec r;
+};
+
/*
* Btree cursor structure.
* This collects all information needed by the btree code in one place.
@@ -202,11 +241,7 @@ typedef struct xfs_btree_cur
struct xfs_mount *bc_mp; /* file system mount struct */
const struct xfs_btree_ops *bc_ops;
uint bc_flags; /* btree features - below */
- union {
- xfs_alloc_rec_incore_t a;
- xfs_bmbt_irec_t b;
- xfs_inobt_rec_incore_t i;
- } bc_rec; /* current insert/search record value */
+ union xfs_btree_irec bc_rec; /* current insert/search record value */
struct xfs_buf *bc_bufs[XFS_BTREE_MAXLEVELS]; /* buf ptr per level */
int bc_ptrs[XFS_BTREE_MAXLEVELS]; /* key/record # */
__uint8_t bc_ra[XFS_BTREE_MAXLEVELS]; /* readahead bits */
@@ -218,11 +253,12 @@ typedef struct xfs_btree_cur
union {
struct { /* needed for BNO, CNT, INO */
struct xfs_buf *agbp; /* agf/agi buffer pointer */
+ struct xfs_defer_ops *dfops; /* deferred updates */
xfs_agnumber_t agno; /* ag number */
} a;
struct { /* needed for BMAP */
struct xfs_inode *ip; /* pointer to our inode */
- struct xfs_bmap_free *flist; /* list to free after */
+ struct xfs_defer_ops *dfops; /* deferred updates */
xfs_fsblock_t firstblock; /* 1st blk allocated */
int allocated; /* count of alloced */
short forksize; /* fork's inode space */
@@ -238,6 +274,7 @@ typedef struct xfs_btree_cur
#define XFS_BTREE_ROOT_IN_INODE (1<<1) /* root may be variable size */
#define XFS_BTREE_LASTREC_UPDATE (1<<2) /* track last rec externally */
#define XFS_BTREE_CRC_BLOCKS (1<<3) /* uses extended btree blocks */
+#define XFS_BTREE_OVERLAPPING (1<<4) /* overlapping intervals */
#define XFS_BTREE_NOERROR 0
@@ -474,5 +511,22 @@ static inline int xfs_btree_get_level(struct xfs_btree_block *block)
bool xfs_btree_sblock_v5hdr_verify(struct xfs_buf *bp);
bool xfs_btree_sblock_verify(struct xfs_buf *bp, unsigned int max_recs);
+uint xfs_btree_compute_maxlevels(struct xfs_mount *mp, uint *limits,
+ unsigned long len);
+
+/* return codes */
+#define XFS_BTREE_QUERY_RANGE_CONTINUE 0 /* keep iterating */
+#define XFS_BTREE_QUERY_RANGE_ABORT 1 /* stop iterating */
+typedef int (*xfs_btree_query_range_fn)(struct xfs_btree_cur *cur,
+ union xfs_btree_rec *rec, void *priv);
+
+int xfs_btree_query_range(struct xfs_btree_cur *cur,
+ union xfs_btree_irec *low_rec, union xfs_btree_irec *high_rec,
+ xfs_btree_query_range_fn fn, void *priv);
+
+typedef int (*xfs_btree_visit_blocks_fn)(struct xfs_btree_cur *cur, int level,
+ void *data);
+int xfs_btree_visit_blocks(struct xfs_btree_cur *cur,
+ xfs_btree_visit_blocks_fn fn, void *data);
#endif /* __XFS_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 097bf7717d80..f2dc1a950c85 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -356,7 +356,6 @@ xfs_da3_split(
struct xfs_da_state_blk *newblk;
struct xfs_da_state_blk *addblk;
struct xfs_da_intnode *node;
- struct xfs_buf *bp;
int max;
int action = 0;
int error;
@@ -397,7 +396,9 @@ xfs_da3_split(
break;
}
/*
- * Entry wouldn't fit, split the leaf again.
+ * Entry wouldn't fit, split the leaf again. The new
+ * extrablk will be consumed by xfs_da3_node_split if
+ * the node is split.
*/
state->extravalid = 1;
if (state->inleaf) {
@@ -446,6 +447,14 @@ xfs_da3_split(
return 0;
/*
+ * xfs_da3_node_split() should have consumed any extra blocks we added
+ * during a double leaf split in the attr fork. This is guaranteed as
+ * we can't be here if the attr fork only has a single leaf block.
+ */
+ ASSERT(state->extravalid == 0 ||
+ state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
+
+ /*
* Split the root node.
*/
ASSERT(state->path.active == 0);
@@ -457,43 +466,33 @@ xfs_da3_split(
}
/*
- * Update pointers to the node which used to be block 0 and
- * just got bumped because of the addition of a new root node.
- * There might be three blocks involved if a double split occurred,
- * and the original block 0 could be at any position in the list.
+ * Update pointers to the node which used to be block 0 and just got
+ * bumped because of the addition of a new root node. Note that the
+ * original block 0 could be at any position in the list of blocks in
+ * the tree.
*
- * Note: the magic numbers and sibling pointers are in the same
- * physical place for both v2 and v3 headers (by design). Hence it
- * doesn't matter which version of the xfs_da_intnode structure we use
- * here as the result will be the same using either structure.
+ * Note: the magic numbers and sibling pointers are in the same physical
+ * place for both v2 and v3 headers (by design). Hence it doesn't matter
+ * which version of the xfs_da_intnode structure we use here as the
+ * result will be the same using either structure.
*/
node = oldblk->bp->b_addr;
if (node->hdr.info.forw) {
- if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
- bp = addblk->bp;
- } else {
- ASSERT(state->extravalid);
- bp = state->extrablk.bp;
- }
- node = bp->b_addr;
+ ASSERT(be32_to_cpu(node->hdr.info.forw) == addblk->blkno);
+ node = addblk->bp->b_addr;
node->hdr.info.back = cpu_to_be32(oldblk->blkno);
- xfs_trans_log_buf(state->args->trans, bp,
- XFS_DA_LOGRANGE(node, &node->hdr.info,
- sizeof(node->hdr.info)));
+ xfs_trans_log_buf(state->args->trans, addblk->bp,
+ XFS_DA_LOGRANGE(node, &node->hdr.info,
+ sizeof(node->hdr.info)));
}
node = oldblk->bp->b_addr;
if (node->hdr.info.back) {
- if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
- bp = addblk->bp;
- } else {
- ASSERT(state->extravalid);
- bp = state->extrablk.bp;
- }
- node = bp->b_addr;
+ ASSERT(be32_to_cpu(node->hdr.info.back) == addblk->blkno);
+ node = addblk->bp->b_addr;
node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
- xfs_trans_log_buf(state->args->trans, bp,
- XFS_DA_LOGRANGE(node, &node->hdr.info,
- sizeof(node->hdr.info)));
+ xfs_trans_log_buf(state->args->trans, addblk->bp,
+ XFS_DA_LOGRANGE(node, &node->hdr.info,
+ sizeof(node->hdr.info)));
}
addblk->bp = NULL;
return 0;
@@ -2030,7 +2029,7 @@ xfs_da_grow_inode_int(
error = xfs_bmapi_write(tp, dp, *bno, count,
xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
args->firstblock, args->total, &map, &nmap,
- args->flist);
+ args->dfops);
if (error)
return error;
@@ -2053,7 +2052,7 @@ xfs_da_grow_inode_int(
error = xfs_bmapi_write(tp, dp, b, c,
xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
args->firstblock, args->total,
- &mapp[mapi], &nmap, args->flist);
+ &mapp[mapi], &nmap, args->dfops);
if (error)
goto out_free_map;
if (nmap < 1)
@@ -2363,7 +2362,7 @@ xfs_da_shrink_inode(
*/
error = xfs_bunmapi(tp, dp, dead_blkno, count,
xfs_bmapi_aflag(w), 0, args->firstblock,
- args->flist, &done);
+ args->dfops, &done);
if (error == -ENOSPC) {
if (w != XFS_DATA_FORK)
break;
diff --git a/fs/xfs/libxfs/xfs_da_btree.h b/fs/xfs/libxfs/xfs_da_btree.h
index 6e153e399a77..98c75cbe6ac2 100644
--- a/fs/xfs/libxfs/xfs_da_btree.h
+++ b/fs/xfs/libxfs/xfs_da_btree.h
@@ -19,7 +19,7 @@
#ifndef __XFS_DA_BTREE_H__
#define __XFS_DA_BTREE_H__
-struct xfs_bmap_free;
+struct xfs_defer_ops;
struct xfs_inode;
struct xfs_trans;
struct zone;
@@ -70,7 +70,7 @@ typedef struct xfs_da_args {
xfs_ino_t inumber; /* input/output inode number */
struct xfs_inode *dp; /* directory inode to manipulate */
xfs_fsblock_t *firstblock; /* ptr to firstblock for bmap calls */
- struct xfs_bmap_free *flist; /* ptr to freelist for bmap_finish */
+ struct xfs_defer_ops *dfops; /* ptr to freelist for bmap_finish */
struct xfs_trans *trans; /* current trans (changes over time) */
xfs_extlen_t total; /* total blocks needed, for 1st bmap */
int whichfork; /* data or attribute fork */
diff --git a/fs/xfs/libxfs/xfs_da_format.c b/fs/xfs/libxfs/xfs_da_format.c
index 9d624a622946..f1e8d4dbb600 100644
--- a/fs/xfs/libxfs/xfs_da_format.c
+++ b/fs/xfs/libxfs/xfs_da_format.c
@@ -40,8 +40,7 @@ xfs_dir2_sf_entsize(
int count = sizeof(struct xfs_dir2_sf_entry); /* namelen + offset */
count += len; /* name */
- count += hdr->i8count ? sizeof(xfs_dir2_ino8_t) :
- sizeof(xfs_dir2_ino4_t); /* ino # */
+ count += hdr->i8count ? XFS_INO64_SIZE : XFS_INO32_SIZE; /* ino # */
return count;
}
@@ -125,33 +124,33 @@ xfs_dir3_sfe_put_ftype(
static xfs_ino_t
xfs_dir2_sf_get_ino(
struct xfs_dir2_sf_hdr *hdr,
- xfs_dir2_inou_t *from)
+ __uint8_t *from)
{
if (hdr->i8count)
- return get_unaligned_be64(&from->i8.i) & 0x00ffffffffffffffULL;
+ return get_unaligned_be64(from) & 0x00ffffffffffffffULL;
else
- return get_unaligned_be32(&from->i4.i);
+ return get_unaligned_be32(from);
}
static void
xfs_dir2_sf_put_ino(
struct xfs_dir2_sf_hdr *hdr,
- xfs_dir2_inou_t *to,
+ __uint8_t *to,
xfs_ino_t ino)
{
ASSERT((ino & 0xff00000000000000ULL) == 0);
if (hdr->i8count)
- put_unaligned_be64(ino, &to->i8.i);
+ put_unaligned_be64(ino, to);
else
- put_unaligned_be32(ino, &to->i4.i);
+ put_unaligned_be32(ino, to);
}
static xfs_ino_t
xfs_dir2_sf_get_parent_ino(
struct xfs_dir2_sf_hdr *hdr)
{
- return xfs_dir2_sf_get_ino(hdr, &hdr->parent);
+ return xfs_dir2_sf_get_ino(hdr, hdr->parent);
}
static void
@@ -159,7 +158,7 @@ xfs_dir2_sf_put_parent_ino(
struct xfs_dir2_sf_hdr *hdr,
xfs_ino_t ino)
{
- xfs_dir2_sf_put_ino(hdr, &hdr->parent, ino);
+ xfs_dir2_sf_put_ino(hdr, hdr->parent, ino);
}
/*
@@ -173,8 +172,7 @@ xfs_dir2_sfe_get_ino(
struct xfs_dir2_sf_hdr *hdr,
struct xfs_dir2_sf_entry *sfep)
{
- return xfs_dir2_sf_get_ino(hdr,
- (xfs_dir2_inou_t *)&sfep->name[sfep->namelen]);
+ return xfs_dir2_sf_get_ino(hdr, &sfep->name[sfep->namelen]);
}
static void
@@ -183,8 +181,7 @@ xfs_dir2_sfe_put_ino(
struct xfs_dir2_sf_entry *sfep,
xfs_ino_t ino)
{
- xfs_dir2_sf_put_ino(hdr,
- (xfs_dir2_inou_t *)&sfep->name[sfep->namelen], ino);
+ xfs_dir2_sf_put_ino(hdr, &sfep->name[sfep->namelen], ino);
}
static xfs_ino_t
@@ -192,8 +189,7 @@ xfs_dir3_sfe_get_ino(
struct xfs_dir2_sf_hdr *hdr,
struct xfs_dir2_sf_entry *sfep)
{
- return xfs_dir2_sf_get_ino(hdr,
- (xfs_dir2_inou_t *)&sfep->name[sfep->namelen + 1]);
+ return xfs_dir2_sf_get_ino(hdr, &sfep->name[sfep->namelen + 1]);
}
static void
@@ -202,8 +198,7 @@ xfs_dir3_sfe_put_ino(
struct xfs_dir2_sf_entry *sfep,
xfs_ino_t ino)
{
- xfs_dir2_sf_put_ino(hdr,
- (xfs_dir2_inou_t *)&sfep->name[sfep->namelen + 1], ino);
+ xfs_dir2_sf_put_ino(hdr, &sfep->name[sfep->namelen + 1], ino);
}
diff --git a/fs/xfs/libxfs/xfs_da_format.h b/fs/xfs/libxfs/xfs_da_format.h
index 8d4d8bce41bf..9a492a9e19bd 100644
--- a/fs/xfs/libxfs/xfs_da_format.h
+++ b/fs/xfs/libxfs/xfs_da_format.h
@@ -192,12 +192,6 @@ typedef __uint16_t xfs_dir2_data_off_t;
typedef uint xfs_dir2_data_aoff_t; /* argument form */
/*
- * Normalized offset (in a data block) of the entry, really xfs_dir2_data_off_t.
- * Only need 16 bits, this is the byte offset into the single block form.
- */
-typedef struct { __uint8_t i[2]; } __arch_pack xfs_dir2_sf_off_t;
-
-/*
* Offset in data space of a data entry.
*/
typedef __uint32_t xfs_dir2_dataptr_t;
@@ -214,22 +208,10 @@ typedef xfs_off_t xfs_dir2_off_t;
*/
typedef __uint32_t xfs_dir2_db_t;
-/*
- * Inode number stored as 8 8-bit values.
- */
-typedef struct { __uint8_t i[8]; } xfs_dir2_ino8_t;
-
-/*
- * Inode number stored as 4 8-bit values.
- * Works a lot of the time, when all the inode numbers in a directory
- * fit in 32 bits.
- */
-typedef struct { __uint8_t i[4]; } xfs_dir2_ino4_t;
+#define XFS_INO32_SIZE 4
+#define XFS_INO64_SIZE 8
+#define XFS_INO64_DIFF (XFS_INO64_SIZE - XFS_INO32_SIZE)
-typedef union {
- xfs_dir2_ino8_t i8;
- xfs_dir2_ino4_t i4;
-} xfs_dir2_inou_t;
#define XFS_DIR2_MAX_SHORT_INUM ((xfs_ino_t)0xffffffffULL)
/*
@@ -246,39 +228,38 @@ typedef union {
typedef struct xfs_dir2_sf_hdr {
__uint8_t count; /* count of entries */
__uint8_t i8count; /* count of 8-byte inode #s */
- xfs_dir2_inou_t parent; /* parent dir inode number */
-} __arch_pack xfs_dir2_sf_hdr_t;
+ __uint8_t parent[8]; /* parent dir inode number */
+} __packed xfs_dir2_sf_hdr_t;
typedef struct xfs_dir2_sf_entry {
__u8 namelen; /* actual name length */
- xfs_dir2_sf_off_t offset; /* saved offset */
+ __u8 offset[2]; /* saved offset */
__u8 name[]; /* name, variable size */
/*
* A single byte containing the file type field follows the inode
* number for version 3 directory entries.
*
- * A xfs_dir2_ino8_t or xfs_dir2_ino4_t follows here, at a
- * variable offset after the name.
+ * A 64-bit or 32-bit inode number follows here, at a variable offset
+ * after the name.
*/
-} __arch_pack xfs_dir2_sf_entry_t;
+} xfs_dir2_sf_entry_t;
static inline int xfs_dir2_sf_hdr_size(int i8count)
{
return sizeof(struct xfs_dir2_sf_hdr) -
- (i8count == 0) *
- (sizeof(xfs_dir2_ino8_t) - sizeof(xfs_dir2_ino4_t));
+ (i8count == 0) * XFS_INO64_DIFF;
}
static inline xfs_dir2_data_aoff_t
xfs_dir2_sf_get_offset(xfs_dir2_sf_entry_t *sfep)
{
- return get_unaligned_be16(&sfep->offset.i);
+ return get_unaligned_be16(sfep->offset);
}
static inline void
xfs_dir2_sf_put_offset(xfs_dir2_sf_entry_t *sfep, xfs_dir2_data_aoff_t off)
{
- put_unaligned_be16(off, &sfep->offset.i);
+ put_unaligned_be16(off, sfep->offset);
}
static inline struct xfs_dir2_sf_entry *
@@ -648,6 +629,7 @@ typedef struct xfs_attr_shortform {
struct xfs_attr_sf_hdr { /* constant-structure header block */
__be16 totsize; /* total bytes in shortform list */
__u8 count; /* count of active entries */
+ __u8 padding;
} hdr;
struct xfs_attr_sf_entry {
__uint8_t namelen; /* actual length of name (no NULL) */
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
new file mode 100644
index 000000000000..054a2032fdb3
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (C) 2016 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_trans.h"
+#include "xfs_trace.h"
+
+/*
+ * Deferred Operations in XFS
+ *
+ * Due to the way locking rules work in XFS, certain transactions (block
+ * mapping and unmapping, typically) have permanent reservations so that
+ * we can roll the transaction to adhere to AG locking order rules and
+ * to unlock buffers between metadata updates. Prior to rmap/reflink,
+ * the mapping code had a mechanism to perform these deferrals for
+ * extents that were going to be freed; this code makes that facility
+ * more generic.
+ *
+ * When adding the reverse mapping and reflink features, it became
+ * necessary to perform complex remapping multi-transactions to comply
+ * with AG locking order rules, and to be able to spread a single
+ * refcount update operation (an operation on an n-block extent can
+ * update as many as n records!) among multiple transactions. XFS can
+ * roll a transaction to facilitate this, but using this facility
+ * requires us to log "intent" items in case log recovery needs to
+ * redo the operation, and to log "done" items to indicate that redo
+ * is not necessary.
+ *
+ * Deferred work is tracked in xfs_defer_pending items. Each pending
+ * item tracks one type of deferred work. Incoming work items (which
+ * have not yet had an intent logged) are attached to a pending item
+ * on the dop_intake list, where they wait for the caller to finish
+ * the deferred operations.
+ *
+ * Finishing a set of deferred operations is an involved process. To
+ * start, we define "rolling a deferred-op transaction" as follows:
+ *
+ * > For each xfs_defer_pending item on the dop_intake list,
+ * - Sort the work items in AG order. XFS locking
+ * order rules require us to lock buffers in AG order.
+ * - Create a log intent item for that type.
+ * - Attach it to the pending item.
+ * - Move the pending item from the dop_intake list to the
+ * dop_pending list.
+ * > Roll the transaction.
+ *
+ * NOTE: To avoid exceeding the transaction reservation, we limit the
+ * number of items that we attach to a given xfs_defer_pending.
+ *
+ * The actual finishing process looks like this:
+ *
+ * > For each xfs_defer_pending in the dop_pending list,
+ * - Roll the deferred-op transaction as above.
+ * - Create a log done item for that type, and attach it to the
+ * log intent item.
+ * - For each work item attached to the log intent item,
+ * * Perform the described action.
+ * * Attach the work item to the log done item.
+ *
+ * The key here is that we must log an intent item for all pending
+ * work items every time we roll the transaction, and that we must log
+ * a done item as soon as the work is completed. With this mechanism
+ * we can perform complex remapping operations, chaining intent items
+ * as needed.
+ *
+ * This is an example of remapping the extent (E, E+B) into file X at
+ * offset A and dealing with the extent (C, C+B) already being mapped
+ * there:
+ * +-------------------------------------------------+
+ * | Unmap file X startblock C offset A length B | t0
+ * | Intent to reduce refcount for extent (C, B) |
+ * | Intent to remove rmap (X, C, A, B) |
+ * | Intent to free extent (D, 1) (bmbt block) |
+ * | Intent to map (X, A, B) at startblock E |
+ * +-------------------------------------------------+
+ * | Map file X startblock E offset A length B | t1
+ * | Done mapping (X, E, A, B) |
+ * | Intent to increase refcount for extent (E, B) |
+ * | Intent to add rmap (X, E, A, B) |
+ * +-------------------------------------------------+
+ * | Reduce refcount for extent (C, B) | t2
+ * | Done reducing refcount for extent (C, B) |
+ * | Increase refcount for extent (E, B) |
+ * | Done increasing refcount for extent (E, B) |
+ * | Intent to free extent (C, B) |
+ * | Intent to free extent (F, 1) (refcountbt block) |
+ * | Intent to remove rmap (F, 1, REFC) |
+ * +-------------------------------------------------+
+ * | Remove rmap (X, C, A, B) | t3
+ * | Done removing rmap (X, C, A, B) |
+ * | Add rmap (X, E, A, B) |
+ * | Done adding rmap (X, E, A, B) |
+ * | Remove rmap (F, 1, REFC) |
+ * | Done removing rmap (F, 1, REFC) |
+ * +-------------------------------------------------+
+ * | Free extent (C, B) | t4
+ * | Done freeing extent (C, B) |
+ * | Free extent (D, 1) |
+ * | Done freeing extent (D, 1) |
+ * | Free extent (F, 1) |
+ * | Done freeing extent (F, 1) |
+ * +-------------------------------------------------+
+ *
+ * If we should crash before t2 commits, log recovery replays
+ * the following intent items:
+ *
+ * - Intent to reduce refcount for extent (C, B)
+ * - Intent to remove rmap (X, C, A, B)
+ * - Intent to free extent (D, 1) (bmbt block)
+ * - Intent to increase refcount for extent (E, B)
+ * - Intent to add rmap (X, E, A, B)
+ *
+ * In the process of recovering, it should also generate and take care
+ * of these intent items:
+ *
+ * - Intent to free extent (C, B)
+ * - Intent to free extent (F, 1) (refcountbt block)
+ * - Intent to remove rmap (F, 1, REFC)
+ */
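+
+/*
+ * Minimal caller sketch (hypothetical work item embedding a list_head;
+ * transaction setup elided) showing how the interface below chains
+ * together:
+ *
+ *	struct xfs_defer_ops	dop;
+ *	xfs_fsblock_t		firstfsb;
+ *
+ *	xfs_defer_init(&dop, &firstfsb);
+ *	xfs_defer_add(&dop, XFS_DEFER_OPS_TYPE_FREE, &item->list);
+ *	error = xfs_defer_finish(&tp, &dop, ip);
+ *	if (error)
+ *		xfs_defer_cancel(&dop);
+ */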
+
+static const struct xfs_defer_op_type *defer_op_types[XFS_DEFER_OPS_TYPE_MAX];
+
+/*
+ * For each pending item in the intake list, log its intent item and the
+ * associated extents, then add the entire intake list to the end of
+ * the pending list.
+ */
+STATIC void
+xfs_defer_intake_work(
+ struct xfs_trans *tp,
+ struct xfs_defer_ops *dop)
+{
+ struct list_head *li;
+ struct xfs_defer_pending *dfp;
+
+ list_for_each_entry(dfp, &dop->dop_intake, dfp_list) {
+ trace_xfs_defer_intake_work(tp->t_mountp, dfp);
+ dfp->dfp_intent = dfp->dfp_type->create_intent(tp,
+ dfp->dfp_count);
+ list_sort(tp->t_mountp, &dfp->dfp_work,
+ dfp->dfp_type->diff_items);
+ list_for_each(li, &dfp->dfp_work)
+ dfp->dfp_type->log_item(tp, dfp->dfp_intent, li);
+ }
+
+ list_splice_tail_init(&dop->dop_intake, &dop->dop_pending);
+}
+
+/* Abort all the intents that were committed. */
+STATIC void
+xfs_defer_trans_abort(
+ struct xfs_trans *tp,
+ struct xfs_defer_ops *dop,
+ int error)
+{
+ struct xfs_defer_pending *dfp;
+
+ trace_xfs_defer_trans_abort(tp->t_mountp, dop);
+ /*
+ * If the transaction was committed, drop the intent reference
+ * since we're bailing out of here. The other reference is
+ * dropped when the intent hits the AIL. If the transaction
+ * was not committed, the intent is freed by the intent item
+ * unlock handler on abort.
+ */
+ if (!dop->dop_committed)
+ return;
+
+ /* Abort intent items. */
+ list_for_each_entry(dfp, &dop->dop_pending, dfp_list) {
+ trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
+ if (dfp->dfp_committed)
+ dfp->dfp_type->abort_intent(dfp->dfp_intent);
+ }
+
+ /* Shut down FS. */
+ xfs_force_shutdown(tp->t_mountp, (error == -EFSCORRUPTED) ?
+ SHUTDOWN_CORRUPT_INCORE : SHUTDOWN_META_IO_ERROR);
+}
+
+/* Roll a transaction so we can do some deferred op processing. */
+STATIC int
+xfs_defer_trans_roll(
+ struct xfs_trans **tp,
+ struct xfs_defer_ops *dop,
+ struct xfs_inode *ip)
+{
+ int i;
+ int error;
+
+ /* Log all the joined inodes except the one we passed in. */
+ for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++) {
+ if (dop->dop_inodes[i] == ip)
+ continue;
+ xfs_trans_log_inode(*tp, dop->dop_inodes[i], XFS_ILOG_CORE);
+ }
+
+ trace_xfs_defer_trans_roll((*tp)->t_mountp, dop);
+
+ /* Roll the transaction. */
+ error = xfs_trans_roll(tp, ip);
+ if (error) {
+ trace_xfs_defer_trans_roll_error((*tp)->t_mountp, dop, error);
+ xfs_defer_trans_abort(*tp, dop, error);
+ return error;
+ }
+ dop->dop_committed = true;
+
+ /* Rejoin the joined inodes except the one we passed in. */
+ for (i = 0; i < XFS_DEFER_OPS_NR_INODES && dop->dop_inodes[i]; i++) {
+ if (dop->dop_inodes[i] == ip)
+ continue;
+ xfs_trans_ijoin(*tp, dop->dop_inodes[i], 0);
+ }
+
+ return error;
+}
+
+/* Do we have any work items to finish? */
+bool
+xfs_defer_has_unfinished_work(
+ struct xfs_defer_ops *dop)
+{
+ return !list_empty(&dop->dop_pending) || !list_empty(&dop->dop_intake);
+}
+
+/*
+ * Add this inode to the deferred op. Each joined inode is relogged
+ * each time we roll the transaction, in addition to any inode passed
+ * to xfs_defer_finish().
+ */
+int
+xfs_defer_join(
+ struct xfs_defer_ops *dop,
+ struct xfs_inode *ip)
+{
+ int i;
+
+ for (i = 0; i < XFS_DEFER_OPS_NR_INODES; i++) {
+ if (dop->dop_inodes[i] == ip)
+ return 0;
+ else if (dop->dop_inodes[i] == NULL) {
+ dop->dop_inodes[i] = ip;
+ return 0;
+ }
+ }
+
+ return -EFSCORRUPTED;
+}
+
+/*
+ * Finish all the pending work. This involves logging intent items for
+ * any work items that wandered in since the last transaction roll (if
+ * one has even happened), rolling the transaction, and finishing the
+ * work items in the first item on the logged-and-pending list.
+ *
+ * If an inode is provided, relog it to the new transaction.
+ */
+int
+xfs_defer_finish(
+ struct xfs_trans **tp,
+ struct xfs_defer_ops *dop,
+ struct xfs_inode *ip)
+{
+ struct xfs_defer_pending *dfp;
+ struct list_head *li;
+ struct list_head *n;
+ void *done_item = NULL;
+ void *state;
+ int error = 0;
+ void (*cleanup_fn)(struct xfs_trans *, void *, int);
+
+ ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
+
+ trace_xfs_defer_finish((*tp)->t_mountp, dop);
+
+ /* Until we run out of pending work to finish... */
+ while (xfs_defer_has_unfinished_work(dop)) {
+ /* Log intents for work items sitting in the intake. */
+ xfs_defer_intake_work(*tp, dop);
+
+ /* Roll the transaction. */
+ error = xfs_defer_trans_roll(tp, dop, ip);
+ if (error)
+ goto out;
+
+ /* Mark all pending intents as committed. */
+ list_for_each_entry_reverse(dfp, &dop->dop_pending, dfp_list) {
+ if (dfp->dfp_committed)
+ break;
+ trace_xfs_defer_pending_commit((*tp)->t_mountp, dfp);
+ dfp->dfp_committed = true;
+ }
+
+ /* Log an intent-done item for the first pending item. */
+ dfp = list_first_entry(&dop->dop_pending,
+ struct xfs_defer_pending, dfp_list);
+ trace_xfs_defer_pending_finish((*tp)->t_mountp, dfp);
+ done_item = dfp->dfp_type->create_done(*tp, dfp->dfp_intent,
+ dfp->dfp_count);
+ cleanup_fn = dfp->dfp_type->finish_cleanup;
+
+ /* Finish the work items. */
+ state = NULL;
+ list_for_each_safe(li, n, &dfp->dfp_work) {
+ list_del(li);
+ dfp->dfp_count--;
+ error = dfp->dfp_type->finish_item(*tp, dop, li,
+ done_item, &state);
+ if (error) {
+ /*
+ * Clean up after ourselves and jump out.
+ * xfs_defer_cancel will take care of freeing
+ * all these lists and their work items.
+ */
+ if (cleanup_fn)
+ cleanup_fn(*tp, state, error);
+ xfs_defer_trans_abort(*tp, dop, error);
+ goto out;
+ }
+ }
+ /* Done with the dfp, free it. */
+ list_del(&dfp->dfp_list);
+ kmem_free(dfp);
+
+ if (cleanup_fn)
+ cleanup_fn(*tp, state, error);
+ }
+
+out:
+ if (error)
+ trace_xfs_defer_finish_error((*tp)->t_mountp, dop, error);
+ else
+ trace_xfs_defer_finish_done((*tp)->t_mountp, dop);
+ return error;
+}
+
+/*
+ * Free up any items left in the list.
+ */
+void
+xfs_defer_cancel(
+ struct xfs_defer_ops *dop)
+{
+ struct xfs_defer_pending *dfp;
+ struct xfs_defer_pending *pli;
+ struct list_head *pwi;
+ struct list_head *n;
+
+ trace_xfs_defer_cancel(NULL, dop);
+
+ /*
+ * Free the pending items. Caller should already have arranged
+ * for the intent items to be released.
+ */
+ list_for_each_entry_safe(dfp, pli, &dop->dop_intake, dfp_list) {
+ trace_xfs_defer_intake_cancel(NULL, dfp);
+ list_del(&dfp->dfp_list);
+ list_for_each_safe(pwi, n, &dfp->dfp_work) {
+ list_del(pwi);
+ dfp->dfp_count--;
+ dfp->dfp_type->cancel_item(pwi);
+ }
+ ASSERT(dfp->dfp_count == 0);
+ kmem_free(dfp);
+ }
+ list_for_each_entry_safe(dfp, pli, &dop->dop_pending, dfp_list) {
+ trace_xfs_defer_pending_cancel(NULL, dfp);
+ list_del(&dfp->dfp_list);
+ list_for_each_safe(pwi, n, &dfp->dfp_work) {
+ list_del(pwi);
+ dfp->dfp_count--;
+ dfp->dfp_type->cancel_item(pwi);
+ }
+ ASSERT(dfp->dfp_count == 0);
+ kmem_free(dfp);
+ }
+}
+
+/* Add an item for later deferred processing. */
+void
+xfs_defer_add(
+ struct xfs_defer_ops *dop,
+ enum xfs_defer_ops_type type,
+ struct list_head *li)
+{
+ struct xfs_defer_pending *dfp = NULL;
+
+ /*
+ * Add the item to a pending item at the end of the intake list.
+ * If the last pending item has the same type, reuse it. Else,
+ * create a new pending item at the end of the intake list.
+ */
+ if (!list_empty(&dop->dop_intake)) {
+ dfp = list_last_entry(&dop->dop_intake,
+ struct xfs_defer_pending, dfp_list);
+ if (dfp->dfp_type->type != type ||
+ (dfp->dfp_type->max_items &&
+ dfp->dfp_count >= dfp->dfp_type->max_items))
+ dfp = NULL;
+ }
+ if (!dfp) {
+ dfp = kmem_alloc(sizeof(struct xfs_defer_pending),
+ KM_SLEEP | KM_NOFS);
+ dfp->dfp_type = defer_op_types[type];
+ dfp->dfp_committed = false;
+ dfp->dfp_intent = NULL;
+ dfp->dfp_count = 0;
+ INIT_LIST_HEAD(&dfp->dfp_work);
+ list_add_tail(&dfp->dfp_list, &dop->dop_intake);
+ }
+
+ list_add_tail(li, &dfp->dfp_work);
+ dfp->dfp_count++;
+}
+
+/* Register a deferred-op type. */
+void
+xfs_defer_init_op_type(
+ const struct xfs_defer_op_type *type)
+{
+ defer_op_types[type->type] = type;
+}
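+
+/*
+ * A deferred-op type is a static table of callbacks registered once at
+ * init time. Hypothetical sketch for an extent-free type (names assumed
+ * for illustration; the real table lives with the EFI/EFD code):
+ *
+ *	static const struct xfs_defer_op_type xfs_extent_free_defer_type = {
+ *		.type		= XFS_DEFER_OPS_TYPE_FREE,
+ *		.max_items	= XFS_EFI_MAX_FAST_EXTENTS,
+ *		.diff_items	= xfs_extent_free_diff_items,
+ *		.create_intent	= xfs_extent_free_create_intent,
+ *		.log_item	= xfs_extent_free_log_item,
+ *		.create_done	= xfs_extent_free_create_done,
+ *		.finish_item	= xfs_extent_free_finish_item,
+ *		.abort_intent	= xfs_extent_free_abort_intent,
+ *		.cancel_item	= xfs_extent_free_cancel_item,
+ *	};
+ *
+ *	xfs_defer_init_op_type(&xfs_extent_free_defer_type);
+ */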
+
+/* Initialize a deferred operation. */
+void
+xfs_defer_init(
+ struct xfs_defer_ops *dop,
+ xfs_fsblock_t *fbp)
+{
+ dop->dop_committed = false;
+ dop->dop_low = false;
+ memset(&dop->dop_inodes, 0, sizeof(dop->dop_inodes));
+ *fbp = NULLFSBLOCK;
+ INIT_LIST_HEAD(&dop->dop_intake);
+ INIT_LIST_HEAD(&dop->dop_pending);
+ trace_xfs_defer_init(NULL, dop);
+}
diff --git a/fs/xfs/libxfs/xfs_defer.h b/fs/xfs/libxfs/xfs_defer.h
new file mode 100644
index 000000000000..cc3981c48296
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_defer.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2016 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __XFS_DEFER_H__
+#define __XFS_DEFER_H__
+
+struct xfs_defer_op_type;
+
+/*
+ * Save a log intent item and a list of extents, so that we can replay
+ * whatever action had to happen to the extent list and file the log done
+ * item.
+ */
+struct xfs_defer_pending {
+ const struct xfs_defer_op_type *dfp_type; /* function pointers */
+ struct list_head dfp_list; /* pending items */
+ bool dfp_committed; /* committed trans? */
+ void *dfp_intent; /* log intent item */
+ struct list_head dfp_work; /* work items */
+ unsigned int dfp_count; /* # extent items */
+};
+
+/*
+ * Header for deferred operation list.
+ *
+ * dop_low is used by the allocator to activate the lowspace algorithm -
+ * when free space is running low the extent allocator may choose to
+ * allocate an extent from an AG without leaving sufficient space for
+ * a btree split when inserting the new extent. In this case the allocator
+ * will enable the lowspace algorithm, which is supposed to allow further
+ * allocations (such as btree splits and newroots) to allocate from
+ * sequential AGs. In order to avoid locking AGs out of order the lowspace
+ * algorithm will start searching for free space from AG 0. If the correct
+ * transaction reservations have been made then this algorithm will eventually
+ * find all the space it needs.
+ */
+enum xfs_defer_ops_type {
+ XFS_DEFER_OPS_TYPE_RMAP,
+ XFS_DEFER_OPS_TYPE_FREE,
+ XFS_DEFER_OPS_TYPE_MAX,
+};
+
+#define XFS_DEFER_OPS_NR_INODES 2 /* join up to two inodes */
+
+struct xfs_defer_ops {
+ bool dop_committed; /* did any trans commit? */
+ bool dop_low; /* alloc in low mode */
+ struct list_head dop_intake; /* unlogged pending work */
+ struct list_head dop_pending; /* logged pending work */
+
+ /* relog these inodes with each roll */
+ struct xfs_inode *dop_inodes[XFS_DEFER_OPS_NR_INODES];
+};
+
+void xfs_defer_add(struct xfs_defer_ops *dop, enum xfs_defer_ops_type type,
+ struct list_head *h);
+int xfs_defer_finish(struct xfs_trans **tp, struct xfs_defer_ops *dop,
+ struct xfs_inode *ip);
+void xfs_defer_cancel(struct xfs_defer_ops *dop);
+void xfs_defer_init(struct xfs_defer_ops *dop, xfs_fsblock_t *fbp);
+bool xfs_defer_has_unfinished_work(struct xfs_defer_ops *dop);
+int xfs_defer_join(struct xfs_defer_ops *dop, struct xfs_inode *ip);
+
+/* Description of a deferred type. */
+struct xfs_defer_op_type {
+ enum xfs_defer_ops_type type;
+ unsigned int max_items;
+ void (*abort_intent)(void *);
+ void *(*create_done)(struct xfs_trans *, void *, unsigned int);
+ int (*finish_item)(struct xfs_trans *, struct xfs_defer_ops *,
+ struct list_head *, void *, void **);
+ void (*finish_cleanup)(struct xfs_trans *, void *, int);
+ void (*cancel_item)(struct list_head *);
+ int (*diff_items)(void *, struct list_head *, struct list_head *);
+ void *(*create_intent)(struct xfs_trans *, uint);
+ void (*log_item)(struct xfs_trans *, void *, struct list_head *);
+};
+
+void xfs_defer_init_op_type(const struct xfs_defer_op_type *type);
+
+#endif /* __XFS_DEFER_H__ */
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index af0f9d171f8a..20a96dd5af7e 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -21,6 +21,7 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
@@ -259,7 +260,7 @@ xfs_dir_createname(
struct xfs_name *name,
xfs_ino_t inum, /* new entry inode number */
xfs_fsblock_t *first, /* bmap's firstblock */
- xfs_bmap_free_t *flist, /* bmap's freeblock list */
+ struct xfs_defer_ops *dfops, /* bmap's deferred ops */
xfs_extlen_t total) /* bmap's total block count */
{
struct xfs_da_args *args;
@@ -286,7 +287,7 @@ xfs_dir_createname(
args->inumber = inum;
args->dp = dp;
args->firstblock = first;
- args->flist = flist;
+ args->dfops = dfops;
args->total = total;
args->whichfork = XFS_DATA_FORK;
args->trans = tp;
@@ -436,7 +437,7 @@ xfs_dir_removename(
struct xfs_name *name,
xfs_ino_t ino,
xfs_fsblock_t *first, /* bmap's firstblock */
- xfs_bmap_free_t *flist, /* bmap's freeblock list */
+ struct xfs_defer_ops *dfops, /* bmap's deferred ops */
xfs_extlen_t total) /* bmap's total block count */
{
struct xfs_da_args *args;
@@ -458,7 +459,7 @@ xfs_dir_removename(
args->inumber = ino;
args->dp = dp;
args->firstblock = first;
- args->flist = flist;
+ args->dfops = dfops;
args->total = total;
args->whichfork = XFS_DATA_FORK;
args->trans = tp;
@@ -498,7 +499,7 @@ xfs_dir_replace(
struct xfs_name *name, /* name of entry to replace */
xfs_ino_t inum, /* new inode number */
xfs_fsblock_t *first, /* bmap's firstblock */
- xfs_bmap_free_t *flist, /* bmap's freeblock list */
+ struct xfs_defer_ops *dfops, /* bmap's deferred ops */
xfs_extlen_t total) /* bmap's total block count */
{
struct xfs_da_args *args;
@@ -523,7 +524,7 @@ xfs_dir_replace(
args->inumber = inum;
args->dp = dp;
args->firstblock = first;
- args->flist = flist;
+ args->dfops = dfops;
args->total = total;
args->whichfork = XFS_DATA_FORK;
args->trans = tp;
@@ -680,7 +681,7 @@ xfs_dir2_shrink_inode(
/* Unmap the fsblock(s). */
error = xfs_bunmapi(tp, dp, da, args->geo->fsbcount, 0, 0,
- args->firstblock, args->flist, &done);
+ args->firstblock, args->dfops, &done);
if (error) {
/*
* ENOSPC actually can happen if we're in a removename with no
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index e55353651f5b..becc926c3e3d 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -18,7 +18,7 @@
#ifndef __XFS_DIR2_H__
#define __XFS_DIR2_H__
-struct xfs_bmap_free;
+struct xfs_defer_ops;
struct xfs_da_args;
struct xfs_inode;
struct xfs_mount;
@@ -129,18 +129,18 @@ extern int xfs_dir_init(struct xfs_trans *tp, struct xfs_inode *dp,
extern int xfs_dir_createname(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t inum,
xfs_fsblock_t *first,
- struct xfs_bmap_free *flist, xfs_extlen_t tot);
+ struct xfs_defer_ops *dfops, xfs_extlen_t tot);
extern int xfs_dir_lookup(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t *inum,
struct xfs_name *ci_name);
extern int xfs_dir_removename(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t ino,
xfs_fsblock_t *first,
- struct xfs_bmap_free *flist, xfs_extlen_t tot);
+ struct xfs_defer_ops *dfops, xfs_extlen_t tot);
extern int xfs_dir_replace(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t inum,
xfs_fsblock_t *first,
- struct xfs_bmap_free *flist, xfs_extlen_t tot);
+ struct xfs_defer_ops *dfops, xfs_extlen_t tot);
extern int xfs_dir_canenter(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name);
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index e5bb9cc3b243..c6809ff41197 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -126,13 +126,12 @@ xfs_dir2_block_sfsize(
/*
* Calculate the new size, see if we should give up yet.
*/
- size = xfs_dir2_sf_hdr_size(i8count) + /* header */
- count + /* namelen */
- count * (uint)sizeof(xfs_dir2_sf_off_t) + /* offset */
- namelen + /* name */
- (i8count ? /* inumber */
- (uint)sizeof(xfs_dir2_ino8_t) * count :
- (uint)sizeof(xfs_dir2_ino4_t) * count);
+ size = xfs_dir2_sf_hdr_size(i8count) + /* header */
+ count * 3 * sizeof(u8) + /* namelen + offset */
+ namelen + /* name */
+ (i8count ? /* inumber */
+ count * XFS_INO64_SIZE :
+ count * XFS_INO32_SIZE);
if (size > XFS_IFORK_DSIZE(dp))
return size; /* size value is a failure */
}
@@ -319,10 +318,7 @@ xfs_dir2_sf_addname(
/*
* Yes, adjust the inode size. old count + (parent + new)
*/
- incr_isize +=
- (sfp->count + 2) *
- ((uint)sizeof(xfs_dir2_ino8_t) -
- (uint)sizeof(xfs_dir2_ino4_t));
+ incr_isize += (sfp->count + 2) * XFS_INO64_DIFF;
objchange = 1;
}
@@ -897,11 +893,7 @@ xfs_dir2_sf_replace(
int error; /* error return value */
int newsize; /* new inode size */
- newsize =
- dp->i_df.if_bytes +
- (sfp->count + 1) *
- ((uint)sizeof(xfs_dir2_ino8_t) -
- (uint)sizeof(xfs_dir2_ino4_t));
+ newsize = dp->i_df.if_bytes + (sfp->count + 1) * XFS_INO64_DIFF;
/*
* Won't fit as shortform, convert to block then do replace.
*/
@@ -1022,10 +1014,7 @@ xfs_dir2_sf_toino4(
/*
* Compute the new inode size.
*/
- newsize =
- oldsize -
- (oldsfp->count + 1) *
- ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t));
+ newsize = oldsize - (oldsfp->count + 1) * XFS_INO64_DIFF;
xfs_idata_realloc(dp, -oldsize, XFS_DATA_FORK);
xfs_idata_realloc(dp, newsize, XFS_DATA_FORK);
/*
@@ -1048,7 +1037,7 @@ xfs_dir2_sf_toino4(
i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep),
oldsfep = dp->d_ops->sf_nextentry(oldsfp, oldsfep)) {
sfep->namelen = oldsfep->namelen;
- sfep->offset = oldsfep->offset;
+ memcpy(sfep->offset, oldsfep->offset, sizeof(sfep->offset));
memcpy(sfep->name, oldsfep->name, sfep->namelen);
dp->d_ops->sf_put_ino(sfp, sfep,
dp->d_ops->sf_get_ino(oldsfp, oldsfep));
@@ -1098,10 +1087,7 @@ xfs_dir2_sf_toino8(
/*
* Compute the new inode size (nb: entry count + 1 for parent)
*/
- newsize =
- oldsize +
- (oldsfp->count + 1) *
- ((uint)sizeof(xfs_dir2_ino8_t) - (uint)sizeof(xfs_dir2_ino4_t));
+ newsize = oldsize + (oldsfp->count + 1) * XFS_INO64_DIFF;
xfs_idata_realloc(dp, -oldsize, XFS_DATA_FORK);
xfs_idata_realloc(dp, newsize, XFS_DATA_FORK);
/*
@@ -1124,7 +1110,7 @@ xfs_dir2_sf_toino8(
i++, sfep = dp->d_ops->sf_nextentry(sfp, sfep),
oldsfep = dp->d_ops->sf_nextentry(oldsfp, oldsfep)) {
sfep->namelen = oldsfep->namelen;
- sfep->offset = oldsfep->offset;
+ memcpy(sfep->offset, oldsfep->offset, sizeof(sfep->offset));
memcpy(sfep->name, oldsfep->name, sfep->namelen);
dp->d_ops->sf_put_ino(sfp, sfep,
dp->d_ops->sf_get_ino(oldsfp, oldsfep));
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index dc97eb21af07..f814d42c73b2 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -455,8 +455,10 @@ xfs_sb_has_compat_feature(
}
#define XFS_SB_FEAT_RO_COMPAT_FINOBT (1 << 0) /* free inode btree */
+#define XFS_SB_FEAT_RO_COMPAT_RMAPBT (1 << 1) /* reverse map btree */
#define XFS_SB_FEAT_RO_COMPAT_ALL \
- (XFS_SB_FEAT_RO_COMPAT_FINOBT)
+ (XFS_SB_FEAT_RO_COMPAT_FINOBT | \
+ XFS_SB_FEAT_RO_COMPAT_RMAPBT)
#define XFS_SB_FEAT_RO_COMPAT_UNKNOWN ~XFS_SB_FEAT_RO_COMPAT_ALL
static inline bool
xfs_sb_has_ro_compat_feature(
@@ -538,6 +540,12 @@ static inline bool xfs_sb_version_hasmetauuid(struct xfs_sb *sbp)
(sbp->sb_features_incompat & XFS_SB_FEAT_INCOMPAT_META_UUID);
}
+static inline bool xfs_sb_version_hasrmapbt(struct xfs_sb *sbp)
+{
+ return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) &&
+ (sbp->sb_features_ro_compat & XFS_SB_FEAT_RO_COMPAT_RMAPBT);
+}
+
/*
* end of superblock version macros
*/
@@ -598,10 +606,10 @@ xfs_is_quota_inode(struct xfs_sb *sbp, xfs_ino_t ino)
#define XFS_AGI_GOOD_VERSION(v) ((v) == XFS_AGI_VERSION)
/*
- * Btree number 0 is bno, 1 is cnt. This value gives the size of the
+ * Btree number 0 is bno, 1 is cnt, 2 is rmap. This value gives the size of the
* arrays below.
*/
-#define XFS_BTNUM_AGF ((int)XFS_BTNUM_CNTi + 1)
+#define XFS_BTNUM_AGF ((int)XFS_BTNUM_RMAPi + 1)
/*
* The second word of agf_levels in the first a.g. overlaps the EFS
@@ -618,12 +626,10 @@ typedef struct xfs_agf {
__be32 agf_seqno; /* sequence # starting from 0 */
__be32 agf_length; /* size in blocks of a.g. */
/*
- * Freespace information
+ * Freespace and rmap information
*/
__be32 agf_roots[XFS_BTNUM_AGF]; /* root blocks */
- __be32 agf_spare0; /* spare field */
__be32 agf_levels[XFS_BTNUM_AGF]; /* btree levels */
- __be32 agf_spare1; /* spare field */
__be32 agf_flfirst; /* first freelist block's index */
__be32 agf_fllast; /* last freelist block's index */
@@ -1308,17 +1314,118 @@ typedef __be32 xfs_inobt_ptr_t;
#define XFS_FIBT_BLOCK(mp) ((xfs_agblock_t)(XFS_IBT_BLOCK(mp) + 1))
/*
- * The first data block of an AG depends on whether the filesystem was formatted
- * with the finobt feature. If so, account for the finobt reserved root btree
- * block.
+ * Reverse mapping btree format definitions
+ *
+ * There is a btree for the reverse map per allocation group
+ */
+#define XFS_RMAP_CRC_MAGIC 0x524d4233 /* 'RMB3' */
+
+/*
+ * Ownership info for an extent. This is used to create reverse-mapping
+ * entries.
*/
-#define XFS_PREALLOC_BLOCKS(mp) \
+#define XFS_OWNER_INFO_ATTR_FORK (1 << 0)
+#define XFS_OWNER_INFO_BMBT_BLOCK (1 << 1)
+struct xfs_owner_info {
+ uint64_t oi_owner;
+ xfs_fileoff_t oi_offset;
+ unsigned int oi_flags;
+};
+
+/*
+ * Special owner types.
+ *
+ * Since we only support up to 8EB of space, the upper bit of the owner field
+ * is free to flag a special owner value. We use these for static metadata
+ * allocated at mkfs/growfs time, as well as for freespace management metadata.
+ */
+#define XFS_RMAP_OWN_NULL (-1ULL) /* No owner, for growfs */
+#define XFS_RMAP_OWN_UNKNOWN (-2ULL) /* Unknown owner, for EFI recovery */
+#define XFS_RMAP_OWN_FS (-3ULL) /* static fs metadata */
+#define XFS_RMAP_OWN_LOG (-4ULL) /* static fs metadata */
+#define XFS_RMAP_OWN_AG (-5ULL) /* AG freespace btree blocks */
+#define XFS_RMAP_OWN_INOBT (-6ULL) /* Inode btree blocks */
+#define XFS_RMAP_OWN_INODES (-7ULL) /* Inode chunk */
+#define XFS_RMAP_OWN_MIN (-8ULL) /* guard */
+
+#define XFS_RMAP_NON_INODE_OWNER(owner) (!!((owner) & (1ULL << 63)))
+
+/*
+ * Data record structure
+ */
+struct xfs_rmap_rec {
+ __be32 rm_startblock; /* extent start block */
+ __be32 rm_blockcount; /* extent length */
+ __be64 rm_owner; /* extent owner */
+ __be64 rm_offset; /* offset within the owner */
+};
+
+/*
+ * rmap btree record
+ * rm_offset:63 is the attribute fork flag
+ * rm_offset:62 is the bmbt block flag
+ * rm_offset:61 is the unwritten extent flag (same as l0:63 in bmbt)
+ * rm_offset:54-60 aren't used and should be zero
+ * rm_offset:0-53 is the block offset within the inode
+ */
+#define XFS_RMAP_OFF_ATTR_FORK ((__uint64_t)1ULL << 63)
+#define XFS_RMAP_OFF_BMBT_BLOCK ((__uint64_t)1ULL << 62)
+#define XFS_RMAP_OFF_UNWRITTEN ((__uint64_t)1ULL << 61)
+
+#define XFS_RMAP_LEN_MAX ((__uint32_t)~0U)
+#define XFS_RMAP_OFF_FLAGS (XFS_RMAP_OFF_ATTR_FORK | \
+ XFS_RMAP_OFF_BMBT_BLOCK | \
+ XFS_RMAP_OFF_UNWRITTEN)
+#define XFS_RMAP_OFF_MASK ((__uint64_t)0x3FFFFFFFFFFFFFULL)
+
+#define XFS_RMAP_OFF(off) ((off) & XFS_RMAP_OFF_MASK)
+
+#define XFS_RMAP_IS_BMBT_BLOCK(off) (!!((off) & XFS_RMAP_OFF_BMBT_BLOCK))
+#define XFS_RMAP_IS_ATTR_FORK(off) (!!((off) & XFS_RMAP_OFF_ATTR_FORK))
+#define XFS_RMAP_IS_UNWRITTEN(off) (!!((off) & XFS_RMAP_OFF_UNWRITTEN))
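+
+/*
+ * Decoding example (hypothetical on-disk record "rec"): recover the file
+ * offset and flag bits from rm_offset.
+ *
+ *	__uint64_t	off = be64_to_cpu(rec->rm_offset);
+ *	xfs_fileoff_t	fileoff = XFS_RMAP_OFF(off);
+ *	bool		unwritten = XFS_RMAP_IS_UNWRITTEN(off);
+ *	bool		attr_fork = XFS_RMAP_IS_ATTR_FORK(off);
+ */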
+
+#define RMAPBT_STARTBLOCK_BITLEN 32
+#define RMAPBT_BLOCKCOUNT_BITLEN 32
+#define RMAPBT_OWNER_BITLEN 64
+#define RMAPBT_ATTRFLAG_BITLEN 1
+#define RMAPBT_BMBTFLAG_BITLEN 1
+#define RMAPBT_EXNTFLAG_BITLEN 1
+#define RMAPBT_UNUSED_OFFSET_BITLEN 7
+#define RMAPBT_OFFSET_BITLEN 54
+
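A minimal userspace sketch of the rm_offset encoding above; the masks mirror the XFS_RMAP_OFF_* definitions, and the in-kernel equivalents are xfs_rmap_irec_offset_pack()/unpack() in xfs_rmap.h later in this patch.

	/* userspace round-trip of the rm_offset bit layout */
	#include <stdint.h>
	#include <assert.h>

	#define OFF_UNWRITTEN	(1ULL << 61)		/* XFS_RMAP_OFF_UNWRITTEN */
	#define OFF_MASK	0x3FFFFFFFFFFFFFULL	/* bits 0-53: file offset */

	int main(void)
	{
		uint64_t packed = (1234 & OFF_MASK) | OFF_UNWRITTEN;

		assert((packed & OFF_MASK) == 1234);	/* offset survives packing */
		assert(packed & OFF_UNWRITTEN);		/* flag rides in bit 61 */
		return 0;
	}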
+#define XFS_RMAP_ATTR_FORK (1 << 0)
+#define XFS_RMAP_BMBT_BLOCK (1 << 1)
+#define XFS_RMAP_UNWRITTEN (1 << 2)
+#define XFS_RMAP_KEY_FLAGS (XFS_RMAP_ATTR_FORK | \
+ XFS_RMAP_BMBT_BLOCK)
+#define XFS_RMAP_REC_FLAGS (XFS_RMAP_UNWRITTEN)
+struct xfs_rmap_irec {
+ xfs_agblock_t rm_startblock; /* extent start block */
+ xfs_extlen_t rm_blockcount; /* extent length */
+ __uint64_t rm_owner; /* extent owner */
+ __uint64_t rm_offset; /* offset within the owner */
+ unsigned int rm_flags; /* state flags */
+};
+
+/*
+ * Key structure
+ *
+ * We don't use the length for lookups
+ */
+struct xfs_rmap_key {
+ __be32 rm_startblock; /* extent start block */
+ __be64 rm_owner; /* extent owner */
+ __be64 rm_offset; /* offset within the owner */
+} __attribute__((packed));
+
+/* btree pointer type */
+typedef __be32 xfs_rmap_ptr_t;
+
+#define XFS_RMAP_BLOCK(mp) \
(xfs_sb_version_hasfinobt(&((mp)->m_sb)) ? \
XFS_FIBT_BLOCK(mp) + 1 : \
XFS_IBT_BLOCK(mp) + 1)
-
-
/*
* BMAP Btree format definitions
*
@@ -1435,41 +1542,57 @@ typedef __be64 xfs_bmbt_ptr_t, xfs_bmdr_ptr_t;
* with the crc feature bit, and all accesses to them must be conditional on
* that flag.
*/
+/* short form block header */
+struct xfs_btree_block_shdr {
+ __be32 bb_leftsib;
+ __be32 bb_rightsib;
+
+ __be64 bb_blkno;
+ __be64 bb_lsn;
+ uuid_t bb_uuid;
+ __be32 bb_owner;
+ __le32 bb_crc;
+};
+
+/* long form block header */
+struct xfs_btree_block_lhdr {
+ __be64 bb_leftsib;
+ __be64 bb_rightsib;
+
+ __be64 bb_blkno;
+ __be64 bb_lsn;
+ uuid_t bb_uuid;
+ __be64 bb_owner;
+ __le32 bb_crc;
+ __be32 bb_pad; /* padding for alignment */
+};
+
struct xfs_btree_block {
__be32 bb_magic; /* magic number for block type */
__be16 bb_level; /* 0 is a leaf */
__be16 bb_numrecs; /* current # of data records */
union {
- struct {
- __be32 bb_leftsib;
- __be32 bb_rightsib;
-
- __be64 bb_blkno;
- __be64 bb_lsn;
- uuid_t bb_uuid;
- __be32 bb_owner;
- __le32 bb_crc;
- } s; /* short form pointers */
- struct {
- __be64 bb_leftsib;
- __be64 bb_rightsib;
-
- __be64 bb_blkno;
- __be64 bb_lsn;
- uuid_t bb_uuid;
- __be64 bb_owner;
- __le32 bb_crc;
- __be32 bb_pad; /* padding for alignment */
- } l; /* long form pointers */
+ struct xfs_btree_block_shdr s;
+ struct xfs_btree_block_lhdr l;
} bb_u; /* rest */
};
-#define XFS_BTREE_SBLOCK_LEN 16 /* size of a short form block */
-#define XFS_BTREE_LBLOCK_LEN 24 /* size of a long form block */
+/* size of a short form block */
+#define XFS_BTREE_SBLOCK_LEN \
+ (offsetof(struct xfs_btree_block, bb_u) + \
+ offsetof(struct xfs_btree_block_shdr, bb_blkno))
+/* size of a long form block */
+#define XFS_BTREE_LBLOCK_LEN \
+ (offsetof(struct xfs_btree_block, bb_u) + \
+ offsetof(struct xfs_btree_block_lhdr, bb_blkno))
/* sizes of CRC enabled btree blocks */
-#define XFS_BTREE_SBLOCK_CRC_LEN (XFS_BTREE_SBLOCK_LEN + 40)
-#define XFS_BTREE_LBLOCK_CRC_LEN (XFS_BTREE_LBLOCK_LEN + 48)
+#define XFS_BTREE_SBLOCK_CRC_LEN \
+ (offsetof(struct xfs_btree_block, bb_u) + \
+ sizeof(struct xfs_btree_block_shdr))
+#define XFS_BTREE_LBLOCK_CRC_LEN \
+ (offsetof(struct xfs_btree_block, bb_u) + \
+ sizeof(struct xfs_btree_block_lhdr))
#define XFS_BTREE_SBLOCK_CRC_OFF \
offsetof(struct xfs_btree_block, bb_u.s.bb_crc)
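As a sanity check (illustrative; the kernel does this kind of verification with build-time asserts elsewhere): with 4-byte __be32, 8-byte __be64 and a 16-byte uuid_t, the offsetof/sizeof forms above reproduce the old hard-coded lengths.

	/* illustrative build-time checks against the old constants */
	_Static_assert(XFS_BTREE_SBLOCK_LEN == 16, "short form header is 16 bytes");
	_Static_assert(XFS_BTREE_LBLOCK_LEN == 24, "long form header is 24 bytes");
	_Static_assert(XFS_BTREE_SBLOCK_CRC_LEN == 16 + 40, "short form CRC header");
	_Static_assert(XFS_BTREE_LBLOCK_CRC_LEN == 24 + 48, "long form CRC header");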
diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index fffe3d01bd9f..79455058b752 100644
--- a/fs/xfs/libxfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
@@ -206,6 +206,7 @@ typedef struct xfs_fsop_resblks {
#define XFS_FSOP_GEOM_FLAGS_FTYPE 0x10000 /* inode directory types */
#define XFS_FSOP_GEOM_FLAGS_FINOBT 0x20000 /* free inode btree */
#define XFS_FSOP_GEOM_FLAGS_SPINODES 0x40000 /* sparse inode chunks */
+#define XFS_FSOP_GEOM_FLAGS_RMAPBT 0x80000 /* Reverse mapping btree */
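A hedged userspace sketch: programs can probe for the new geometry bit with the existing XFS_IOC_FSGEOMETRY ioctl (assumes the xfsprogs headers are installed; error handling kept minimal).

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <xfs/xfs.h>		/* userspace copy of xfs_fs.h */

	int main(int argc, char **argv)
	{
		struct xfs_fsop_geom geo;
		int fd = open(argv[1], O_RDONLY);

		if (fd < 0 || ioctl(fd, XFS_IOC_FSGEOMETRY, &geo) < 0) {
			perror("fsgeometry");
			return 1;
		}
		printf("rmapbt: %s\n",
			(geo.flags & XFS_FSOP_GEOM_FLAGS_RMAPBT) ? "yes" : "no");
		close(fd);
		return 0;
	}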
/*
* Minimum and maximum sizes needed for growth checks.
@@ -521,12 +522,8 @@ typedef struct xfs_swapext
#define XFS_IOC_ERROR_CLEARALL _IOW ('X', 117, struct xfs_error_injection)
/* XFS_IOC_ATTRCTL_BY_HANDLE -- deprecated 118 */
-/* XFS_IOC_FREEZE -- FIFREEZE 119 */
-/* XFS_IOC_THAW -- FITHAW 120 */
-#ifndef FIFREEZE
-#define XFS_IOC_FREEZE _IOWR('X', 119, int)
-#define XFS_IOC_THAW _IOWR('X', 120, int)
-#endif
+#define XFS_IOC_FREEZE _IOWR('X', 119, int) /* aka FIFREEZE */
+#define XFS_IOC_THAW _IOWR('X', 120, int) /* aka FITHAW */
#define XFS_IOC_FSSETDM_BY_HANDLE _IOW ('X', 121, struct xfs_fsop_setdm_handlereq)
#define XFS_IOC_ATTRLIST_BY_HANDLE _IOW ('X', 122, struct xfs_fsop_attrlist_handlereq)
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 22297f9b0fd5..51b4e0de1fdc 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -24,6 +24,7 @@
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
@@ -39,6 +40,7 @@
#include "xfs_icache.h"
#include "xfs_trace.h"
#include "xfs_log.h"
+#include "xfs_rmap.h"
/*
@@ -614,6 +616,7 @@ xfs_ialloc_ag_alloc(
args.tp = tp;
args.mp = tp->t_mountp;
args.fsbno = NULLFSBLOCK;
+ xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_INODES);
#ifdef DEBUG
/* randomly do sparse inode allocations */
@@ -1817,20 +1820,21 @@ xfs_difree_inode_chunk(
struct xfs_mount *mp,
xfs_agnumber_t agno,
struct xfs_inobt_rec_incore *rec,
- struct xfs_bmap_free *flist)
+ struct xfs_defer_ops *dfops)
{
xfs_agblock_t sagbno = XFS_AGINO_TO_AGBNO(mp, rec->ir_startino);
int startidx, endidx;
int nextbit;
xfs_agblock_t agbno;
int contigblk;
+ struct xfs_owner_info oinfo;
DECLARE_BITMAP(holemask, XFS_INOBT_HOLEMASK_BITS);
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INODES);
if (!xfs_inobt_issparse(rec->ir_holemask)) {
/* not sparse, calculate extent info directly */
- xfs_bmap_add_free(XFS_AGB_TO_FSB(mp, agno,
- XFS_AGINO_TO_AGBNO(mp, rec->ir_startino)),
- mp->m_ialloc_blks, flist, mp);
+ xfs_bmap_add_free(mp, dfops, XFS_AGB_TO_FSB(mp, agno, sagbno),
+ mp->m_ialloc_blks, &oinfo);
return;
}
@@ -1873,8 +1877,8 @@ xfs_difree_inode_chunk(
ASSERT(agbno % mp->m_sb.sb_spino_align == 0);
ASSERT(contigblk % mp->m_sb.sb_spino_align == 0);
- xfs_bmap_add_free(XFS_AGB_TO_FSB(mp, agno, agbno), contigblk,
- flist, mp);
+ xfs_bmap_add_free(mp, dfops, XFS_AGB_TO_FSB(mp, agno, agbno),
+ contigblk, &oinfo);
/* reset range to current bit and carry on... */
startidx = endidx = nextbit;
@@ -1890,7 +1894,7 @@ xfs_difree_inobt(
struct xfs_trans *tp,
struct xfs_buf *agbp,
xfs_agino_t agino,
- struct xfs_bmap_free *flist,
+ struct xfs_defer_ops *dfops,
struct xfs_icluster *xic,
struct xfs_inobt_rec_incore *orec)
{
@@ -1977,7 +1981,7 @@ xfs_difree_inobt(
goto error0;
}
- xfs_difree_inode_chunk(mp, agno, &rec, flist);
+ xfs_difree_inode_chunk(mp, agno, &rec, dfops);
} else {
xic->deleted = 0;
@@ -2122,7 +2126,7 @@ int
xfs_difree(
struct xfs_trans *tp, /* transaction pointer */
xfs_ino_t inode, /* inode to be freed */
- struct xfs_bmap_free *flist, /* extents to free */
+ struct xfs_defer_ops *dfops, /* extents to free */
struct xfs_icluster *xic) /* cluster info if deleted */
{
/* REFERENCED */
@@ -2174,7 +2178,7 @@ xfs_difree(
/*
* Fix up the inode allocation btree.
*/
- error = xfs_difree_inobt(mp, tp, agbp, agino, flist, xic, &rec);
+ error = xfs_difree_inobt(mp, tp, agbp, agino, dfops, xic, &rec);
if (error)
goto error0;
@@ -2395,20 +2399,11 @@ void
xfs_ialloc_compute_maxlevels(
xfs_mount_t *mp) /* file system mount structure */
{
- int level;
- uint maxblocks;
- uint maxleafents;
- int minleafrecs;
- int minnoderecs;
-
- maxleafents = (1LL << XFS_INO_AGINO_BITS(mp)) >>
- XFS_INODES_PER_CHUNK_LOG;
- minleafrecs = mp->m_inobt_mnr[0];
- minnoderecs = mp->m_inobt_mnr[1];
- maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
- for (level = 1; maxblocks > 1; level++)
- maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
- mp->m_in_maxlevels = level;
+ uint inodes;
+
+ inodes = (1LL << XFS_INO_AGINO_BITS(mp)) >> XFS_INODES_PER_CHUNK_LOG;
+ mp->m_in_maxlevels = xfs_btree_compute_maxlevels(mp, mp->m_inobt_mnr,
+ inodes);
}
/*
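For reference, an illustrative standalone version of the level computation that xfs_btree_compute_maxlevels() now centralizes; it is the same ceiling-division recurrence as the removed loop above.

	static unsigned int
	compute_maxlevels(
		unsigned int		minleafrecs,	/* mnr[0] */
		unsigned int		minnoderecs,	/* mnr[1] */
		unsigned long long	maxleafents)
	{
		unsigned long long	maxblocks;
		unsigned int		level;

		/* leaves needed to hold all records, rounded up */
		maxblocks = (maxleafents + minleafrecs - 1) / minleafrecs;
		/* then node levels until a single root block remains */
		for (level = 1; maxblocks > 1; level++)
			maxblocks = (maxblocks + minnoderecs - 1) / minnoderecs;
		return level;
	}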
diff --git a/fs/xfs/libxfs/xfs_ialloc.h b/fs/xfs/libxfs/xfs_ialloc.h
index 6e450df2979b..0bb89669fc07 100644
--- a/fs/xfs/libxfs/xfs_ialloc.h
+++ b/fs/xfs/libxfs/xfs_ialloc.h
@@ -95,7 +95,7 @@ int /* error */
xfs_difree(
struct xfs_trans *tp, /* transaction pointer */
xfs_ino_t inode, /* inode to be freed */
- struct xfs_bmap_free *flist, /* extents to free */
+ struct xfs_defer_ops *dfops, /* extents to free */
struct xfs_icluster *ifree); /* cluster info if deleted */
/*
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 89c21d771e35..31ca2208c03d 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -32,6 +32,7 @@
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_trans.h"
+#include "xfs_rmap.h"
STATIC int
@@ -96,6 +97,7 @@ xfs_inobt_alloc_block(
memset(&args, 0, sizeof(args));
args.tp = cur->bc_tp;
args.mp = cur->bc_mp;
+ xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_INOBT);
args.fsbno = XFS_AGB_TO_FSB(args.mp, cur->bc_private.a.agno, sbno);
args.minlen = 1;
args.maxlen = 1;
@@ -125,8 +127,12 @@ xfs_inobt_free_block(
struct xfs_btree_cur *cur,
struct xfs_buf *bp)
{
+ struct xfs_owner_info oinfo;
+
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_INOBT);
return xfs_free_extent(cur->bc_tp,
- XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1);
+ XFS_DADDR_TO_FSB(cur->bc_mp, XFS_BUF_ADDR(bp)), 1,
+ &oinfo);
}
STATIC int
@@ -146,14 +152,6 @@ xfs_inobt_init_key_from_rec(
}
STATIC void
-xfs_inobt_init_rec_from_key(
- union xfs_btree_key *key,
- union xfs_btree_rec *rec)
-{
- rec->inobt.ir_startino = key->inobt.ir_startino;
-}
-
-STATIC void
xfs_inobt_init_rec_from_cur(
struct xfs_btree_cur *cur,
union xfs_btree_rec *rec)
@@ -314,7 +312,6 @@ static const struct xfs_btree_ops xfs_inobt_ops = {
.get_minrecs = xfs_inobt_get_minrecs,
.get_maxrecs = xfs_inobt_get_maxrecs,
.init_key_from_rec = xfs_inobt_init_key_from_rec,
- .init_rec_from_key = xfs_inobt_init_rec_from_key,
.init_rec_from_cur = xfs_inobt_init_rec_from_cur,
.init_ptr_from_cur = xfs_inobt_init_ptr_from_cur,
.key_diff = xfs_inobt_key_diff,
@@ -336,7 +333,6 @@ static const struct xfs_btree_ops xfs_finobt_ops = {
.get_minrecs = xfs_inobt_get_minrecs,
.get_maxrecs = xfs_inobt_get_maxrecs,
.init_key_from_rec = xfs_inobt_init_key_from_rec,
- .init_rec_from_key = xfs_inobt_init_rec_from_key,
.init_rec_from_cur = xfs_inobt_init_rec_from_cur,
.init_ptr_from_cur = xfs_finobt_init_ptr_from_cur,
.key_diff = xfs_inobt_key_diff,
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 9d9559eb2835..4b9769e23c83 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -22,6 +22,7 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_cksum.h"
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index e8f49c029ff0..a6eed43fa7cd 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -110,7 +110,9 @@ static inline uint xlog_get_cycle(char *ptr)
#define XLOG_REG_TYPE_COMMIT 18
#define XLOG_REG_TYPE_TRANSHDR 19
#define XLOG_REG_TYPE_ICREATE 20
-#define XLOG_REG_TYPE_MAX 20
+#define XLOG_REG_TYPE_RUI_FORMAT 21
+#define XLOG_REG_TYPE_RUD_FORMAT 22
+#define XLOG_REG_TYPE_MAX 22
/*
* Flags to log operation header
@@ -227,6 +229,8 @@ typedef struct xfs_trans_header {
#define XFS_LI_DQUOT 0x123d
#define XFS_LI_QUOTAOFF 0x123e
#define XFS_LI_ICREATE 0x123f
+#define XFS_LI_RUI 0x1240 /* rmap update intent */
+#define XFS_LI_RUD 0x1241 /* rmap update done */
#define XFS_LI_TYPE_DESC \
{ XFS_LI_EFI, "XFS_LI_EFI" }, \
@@ -236,7 +240,9 @@ typedef struct xfs_trans_header {
{ XFS_LI_BUF, "XFS_LI_BUF" }, \
{ XFS_LI_DQUOT, "XFS_LI_DQUOT" }, \
{ XFS_LI_QUOTAOFF, "XFS_LI_QUOTAOFF" }, \
- { XFS_LI_ICREATE, "XFS_LI_ICREATE" }
+ { XFS_LI_ICREATE, "XFS_LI_ICREATE" }, \
+ { XFS_LI_RUI, "XFS_LI_RUI" }, \
+ { XFS_LI_RUD, "XFS_LI_RUD" }
/*
* Inode Log Item Format definitions.
@@ -604,6 +610,59 @@ typedef struct xfs_efd_log_format_64 {
} xfs_efd_log_format_64_t;
/*
+ * RUI/RUD (reverse mapping) log format definitions
+ */
+struct xfs_map_extent {
+ __uint64_t me_owner;
+ __uint64_t me_startblock;
+ __uint64_t me_startoff;
+ __uint32_t me_len;
+ __uint32_t me_flags;
+};
+
+/* rmap me_flags: upper bits are flags, lower byte is type code */
+#define XFS_RMAP_EXTENT_MAP 1
+#define XFS_RMAP_EXTENT_UNMAP 3
+#define XFS_RMAP_EXTENT_CONVERT 5
+#define XFS_RMAP_EXTENT_ALLOC 7
+#define XFS_RMAP_EXTENT_FREE 8
+#define XFS_RMAP_EXTENT_TYPE_MASK 0xFF
+
+#define XFS_RMAP_EXTENT_ATTR_FORK (1U << 31)
+#define XFS_RMAP_EXTENT_BMBT_BLOCK (1U << 30)
+#define XFS_RMAP_EXTENT_UNWRITTEN (1U << 29)
+
+#define XFS_RMAP_EXTENT_FLAGS (XFS_RMAP_EXTENT_TYPE_MASK | \
+ XFS_RMAP_EXTENT_ATTR_FORK | \
+ XFS_RMAP_EXTENT_BMBT_BLOCK | \
+ XFS_RMAP_EXTENT_UNWRITTEN)
+
+/*
+ * This is the structure used to lay out an rui log item in the
+ * log. The rui_extents field is a variable size array whose
+ * size is given by rui_nextents.
+ */
+struct xfs_rui_log_format {
+ __uint16_t rui_type; /* rui log item type */
+ __uint16_t rui_size; /* size of this item */
+ __uint32_t rui_nextents; /* # extents to rmap */
+ __uint64_t rui_id; /* rui identifier */
+ struct xfs_map_extent rui_extents[1]; /* array of extents to rmap */
+};
+
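Illustrative only: with rui_extents declared as a one-element trailing array, the size of an RUI format with nextents records is computed in the usual flexible-array style; the helper name here is hypothetical, not part of the patch.

	static inline unsigned int
	rui_log_format_sizeof(unsigned int nextents)	/* hypothetical helper */
	{
		return sizeof(struct xfs_rui_log_format) +
			(nextents - 1) * sizeof(struct xfs_map_extent);
	}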
+/*
+ * This is the structure used to lay out an rud log item in the
+ * log. Unlike the rui, the rud carries no extent array; the
+ * rud_rui_id field ties it back to the rui whose work it completes.
+ */
+struct xfs_rud_log_format {
+ __uint16_t rud_type; /* rud log item type */
+ __uint16_t rud_size; /* size of this item */
+ __uint32_t __pad;
+ __uint64_t rud_rui_id; /* id of corresponding rui */
+};
+
+/*
* Dquot Log format definitions.
*
* The first two fields must be the type and size fitting into
diff --git a/fs/xfs/libxfs/xfs_rmap.c b/fs/xfs/libxfs/xfs_rmap.c
new file mode 100644
index 000000000000..73d05407d663
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_rmap.c
@@ -0,0 +1,1399 @@
+/*
+ * Copyright (c) 2014 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+#include "xfs_btree.h"
+#include "xfs_trans.h"
+#include "xfs_alloc.h"
+#include "xfs_rmap.h"
+#include "xfs_rmap_btree.h"
+#include "xfs_trans_space.h"
+#include "xfs_trace.h"
+#include "xfs_error.h"
+#include "xfs_extent_busy.h"
+#include "xfs_bmap.h"
+#include "xfs_inode.h"
+
+/*
+ * Lookup the first record less than or equal to [bno, len, owner, offset]
+ * in the btree given by cur.
+ */
+int
+xfs_rmap_lookup_le(
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ uint64_t owner,
+ uint64_t offset,
+ unsigned int flags,
+ int *stat)
+{
+ cur->bc_rec.r.rm_startblock = bno;
+ cur->bc_rec.r.rm_blockcount = len;
+ cur->bc_rec.r.rm_owner = owner;
+ cur->bc_rec.r.rm_offset = offset;
+ cur->bc_rec.r.rm_flags = flags;
+ return xfs_btree_lookup(cur, XFS_LOOKUP_LE, stat);
+}
+
+/*
+ * Lookup the record exactly matching [bno, len, owner, offset]
+ * in the btree given by cur.
+ */
+int
+xfs_rmap_lookup_eq(
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ uint64_t owner,
+ uint64_t offset,
+ unsigned int flags,
+ int *stat)
+{
+ cur->bc_rec.r.rm_startblock = bno;
+ cur->bc_rec.r.rm_blockcount = len;
+ cur->bc_rec.r.rm_owner = owner;
+ cur->bc_rec.r.rm_offset = offset;
+ cur->bc_rec.r.rm_flags = flags;
+ return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
+}
+
+/*
+ * Update the record referred to by cur to the value given by irec.
+ * This either works (return 0) or gets an EFSCORRUPTED error.
+ */
+STATIC int
+xfs_rmap_update(
+ struct xfs_btree_cur *cur,
+ struct xfs_rmap_irec *irec)
+{
+ union xfs_btree_rec rec;
+ int error;
+
+ trace_xfs_rmap_update(cur->bc_mp, cur->bc_private.a.agno,
+ irec->rm_startblock, irec->rm_blockcount,
+ irec->rm_owner, irec->rm_offset, irec->rm_flags);
+
+ rec.rmap.rm_startblock = cpu_to_be32(irec->rm_startblock);
+ rec.rmap.rm_blockcount = cpu_to_be32(irec->rm_blockcount);
+ rec.rmap.rm_owner = cpu_to_be64(irec->rm_owner);
+ rec.rmap.rm_offset = cpu_to_be64(
+ xfs_rmap_irec_offset_pack(irec));
+ error = xfs_btree_update(cur, &rec);
+ if (error)
+ trace_xfs_rmap_update_error(cur->bc_mp,
+ cur->bc_private.a.agno, error, _RET_IP_);
+ return error;
+}
+
+int
+xfs_rmap_insert(
+ struct xfs_btree_cur *rcur,
+ xfs_agblock_t agbno,
+ xfs_extlen_t len,
+ uint64_t owner,
+ uint64_t offset,
+ unsigned int flags)
+{
+ int i;
+ int error;
+
+ trace_xfs_rmap_insert(rcur->bc_mp, rcur->bc_private.a.agno, agbno,
+ len, owner, offset, flags);
+
+ error = xfs_rmap_lookup_eq(rcur, agbno, len, owner, offset, flags, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(rcur->bc_mp, i == 0, done);
+
+ rcur->bc_rec.r.rm_startblock = agbno;
+ rcur->bc_rec.r.rm_blockcount = len;
+ rcur->bc_rec.r.rm_owner = owner;
+ rcur->bc_rec.r.rm_offset = offset;
+ rcur->bc_rec.r.rm_flags = flags;
+ error = xfs_btree_insert(rcur, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(rcur->bc_mp, i == 1, done);
+done:
+ if (error)
+ trace_xfs_rmap_insert_error(rcur->bc_mp,
+ rcur->bc_private.a.agno, error, _RET_IP_);
+ return error;
+}
+
+static int
+xfs_rmap_btrec_to_irec(
+ union xfs_btree_rec *rec,
+ struct xfs_rmap_irec *irec)
+{
+ irec->rm_flags = 0;
+ irec->rm_startblock = be32_to_cpu(rec->rmap.rm_startblock);
+ irec->rm_blockcount = be32_to_cpu(rec->rmap.rm_blockcount);
+ irec->rm_owner = be64_to_cpu(rec->rmap.rm_owner);
+ return xfs_rmap_irec_offset_unpack(be64_to_cpu(rec->rmap.rm_offset),
+ irec);
+}
+
+/*
+ * Get the data from the pointed-to record.
+ */
+int
+xfs_rmap_get_rec(
+ struct xfs_btree_cur *cur,
+ struct xfs_rmap_irec *irec,
+ int *stat)
+{
+ union xfs_btree_rec *rec;
+ int error;
+
+ error = xfs_btree_get_rec(cur, &rec, stat);
+ if (error || !*stat)
+ return error;
+
+ return xfs_rmap_btrec_to_irec(rec, irec);
+}
+
+/*
+ * Find the extent in the rmap btree and remove it.
+ *
+ * The record we find should always be an exact match for the extent that we're
+ * looking for, since we insert them into the btree without modification.
+ *
+ * Special Case #1: when growing the filesystem, we "free" an extent when
+ * growing the last AG. This extent is new space and so it is not tracked as
+ * used space in the btree. The growfs code will pass in an owner of
+ * XFS_RMAP_OWN_NULL to indicate that it expects there to be no owner of this
+ * extent. We verify this by checking that the extent lookup returns a record
+ * that does not overlap the new space.
+ *
+ * Special Case #2: EFIs do not record the owner of the extent, so when
+ * recovering EFIs from the log we pass in XFS_RMAP_OWN_UNKNOWN to tell the rmap
+ * btree to ignore the owner (i.e. wildcard match) so we don't trigger
+ * corruption checks during log recovery.
+ */
+STATIC int
+xfs_rmap_unmap(
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ bool unwritten,
+ struct xfs_owner_info *oinfo)
+{
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_rmap_irec ltrec;
+ uint64_t ltoff;
+ int error = 0;
+ int i;
+ uint64_t owner;
+ uint64_t offset;
+ unsigned int flags;
+ bool ignore_off;
+
+ xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
+ ignore_off = XFS_RMAP_NON_INODE_OWNER(owner) ||
+ (flags & XFS_RMAP_BMBT_BLOCK);
+ if (unwritten)
+ flags |= XFS_RMAP_UNWRITTEN;
+ trace_xfs_rmap_unmap(mp, cur->bc_private.a.agno, bno, len,
+ unwritten, oinfo);
+
+ /*
+ * We should always have a left record because there's a static record
+ * for the AG headers at rm_startblock == 0 created by mkfs/growfs that
+ * will not ever be removed from the tree.
+ */
+ error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, flags, &i);
+ if (error)
+ goto out_error;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+
+ error = xfs_rmap_get_rec(cur, &ltrec, &i);
+ if (error)
+ goto out_error;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
+ cur->bc_private.a.agno, ltrec.rm_startblock,
+ ltrec.rm_blockcount, ltrec.rm_owner,
+ ltrec.rm_offset, ltrec.rm_flags);
+ ltoff = ltrec.rm_offset;
+
+ /*
+ * For growfs, the incoming extent must be beyond the left record we
+ * just found as it is new space and won't be used by anyone. This is
+ * just a corruption check as we don't actually do anything with this
+ * extent. Note that we need to use >= instead of > because it might
+ * be the case that the "left" extent goes all the way to EOFS.
+ */
+ if (owner == XFS_RMAP_OWN_NULL) {
+ XFS_WANT_CORRUPTED_GOTO(mp, bno >= ltrec.rm_startblock +
+ ltrec.rm_blockcount, out_error);
+ goto out_done;
+ }
+
+ /* Make sure the unwritten flag matches. */
+ XFS_WANT_CORRUPTED_GOTO(mp, (flags & XFS_RMAP_UNWRITTEN) ==
+ (ltrec.rm_flags & XFS_RMAP_UNWRITTEN), out_error);
+
+ /* Make sure the extent we found covers the entire freeing range. */
+ XFS_WANT_CORRUPTED_GOTO(mp, ltrec.rm_startblock <= bno &&
+ ltrec.rm_startblock + ltrec.rm_blockcount >=
+ bno + len, out_error);
+
+ /* Make sure the owner matches what we expect to find in the tree. */
+ XFS_WANT_CORRUPTED_GOTO(mp, owner == ltrec.rm_owner ||
+ XFS_RMAP_NON_INODE_OWNER(owner), out_error);
+
+ /* Check the offset, if necessary. */
+ if (!XFS_RMAP_NON_INODE_OWNER(owner)) {
+ if (flags & XFS_RMAP_BMBT_BLOCK) {
+ XFS_WANT_CORRUPTED_GOTO(mp,
+ ltrec.rm_flags & XFS_RMAP_BMBT_BLOCK,
+ out_error);
+ } else {
+ XFS_WANT_CORRUPTED_GOTO(mp,
+ ltrec.rm_offset <= offset, out_error);
+ XFS_WANT_CORRUPTED_GOTO(mp,
+ ltoff + ltrec.rm_blockcount >= offset + len,
+ out_error);
+ }
+ }
+
+ if (ltrec.rm_startblock == bno && ltrec.rm_blockcount == len) {
+ /* exact match, simply remove the record from rmap tree */
+ trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+ ltrec.rm_startblock, ltrec.rm_blockcount,
+ ltrec.rm_owner, ltrec.rm_offset,
+ ltrec.rm_flags);
+ error = xfs_btree_delete(cur, &i);
+ if (error)
+ goto out_error;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ } else if (ltrec.rm_startblock == bno) {
+ /*
+ * overlap left hand side of extent: move the start, trim the
+ * length and update the current record.
+ *
+ * ltbno ltlen
+ * Orig: |oooooooooooooooooooo|
+ * Freeing: |fffffffff|
+ * Result: |rrrrrrrrrr|
+ * bno len
+ */
+ ltrec.rm_startblock += len;
+ ltrec.rm_blockcount -= len;
+ if (!ignore_off)
+ ltrec.rm_offset += len;
+ error = xfs_rmap_update(cur, &ltrec);
+ if (error)
+ goto out_error;
+ } else if (ltrec.rm_startblock + ltrec.rm_blockcount == bno + len) {
+ /*
+ * overlap right hand side of extent: trim the length and update
+ * the current record.
+ *
+ * ltbno ltlen
+ * Orig: |oooooooooooooooooooo|
+ * Freeing: |fffffffff|
+ * Result: |rrrrrrrrrr|
+ * bno len
+ */
+ ltrec.rm_blockcount -= len;
+ error = xfs_rmap_update(cur, &ltrec);
+ if (error)
+ goto out_error;
+ } else {
+
+ /*
+ * overlap middle of extent: trim the length of the existing
+ * record to the length of the new left-extent size, increment
+ * the insertion position so we can insert a new record
+ * containing the remaining right-extent space.
+ *
+ * ltbno ltlen
+ * Orig: |oooooooooooooooooooo|
+ * Freeing: |fffffffff|
+ * Result: |rrrrr| |rrrr|
+ * bno len
+ */
+ xfs_extlen_t orig_len = ltrec.rm_blockcount;
+
+ ltrec.rm_blockcount = bno - ltrec.rm_startblock;
+ error = xfs_rmap_update(cur, &ltrec);
+ if (error)
+ goto out_error;
+
+ error = xfs_btree_increment(cur, 0, &i);
+ if (error)
+ goto out_error;
+
+ cur->bc_rec.r.rm_startblock = bno + len;
+ cur->bc_rec.r.rm_blockcount = orig_len - len -
+ ltrec.rm_blockcount;
+ cur->bc_rec.r.rm_owner = ltrec.rm_owner;
+ if (ignore_off)
+ cur->bc_rec.r.rm_offset = 0;
+ else
+ cur->bc_rec.r.rm_offset = offset + len;
+ cur->bc_rec.r.rm_flags = flags;
+ trace_xfs_rmap_insert(mp, cur->bc_private.a.agno,
+ cur->bc_rec.r.rm_startblock,
+ cur->bc_rec.r.rm_blockcount,
+ cur->bc_rec.r.rm_owner,
+ cur->bc_rec.r.rm_offset,
+ cur->bc_rec.r.rm_flags);
+ error = xfs_btree_insert(cur, &i);
+ if (error)
+ goto out_error;
+ }
+
+out_done:
+ trace_xfs_rmap_unmap_done(mp, cur->bc_private.a.agno, bno, len,
+ unwritten, oinfo);
+out_error:
+ if (error)
+ trace_xfs_rmap_unmap_error(mp, cur->bc_private.a.agno,
+ error, _RET_IP_);
+ return error;
+}
+
+/*
+ * Remove a reference to an extent in the rmap btree.
+ */
+int
+xfs_rmap_free(
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ struct xfs_owner_info *oinfo)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_btree_cur *cur;
+ int error;
+
+ if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ return 0;
+
+ cur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
+
+ error = xfs_rmap_unmap(cur, bno, len, false, oinfo);
+ if (error)
+ goto out_error;
+
+ xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+ return 0;
+
+out_error:
+ xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+ return error;
+}
+
+/*
+ * A mergeable rmap must have the same owner and the same values for
+ * the unwritten, attr_fork, and bmbt flags. The startblock and
+ * offset are checked separately.
+ */
+static bool
+xfs_rmap_is_mergeable(
+ struct xfs_rmap_irec *irec,
+ uint64_t owner,
+ unsigned int flags)
+{
+ if (irec->rm_owner == XFS_RMAP_OWN_NULL)
+ return false;
+ if (irec->rm_owner != owner)
+ return false;
+ if ((flags & XFS_RMAP_UNWRITTEN) ^
+ (irec->rm_flags & XFS_RMAP_UNWRITTEN))
+ return false;
+ if ((flags & XFS_RMAP_ATTR_FORK) ^
+ (irec->rm_flags & XFS_RMAP_ATTR_FORK))
+ return false;
+ if ((flags & XFS_RMAP_BMBT_BLOCK) ^
+ (irec->rm_flags & XFS_RMAP_BMBT_BLOCK))
+ return false;
+ return true;
+}
+
+/*
+ * When we allocate a new block, the first thing we do is add a reference to
+ * the extent in the rmap btree. This takes the form of a [agbno, length,
+ * owner, offset] record. Flags are encoded in the high bits of the offset
+ * field.
+ */
+STATIC int
+xfs_rmap_map(
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ bool unwritten,
+ struct xfs_owner_info *oinfo)
+{
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_rmap_irec ltrec;
+ struct xfs_rmap_irec gtrec;
+ int have_gt;
+ int have_lt;
+ int error = 0;
+ int i;
+ uint64_t owner;
+ uint64_t offset;
+ unsigned int flags = 0;
+ bool ignore_off;
+
+ xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
+ ASSERT(owner != 0);
+ ignore_off = XFS_RMAP_NON_INODE_OWNER(owner) ||
+ (flags & XFS_RMAP_BMBT_BLOCK);
+ if (unwritten)
+ flags |= XFS_RMAP_UNWRITTEN;
+ trace_xfs_rmap_map(mp, cur->bc_private.a.agno, bno, len,
+ unwritten, oinfo);
+
+ /*
+ * For the initial lookup, look for an exact match or the left-adjacent
+ * record for our insertion point. This will also give us the record for
+ * start block contiguity tests.
+ */
+ error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, flags,
+ &have_lt);
+ if (error)
+ goto out_error;
+ XFS_WANT_CORRUPTED_GOTO(mp, have_lt == 1, out_error);
+
+ error = xfs_rmap_get_rec(cur, &ltrec, &have_lt);
+ if (error)
+ goto out_error;
+ XFS_WANT_CORRUPTED_GOTO(mp, have_lt == 1, out_error);
+ trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
+ cur->bc_private.a.agno, ltrec.rm_startblock,
+ ltrec.rm_blockcount, ltrec.rm_owner,
+ ltrec.rm_offset, ltrec.rm_flags);
+
+ if (!xfs_rmap_is_mergeable(&ltrec, owner, flags))
+ have_lt = 0;
+
+ XFS_WANT_CORRUPTED_GOTO(mp,
+ have_lt == 0 ||
+ ltrec.rm_startblock + ltrec.rm_blockcount <= bno, out_error);
+
+ /*
+ * Increment the cursor to see if we have a right-adjacent record to our
+ * insertion point. This will give us the record for end block
+ * contiguity tests.
+ */
+ error = xfs_btree_increment(cur, 0, &have_gt);
+ if (error)
+ goto out_error;
+ if (have_gt) {
+ error = xfs_rmap_get_rec(cur, &gtrec, &have_gt);
+ if (error)
+ goto out_error;
+ XFS_WANT_CORRUPTED_GOTO(mp, have_gt == 1, out_error);
+ XFS_WANT_CORRUPTED_GOTO(mp, bno + len <= gtrec.rm_startblock,
+ out_error);
+ trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
+ cur->bc_private.a.agno, gtrec.rm_startblock,
+ gtrec.rm_blockcount, gtrec.rm_owner,
+ gtrec.rm_offset, gtrec.rm_flags);
+ if (!xfs_rmap_is_mergeable(&gtrec, owner, flags))
+ have_gt = 0;
+ }
+
+ /*
+ * Note: cursor currently points one record to the right of ltrec, even
+ * if there is no record in the tree to the right.
+ */
+ if (have_lt &&
+ ltrec.rm_startblock + ltrec.rm_blockcount == bno &&
+ (ignore_off || ltrec.rm_offset + ltrec.rm_blockcount == offset)) {
+ /*
+ * left edge contiguous, merge into left record.
+ *
+ * ltbno ltlen
+ * orig: |ooooooooo|
+ * adding: |aaaaaaaaa|
+ * result: |rrrrrrrrrrrrrrrrrrr|
+ * bno len
+ */
+ ltrec.rm_blockcount += len;
+ if (have_gt &&
+ bno + len == gtrec.rm_startblock &&
+ (ignore_off || offset + len == gtrec.rm_offset) &&
+ (unsigned long)ltrec.rm_blockcount + len +
+ gtrec.rm_blockcount <= XFS_RMAP_LEN_MAX) {
+ /*
+ * right edge also contiguous, delete right record
+ * and merge into left record.
+ *
+ * ltbno ltlen gtbno gtlen
+ * orig: |ooooooooo| |ooooooooo|
+ * adding: |aaaaaaaaa|
+ * result: |rrrrrrrrrrrrrrrrrrrrrrrrrrrrr|
+ */
+ ltrec.rm_blockcount += gtrec.rm_blockcount;
+ trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+ gtrec.rm_startblock,
+ gtrec.rm_blockcount,
+ gtrec.rm_owner,
+ gtrec.rm_offset,
+ gtrec.rm_flags);
+ error = xfs_btree_delete(cur, &i);
+ if (error)
+ goto out_error;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ }
+
+ /* point the cursor back to the left record and update */
+ error = xfs_btree_decrement(cur, 0, &have_gt);
+ if (error)
+ goto out_error;
+ error = xfs_rmap_update(cur, &ltrec);
+ if (error)
+ goto out_error;
+ } else if (have_gt &&
+ bno + len == gtrec.rm_startblock &&
+ (ignore_off || offset + len == gtrec.rm_offset)) {
+ /*
+ * right edge contiguous, merge into right record.
+ *
+ * gtbno gtlen
+ * Orig: |ooooooooo|
+ * adding: |aaaaaaaaa|
+ * Result: |rrrrrrrrrrrrrrrrrrr|
+ * bno len
+ */
+ gtrec.rm_startblock = bno;
+ gtrec.rm_blockcount += len;
+ if (!ignore_off)
+ gtrec.rm_offset = offset;
+ error = xfs_rmap_update(cur, &gtrec);
+ if (error)
+ goto out_error;
+ } else {
+ /*
+ * no contiguous edge with identical owner, insert
+ * new record at current cursor position.
+ */
+ cur->bc_rec.r.rm_startblock = bno;
+ cur->bc_rec.r.rm_blockcount = len;
+ cur->bc_rec.r.rm_owner = owner;
+ cur->bc_rec.r.rm_offset = offset;
+ cur->bc_rec.r.rm_flags = flags;
+ trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno, len,
+ owner, offset, flags);
+ error = xfs_btree_insert(cur, &i);
+ if (error)
+ goto out_error;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, out_error);
+ }
+
+ trace_xfs_rmap_map_done(mp, cur->bc_private.a.agno, bno, len,
+ unwritten, oinfo);
+out_error:
+ if (error)
+ trace_xfs_rmap_map_error(mp, cur->bc_private.a.agno,
+ error, _RET_IP_);
+ return error;
+}
+
+/*
+ * Add a reference to an extent in the rmap btree.
+ */
+int
+xfs_rmap_alloc(
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ struct xfs_owner_info *oinfo)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_btree_cur *cur;
+ int error;
+
+ if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ return 0;
+
+ cur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
+ error = xfs_rmap_map(cur, bno, len, false, oinfo);
+ if (error)
+ goto out_error;
+
+ xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
+ return 0;
+
+out_error:
+ xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+ return error;
+}
+
+#define RMAP_LEFT_CONTIG (1 << 0)
+#define RMAP_RIGHT_CONTIG (1 << 1)
+#define RMAP_LEFT_FILLING (1 << 2)
+#define RMAP_RIGHT_FILLING (1 << 3)
+#define RMAP_LEFT_VALID (1 << 6)
+#define RMAP_RIGHT_VALID (1 << 7)
+
+#define LEFT r[0]
+#define RIGHT r[1]
+#define PREV r[2]
+#define NEW r[3]
+
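An illustrative reading of the state bits (not code from the patch): when the conversion exactly fills PREV and both neighbors abut and are mergeable, all four FILLING/CONTIG bits are set and the three records collapse into one; when the conversion lands strictly inside PREV, state stays 0 and one record becomes three.

	/* illustration: the two extreme convert states */
	int collapse = RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
		       RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG;
	int split = 0;		/* middle conversion: PREV becomes three records */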
+/*
+ * Convert an unwritten extent to a real extent or vice versa.
+ * Does not handle overlapping extents.
+ */
+STATIC int
+xfs_rmap_convert(
+ struct xfs_btree_cur *cur,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ bool unwritten,
+ struct xfs_owner_info *oinfo)
+{
+ struct xfs_mount *mp = cur->bc_mp;
+ struct xfs_rmap_irec r[4]; /* neighbor extent entries */
+ /* left is 0, right is 1, prev is 2, new is 3 */
+ uint64_t owner;
+ uint64_t offset;
+ uint64_t new_endoff;
+ unsigned int oldext;
+ unsigned int newext;
+ unsigned int flags = 0;
+ int i;
+ int state = 0;
+ int error;
+
+ xfs_owner_info_unpack(oinfo, &owner, &offset, &flags);
+ ASSERT(!(XFS_RMAP_NON_INODE_OWNER(owner) ||
+ (flags & (XFS_RMAP_ATTR_FORK | XFS_RMAP_BMBT_BLOCK))));
+ oldext = unwritten ? XFS_RMAP_UNWRITTEN : 0;
+ new_endoff = offset + len;
+ trace_xfs_rmap_convert(mp, cur->bc_private.a.agno, bno, len,
+ unwritten, oinfo);
+
+ /*
+ * For the initial lookup, look for an exact match or the left-adjacent
+ * record for our insertion point. This will also give us the record for
+ * start block contiguity tests.
+ */
+ error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, oldext, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+
+ error = xfs_rmap_get_rec(cur, &PREV, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ trace_xfs_rmap_lookup_le_range_result(cur->bc_mp,
+ cur->bc_private.a.agno, PREV.rm_startblock,
+ PREV.rm_blockcount, PREV.rm_owner,
+ PREV.rm_offset, PREV.rm_flags);
+
+ ASSERT(PREV.rm_offset <= offset);
+ ASSERT(PREV.rm_offset + PREV.rm_blockcount >= new_endoff);
+ ASSERT((PREV.rm_flags & XFS_RMAP_UNWRITTEN) == oldext);
+ newext = ~oldext & XFS_RMAP_UNWRITTEN;
+
+ /*
+ * Set flags determining what part of the previous oldext allocation
+ * extent is being replaced by a newext allocation.
+ */
+ if (PREV.rm_offset == offset)
+ state |= RMAP_LEFT_FILLING;
+ if (PREV.rm_offset + PREV.rm_blockcount == new_endoff)
+ state |= RMAP_RIGHT_FILLING;
+
+ /*
+ * Decrement the cursor to see if we have a left-adjacent record to our
+ * insertion point. This will give us the record for end block
+ * contiguity tests.
+ */
+ error = xfs_btree_decrement(cur, 0, &i);
+ if (error)
+ goto done;
+ if (i) {
+ state |= RMAP_LEFT_VALID;
+ error = xfs_rmap_get_rec(cur, &LEFT, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp,
+ LEFT.rm_startblock + LEFT.rm_blockcount <= bno,
+ done);
+ trace_xfs_rmap_find_left_neighbor_result(cur->bc_mp,
+ cur->bc_private.a.agno, LEFT.rm_startblock,
+ LEFT.rm_blockcount, LEFT.rm_owner,
+ LEFT.rm_offset, LEFT.rm_flags);
+ if (LEFT.rm_startblock + LEFT.rm_blockcount == bno &&
+ LEFT.rm_offset + LEFT.rm_blockcount == offset &&
+ xfs_rmap_is_mergeable(&LEFT, owner, newext))
+ state |= RMAP_LEFT_CONTIG;
+ }
+
+ /*
+ * Increment the cursor to see if we have a right-adjacent record to our
+ * insertion point. This will give us the record for end block
+ * contiguity tests.
+ */
+ error = xfs_btree_increment(cur, 0, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ error = xfs_btree_increment(cur, 0, &i);
+ if (error)
+ goto done;
+ if (i) {
+ state |= RMAP_RIGHT_VALID;
+ error = xfs_rmap_get_rec(cur, &RIGHT, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ XFS_WANT_CORRUPTED_GOTO(mp, bno + len <= RIGHT.rm_startblock,
+ done);
+ trace_xfs_rmap_find_right_neighbor_result(cur->bc_mp,
+ cur->bc_private.a.agno, RIGHT.rm_startblock,
+ RIGHT.rm_blockcount, RIGHT.rm_owner,
+ RIGHT.rm_offset, RIGHT.rm_flags);
+ if (bno + len == RIGHT.rm_startblock &&
+ offset + len == RIGHT.rm_offset &&
+ xfs_rmap_is_mergeable(&RIGHT, owner, newext))
+ state |= RMAP_RIGHT_CONTIG;
+ }
+
+ /* check that left + prev + right is not too long */
+ if ((state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
+ RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) ==
+ (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
+ RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG) &&
+ (unsigned long)LEFT.rm_blockcount + len +
+ RIGHT.rm_blockcount > XFS_RMAP_LEN_MAX)
+ state &= ~RMAP_RIGHT_CONTIG;
+
+ trace_xfs_rmap_convert_state(mp, cur->bc_private.a.agno, state,
+ _RET_IP_);
+
+ /* reset the cursor back to PREV */
+ error = xfs_rmap_lookup_le(cur, bno, len, owner, offset, oldext, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+
+ /*
+ * Switch out based on the FILLING and CONTIG state bits.
+ */
+ switch (state & (RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
+ RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG)) {
+ case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG |
+ RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
+ /*
+ * Setting all of a previous oldext extent to newext.
+ * The left and right neighbors are both contiguous with new.
+ */
+ error = xfs_btree_increment(cur, 0, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+ RIGHT.rm_startblock, RIGHT.rm_blockcount,
+ RIGHT.rm_owner, RIGHT.rm_offset,
+ RIGHT.rm_flags);
+ error = xfs_btree_delete(cur, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ error = xfs_btree_decrement(cur, 0, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+ PREV.rm_startblock, PREV.rm_blockcount,
+ PREV.rm_owner, PREV.rm_offset,
+ PREV.rm_flags);
+ error = xfs_btree_delete(cur, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ error = xfs_btree_decrement(cur, 0, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ NEW = LEFT;
+ NEW.rm_blockcount += PREV.rm_blockcount + RIGHT.rm_blockcount;
+ error = xfs_rmap_update(cur, &NEW);
+ if (error)
+ goto done;
+ break;
+
+ case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG:
+ /*
+ * Setting all of a previous oldext extent to newext.
+ * The left neighbor is contiguous, the right is not.
+ */
+ trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+ PREV.rm_startblock, PREV.rm_blockcount,
+ PREV.rm_owner, PREV.rm_offset,
+ PREV.rm_flags);
+ error = xfs_btree_delete(cur, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ error = xfs_btree_decrement(cur, 0, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ NEW = LEFT;
+ NEW.rm_blockcount += PREV.rm_blockcount;
+ error = xfs_rmap_update(cur, &NEW);
+ if (error)
+ goto done;
+ break;
+
+ case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
+ /*
+ * Setting all of a previous oldext extent to newext.
+ * The right neighbor is contiguous, the left is not.
+ */
+ error = xfs_btree_increment(cur, 0, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ trace_xfs_rmap_delete(mp, cur->bc_private.a.agno,
+ RIGHT.rm_startblock, RIGHT.rm_blockcount,
+ RIGHT.rm_owner, RIGHT.rm_offset,
+ RIGHT.rm_flags);
+ error = xfs_btree_delete(cur, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ error = xfs_btree_decrement(cur, 0, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ NEW = PREV;
+ NEW.rm_blockcount = len + RIGHT.rm_blockcount;
+ NEW.rm_flags = newext;
+ error = xfs_rmap_update(cur, &NEW);
+ if (error)
+ goto done;
+ break;
+
+ case RMAP_LEFT_FILLING | RMAP_RIGHT_FILLING:
+ /*
+ * Setting all of a previous oldext extent to newext.
+ * Neither the left nor right neighbors are contiguous with
+ * the new one.
+ */
+ NEW = PREV;
+ NEW.rm_flags = newext;
+ error = xfs_rmap_update(cur, &NEW);
+ if (error)
+ goto done;
+ break;
+
+ case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG:
+ /*
+ * Setting the first part of a previous oldext extent to newext.
+ * The left neighbor is contiguous.
+ */
+ NEW = PREV;
+ NEW.rm_offset += len;
+ NEW.rm_startblock += len;
+ NEW.rm_blockcount -= len;
+ error = xfs_rmap_update(cur, &NEW);
+ if (error)
+ goto done;
+ error = xfs_btree_decrement(cur, 0, &i);
+ if (error)
+ goto done;
+ NEW = LEFT;
+ NEW.rm_blockcount += len;
+ error = xfs_rmap_update(cur, &NEW);
+ if (error)
+ goto done;
+ break;
+
+ case RMAP_LEFT_FILLING:
+ /*
+ * Setting the first part of a previous oldext extent to newext.
+ * The left neighbor is not contiguous.
+ */
+ NEW = PREV;
+ NEW.rm_startblock += len;
+ NEW.rm_offset += len;
+ NEW.rm_blockcount -= len;
+ error = xfs_rmap_update(cur, &NEW);
+ if (error)
+ goto done;
+ NEW.rm_startblock = bno;
+ NEW.rm_owner = owner;
+ NEW.rm_offset = offset;
+ NEW.rm_blockcount = len;
+ NEW.rm_flags = newext;
+ cur->bc_rec.r = NEW;
+ trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno,
+ len, owner, offset, newext);
+ error = xfs_btree_insert(cur, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ break;
+
+ case RMAP_RIGHT_FILLING | RMAP_RIGHT_CONTIG:
+ /*
+ * Setting the last part of a previous oldext extent to newext.
+ * The right neighbor is contiguous with the new allocation.
+ */
+ NEW = PREV;
+ NEW.rm_blockcount -= len;
+ error = xfs_rmap_update(cur, &NEW);
+ if (error)
+ goto done;
+ error = xfs_btree_increment(cur, 0, &i);
+ if (error)
+ goto done;
+ NEW = RIGHT;
+ NEW.rm_offset = offset;
+ NEW.rm_startblock = bno;
+ NEW.rm_blockcount += len;
+ error = xfs_rmap_update(cur, &NEW);
+ if (error)
+ goto done;
+ break;
+
+ case RMAP_RIGHT_FILLING:
+ /*
+ * Setting the last part of a previous oldext extent to newext.
+ * The right neighbor is not contiguous.
+ */
+ NEW = PREV;
+ NEW.rm_blockcount -= len;
+ error = xfs_rmap_update(cur, &NEW);
+ if (error)
+ goto done;
+ error = xfs_rmap_lookup_eq(cur, bno, len, owner, offset,
+ oldext, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+ NEW.rm_startblock = bno;
+ NEW.rm_owner = owner;
+ NEW.rm_offset = offset;
+ NEW.rm_blockcount = len;
+ NEW.rm_flags = newext;
+ cur->bc_rec.r = NEW;
+ trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno,
+ len, owner, offset, newext);
+ error = xfs_btree_insert(cur, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ break;
+
+ case 0:
+ /*
+ * Setting the middle part of a previous oldext extent to
+ * newext. Contiguity is impossible here.
+ * One extent becomes three extents.
+ */
+ /* new right extent - oldext */
+ NEW.rm_startblock = bno + len;
+ NEW.rm_owner = owner;
+ NEW.rm_offset = new_endoff;
+ NEW.rm_blockcount = PREV.rm_offset + PREV.rm_blockcount -
+ new_endoff;
+ NEW.rm_flags = PREV.rm_flags;
+ error = xfs_rmap_update(cur, &NEW);
+ if (error)
+ goto done;
+ /* new left extent - oldext */
+ NEW = PREV;
+ NEW.rm_blockcount = offset - PREV.rm_offset;
+ cur->bc_rec.r = NEW;
+ trace_xfs_rmap_insert(mp, cur->bc_private.a.agno,
+ NEW.rm_startblock, NEW.rm_blockcount,
+ NEW.rm_owner, NEW.rm_offset,
+ NEW.rm_flags);
+ error = xfs_btree_insert(cur, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ /*
+ * Reset the cursor to the position of the new extent
+ * we are about to insert as we can't trust it after
+ * the previous insert.
+ */
+ error = xfs_rmap_lookup_eq(cur, bno, len, owner, offset,
+ oldext, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 0, done);
+ /* new middle extent - newext */
+ cur->bc_rec.r.rm_flags &= ~XFS_RMAP_UNWRITTEN;
+ cur->bc_rec.r.rm_flags |= newext;
+ trace_xfs_rmap_insert(mp, cur->bc_private.a.agno, bno, len,
+ owner, offset, newext);
+ error = xfs_btree_insert(cur, &i);
+ if (error)
+ goto done;
+ XFS_WANT_CORRUPTED_GOTO(mp, i == 1, done);
+ break;
+
+ case RMAP_LEFT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
+ case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
+ case RMAP_LEFT_FILLING | RMAP_RIGHT_CONTIG:
+ case RMAP_RIGHT_FILLING | RMAP_LEFT_CONTIG:
+ case RMAP_LEFT_CONTIG | RMAP_RIGHT_CONTIG:
+ case RMAP_LEFT_CONTIG:
+ case RMAP_RIGHT_CONTIG:
+ /*
+ * These cases are all impossible.
+ */
+ ASSERT(0);
+ }
+
+ trace_xfs_rmap_convert_done(mp, cur->bc_private.a.agno, bno, len,
+ unwritten, oinfo);
+done:
+ if (error)
+ trace_xfs_rmap_convert_error(cur->bc_mp,
+ cur->bc_private.a.agno, error, _RET_IP_);
+ return error;
+}
+
+#undef NEW
+#undef LEFT
+#undef RIGHT
+#undef PREV
+
+struct xfs_rmap_query_range_info {
+ xfs_rmap_query_range_fn fn;
+ void *priv;
+};
+
+/* Format btree record and pass to our callback. */
+STATIC int
+xfs_rmap_query_range_helper(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_rec *rec,
+ void *priv)
+{
+ struct xfs_rmap_query_range_info *query = priv;
+ struct xfs_rmap_irec irec;
+ int error;
+
+ error = xfs_rmap_btrec_to_irec(rec, &irec);
+ if (error)
+ return error;
+ return query->fn(cur, &irec, query->priv);
+}
+
+/* Find all rmaps between two keys. */
+int
+xfs_rmap_query_range(
+ struct xfs_btree_cur *cur,
+ struct xfs_rmap_irec *low_rec,
+ struct xfs_rmap_irec *high_rec,
+ xfs_rmap_query_range_fn fn,
+ void *priv)
+{
+ union xfs_btree_irec low_brec;
+ union xfs_btree_irec high_brec;
+ struct xfs_rmap_query_range_info query;
+
+ low_brec.r = *low_rec;
+ high_brec.r = *high_rec;
+ query.priv = priv;
+ query.fn = fn;
+ return xfs_btree_query_range(cur, &low_brec, &high_brec,
+ xfs_rmap_query_range_helper, &query);
+}
+
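A hedged usage sketch: a caller provides an xfs_rmap_query_range_fn that runs once per record between the two keys; the callback below just counts records (obtaining the rmapbt cursor beforehand, e.g. via xfs_rmapbt_init_cursor(), is assumed).

	/* illustrative callback: count rmaps in a keyspace range */
	STATIC int
	xfs_rmap_count_helper(
		struct xfs_btree_cur	*cur,
		struct xfs_rmap_irec	*rec,
		void			*priv)
	{
		uint64_t		*count = priv;

		(*count)++;
		return 0;
	}

	/* with low/high keys and an initialized cursor: */
	/* error = xfs_rmap_query_range(cur, &low, &high,
					xfs_rmap_count_helper, &count); */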
+/* Clean up after calling xfs_rmap_finish_one. */
+void
+xfs_rmap_finish_one_cleanup(
+ struct xfs_trans *tp,
+ struct xfs_btree_cur *rcur,
+ int error)
+{
+ struct xfs_buf *agbp;
+
+ if (rcur == NULL)
+ return;
+ agbp = rcur->bc_private.a.agbp;
+ xfs_btree_del_cursor(rcur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ if (error)
+ xfs_trans_brelse(tp, agbp);
+}
+
+/*
+ * Process one of the deferred rmap operations. We pass back the
+ * btree cursor to maintain our lock on the rmapbt between calls.
+ * This saves time and eliminates a buffer deadlock between the
+ * superblock and the AGF because we'll always grab them in the same
+ * order.
+ */
+int
+xfs_rmap_finish_one(
+ struct xfs_trans *tp,
+ enum xfs_rmap_intent_type type,
+ __uint64_t owner,
+ int whichfork,
+ xfs_fileoff_t startoff,
+ xfs_fsblock_t startblock,
+ xfs_filblks_t blockcount,
+ xfs_exntst_t state,
+ struct xfs_btree_cur **pcur)
+{
+ struct xfs_mount *mp = tp->t_mountp;
+ struct xfs_btree_cur *rcur;
+ struct xfs_buf *agbp = NULL;
+ int error = 0;
+ xfs_agnumber_t agno;
+ struct xfs_owner_info oinfo;
+ xfs_agblock_t bno;
+ bool unwritten;
+
+ agno = XFS_FSB_TO_AGNO(mp, startblock);
+ ASSERT(agno != NULLAGNUMBER);
+ bno = XFS_FSB_TO_AGBNO(mp, startblock);
+
+ trace_xfs_rmap_deferred(mp, agno, type, bno, owner, whichfork,
+ startoff, blockcount, state);
+
+ if (XFS_TEST_ERROR(false, mp,
+ XFS_ERRTAG_RMAP_FINISH_ONE,
+ XFS_RANDOM_RMAP_FINISH_ONE))
+ return -EIO;
+
+ /*
+ * If we haven't gotten a cursor or the cursor AG doesn't match
+ * the startblock, get one now.
+ */
+ rcur = *pcur;
+ if (rcur != NULL && rcur->bc_private.a.agno != agno) {
+ xfs_rmap_finish_one_cleanup(tp, rcur, 0);
+ rcur = NULL;
+ *pcur = NULL;
+ }
+ if (rcur == NULL) {
+ /*
+ * Refresh the freelist before we start changing the
+ * rmapbt, because a shape change could cause us to
+ * allocate blocks.
+ */
+ error = xfs_free_extent_fix_freelist(tp, agno, &agbp);
+ if (error)
+ return error;
+ if (!agbp)
+ return -EFSCORRUPTED;
+
+ rcur = xfs_rmapbt_init_cursor(mp, tp, agbp, agno);
+ if (!rcur) {
+ error = -ENOMEM;
+ goto out_cur;
+ }
+ }
+ *pcur = rcur;
+
+ xfs_rmap_ino_owner(&oinfo, owner, whichfork, startoff);
+ unwritten = state == XFS_EXT_UNWRITTEN;
+ bno = XFS_FSB_TO_AGBNO(rcur->bc_mp, startblock);
+
+ switch (type) {
+ case XFS_RMAP_ALLOC:
+ case XFS_RMAP_MAP:
+ error = xfs_rmap_map(rcur, bno, blockcount, unwritten, &oinfo);
+ break;
+ case XFS_RMAP_FREE:
+ case XFS_RMAP_UNMAP:
+ error = xfs_rmap_unmap(rcur, bno, blockcount, unwritten,
+ &oinfo);
+ break;
+ case XFS_RMAP_CONVERT:
+ error = xfs_rmap_convert(rcur, bno, blockcount, !unwritten,
+ &oinfo);
+ break;
+ default:
+ ASSERT(0);
+ error = -EFSCORRUPTED;
+ }
+ return error;
+
+out_cur:
+ xfs_trans_brelse(tp, agbp);
+
+ return error;
+}
+
+/*
+ * Don't defer an rmap if we aren't an rmap filesystem.
+ */
+static bool
+xfs_rmap_update_is_needed(
+ struct xfs_mount *mp)
+{
+ return xfs_sb_version_hasrmapbt(&mp->m_sb);
+}
+
+/*
+ * Record an rmap intent; the list is kept sorted first by AG and then by
+ * increasing age.
+ */
+static int
+__xfs_rmap_add(
+ struct xfs_mount *mp,
+ struct xfs_defer_ops *dfops,
+ enum xfs_rmap_intent_type type,
+ __uint64_t owner,
+ int whichfork,
+ struct xfs_bmbt_irec *bmap)
+{
+ struct xfs_rmap_intent *ri;
+
+ trace_xfs_rmap_defer(mp, XFS_FSB_TO_AGNO(mp, bmap->br_startblock),
+ type,
+ XFS_FSB_TO_AGBNO(mp, bmap->br_startblock),
+ owner, whichfork,
+ bmap->br_startoff,
+ bmap->br_blockcount,
+ bmap->br_state);
+
+ ri = kmem_alloc(sizeof(struct xfs_rmap_intent), KM_SLEEP | KM_NOFS);
+ INIT_LIST_HEAD(&ri->ri_list);
+ ri->ri_type = type;
+ ri->ri_owner = owner;
+ ri->ri_whichfork = whichfork;
+ ri->ri_bmap = *bmap;
+
+ xfs_defer_add(dfops, XFS_DEFER_OPS_TYPE_RMAP, &ri->ri_list);
+ return 0;
+}
+
+/* Map an extent into a file. */
+int
+xfs_rmap_map_extent(
+ struct xfs_mount *mp,
+ struct xfs_defer_ops *dfops,
+ struct xfs_inode *ip,
+ int whichfork,
+ struct xfs_bmbt_irec *PREV)
+{
+ if (!xfs_rmap_update_is_needed(mp))
+ return 0;
+
+ return __xfs_rmap_add(mp, dfops, XFS_RMAP_MAP, ip->i_ino,
+ whichfork, PREV);
+}
+
+/* Unmap an extent out of a file. */
+int
+xfs_rmap_unmap_extent(
+ struct xfs_mount *mp,
+ struct xfs_defer_ops *dfops,
+ struct xfs_inode *ip,
+ int whichfork,
+ struct xfs_bmbt_irec *PREV)
+{
+ if (!xfs_rmap_update_is_needed(mp))
+ return 0;
+
+ return __xfs_rmap_add(mp, dfops, XFS_RMAP_UNMAP, ip->i_ino,
+ whichfork, PREV);
+}
+
+/* Convert a data fork extent from unwritten to real or vice versa. */
+int
+xfs_rmap_convert_extent(
+ struct xfs_mount *mp,
+ struct xfs_defer_ops *dfops,
+ struct xfs_inode *ip,
+ int whichfork,
+ struct xfs_bmbt_irec *PREV)
+{
+ if (!xfs_rmap_update_is_needed(mp))
+ return 0;
+
+ return __xfs_rmap_add(mp, dfops, XFS_RMAP_CONVERT, ip->i_ino,
+ whichfork, PREV);
+}
+
+/* Schedule the creation of an rmap for non-file data. */
+int
+xfs_rmap_alloc_extent(
+ struct xfs_mount *mp,
+ struct xfs_defer_ops *dfops,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ __uint64_t owner)
+{
+ struct xfs_bmbt_irec bmap;
+
+ if (!xfs_rmap_update_is_needed(mp))
+ return 0;
+
+ bmap.br_startblock = XFS_AGB_TO_FSB(mp, agno, bno);
+ bmap.br_blockcount = len;
+ bmap.br_startoff = 0;
+ bmap.br_state = XFS_EXT_NORM;
+
+ return __xfs_rmap_add(mp, dfops, XFS_RMAP_ALLOC, owner,
+ XFS_DATA_FORK, &bmap);
+}
+
+/* Schedule the deletion of an rmap for non-file data. */
+int
+xfs_rmap_free_extent(
+ struct xfs_mount *mp,
+ struct xfs_defer_ops *dfops,
+ xfs_agnumber_t agno,
+ xfs_agblock_t bno,
+ xfs_extlen_t len,
+ __uint64_t owner)
+{
+ struct xfs_bmbt_irec bmap;
+
+ if (!xfs_rmap_update_is_needed(mp))
+ return 0;
+
+ bmap.br_startblock = XFS_AGB_TO_FSB(mp, agno, bno);
+ bmap.br_blockcount = len;
+ bmap.br_startoff = 0;
+ bmap.br_state = XFS_EXT_NORM;
+
+ return __xfs_rmap_add(mp, dfops, XFS_RMAP_FREE, owner,
+ XFS_DATA_FORK, &bmap);
+}
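A hedged caller-side sketch: once a new bmbt mapping exists, the caller hands it to xfs_rmap_map_extent() so the rmapbt insert is deferred and replayed at transaction commit (the mount, dfops, inode and block number setup is elided; the values are made up).

	struct xfs_bmbt_irec	irec = {
		.br_startoff	= 0,		/* file offset, in blocks */
		.br_startblock	= fsbno,	/* block from the allocator */
		.br_blockcount	= 16,
		.br_state	= XFS_EXT_NORM,
	};
	int			error;

	error = xfs_rmap_map_extent(mp, dfops, ip, XFS_DATA_FORK, &irec);
	if (error)
		goto out_cancel;	/* hypothetical error path */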
diff --git a/fs/xfs/libxfs/xfs_rmap.h b/fs/xfs/libxfs/xfs_rmap.h
new file mode 100644
index 000000000000..71cf99a4acba
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_rmap.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright (C) 2016 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __XFS_RMAP_H__
+#define __XFS_RMAP_H__
+
+static inline void
+xfs_rmap_ag_owner(
+ struct xfs_owner_info *oi,
+ uint64_t owner)
+{
+ oi->oi_owner = owner;
+ oi->oi_offset = 0;
+ oi->oi_flags = 0;
+}
+
+static inline void
+xfs_rmap_ino_bmbt_owner(
+ struct xfs_owner_info *oi,
+ xfs_ino_t ino,
+ int whichfork)
+{
+ oi->oi_owner = ino;
+ oi->oi_offset = 0;
+ oi->oi_flags = XFS_OWNER_INFO_BMBT_BLOCK;
+ if (whichfork == XFS_ATTR_FORK)
+ oi->oi_flags |= XFS_OWNER_INFO_ATTR_FORK;
+}
+
+static inline void
+xfs_rmap_ino_owner(
+ struct xfs_owner_info *oi,
+ xfs_ino_t ino,
+ int whichfork,
+ xfs_fileoff_t offset)
+{
+ oi->oi_owner = ino;
+ oi->oi_offset = offset;
+ oi->oi_flags = 0;
+ if (whichfork == XFS_ATTR_FORK)
+ oi->oi_flags |= XFS_OWNER_INFO_ATTR_FORK;
+}
+
+static inline void
+xfs_rmap_skip_owner_update(
+ struct xfs_owner_info *oi)
+{
+ oi->oi_owner = XFS_RMAP_OWN_UNKNOWN;
+}
+
+/* Reverse mapping functions. */
+
+struct xfs_buf;
+
+static inline __u64
+xfs_rmap_irec_offset_pack(
+ const struct xfs_rmap_irec *irec)
+{
+ __u64 x;
+
+ x = XFS_RMAP_OFF(irec->rm_offset);
+ if (irec->rm_flags & XFS_RMAP_ATTR_FORK)
+ x |= XFS_RMAP_OFF_ATTR_FORK;
+ if (irec->rm_flags & XFS_RMAP_BMBT_BLOCK)
+ x |= XFS_RMAP_OFF_BMBT_BLOCK;
+ if (irec->rm_flags & XFS_RMAP_UNWRITTEN)
+ x |= XFS_RMAP_OFF_UNWRITTEN;
+ return x;
+}
+
+static inline int
+xfs_rmap_irec_offset_unpack(
+ __u64 offset,
+ struct xfs_rmap_irec *irec)
+{
+ if (offset & ~(XFS_RMAP_OFF_MASK | XFS_RMAP_OFF_FLAGS))
+ return -EFSCORRUPTED;
+ irec->rm_offset = XFS_RMAP_OFF(offset);
+ if (offset & XFS_RMAP_OFF_ATTR_FORK)
+ irec->rm_flags |= XFS_RMAP_ATTR_FORK;
+ if (offset & XFS_RMAP_OFF_BMBT_BLOCK)
+ irec->rm_flags |= XFS_RMAP_BMBT_BLOCK;
+ if (offset & XFS_RMAP_OFF_UNWRITTEN)
+ irec->rm_flags |= XFS_RMAP_UNWRITTEN;
+ return 0;
+}
+
+static inline void
+xfs_owner_info_unpack(
+ struct xfs_owner_info *oinfo,
+ uint64_t *owner,
+ uint64_t *offset,
+ unsigned int *flags)
+{
+ unsigned int r = 0;
+
+ *owner = oinfo->oi_owner;
+ *offset = oinfo->oi_offset;
+ if (oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK)
+ r |= XFS_RMAP_ATTR_FORK;
+ if (oinfo->oi_flags & XFS_OWNER_INFO_BMBT_BLOCK)
+ r |= XFS_RMAP_BMBT_BLOCK;
+ *flags = r;
+}
+
+static inline void
+xfs_owner_info_pack(
+ struct xfs_owner_info *oinfo,
+ uint64_t owner,
+ uint64_t offset,
+ unsigned int flags)
+{
+ oinfo->oi_owner = owner;
+ oinfo->oi_offset = XFS_RMAP_OFF(offset);
+ oinfo->oi_flags = 0;
+ if (flags & XFS_RMAP_ATTR_FORK)
+ oinfo->oi_flags |= XFS_OWNER_INFO_ATTR_FORK;
+ if (flags & XFS_RMAP_BMBT_BLOCK)
+ oinfo->oi_flags |= XFS_OWNER_INFO_BMBT_BLOCK;
+}
+
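A usage sketch for the helpers above (illustrative): build the owner info for an attr fork mapping, then unpack it into the raw owner/offset/flags triple that the low-level rmap routines consume.

	struct xfs_owner_info	oinfo;
	uint64_t		owner;
	uint64_t		offset;
	unsigned int		flags;

	xfs_rmap_ino_owner(&oinfo, ip->i_ino, XFS_ATTR_FORK, 100);
	xfs_owner_info_unpack(&oinfo, &owner, &offset, &flags);
	/* now owner == ip->i_ino, offset == 100, flags == XFS_RMAP_ATTR_FORK */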
+int xfs_rmap_alloc(struct xfs_trans *tp, struct xfs_buf *agbp,
+ xfs_agnumber_t agno, xfs_agblock_t bno, xfs_extlen_t len,
+ struct xfs_owner_info *oinfo);
+int xfs_rmap_free(struct xfs_trans *tp, struct xfs_buf *agbp,
+ xfs_agnumber_t agno, xfs_agblock_t bno, xfs_extlen_t len,
+ struct xfs_owner_info *oinfo);
+
+int xfs_rmap_lookup_le(struct xfs_btree_cur *cur, xfs_agblock_t bno,
+ xfs_extlen_t len, uint64_t owner, uint64_t offset,
+ unsigned int flags, int *stat);
+int xfs_rmap_lookup_eq(struct xfs_btree_cur *cur, xfs_agblock_t bno,
+ xfs_extlen_t len, uint64_t owner, uint64_t offset,
+ unsigned int flags, int *stat);
+int xfs_rmap_insert(struct xfs_btree_cur *rcur, xfs_agblock_t agbno,
+ xfs_extlen_t len, uint64_t owner, uint64_t offset,
+ unsigned int flags);
+int xfs_rmap_get_rec(struct xfs_btree_cur *cur, struct xfs_rmap_irec *irec,
+ int *stat);
+
+typedef int (*xfs_rmap_query_range_fn)(
+ struct xfs_btree_cur *cur,
+ struct xfs_rmap_irec *rec,
+ void *priv);
+
+int xfs_rmap_query_range(struct xfs_btree_cur *cur,
+ struct xfs_rmap_irec *low_rec, struct xfs_rmap_irec *high_rec,
+ xfs_rmap_query_range_fn fn, void *priv);
+
+enum xfs_rmap_intent_type {
+ XFS_RMAP_MAP,
+ XFS_RMAP_MAP_SHARED,
+ XFS_RMAP_UNMAP,
+ XFS_RMAP_UNMAP_SHARED,
+ XFS_RMAP_CONVERT,
+ XFS_RMAP_CONVERT_SHARED,
+ XFS_RMAP_ALLOC,
+ XFS_RMAP_FREE,
+};
+
+struct xfs_rmap_intent {
+ struct list_head ri_list;
+ enum xfs_rmap_intent_type ri_type;
+ __uint64_t ri_owner;
+ int ri_whichfork;
+ struct xfs_bmbt_irec ri_bmap;
+};
+
+/* functions for updating the rmapbt based on bmbt map/unmap operations */
+int xfs_rmap_map_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
+ struct xfs_inode *ip, int whichfork,
+ struct xfs_bmbt_irec *imap);
+int xfs_rmap_unmap_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
+ struct xfs_inode *ip, int whichfork,
+ struct xfs_bmbt_irec *imap);
+int xfs_rmap_convert_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
+ struct xfs_inode *ip, int whichfork,
+ struct xfs_bmbt_irec *imap);
+int xfs_rmap_alloc_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
+ xfs_agnumber_t agno, xfs_agblock_t bno, xfs_extlen_t len,
+ __uint64_t owner);
+int xfs_rmap_free_extent(struct xfs_mount *mp, struct xfs_defer_ops *dfops,
+ xfs_agnumber_t agno, xfs_agblock_t bno, xfs_extlen_t len,
+ __uint64_t owner);
+
+void xfs_rmap_finish_one_cleanup(struct xfs_trans *tp,
+ struct xfs_btree_cur *rcur, int error);
+int xfs_rmap_finish_one(struct xfs_trans *tp, enum xfs_rmap_intent_type type,
+ __uint64_t owner, int whichfork, xfs_fileoff_t startoff,
+ xfs_fsblock_t startblock, xfs_filblks_t blockcount,
+ xfs_exntst_t state, struct xfs_btree_cur **pcur);
+
+#endif /* __XFS_RMAP_H__ */
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c
new file mode 100644
index 000000000000..bc1faebc84ec
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_rmap_btree.c
@@ -0,0 +1,511 @@
+/*
+ * Copyright (c) 2014 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_inode.h"
+#include "xfs_trans.h"
+#include "xfs_alloc.h"
+#include "xfs_btree.h"
+#include "xfs_rmap.h"
+#include "xfs_rmap_btree.h"
+#include "xfs_trace.h"
+#include "xfs_cksum.h"
+#include "xfs_error.h"
+#include "xfs_extent_busy.h"
+
+/*
+ * Reverse map btree.
+ *
+ * This is a per-ag tree used to track the owner(s) of a given extent. With
+ * reflink it is possible for there to be multiple owners, which is a departure
+ * from classic XFS. Owner records for data extents are inserted when the
+ * extent is mapped and removed when an extent is unmapped. Owner records for
+ * all other block types (i.e. metadata) are inserted when an extent is
+ * allocated and removed when an extent is freed. There can only be one owner
+ * of a metadata extent, usually an inode or some other metadata structure like
+ * an AG btree.
+ *
+ * The rmap btree is part of the free space management, so blocks for the tree
+ * are sourced from the agfl. Hence we need transaction reservation support for
+ * this tree so that the freelist is always large enough. This also impacts on
+ * the minimum space we need to leave free in the AG.
+ *
+ * The tree is ordered by [ag block, owner, offset]. This is a large key size,
+ * but it is the only way to enforce unique keys when a block can be owned by
+ * multiple files at any offset. There's no need to order/search by extent
+ * size for online updating/management of the tree. It is intended that most
+ * reverse lookups will be to find the owner(s) of a particular block, or to
+ * try to recover tree and file data from corrupt primary metadata.
+ */
+
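[Editor's note: to make the [ag block, owner, offset] ordering concrete, here is a minimal sketch of a full-AG walk using the xfs_rmap_query_range() API declared in xfs_rmap.h. count_rmaps/count_all_rmaps are hypothetical helpers, not part of this patch; the all-ones high key follows the convention that it sorts after every real record.]

	/* Editor's sketch: count all rmap records in one AG. */
	static int
	count_rmaps(
		struct xfs_btree_cur	*cur,
		struct xfs_rmap_irec	*rec,
		void			*priv)
	{
		(*(uint64_t *)priv)++;
		return 0;
	}

	static int
	count_all_rmaps(
		struct xfs_btree_cur	*cur,
		uint64_t		*nr)
	{
		struct xfs_rmap_irec	low = { 0 };
		struct xfs_rmap_irec	high;

		/* An all-ones key sorts after every real record. */
		memset(&high, 0xFF, sizeof(high));
		return xfs_rmap_query_range(cur, &low, &high, count_rmaps, nr);
	}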
+static struct xfs_btree_cur *
+xfs_rmapbt_dup_cursor(
+ struct xfs_btree_cur *cur)
+{
+ return xfs_rmapbt_init_cursor(cur->bc_mp, cur->bc_tp,
+ cur->bc_private.a.agbp, cur->bc_private.a.agno);
+}
+
+STATIC void
+xfs_rmapbt_set_root(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr,
+ int inc)
+{
+ struct xfs_buf *agbp = cur->bc_private.a.agbp;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
+ int btnum = cur->bc_btnum;
+ struct xfs_perag *pag = xfs_perag_get(cur->bc_mp, seqno);
+
+ ASSERT(ptr->s != 0);
+
+ agf->agf_roots[btnum] = ptr->s;
+ be32_add_cpu(&agf->agf_levels[btnum], inc);
+ pag->pagf_levels[btnum] += inc;
+ xfs_perag_put(pag);
+
+ xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
+}
+
+STATIC int
+xfs_rmapbt_alloc_block(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *start,
+ union xfs_btree_ptr *new,
+ int *stat)
+{
+ int error;
+ xfs_agblock_t bno;
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);
+
+ /* Allocate the new block from the freelist. If we can't, give up. */
+ error = xfs_alloc_get_freelist(cur->bc_tp, cur->bc_private.a.agbp,
+ &bno, 1);
+ if (error) {
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
+ return error;
+ }
+
+ trace_xfs_rmapbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
+ bno, 1);
+ if (bno == NULLAGBLOCK) {
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 0;
+ return 0;
+ }
+
+ xfs_extent_busy_reuse(cur->bc_mp, cur->bc_private.a.agno, bno, 1,
+ false);
+
+ xfs_trans_agbtree_delta(cur->bc_tp, 1);
+ new->s = cpu_to_be32(bno);
+
+ XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
+ *stat = 1;
+ return 0;
+}
+
+STATIC int
+xfs_rmapbt_free_block(
+ struct xfs_btree_cur *cur,
+ struct xfs_buf *bp)
+{
+ struct xfs_buf *agbp = cur->bc_private.a.agbp;
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ xfs_agblock_t bno;
+ int error;
+
+ bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
+ trace_xfs_rmapbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
+ bno, 1);
+ error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
+ if (error)
+ return error;
+
+ xfs_extent_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1,
+ XFS_EXTENT_BUSY_SKIP_DISCARD);
+ xfs_trans_agbtree_delta(cur->bc_tp, -1);
+
+ return 0;
+}
+
+STATIC int
+xfs_rmapbt_get_minrecs(
+ struct xfs_btree_cur *cur,
+ int level)
+{
+ return cur->bc_mp->m_rmap_mnr[level != 0];
+}
+
+STATIC int
+xfs_rmapbt_get_maxrecs(
+ struct xfs_btree_cur *cur,
+ int level)
+{
+ return cur->bc_mp->m_rmap_mxr[level != 0];
+}
+
+STATIC void
+xfs_rmapbt_init_key_from_rec(
+ union xfs_btree_key *key,
+ union xfs_btree_rec *rec)
+{
+ key->rmap.rm_startblock = rec->rmap.rm_startblock;
+ key->rmap.rm_owner = rec->rmap.rm_owner;
+ key->rmap.rm_offset = rec->rmap.rm_offset;
+}
+
+/*
+ * The high key for a reverse mapping record can be computed by shifting
+ * the startblock and offset to the highest value that would still map
+ * to that record. In practice this means that we add blockcount-1 to
+ * the startblock for all records, and if the record is for a data/attr
+ * fork mapping, we add blockcount-1 to the offset too.
+ */
+STATIC void
+xfs_rmapbt_init_high_key_from_rec(
+ union xfs_btree_key *key,
+ union xfs_btree_rec *rec)
+{
+ __uint64_t off;
+ int adj;
+
+ adj = be32_to_cpu(rec->rmap.rm_blockcount) - 1;
+
+ key->rmap.rm_startblock = rec->rmap.rm_startblock;
+ be32_add_cpu(&key->rmap.rm_startblock, adj);
+ key->rmap.rm_owner = rec->rmap.rm_owner;
+ key->rmap.rm_offset = rec->rmap.rm_offset;
+ if (XFS_RMAP_NON_INODE_OWNER(be64_to_cpu(rec->rmap.rm_owner)) ||
+ XFS_RMAP_IS_BMBT_BLOCK(be64_to_cpu(rec->rmap.rm_offset)))
+ return;
+ off = be64_to_cpu(key->rmap.rm_offset);
+ off = (XFS_RMAP_OFF(off) + adj) | (off & ~XFS_RMAP_OFF_MASK);
+ key->rmap.rm_offset = cpu_to_be64(off);
+}
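
[Editor's worked example of the adjustment above, with hypothetical numbers:]

	/*
	 * For a data-fork record with rm_startblock = 100, rm_blockcount = 8
	 * and rm_offset = 20, the high key is startblock 107 / offset 27
	 * (both fields get blockcount - 1 = 7 added).  For a non-inode owner
	 * or a bmbt-block mapping the function returns early, so only the
	 * startblock is adjusted to 107 and the offset is left untouched.
	 */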
+
+STATIC void
+xfs_rmapbt_init_rec_from_cur(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_rec *rec)
+{
+ rec->rmap.rm_startblock = cpu_to_be32(cur->bc_rec.r.rm_startblock);
+ rec->rmap.rm_blockcount = cpu_to_be32(cur->bc_rec.r.rm_blockcount);
+ rec->rmap.rm_owner = cpu_to_be64(cur->bc_rec.r.rm_owner);
+ rec->rmap.rm_offset = cpu_to_be64(
+ xfs_rmap_irec_offset_pack(&cur->bc_rec.r));
+}
+
+STATIC void
+xfs_rmapbt_init_ptr_from_cur(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *ptr)
+{
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
+
+ ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
+ ASSERT(agf->agf_roots[cur->bc_btnum] != 0);
+
+ ptr->s = agf->agf_roots[cur->bc_btnum];
+}
+
+STATIC __int64_t
+xfs_rmapbt_key_diff(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *key)
+{
+ struct xfs_rmap_irec *rec = &cur->bc_rec.r;
+ struct xfs_rmap_key *kp = &key->rmap;
+ __u64 x, y;
+ __int64_t d;
+
+ d = (__int64_t)be32_to_cpu(kp->rm_startblock) - rec->rm_startblock;
+ if (d)
+ return d;
+
+ x = be64_to_cpu(kp->rm_owner);
+ y = rec->rm_owner;
+ if (x > y)
+ return 1;
+ else if (y > x)
+ return -1;
+
+ x = XFS_RMAP_OFF(be64_to_cpu(kp->rm_offset));
+ y = rec->rm_offset;
+ if (x > y)
+ return 1;
+ else if (y > x)
+ return -1;
+ return 0;
+}
+
+STATIC __int64_t
+xfs_rmapbt_diff_two_keys(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *k1,
+ union xfs_btree_key *k2)
+{
+ struct xfs_rmap_key *kp1 = &k1->rmap;
+ struct xfs_rmap_key *kp2 = &k2->rmap;
+ __int64_t d;
+ __u64 x, y;
+
+ d = (__int64_t)be32_to_cpu(kp1->rm_startblock) -
+ be32_to_cpu(kp2->rm_startblock);
+ if (d)
+ return d;
+
+ x = be64_to_cpu(kp1->rm_owner);
+ y = be64_to_cpu(kp2->rm_owner);
+ if (x > y)
+ return 1;
+ else if (y > x)
+ return -1;
+
+ x = XFS_RMAP_OFF(be64_to_cpu(kp1->rm_offset));
+ y = XFS_RMAP_OFF(be64_to_cpu(kp2->rm_offset));
+ if (x > y)
+ return 1;
+ else if (y > x)
+ return -1;
+ return 0;
+}
+
+static bool
+xfs_rmapbt_verify(
+ struct xfs_buf *bp)
+{
+ struct xfs_mount *mp = bp->b_target->bt_mount;
+ struct xfs_btree_block *block = XFS_BUF_TO_BLOCK(bp);
+ struct xfs_perag *pag = bp->b_pag;
+ unsigned int level;
+
+ /*
+ * magic number and level verification
+ *
+ * During growfs operations, we can't verify the exact level or owner as
+ * the perag is not fully initialised and hence not attached to the
+ * buffer. In this case, check against the maximum tree depth.
+ *
+ * Similarly, during log recovery we will have a perag structure
+ * attached, but the agf information will not yet have been initialised
+ * from the on disk AGF. Again, we can only check against maximum limits
+ * in this case.
+ */
+ if (block->bb_magic != cpu_to_be32(XFS_RMAP_CRC_MAGIC))
+ return false;
+
+ if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
+ return false;
+ if (!xfs_btree_sblock_v5hdr_verify(bp))
+ return false;
+
+ level = be16_to_cpu(block->bb_level);
+ if (pag && pag->pagf_init) {
+ if (level >= pag->pagf_levels[XFS_BTNUM_RMAPi])
+ return false;
+ } else if (level >= mp->m_rmap_maxlevels)
+ return false;
+
+ return xfs_btree_sblock_verify(bp, mp->m_rmap_mxr[level != 0]);
+}
+
+static void
+xfs_rmapbt_read_verify(
+ struct xfs_buf *bp)
+{
+ if (!xfs_btree_sblock_verify_crc(bp))
+ xfs_buf_ioerror(bp, -EFSBADCRC);
+ else if (!xfs_rmapbt_verify(bp))
+ xfs_buf_ioerror(bp, -EFSCORRUPTED);
+
+ if (bp->b_error) {
+ trace_xfs_btree_corrupt(bp, _RET_IP_);
+ xfs_verifier_error(bp);
+ }
+}
+
+static void
+xfs_rmapbt_write_verify(
+ struct xfs_buf *bp)
+{
+ if (!xfs_rmapbt_verify(bp)) {
+ trace_xfs_btree_corrupt(bp, _RET_IP_);
+ xfs_buf_ioerror(bp, -EFSCORRUPTED);
+ xfs_verifier_error(bp);
+ return;
+ }
+ xfs_btree_sblock_calc_crc(bp);
+}
+
+const struct xfs_buf_ops xfs_rmapbt_buf_ops = {
+ .name = "xfs_rmapbt",
+ .verify_read = xfs_rmapbt_read_verify,
+ .verify_write = xfs_rmapbt_write_verify,
+};
+
+#if defined(DEBUG) || defined(XFS_WARN)
+STATIC int
+xfs_rmapbt_keys_inorder(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_key *k1,
+ union xfs_btree_key *k2)
+{
+ __uint32_t x;
+ __uint32_t y;
+ __uint64_t a;
+ __uint64_t b;
+
+ x = be32_to_cpu(k1->rmap.rm_startblock);
+ y = be32_to_cpu(k2->rmap.rm_startblock);
+ if (x < y)
+ return 1;
+ else if (x > y)
+ return 0;
+ a = be64_to_cpu(k1->rmap.rm_owner);
+ b = be64_to_cpu(k2->rmap.rm_owner);
+ if (a < b)
+ return 1;
+ else if (a > b)
+ return 0;
+ a = XFS_RMAP_OFF(be64_to_cpu(k1->rmap.rm_offset));
+ b = XFS_RMAP_OFF(be64_to_cpu(k2->rmap.rm_offset));
+ if (a <= b)
+ return 1;
+ return 0;
+}
+
+STATIC int
+xfs_rmapbt_recs_inorder(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_rec *r1,
+ union xfs_btree_rec *r2)
+{
+ __uint32_t x;
+ __uint32_t y;
+ __uint64_t a;
+ __uint64_t b;
+
+ x = be32_to_cpu(r1->rmap.rm_startblock);
+ y = be32_to_cpu(r2->rmap.rm_startblock);
+ if (x < y)
+ return 1;
+ else if (x > y)
+ return 0;
+ a = be64_to_cpu(r1->rmap.rm_owner);
+ b = be64_to_cpu(r2->rmap.rm_owner);
+ if (a < b)
+ return 1;
+ else if (a > b)
+ return 0;
+ a = XFS_RMAP_OFF(be64_to_cpu(r1->rmap.rm_offset));
+ b = XFS_RMAP_OFF(be64_to_cpu(r2->rmap.rm_offset));
+ if (a <= b)
+ return 1;
+ return 0;
+}
+#endif /* DEBUG || XFS_WARN */
+
+static const struct xfs_btree_ops xfs_rmapbt_ops = {
+ .rec_len = sizeof(struct xfs_rmap_rec),
+ .key_len = 2 * sizeof(struct xfs_rmap_key),
+
+ .dup_cursor = xfs_rmapbt_dup_cursor,
+ .set_root = xfs_rmapbt_set_root,
+ .alloc_block = xfs_rmapbt_alloc_block,
+ .free_block = xfs_rmapbt_free_block,
+ .get_minrecs = xfs_rmapbt_get_minrecs,
+ .get_maxrecs = xfs_rmapbt_get_maxrecs,
+ .init_key_from_rec = xfs_rmapbt_init_key_from_rec,
+ .init_high_key_from_rec = xfs_rmapbt_init_high_key_from_rec,
+ .init_rec_from_cur = xfs_rmapbt_init_rec_from_cur,
+ .init_ptr_from_cur = xfs_rmapbt_init_ptr_from_cur,
+ .key_diff = xfs_rmapbt_key_diff,
+ .buf_ops = &xfs_rmapbt_buf_ops,
+ .diff_two_keys = xfs_rmapbt_diff_two_keys,
+#if defined(DEBUG) || defined(XFS_WARN)
+ .keys_inorder = xfs_rmapbt_keys_inorder,
+ .recs_inorder = xfs_rmapbt_recs_inorder,
+#endif
+};
+
+/*
+ * Allocate a new reverse mapping btree cursor.
+ */
+struct xfs_btree_cur *
+xfs_rmapbt_init_cursor(
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp,
+ xfs_agnumber_t agno)
+{
+ struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ struct xfs_btree_cur *cur;
+
+ cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
+ cur->bc_tp = tp;
+ cur->bc_mp = mp;
+ /* Overlapping btree; 2 keys per pointer. */
+ cur->bc_btnum = XFS_BTNUM_RMAP;
+ cur->bc_flags = XFS_BTREE_CRC_BLOCKS | XFS_BTREE_OVERLAPPING;
+ cur->bc_blocklog = mp->m_sb.sb_blocklog;
+ cur->bc_ops = &xfs_rmapbt_ops;
+ cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
+
+ cur->bc_private.a.agbp = agbp;
+ cur->bc_private.a.agno = agno;
+
+ return cur;
+}
+
+/*
+ * Calculate number of records in an rmap btree block.
+ */
+int
+xfs_rmapbt_maxrecs(
+ struct xfs_mount *mp,
+ int blocklen,
+ int leaf)
+{
+ blocklen -= XFS_RMAP_BLOCK_LEN;
+
+ if (leaf)
+ return blocklen / sizeof(struct xfs_rmap_rec);
+ return blocklen /
+ (2 * sizeof(struct xfs_rmap_key) + sizeof(xfs_rmap_ptr_t));
+}
+
+/* Compute the maximum height of an rmap btree. */
+void
+xfs_rmapbt_compute_maxlevels(
+ struct xfs_mount *mp)
+{
+ mp->m_rmap_maxlevels = xfs_btree_compute_maxlevels(mp,
+ mp->m_rmap_mnr, mp->m_sb.sb_agblocks);
+}
diff --git a/fs/xfs/libxfs/xfs_rmap_btree.h b/fs/xfs/libxfs/xfs_rmap_btree.h
new file mode 100644
index 000000000000..e73a55357dab
--- /dev/null
+++ b/fs/xfs/libxfs/xfs_rmap_btree.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2014 Red Hat, Inc.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef __XFS_RMAP_BTREE_H__
+#define __XFS_RMAP_BTREE_H__
+
+struct xfs_buf;
+struct xfs_btree_cur;
+struct xfs_mount;
+
+/* rmaps only exist on crc enabled filesystems */
+#define XFS_RMAP_BLOCK_LEN XFS_BTREE_SBLOCK_CRC_LEN
+
+/*
+ * Record, key, and pointer address macros for btree blocks.
+ *
+ * (note that some of these may appear unused, but they are used in userspace)
+ */
+#define XFS_RMAP_REC_ADDR(block, index) \
+ ((struct xfs_rmap_rec *) \
+ ((char *)(block) + XFS_RMAP_BLOCK_LEN + \
+ (((index) - 1) * sizeof(struct xfs_rmap_rec))))
+
+#define XFS_RMAP_KEY_ADDR(block, index) \
+ ((struct xfs_rmap_key *) \
+ ((char *)(block) + XFS_RMAP_BLOCK_LEN + \
+ ((index) - 1) * 2 * sizeof(struct xfs_rmap_key)))
+
+#define XFS_RMAP_HIGH_KEY_ADDR(block, index) \
+ ((struct xfs_rmap_key *) \
+ ((char *)(block) + XFS_RMAP_BLOCK_LEN + \
+ sizeof(struct xfs_rmap_key) + \
+ ((index) - 1) * 2 * sizeof(struct xfs_rmap_key)))
+
+#define XFS_RMAP_PTR_ADDR(block, index, maxrecs) \
+ ((xfs_rmap_ptr_t *) \
+ ((char *)(block) + XFS_RMAP_BLOCK_LEN + \
+ (maxrecs) * 2 * sizeof(struct xfs_rmap_key) + \
+ ((index) - 1) * sizeof(xfs_rmap_ptr_t)))
+
+struct xfs_btree_cur *xfs_rmapbt_init_cursor(struct xfs_mount *mp,
+ struct xfs_trans *tp, struct xfs_buf *bp,
+ xfs_agnumber_t agno);
+int xfs_rmapbt_maxrecs(struct xfs_mount *mp, int blocklen, int leaf);
+extern void xfs_rmapbt_compute_maxlevels(struct xfs_mount *mp);
+
+#endif /* __XFS_RMAP_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index 951c044e24e4..e2e1106c9fad 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -70,7 +70,7 @@ const struct xfs_buf_ops xfs_rtbuf_ops = {
* Get a buffer for the bitmap or summary file block specified.
* The buffer is returned read and locked.
*/
-int
+static int
xfs_rtbuf_get(
xfs_mount_t *mp, /* file system mount structure */
xfs_trans_t *tp, /* transaction pointer */
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 12ca86778e02..0e3d4f5ec33c 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -24,6 +24,7 @@
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_alloc.h"
@@ -36,6 +37,7 @@
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_log.h"
+#include "xfs_rmap_btree.h"
/*
* Physical superblock buffer manipulations. Shared with libxfs in userspace.
@@ -729,6 +731,11 @@ xfs_sb_mount_common(
mp->m_bmap_dmnr[0] = mp->m_bmap_dmxr[0] / 2;
mp->m_bmap_dmnr[1] = mp->m_bmap_dmxr[1] / 2;
+ mp->m_rmap_mxr[0] = xfs_rmapbt_maxrecs(mp, sbp->sb_blocksize, 1);
+ mp->m_rmap_mxr[1] = xfs_rmapbt_maxrecs(mp, sbp->sb_blocksize, 0);
+ mp->m_rmap_mnr[0] = mp->m_rmap_mxr[0] / 2;
+ mp->m_rmap_mnr[1] = mp->m_rmap_mxr[1] / 2;
+
mp->m_bsize = XFS_FSB_TO_BB(mp, 1);
mp->m_ialloc_inos = (int)MAX((__uint16_t)XFS_INODES_PER_CHUNK,
sbp->sb_inopblock);
@@ -738,6 +745,8 @@ xfs_sb_mount_common(
mp->m_ialloc_min_blks = sbp->sb_spino_align;
else
mp->m_ialloc_min_blks = mp->m_ialloc_blks;
+ mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
+ mp->m_ag_max_usable = xfs_alloc_ag_max_usable(mp);
}
/*
diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h
index 16002b5ec4eb..0c5b30bd884c 100644
--- a/fs/xfs/libxfs/xfs_shared.h
+++ b/fs/xfs/libxfs/xfs_shared.h
@@ -38,6 +38,7 @@ extern const struct xfs_buf_ops xfs_agi_buf_ops;
extern const struct xfs_buf_ops xfs_agf_buf_ops;
extern const struct xfs_buf_ops xfs_agfl_buf_ops;
extern const struct xfs_buf_ops xfs_allocbt_buf_ops;
+extern const struct xfs_buf_ops xfs_rmapbt_buf_ops;
extern const struct xfs_buf_ops xfs_attr3_leaf_buf_ops;
extern const struct xfs_buf_ops xfs_attr3_rmt_buf_ops;
extern const struct xfs_buf_ops xfs_bmbt_buf_ops;
@@ -116,6 +117,7 @@ int xfs_log_calc_minimum_size(struct xfs_mount *);
#define XFS_INO_BTREE_REF 3
#define XFS_ALLOC_BTREE_REF 2
#define XFS_BMAP_BTREE_REF 2
+#define XFS_RMAP_BTREE_REF 2
#define XFS_DIR_BTREE_REF 2
#define XFS_INO_REF 2
#define XFS_ATTR_BTREE_REF 1
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index 68cb1e7bf2bb..301ef2f4dbd6 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -64,6 +64,30 @@ xfs_calc_buf_res(
}
/*
+ * Per-extent log reservation for the btree changes involved in freeing or
+ * allocating an extent. In classic XFS there are two trees that are
+ * modified (bnobt + cntbt); with rmap enabled, a third tree (rmapbt) is
+ * modified as well. The number of blocks reserved is based on the formula:
+ *
+ * num trees * ((2 blocks/level * max depth) - 1)
+ *
+ * Keep in mind that max depth is calculated separately for each type of tree.
+ */
+static uint
+xfs_allocfree_log_count(
+ struct xfs_mount *mp,
+ uint num_ops)
+{
+ uint blocks;
+
+ blocks = num_ops * 2 * (2 * mp->m_ag_maxlevels - 1);
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ blocks += num_ops * (2 * mp->m_rmap_maxlevels - 1);
+
+ return blocks;
+}
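
[Editor's note: plugging hypothetical tree depths into the formula above; m_ag_maxlevels = 5 and m_rmap_maxlevels = 7 are illustrative values, not derived from this patch:]

	/*
	 * Editor's worked example:
	 *
	 *	bno + cnt: num_ops * 2 * (2 * 5 - 1) = num_ops * 18
	 *	rmap:      num_ops *     (2 * 7 - 1) = num_ops * 13
	 *	total:     num_ops * 31 blocks of log reservation
	 */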
+
+/*
* Logging inodes is really tricksy. They are logged in memory format,
* which means that what we write into the log doesn't directly translate into
* the amount of space they use on disk.
@@ -126,7 +150,7 @@ xfs_calc_inode_res(
*/
STATIC uint
xfs_calc_finobt_res(
- struct xfs_mount *mp,
+ struct xfs_mount *mp,
int alloc,
int modify)
{
@@ -137,7 +161,7 @@ xfs_calc_finobt_res(
res = xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1));
if (alloc)
- res += xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+ res += xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
XFS_FSB_TO_B(mp, 1));
if (modify)
res += (uint)XFS_FSB_TO_B(mp, 1);
@@ -153,9 +177,9 @@ xfs_calc_finobt_res(
* item logged to try to account for the overhead of the transaction mechanism.
*
* Note: Most of the reservations underestimate the number of allocation
- * groups into which they could free extents in the xfs_bmap_finish() call.
+ * groups into which they could free extents in the xfs_defer_finish() call.
* This is because the number in the worst case is quite high and quite
- * unusual. In order to fix this we need to change xfs_bmap_finish() to free
+ * unusual. In order to fix this we need to change xfs_defer_finish() to free
* extents in only a single AG at a time. This will require changes to the
* EFI code as well, however, so that the EFI for the extents not freed is
* logged again in each transaction. See SGI PV #261917.
@@ -188,10 +212,10 @@ xfs_calc_write_reservation(
xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
XFS_FSB_TO_B(mp, 1)) +
xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
XFS_FSB_TO_B(mp, 1))),
(xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
XFS_FSB_TO_B(mp, 1))));
}
@@ -217,10 +241,10 @@ xfs_calc_itruncate_reservation(
xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1,
XFS_FSB_TO_B(mp, 1))),
(xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 4),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4),
XFS_FSB_TO_B(mp, 1)) +
xfs_calc_buf_res(5, 0) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
XFS_FSB_TO_B(mp, 1)) +
xfs_calc_buf_res(2 + mp->m_ialloc_blks +
mp->m_in_maxlevels, 0)));
@@ -247,7 +271,7 @@ xfs_calc_rename_reservation(
xfs_calc_buf_res(2 * XFS_DIROP_LOG_COUNT(mp),
XFS_FSB_TO_B(mp, 1))),
(xfs_calc_buf_res(7, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 3),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 3),
XFS_FSB_TO_B(mp, 1))));
}
@@ -286,7 +310,7 @@ xfs_calc_link_reservation(
xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp),
XFS_FSB_TO_B(mp, 1))),
(xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
XFS_FSB_TO_B(mp, 1))));
}
@@ -324,7 +348,7 @@ xfs_calc_remove_reservation(
xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp),
XFS_FSB_TO_B(mp, 1))),
(xfs_calc_buf_res(4, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
XFS_FSB_TO_B(mp, 1))));
}
@@ -371,7 +395,7 @@ xfs_calc_create_resv_alloc(
mp->m_sb.sb_sectsize +
xfs_calc_buf_res(mp->m_ialloc_blks, XFS_FSB_TO_B(mp, 1)) +
xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
XFS_FSB_TO_B(mp, 1));
}
@@ -399,7 +423,7 @@ xfs_calc_icreate_resv_alloc(
return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
mp->m_sb.sb_sectsize +
xfs_calc_buf_res(mp->m_in_maxlevels, XFS_FSB_TO_B(mp, 1)) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
XFS_FSB_TO_B(mp, 1)) +
xfs_calc_finobt_res(mp, 0, 0);
}
@@ -483,7 +507,7 @@ xfs_calc_ifree_reservation(
xfs_calc_buf_res(1, 0) +
xfs_calc_buf_res(2 + mp->m_ialloc_blks +
mp->m_in_maxlevels, 0) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
XFS_FSB_TO_B(mp, 1)) +
xfs_calc_finobt_res(mp, 0, 1);
}
@@ -513,7 +537,7 @@ xfs_calc_growdata_reservation(
struct xfs_mount *mp)
{
return xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
XFS_FSB_TO_B(mp, 1));
}
@@ -535,7 +559,7 @@ xfs_calc_growrtalloc_reservation(
xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
XFS_FSB_TO_B(mp, 1)) +
xfs_calc_inode_res(mp, 1) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
XFS_FSB_TO_B(mp, 1));
}
@@ -611,7 +635,7 @@ xfs_calc_addafork_reservation(
xfs_calc_buf_res(1, mp->m_dir_geo->blksize) +
xfs_calc_buf_res(XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1,
XFS_FSB_TO_B(mp, 1)) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 1),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1),
XFS_FSB_TO_B(mp, 1));
}
@@ -634,7 +658,7 @@ xfs_calc_attrinval_reservation(
xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK),
XFS_FSB_TO_B(mp, 1))),
(xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 4),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4),
XFS_FSB_TO_B(mp, 1))));
}
@@ -701,7 +725,7 @@ xfs_calc_attrrm_reservation(
XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) +
xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), 0)),
(xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
- xfs_calc_buf_res(XFS_ALLOCFREE_LOG_COUNT(mp, 2),
+ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
XFS_FSB_TO_B(mp, 1))));
}
diff --git a/fs/xfs/libxfs/xfs_trans_resv.h b/fs/xfs/libxfs/xfs_trans_resv.h
index 797815012c0e..0eb46ed6d404 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.h
+++ b/fs/xfs/libxfs/xfs_trans_resv.h
@@ -68,16 +68,6 @@ struct xfs_trans_resv {
#define M_RES(mp) (&(mp)->m_resv)
/*
- * Per-extent log reservation for the allocation btree changes
- * involved in freeing or allocating an extent.
- * 2 trees * (2 blocks/level * max depth - 1) * block size
- */
-#define XFS_ALLOCFREE_LOG_RES(mp,nx) \
- ((nx) * (2 * XFS_FSB_TO_B((mp), 2 * (mp)->m_ag_maxlevels - 1)))
-#define XFS_ALLOCFREE_LOG_COUNT(mp,nx) \
- ((nx) * (2 * (2 * (mp)->m_ag_maxlevels - 1)))
-
-/*
* Per-directory log reservation for any directory change.
* dir blocks: (1 btree block per level + data block + free block) * dblock size
* bmap btree: (levels + 2) * max depth * block size
diff --git a/fs/xfs/libxfs/xfs_types.h b/fs/xfs/libxfs/xfs_types.h
index b79dc66b2ecd..3d503647f26b 100644
--- a/fs/xfs/libxfs/xfs_types.h
+++ b/fs/xfs/libxfs/xfs_types.h
@@ -108,8 +108,8 @@ typedef enum {
} xfs_lookup_t;
typedef enum {
- XFS_BTNUM_BNOi, XFS_BTNUM_CNTi, XFS_BTNUM_BMAPi, XFS_BTNUM_INOi,
- XFS_BTNUM_FINOi, XFS_BTNUM_MAX
+ XFS_BTNUM_BNOi, XFS_BTNUM_CNTi, XFS_BTNUM_RMAPi, XFS_BTNUM_BMAPi,
+ XFS_BTNUM_INOi, XFS_BTNUM_FINOi, XFS_BTNUM_MAX
} xfs_btnum_t;
struct xfs_name {
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 4c463b99fe57..7575cfc3ad15 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -87,6 +87,12 @@ xfs_find_bdev_for_inode(
* We're now finished for good with this page. Update the page state via the
* associated buffer_heads, paying attention to the start and end offsets that
* we need to process on the page.
+ *
+ * Landmine Warning: bh->b_end_io() will call end_page_writeback() on the last
+ * buffer in the IO. Once it does this, it is unsafe to access the bufferhead or
+ * the page at all, as we may be racing with memory reclaim and it can free both
+ * the bufferhead chain and the page as it will see the page as clean and
+ * unused.
*/
static void
xfs_finish_page_writeback(
@@ -95,8 +101,9 @@ xfs_finish_page_writeback(
int error)
{
unsigned int end = bvec->bv_offset + bvec->bv_len - 1;
- struct buffer_head *head, *bh;
+ struct buffer_head *head, *bh, *next;
unsigned int off = 0;
+ unsigned int bsize;
ASSERT(bvec->bv_offset < PAGE_SIZE);
ASSERT((bvec->bv_offset & ((1 << inode->i_blkbits) - 1)) == 0);
@@ -105,15 +112,17 @@ xfs_finish_page_writeback(
bh = head = page_buffers(bvec->bv_page);
+ bsize = bh->b_size;
do {
+ next = bh->b_this_page;
if (off < bvec->bv_offset)
goto next_bh;
if (off > end)
break;
bh->b_end_io(bh, !error);
next_bh:
- off += bh->b_size;
- } while ((bh = bh->b_this_page) != head);
+ off += bsize;
+ } while ((bh = next) != head);
}
/*
@@ -438,7 +447,8 @@ xfs_submit_ioend(
ioend->io_bio->bi_private = ioend;
ioend->io_bio->bi_end_io = xfs_end_bio;
-
+ bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE,
+ (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0);
/*
* If we are failing the IO now, just mark the ioend with an
* error and finish it. This will run IO completion immediately
@@ -451,8 +461,7 @@ xfs_submit_ioend(
return status;
}
- submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE,
- ioend->io_bio);
+ submit_bio(ioend->io_bio);
return 0;
}
@@ -510,8 +519,9 @@ xfs_chain_bio(
bio_chain(ioend->io_bio, new);
bio_get(ioend->io_bio); /* for xfs_destroy_ioend */
- submit_bio(wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE,
- ioend->io_bio);
+ bio_set_op_attrs(ioend->io_bio, REQ_OP_WRITE,
+ (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : 0);
+ submit_bio(ioend->io_bio);
ioend->io_bio = new;
}
@@ -1040,6 +1050,20 @@ xfs_vm_releasepage(
trace_xfs_releasepage(page->mapping->host, page, 0, 0);
+ /*
+ * mm accommodates an old ext3 case where clean pages might not have had
+ * the dirty bit cleared. Thus, it can send actual dirty pages to
+ * ->releasepage() via shrink_active_list(). Conversely,
+ * block_invalidatepage() can send pages that are still marked dirty
+ * but otherwise have invalidated buffers.
+ *
+ * We've historically freed buffers on the latter. Instead, quietly
+ * filter out all dirty pages to avoid spurious buffer state warnings.
+ * This can likely be removed once shrink_active_list() is fixed.
+ */
+ if (PageDirty(page))
+ return 0;
+
xfs_count_page_state(page, &delalloc, &unwritten);
if (WARN_ON_ONCE(delalloc))
@@ -1143,6 +1167,8 @@ __xfs_get_blocks(
ssize_t size;
int new = 0;
+ BUG_ON(create && !direct);
+
if (XFS_FORCED_SHUTDOWN(mp))
return -EIO;
@@ -1150,22 +1176,14 @@ __xfs_get_blocks(
ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
size = bh_result->b_size;
- if (!create && direct && offset >= i_size_read(inode))
+ if (!create && offset >= i_size_read(inode))
return 0;
/*
* Direct I/O is usually done on preallocated files, so try getting
- * a block mapping without an exclusive lock first. For buffered
- * writes we already have the exclusive iolock anyway, so avoiding
- * a lock roundtrip here by taking the ilock exclusive from the
- * beginning is a useful micro optimization.
+ * a block mapping without an exclusive lock first.
*/
- if (create && !direct) {
- lockmode = XFS_ILOCK_EXCL;
- xfs_ilock(ip, lockmode);
- } else {
- lockmode = xfs_ilock_data_map_shared(ip);
- }
+ lockmode = xfs_ilock_data_map_shared(ip);
ASSERT(offset <= mp->m_super->s_maxbytes);
if (offset + size > mp->m_super->s_maxbytes)
@@ -1184,37 +1202,19 @@ __xfs_get_blocks(
(imap.br_startblock == HOLESTARTBLOCK ||
imap.br_startblock == DELAYSTARTBLOCK) ||
(IS_DAX(inode) && ISUNWRITTEN(&imap)))) {
- if (direct || xfs_get_extsz_hint(ip)) {
- /*
- * xfs_iomap_write_direct() expects the shared lock. It
- * is unlocked on return.
- */
- if (lockmode == XFS_ILOCK_EXCL)
- xfs_ilock_demote(ip, lockmode);
-
- error = xfs_iomap_write_direct(ip, offset, size,
- &imap, nimaps);
- if (error)
- return error;
- new = 1;
+ /*
+ * xfs_iomap_write_direct() expects the shared lock. It
+ * is unlocked on return.
+ */
+ if (lockmode == XFS_ILOCK_EXCL)
+ xfs_ilock_demote(ip, lockmode);
- } else {
- /*
- * Delalloc reservations do not require a transaction,
- * we can go on without dropping the lock here. If we
- * are allocating a new delalloc block, make sure that
- * we set the new flag so that we mark the buffer new so
- * that we know that it is newly allocated if the write
- * fails.
- */
- if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
- new = 1;
- error = xfs_iomap_write_delay(ip, offset, size, &imap);
- if (error)
- goto out_unlock;
+ error = xfs_iomap_write_direct(ip, offset, size,
+ &imap, nimaps);
+ if (error)
+ return error;
+ new = 1;
- xfs_iunlock(ip, lockmode);
- }
trace_xfs_get_blocks_alloc(ip, offset, size,
ISUNWRITTEN(&imap) ? XFS_IO_UNWRITTEN
: XFS_IO_DELALLOC, &imap);
@@ -1235,9 +1235,7 @@ __xfs_get_blocks(
}
/* trim mapping down to size requested */
- if (direct || size > (1 << inode->i_blkbits))
- xfs_map_trim_size(inode, iblock, bh_result,
- &imap, offset, size);
+ xfs_map_trim_size(inode, iblock, bh_result, &imap, offset, size);
/*
* For unwritten extents do not report a disk address in the buffered
@@ -1250,7 +1248,7 @@ __xfs_get_blocks(
if (ISUNWRITTEN(&imap))
set_buffer_unwritten(bh_result);
/* direct IO needs special help */
- if (create && direct) {
+ if (create) {
if (dax_fault)
ASSERT(!ISUNWRITTEN(&imap));
else
@@ -1279,14 +1277,7 @@ __xfs_get_blocks(
(new || ISUNWRITTEN(&imap))))
set_buffer_new(bh_result);
- if (imap.br_startblock == DELAYSTARTBLOCK) {
- BUG_ON(direct);
- if (create) {
- set_buffer_uptodate(bh_result);
- set_buffer_mapped(bh_result);
- set_buffer_delay(bh_result);
- }
- }
+ BUG_ON(direct && imap.br_startblock == DELAYSTARTBLOCK);
return 0;
@@ -1336,7 +1327,7 @@ xfs_get_blocks_dax_fault(
* whereas if we have flags set we will always be called in task context
* (i.e. from a workqueue).
*/
-STATIC int
+int
xfs_end_io_direct_write(
struct kiocb *iocb,
loff_t offset,
@@ -1407,234 +1398,10 @@ xfs_vm_direct_IO(
struct kiocb *iocb,
struct iov_iter *iter)
{
- struct inode *inode = iocb->ki_filp->f_mapping->host;
- dio_iodone_t *endio = NULL;
- int flags = 0;
- struct block_device *bdev;
-
- if (iov_iter_rw(iter) == WRITE) {
- endio = xfs_end_io_direct_write;
- flags = DIO_ASYNC_EXTEND;
- }
-
- if (IS_DAX(inode)) {
- return dax_do_io(iocb, inode, iter,
- xfs_get_blocks_direct, endio, 0);
- }
-
- bdev = xfs_find_bdev_for_inode(inode);
- return __blockdev_direct_IO(iocb, inode, bdev, iter,
- xfs_get_blocks_direct, endio, NULL, flags);
-}
-
-/*
- * Punch out the delalloc blocks we have already allocated.
- *
- * Don't bother with xfs_setattr given that nothing can have made it to disk yet
- * as the page is still locked at this point.
- */
-STATIC void
-xfs_vm_kill_delalloc_range(
- struct inode *inode,
- loff_t start,
- loff_t end)
-{
- struct xfs_inode *ip = XFS_I(inode);
- xfs_fileoff_t start_fsb;
- xfs_fileoff_t end_fsb;
- int error;
-
- start_fsb = XFS_B_TO_FSB(ip->i_mount, start);
- end_fsb = XFS_B_TO_FSB(ip->i_mount, end);
- if (end_fsb <= start_fsb)
- return;
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
- end_fsb - start_fsb);
- if (error) {
- /* something screwed, just bail */
- if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
- xfs_alert(ip->i_mount,
- "xfs_vm_write_failed: unable to clean up ino %lld",
- ip->i_ino);
- }
- }
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
-}
-
-STATIC void
-xfs_vm_write_failed(
- struct inode *inode,
- struct page *page,
- loff_t pos,
- unsigned len)
-{
- loff_t block_offset;
- loff_t block_start;
- loff_t block_end;
- loff_t from = pos & (PAGE_SIZE - 1);
- loff_t to = from + len;
- struct buffer_head *bh, *head;
- struct xfs_mount *mp = XFS_I(inode)->i_mount;
-
/*
- * The request pos offset might be 32 or 64 bit, this is all fine
- * on 64-bit platform. However, for 64-bit pos request on 32-bit
- * platform, the high 32-bit will be masked off if we evaluate the
- * block_offset via (pos & PAGE_MASK) because the PAGE_MASK is
- * 0xfffff000 as an unsigned long, hence the result is incorrect
- * which could cause the following ASSERT failed in most cases.
- * In order to avoid this, we can evaluate the block_offset of the
- * start of the page by using shifts rather than masks the mismatch
- * problem.
+ * We just need the method present so that open/fcntl allow direct I/O.
*/
- block_offset = (pos >> PAGE_SHIFT) << PAGE_SHIFT;
-
- ASSERT(block_offset + from == pos);
-
- head = page_buffers(page);
- block_start = 0;
- for (bh = head; bh != head || !block_start;
- bh = bh->b_this_page, block_start = block_end,
- block_offset += bh->b_size) {
- block_end = block_start + bh->b_size;
-
- /* skip buffers before the write */
- if (block_end <= from)
- continue;
-
- /* if the buffer is after the write, we're done */
- if (block_start >= to)
- break;
-
- /*
- * Process delalloc and unwritten buffers beyond EOF. We can
- * encounter unwritten buffers in the event that a file has
- * post-EOF unwritten extents and an extending write happens to
- * fail (e.g., an unaligned write that also involves a delalloc
- * to the same page).
- */
- if (!buffer_delay(bh) && !buffer_unwritten(bh))
- continue;
-
- if (!xfs_mp_fail_writes(mp) && !buffer_new(bh) &&
- block_offset < i_size_read(inode))
- continue;
-
- if (buffer_delay(bh))
- xfs_vm_kill_delalloc_range(inode, block_offset,
- block_offset + bh->b_size);
-
- /*
- * This buffer does not contain data anymore. make sure anyone
- * who finds it knows that for certain.
- */
- clear_buffer_delay(bh);
- clear_buffer_uptodate(bh);
- clear_buffer_mapped(bh);
- clear_buffer_new(bh);
- clear_buffer_dirty(bh);
- clear_buffer_unwritten(bh);
- }
-
-}
-
-/*
- * This used to call block_write_begin(), but it unlocks and releases the page
- * on error, and we need that page to be able to punch stale delalloc blocks out
- * on failure. hence we copy-n-waste it here and call xfs_vm_write_failed() at
- * the appropriate point.
- */
-STATIC int
-xfs_vm_write_begin(
- struct file *file,
- struct address_space *mapping,
- loff_t pos,
- unsigned len,
- unsigned flags,
- struct page **pagep,
- void **fsdata)
-{
- pgoff_t index = pos >> PAGE_SHIFT;
- struct page *page;
- int status;
- struct xfs_mount *mp = XFS_I(mapping->host)->i_mount;
-
- ASSERT(len <= PAGE_SIZE);
-
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page)
- return -ENOMEM;
-
- status = __block_write_begin(page, pos, len, xfs_get_blocks);
- if (xfs_mp_fail_writes(mp))
- status = -EIO;
- if (unlikely(status)) {
- struct inode *inode = mapping->host;
- size_t isize = i_size_read(inode);
-
- xfs_vm_write_failed(inode, page, pos, len);
- unlock_page(page);
-
- /*
- * If the write is beyond EOF, we only want to kill blocks
- * allocated in this write, not blocks that were previously
- * written successfully.
- */
- if (xfs_mp_fail_writes(mp))
- isize = 0;
- if (pos + len > isize) {
- ssize_t start = max_t(ssize_t, pos, isize);
-
- truncate_pagecache_range(inode, start, pos + len);
- }
-
- put_page(page);
- page = NULL;
- }
-
- *pagep = page;
- return status;
-}
-
-/*
- * On failure, we only need to kill delalloc blocks beyond EOF in the range of
- * this specific write because they will never be written. Previous writes
- * beyond EOF where block allocation succeeded do not need to be trashed, so
- * only new blocks from this write should be trashed. For blocks within
- * EOF, generic_write_end() zeros them so they are safe to leave alone and be
- * written with all the other valid data.
- */
-STATIC int
-xfs_vm_write_end(
- struct file *file,
- struct address_space *mapping,
- loff_t pos,
- unsigned len,
- unsigned copied,
- struct page *page,
- void *fsdata)
-{
- int ret;
-
- ASSERT(len <= PAGE_SIZE);
-
- ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
- if (unlikely(ret < len)) {
- struct inode *inode = mapping->host;
- size_t isize = i_size_read(inode);
- loff_t to = pos + len;
-
- if (to > isize) {
- /* only kill blocks in this write beyond EOF */
- if (pos > isize)
- isize = pos;
- xfs_vm_kill_delalloc_range(inode, isize, to);
- truncate_pagecache_range(inode, isize, to);
- }
- }
- return ret;
+ return -EINVAL;
}
STATIC sector_t
@@ -1747,8 +1514,6 @@ const struct address_space_operations xfs_address_space_operations = {
.set_page_dirty = xfs_vm_set_page_dirty,
.releasepage = xfs_vm_releasepage,
.invalidatepage = xfs_vm_invalidatepage,
- .write_begin = xfs_vm_write_begin,
- .write_end = xfs_vm_write_end,
.bmap = xfs_vm_bmap,
.direct_IO = xfs_vm_direct_IO,
.migratepage = buffer_migrate_page,
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index 814aab790713..bf2d9a141a73 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -60,6 +60,9 @@ int xfs_get_blocks_direct(struct inode *inode, sector_t offset,
int xfs_get_blocks_dax_fault(struct inode *inode, sector_t offset,
struct buffer_head *map_bh, int create);
+int xfs_end_io_direct_write(struct kiocb *iocb, loff_t offset,
+ ssize_t size, void *private);
+
extern void xfs_count_page_state(struct page *, int *, int *);
extern struct block_device *xfs_find_bdev_for_inode(struct inode *);
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index 55d214981ed2..be0b79d8900f 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -322,7 +322,7 @@ xfs_attr3_node_inactive(
* Recurse (gasp!) through the attribute nodes until we find leaves.
* We're doing a depth-first traversal in order to invalidate everything.
*/
-int
+static int
xfs_attr3_root_inactive(
struct xfs_trans **trans,
struct xfs_inode *dp)
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index d25f26b22ac9..25e76cd6c053 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -65,7 +65,7 @@ xfs_attr_shortform_compare(const void *a, const void *b)
* we have to calculate each entry's hashvalue and sort them before
* we can begin returning them to the user.
*/
-int
+static int
xfs_attr_shortform_list(xfs_attr_list_context_t *context)
{
attrlist_cursor_kern_t *cursor;
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 586bb64e674b..4ece4f2ffc72 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -25,6 +25,7 @@
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_trans.h"
@@ -40,6 +41,7 @@
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
+#include "xfs_rmap_btree.h"
/* Kernel only BMAP related definitions and functions */
@@ -79,79 +81,6 @@ xfs_zero_extent(
GFP_NOFS, true);
}
-/*
- * Routine to be called at transaction's end by xfs_bmapi, xfs_bunmapi
- * caller. Frees all the extents that need freeing, which must be done
- * last due to locking considerations. We never free any extents in
- * the first transaction.
- *
- * If an inode *ip is provided, rejoin it to the transaction if
- * the transaction was committed.
- */
-int /* error */
-xfs_bmap_finish(
- struct xfs_trans **tp, /* transaction pointer addr */
- struct xfs_bmap_free *flist, /* i/o: list extents to free */
- struct xfs_inode *ip)
-{
- struct xfs_efd_log_item *efd; /* extent free data */
- struct xfs_efi_log_item *efi; /* extent free intention */
- int error; /* error return value */
- int committed;/* xact committed or not */
- struct xfs_bmap_free_item *free; /* free extent item */
- struct xfs_bmap_free_item *next; /* next item on free list */
-
- ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
- if (flist->xbf_count == 0)
- return 0;
-
- efi = xfs_trans_get_efi(*tp, flist->xbf_count);
- for (free = flist->xbf_first; free; free = free->xbfi_next)
- xfs_trans_log_efi_extent(*tp, efi, free->xbfi_startblock,
- free->xbfi_blockcount);
-
- error = __xfs_trans_roll(tp, ip, &committed);
- if (error) {
- /*
- * If the transaction was committed, drop the EFD reference
- * since we're bailing out of here. The other reference is
- * dropped when the EFI hits the AIL.
- *
- * If the transaction was not committed, the EFI is freed by the
- * EFI item unlock handler on abort. Also, we have a new
- * transaction so we should return committed=1 even though we're
- * returning an error.
- */
- if (committed) {
- xfs_efi_release(efi);
- xfs_force_shutdown((*tp)->t_mountp,
- (error == -EFSCORRUPTED) ?
- SHUTDOWN_CORRUPT_INCORE :
- SHUTDOWN_META_IO_ERROR);
- }
- return error;
- }
-
- /*
- * Get an EFD and free each extent in the list, logging to the EFD in
- * the process. The remaining bmap free list is cleaned up by the caller
- * on error.
- */
- efd = xfs_trans_get_efd(*tp, efi, flist->xbf_count);
- for (free = flist->xbf_first; free != NULL; free = next) {
- next = free->xbfi_next;
-
- error = xfs_trans_free_extent(*tp, efd, free->xbfi_startblock,
- free->xbfi_blockcount);
- if (error)
- return error;
-
- xfs_bmap_del_free(flist, NULL, free);
- }
-
- return 0;
-}
-
int
xfs_bmap_rtalloc(
struct xfs_bmalloca *ap) /* bmap alloc argument struct */
@@ -198,9 +127,9 @@ xfs_bmap_rtalloc(
/*
* Lock out modifications to both the RT bitmap and summary inodes
*/
- xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL);
+ xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
- xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL);
+ xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
/*
@@ -409,7 +338,7 @@ xfs_bmap_count_tree(
/*
* Count fsblocks of the given fork.
*/
-int /* error */
+static int /* error */
xfs_bmap_count_blocks(
xfs_trans_t *tp, /* transaction pointer */
xfs_inode_t *ip, /* incore inode */
@@ -757,7 +686,7 @@ xfs_bmap_punch_delalloc_range(
xfs_bmbt_irec_t imap;
int nimaps = 1;
xfs_fsblock_t firstblock;
- xfs_bmap_free_t flist;
+ struct xfs_defer_ops dfops;
/*
* Map the range first and check that it is a delalloc extent
@@ -788,18 +717,18 @@ xfs_bmap_punch_delalloc_range(
WARN_ON(imap.br_blockcount == 0);
/*
- * Note: while we initialise the firstblock/flist pair, they
+ * Note: while we initialise the firstblock/dfops pair, they
* should never be used because blocks should never be
* allocated or freed for a delalloc extent and hence we need not
* cancel or finish them after the xfs_bunmapi() call.
*/
- xfs_bmap_init(&flist, &firstblock);
+ xfs_defer_init(&dfops, &firstblock);
error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
- &flist, &done);
+ &dfops, &done);
if (error)
break;
- ASSERT(!flist.xbf_count && !flist.xbf_first);
+ ASSERT(!xfs_defer_has_unfinished_work(&dfops));
next_block:
start_fsb++;
remaining--;
@@ -956,7 +885,7 @@ xfs_alloc_file_space(
int rt;
xfs_trans_t *tp;
xfs_bmbt_irec_t imaps[1], *imapp;
- xfs_bmap_free_t free_list;
+ struct xfs_defer_ops dfops;
uint qblocks, resblks, resrtextents;
int error;
@@ -1047,17 +976,17 @@ xfs_alloc_file_space(
xfs_trans_ijoin(tp, ip, 0);
- xfs_bmap_init(&free_list, &firstfsb);
+ xfs_defer_init(&dfops, &firstfsb);
error = xfs_bmapi_write(tp, ip, startoffset_fsb,
allocatesize_fsb, alloc_type, &firstfsb,
- resblks, imapp, &nimaps, &free_list);
+ resblks, imapp, &nimaps, &dfops);
if (error)
goto error0;
/*
* Complete the transaction
*/
- error = xfs_bmap_finish(&tp, &free_list, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto error0;
@@ -1080,7 +1009,7 @@ xfs_alloc_file_space(
return error;
error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
error1: /* Just cancel transaction */
@@ -1089,99 +1018,120 @@ error1: /* Just cancel transaction */
return error;
}
-/*
- * Zero file bytes between startoff and endoff inclusive.
- * The iolock is held exclusive and no blocks are buffered.
- *
- * This function is used by xfs_free_file_space() to zero
- * partial blocks when the range to free is not block aligned.
- * When unreserving space with boundaries that are not block
- * aligned we round up the start and round down the end
- * boundaries and then use this function to zero the parts of
- * the blocks that got dropped during the rounding.
- */
-STATIC int
-xfs_zero_remaining_bytes(
- xfs_inode_t *ip,
- xfs_off_t startoff,
- xfs_off_t endoff)
+static int
+xfs_unmap_extent(
+ struct xfs_inode *ip,
+ xfs_fileoff_t startoffset_fsb,
+ xfs_filblks_t len_fsb,
+ int *done)
{
- xfs_bmbt_irec_t imap;
- xfs_fileoff_t offset_fsb;
- xfs_off_t lastoffset;
- xfs_off_t offset;
- xfs_buf_t *bp;
- xfs_mount_t *mp = ip->i_mount;
- int nimap;
- int error = 0;
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
+ struct xfs_defer_ops dfops;
+ xfs_fsblock_t firstfsb;
+ uint resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
+ int error;
- /*
- * Avoid doing I/O beyond eof - it's not necessary
- * since nothing can read beyond eof. The space will
- * be zeroed when the file is extended anyway.
- */
- if (startoff >= XFS_ISIZE(ip))
- return 0;
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
+ if (error) {
+ ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
+ return error;
+ }
- if (endoff > XFS_ISIZE(ip))
- endoff = XFS_ISIZE(ip);
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
+ ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
+ if (error)
+ goto out_trans_cancel;
- for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
- uint lock_mode;
+ xfs_trans_ijoin(tp, ip, 0);
- offset_fsb = XFS_B_TO_FSBT(mp, offset);
- nimap = 1;
+ xfs_defer_init(&dfops, &firstfsb);
+ error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, &firstfsb,
+ &dfops, done);
+ if (error)
+ goto out_bmap_cancel;
- lock_mode = xfs_ilock_data_map_shared(ip);
- error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
- xfs_iunlock(ip, lock_mode);
+ error = xfs_defer_finish(&tp, &dfops, ip);
+ if (error)
+ goto out_bmap_cancel;
- if (error || nimap < 1)
- break;
- ASSERT(imap.br_blockcount >= 1);
- ASSERT(imap.br_startoff == offset_fsb);
- ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
+ error = xfs_trans_commit(tp);
+out_unlock:
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return error;
- if (imap.br_startblock == HOLESTARTBLOCK ||
- imap.br_state == XFS_EXT_UNWRITTEN) {
- /* skip the entire extent */
- lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff +
- imap.br_blockcount) - 1;
- continue;
- }
+out_bmap_cancel:
+ xfs_defer_cancel(&dfops);
+out_trans_cancel:
+ xfs_trans_cancel(tp);
+ goto out_unlock;
+}
- lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
- if (lastoffset > endoff)
- lastoffset = endoff;
+static int
+xfs_adjust_extent_unmap_boundaries(
+ struct xfs_inode *ip,
+ xfs_fileoff_t *startoffset_fsb,
+ xfs_fileoff_t *endoffset_fsb)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_bmbt_irec imap;
+ int nimap, error;
+ xfs_extlen_t mod = 0;
- /* DAX can just zero the backing device directly */
- if (IS_DAX(VFS_I(ip))) {
- error = dax_zero_page_range(VFS_I(ip), offset,
- lastoffset - offset + 1,
- xfs_get_blocks_direct);
- if (error)
- return error;
- continue;
- }
+ nimap = 1;
+ error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
+ if (error)
+ return error;
- error = xfs_buf_read_uncached(XFS_IS_REALTIME_INODE(ip) ?
- mp->m_rtdev_targp : mp->m_ddev_targp,
- xfs_fsb_to_db(ip, imap.br_startblock),
- BTOBB(mp->m_sb.sb_blocksize),
- 0, &bp, NULL);
- if (error)
- return error;
+ if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
+ xfs_daddr_t block;
- memset(bp->b_addr +
- (offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
- 0, lastoffset - offset + 1);
+ ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
+ block = imap.br_startblock;
+ mod = do_div(block, mp->m_sb.sb_rextsize);
+ if (mod)
+ *startoffset_fsb += mp->m_sb.sb_rextsize - mod;
+ }
- error = xfs_bwrite(bp);
- xfs_buf_relse(bp);
- if (error)
- return error;
+ nimap = 1;
+ error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
+ if (error)
+ return error;
+
+ if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
+ ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
+ mod++;
+ if (mod && mod != mp->m_sb.sb_rextsize)
+ *endoffset_fsb -= mod;
}
- return error;
+
+ return 0;
+}
+
+static int
+xfs_flush_unmap_range(
+ struct xfs_inode *ip,
+ xfs_off_t offset,
+ xfs_off_t len)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct inode *inode = VFS_I(ip);
+ xfs_off_t rounding, start, end;
+ int error;
+
+ /* wait for the completion of any pending DIOs */
+ inode_dio_wait(inode);
+
+ rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
+ start = round_down(offset, rounding);
+ end = round_up(offset + len, rounding) - 1;
+
+ error = filemap_write_and_wait_range(inode->i_mapping, start, end);
+ if (error)
+ return error;
+ truncate_pagecache_range(inode, start, end);
+ return 0;
}
int
@@ -1190,24 +1140,10 @@ xfs_free_file_space(
xfs_off_t offset,
xfs_off_t len)
{
- int done;
- xfs_fileoff_t endoffset_fsb;
- int error;
- xfs_fsblock_t firstfsb;
- xfs_bmap_free_t free_list;
- xfs_bmbt_irec_t imap;
- xfs_off_t ioffset;
- xfs_off_t iendoffset;
- xfs_extlen_t mod=0;
- xfs_mount_t *mp;
- int nimap;
- uint resblks;
- xfs_off_t rounding;
- int rt;
+ struct xfs_mount *mp = ip->i_mount;
xfs_fileoff_t startoffset_fsb;
- xfs_trans_t *tp;
-
- mp = ip->i_mount;
+ xfs_fileoff_t endoffset_fsb;
+ int done = 0, error;
trace_xfs_free_file_space(ip);
@@ -1215,135 +1151,45 @@ xfs_free_file_space(
if (error)
return error;
- error = 0;
if (len <= 0) /* if nothing being freed */
- return error;
- rt = XFS_IS_REALTIME_INODE(ip);
- startoffset_fsb = XFS_B_TO_FSB(mp, offset);
- endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
-
- /* wait for the completion of any pending DIOs */
- inode_dio_wait(VFS_I(ip));
+ return 0;
- rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
- ioffset = round_down(offset, rounding);
- iendoffset = round_up(offset + len, rounding) - 1;
- error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ioffset,
- iendoffset);
+ error = xfs_flush_unmap_range(ip, offset, len);
if (error)
- goto out;
- truncate_pagecache_range(VFS_I(ip), ioffset, iendoffset);
+ return error;
+
+ startoffset_fsb = XFS_B_TO_FSB(mp, offset);
+ endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
/*
- * Need to zero the stuff we're not freeing, on disk.
- * If it's a realtime file & can't use unwritten extents then we
- * actually need to zero the extent edges. Otherwise xfs_bunmapi
- * will take care of it for us.
+ * Need to zero the stuff we're not freeing, on disk. If it's a RT file
+ * and we can't use unwritten extents then we actually need to zero the
+ * whole extent, otherwise we just need to take care of the block
+ * boundaries, and xfs_bunmapi will handle the rest.
*/
- if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
- nimap = 1;
- error = xfs_bmapi_read(ip, startoffset_fsb, 1,
- &imap, &nimap, 0);
- if (error)
- goto out;
- ASSERT(nimap == 0 || nimap == 1);
- if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
- xfs_daddr_t block;
-
- ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
- block = imap.br_startblock;
- mod = do_div(block, mp->m_sb.sb_rextsize);
- if (mod)
- startoffset_fsb += mp->m_sb.sb_rextsize - mod;
- }
- nimap = 1;
- error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
- &imap, &nimap, 0);
+ if (XFS_IS_REALTIME_INODE(ip) &&
+ !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
+ error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
+ &endoffset_fsb);
if (error)
- goto out;
- ASSERT(nimap == 0 || nimap == 1);
- if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
- ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
- mod++;
- if (mod && (mod != mp->m_sb.sb_rextsize))
- endoffset_fsb -= mod;
- }
- }
- if ((done = (endoffset_fsb <= startoffset_fsb)))
- /*
- * One contiguous piece to clear
- */
- error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
- else {
- /*
- * Some full blocks, possibly two pieces to clear
- */
- if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
- error = xfs_zero_remaining_bytes(ip, offset,
- XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
- if (!error &&
- XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
- error = xfs_zero_remaining_bytes(ip,
- XFS_FSB_TO_B(mp, endoffset_fsb),
- offset + len - 1);
+ return error;
}
- /*
- * free file space until done or until there is an error
- */
- resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
- while (!error && !done) {
-
- /*
- * allocate and setup the transaction. Allow this
- * transaction to dip into the reserve blocks to ensure
- * the freeing of the space succeeds at ENOSPC.
- */
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
- &tp);
- if (error) {
- ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
- break;
+ if (endoffset_fsb > startoffset_fsb) {
+ while (!done) {
+ error = xfs_unmap_extent(ip, startoffset_fsb,
+ endoffset_fsb - startoffset_fsb, &done);
+ if (error)
+ return error;
}
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- error = xfs_trans_reserve_quota(tp, mp,
- ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
- resblks, 0, XFS_QMOPT_RES_REGBLKS);
- if (error)
- goto error1;
-
- xfs_trans_ijoin(tp, ip, 0);
-
- /*
- * issue the bunmapi() call to free the blocks
- */
- xfs_bmap_init(&free_list, &firstfsb);
- error = xfs_bunmapi(tp, ip, startoffset_fsb,
- endoffset_fsb - startoffset_fsb,
- 0, 2, &firstfsb, &free_list, &done);
- if (error)
- goto error0;
-
- /*
- * complete the transaction
- */
- error = xfs_bmap_finish(&tp, &free_list, NULL);
- if (error)
- goto error0;
-
- error = xfs_trans_commit(tp);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
- out:
- return error;
-
- error0:
- xfs_bmap_cancel(&free_list);
- error1:
- xfs_trans_cancel(tp);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- goto out;
+ /*
+ * Now that we've unmapped all full blocks we'll have to zero out any
+ * partial block at the beginning and/or end. xfs_zero_range is
+ * smart enough to skip any holes, including those we just created.
+ */
+ return xfs_zero_range(ip, offset, len, NULL);
}
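The block-range arithmetic above is what splits the work: XFS_B_TO_FSB rounds the start offset up to the next filesystem block while XFS_B_TO_FSBT truncates the end, so only whole blocks are handed to the unmap loop and the partial edges fall through to xfs_zero_range(). A standalone sketch of that arithmetic, assuming 4096-byte blocks (illustrative userspace C, not kernel code):

#include <stdio.h>

#define BLKSZ 4096ULL

/* cf. XFS_B_TO_FSB: round a byte offset up to a filesystem block */
static unsigned long long b_to_fsb(unsigned long long b)
{
	return (b + BLKSZ - 1) / BLKSZ;
}

/* cf. XFS_B_TO_FSBT: truncate a byte offset to a filesystem block */
static unsigned long long b_to_fsbt(unsigned long long b)
{
	return b / BLKSZ;
}

int main(void)
{
	unsigned long long offset = 1000, len = 10000;
	unsigned long long start_fsb = b_to_fsb(offset);	/* 1 */
	unsigned long long end_fsb = b_to_fsbt(offset + len);	/* 2 */

	/*
	 * Whole blocks [start_fsb, end_fsb) are unmapped; the partial
	 * head [1000, 4096) and tail [8192, 11000) are zeroed instead.
	 */
	printf("unmap fsb %llu to %llu, zero the edges\n",
	       start_fsb, end_fsb - 1);
	return 0;
}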
/*
@@ -1405,7 +1251,7 @@ xfs_shift_file_space(
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
int error;
- struct xfs_bmap_free free_list;
+ struct xfs_defer_ops dfops;
xfs_fsblock_t first_block;
xfs_fileoff_t stop_fsb;
xfs_fileoff_t next_fsb;
@@ -1483,19 +1329,19 @@ xfs_shift_file_space(
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_bmap_init(&free_list, &first_block);
+ xfs_defer_init(&dfops, &first_block);
/*
* We are using the write transaction in which max 2 bmbt
* updates are allowed
*/
error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
- &done, stop_fsb, &first_block, &free_list,
+ &done, stop_fsb, &first_block, &dfops,
direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
if (error)
goto out_bmap_cancel;
- error = xfs_bmap_finish(&tp, &free_list, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto out_bmap_cancel;
@@ -1505,7 +1351,7 @@ xfs_shift_file_space(
return error;
out_bmap_cancel:
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
return error;
@@ -1689,6 +1535,10 @@ xfs_swap_extents(
__uint64_t tmp;
int lock_flags;
+ /* XXX: we can't do this with rmap, will fix later */
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb))
+ return -EOPNOTSUPP;
+
tempifp = kmem_alloc(sizeof(xfs_ifork_t), KM_MAYFAIL);
if (!tempifp) {
error = -ENOMEM;
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index af97d9a1dfb4..68a621a8e0c0 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -21,7 +21,7 @@
/* Kernel only BMAP related definitions and functions */
struct xfs_bmbt_irec;
-struct xfs_bmap_free_item;
+struct xfs_extent_free_item;
struct xfs_ifork;
struct xfs_inode;
struct xfs_mount;
@@ -31,8 +31,6 @@ struct xfs_bmalloca;
int xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
int whichfork, int *eof);
-int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
- int whichfork, int *count);
int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
xfs_fileoff_t start_fsb, xfs_fileoff_t length);
@@ -42,9 +40,6 @@ int xfs_getbmap(struct xfs_inode *ip, struct getbmapx *bmv,
xfs_bmap_format_t formatter, void *arg);
/* functions in xfs_bmap.c that are only needed by xfs_bmap_util.c */
-void xfs_bmap_del_free(struct xfs_bmap_free *flist,
- struct xfs_bmap_free_item *prev,
- struct xfs_bmap_free_item *free);
int xfs_bmap_extsize_align(struct xfs_mount *mp, struct xfs_bmbt_irec *gotp,
struct xfs_bmbt_irec *prevp, xfs_extlen_t extsz,
int rt, int eof, int delay, int convert,
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index e71cfbd5acb3..47a318ce82e0 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -80,6 +80,47 @@ xfs_buf_vmap_len(
}
/*
+ * Bump the I/O in flight count on the buftarg if we haven't yet done so for
+ * this buffer. The count is incremented once per buffer (per hold cycle)
+ * because the corresponding decrement is deferred to buffer release. Buffers
+ * can undergo I/O multiple times in a hold-release cycle and per buffer I/O
+ * tracking adds unnecessary overhead. This is used for synchronization purposes
+ * with unmount (see xfs_wait_buftarg()), so all we really need is a count of
+ * in-flight buffers.
+ *
+ * Buffers that are never released (e.g., superblock, iclog buffers) must set
+ * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count
+ * never reaches zero and unmount hangs indefinitely.
+ */
+static inline void
+xfs_buf_ioacct_inc(
+ struct xfs_buf *bp)
+{
+ if (bp->b_flags & (XBF_NO_IOACCT|_XBF_IN_FLIGHT))
+ return;
+
+ ASSERT(bp->b_flags & XBF_ASYNC);
+ bp->b_flags |= _XBF_IN_FLIGHT;
+ percpu_counter_inc(&bp->b_target->bt_io_count);
+}
+
+/*
+ * Clear the in-flight state on a buffer about to be released to the LRU or
+ * freed and unaccount from the buftarg.
+ */
+static inline void
+xfs_buf_ioacct_dec(
+ struct xfs_buf *bp)
+{
+ if (!(bp->b_flags & _XBF_IN_FLIGHT))
+ return;
+
+ ASSERT(bp->b_flags & XBF_ASYNC);
+ bp->b_flags &= ~_XBF_IN_FLIGHT;
+ percpu_counter_dec(&bp->b_target->bt_io_count);
+}
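These two helpers implement once-per-hold-cycle accounting: _XBF_IN_FLIGHT makes the increment idempotent until release (or staling) clears it again, so a buffer resubmitted several times under one hold contributes exactly one count. A minimal single-threaded analogue of the pattern (illustrative C; the kernel pairs the flag with a per-CPU counter and the buffer lock):

#include <assert.h>
#include <stdio.h>

#define BUF_IN_FLIGHT	(1u << 0)

struct buf { unsigned int flags; };

static long io_count;	/* stands in for the buftarg percpu counter */

static void ioacct_inc(struct buf *bp)
{
	if (bp->flags & BUF_IN_FLIGHT)
		return;		/* already accounted for this hold cycle */
	bp->flags |= BUF_IN_FLIGHT;
	io_count++;
}

static void ioacct_dec(struct buf *bp)
{
	if (!(bp->flags & BUF_IN_FLIGHT))
		return;
	bp->flags &= ~BUF_IN_FLIGHT;
	io_count--;
}

int main(void)
{
	struct buf b = { 0 };

	ioacct_inc(&b);		/* first submission: count goes to 1 */
	ioacct_inc(&b);		/* resubmit in the same hold cycle: no-op */
	assert(io_count == 1);
	ioacct_dec(&b);		/* release: count drops exactly once */
	assert(io_count == 0);
	printf("in-flight count balanced\n");
	return 0;
}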
+
+/*
* When we mark a buffer stale, we remove the buffer from the LRU and clear the
* b_lru_ref count so that the buffer is freed immediately when the buffer
* reference count falls to zero. If the buffer is already on the LRU, we need
@@ -102,6 +143,14 @@ xfs_buf_stale(
*/
bp->b_flags &= ~_XBF_DELWRI_Q;
+ /*
+ * Once the buffer is marked stale and unlocked, a subsequent lookup
+ * could reset b_flags. There is no guarantee that the buffer is
+ * unaccounted (released to LRU) before that occurs. Drop in-flight
+ * status now to preserve accounting consistency.
+ */
+ xfs_buf_ioacct_dec(bp);
+
spin_lock(&bp->b_lock);
atomic_set(&bp->b_lru_ref, 0);
if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
@@ -815,7 +864,8 @@ xfs_buf_get_uncached(
struct xfs_buf *bp;
DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
- bp = _xfs_buf_alloc(target, &map, 1, 0);
+ /* flags might contain irrelevant bits, pass only what we care about */
+ bp = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT);
if (unlikely(bp == NULL))
goto fail;
@@ -866,63 +916,85 @@ xfs_buf_hold(
}
/*
- * Releases a hold on the specified buffer. If the
- * the hold count is 1, calls xfs_buf_free.
+ * Release a hold on the specified buffer. If the hold count is 1, the buffer is
+ * placed on LRU or freed (depending on b_lru_ref).
*/
void
xfs_buf_rele(
xfs_buf_t *bp)
{
struct xfs_perag *pag = bp->b_pag;
+ bool release;
+ bool freebuf = false;
trace_xfs_buf_rele(bp, _RET_IP_);
if (!pag) {
ASSERT(list_empty(&bp->b_lru));
ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
- if (atomic_dec_and_test(&bp->b_hold))
+ if (atomic_dec_and_test(&bp->b_hold)) {
+ xfs_buf_ioacct_dec(bp);
xfs_buf_free(bp);
+ }
return;
}
ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
ASSERT(atomic_read(&bp->b_hold) > 0);
- if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
- spin_lock(&bp->b_lock);
- if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
- /*
- * If the buffer is added to the LRU take a new
- * reference to the buffer for the LRU and clear the
- * (now stale) dispose list state flag
- */
- if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
- bp->b_state &= ~XFS_BSTATE_DISPOSE;
- atomic_inc(&bp->b_hold);
- }
- spin_unlock(&bp->b_lock);
- spin_unlock(&pag->pag_buf_lock);
- } else {
- /*
- * most of the time buffers will already be removed from
- * the LRU, so optimise that case by checking for the
- * XFS_BSTATE_DISPOSE flag indicating the last list the
- * buffer was on was the disposal list
- */
- if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
- list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
- } else {
- ASSERT(list_empty(&bp->b_lru));
- }
- spin_unlock(&bp->b_lock);
- ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
- rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
- spin_unlock(&pag->pag_buf_lock);
- xfs_perag_put(pag);
- xfs_buf_free(bp);
+ release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
+ spin_lock(&bp->b_lock);
+ if (!release) {
+ /*
+ * Drop the in-flight state if the buffer is already on the LRU
+ * and it holds the only reference. This is racy because we
+ * haven't acquired the pag lock, but the use of _XBF_IN_FLIGHT
+ * ensures the decrement occurs only once per-buf.
+ */
+ if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
+ xfs_buf_ioacct_dec(bp);
+ goto out_unlock;
+ }
+
+ /* the last reference has been dropped ... */
+ xfs_buf_ioacct_dec(bp);
+ if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
+ /*
+ * If the buffer is added to the LRU take a new reference to the
+ * buffer for the LRU and clear the (now stale) dispose list
+ * state flag
+ */
+ if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
+ bp->b_state &= ~XFS_BSTATE_DISPOSE;
+ atomic_inc(&bp->b_hold);
+ }
+ spin_unlock(&pag->pag_buf_lock);
+ } else {
+ /*
+ * most of the time buffers will already be removed from the
+ * LRU, so optimise that case by checking for the
+ * XFS_BSTATE_DISPOSE flag indicating the last list the buffer
+ * was on was the disposal list
+ */
+ if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
+ list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
+ } else {
+ ASSERT(list_empty(&bp->b_lru));
}
+
+ ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
+ rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
+ spin_unlock(&pag->pag_buf_lock);
+ xfs_perag_put(pag);
+ freebuf = true;
}
+
+out_unlock:
+ spin_unlock(&bp->b_lock);
+
+ if (freebuf)
+ xfs_buf_free(bp);
}
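The rework above leans on atomic_dec_and_lock(): pag_buf_lock is only taken when the hold count actually reaches zero, so the common release path stays lock-free. A rough userspace sketch of those semantics using C11 atomics and pthreads (illustrative only, not the kernel implementation):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Decrement *hold; return true with list_lock held iff it hit zero. */
static bool dec_and_lock(atomic_int *hold)
{
	int old = atomic_load(hold);

	/* Fast path: while the count stays positive, avoid the lock. */
	while (old > 1) {
		if (atomic_compare_exchange_weak(hold, &old, old - 1))
			return false;
	}

	/* Slow path: this drop may be the last one, serialise it. */
	pthread_mutex_lock(&list_lock);
	if (atomic_fetch_sub(hold, 1) == 1)
		return true;	/* caller disposes of the object */
	pthread_mutex_unlock(&list_lock);
	return false;
}

int main(void)
{
	atomic_int hold = 2;

	printf("%d\n", dec_and_lock(&hold));	/* 0: count 2 -> 1, no lock */
	if (dec_and_lock(&hold)) {		/* 1: count 1 -> 0, locked */
		printf("last reference\n");
		pthread_mutex_unlock(&list_lock);
	}
	return 0;
}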
@@ -944,10 +1016,12 @@ xfs_buf_trylock(
int locked;
locked = down_trylock(&bp->b_sema) == 0;
- if (locked)
+ if (locked) {
XB_SET_OWNER(bp);
-
- trace_xfs_buf_trylock(bp, _RET_IP_);
+ trace_xfs_buf_trylock(bp, _RET_IP_);
+ } else {
+ trace_xfs_buf_trylock_fail(bp, _RET_IP_);
+ }
return locked;
}
@@ -1127,7 +1201,8 @@ xfs_buf_ioapply_map(
int map,
int *buf_offset,
int *count,
- int rw)
+ int op,
+ int op_flags)
{
int page_index;
int total_nr_pages = bp->b_page_count;
@@ -1157,16 +1232,14 @@ xfs_buf_ioapply_map(
next_chunk:
atomic_inc(&bp->b_io_remaining);
- nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
- if (nr_pages > total_nr_pages)
- nr_pages = total_nr_pages;
+ nr_pages = min(total_nr_pages, BIO_MAX_PAGES);
bio = bio_alloc(GFP_NOIO, nr_pages);
bio->bi_bdev = bp->b_target->bt_bdev;
bio->bi_iter.bi_sector = sector;
bio->bi_end_io = xfs_buf_bio_end_io;
bio->bi_private = bp;
-
+ bio_set_op_attrs(bio, op, op_flags);
for (; size && nr_pages; nr_pages--, page_index++) {
int rbytes, nbytes = PAGE_SIZE - offset;
@@ -1190,7 +1263,7 @@ next_chunk:
flush_kernel_vmap_range(bp->b_addr,
xfs_buf_vmap_len(bp));
}
- submit_bio(rw, bio);
+ submit_bio(bio);
if (size)
goto next_chunk;
} else {
@@ -1210,7 +1283,8 @@ _xfs_buf_ioapply(
struct xfs_buf *bp)
{
struct blk_plug plug;
- int rw;
+ int op;
+ int op_flags = 0;
int offset;
int size;
int i;
@@ -1229,14 +1303,13 @@ _xfs_buf_ioapply(
bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue;
if (bp->b_flags & XBF_WRITE) {
+ op = REQ_OP_WRITE;
if (bp->b_flags & XBF_SYNCIO)
- rw = WRITE_SYNC;
- else
- rw = WRITE;
+ op_flags = WRITE_SYNC;
if (bp->b_flags & XBF_FUA)
- rw |= REQ_FUA;
+ op_flags |= REQ_FUA;
if (bp->b_flags & XBF_FLUSH)
- rw |= REQ_FLUSH;
+ op_flags |= REQ_PREFLUSH;
/*
* Run the write verifier callback function if it exists. If
@@ -1266,13 +1339,14 @@ _xfs_buf_ioapply(
}
}
} else if (bp->b_flags & XBF_READ_AHEAD) {
- rw = READA;
+ op = REQ_OP_READ;
+ op_flags = REQ_RAHEAD;
} else {
- rw = READ;
+ op = REQ_OP_READ;
}
/* we only use the buffer cache for meta-data */
- rw |= REQ_META;
+ op_flags |= REQ_META;
/*
* Walk all the vectors issuing IO on them. Set up the initial offset
@@ -1284,7 +1358,7 @@ _xfs_buf_ioapply(
size = BBTOB(bp->b_io_length);
blk_start_plug(&plug);
for (i = 0; i < bp->b_map_count; i++) {
- xfs_buf_ioapply_map(bp, i, &offset, &size, rw);
+ xfs_buf_ioapply_map(bp, i, &offset, &size, op, op_flags);
if (bp->b_error)
break;
if (size <= 0)
@@ -1339,6 +1413,7 @@ xfs_buf_submit(
* xfs_buf_ioend too early.
*/
atomic_set(&bp->b_io_remaining, 1);
+ xfs_buf_ioacct_inc(bp);
_xfs_buf_ioapply(bp);
/*
@@ -1524,13 +1599,19 @@ xfs_wait_buftarg(
int loop = 0;
/*
- * We need to flush the buffer workqueue to ensure that all IO
- * completion processing is 100% done. Just waiting on buffer locks is
- * not sufficient for async IO as the reference count held over IO is
- * not released until after the buffer lock is dropped. Hence we need to
- * ensure here that all reference counts have been dropped before we
- * start walking the LRU list.
+ * First wait on the buftarg I/O count for all in-flight buffers to be
+ * released. This is critical as new buffers do not make the LRU until
+ * they are released.
+ *
+ * Next, flush the buffer workqueue to ensure all completion processing
+ * has finished. Just waiting on buffer locks is not sufficient for
+ * async IO as the reference count held over IO is not released until
+ * after the buffer lock is dropped. Hence we need to ensure here that
+ * all reference counts have been dropped before we start walking the
+ * LRU list.
*/
+ while (percpu_counter_sum(&btp->bt_io_count))
+ delay(100);
drain_workqueue(btp->bt_mount->m_buf_workqueue);
/* loop until there is nothing left on the lru list. */
@@ -1627,6 +1708,8 @@ xfs_free_buftarg(
struct xfs_buftarg *btp)
{
unregister_shrinker(&btp->bt_shrinker);
+ ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
+ percpu_counter_destroy(&btp->bt_io_count);
list_lru_destroy(&btp->bt_lru);
if (mp->m_flags & XFS_MOUNT_BARRIER)
@@ -1691,6 +1774,9 @@ xfs_alloc_buftarg(
if (list_lru_init(&btp->bt_lru))
goto error;
+ if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
+ goto error;
+
btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
btp->bt_shrinker.seeks = DEFAULT_SEEKS;
@@ -1774,18 +1860,33 @@ xfs_buf_cmp(
return 0;
}
+/*
+ * submit buffers for write.
+ *
+ * When we have a large buffer list, we do not want to hold all the buffers
+ * locked while we block on the request queue waiting for IO dispatch. To avoid
+ * this problem, we lock and submit buffers in groups of 50, thereby minimising
+ * the lock hold times for lists which may contain thousands of objects.
+ *
+ * To do this, we sort the buffer list before we walk the list to lock and
+ * submit buffers, and we plug and unplug around each group of buffers we
+ * submit.
+ */
static int
-__xfs_buf_delwri_submit(
+xfs_buf_delwri_submit_buffers(
struct list_head *buffer_list,
- struct list_head *io_list,
- bool wait)
+ struct list_head *wait_list)
{
- struct blk_plug plug;
struct xfs_buf *bp, *n;
+ LIST_HEAD (submit_list);
int pinned = 0;
+ struct blk_plug plug;
+ list_sort(NULL, buffer_list, xfs_buf_cmp);
+
+ blk_start_plug(&plug);
list_for_each_entry_safe(bp, n, buffer_list, b_list) {
- if (!wait) {
+ if (!wait_list) {
if (xfs_buf_ispinned(bp)) {
pinned++;
continue;
@@ -1808,25 +1909,21 @@ __xfs_buf_delwri_submit(
continue;
}
- list_move_tail(&bp->b_list, io_list);
trace_xfs_buf_delwri_split(bp, _RET_IP_);
- }
-
- list_sort(NULL, io_list, xfs_buf_cmp);
-
- blk_start_plug(&plug);
- list_for_each_entry_safe(bp, n, io_list, b_list) {
- bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
- bp->b_flags |= XBF_WRITE | XBF_ASYNC;
/*
- * we do all Io submission async. This means if we need to wait
- * for IO completion we need to take an extra reference so the
- * buffer is still valid on the other side.
+ * We do all IO submission async. This means if we need
+ * to wait for IO completion we need to take an extra
+ * reference so the buffer is still valid on the other
+ * side. We need to move the buffer onto the wait list
+ * at this point so the caller can still access it.
*/
- if (wait)
+ bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL);
+ bp->b_flags |= XBF_WRITE | XBF_ASYNC;
+ if (wait_list) {
xfs_buf_hold(bp);
- else
+ list_move_tail(&bp->b_list, wait_list);
+ } else
list_del_init(&bp->b_list);
xfs_buf_submit(bp);
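Per the comment introduced above, the list is now sorted once up front and the whole walk happens under a single plug, rather than sorting a partially-built io_list later. The effect of sorting before submission is easy to see in a userspace sketch with qsort() standing in for list_sort() (illustrative names throughout):

#include <stdio.h>
#include <stdlib.h>

struct fake_buf { unsigned long long bn; };	/* cf. bp->b_bn */

static int buf_cmp(const void *a, const void *b)
{
	const struct fake_buf *x = a, *y = b;

	if (x->bn < y->bn)
		return -1;
	if (x->bn > y->bn)
		return 1;
	return 0;
}

int main(void)
{
	struct fake_buf bufs[] = { { 900 }, { 12 }, { 512 }, { 7 } };
	size_t i, n = sizeof(bufs) / sizeof(bufs[0]);

	qsort(bufs, n, sizeof(bufs[0]), buf_cmp);	/* cf. list_sort() */
	for (i = 0; i < n; i++)	/* writes go out in ascending block order */
		printf("submit block %llu\n", bufs[i].bn);
	return 0;
}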
@@ -1849,8 +1946,7 @@ int
xfs_buf_delwri_submit_nowait(
struct list_head *buffer_list)
{
- LIST_HEAD (io_list);
- return __xfs_buf_delwri_submit(buffer_list, &io_list, false);
+ return xfs_buf_delwri_submit_buffers(buffer_list, NULL);
}
/*
@@ -1865,15 +1961,15 @@ int
xfs_buf_delwri_submit(
struct list_head *buffer_list)
{
- LIST_HEAD (io_list);
+ LIST_HEAD (wait_list);
int error = 0, error2;
struct xfs_buf *bp;
- __xfs_buf_delwri_submit(buffer_list, &io_list, true);
+ xfs_buf_delwri_submit_buffers(buffer_list, &wait_list);
/* Wait for IO to complete. */
- while (!list_empty(&io_list)) {
- bp = list_first_entry(&io_list, struct xfs_buf, b_list);
+ while (!list_empty(&wait_list)) {
+ bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
list_del_init(&bp->b_list);
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 8bfb974f0772..1c2e52b2d926 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -43,6 +43,7 @@ typedef enum {
#define XBF_READ (1 << 0) /* buffer intended for reading from device */
#define XBF_WRITE (1 << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD (1 << 2) /* asynchronous read-ahead */
+#define XBF_NO_IOACCT (1 << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */
#define XBF_STALE (1 << 6) /* buffer has been staled, do not find it */
@@ -62,6 +63,7 @@ typedef enum {
#define _XBF_KMEM (1 << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q (1 << 22)/* buffer on a delwri queue */
#define _XBF_COMPOUND (1 << 23)/* compound buffer */
+#define _XBF_IN_FLIGHT (1 << 25) /* I/O in flight, for accounting purposes */
typedef unsigned int xfs_buf_flags_t;
@@ -81,7 +83,8 @@ typedef unsigned int xfs_buf_flags_t;
{ _XBF_PAGES, "PAGES" }, \
{ _XBF_KMEM, "KMEM" }, \
{ _XBF_DELWRI_Q, "DELWRI_Q" }, \
- { _XBF_COMPOUND, "COMPOUND" }
+ { _XBF_COMPOUND, "COMPOUND" }, \
+ { _XBF_IN_FLIGHT, "IN_FLIGHT" }
/*
@@ -115,6 +118,8 @@ typedef struct xfs_buftarg {
/* LRU control structures */
struct shrinker bt_shrinker;
struct list_lru bt_lru;
+
+ struct percpu_counter bt_io_count;
} xfs_buftarg_t;
struct xfs_buf;
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 34257992934c..e455f9098d49 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -359,7 +359,7 @@ xfs_buf_item_format(
for (i = 0; i < bip->bli_format_count; i++) {
xfs_buf_item_format_segment(bip, lv, &vecp, offset,
&bip->bli_formats[i]);
- offset += bp->b_maps[i].bm_len;
+ offset += BBTOB(bp->b_maps[i].bm_len);
}
/*
@@ -915,20 +915,28 @@ xfs_buf_item_log(
for (i = 0; i < bip->bli_format_count; i++) {
if (start > last)
break;
- end = start + BBTOB(bp->b_maps[i].bm_len);
+ end = start + BBTOB(bp->b_maps[i].bm_len) - 1;
+
+ /* skip to the map that includes the first byte to log */
if (first > end) {
start += BBTOB(bp->b_maps[i].bm_len);
continue;
}
+
+ /*
+ * Trim the range to this segment and mark it in the bitmap.
+ * Note that we must convert buffer offsets to segment relative
+ * offsets (e.g., the first byte of each segment is byte 0 of
+ * that segment).
+ */
if (first < start)
first = start;
if (end > last)
end = last;
-
- xfs_buf_item_log_segment(first, end,
+ xfs_buf_item_log_segment(first - start, end - start,
&bip->bli_formats[i].blf_data_map[0]);
- start += bp->b_maps[i].bm_len;
+ start += BBTOB(bp->b_maps[i].bm_len);
}
}
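Both hunks above are unit fixes: bm_len is measured in 512-byte basic blocks, so every byte offset derived from it needs BBTOB(), and the dirty byte range must be rebased so each segment's bitmap starts at byte 0. A worked standalone example of the trim-and-rebase (illustrative values):

#include <stdio.h>

#define BBTOB(bbs)	((bbs) << 9)	/* basic blocks to bytes */

int main(void)
{
	unsigned int bm_len = 8;	/* segment covers 8 BBs = 4096 bytes */
	unsigned int start = 4096;	/* this segment begins at byte 4096 */
	unsigned int end = start + BBTOB(bm_len) - 1;	/* inclusive: 8191 */
	unsigned int first = 4600, last = 5000;	/* dirty range to log */

	if (first < start)
		first = start;
	if (end > last)
		end = last;		/* trimmed to 5000 */

	/* mark bytes [first - start, end - start] of this segment's bitmap */
	printf("segment-relative range: %u..%u\n", first - start, end - start);
	return 0;
}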
@@ -949,6 +957,7 @@ xfs_buf_item_free(
xfs_buf_log_item_t *bip)
{
xfs_buf_item_free_format(bip);
+ kmem_free(bip->bli_item.li_lv_shadow);
kmem_zone_free(xfs_buf_item_zone, bip);
}
@@ -1073,6 +1082,8 @@ xfs_buf_iodone_callback_error(
trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
ASSERT(bp->b_iodone != NULL);
+ cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);
+
/*
* If the write was asynchronous then no one will be looking for the
* error. If this is the first failure of this type, clear the error
@@ -1080,13 +1091,12 @@ xfs_buf_iodone_callback_error(
* async write failure at least once, but we also need to set the buffer
* up to behave correctly now for repeated failures.
*/
- if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL)) ||
+ if (!(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL)) ||
bp->b_last_error != bp->b_error) {
- bp->b_flags |= (XBF_WRITE | XBF_ASYNC |
- XBF_DONE | XBF_WRITE_FAIL);
+ bp->b_flags |= (XBF_WRITE | XBF_DONE | XBF_WRITE_FAIL);
bp->b_last_error = bp->b_error;
- bp->b_retries = 0;
- bp->b_first_retry_time = jiffies;
+ if (cfg->retry_timeout && !bp->b_first_retry_time)
+ bp->b_first_retry_time = jiffies;
xfs_buf_ioerror(bp, 0);
xfs_buf_submit(bp);
@@ -1097,7 +1107,6 @@ xfs_buf_iodone_callback_error(
* Repeated failure on an async write. Take action according to the
* error configuration we have been set up to use.
*/
- cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);
if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
++bp->b_retries > cfg->max_retries)
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 272c3f8b6f7d..4ff499aa7338 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -179,7 +179,7 @@ xfs_ioc_trim(
* matter as trimming blocks is an advisory interface.
*/
if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) ||
- range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)) ||
+ range.minlen > XFS_FSB_TO_B(mp, mp->m_ag_max_usable) ||
range.len < mp->m_sb.sb_blocksize)
return -EINVAL;
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index e0646659ce16..7a30b8f11db7 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -23,6 +23,7 @@
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
@@ -74,6 +75,7 @@ xfs_qm_dqdestroy(
{
ASSERT(list_empty(&dqp->q_lru));
+ kmem_free(dqp->q_logitem.qli_item.li_lv_shadow);
mutex_destroy(&dqp->q_qlock);
XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
@@ -306,7 +308,7 @@ xfs_qm_dqalloc(
xfs_buf_t **O_bpp)
{
xfs_fsblock_t firstblock;
- xfs_bmap_free_t flist;
+ struct xfs_defer_ops dfops;
xfs_bmbt_irec_t map;
int nmaps, error;
xfs_buf_t *bp;
@@ -319,7 +321,7 @@ xfs_qm_dqalloc(
/*
* Initialize the bmap freelist prior to calling bmapi code.
*/
- xfs_bmap_init(&flist, &firstblock);
+ xfs_defer_init(&dfops, &firstblock);
xfs_ilock(quotip, XFS_ILOCK_EXCL);
/*
* Return if this type of quotas is turned off while we didn't
@@ -335,7 +337,7 @@ xfs_qm_dqalloc(
error = xfs_bmapi_write(tp, quotip, offset_fsb,
XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
&firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
- &map, &nmaps, &flist);
+ &map, &nmaps, &dfops);
if (error)
goto error0;
ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
@@ -367,7 +369,7 @@ xfs_qm_dqalloc(
dqp->dq_flags & XFS_DQ_ALLTYPES, bp);
/*
- * xfs_bmap_finish() may commit the current transaction and
+ * xfs_defer_finish() may commit the current transaction and
* start a second transaction if the freelist is not empty.
*
* Since we still want to modify this buffer, we need to
@@ -381,7 +383,7 @@ xfs_qm_dqalloc(
xfs_trans_bhold(tp, bp);
- error = xfs_bmap_finish(tpp, &flist, NULL);
+ error = xfs_defer_finish(tpp, &dfops, NULL);
if (error)
goto error1;
@@ -397,7 +399,7 @@ xfs_qm_dqalloc(
return 0;
error1:
- xfs_bmap_cancel(&flist);
+ xfs_defer_cancel(&dfops);
error0:
xfs_iunlock(quotip, XFS_ILOCK_EXCL);
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index 814cff94e78f..2c7a1629e064 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -370,6 +370,8 @@ xfs_qm_qoffend_logitem_committed(
spin_lock(&ailp->xa_lock);
xfs_trans_ail_delete(ailp, &qfs->qql_item, SHUTDOWN_LOG_IO_ERROR);
+ kmem_free(qfs->qql_item.li_lv_shadow);
+ kmem_free(lip->li_lv_shadow);
kmem_free(qfs);
kmem_free(qfe);
return (xfs_lsn_t)-1;
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 88693a98fac5..ed7ee4e8af73 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -55,12 +55,15 @@ xfs_error_test(int error_tag, int *fsidp, char *expression,
}
int
-xfs_errortag_add(int error_tag, xfs_mount_t *mp)
+xfs_errortag_add(unsigned int error_tag, xfs_mount_t *mp)
{
int i;
int len;
int64_t fsid;
+ if (error_tag >= XFS_ERRTAG_MAX)
+ return -EINVAL;
+
memcpy(&fsid, mp->m_fixedfsid, sizeof(xfs_fsid_t));
for (i = 0; i < XFS_NUM_INJECT_ERROR; i++) {
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index 4ed3042a0f16..3d224702fbc0 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -90,7 +90,9 @@ extern void xfs_verifier_error(struct xfs_buf *bp);
#define XFS_ERRTAG_STRATCMPL_IOERR 19
#define XFS_ERRTAG_DIOWRITE_IOERR 20
#define XFS_ERRTAG_BMAPIFORMAT 21
-#define XFS_ERRTAG_MAX 22
+#define XFS_ERRTAG_FREE_EXTENT 22
+#define XFS_ERRTAG_RMAP_FINISH_ONE 23
+#define XFS_ERRTAG_MAX 24
/*
* Random factors for above tags, 1 means always, 2 means 1/2 time, etc.
@@ -117,6 +119,8 @@ extern void xfs_verifier_error(struct xfs_buf *bp);
#define XFS_RANDOM_STRATCMPL_IOERR (XFS_RANDOM_DEFAULT/10)
#define XFS_RANDOM_DIOWRITE_IOERR (XFS_RANDOM_DEFAULT/10)
#define XFS_RANDOM_BMAPIFORMAT XFS_RANDOM_DEFAULT
+#define XFS_RANDOM_FREE_EXTENT 1
+#define XFS_RANDOM_RMAP_FINISH_ONE 1
#ifdef DEBUG
extern int xfs_error_test_active;
@@ -128,7 +132,7 @@ extern int xfs_error_test(int, int *, char *, int, char *, unsigned long);
xfs_error_test((tag), (mp)->m_fixedfsid, "expr", __LINE__, __FILE__, \
(rf))))
-extern int xfs_errortag_add(int error_tag, struct xfs_mount *mp);
+extern int xfs_errortag_add(unsigned int error_tag, struct xfs_mount *mp);
extern int xfs_errortag_clearall(struct xfs_mount *mp, int loud);
#else
#define XFS_TEST_ERROR(expr, mp, tag, rf) (expr)
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index a1b2dd828b9d..fe1bfee35898 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -246,7 +246,7 @@ const struct export_operations xfs_export_operations = {
.fh_to_parent = xfs_fs_fh_to_parent,
.get_parent = xfs_fs_get_parent,
.commit_metadata = xfs_fs_nfs_commit_metadata,
-#ifdef CONFIG_NFSD_BLOCKLAYOUT
+#ifdef CONFIG_EXPORTFS_BLOCK_OPS
.get_uuid = xfs_fs_get_uuid,
.map_blocks = xfs_fs_map_blocks,
.commit_blocks = xfs_fs_commit_blocks,
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index 4aa0153214f9..d7bc14906af8 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -20,12 +20,15 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_buf_item.h"
#include "xfs_extfree_item.h"
#include "xfs_log.h"
+#include "xfs_btree.h"
+#include "xfs_rmap.h"
kmem_zone_t *xfs_efi_zone;
@@ -40,6 +43,7 @@ void
xfs_efi_item_free(
struct xfs_efi_log_item *efip)
{
+ kmem_free(efip->efi_item.li_lv_shadow);
if (efip->efi_format.efi_nextents > XFS_EFI_MAX_FAST_EXTENTS)
kmem_free(efip);
else
@@ -300,6 +304,7 @@ static inline struct xfs_efd_log_item *EFD_ITEM(struct xfs_log_item *lip)
STATIC void
xfs_efd_item_free(struct xfs_efd_log_item *efdp)
{
+ kmem_free(efdp->efd_item.li_lv_shadow);
if (efdp->efd_format.efd_nextents > XFS_EFD_MAX_FAST_EXTENTS)
kmem_free(efdp);
else
@@ -484,3 +489,69 @@ xfs_efd_init(
return efdp;
}
+
+/*
+ * Process an extent free intent item that was recovered from
+ * the log. We need to free the extents that it describes.
+ */
+int
+xfs_efi_recover(
+ struct xfs_mount *mp,
+ struct xfs_efi_log_item *efip)
+{
+ struct xfs_efd_log_item *efdp;
+ struct xfs_trans *tp;
+ int i;
+ int error = 0;
+ xfs_extent_t *extp;
+ xfs_fsblock_t startblock_fsb;
+ struct xfs_owner_info oinfo;
+
+ ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
+
+ /*
+ * First check the validity of the extents described by the
+ * EFI. If any are bad, then assume that all are bad and
+ * just toss the EFI.
+ */
+ for (i = 0; i < efip->efi_format.efi_nextents; i++) {
+ extp = &efip->efi_format.efi_extents[i];
+ startblock_fsb = XFS_BB_TO_FSB(mp,
+ XFS_FSB_TO_DADDR(mp, extp->ext_start));
+ if (startblock_fsb == 0 ||
+ extp->ext_len == 0 ||
+ startblock_fsb >= mp->m_sb.sb_dblocks ||
+ extp->ext_len >= mp->m_sb.sb_agblocks) {
+ /*
+ * This will pull the EFI from the AIL and
+ * free the memory associated with it.
+ */
+ set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
+ xfs_efi_release(efip);
+ return -EIO;
+ }
+ }
+
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+ if (error)
+ return error;
+ efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
+
+ xfs_rmap_skip_owner_update(&oinfo);
+ for (i = 0; i < efip->efi_format.efi_nextents; i++) {
+ extp = &efip->efi_format.efi_extents[i];
+ error = xfs_trans_free_extent(tp, efdp, extp->ext_start,
+ extp->ext_len, &oinfo);
+ if (error)
+ goto abort_error;
+
+ }
+
+ set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
+ error = xfs_trans_commit(tp);
+ return error;
+
+abort_error:
+ xfs_trans_cancel(tp);
+ return error;
+}
diff --git a/fs/xfs/xfs_extfree_item.h b/fs/xfs/xfs_extfree_item.h
index 8fa8651705e1..a32c794a86b7 100644
--- a/fs/xfs/xfs_extfree_item.h
+++ b/fs/xfs/xfs_extfree_item.h
@@ -98,4 +98,7 @@ int xfs_efi_copy_format(xfs_log_iovec_t *buf,
void xfs_efi_item_free(xfs_efi_log_item_t *);
void xfs_efi_release(struct xfs_efi_log_item *);
+int xfs_efi_recover(struct xfs_mount *mp,
+ struct xfs_efi_log_item *efip);
+
#endif /* __XFS_EXTFREE_ITEM_H__ */
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 47fc63295422..ed95e5bb04e6 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -37,6 +37,7 @@
#include "xfs_log.h"
#include "xfs_icache.h"
#include "xfs_pnfs.h"
+#include "xfs_iomap.h"
#include <linux/dcache.h>
#include <linux/falloc.h>
@@ -80,61 +81,17 @@ xfs_rw_ilock_demote(
}
/*
- * xfs_iozero clears the specified range supplied via the page cache (except in
- * the DAX case). Writes through the page cache will allocate blocks over holes,
- * though the callers usually map the holes first and avoid them. If a block is
- * not completely zeroed, then it will be read from disk before being partially
- * zeroed.
- *
- * In the DAX case, we can just directly write to the underlying pages. This
- * will not allocate blocks, but will avoid holes and unwritten extents and so
- * not do unnecessary work.
+ * Clear the specified range to zero through either the pagecache or DAX.
+ * Holes and unwritten extents will be left as-is as they are already zeroed.
*/
int
-xfs_iozero(
- struct xfs_inode *ip, /* inode */
- loff_t pos, /* offset in file */
- size_t count) /* size of data to zero */
+xfs_zero_range(
+ struct xfs_inode *ip,
+ xfs_off_t pos,
+ xfs_off_t count,
+ bool *did_zero)
{
- struct page *page;
- struct address_space *mapping;
- int status = 0;
-
-
- mapping = VFS_I(ip)->i_mapping;
- do {
- unsigned offset, bytes;
- void *fsdata;
-
- offset = (pos & (PAGE_SIZE -1)); /* Within page */
- bytes = PAGE_SIZE - offset;
- if (bytes > count)
- bytes = count;
-
- if (IS_DAX(VFS_I(ip))) {
- status = dax_zero_page_range(VFS_I(ip), pos, bytes,
- xfs_get_blocks_direct);
- if (status)
- break;
- } else {
- status = pagecache_write_begin(NULL, mapping, pos, bytes,
- AOP_FLAG_UNINTERRUPTIBLE,
- &page, &fsdata);
- if (status)
- break;
-
- zero_user(page, offset, bytes);
-
- status = pagecache_write_end(NULL, mapping, pos, bytes,
- bytes, page, fsdata);
- WARN_ON(status <= 0); /* can't return less than zero! */
- status = 0;
- }
- pos += bytes;
- count -= bytes;
- } while (count);
-
- return status;
+ return iomap_zero_range(VFS_I(ip), pos, count, NULL, &xfs_iomap_ops);
}
int
@@ -282,48 +239,35 @@ xfs_file_fsync(
}
STATIC ssize_t
-xfs_file_read_iter(
+xfs_file_dio_aio_read(
struct kiocb *iocb,
struct iov_iter *to)
{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file->f_mapping->host;
+ struct address_space *mapping = iocb->ki_filp->f_mapping;
+ struct inode *inode = mapping->host;
struct xfs_inode *ip = XFS_I(inode);
- struct xfs_mount *mp = ip->i_mount;
- size_t size = iov_iter_count(to);
+ loff_t isize = i_size_read(inode);
+ size_t count = iov_iter_count(to);
+ struct iov_iter data;
+ struct xfs_buftarg *target;
ssize_t ret = 0;
- int ioflags = 0;
- xfs_fsize_t n;
- loff_t pos = iocb->ki_pos;
- XFS_STATS_INC(mp, xs_read_calls);
-
- if (unlikely(iocb->ki_flags & IOCB_DIRECT))
- ioflags |= XFS_IO_ISDIRECT;
- if (file->f_mode & FMODE_NOCMTIME)
- ioflags |= XFS_IO_INVIS;
-
- if ((ioflags & XFS_IO_ISDIRECT) && !IS_DAX(inode)) {
- xfs_buftarg_t *target =
- XFS_IS_REALTIME_INODE(ip) ?
- mp->m_rtdev_targp : mp->m_ddev_targp;
- /* DIO must be aligned to device logical sector size */
- if ((pos | size) & target->bt_logical_sectormask) {
- if (pos == i_size_read(inode))
- return 0;
- return -EINVAL;
- }
- }
+ trace_xfs_file_direct_read(ip, count, iocb->ki_pos);
- n = mp->m_super->s_maxbytes - pos;
- if (n <= 0 || size == 0)
- return 0;
+ if (!count)
+ return 0; /* skip atime */
- if (n < size)
- size = n;
+ if (XFS_IS_REALTIME_INODE(ip))
+ target = ip->i_mount->m_rtdev_targp;
+ else
+ target = ip->i_mount->m_ddev_targp;
- if (XFS_FORCED_SHUTDOWN(mp))
- return -EIO;
+ /* DIO must be aligned to device logical sector size */
+ if ((iocb->ki_pos | count) & target->bt_logical_sectormask) {
+ if (iocb->ki_pos == isize)
+ return 0;
+ return -EINVAL;
+ }
/*
* Locking is a bit tricky here. If we take an exclusive lock for direct
@@ -336,7 +280,7 @@ xfs_file_read_iter(
* serialisation.
*/
xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
- if ((ioflags & XFS_IO_ISDIRECT) && inode->i_mapping->nrpages) {
+ if (mapping->nrpages) {
xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
@@ -351,8 +295,8 @@ xfs_file_read_iter(
* flush and reduce the chances of repeated iolock cycles going
* forward.
*/
- if (inode->i_mapping->nrpages) {
- ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
+ if (mapping->nrpages) {
+ ret = filemap_write_and_wait(mapping);
if (ret) {
xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
return ret;
@@ -363,20 +307,95 @@ xfs_file_read_iter(
* we fail to invalidate a page, but this should never
* happen on XFS. Warn if it does fail.
*/
- ret = invalidate_inode_pages2(VFS_I(ip)->i_mapping);
+ ret = invalidate_inode_pages2(mapping);
WARN_ON_ONCE(ret);
ret = 0;
}
xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
}
- trace_xfs_file_read(ip, size, pos, ioflags);
+ data = *to;
+ ret = __blockdev_direct_IO(iocb, inode, target->bt_bdev, &data,
+ xfs_get_blocks_direct, NULL, NULL, 0);
+ if (ret > 0) {
+ iocb->ki_pos += ret;
+ iov_iter_advance(to, ret);
+ }
+ xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
+ file_accessed(iocb->ki_filp);
+ return ret;
+}
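The sector-alignment test above packs two checks into one branch: OR the position and the byte count, mask with the logical sector mask (sector size minus one), and any set bit means at least one of the two is misaligned. A standalone demonstration, assuming 512-byte logical sectors:

#include <stdbool.h>
#include <stdio.h>

static bool dio_aligned(unsigned long long pos, unsigned long long count,
			unsigned long long sectormask)
{
	return ((pos | count) & sectormask) == 0;
}

int main(void)
{
	unsigned long long mask = 512 - 1;	/* cf. bt_logical_sectormask */

	printf("%d\n", dio_aligned(4096, 8192, mask));	/* 1: both aligned */
	printf("%d\n", dio_aligned(4096, 100, mask));	/* 0: bad length */
	printf("%d\n", dio_aligned(100, 512, mask));	/* 0: bad offset */
	return 0;
}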
+
+static noinline ssize_t
+xfs_file_dax_read(
+ struct kiocb *iocb,
+ struct iov_iter *to)
+{
+ struct address_space *mapping = iocb->ki_filp->f_mapping;
+ struct inode *inode = mapping->host;
+ struct xfs_inode *ip = XFS_I(inode);
+ struct iov_iter data = *to;
+ size_t count = iov_iter_count(to);
+ ssize_t ret = 0;
+
+ trace_xfs_file_dax_read(ip, count, iocb->ki_pos);
+
+ if (!count)
+ return 0; /* skip atime */
+
+ xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
+ ret = dax_do_io(iocb, inode, &data, xfs_get_blocks_direct, NULL, 0);
+ if (ret > 0) {
+ iocb->ki_pos += ret;
+ iov_iter_advance(to, ret);
+ }
+ xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
+
+ file_accessed(iocb->ki_filp);
+ return ret;
+}
+
+STATIC ssize_t
+xfs_file_buffered_aio_read(
+ struct kiocb *iocb,
+ struct iov_iter *to)
+{
+ struct xfs_inode *ip = XFS_I(file_inode(iocb->ki_filp));
+ ssize_t ret;
+
+ trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);
+
+ xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
ret = generic_file_read_iter(iocb, to);
+ xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
+
+ return ret;
+}
+
+STATIC ssize_t
+xfs_file_read_iter(
+ struct kiocb *iocb,
+ struct iov_iter *to)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct xfs_mount *mp = XFS_I(inode)->i_mount;
+ ssize_t ret = 0;
+
+ XFS_STATS_INC(mp, xs_read_calls);
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -EIO;
+
+ if (IS_DAX(inode))
+ ret = xfs_file_dax_read(iocb, to);
+ else if (iocb->ki_flags & IOCB_DIRECT)
+ ret = xfs_file_dio_aio_read(iocb, to);
+ else
+ ret = xfs_file_buffered_aio_read(iocb, to);
+
if (ret > 0)
XFS_STATS_ADD(mp, xs_read_bytes, ret);
-
- xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
return ret;
}
@@ -389,18 +408,14 @@ xfs_file_splice_read(
unsigned int flags)
{
struct xfs_inode *ip = XFS_I(infilp->f_mapping->host);
- int ioflags = 0;
ssize_t ret;
XFS_STATS_INC(ip->i_mount, xs_read_calls);
- if (infilp->f_mode & FMODE_NOCMTIME)
- ioflags |= XFS_IO_INVIS;
-
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
return -EIO;
- trace_xfs_file_splice_read(ip, count, *ppos, ioflags);
+ trace_xfs_file_splice_read(ip, count, *ppos);
/*
* DAX inodes cannot use the page cache for splice, so we have to push
@@ -424,49 +439,6 @@ out:
}
/*
- * This routine is called to handle zeroing any space in the last block of the
- * file that is beyond the EOF. We do this since the size is being increased
- * without writing anything to that block and we don't want to read the
- * garbage on the disk.
- */
-STATIC int /* error (positive) */
-xfs_zero_last_block(
- struct xfs_inode *ip,
- xfs_fsize_t offset,
- xfs_fsize_t isize,
- bool *did_zeroing)
-{
- struct xfs_mount *mp = ip->i_mount;
- xfs_fileoff_t last_fsb = XFS_B_TO_FSBT(mp, isize);
- int zero_offset = XFS_B_FSB_OFFSET(mp, isize);
- int zero_len;
- int nimaps = 1;
- int error = 0;
- struct xfs_bmbt_irec imap;
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- error = xfs_bmapi_read(ip, last_fsb, 1, &imap, &nimaps, 0);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- if (error)
- return error;
-
- ASSERT(nimaps > 0);
-
- /*
- * If the block underlying isize is just a hole, then there
- * is nothing to zero.
- */
- if (imap.br_startblock == HOLESTARTBLOCK)
- return 0;
-
- zero_len = mp->m_sb.sb_blocksize - zero_offset;
- if (isize + zero_len > offset)
- zero_len = offset - isize;
- *did_zeroing = true;
- return xfs_iozero(ip, isize, zero_len);
-}
-
-/*
* Zero any on disk space between the current EOF and the new, larger EOF.
*
* This handles the normal case of zeroing the remainder of the last block in
@@ -484,94 +456,11 @@ xfs_zero_eof(
xfs_fsize_t isize, /* current inode size */
bool *did_zeroing)
{
- struct xfs_mount *mp = ip->i_mount;
- xfs_fileoff_t start_zero_fsb;
- xfs_fileoff_t end_zero_fsb;
- xfs_fileoff_t zero_count_fsb;
- xfs_fileoff_t last_fsb;
- xfs_fileoff_t zero_off;
- xfs_fsize_t zero_len;
- int nimaps;
- int error = 0;
- struct xfs_bmbt_irec imap;
-
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
ASSERT(offset > isize);
trace_xfs_zero_eof(ip, isize, offset - isize);
-
- /*
- * First handle zeroing the block on which isize resides.
- *
- * We only zero a part of that block so it is handled specially.
- */
- if (XFS_B_FSB_OFFSET(mp, isize) != 0) {
- error = xfs_zero_last_block(ip, offset, isize, did_zeroing);
- if (error)
- return error;
- }
-
- /*
- * Calculate the range between the new size and the old where blocks
- * needing to be zeroed may exist.
- *
- * To get the block where the last byte in the file currently resides,
- * we need to subtract one from the size and truncate back to a block
- * boundary. We subtract 1 in case the size is exactly on a block
- * boundary.
- */
- last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
- start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
- end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
- ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
- if (last_fsb == end_zero_fsb) {
- /*
- * The size was only incremented on its last block.
- * We took care of that above, so just return.
- */
- return 0;
- }
-
- ASSERT(start_zero_fsb <= end_zero_fsb);
- while (start_zero_fsb <= end_zero_fsb) {
- nimaps = 1;
- zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- error = xfs_bmapi_read(ip, start_zero_fsb, zero_count_fsb,
- &imap, &nimaps, 0);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- if (error)
- return error;
-
- ASSERT(nimaps > 0);
-
- if (imap.br_state == XFS_EXT_UNWRITTEN ||
- imap.br_startblock == HOLESTARTBLOCK) {
- start_zero_fsb = imap.br_startoff + imap.br_blockcount;
- ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
- continue;
- }
-
- /*
- * There are blocks we need to zero.
- */
- zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
- zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);
-
- if ((zero_off + zero_len) > offset)
- zero_len = offset - zero_off;
-
- error = xfs_iozero(ip, zero_off, zero_len);
- if (error)
- return error;
-
- *did_zeroing = true;
- start_zero_fsb = imap.br_startoff + imap.br_blockcount;
- ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
- }
-
- return 0;
+ return xfs_zero_range(ip, isize, offset - isize, did_zeroing);
}
/*
@@ -722,8 +611,7 @@ xfs_file_dio_aio_write(
mp->m_rtdev_targp : mp->m_ddev_targp;
/* DIO must be aligned to device logical sector size */
- if (!IS_DAX(inode) &&
- ((iocb->ki_pos | count) & target->bt_logical_sectormask))
+ if ((iocb->ki_pos | count) & target->bt_logical_sectormask)
return -EINVAL;
/* "unaligned" here means not aligned to a filesystem block */
@@ -762,7 +650,7 @@ xfs_file_dio_aio_write(
end = iocb->ki_pos + count - 1;
/*
- * See xfs_file_read_iter() for why we do a full-file flush here.
+ * See xfs_file_dio_aio_read() for why we do a full-file flush here.
*/
if (mapping->nrpages) {
ret = filemap_write_and_wait(VFS_I(ip)->i_mapping);
@@ -789,10 +677,12 @@ xfs_file_dio_aio_write(
iolock = XFS_IOLOCK_SHARED;
}
- trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
+ trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
data = *from;
- ret = mapping->a_ops->direct_IO(iocb, &data);
+ ret = __blockdev_direct_IO(iocb, inode, target->bt_bdev, &data,
+ xfs_get_blocks_direct, xfs_end_io_direct_write,
+ NULL, DIO_ASYNC_EXTEND);
/* see generic_file_direct_write() for why this is necessary */
if (mapping->nrpages) {
@@ -809,10 +699,70 @@ out:
xfs_rw_iunlock(ip, iolock);
/*
- * No fallback to buffered IO on errors for XFS. DAX can result in
- * partial writes, but direct IO will either complete fully or fail.
+ * No fallback to buffered IO on errors for XFS, direct IO will either
+ * complete fully or fail.
*/
- ASSERT(ret < 0 || ret == count || IS_DAX(VFS_I(ip)));
+ ASSERT(ret < 0 || ret == count);
+ return ret;
+}
+
+static noinline ssize_t
+xfs_file_dax_write(
+ struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct address_space *mapping = iocb->ki_filp->f_mapping;
+ struct inode *inode = mapping->host;
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+ ssize_t ret = 0;
+ int unaligned_io = 0;
+ int iolock;
+ struct iov_iter data;
+
+ /* "unaligned" here means not aligned to a filesystem block */
+ if ((iocb->ki_pos & mp->m_blockmask) ||
+ ((iocb->ki_pos + iov_iter_count(from)) & mp->m_blockmask)) {
+ unaligned_io = 1;
+ iolock = XFS_IOLOCK_EXCL;
+ } else if (mapping->nrpages) {
+ iolock = XFS_IOLOCK_EXCL;
+ } else {
+ iolock = XFS_IOLOCK_SHARED;
+ }
+ xfs_rw_ilock(ip, iolock);
+
+ ret = xfs_file_aio_write_checks(iocb, from, &iolock);
+ if (ret)
+ goto out;
+
+ /*
+ * Yes, even DAX files can have page cache attached to them: A zeroed
+ * page is inserted into the pagecache when we have to serve a write
+ * fault on a hole. It should never be dirtied and can simply be
+ * dropped from the pagecache once we get real data for the page.
+ */
+ if (mapping->nrpages) {
+ ret = invalidate_inode_pages2(mapping);
+ WARN_ON_ONCE(ret);
+ }
+
+ if (iolock == XFS_IOLOCK_EXCL && !unaligned_io) {
+ xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
+ iolock = XFS_IOLOCK_SHARED;
+ }
+
+ trace_xfs_file_dax_write(ip, iov_iter_count(from), iocb->ki_pos);
+
+ data = *from;
+ ret = dax_do_io(iocb, inode, &data, xfs_get_blocks_direct,
+ xfs_end_io_direct_write, 0);
+ if (ret > 0) {
+ iocb->ki_pos += ret;
+ iov_iter_advance(from, ret);
+ }
+out:
+ xfs_rw_iunlock(ip, iolock);
return ret;
}
@@ -839,9 +789,8 @@ xfs_file_buffered_aio_write(
current->backing_dev_info = inode_to_bdi(inode);
write_retry:
- trace_xfs_file_buffered_write(ip, iov_iter_count(from),
- iocb->ki_pos, 0);
- ret = generic_perform_write(file, from, iocb->ki_pos);
+ trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
+ ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
if (likely(ret >= 0))
iocb->ki_pos += ret;
@@ -895,7 +844,9 @@ xfs_file_write_iter(
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
return -EIO;
- if ((iocb->ki_flags & IOCB_DIRECT) || IS_DAX(inode))
+ if (IS_DAX(inode))
+ ret = xfs_file_dax_write(iocb, from);
+ else if (iocb->ki_flags & IOCB_DIRECT)
ret = xfs_file_dio_aio_write(iocb, from);
else
ret = xfs_file_buffered_aio_write(iocb, from);
@@ -1551,9 +1502,9 @@ xfs_filemap_page_mkwrite(
xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
if (IS_DAX(inode)) {
- ret = __dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault);
+ ret = dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault);
} else {
- ret = block_page_mkwrite(vma, vmf, xfs_get_blocks);
+ ret = iomap_page_mkwrite(vma, vmf, &xfs_iomap_ops);
ret = block_page_mkwrite_return(ret);
}
@@ -1585,7 +1536,7 @@ xfs_filemap_fault(
* changes to xfs_get_blocks_direct() to map unwritten extent
* ioend for conversion on read-only mappings.
*/
- ret = __dax_fault(vma, vmf, xfs_get_blocks_dax_fault);
+ ret = dax_fault(vma, vmf, xfs_get_blocks_dax_fault);
} else
ret = filemap_fault(vma, vmf);
xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
@@ -1622,7 +1573,7 @@ xfs_filemap_pmd_fault(
}
xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
- ret = __dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault);
+ ret = dax_pmd_fault(vma, addr, pmd, flags, xfs_get_blocks_dax_fault);
xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
if (flags & FAULT_FLAG_WRITE)
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index a51353a1f87f..4a33a3304369 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -22,6 +22,7 @@
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
@@ -385,7 +386,7 @@ xfs_filestream_new_ag(
}
flags = (ap->userdata ? XFS_PICK_USERDATA : 0) |
- (ap->flist->xbf_low ? XFS_PICK_LOWSPACE : 0);
+ (ap->dfops->dop_low ? XFS_PICK_LOWSPACE : 0);
err = xfs_filestream_pick_ag(pip, startag, agp, flags, minlen);
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index b4d75825ae37..0f96847b90e1 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -23,6 +23,7 @@
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
@@ -32,6 +33,7 @@
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
+#include "xfs_rmap_btree.h"
#include "xfs_ialloc.h"
#include "xfs_fsops.h"
#include "xfs_itable.h"
@@ -40,6 +42,7 @@
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_filestream.h"
+#include "xfs_rmap.h"
/*
* File system operations
@@ -103,7 +106,9 @@ xfs_fs_geometry(
(xfs_sb_version_hasfinobt(&mp->m_sb) ?
XFS_FSOP_GEOM_FLAGS_FINOBT : 0) |
(xfs_sb_version_hassparseinodes(&mp->m_sb) ?
- XFS_FSOP_GEOM_FLAGS_SPINODES : 0);
+ XFS_FSOP_GEOM_FLAGS_SPINODES : 0) |
+ (xfs_sb_version_hasrmapbt(&mp->m_sb) ?
+ XFS_FSOP_GEOM_FLAGS_RMAPBT : 0);
geo->logsectsize = xfs_sb_version_hassector(&mp->m_sb) ?
mp->m_sb.sb_logsectsize : BBSIZE;
geo->rtsectsize = mp->m_sb.sb_blocksize;
@@ -239,10 +244,16 @@ xfs_growfs_data_private(
agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
+ agf->agf_roots[XFS_BTNUM_RMAPi] =
+ cpu_to_be32(XFS_RMAP_BLOCK(mp));
+ agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
+ }
+
agf->agf_flfirst = cpu_to_be32(1);
agf->agf_fllast = 0;
agf->agf_flcount = 0;
- tmpsize = agsize - XFS_PREALLOC_BLOCKS(mp);
+ tmpsize = agsize - mp->m_ag_prealloc_blocks;
agf->agf_freeblks = cpu_to_be32(tmpsize);
agf->agf_longest = cpu_to_be32(tmpsize);
if (xfs_sb_version_hascrc(&mp->m_sb))
@@ -339,7 +350,7 @@ xfs_growfs_data_private(
agno, 0);
arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
- arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
+ arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);
arec->ar_blockcount = cpu_to_be32(
agsize - be32_to_cpu(arec->ar_startblock));
@@ -368,7 +379,7 @@ xfs_growfs_data_private(
agno, 0);
arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
- arec->ar_startblock = cpu_to_be32(XFS_PREALLOC_BLOCKS(mp));
+ arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);
arec->ar_blockcount = cpu_to_be32(
agsize - be32_to_cpu(arec->ar_startblock));
nfree += be32_to_cpu(arec->ar_blockcount);
@@ -378,6 +389,72 @@ xfs_growfs_data_private(
if (error)
goto error0;
+ /* RMAP btree root block */
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
+ struct xfs_rmap_rec *rrec;
+ struct xfs_btree_block *block;
+
+ bp = xfs_growfs_get_hdr_buf(mp,
+ XFS_AGB_TO_DADDR(mp, agno, XFS_RMAP_BLOCK(mp)),
+ BTOBB(mp->m_sb.sb_blocksize), 0,
+ &xfs_rmapbt_buf_ops);
+ if (!bp) {
+ error = -ENOMEM;
+ goto error0;
+ }
+
+ xfs_btree_init_block(mp, bp, XFS_RMAP_CRC_MAGIC, 0, 0,
+ agno, XFS_BTREE_CRC_BLOCKS);
+ block = XFS_BUF_TO_BLOCK(bp);
+
+
+ /*
+ * Mark the AG header regions as static metadata. The BNO
+ * btree block is the first block after the headers, so
+ * its location defines the size of the region the static
+ * metadata consumes.
+ *
+ * Note: unlike mkfs, we never have to account for log
+ * space when growing the data regions
+ */
+ rrec = XFS_RMAP_REC_ADDR(block, 1);
+ rrec->rm_startblock = 0;
+ rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp));
+ rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS);
+ rrec->rm_offset = 0;
+ be16_add_cpu(&block->bb_numrecs, 1);
+
+ /* account freespace btree root blocks */
+ rrec = XFS_RMAP_REC_ADDR(block, 2);
+ rrec->rm_startblock = cpu_to_be32(XFS_BNO_BLOCK(mp));
+ rrec->rm_blockcount = cpu_to_be32(2);
+ rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
+ rrec->rm_offset = 0;
+ be16_add_cpu(&block->bb_numrecs, 1);
+
+ /* account inode btree root blocks */
+ rrec = XFS_RMAP_REC_ADDR(block, 3);
+ rrec->rm_startblock = cpu_to_be32(XFS_IBT_BLOCK(mp));
+ rrec->rm_blockcount = cpu_to_be32(XFS_RMAP_BLOCK(mp) -
+ XFS_IBT_BLOCK(mp));
+ rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_INOBT);
+ rrec->rm_offset = 0;
+ be16_add_cpu(&block->bb_numrecs, 1);
+
+ /* account for rmap btree root */
+ rrec = XFS_RMAP_REC_ADDR(block, 4);
+ rrec->rm_startblock = cpu_to_be32(XFS_RMAP_BLOCK(mp));
+ rrec->rm_blockcount = cpu_to_be32(1);
+ rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
+ rrec->rm_offset = 0;
+ be16_add_cpu(&block->bb_numrecs, 1);
+
+ error = xfs_bwrite(bp);
+ xfs_buf_relse(bp);
+ if (error)
+ goto error0;
+ }
+
/*
* INO btree root block
*/
@@ -435,6 +512,8 @@ xfs_growfs_data_private(
* There are new blocks in the old last a.g.
*/
if (new) {
+ struct xfs_owner_info oinfo;
+
/*
* Change the agi length.
*/
@@ -462,14 +541,20 @@ xfs_growfs_data_private(
be32_to_cpu(agi->agi_length));
xfs_alloc_log_agf(tp, bp, XFS_AGF_LENGTH);
+
/*
* Free the new space.
+ *
+ * XFS_RMAP_OWN_NULL is used here to tell the rmap btree that
+ * this doesn't actually exist in the rmap btree.
*/
- error = xfs_free_extent(tp, XFS_AGB_TO_FSB(mp, agno,
- be32_to_cpu(agf->agf_length) - new), new);
- if (error) {
+ xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_NULL);
+ error = xfs_free_extent(tp,
+ XFS_AGB_TO_FSB(mp, agno,
+ be32_to_cpu(agf->agf_length) - new),
+ new, &oinfo);
+ if (error)
goto error0;
- }
}
/*
@@ -501,6 +586,7 @@ xfs_growfs_data_private(
} else
mp->m_maxicount = 0;
xfs_set_low_space_thresholds(mp);
+ mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
/* update secondary superblocks. */
for (agno = 1; agno < nagcount; agno++) {
@@ -638,7 +724,7 @@ xfs_fs_counts(
cnt->allocino = percpu_counter_read_positive(&mp->m_icount);
cnt->freeino = percpu_counter_read_positive(&mp->m_ifree);
cnt->freedata = percpu_counter_read_positive(&mp->m_fdblocks) -
- XFS_ALLOC_SET_ASIDE(mp);
+ mp->m_alloc_set_aside;
spin_lock(&mp->m_sb_lock);
cnt->freertx = mp->m_sb.sb_frextents;
@@ -667,8 +753,11 @@ xfs_reserve_blocks(
__uint64_t *inval,
xfs_fsop_resblks_t *outval)
{
- __int64_t lcounter, delta, fdblks_delta;
+ __int64_t lcounter, delta;
+ __int64_t fdblks_delta = 0;
__uint64_t request;
+ __int64_t free;
+ int error = 0;
/* If inval is null, report current values and return */
if (inval == (__uint64_t *)NULL) {
@@ -682,24 +771,23 @@ xfs_reserve_blocks(
request = *inval;
/*
- * With per-cpu counters, this becomes an interesting
- * problem. we needto work out if we are freeing or allocation
- * blocks first, then we can do the modification as necessary.
+ * With per-cpu counters, this becomes an interesting problem. We need
+ * to work out if we are freeing or allocating blocks first, then we can
+ * do the modification as necessary.
*
- * We do this under the m_sb_lock so that if we are near
- * ENOSPC, we will hold out any changes while we work out
- * what to do. This means that the amount of free space can
- * change while we do this, so we need to retry if we end up
- * trying to reserve more space than is available.
+ * We do this under the m_sb_lock so that if we are near ENOSPC, we will
+ * hold out any changes while we work out what to do. This means that
+ * the amount of free space can change while we do this, so we need to
+ * retry if we end up trying to reserve more space than is available.
*/
-retry:
spin_lock(&mp->m_sb_lock);
/*
* If our previous reservation was larger than the current value,
- * then move any unused blocks back to the free pool.
+ * then move any unused blocks back to the free pool. Modify the resblks
+ * counters directly since we shouldn't have any problems unreserving
+ * space.
*/
- fdblks_delta = 0;
if (mp->m_resblks > request) {
lcounter = mp->m_resblks_avail - request;
if (lcounter > 0) { /* release unused blocks */
@@ -707,54 +795,67 @@ retry:
mp->m_resblks_avail -= lcounter;
}
mp->m_resblks = request;
- } else {
- __int64_t free;
+ if (fdblks_delta) {
+ spin_unlock(&mp->m_sb_lock);
+ error = xfs_mod_fdblocks(mp, fdblks_delta, 0);
+ spin_lock(&mp->m_sb_lock);
+ }
+
+ goto out;
+ }
+ /*
+ * If the request is larger than the current reservation, reserve the
+ * blocks before we update the reserve counters. Sample m_fdblocks and
+ * perform a partial reservation if the request exceeds free space.
+ */
+ error = -ENOSPC;
+ do {
free = percpu_counter_sum(&mp->m_fdblocks) -
- XFS_ALLOC_SET_ASIDE(mp);
+ mp->m_alloc_set_aside;
if (!free)
- goto out; /* ENOSPC and fdblks_delta = 0 */
+ break;
delta = request - mp->m_resblks;
lcounter = free - delta;
- if (lcounter < 0) {
+ if (lcounter < 0)
/* We can't satisfy the request, just get what we can */
- mp->m_resblks += free;
- mp->m_resblks_avail += free;
- fdblks_delta = -free;
- } else {
- fdblks_delta = -delta;
- mp->m_resblks = request;
- mp->m_resblks_avail += delta;
- }
- }
-out:
- if (outval) {
- outval->resblks = mp->m_resblks;
- outval->resblks_avail = mp->m_resblks_avail;
- }
- spin_unlock(&mp->m_sb_lock);
+ fdblks_delta = free;
+ else
+ fdblks_delta = delta;
- if (fdblks_delta) {
/*
- * If we are putting blocks back here, m_resblks_avail is
- * already at its max so this will put it in the free pool.
- *
- * If we need space, we'll either succeed in getting it
- * from the free block count or we'll get an enospc. If
- * we get a ENOSPC, it means things changed while we were
- * calculating fdblks_delta and so we should try again to
- * see if there is anything left to reserve.
+ * We'll either succeed in getting space from the free block
+ * count or we'll get an ENOSPC. If we get a ENOSPC, it means
+ * things changed while we were calculating fdblks_delta and so
+ * we should try again to see if there is anything left to
+ * reserve.
*
* Don't set the reserved flag here - we don't want to reserve
* the extra reserve blocks from the reserve.....
*/
- int error;
- error = xfs_mod_fdblocks(mp, fdblks_delta, 0);
- if (error == -ENOSPC)
- goto retry;
+ spin_unlock(&mp->m_sb_lock);
+ error = xfs_mod_fdblocks(mp, -fdblks_delta, 0);
+ spin_lock(&mp->m_sb_lock);
+ } while (error == -ENOSPC);
+
+ /*
+ * Update the reserve counters if blocks have been successfully
+ * allocated.
+ */
+ if (!error && fdblks_delta) {
+ mp->m_resblks += fdblks_delta;
+ mp->m_resblks_avail += fdblks_delta;
}
- return 0;
+
+out:
+ if (outval) {
+ outval->resblks = mp->m_resblks;
+ outval->resblks_avail = mp->m_resblks_avail;
+ }
+
+ spin_unlock(&mp->m_sb_lock);
+ return error;
}
int
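
The retry loop above follows a common lock-averse reservation pattern: sample the free-space counter, drop the lock, attempt the modification, and loop if the attempt races with other allocators and fails with ENOSPC. Below is a minimal user-space sketch of that pattern; every name in it (mod_fdblocks, resblks_avail, and so on) is an illustrative stand-in, not the kernel code.

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t sb_lock = PTHREAD_MUTEX_INITIALIZER;
static int64_t fdblocks = 100;     /* global free block count */
static int64_t resblks_avail = 0;  /* blocks held in the reserve pool */

/* Fail with -ENOSPC if the delta would drive fdblocks negative. */
static int mod_fdblocks(int64_t delta)
{
	pthread_mutex_lock(&sb_lock);
	if (fdblocks + delta < 0) {
		pthread_mutex_unlock(&sb_lock);
		return -ENOSPC;
	}
	fdblocks += delta;
	pthread_mutex_unlock(&sb_lock);
	return 0;
}

static int reserve_blocks(int64_t request)
{
	int error;

	do {
		int64_t avail = fdblocks;	/* racy sample, like m_fdblocks */
		int64_t delta = request - resblks_avail;

		if (delta <= 0)
			return 0;		/* already fully reserved */
		if (avail <= 0)
			return -ENOSPC;
		if (delta > avail)
			delta = avail;		/* partial reservation */
		error = mod_fdblocks(-delta);	/* may race and fail */
		if (!error)
			resblks_avail += delta;
	} while (error == -ENOSPC);

	return error;
}

int main(void)
{
	printf("reserve 40 -> %d, avail now %lld\n",
	       reserve_blocks(40), (long long)resblks_avail);
	return 0;
}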
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 99ee6eee5e0b..fb39a66914dd 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -765,7 +765,7 @@ restart:
* Background scanning to trim post-EOF preallocated space. This is queued
* based on the 'speculative_prealloc_lifetime' tunable (5m by default).
*/
-STATIC void
+void
xfs_queue_eofblocks(
struct xfs_mount *mp)
{
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index 62f1f91c32cb..05bac99bef75 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -68,6 +68,7 @@ void xfs_inode_clear_eofblocks_tag(struct xfs_inode *ip);
int xfs_icache_free_eofblocks(struct xfs_mount *, struct xfs_eofblocks *);
int xfs_inode_free_quota_eofblocks(struct xfs_inode *ip);
void xfs_eofblocks_worker(struct work_struct *);
+void xfs_queue_eofblocks(struct xfs_mount *);
int xfs_inode_ag_iterator(struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, int flags, void *args),
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index ee6799e0476f..e08eaea6327b 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -25,6 +25,7 @@
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
@@ -431,7 +432,7 @@ xfs_lock_inumorder(int lock_mode, int subclass)
* lock more than one at a time, lockdep will report false positives saying we
* have violated locking orders.
*/
-void
+static void
xfs_lock_inodes(
xfs_inode_t **ips,
int inodes,
@@ -667,14 +668,6 @@ xfs_ip2xflags(
return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
}
-uint
-xfs_dic2xflags(
- struct xfs_dinode *dip)
-{
- return _xfs_dic2xflags(be16_to_cpu(dip->di_flags),
- be64_to_cpu(dip->di_flags2), XFS_DFORK_Q(dip));
-}
-
/*
 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
* is allowed, otherwise it has to be an exact match. If a CI match is found,
@@ -748,7 +741,7 @@ out_unlock:
* are not linked into the directory structure - they are attached
* directly to the superblock - and so have no parent.
*/
-int
+static int
xfs_ialloc(
xfs_trans_t *tp,
xfs_inode_t *pip,
@@ -1085,7 +1078,7 @@ xfs_dir_ialloc(
* link count to go to zero, move the inode to AGI unlinked list so that it can
* be freed when the last active reference goes away via xfs_inactive().
*/
-int /* error */
+static int /* error */
xfs_droplink(
xfs_trans_t *tp,
xfs_inode_t *ip)
@@ -1104,7 +1097,7 @@ xfs_droplink(
/*
* Increment the link count on an inode & log the change.
*/
-int
+static int
xfs_bumplink(
xfs_trans_t *tp,
xfs_inode_t *ip)
@@ -1130,7 +1123,7 @@ xfs_create(
struct xfs_inode *ip = NULL;
struct xfs_trans *tp = NULL;
int error;
- xfs_bmap_free_t free_list;
+ struct xfs_defer_ops dfops;
xfs_fsblock_t first_block;
bool unlock_dp_on_error = false;
prid_t prid;
@@ -1190,7 +1183,7 @@ xfs_create(
XFS_IOLOCK_PARENT | XFS_ILOCK_PARENT);
unlock_dp_on_error = true;
- xfs_bmap_init(&free_list, &first_block);
+ xfs_defer_init(&dfops, &first_block);
/*
* Reserve disk quota and the inode.
@@ -1227,7 +1220,7 @@ xfs_create(
unlock_dp_on_error = false;
error = xfs_dir_createname(tp, dp, name, ip->i_ino,
- &first_block, &free_list, resblks ?
+ &first_block, &dfops, resblks ?
resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
if (error) {
ASSERT(error != -ENOSPC);
@@ -1261,7 +1254,7 @@ xfs_create(
*/
xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
- error = xfs_bmap_finish(&tp, &free_list, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto out_bmap_cancel;
@@ -1277,7 +1270,7 @@ xfs_create(
return 0;
out_bmap_cancel:
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
out_release_inode:
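
Every hunk in this file makes the same substitution: the on-stack xfs_bmap_free_t list and its init/finish/cancel calls become the generic deferred-ops equivalents. A hedged sketch of that init, queue, finish-or-cancel lifecycle follows, with made-up helpers rather than the real xfs_defer_* implementations.

#include <stdio.h>

struct defer_ops {
	int pending;	/* queued-but-unapplied work items */
};

static void defer_init(struct defer_ops *d)  { d->pending = 0; }
static void defer_queue(struct defer_ops *d) { d->pending++; }

static int defer_finish(struct defer_ops *d)
{
	while (d->pending > 0)
		d->pending--;	/* apply one item; real code may roll the transaction */
	return 0;
}

static void defer_cancel(struct defer_ops *d)
{
	d->pending = 0;		/* throw away queued work on the error path */
}

int main(void)
{
	struct defer_ops dfops;
	int error;

	defer_init(&dfops);
	defer_queue(&dfops);		/* e.g. a directory entry update */
	defer_queue(&dfops);		/* e.g. a block to free */

	error = defer_finish(&dfops);	/* success path */
	if (error)
		defer_cancel(&dfops);	/* error path, mirrors out_bmap_cancel */
	printf("pending after finish: %d\n", dfops.pending);
	return error;
}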
@@ -1409,7 +1402,7 @@ xfs_link(
xfs_mount_t *mp = tdp->i_mount;
xfs_trans_t *tp;
int error;
- xfs_bmap_free_t free_list;
+ struct xfs_defer_ops dfops;
xfs_fsblock_t first_block;
int resblks;
@@ -1460,7 +1453,7 @@ xfs_link(
goto error_return;
}
- xfs_bmap_init(&free_list, &first_block);
+ xfs_defer_init(&dfops, &first_block);
/*
* Handle initial link state of O_TMPFILE inode
@@ -1472,7 +1465,7 @@ xfs_link(
}
error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
- &first_block, &free_list, resblks);
+ &first_block, &dfops, resblks);
if (error)
goto error_return;
xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
@@ -1490,9 +1483,9 @@ xfs_link(
if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
xfs_trans_set_sync(tp);
- error = xfs_bmap_finish(&tp, &free_list, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error) {
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
goto error_return;
}
@@ -1534,7 +1527,7 @@ xfs_itruncate_extents(
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp = *tpp;
- xfs_bmap_free_t free_list;
+ struct xfs_defer_ops dfops;
xfs_fsblock_t first_block;
xfs_fileoff_t first_unmap_block;
xfs_fileoff_t last_block;
@@ -1570,12 +1563,12 @@ xfs_itruncate_extents(
ASSERT(first_unmap_block < last_block);
unmap_len = last_block - first_unmap_block + 1;
while (!done) {
- xfs_bmap_init(&free_list, &first_block);
+ xfs_defer_init(&dfops, &first_block);
error = xfs_bunmapi(tp, ip,
first_unmap_block, unmap_len,
xfs_bmapi_aflag(whichfork),
XFS_ITRUNC_MAX_EXTENTS,
- &first_block, &free_list,
+ &first_block, &dfops,
&done);
if (error)
goto out_bmap_cancel;
@@ -1584,7 +1577,7 @@ xfs_itruncate_extents(
* Duplicate the transaction that has the permanent
* reservation and commit the old transaction.
*/
- error = xfs_bmap_finish(&tp, &free_list, ip);
+ error = xfs_defer_finish(&tp, &dfops, ip);
if (error)
goto out_bmap_cancel;
@@ -1610,7 +1603,7 @@ out_bmap_cancel:
* the transaction can be properly aborted. We just need to make sure
* we're not holding any resources that we were not when we came in.
*/
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
goto out;
}
@@ -1751,7 +1744,7 @@ STATIC int
xfs_inactive_ifree(
struct xfs_inode *ip)
{
- xfs_bmap_free_t free_list;
+ struct xfs_defer_ops dfops;
xfs_fsblock_t first_block;
struct xfs_mount *mp = ip->i_mount;
struct xfs_trans *tp;
@@ -1788,8 +1781,8 @@ xfs_inactive_ifree(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
- xfs_bmap_init(&free_list, &first_block);
- error = xfs_ifree(tp, ip, &free_list);
+ xfs_defer_init(&dfops, &first_block);
+ error = xfs_ifree(tp, ip, &dfops);
if (error) {
/*
* If we fail to free the inode, shut down. The cancel
@@ -1815,11 +1808,11 @@ xfs_inactive_ifree(
* Just ignore errors at this point. There is nothing we can do except
* to try to keep going. Make sure it's not a silent error.
*/
- error = xfs_bmap_finish(&tp, &free_list, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error) {
- xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
+ xfs_notice(mp, "%s: xfs_defer_finish returned error %d",
__func__, error);
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
}
error = xfs_trans_commit(tp);
if (error)
@@ -2375,7 +2368,7 @@ int
xfs_ifree(
xfs_trans_t *tp,
xfs_inode_t *ip,
- xfs_bmap_free_t *flist)
+ struct xfs_defer_ops *dfops)
{
int error;
struct xfs_icluster xic = { 0 };
@@ -2394,7 +2387,7 @@ xfs_ifree(
if (error)
return error;
- error = xfs_difree(tp, ip->i_ino, flist, &xic);
+ error = xfs_difree(tp, ip->i_ino, dfops, &xic);
if (error)
return error;
@@ -2482,7 +2475,7 @@ xfs_iunpin_wait(
* directory entry.
*
* This is still safe from a transactional point of view - it is not until we
- * get to xfs_bmap_finish() that we have the possibility of multiple
+ * get to xfs_defer_finish() that we have the possibility of multiple
* transactions in this operation. Hence as long as we remove the directory
* entry and drop the link count in the first transaction of the remove
* operation, there are no transactional constraints on the ordering here.
@@ -2497,7 +2490,7 @@ xfs_remove(
xfs_trans_t *tp = NULL;
int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
int error = 0;
- xfs_bmap_free_t free_list;
+ struct xfs_defer_ops dfops;
xfs_fsblock_t first_block;
uint resblks;
@@ -2579,9 +2572,9 @@ xfs_remove(
if (error)
goto out_trans_cancel;
- xfs_bmap_init(&free_list, &first_block);
+ xfs_defer_init(&dfops, &first_block);
error = xfs_dir_removename(tp, dp, name, ip->i_ino,
- &first_block, &free_list, resblks);
+ &first_block, &dfops, resblks);
if (error) {
ASSERT(error != -ENOENT);
goto out_bmap_cancel;
@@ -2595,7 +2588,7 @@ xfs_remove(
if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
xfs_trans_set_sync(tp);
- error = xfs_bmap_finish(&tp, &free_list, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto out_bmap_cancel;
@@ -2609,7 +2602,7 @@ xfs_remove(
return 0;
out_bmap_cancel:
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
std_return:
@@ -2670,7 +2663,7 @@ xfs_sort_for_rename(
static int
xfs_finish_rename(
struct xfs_trans *tp,
- struct xfs_bmap_free *free_list)
+ struct xfs_defer_ops *dfops)
{
int error;
@@ -2681,9 +2674,9 @@ xfs_finish_rename(
if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
xfs_trans_set_sync(tp);
- error = xfs_bmap_finish(&tp, free_list, NULL);
+ error = xfs_defer_finish(&tp, dfops, NULL);
if (error) {
- xfs_bmap_cancel(free_list);
+ xfs_defer_cancel(dfops);
xfs_trans_cancel(tp);
return error;
}
@@ -2705,7 +2698,7 @@ xfs_cross_rename(
struct xfs_inode *dp2,
struct xfs_name *name2,
struct xfs_inode *ip2,
- struct xfs_bmap_free *free_list,
+ struct xfs_defer_ops *dfops,
xfs_fsblock_t *first_block,
int spaceres)
{
@@ -2717,14 +2710,14 @@ xfs_cross_rename(
/* Swap inode number for dirent in first parent */
error = xfs_dir_replace(tp, dp1, name1,
ip2->i_ino,
- first_block, free_list, spaceres);
+ first_block, dfops, spaceres);
if (error)
goto out_trans_abort;
/* Swap inode number for dirent in second parent */
error = xfs_dir_replace(tp, dp2, name2,
ip1->i_ino,
- first_block, free_list, spaceres);
+ first_block, dfops, spaceres);
if (error)
goto out_trans_abort;
@@ -2739,7 +2732,7 @@ xfs_cross_rename(
if (S_ISDIR(VFS_I(ip2)->i_mode)) {
error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
dp1->i_ino, first_block,
- free_list, spaceres);
+ dfops, spaceres);
if (error)
goto out_trans_abort;
@@ -2766,7 +2759,7 @@ xfs_cross_rename(
if (S_ISDIR(VFS_I(ip1)->i_mode)) {
error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
dp2->i_ino, first_block,
- free_list, spaceres);
+ dfops, spaceres);
if (error)
goto out_trans_abort;
@@ -2805,10 +2798,10 @@ xfs_cross_rename(
}
xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
- return xfs_finish_rename(tp, free_list);
+ return xfs_finish_rename(tp, dfops);
out_trans_abort:
- xfs_bmap_cancel(free_list);
+ xfs_defer_cancel(dfops);
xfs_trans_cancel(tp);
return error;
}
@@ -2863,7 +2856,7 @@ xfs_rename(
{
struct xfs_mount *mp = src_dp->i_mount;
struct xfs_trans *tp;
- struct xfs_bmap_free free_list;
+ struct xfs_defer_ops dfops;
xfs_fsblock_t first_block;
struct xfs_inode *wip = NULL; /* whiteout inode */
struct xfs_inode *inodes[__XFS_SORT_INODES];
@@ -2952,13 +2945,13 @@ xfs_rename(
goto out_trans_cancel;
}
- xfs_bmap_init(&free_list, &first_block);
+ xfs_defer_init(&dfops, &first_block);
/* RENAME_EXCHANGE is unique from here on. */
if (flags & RENAME_EXCHANGE)
return xfs_cross_rename(tp, src_dp, src_name, src_ip,
target_dp, target_name, target_ip,
- &free_list, &first_block, spaceres);
+ &dfops, &first_block, spaceres);
/*
* Set up the target.
@@ -2980,7 +2973,7 @@ xfs_rename(
*/
error = xfs_dir_createname(tp, target_dp, target_name,
src_ip->i_ino, &first_block,
- &free_list, spaceres);
+ &dfops, spaceres);
if (error)
goto out_bmap_cancel;
@@ -3020,7 +3013,7 @@ xfs_rename(
*/
error = xfs_dir_replace(tp, target_dp, target_name,
src_ip->i_ino,
- &first_block, &free_list, spaceres);
+ &first_block, &dfops, spaceres);
if (error)
goto out_bmap_cancel;
@@ -3055,7 +3048,7 @@ xfs_rename(
*/
error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
target_dp->i_ino,
- &first_block, &free_list, spaceres);
+ &first_block, &dfops, spaceres);
ASSERT(error != -EEXIST);
if (error)
goto out_bmap_cancel;
@@ -3094,10 +3087,10 @@ xfs_rename(
*/
if (wip) {
error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
- &first_block, &free_list, spaceres);
+ &first_block, &dfops, spaceres);
} else
error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
- &first_block, &free_list, spaceres);
+ &first_block, &dfops, spaceres);
if (error)
goto out_bmap_cancel;
@@ -3132,13 +3125,13 @@ xfs_rename(
if (new_parent)
xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
- error = xfs_finish_rename(tp, &free_list);
+ error = xfs_finish_rename(tp, &dfops);
if (wip)
IRELE(wip);
return error;
out_bmap_cancel:
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
out_release_wip:
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index e52d7c7aeb5b..e1a411e08f00 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -27,7 +27,7 @@
struct xfs_dinode;
struct xfs_inode;
struct xfs_buf;
-struct xfs_bmap_free;
+struct xfs_defer_ops;
struct xfs_bmbt_irec;
struct xfs_inode_log_item;
struct xfs_mount;
@@ -395,14 +395,10 @@ void xfs_ilock_demote(xfs_inode_t *, uint);
int xfs_isilocked(xfs_inode_t *, uint);
uint xfs_ilock_data_map_shared(struct xfs_inode *);
uint xfs_ilock_attr_map_shared(struct xfs_inode *);
-int xfs_ialloc(struct xfs_trans *, xfs_inode_t *, umode_t,
- xfs_nlink_t, xfs_dev_t, prid_t, int,
- struct xfs_buf **, xfs_inode_t **);
uint xfs_ip2xflags(struct xfs_inode *);
-uint xfs_dic2xflags(struct xfs_dinode *);
int xfs_ifree(struct xfs_trans *, xfs_inode_t *,
- struct xfs_bmap_free *);
+ struct xfs_defer_ops *);
int xfs_itruncate_extents(struct xfs_trans **, struct xfs_inode *,
int, xfs_fsize_t);
void xfs_iext_realloc(xfs_inode_t *, int, int);
@@ -411,7 +407,6 @@ void xfs_iunpin_wait(xfs_inode_t *);
#define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount))
int xfs_iflush(struct xfs_inode *, struct xfs_buf **);
-void xfs_lock_inodes(xfs_inode_t **, int, uint);
void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
xfs_extlen_t xfs_get_extsz_hint(struct xfs_inode *ip);
@@ -419,8 +414,6 @@ xfs_extlen_t xfs_get_extsz_hint(struct xfs_inode *ip);
int xfs_dir_ialloc(struct xfs_trans **, struct xfs_inode *, umode_t,
xfs_nlink_t, xfs_dev_t, prid_t, int,
struct xfs_inode **, int *);
-int xfs_droplink(struct xfs_trans *, struct xfs_inode *);
-int xfs_bumplink(struct xfs_trans *, struct xfs_inode *);
/* from xfs_file.c */
enum xfs_prealloc_flags {
@@ -434,7 +427,8 @@ int xfs_update_prealloc_flags(struct xfs_inode *ip,
enum xfs_prealloc_flags flags);
int xfs_zero_eof(struct xfs_inode *ip, xfs_off_t offset,
xfs_fsize_t isize, bool *did_zeroing);
-int xfs_iozero(struct xfs_inode *ip, loff_t pos, size_t count);
+int xfs_zero_range(struct xfs_inode *ip, xfs_off_t pos, xfs_off_t count,
+ bool *did_zero);
loff_t __xfs_seek_hole_data(struct inode *inode, loff_t start,
loff_t eof, int whence);
@@ -479,14 +473,4 @@ do { \
extern struct kmem_zone *xfs_inode_zone;
-/*
- * Flags for read/write calls
- */
-#define XFS_IO_ISDIRECT 0x00001 /* bypass page cache */
-#define XFS_IO_INVIS 0x00002 /* don't update inode timestamps */
-
-#define XFS_IO_FLAGS \
- { XFS_IO_ISDIRECT, "DIRECT" }, \
- { XFS_IO_INVIS, "INVIS"}
-
#endif /* __XFS_INODE_H__ */
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index a1b07612224c..892c2aced207 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -651,6 +651,7 @@ void
xfs_inode_item_destroy(
xfs_inode_t *ip)
{
+ kmem_free(ip->i_itemp->ili_item.li_lv_shadow);
kmem_zone_free(xfs_ili_zone, ip->i_itemp);
}
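
The added kmem_free() pairs with the shadow log vector introduced in the xfs_log_cil.c changes below: the shadow buffer stays attached to the item for reuse, so the item's destructor must release it. A small sketch of the owner-frees-the-cached-buffer pattern, with invented types:

#include <stdlib.h>

struct log_item {
	void *lv_shadow;	/* lazily allocated, may still be NULL */
};

static void item_destroy(struct log_item *ip)
{
	free(ip->lv_shadow);	/* free(NULL) is a no-op, like kmem_free */
	free(ip);
}

int main(void)
{
	struct log_item *ip = calloc(1, sizeof(*ip));

	ip->lv_shadow = malloc(64);	/* cached from an earlier commit */
	item_destroy(ip);
	return 0;
}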
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index dbca7375deef..96a70fd1f5d6 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -232,7 +232,7 @@ xfs_open_by_handle(
}
if ((fmode & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
- error = -EACCES;
+ error = -EPERM;
goto out_dput;
}
@@ -387,6 +387,7 @@ xfs_attrlist_by_handle(
{
int error = -ENOMEM;
attrlist_cursor_kern_t *cursor;
+ struct xfs_fsop_attrlist_handlereq __user *p = arg;
xfs_fsop_attrlist_handlereq_t al_hreq;
struct dentry *dentry;
char *kbuf;
@@ -419,6 +420,11 @@ xfs_attrlist_by_handle(
if (error)
goto out_kfree;
+ if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
+ error = -EFAULT;
+ goto out_kfree;
+ }
+
if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
error = -EFAULT;
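
The extra copy_to_user() writes the updated attribute-list cursor back to the caller, so a follow-up call can resume where the previous one stopped instead of restarting from the beginning. A user-space sketch of the resumable-listing idea (the batching API here is invented):

#include <stdio.h>

struct cursor { int next; };

/* List up to 'max' items starting at cursor->next; advance the cursor. */
static int list_batch(struct cursor *cur, int *out, int max, int total)
{
	int n = 0;

	while (n < max && cur->next < total)
		out[n++] = cur->next++;
	return n;	/* 0 means the listing is complete */
}

int main(void)
{
	struct cursor cur = { 0 };
	int buf[4], n;

	/* Without writing the cursor back, every call would restart at 0. */
	while ((n = list_batch(&cur, buf, 4, 10)) > 0)
		printf("got %d items, cursor now at %d\n", n, cur.next);
	return 0;
}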
@@ -595,13 +601,12 @@ xfs_attrmulti_by_handle(
int
xfs_ioc_space(
- struct xfs_inode *ip,
- struct inode *inode,
struct file *filp,
- int ioflags,
unsigned int cmd,
xfs_flock64_t *bf)
{
+ struct inode *inode = file_inode(filp);
+ struct xfs_inode *ip = XFS_I(inode);
struct iattr iattr;
enum xfs_prealloc_flags flags = 0;
uint iolock = XFS_IOLOCK_EXCL;
@@ -626,7 +631,7 @@ xfs_ioc_space(
if (filp->f_flags & O_DSYNC)
flags |= XFS_PREALLOC_SYNC;
- if (ioflags & XFS_IO_INVIS)
+ if (filp->f_mode & FMODE_NOCMTIME)
flags |= XFS_PREALLOC_INVISIBLE;
error = mnt_want_write_file(filp);
@@ -1464,8 +1469,7 @@ xfs_getbmap_format(void **ap, struct getbmapx *bmv, int *full)
STATIC int
xfs_ioc_getbmap(
- struct xfs_inode *ip,
- int ioflags,
+ struct file *file,
unsigned int cmd,
void __user *arg)
{
@@ -1479,10 +1483,10 @@ xfs_ioc_getbmap(
return -EINVAL;
bmx.bmv_iflags = (cmd == XFS_IOC_GETBMAPA ? BMV_IF_ATTRFORK : 0);
- if (ioflags & XFS_IO_INVIS)
+ if (file->f_mode & FMODE_NOCMTIME)
bmx.bmv_iflags |= BMV_IF_NO_DMAPI_READ;
- error = xfs_getbmap(ip, &bmx, xfs_getbmap_format,
+ error = xfs_getbmap(XFS_I(file_inode(file)), &bmx, xfs_getbmap_format,
(__force struct getbmap *)arg+1);
if (error)
return error;
@@ -1575,6 +1579,17 @@ xfs_ioc_swapext(
goto out_put_tmp_file;
}
+ /*
+ * We need to ensure that the fds passed in point to XFS inodes
+ * before we cast and access them as XFS structures as we have no
+ * control over what the user passes us here.
+ */
+ if (f.file->f_op != &xfs_file_operations ||
+ tmp.file->f_op != &xfs_file_operations) {
+ error = -EINVAL;
+ goto out_put_tmp_file;
+ }
+
ip = XFS_I(file_inode(f.file));
tip = XFS_I(file_inode(tmp.file));
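
The new check is what makes the XFS_I() casts above safe: comparing f_op against this filesystem's file_operations proves the descriptors actually refer to XFS inodes before they are treated as such. A sketch of the same check-the-vtable-before-downcasting idiom, with invented types:

#include <stdio.h>

struct file_ops { const char *name; };
static const struct file_ops xfs_ops  = { "xfs"  };
static const struct file_ops ext4_ops = { "ext4" };

struct file { const struct file_ops *f_op; };
struct xfs_file { struct file vfs; int xfs_private; };	/* vfs must be first */

static struct xfs_file *to_xfs(struct file *f)
{
	if (f->f_op != &xfs_ops)	/* not ours: refuse to downcast */
		return NULL;
	return (struct xfs_file *)f;	/* safe: vfs is the first member */
}

int main(void)
{
	struct xfs_file xf = { { &xfs_ops }, 42 };
	struct file ef = { &ext4_ops };

	printf("xfs file -> %s\n", to_xfs(&xf.vfs) ? "accepted" : "rejected");
	printf("ext4 file -> %s\n", to_xfs(&ef) ? "accepted" : "rejected");
	return 0;
}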
@@ -1619,12 +1634,8 @@ xfs_file_ioctl(
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
void __user *arg = (void __user *)p;
- int ioflags = 0;
int error;
- if (filp->f_mode & FMODE_NOCMTIME)
- ioflags |= XFS_IO_INVIS;
-
trace_xfs_file_ioctl(ip);
switch (cmd) {
@@ -1643,7 +1654,7 @@ xfs_file_ioctl(
if (copy_from_user(&bf, arg, sizeof(bf)))
return -EFAULT;
- return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf);
+ return xfs_ioc_space(filp, cmd, &bf);
}
case XFS_IOC_DIOINFO: {
struct dioattr da;
@@ -1702,7 +1713,7 @@ xfs_file_ioctl(
case XFS_IOC_GETBMAP:
case XFS_IOC_GETBMAPA:
- return xfs_ioc_getbmap(ip, ioflags, cmd, arg);
+ return xfs_ioc_getbmap(filp, cmd, arg);
case XFS_IOC_GETBMAPX:
return xfs_ioc_getbmapx(ip, arg);
diff --git a/fs/xfs/xfs_ioctl.h b/fs/xfs/xfs_ioctl.h
index 77c02c7900b6..8b52881bfd90 100644
--- a/fs/xfs/xfs_ioctl.h
+++ b/fs/xfs/xfs_ioctl.h
@@ -20,10 +20,7 @@
extern int
xfs_ioc_space(
- struct xfs_inode *ip,
- struct inode *inode,
struct file *filp,
- int ioflags,
unsigned int cmd,
xfs_flock64_t *bf);
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 1a05d8ae327d..321f57721b92 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -532,12 +532,8 @@ xfs_file_compat_ioctl(
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
void __user *arg = (void __user *)p;
- int ioflags = 0;
int error;
- if (filp->f_mode & FMODE_NOCMTIME)
- ioflags |= XFS_IO_INVIS;
-
trace_xfs_file_compat_ioctl(ip);
switch (cmd) {
@@ -589,7 +585,7 @@ xfs_file_compat_ioctl(
if (xfs_compat_flock64_copyin(&bf, arg))
return -EFAULT;
cmd = _NATIVE_IOC(cmd, struct xfs_flock64);
- return xfs_ioc_space(ip, inode, filp, ioflags, cmd, &bf);
+ return xfs_ioc_space(filp, cmd, &bf);
}
case XFS_IOC_FSGEOMETRY_V1_32:
return xfs_compat_ioc_fsgeometry_v1(mp, arg);
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 58391355a44d..2114d53df433 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -15,6 +15,7 @@
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include <linux/iomap.h>
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
@@ -22,6 +23,7 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
@@ -127,7 +129,7 @@ xfs_iomap_write_direct(
int quota_flag;
int rt;
xfs_trans_t *tp;
- xfs_bmap_free_t free_list;
+ struct xfs_defer_ops dfops;
uint qblocks, resblks, resrtextents;
int error;
int lockmode;
@@ -230,18 +232,18 @@ xfs_iomap_write_direct(
* From this point onwards we overwrite the imap pointer that the
* caller gave to us.
*/
- xfs_bmap_init(&free_list, &firstfsb);
+ xfs_defer_init(&dfops, &firstfsb);
nimaps = 1;
error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
bmapi_flags, &firstfsb, resblks, imap,
- &nimaps, &free_list);
+ &nimaps, &dfops);
if (error)
goto out_bmap_cancel;
/*
* Complete the transaction
*/
- error = xfs_bmap_finish(&tp, &free_list, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto out_bmap_cancel;
@@ -265,7 +267,7 @@ out_unlock:
return error;
out_bmap_cancel:
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
out_trans_cancel:
xfs_trans_cancel(tp);
@@ -684,7 +686,7 @@ xfs_iomap_write_allocate(
xfs_fileoff_t offset_fsb, last_block;
xfs_fileoff_t end_fsb, map_start_fsb;
xfs_fsblock_t first_block;
- xfs_bmap_free_t free_list;
+ struct xfs_defer_ops dfops;
xfs_filblks_t count_fsb;
xfs_trans_t *tp;
int nimaps;
@@ -726,7 +728,7 @@ xfs_iomap_write_allocate(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, 0);
- xfs_bmap_init(&free_list, &first_block);
+ xfs_defer_init(&dfops, &first_block);
/*
* it is possible that the extents have changed since
@@ -782,11 +784,11 @@ xfs_iomap_write_allocate(
error = xfs_bmapi_write(tp, ip, map_start_fsb,
count_fsb, 0, &first_block,
nres, imap, &nimaps,
- &free_list);
+ &dfops);
if (error)
goto trans_cancel;
- error = xfs_bmap_finish(&tp, &free_list, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto trans_cancel;
@@ -820,7 +822,7 @@ xfs_iomap_write_allocate(
}
trans_cancel:
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
xfs_trans_cancel(tp);
error0:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -841,7 +843,7 @@ xfs_iomap_write_unwritten(
int nimaps;
xfs_trans_t *tp;
xfs_bmbt_irec_t imap;
- xfs_bmap_free_t free_list;
+ struct xfs_defer_ops dfops;
xfs_fsize_t i_size;
uint resblks;
int error;
@@ -885,11 +887,11 @@ xfs_iomap_write_unwritten(
/*
* Modify the unwritten extent state of the buffer.
*/
- xfs_bmap_init(&free_list, &firstfsb);
+ xfs_defer_init(&dfops, &firstfsb);
nimaps = 1;
error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
XFS_BMAPI_CONVERT, &firstfsb, resblks,
- &imap, &nimaps, &free_list);
+ &imap, &nimaps, &dfops);
if (error)
goto error_on_bmapi_transaction;
@@ -908,7 +910,7 @@ xfs_iomap_write_unwritten(
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
- error = xfs_bmap_finish(&tp, &free_list, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto error_on_bmapi_transaction;
@@ -935,8 +937,178 @@ xfs_iomap_write_unwritten(
return 0;
error_on_bmapi_transaction:
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
xfs_trans_cancel(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
+
+void
+xfs_bmbt_to_iomap(
+ struct xfs_inode *ip,
+ struct iomap *iomap,
+ struct xfs_bmbt_irec *imap)
+{
+ struct xfs_mount *mp = ip->i_mount;
+
+ if (imap->br_startblock == HOLESTARTBLOCK) {
+ iomap->blkno = IOMAP_NULL_BLOCK;
+ iomap->type = IOMAP_HOLE;
+ } else if (imap->br_startblock == DELAYSTARTBLOCK) {
+ iomap->blkno = IOMAP_NULL_BLOCK;
+ iomap->type = IOMAP_DELALLOC;
+ } else {
+ iomap->blkno = xfs_fsb_to_db(ip, imap->br_startblock);
+ if (imap->br_state == XFS_EXT_UNWRITTEN)
+ iomap->type = IOMAP_UNWRITTEN;
+ else
+ iomap->type = IOMAP_MAPPED;
+ }
+ iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
+ iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
+ iomap->bdev = xfs_find_bdev_for_inode(VFS_I(ip));
+}
+
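
The helper's interesting part is the three-way classification of the native extent record: a hole, a delayed allocation, or a real block that may still be unwritten. A compact sketch of that classification with stand-in types and sentinel values:

#include <stdint.h>
#include <stdio.h>

#define HOLESTART	((int64_t)-1)	/* stand-in for HOLESTARTBLOCK */
#define DELAYSTART	((int64_t)-2)	/* stand-in for DELAYSTARTBLOCK */

enum map_type { MAP_HOLE, MAP_DELALLOC, MAP_UNWRITTEN, MAP_MAPPED };

struct extent { int64_t startblock; int unwritten; };

static enum map_type classify(const struct extent *e)
{
	if (e->startblock == HOLESTART)
		return MAP_HOLE;	/* nothing allocated, nothing reserved */
	if (e->startblock == DELAYSTART)
		return MAP_DELALLOC;	/* space reserved, no block yet */
	return e->unwritten ? MAP_UNWRITTEN : MAP_MAPPED;
}

int main(void)
{
	const struct extent samples[] = {
		{ HOLESTART, 0 }, { DELAYSTART, 0 }, { 100, 1 }, { 100, 0 },
	};
	unsigned i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("extent %u -> type %d\n", i, classify(&samples[i]));
	return 0;
}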
+static inline bool imap_needs_alloc(struct xfs_bmbt_irec *imap, int nimaps)
+{
+ return !nimaps ||
+ imap->br_startblock == HOLESTARTBLOCK ||
+ imap->br_startblock == DELAYSTARTBLOCK;
+}
+
+static int
+xfs_file_iomap_begin(
+ struct inode *inode,
+ loff_t offset,
+ loff_t length,
+ unsigned flags,
+ struct iomap *iomap)
+{
+ struct xfs_inode *ip = XFS_I(inode);
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_bmbt_irec imap;
+ xfs_fileoff_t offset_fsb, end_fsb;
+ int nimaps = 1, error = 0;
+
+ if (XFS_FORCED_SHUTDOWN(mp))
+ return -EIO;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+ ASSERT(offset <= mp->m_super->s_maxbytes);
+ if ((xfs_fsize_t)offset + length > mp->m_super->s_maxbytes)
+ length = mp->m_super->s_maxbytes - offset;
+ offset_fsb = XFS_B_TO_FSBT(mp, offset);
+ end_fsb = XFS_B_TO_FSB(mp, offset + length);
+
+ error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
+ &nimaps, XFS_BMAPI_ENTIRE);
+ if (error) {
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ return error;
+ }
+
+ if ((flags & IOMAP_WRITE) && imap_needs_alloc(&imap, nimaps)) {
+ /*
+ * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
+	 * pages to keep the chunks of work done here somewhat symmetric
+ * with the work writeback does. This is a completely arbitrary
+ * number pulled out of thin air as a best guess for initial
+ * testing.
+ *
+	 * Note that the value needs to be less than 32 bits wide until
+ * the lower level functions are updated.
+ */
+ length = min_t(loff_t, length, 1024 * PAGE_SIZE);
+ if (xfs_get_extsz_hint(ip)) {
+ /*
+ * xfs_iomap_write_direct() expects the shared lock. It
+ * is unlocked on return.
+ */
+ xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
+ error = xfs_iomap_write_direct(ip, offset, length, &imap,
+ nimaps);
+ } else {
+ error = xfs_iomap_write_delay(ip, offset, length, &imap);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ }
+
+ if (error)
+ return error;
+
+ trace_xfs_iomap_alloc(ip, offset, length, 0, &imap);
+ xfs_bmbt_to_iomap(ip, iomap, &imap);
+ } else if (nimaps) {
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ trace_xfs_iomap_found(ip, offset, length, 0, &imap);
+ xfs_bmbt_to_iomap(ip, iomap, &imap);
+ } else {
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ trace_xfs_iomap_not_found(ip, offset, length, 0, &imap);
+ iomap->blkno = IOMAP_NULL_BLOCK;
+ iomap->type = IOMAP_HOLE;
+ iomap->offset = offset;
+ iomap->length = length;
+ }
+
+ return 0;
+}
+
+static int
+xfs_file_iomap_end_delalloc(
+ struct xfs_inode *ip,
+ loff_t offset,
+ loff_t length,
+ ssize_t written)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ xfs_fileoff_t start_fsb;
+ xfs_fileoff_t end_fsb;
+ int error = 0;
+
+ start_fsb = XFS_B_TO_FSB(mp, offset + written);
+ end_fsb = XFS_B_TO_FSB(mp, offset + length);
+
+ /*
+ * Trim back delalloc blocks if we didn't manage to write the whole
+ * range reserved.
+ *
+ * We don't need to care about racing delalloc as we hold i_mutex
+ * across the reserve/allocate/unreserve calls. If there are delalloc
+ * blocks in the range, they are ours.
+ */
+ if (start_fsb < end_fsb) {
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
+ end_fsb - start_fsb);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
+ if (error && !XFS_FORCED_SHUTDOWN(mp)) {
+ xfs_alert(mp, "%s: unable to clean up ino %lld",
+ __func__, ip->i_ino);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+static int
+xfs_file_iomap_end(
+ struct inode *inode,
+ loff_t offset,
+ loff_t length,
+ ssize_t written,
+ unsigned flags,
+ struct iomap *iomap)
+{
+ if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
+ return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
+ length, written);
+ return 0;
+}
+
+struct iomap_ops xfs_iomap_ops = {
+ .iomap_begin = xfs_file_iomap_begin,
+ .iomap_end = xfs_file_iomap_end,
+};
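
These two callbacks implement the generic iomap contract: the core asks ->iomap_begin for a mapping covering a range, performs the I/O, then reports back through ->iomap_end how much was actually done so the filesystem can clean up (here, punching out unused delalloc blocks). The sketch below models how a generic helper might drive such a pair of callbacks; it is an illustration of the contract, not the kernel's iomap core.

#include <stdio.h>

struct map { long offset, length; };

struct map_ops {
	int (*begin)(long offset, long length, struct map *m);
	int (*end)(long offset, long length, long written, struct map *m);
};

/* Drive the callbacks over [pos, pos + len), one mapping at a time. */
static int apply(const struct map_ops *ops, long pos, long len)
{
	while (len > 0) {
		struct map m;
		long written;
		int error = ops->begin(pos, len, &m);

		if (error)
			return error;
		written = m.length;	/* "do the I/O"; may be short in real life */
		error = ops->end(pos, m.length, written, &m);
		if (error)
			return error;
		pos += written;
		len -= written;
	}
	return 0;
}

static int demo_begin(long off, long len, struct map *m)
{
	m->offset = off;
	m->length = len > 4096 ? 4096 : len;	/* cap, like the 1024-page cap */
	return 0;
}

static int demo_end(long off, long len, long written, struct map *m)
{
	(void)m;
	if (written < len)	/* short write: clean up the unused tail */
		printf("cleanup %ld unused bytes at %ld\n", len - written, off);
	return 0;
}

int main(void)
{
	const struct map_ops ops = { demo_begin, demo_end };

	return apply(&ops, 0, 10000);
}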
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 8688e663d744..e066d045e2ff 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -18,6 +18,8 @@
#ifndef __XFS_IOMAP_H__
#define __XFS_IOMAP_H__
+#include <linux/iomap.h>
+
struct xfs_inode;
struct xfs_bmbt_irec;
@@ -29,4 +31,9 @@ int xfs_iomap_write_allocate(struct xfs_inode *, xfs_off_t,
struct xfs_bmbt_irec *);
int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t);
+void xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
+ struct xfs_bmbt_irec *);
+
+extern struct iomap_ops xfs_iomap_ops;
+
#endif /* __XFS_IOMAP_H__*/
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index c5d4eba6972e..ab820f84ed50 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -38,12 +38,13 @@
#include "xfs_dir2.h"
#include "xfs_trans_space.h"
#include "xfs_pnfs.h"
+#include "xfs_iomap.h"
#include <linux/capability.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/security.h>
-#include <linux/fiemap.h>
+#include <linux/iomap.h>
#include <linux/slab.h>
/*
@@ -801,20 +802,30 @@ xfs_setattr_size(
return error;
/*
+ * Wait for all direct I/O to complete.
+ */
+ inode_dio_wait(inode);
+
+ /*
* File data changes must be complete before we start the transaction to
* modify the inode. This needs to be done before joining the inode to
* the transaction because the inode cannot be unlocked once it is a
* part of the transaction.
*
- * Start with zeroing any data block beyond EOF that we may expose on
- * file extension.
+ * Start with zeroing any data beyond EOF that we may expose on file
+ * extension, or zeroing out the rest of the block on a downward
+ * truncate.
*/
if (newsize > oldsize) {
error = xfs_zero_eof(ip, newsize, oldsize, &did_zeroing);
- if (error)
- return error;
+ } else {
+ error = iomap_truncate_page(inode, newsize, &did_zeroing,
+ &xfs_iomap_ops);
}
+ if (error)
+ return error;
+
/*
* We are going to log the inode size change in this transaction so
* any previous writes that are beyond the on disk EOF and the new
@@ -823,17 +834,14 @@ xfs_setattr_size(
* problem. Note that this includes any block zeroing we did above;
* otherwise those blocks may not be zeroed after a crash.
*/
- if (newsize > ip->i_d.di_size &&
- (oldsize != ip->i_d.di_size || did_zeroing)) {
+ if (did_zeroing ||
+ (newsize > ip->i_d.di_size && oldsize != ip->i_d.di_size)) {
error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
ip->i_d.di_size, newsize);
if (error)
return error;
}
- /* Now wait for all direct I/O to complete. */
- inode_dio_wait(inode);
-
/*
* We've already locked out new page faults, so now we can safely remove
* pages from the page cache knowing they won't get refaulted until we
@@ -851,13 +859,6 @@ xfs_setattr_size(
* to hope that the caller sees ENOMEM and retries the truncate
* operation.
*/
- if (IS_DAX(inode))
- error = dax_truncate_page(inode, newsize, xfs_get_blocks_direct);
- else
- error = block_truncate_page(inode->i_mapping, newsize,
- xfs_get_blocks);
- if (error)
- return error;
truncate_setsize(inode, newsize);
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
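
The reordering in this function is about crash safety, and the resulting sequence is worth spelling out: drain direct I/O first, zero the exposed or partial range, flush the zeroed data if any, and only then truncate the page cache and log the size change. A stub-only sketch of that order, where each step stands in for the kernel call named in the comment:

#include <stdio.h>

static void step(const char *s) { printf("%s\n", s); }

static int setattr_size(long oldsize, long newsize)
{
	int did_zeroing = 1;	/* assume some zeroing happened */

	step("1. wait for direct I/O to drain");	/* inode_dio_wait */
	if (newsize > oldsize)
		step("2. zero data beyond the old EOF");	/* xfs_zero_eof */
	else
		step("2. zero the tail of the new last block"); /* iomap_truncate_page */
	if (did_zeroing)
		step("3. flush the zeroed range to disk"); /* filemap_write_and_wait_range */
	step("4. truncate the page cache");		/* truncate_setsize */
	step("5. log the new size in a transaction");
	return 0;
}

int main(void) { return setattr_size(8192, 4096); }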
@@ -998,51 +999,6 @@ xfs_vn_update_time(
return xfs_trans_commit(tp);
}
-#define XFS_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
-
-/*
- * Call fiemap helper to fill in user data.
- * Returns positive errors to xfs_getbmap.
- */
-STATIC int
-xfs_fiemap_format(
- void **arg,
- struct getbmapx *bmv,
- int *full)
-{
- int error;
- struct fiemap_extent_info *fieinfo = *arg;
- u32 fiemap_flags = 0;
- u64 logical, physical, length;
-
- /* Do nothing for a hole */
- if (bmv->bmv_block == -1LL)
- return 0;
-
- logical = BBTOB(bmv->bmv_offset);
- physical = BBTOB(bmv->bmv_block);
- length = BBTOB(bmv->bmv_length);
-
- if (bmv->bmv_oflags & BMV_OF_PREALLOC)
- fiemap_flags |= FIEMAP_EXTENT_UNWRITTEN;
- else if (bmv->bmv_oflags & BMV_OF_DELALLOC) {
- fiemap_flags |= (FIEMAP_EXTENT_DELALLOC |
- FIEMAP_EXTENT_UNKNOWN);
- physical = 0; /* no block yet */
- }
- if (bmv->bmv_oflags & BMV_OF_LAST)
- fiemap_flags |= FIEMAP_EXTENT_LAST;
-
- error = fiemap_fill_next_extent(fieinfo, logical, physical,
- length, fiemap_flags);
- if (error > 0) {
- error = 0;
- *full = 1; /* user array now full */
- }
-
- return error;
-}
-
STATIC int
xfs_vn_fiemap(
struct inode *inode,
@@ -1050,38 +1006,13 @@ xfs_vn_fiemap(
u64 start,
u64 length)
{
- xfs_inode_t *ip = XFS_I(inode);
- struct getbmapx bm;
int error;
- error = fiemap_check_flags(fieinfo, XFS_FIEMAP_FLAGS);
- if (error)
- return error;
-
- /* Set up bmap header for xfs internal routine */
- bm.bmv_offset = BTOBBT(start);
- /* Special case for whole file */
- if (length == FIEMAP_MAX_OFFSET)
- bm.bmv_length = -1LL;
- else
- bm.bmv_length = BTOBB(start + length) - bm.bmv_offset;
-
- /* We add one because in getbmap world count includes the header */
- bm.bmv_count = !fieinfo->fi_extents_max ? MAXEXTNUM :
- fieinfo->fi_extents_max + 1;
- bm.bmv_count = min_t(__s32, bm.bmv_count,
- (PAGE_SIZE * 16 / sizeof(struct getbmapx)));
- bm.bmv_iflags = BMV_IF_PREALLOC | BMV_IF_NO_HOLES;
- if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR)
- bm.bmv_iflags |= BMV_IF_ATTRFORK;
- if (!(fieinfo->fi_flags & FIEMAP_FLAG_SYNC))
- bm.bmv_iflags |= BMV_IF_DELALLOC;
-
- error = xfs_getbmap(ip, &bm, xfs_fiemap_format, fieinfo);
- if (error)
- return error;
+ xfs_ilock(XFS_I(inode), XFS_IOLOCK_SHARED);
+ error = iomap_fiemap(inode, fieinfo, start, length, &xfs_iomap_ops);
+ xfs_iunlock(XFS_I(inode), XFS_IOLOCK_SHARED);
- return 0;
+ return error;
}
STATIC int
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index a8192dc797dc..b8d64d520e12 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -328,13 +328,6 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
return x;
}
-/* ARM old ABI has some weird alignment/padding */
-#if defined(__arm__) && !defined(__ARM_EABI__)
-#define __arch_pack __attribute__((packed))
-#else
-#define __arch_pack
-#endif
-
#define ASSERT_ALWAYS(expr) \
(unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index bde02f1fba73..3b74fa011bb1 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -788,7 +788,7 @@ xfs_log_mount_cancel(
* As far as I know, there weren't any dependencies on the old behaviour.
*/
-int
+static int
xfs_log_unmount_write(xfs_mount_t *mp)
{
struct xlog *log = mp->m_log;
@@ -1036,7 +1036,7 @@ xfs_log_space_wake(
* there's no point in running a dummy transaction at this point because we
* can't start trying to idle the log until both the CIL and AIL are empty.
*/
-int
+static int
xfs_log_need_covered(xfs_mount_t *mp)
{
struct xlog *log = mp->m_log;
@@ -1177,7 +1177,7 @@ xlog_space_left(
* The log manager needs its own routine, in order to control what
* happens with the buffer after the write completes.
*/
-void
+static void
xlog_iodone(xfs_buf_t *bp)
{
struct xlog_in_core *iclog = bp->b_fspriv;
@@ -1302,7 +1302,7 @@ xfs_log_work_queue(
* disk. If there is nothing dirty, then we might need to cover the log to
* indicate that the filesystem is idle.
*/
-void
+static void
xfs_log_worker(
struct work_struct *work)
{
@@ -1415,7 +1415,7 @@ xlog_alloc_log(
*/
error = -ENOMEM;
bp = xfs_buf_alloc(mp->m_logdev_targp, XFS_BUF_DADDR_NULL,
- BTOBB(log->l_iclog_size), 0);
+ BTOBB(log->l_iclog_size), XBF_NO_IOACCT);
if (!bp)
goto out_free_log;
@@ -1454,7 +1454,8 @@ xlog_alloc_log(
prev_iclog = iclog;
bp = xfs_buf_get_uncached(mp->m_logdev_targp,
- BTOBB(log->l_iclog_size), 0);
+ BTOBB(log->l_iclog_size),
+ XBF_NO_IOACCT);
if (!bp)
goto out_free_iclog;
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 80ba0c047090..b5e71072fde5 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -163,12 +163,8 @@ int xfs_log_reserve(struct xfs_mount *mp,
__uint8_t clientid,
bool permanent);
int xfs_log_regrant(struct xfs_mount *mp, struct xlog_ticket *tic);
-int xfs_log_unmount_write(struct xfs_mount *mp);
void xfs_log_unmount(struct xfs_mount *mp);
int xfs_log_force_umount(struct xfs_mount *mp, int logerror);
-int xfs_log_need_covered(struct xfs_mount *mp);
-
-void xlog_iodone(struct xfs_buf *);
struct xlog_ticket *xfs_log_ticket_get(struct xlog_ticket *ticket);
void xfs_log_ticket_put(struct xlog_ticket *ticket);
@@ -178,7 +174,6 @@ void xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
bool xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);
void xfs_log_work_queue(struct xfs_mount *mp);
-void xfs_log_worker(struct work_struct *work);
void xfs_log_quiesce(struct xfs_mount *mp);
bool xfs_log_check_lsn(struct xfs_mount *, xfs_lsn_t);
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 5e54e7955ea6..a4ab192e1792 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -78,6 +78,157 @@ xlog_cil_init_post_recovery(
log->l_cilp->xc_ctx->sequence = 1;
}
+static inline int
+xlog_cil_iovec_space(
+ uint niovecs)
+{
+ return round_up((sizeof(struct xfs_log_vec) +
+ niovecs * sizeof(struct xfs_log_iovec)),
+ sizeof(uint64_t));
+}
+
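
The helper sizes the header area of a shadow buffer: the log vector struct plus the iovec array, rounded up to 8 bytes so the data region that follows starts 64-bit aligned. A worked example of the arithmetic; the struct sizes are deliberately odd stand-ins chosen to make the rounding visible, not the real kernel sizes.

#include <stdint.h>
#include <stdio.h>

#define round_up(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned log_vec_size = 44;	/* assumed sizeof(struct xfs_log_vec) */
	unsigned iovec_size = 12;	/* assumed sizeof(struct xfs_log_iovec) */
	unsigned niovecs;

	for (niovecs = 1; niovecs <= 4; niovecs++) {
		unsigned space = round_up(log_vec_size + niovecs * iovec_size,
					  (unsigned)sizeof(uint64_t));
		printf("%u iovecs -> %u header bytes, data starts aligned\n",
		       niovecs, space);
	}
	return 0;
}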
+/*
+ * Allocate or pin log vector buffers for CIL insertion.
+ *
+ * The CIL currently uses disposable buffers for copying a snapshot of the
+ * modified items into the log during a push. The biggest problem with this is
+ * the requirement to allocate the disposable buffer during the commit if:
+ * a) does not exist; or
+ * b) it is too small
+ *
+ * If we do this allocation within xlog_cil_insert_format_items(), it is done
+ * under the xc_ctx_lock, which means that a CIL push cannot occur during
+ * the memory allocation. This means that we have a potential deadlock situation
+ * under low memory conditions when we have lots of dirty metadata pinned in
+ * the CIL and we need a CIL commit to occur to free memory.
+ *
+ * To avoid this, we need to move the memory allocation outside the
+ * xc_ctx_lock, but because the log vector buffers are disposable, that opens
+ * up a TOCTOU race condition w.r.t. the CIL committing and removing the log
+ * vector buffers between the check and the formatting of the item into the
+ * log vector buffer within the xc_ctx_lock.
+ *
+ * Because the log vector buffer needs to be unchanged during the CIL push
+ * process, we cannot share the buffer between the transaction commit (which
+ * modifies the buffer) and the CIL push context that is writing the changes
+ * into the log. This means skipping preallocation of buffer space is
+ * unreliable, but we most definitely do not want to be allocating and freeing
+ * buffers unnecessarily during commits when overwrites can be done safely.
+ *
+ * The simplest solution to this problem is to allocate a shadow buffer when a
+ * log item is committed for the second time, and then to only use this buffer
+ * if necessary. The buffer can remain attached to the log item until such time
+ * as it is needed, and this is the buffer that is reallocated to match the size of
+ * the incoming modification. Then during the formatting of the item we can swap
+ * the active buffer with the new one if we can't reuse the existing buffer. We
+ * don't free the old buffer as it may be reused on the next modification if
+ * its size is right; otherwise we'll free and reallocate it at that point.
+ *
+ * This function builds a vector for the changes in each log item in the
+ * transaction. It then works out the length of the buffer needed for each log
+ * item, allocates them and attaches the vector to the log item in preparation
+ * for the formatting step which occurs under the xc_ctx_lock.
+ *
+ * While this means the memory footprint goes up, it avoids the repeated
+ * alloc/free pattern that repeated modifications of an item would otherwise
+ * cause, and hence minimises the CPU overhead of such behaviour.
+ */
+static void
+xlog_cil_alloc_shadow_bufs(
+ struct xlog *log,
+ struct xfs_trans *tp)
+{
+ struct xfs_log_item_desc *lidp;
+
+ list_for_each_entry(lidp, &tp->t_items, lid_trans) {
+ struct xfs_log_item *lip = lidp->lid_item;
+ struct xfs_log_vec *lv;
+ int niovecs = 0;
+ int nbytes = 0;
+ int buf_size;
+ bool ordered = false;
+
+ /* Skip items which aren't dirty in this transaction. */
+ if (!(lidp->lid_flags & XFS_LID_DIRTY))
+ continue;
+
+ /* get number of vecs and size of data to be stored */
+ lip->li_ops->iop_size(lip, &niovecs, &nbytes);
+
+ /*
+ * Ordered items need to be tracked but we do not wish to write
+ * them. We need a logvec to track the object, but we do not
+ * need an iovec or buffer to be allocated for copying data.
+ */
+ if (niovecs == XFS_LOG_VEC_ORDERED) {
+ ordered = true;
+ niovecs = 0;
+ nbytes = 0;
+ }
+
+ /*
+ * We 64-bit align the length of each iovec so that the start
+ * of the next one is naturally aligned. We'll need to
+ * account for that slack space here. Then round nbytes up
+ * to 64-bit alignment so that the initial buffer alignment is
+ * easy to calculate and verify.
+ */
+ nbytes += niovecs * sizeof(uint64_t);
+ nbytes = round_up(nbytes, sizeof(uint64_t));
+
+ /*
+ * The data buffer needs to start 64-bit aligned, so round up
+ * that space to ensure we can align it appropriately and not
+ * overrun the buffer.
+ */
+ buf_size = nbytes + xlog_cil_iovec_space(niovecs);
+
+ /*
+ * if we have no shadow buffer, or it is too small, we need to
+ * reallocate it.
+ */
+ if (!lip->li_lv_shadow ||
+ buf_size > lip->li_lv_shadow->lv_size) {
+
+ /*
+ * We free and allocate here as a realloc would copy
+			 * unnecessary data. We don't use kmem_zalloc() for the
+ * same reason - we don't need to zero the data area in
+ * the buffer, only the log vector header and the iovec
+ * storage.
+ */
+ kmem_free(lip->li_lv_shadow);
+
+ lv = kmem_alloc(buf_size, KM_SLEEP|KM_NOFS);
+ memset(lv, 0, xlog_cil_iovec_space(niovecs));
+
+ lv->lv_item = lip;
+ lv->lv_size = buf_size;
+ if (ordered)
+ lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
+ else
+ lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
+ lip->li_lv_shadow = lv;
+ } else {
+ /* same or smaller, optimise common overwrite case */
+ lv = lip->li_lv_shadow;
+ if (ordered)
+ lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
+ else
+ lv->lv_buf_len = 0;
+ lv->lv_bytes = 0;
+ lv->lv_next = NULL;
+ }
+
+ /* Ensure the lv is set up according to ->iop_size */
+ lv->lv_niovecs = niovecs;
+
+ /* The allocated data region lies beyond the iovec region */
+ lv->lv_buf = (char *)lv + xlog_cil_iovec_space(niovecs);
+ }
+
+}
+
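
The scheme above is a general answer to "never allocate while holding a lock the reclaim path may need": size and (re)allocate a per-item spare before taking the lock, and only swap it in under the lock if the active buffer cannot be reused. A user-space sketch of that two-phase pattern with invented names:

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct item {
	char	*buf;		/* buffer in active use */
	size_t	buf_size;
	char	*shadow;	/* preallocated spare */
	size_t	shadow_size;
};

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;

/* Phase 1: lock not held, so allocation is free to block or reclaim. */
static void alloc_shadow(struct item *ip, size_t need)
{
	if (!ip->shadow || ip->shadow_size < need) {
		free(ip->shadow);	/* not realloc: old contents are dead */
		ip->shadow = malloc(need);
		ip->shadow_size = need;
	}
}

/* Phase 2: under the lock, where no allocation is allowed. */
static void format_item(struct item *ip, const char *data, size_t len)
{
	pthread_mutex_lock(&ctx_lock);
	if (!ip->buf || ip->buf_size < len) {
		/* can't reuse the active buffer: swap in the spare */
		char *tbuf = ip->buf;
		size_t tsize = ip->buf_size;

		ip->buf = ip->shadow;
		ip->buf_size = ip->shadow_size;
		ip->shadow = tbuf;	/* keep the old buffer for next time */
		ip->shadow_size = tsize;
	}
	memcpy(ip->buf, data, len);
	pthread_mutex_unlock(&ctx_lock);
}

int main(void)
{
	struct item it = { 0 };

	alloc_shadow(&it, 32);		/* before taking the lock */
	format_item(&it, "hello", 6);	/* under the lock, no allocation */
	free(it.buf);
	free(it.shadow);
	return 0;
}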
/*
* Prepare the log item for insertion into the CIL. Calculate the difference in
* log space and vectors it will consume, and if it is a new item pin it as
@@ -100,16 +251,19 @@ xfs_cil_prepare_item(
/*
* If there is no old LV, this is the first time we've seen the item in
* this CIL context and so we need to pin it. If we are replacing the
- * old_lv, then remove the space it accounts for and free it.
+ * old_lv, then remove the space it accounts for and make it the shadow
+ * buffer for later freeing. In both cases we are now switching to the
+	 * shadow buffer, so update the pointer to it appropriately.
*/
- if (!old_lv)
+ if (!old_lv) {
lv->lv_item->li_ops->iop_pin(lv->lv_item);
- else if (old_lv != lv) {
+ lv->lv_item->li_lv_shadow = NULL;
+ } else if (old_lv != lv) {
ASSERT(lv->lv_buf_len != XFS_LOG_VEC_ORDERED);
*diff_len -= old_lv->lv_bytes;
*diff_iovecs -= old_lv->lv_niovecs;
- kmem_free(old_lv);
+ lv->lv_item->li_lv_shadow = old_lv;
}
/* attach new log vector to log item */
@@ -133,11 +287,13 @@ xfs_cil_prepare_item(
* write it out asynchronously without needing to relock the object that was
* modified at the time it gets written into the iclog.
*
- * This function builds a vector for the changes in each log item in the
- * transaction. It then works out the length of the buffer needed for each log
- * item, allocates them and formats the vector for the item into the buffer.
- * The buffer is then attached to the log item are then inserted into the
- * Committed Item List for tracking until the next checkpoint is written out.
+ * This function takes the prepared log vectors attached to each log item, and
+ * formats the changes into the log vector buffer. The buffer it uses is
+ * dependent on the current state of the vector in the CIL - the shadow lv is
+ * guaranteed to be large enough for the current modification, but we will only
+ * use that if we can't reuse the existing lv. If we can't reuse the existing
+ * lv, then simply swap it out for the shadow lv. We don't free it - that is
+ * done lazily either by the next modification or the freeing of the log item.
*
* We don't set up region headers during this process; we simply copy the
* regions into the flat buffer. We can do this because we still have to do a
@@ -170,59 +326,29 @@ xlog_cil_insert_format_items(
list_for_each_entry(lidp, &tp->t_items, lid_trans) {
struct xfs_log_item *lip = lidp->lid_item;
struct xfs_log_vec *lv;
- struct xfs_log_vec *old_lv;
- int niovecs = 0;
- int nbytes = 0;
- int buf_size;
+ struct xfs_log_vec *old_lv = NULL;
+ struct xfs_log_vec *shadow;
bool ordered = false;
/* Skip items which aren't dirty in this transaction. */
if (!(lidp->lid_flags & XFS_LID_DIRTY))
continue;
- /* get number of vecs and size of data to be stored */
- lip->li_ops->iop_size(lip, &niovecs, &nbytes);
-
- /* Skip items that do not have any vectors for writing */
- if (!niovecs)
- continue;
-
/*
- * Ordered items need to be tracked but we do not wish to write
- * them. We need a logvec to track the object, but we do not
- * need an iovec or buffer to be allocated for copying data.
+ * The formatting size information is already attached to
+ * the shadow lv on the log item.
*/
- if (niovecs == XFS_LOG_VEC_ORDERED) {
+ shadow = lip->li_lv_shadow;
+ if (shadow->lv_buf_len == XFS_LOG_VEC_ORDERED)
ordered = true;
- niovecs = 0;
- nbytes = 0;
- }
- /*
- * We 64-bit align the length of each iovec so that the start
- * of the next one is naturally aligned. We'll need to
- * account for that slack space here. Then round nbytes up
- * to 64-bit alignment so that the initial buffer alignment is
- * easy to calculate and verify.
- */
- nbytes += niovecs * sizeof(uint64_t);
- nbytes = round_up(nbytes, sizeof(uint64_t));
-
- /* grab the old item if it exists for reservation accounting */
- old_lv = lip->li_lv;
-
- /*
- * The data buffer needs to start 64-bit aligned, so round up
- * that space to ensure we can align it appropriately and not
- * overrun the buffer.
- */
- buf_size = nbytes +
- round_up((sizeof(struct xfs_log_vec) +
- niovecs * sizeof(struct xfs_log_iovec)),
- sizeof(uint64_t));
+ /* Skip items that do not have any vectors for writing */
+ if (!shadow->lv_niovecs && !ordered)
+ continue;
/* compare to existing item size */
- if (lip->li_lv && buf_size <= lip->li_lv->lv_size) {
+ old_lv = lip->li_lv;
+ if (lip->li_lv && shadow->lv_size <= lip->li_lv->lv_size) {
/* same or smaller, optimise common overwrite case */
lv = lip->li_lv;
lv->lv_next = NULL;
@@ -236,32 +362,29 @@ xlog_cil_insert_format_items(
*/
*diff_iovecs -= lv->lv_niovecs;
*diff_len -= lv->lv_bytes;
+
+ /* Ensure the lv is set up according to ->iop_size */
+ lv->lv_niovecs = shadow->lv_niovecs;
+
+ /* reset the lv buffer information for new formatting */
+ lv->lv_buf_len = 0;
+ lv->lv_bytes = 0;
+ lv->lv_buf = (char *)lv +
+ xlog_cil_iovec_space(lv->lv_niovecs);
} else {
- /* allocate new data chunk */
- lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS);
+ /* switch to shadow buffer! */
+ lv = shadow;
lv->lv_item = lip;
- lv->lv_size = buf_size;
if (ordered) {
/* track as an ordered logvec */
ASSERT(lip->li_lv == NULL);
- lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
goto insert;
}
- lv->lv_iovecp = (struct xfs_log_iovec *)&lv[1];
}
- /* Ensure the lv is set up according to ->iop_size */
- lv->lv_niovecs = niovecs;
-
- /* The allocated data region lies beyond the iovec region */
- lv->lv_buf_len = 0;
- lv->lv_bytes = 0;
- lv->lv_buf = (char *)lv + buf_size - nbytes;
ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
-
lip->li_ops->iop_format(lip, lv);
insert:
- ASSERT(lv->lv_buf_len <= nbytes);
xfs_cil_prepare_item(log, lv, old_lv, diff_len, diff_iovecs);
}
}
@@ -783,6 +906,13 @@ xfs_log_commit_cil(
struct xlog *log = mp->m_log;
struct xfs_cil *cil = log->l_cilp;
+ /*
+ * Do all necessary memory allocation before we lock the CIL.
+ * This ensures the allocation does not deadlock with a CIL
+ * push in memory reclaim (e.g. from kswapd).
+ */
+ xlog_cil_alloc_shadow_bufs(log, tp);
+
/* lock out background commit */
down_read(&cil->xc_ctx_lock);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 835997843846..e8638fd2c0c3 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -43,6 +43,7 @@
#include "xfs_bmap_btree.h"
#include "xfs_error.h"
#include "xfs_dir2.h"
+#include "xfs_rmap_item.h"
#define BLK_AVG(blk1, blk2) ((blk1+blk2) >> 1)
@@ -1911,6 +1912,8 @@ xlog_recover_reorder_trans(
case XFS_LI_QUOTAOFF:
case XFS_LI_EFD:
case XFS_LI_EFI:
+ case XFS_LI_RUI:
+ case XFS_LI_RUD:
trace_xfs_log_recover_item_reorder_tail(log,
trans, item, pass);
list_move_tail(&item->ri_list, &inode_list);
@@ -2228,6 +2231,7 @@ xlog_recover_get_buf_lsn(
case XFS_ABTC_CRC_MAGIC:
case XFS_ABTB_MAGIC:
case XFS_ABTC_MAGIC:
+ case XFS_RMAP_CRC_MAGIC:
case XFS_IBT_CRC_MAGIC:
case XFS_IBT_MAGIC: {
struct xfs_btree_block *btb = blk;
@@ -2396,6 +2400,9 @@ xlog_recover_validate_buf_type(
case XFS_BMAP_MAGIC:
bp->b_ops = &xfs_bmbt_buf_ops;
break;
+ case XFS_RMAP_CRC_MAGIC:
+ bp->b_ops = &xfs_rmapbt_buf_ops;
+ break;
default:
xfs_warn(mp, "Bad btree block magic!");
ASSERT(0);
@@ -3415,6 +3422,99 @@ xlog_recover_efd_pass2(
}
/*
+ * This routine is called to create an in-core extent rmap update
+ * item from the rui format structure which was logged on disk.
+ * It allocates an in-core rui, copies the extents from the format
+ * structure into it, and adds the rui to the AIL with the given
+ * LSN.
+ */
+STATIC int
+xlog_recover_rui_pass2(
+ struct xlog *log,
+ struct xlog_recover_item *item,
+ xfs_lsn_t lsn)
+{
+ int error;
+ struct xfs_mount *mp = log->l_mp;
+ struct xfs_rui_log_item *ruip;
+ struct xfs_rui_log_format *rui_formatp;
+
+ rui_formatp = item->ri_buf[0].i_addr;
+
+ ruip = xfs_rui_init(mp, rui_formatp->rui_nextents);
+ error = xfs_rui_copy_format(&item->ri_buf[0], &ruip->rui_format);
+ if (error) {
+ xfs_rui_item_free(ruip);
+ return error;
+ }
+ atomic_set(&ruip->rui_next_extent, rui_formatp->rui_nextents);
+
+ spin_lock(&log->l_ailp->xa_lock);
+ /*
+	 * The RUI has two references. One for the RUD and one for the RUI to
+ * it makes it into the AIL. Insert the RUI into the AIL directly and
+ * drop the RUI reference. Note that xfs_trans_ail_update() drops the
+ * AIL lock.
+ */
+ xfs_trans_ail_update(log->l_ailp, &ruip->rui_item, lsn);
+ xfs_rui_release(ruip);
+ return 0;
+}
+
+
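
The two-reference scheme in the comment above is a standard handoff: create the object with its count already covering both the list that will hold it and the later "done" item, then drop the creator's reference. A small refcounting sketch of that handoff (user-space analogue, not the AIL code):

#include <stdio.h>
#include <stdlib.h>

struct intent {
	int refcount;
	struct intent *next;
};

static struct intent *list_head;

static void intent_release(struct intent *ri)
{
	if (--ri->refcount == 0) {
		list_head = ri->next;	/* a real AIL unlinks it properly */
		free(ri);
		printf("intent freed\n");
	}
}

int main(void)
{
	struct intent *ri = calloc(1, sizeof(*ri));

	ri->refcount = 2;	/* one for the list, one for the "done" item */
	ri->next = list_head;	/* "insert into the AIL" */
	list_head = ri;

	intent_release(ri);	/* drop the creator's reference immediately */
	intent_release(ri);	/* later: the matching done item's reference */
	return 0;
}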
+/*
+ * This routine is called when an RUD format structure is found in a committed
+ * transaction in the log. Its purpose is to cancel the corresponding RUI if it
+ * was still in the log. To do this it searches the AIL for the RUI with an id
+ * equal to that in the RUD format structure. If we find it we drop the RUD
+ * reference, which removes the RUI from the AIL and frees it.
+ */
+STATIC int
+xlog_recover_rud_pass2(
+ struct xlog *log,
+ struct xlog_recover_item *item)
+{
+ struct xfs_rud_log_format *rud_formatp;
+ struct xfs_rui_log_item *ruip = NULL;
+ struct xfs_log_item *lip;
+ __uint64_t rui_id;
+ struct xfs_ail_cursor cur;
+ struct xfs_ail *ailp = log->l_ailp;
+
+ rud_formatp = item->ri_buf[0].i_addr;
+ ASSERT(item->ri_buf[0].i_len == sizeof(struct xfs_rud_log_format));
+ rui_id = rud_formatp->rud_rui_id;
+
+ /*
+ * Search for the RUI with the id in the RUD format structure in the
+ * AIL.
+ */
+ spin_lock(&ailp->xa_lock);
+ lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
+ while (lip != NULL) {
+ if (lip->li_type == XFS_LI_RUI) {
+ ruip = (struct xfs_rui_log_item *)lip;
+ if (ruip->rui_format.rui_id == rui_id) {
+ /*
+ * Drop the RUD reference to the RUI. This
+ * removes the RUI from the AIL and frees it.
+ */
+ spin_unlock(&ailp->xa_lock);
+ xfs_rui_release(ruip);
+ spin_lock(&ailp->xa_lock);
+ break;
+ }
+ }
+ lip = xfs_trans_ail_cursor_next(ailp, &cur);
+ }
+
+ xfs_trans_ail_cursor_done(&cur);
+ spin_unlock(&ailp->xa_lock);
+
+ return 0;
+}
+
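
The RUD handler is a search-and-cancel walk: scan the pending intents for the id recorded in the done item and release the match. A user-space analogue using a singly linked list and an invented id field:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct intent {
	uint64_t id;
	struct intent *next;
};

/* Unlink and free the intent whose id matches; return 1 if found. */
static int cancel_intent(struct intent **head, uint64_t id)
{
	struct intent **pp;

	for (pp = head; *pp; pp = &(*pp)->next) {
		if ((*pp)->id == id) {
			struct intent *victim = *pp;

			*pp = victim->next;
			free(victim);
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	struct intent *head = NULL;
	uint64_t id;

	for (id = 1; id <= 3; id++) {
		struct intent *ri = malloc(sizeof(*ri));

		ri->id = id;
		ri->next = head;
		head = ri;
	}
	printf("cancel 2 -> %d\n", cancel_intent(&head, 2));
	printf("cancel 9 -> %d\n", cancel_intent(&head, 9));
	while (head)	/* tidy up the rest */
		cancel_intent(&head, head->id);
	return 0;
}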
+/*
* This routine is called when an inode create format structure is found in a
 * committed transaction in the log. Its purpose is to initialise the inodes
* being allocated on disk. This requires us to get inode cluster buffers that
@@ -3639,6 +3739,8 @@ xlog_recover_ra_pass2(
case XFS_LI_EFI:
case XFS_LI_EFD:
case XFS_LI_QUOTAOFF:
+ case XFS_LI_RUI:
+ case XFS_LI_RUD:
default:
break;
}
@@ -3662,6 +3764,8 @@ xlog_recover_commit_pass1(
case XFS_LI_EFD:
case XFS_LI_DQUOT:
case XFS_LI_ICREATE:
+ case XFS_LI_RUI:
+ case XFS_LI_RUD:
/* nothing to do in pass 1 */
return 0;
default:
@@ -3692,6 +3796,10 @@ xlog_recover_commit_pass2(
return xlog_recover_efi_pass2(log, item, trans->r_lsn);
case XFS_LI_EFD:
return xlog_recover_efd_pass2(log, item);
+ case XFS_LI_RUI:
+ return xlog_recover_rui_pass2(log, item, trans->r_lsn);
+ case XFS_LI_RUD:
+ return xlog_recover_rud_pass2(log, item);
case XFS_LI_DQUOT:
return xlog_recover_dquot_pass2(log, buffer_list, item,
trans->r_lsn);
@@ -4164,126 +4272,156 @@ xlog_recover_process_data(
return 0;
}
-/*
- * Process an extent free intent item that was recovered from
- * the log. We need to free the extents that it describes.
- */
+/* Recover the EFI if necessary. */
STATIC int
xlog_recover_process_efi(
- xfs_mount_t *mp,
- xfs_efi_log_item_t *efip)
+ struct xfs_mount *mp,
+ struct xfs_ail *ailp,
+ struct xfs_log_item *lip)
{
- xfs_efd_log_item_t *efdp;
- xfs_trans_t *tp;
- int i;
- int error = 0;
- xfs_extent_t *extp;
- xfs_fsblock_t startblock_fsb;
-
- ASSERT(!test_bit(XFS_EFI_RECOVERED, &efip->efi_flags));
+ struct xfs_efi_log_item *efip;
+ int error;
/*
- * First check the validity of the extents described by the
- * EFI. If any are bad, then assume that all are bad and
- * just toss the EFI.
+ * Skip EFIs that we've already processed.
*/
- for (i = 0; i < efip->efi_format.efi_nextents; i++) {
- extp = &(efip->efi_format.efi_extents[i]);
- startblock_fsb = XFS_BB_TO_FSB(mp,
- XFS_FSB_TO_DADDR(mp, extp->ext_start));
- if ((startblock_fsb == 0) ||
- (extp->ext_len == 0) ||
- (startblock_fsb >= mp->m_sb.sb_dblocks) ||
- (extp->ext_len >= mp->m_sb.sb_agblocks)) {
- /*
- * This will pull the EFI from the AIL and
- * free the memory associated with it.
- */
- set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
- xfs_efi_release(efip);
- return -EIO;
- }
- }
+ efip = container_of(lip, struct xfs_efi_log_item, efi_item);
+ if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
+ return 0;
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
- if (error)
- return error;
- efdp = xfs_trans_get_efd(tp, efip, efip->efi_format.efi_nextents);
+ spin_unlock(&ailp->xa_lock);
+ error = xfs_efi_recover(mp, efip);
+ spin_lock(&ailp->xa_lock);
- for (i = 0; i < efip->efi_format.efi_nextents; i++) {
- extp = &(efip->efi_format.efi_extents[i]);
- error = xfs_trans_free_extent(tp, efdp, extp->ext_start,
- extp->ext_len);
- if (error)
- goto abort_error;
+ return error;
+}
- }
+/* Release the EFI since we're cancelling everything. */
+STATIC void
+xlog_recover_cancel_efi(
+ struct xfs_mount *mp,
+ struct xfs_ail *ailp,
+ struct xfs_log_item *lip)
+{
+ struct xfs_efi_log_item *efip;
- set_bit(XFS_EFI_RECOVERED, &efip->efi_flags);
- error = xfs_trans_commit(tp);
- return error;
+ efip = container_of(lip, struct xfs_efi_log_item, efi_item);
+
+ spin_unlock(&ailp->xa_lock);
+ xfs_efi_release(efip);
+ spin_lock(&ailp->xa_lock);
+}
+
+/* Recover the RUI if necessary. */
+STATIC int
+xlog_recover_process_rui(
+ struct xfs_mount *mp,
+ struct xfs_ail *ailp,
+ struct xfs_log_item *lip)
+{
+ struct xfs_rui_log_item *ruip;
+ int error;
+
+ /*
+ * Skip RUIs that we've already processed.
+ */
+ ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
+ if (test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags))
+ return 0;
+
+ spin_unlock(&ailp->xa_lock);
+ error = xfs_rui_recover(mp, ruip);
+ spin_lock(&ailp->xa_lock);
-abort_error:
- xfs_trans_cancel(tp);
return error;
}
+/* Release the RUI since we're cancelling everything. */
+STATIC void
+xlog_recover_cancel_rui(
+ struct xfs_mount *mp,
+ struct xfs_ail *ailp,
+ struct xfs_log_item *lip)
+{
+ struct xfs_rui_log_item *ruip;
+
+ ruip = container_of(lip, struct xfs_rui_log_item, rui_item);
+
+ spin_unlock(&ailp->xa_lock);
+ xfs_rui_release(ruip);
+ spin_lock(&ailp->xa_lock);
+}
+
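
Both cancel helpers share one locking discipline: the AIL lock cannot be held across a release call that may sleep or re-take the same lock, so it is dropped around the call and reacquired afterwards, and the caller's list walk must tolerate the list changing in between. A sketch of that unlock, call, relock shape, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ail_lock = PTHREAD_MUTEX_INITIALIZER;

static void release_item(void)
{
	/* may sleep, or may itself take ail_lock to unlink the item */
	pthread_mutex_lock(&ail_lock);
	printf("item unlinked and freed\n");
	pthread_mutex_unlock(&ail_lock);
}

static void cancel_one(void)
{
	pthread_mutex_lock(&ail_lock);
	/* ... found an item to cancel while walking the list ... */
	pthread_mutex_unlock(&ail_lock);	/* can't hold it across the call */
	release_item();
	pthread_mutex_lock(&ail_lock);		/* resume; list may have changed */
	pthread_mutex_unlock(&ail_lock);
}

int main(void)
{
	cancel_one();
	return 0;
}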
+/* Is this log item a deferred action intent? */
+static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
+{
+ switch (lip->li_type) {
+ case XFS_LI_EFI:
+ case XFS_LI_RUI:
+ return true;
+ default:
+ return false;
+ }
+}
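
The predicate above, plus the two type switches further down, is the entire dispatch surface for recovery: supporting another intent type later only takes one more XFS_LI_* case in each spot. A minimal sketch of such an extension, where XFS_LI_NEWI and xlog_recover_process_newi() are purely hypothetical names:

	/* Sketch only: XFS_LI_NEWI does not exist in this patch. */
	static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
	{
		switch (lip->li_type) {
		case XFS_LI_EFI:
		case XFS_LI_RUI:
		case XFS_LI_NEWI:	/* recognize the new intent */
			return true;
		default:
			return false;
		}
	}

Matching XFS_LI_NEWI cases would then go into the switches in xlog_recover_process_intents() and xlog_recover_cancel_intents().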
+
/*
- * When this is called, all of the EFIs which did not have
- * corresponding EFDs should be in the AIL. What we do now
- * is free the extents associated with each one.
+ * When this is called, all of the log intent items which did not have
+ * corresponding log done items should be in the AIL. What we do now
+ * is update the data structures associated with each one.
*
- * Since we process the EFIs in normal transactions, they
- * will be removed at some point after the commit. This prevents
- * us from just walking down the list processing each one.
- * We'll use a flag in the EFI to skip those that we've already
- * processed and use the AIL iteration mechanism's generation
- * count to try to speed this up at least a bit.
+ * Since we process the log intent items in normal transactions, they
+ * will be removed at some point after the commit. This prevents us
+ * from just walking down the list processing each one. We'll use a
+ * flag in the intent item to skip those that we've already processed
+ * and use the AIL iteration mechanism's generation count to try to
+ * speed this up at least a bit.
*
- * When we start, we know that the EFIs are the only things in
- * the AIL. As we process them, however, other items are added
- * to the AIL. Since everything added to the AIL must come after
- * everything already in the AIL, we stop processing as soon as
- * we see something other than an EFI in the AIL.
+ * When we start, we know that the intents are the only things in the
+ * AIL. As we process them, however, other items are added to the
+ * AIL; since everything added must come after everything already
+ * there, we stop processing as soon as we see a non-intent item.
*/
STATIC int
-xlog_recover_process_efis(
+xlog_recover_process_intents(
struct xlog *log)
{
struct xfs_log_item *lip;
- struct xfs_efi_log_item *efip;
int error = 0;
struct xfs_ail_cursor cur;
struct xfs_ail *ailp;
+ xfs_lsn_t last_lsn;
ailp = log->l_ailp;
spin_lock(&ailp->xa_lock);
lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
+ last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
while (lip != NULL) {
/*
- * We're done when we see something other than an EFI.
- * There should be no EFIs left in the AIL now.
+ * We're done when we see something other than an intent.
+ * There should be no intents left in the AIL now.
*/
- if (lip->li_type != XFS_LI_EFI) {
+ if (!xlog_item_is_intent(lip)) {
#ifdef DEBUG
for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
- ASSERT(lip->li_type != XFS_LI_EFI);
+ ASSERT(!xlog_item_is_intent(lip));
#endif
break;
}
/*
- * Skip EFIs that we've already processed.
+ * We should never see a redo item with an LSN higher than
+ * the last transaction we found in the log at the start
+ * of recovery.
*/
- efip = container_of(lip, struct xfs_efi_log_item, efi_item);
- if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags)) {
- lip = xfs_trans_ail_cursor_next(ailp, &cur);
- continue;
- }
+ ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
- spin_unlock(&ailp->xa_lock);
- error = xlog_recover_process_efi(log->l_mp, efip);
- spin_lock(&ailp->xa_lock);
+ switch (lip->li_type) {
+ case XFS_LI_EFI:
+ error = xlog_recover_process_efi(log->l_mp, ailp, lip);
+ break;
+ case XFS_LI_RUI:
+ error = xlog_recover_process_rui(log->l_mp, ailp, lip);
+ break;
+ }
if (error)
goto out;
lip = xfs_trans_ail_cursor_next(ailp, &cur);
@@ -4295,15 +4433,14 @@ out:
}
/*
- * A cancel occurs when the mount has failed and we're bailing out. Release all
- * pending EFIs so they don't pin the AIL.
+ * A cancel occurs when the mount has failed and we're bailing out.
+ * Release all pending log intent items so they don't pin the AIL.
*/
STATIC int
-xlog_recover_cancel_efis(
+xlog_recover_cancel_intents(
struct xlog *log)
{
struct xfs_log_item *lip;
- struct xfs_efi_log_item *efip;
int error = 0;
struct xfs_ail_cursor cur;
struct xfs_ail *ailp;
@@ -4313,22 +4450,25 @@ xlog_recover_cancel_efis(
lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
while (lip != NULL) {
/*
- * We're done when we see something other than an EFI.
- * There should be no EFIs left in the AIL now.
+ * We're done when we see something other than an intent.
+ * There should be no intents left in the AIL now.
*/
- if (lip->li_type != XFS_LI_EFI) {
+ if (!xlog_item_is_intent(lip)) {
#ifdef DEBUG
for (; lip; lip = xfs_trans_ail_cursor_next(ailp, &cur))
- ASSERT(lip->li_type != XFS_LI_EFI);
+ ASSERT(!xlog_item_is_intent(lip));
#endif
break;
}
- efip = container_of(lip, struct xfs_efi_log_item, efi_item);
-
- spin_unlock(&ailp->xa_lock);
- xfs_efi_release(efip);
- spin_lock(&ailp->xa_lock);
+ switch (lip->li_type) {
+ case XFS_LI_EFI:
+ xlog_recover_cancel_efi(log->l_mp, ailp, lip);
+ break;
+ case XFS_LI_RUI:
+ xlog_recover_cancel_rui(log->l_mp, ailp, lip);
+ break;
+ }
lip = xfs_trans_ail_cursor_next(ailp, &cur);
}
@@ -5023,6 +5163,7 @@ xlog_do_recover(
xfs_warn(mp, "Failed post-recovery per-ag init: %d", error);
return error;
}
+ mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
xlog_recover_check_summary(log);
@@ -5139,16 +5280,17 @@ xlog_recover_finish(
*/
if (log->l_flags & XLOG_RECOVERY_NEEDED) {
int error;
- error = xlog_recover_process_efis(log);
+ error = xlog_recover_process_intents(log);
if (error) {
- xfs_alert(log->l_mp, "Failed to recover EFIs");
+ xfs_alert(log->l_mp, "Failed to recover intents");
return error;
}
+
/*
- * Sync the log to get all the EFIs out of the AIL.
+ * Sync the log to get all the intents out of the AIL.
* This isn't absolutely necessary, but it helps in
* case the unlink transactions would have problems
- * pushing the EFIs out of the way.
+ * pushing the intents out of the way.
*/
xfs_log_force(log->l_mp, XFS_LOG_SYNC);
@@ -5173,7 +5315,7 @@ xlog_recover_cancel(
int error = 0;
if (log->l_flags & XLOG_RECOVERY_NEEDED)
- error = xlog_recover_cancel_efis(log);
+ error = xlog_recover_cancel_intents(log);
return error;
}
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index e39b02351b4a..faeead671f9f 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -24,6 +24,7 @@
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
@@ -41,6 +42,7 @@
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_sysfs.h"
+#include "xfs_rmap_btree.h"
static DEFINE_MUTEX(xfs_uuid_table_mutex);
@@ -230,6 +232,8 @@ xfs_initialize_perag(
if (maxagi)
*maxagi = index;
+
+ mp->m_ag_prealloc_blocks = xfs_prealloc_blocks(mp);
return 0;
out_unwind:
@@ -272,13 +276,15 @@ xfs_readsb(
buf_ops = NULL;
/*
- * Allocate a (locked) buffer to hold the superblock.
- * This will be kept around at all times to optimize
- * access to the superblock.
+ * Allocate a (locked) buffer to hold the superblock. This will be kept
+ * around at all times to optimize access to the superblock. Therefore,
+ * set XBF_NO_IOACCT to make sure it doesn't hold the buftarg count
+ * elevated.
*/
reread:
error = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
- BTOBB(sector_size), 0, &bp, buf_ops);
+ BTOBB(sector_size), XBF_NO_IOACCT, &bp,
+ buf_ops);
if (error) {
if (loud)
xfs_warn(mp, "SB validate failed with error %d.", error);
@@ -677,6 +683,7 @@ xfs_mountfs(
xfs_bmap_compute_maxlevels(mp, XFS_DATA_FORK);
xfs_bmap_compute_maxlevels(mp, XFS_ATTR_FORK);
xfs_ialloc_compute_maxlevels(mp);
+ xfs_rmapbt_compute_maxlevels(mp);
xfs_set_maxicount(mp);
@@ -1214,7 +1221,7 @@ xfs_mod_fdblocks(
batch = XFS_FDBLOCKS_BATCH;
__percpu_counter_add(&mp->m_fdblocks, delta, batch);
- if (__percpu_counter_compare(&mp->m_fdblocks, XFS_ALLOC_SET_ASIDE(mp),
+ if (__percpu_counter_compare(&mp->m_fdblocks, mp->m_alloc_set_aside,
XFS_FDBLOCKS_BATCH) >= 0) {
/* we had space! */
return 0;
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index c1b798c72126..b36676cde103 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -116,9 +116,15 @@ typedef struct xfs_mount {
uint m_bmap_dmnr[2]; /* min bmap btree records */
uint m_inobt_mxr[2]; /* max inobt btree records */
uint m_inobt_mnr[2]; /* min inobt btree records */
+ uint m_rmap_mxr[2]; /* max rmap btree records */
+ uint m_rmap_mnr[2]; /* min rmap btree records */
uint m_ag_maxlevels; /* XFS_AG_MAXLEVELS */
uint m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */
uint m_in_maxlevels; /* max inobt btree levels. */
+ uint m_rmap_maxlevels; /* max rmap btree levels */
+ xfs_extlen_t m_ag_prealloc_blocks; /* reserved ag blocks */
+ uint m_alloc_set_aside; /* space we can't use */
+ uint m_ag_max_usable; /* max space per AG */
struct radix_tree_root m_perag_tree; /* per-ag accounting info */
spinlock_t m_perag_lock; /* lock for m_perag_tree */
struct mutex m_growlock; /* growfs mutex */
diff --git a/fs/xfs/xfs_ondisk.h b/fs/xfs/xfs_ondisk.h
index 184c44effdd5..69e2986a3776 100644
--- a/fs/xfs/xfs_ondisk.h
+++ b/fs/xfs/xfs_ondisk.h
@@ -22,6 +22,11 @@
BUILD_BUG_ON_MSG(sizeof(structname) != (size), "XFS: sizeof(" \
#structname ") is wrong, expected " #size)
+#define XFS_CHECK_OFFSET(structname, member, off) \
+ BUILD_BUG_ON_MSG(offsetof(structname, member) != (off), \
+ "XFS: offsetof(" #structname ", " #member ") is wrong, " \
+ "expected " #off)
+
static inline void __init
xfs_check_ondisk_structs(void)
{
@@ -34,6 +39,8 @@ xfs_check_ondisk_structs(void)
XFS_CHECK_STRUCT_SIZE(struct xfs_bmbt_key, 8);
XFS_CHECK_STRUCT_SIZE(struct xfs_bmbt_rec, 16);
XFS_CHECK_STRUCT_SIZE(struct xfs_bmdr_block, 4);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_btree_block_shdr, 48);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_btree_block_lhdr, 64);
XFS_CHECK_STRUCT_SIZE(struct xfs_btree_block, 72);
XFS_CHECK_STRUCT_SIZE(struct xfs_dinode, 176);
XFS_CHECK_STRUCT_SIZE(struct xfs_disk_dquot, 104);
@@ -42,11 +49,14 @@ xfs_check_ondisk_structs(void)
XFS_CHECK_STRUCT_SIZE(struct xfs_dsymlink_hdr, 56);
XFS_CHECK_STRUCT_SIZE(struct xfs_inobt_key, 4);
XFS_CHECK_STRUCT_SIZE(struct xfs_inobt_rec, 16);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_rmap_key, 20);
+ XFS_CHECK_STRUCT_SIZE(struct xfs_rmap_rec, 24);
XFS_CHECK_STRUCT_SIZE(struct xfs_timestamp, 8);
XFS_CHECK_STRUCT_SIZE(xfs_alloc_key_t, 8);
XFS_CHECK_STRUCT_SIZE(xfs_alloc_ptr_t, 4);
XFS_CHECK_STRUCT_SIZE(xfs_alloc_rec_t, 8);
XFS_CHECK_STRUCT_SIZE(xfs_inobt_ptr_t, 4);
+ XFS_CHECK_STRUCT_SIZE(xfs_rmap_ptr_t, 4);
/* dir/attr trees */
XFS_CHECK_STRUCT_SIZE(struct xfs_attr3_leaf_hdr, 80);
@@ -75,27 +85,39 @@ xfs_check_ondisk_structs(void)
XFS_CHECK_STRUCT_SIZE(xfs_attr_leaf_name_remote_t, 12);
*/
+ XFS_CHECK_OFFSET(xfs_attr_leaf_name_local_t, valuelen, 0);
+ XFS_CHECK_OFFSET(xfs_attr_leaf_name_local_t, namelen, 2);
+ XFS_CHECK_OFFSET(xfs_attr_leaf_name_local_t, nameval, 3);
+ XFS_CHECK_OFFSET(xfs_attr_leaf_name_remote_t, valueblk, 0);
+ XFS_CHECK_OFFSET(xfs_attr_leaf_name_remote_t, valuelen, 4);
+ XFS_CHECK_OFFSET(xfs_attr_leaf_name_remote_t, namelen, 8);
+ XFS_CHECK_OFFSET(xfs_attr_leaf_name_remote_t, name, 9);
XFS_CHECK_STRUCT_SIZE(xfs_attr_leafblock_t, 40);
- XFS_CHECK_STRUCT_SIZE(xfs_attr_shortform_t, 8);
+ XFS_CHECK_OFFSET(xfs_attr_shortform_t, hdr.totsize, 0);
+ XFS_CHECK_OFFSET(xfs_attr_shortform_t, hdr.count, 2);
+ XFS_CHECK_OFFSET(xfs_attr_shortform_t, list[0].namelen, 4);
+ XFS_CHECK_OFFSET(xfs_attr_shortform_t, list[0].valuelen, 5);
+ XFS_CHECK_OFFSET(xfs_attr_shortform_t, list[0].flags, 6);
+ XFS_CHECK_OFFSET(xfs_attr_shortform_t, list[0].nameval, 7);
XFS_CHECK_STRUCT_SIZE(xfs_da_blkinfo_t, 12);
XFS_CHECK_STRUCT_SIZE(xfs_da_intnode_t, 16);
XFS_CHECK_STRUCT_SIZE(xfs_da_node_entry_t, 8);
XFS_CHECK_STRUCT_SIZE(xfs_da_node_hdr_t, 16);
XFS_CHECK_STRUCT_SIZE(xfs_dir2_data_free_t, 4);
XFS_CHECK_STRUCT_SIZE(xfs_dir2_data_hdr_t, 16);
- XFS_CHECK_STRUCT_SIZE(xfs_dir2_data_unused_t, 6);
+ XFS_CHECK_OFFSET(xfs_dir2_data_unused_t, freetag, 0);
+ XFS_CHECK_OFFSET(xfs_dir2_data_unused_t, length, 2);
XFS_CHECK_STRUCT_SIZE(xfs_dir2_free_hdr_t, 16);
XFS_CHECK_STRUCT_SIZE(xfs_dir2_free_t, 16);
- XFS_CHECK_STRUCT_SIZE(xfs_dir2_ino4_t, 4);
- XFS_CHECK_STRUCT_SIZE(xfs_dir2_ino8_t, 8);
- XFS_CHECK_STRUCT_SIZE(xfs_dir2_inou_t, 8);
XFS_CHECK_STRUCT_SIZE(xfs_dir2_leaf_entry_t, 8);
XFS_CHECK_STRUCT_SIZE(xfs_dir2_leaf_hdr_t, 16);
XFS_CHECK_STRUCT_SIZE(xfs_dir2_leaf_t, 16);
XFS_CHECK_STRUCT_SIZE(xfs_dir2_leaf_tail_t, 4);
XFS_CHECK_STRUCT_SIZE(xfs_dir2_sf_entry_t, 3);
+ XFS_CHECK_OFFSET(xfs_dir2_sf_entry_t, namelen, 0);
+ XFS_CHECK_OFFSET(xfs_dir2_sf_entry_t, offset, 1);
+ XFS_CHECK_OFFSET(xfs_dir2_sf_entry_t, name, 3);
XFS_CHECK_STRUCT_SIZE(xfs_dir2_sf_hdr_t, 10);
- XFS_CHECK_STRUCT_SIZE(xfs_dir2_sf_off_t, 2);
/* log structures */
XFS_CHECK_STRUCT_SIZE(struct xfs_dq_logformat, 24);
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index d5b756669fb5..0f14b2e4bf6c 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2014 Christoph Hellwig.
*/
+#include <linux/iomap.h>
#include "xfs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
@@ -79,32 +80,6 @@ xfs_fs_get_uuid(
return 0;
}
-static void
-xfs_bmbt_to_iomap(
- struct xfs_inode *ip,
- struct iomap *iomap,
- struct xfs_bmbt_irec *imap)
-{
- struct xfs_mount *mp = ip->i_mount;
-
- if (imap->br_startblock == HOLESTARTBLOCK) {
- iomap->blkno = IOMAP_NULL_BLOCK;
- iomap->type = IOMAP_HOLE;
- } else if (imap->br_startblock == DELAYSTARTBLOCK) {
- iomap->blkno = IOMAP_NULL_BLOCK;
- iomap->type = IOMAP_DELALLOC;
- } else {
- iomap->blkno =
- XFS_FSB_TO_DADDR(ip->i_mount, imap->br_startblock);
- if (imap->br_state == XFS_EXT_UNWRITTEN)
- iomap->type = IOMAP_UNWRITTEN;
- else
- iomap->type = IOMAP_MAPPED;
- }
- iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
- iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
-}
-
/*
* Get a layout for the pNFS client.
*/
diff --git a/fs/xfs/xfs_pnfs.h b/fs/xfs/xfs_pnfs.h
index 93f74853961b..e8339f74966b 100644
--- a/fs/xfs/xfs_pnfs.h
+++ b/fs/xfs/xfs_pnfs.h
@@ -1,7 +1,7 @@
#ifndef _XFS_PNFS_H
#define _XFS_PNFS_H 1
-#if defined(CONFIG_NFSD_BLOCKLAYOUT) || defined(CONFIG_NFSD_SCSILAYOUT)
+#ifdef CONFIG_EXPORTFS_BLOCK_OPS
int xfs_fs_get_uuid(struct super_block *sb, u8 *buf, u32 *len, u64 *offset);
int xfs_fs_map_blocks(struct inode *inode, loff_t offset, u64 length,
struct iomap *iomap, bool write, u32 *device_generation);
@@ -15,5 +15,5 @@ xfs_break_layouts(struct inode *inode, uint *iolock, bool with_imutex)
{
return 0;
}
-#endif /* CONFIG_NFSD_PNFS */
+#endif /* CONFIG_EXPORTFS_BLOCK_OPS */
#endif /* _XFS_PNFS_H */
diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c
new file mode 100644
index 000000000000..2500f28689d5
--- /dev/null
+++ b/fs/xfs/xfs_rmap_item.c
@@ -0,0 +1,536 @@
+/*
+ * Copyright (C) 2016 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_buf_item.h"
+#include "xfs_rmap_item.h"
+#include "xfs_log.h"
+#include "xfs_rmap.h"
+
+
+kmem_zone_t *xfs_rui_zone;
+kmem_zone_t *xfs_rud_zone;
+
+static inline struct xfs_rui_log_item *RUI_ITEM(struct xfs_log_item *lip)
+{
+ return container_of(lip, struct xfs_rui_log_item, rui_item);
+}
+
+void
+xfs_rui_item_free(
+ struct xfs_rui_log_item *ruip)
+{
+ if (ruip->rui_format.rui_nextents > XFS_RUI_MAX_FAST_EXTENTS)
+ kmem_free(ruip);
+ else
+ kmem_zone_free(xfs_rui_zone, ruip);
+}
+
+/*
+ * This returns the number of iovecs needed to log the given rui item.
+ * We only need 1 iovec for an rui item. It just logs the rui_log_format
+ * structure.
+ */
+static inline int
+xfs_rui_item_sizeof(
+ struct xfs_rui_log_item *ruip)
+{
+ return sizeof(struct xfs_rui_log_format) +
+ (ruip->rui_format.rui_nextents - 1) *
+ sizeof(struct xfs_map_extent);
+}
+
+STATIC void
+xfs_rui_item_size(
+ struct xfs_log_item *lip,
+ int *nvecs,
+ int *nbytes)
+{
+ *nvecs += 1;
+ *nbytes += xfs_rui_item_sizeof(RUI_ITEM(lip));
+}
+
+/*
+ * This is called to fill in the vector of log iovecs for the
+ * given rui log item. We use only 1 iovec, and we point that
+ * at the rui_log_format structure embedded in the rui item.
+ * It is at this point that we assert that all of the extent
+ * slots in the rui item have been filled.
+ */
+STATIC void
+xfs_rui_item_format(
+ struct xfs_log_item *lip,
+ struct xfs_log_vec *lv)
+{
+ struct xfs_rui_log_item *ruip = RUI_ITEM(lip);
+ struct xfs_log_iovec *vecp = NULL;
+
+ ASSERT(atomic_read(&ruip->rui_next_extent) ==
+ ruip->rui_format.rui_nextents);
+
+ ruip->rui_format.rui_type = XFS_LI_RUI;
+ ruip->rui_format.rui_size = 1;
+
+ xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUI_FORMAT, &ruip->rui_format,
+ xfs_rui_item_sizeof(ruip));
+}
+
+/*
+ * Pinning has no meaning for an rui item, so just return.
+ */
+STATIC void
+xfs_rui_item_pin(
+ struct xfs_log_item *lip)
+{
+}
+
+/*
+ * The unpin operation is the last place an RUI is manipulated in the log. It is
+ * either inserted in the AIL or aborted in the event of a log I/O error. In
+ * either case, the RUI transaction has been successfully committed to make it
+ * this far. Therefore, we expect whoever committed the RUI to either construct
+ * and commit the RUD or drop the RUD's reference in the event of error. Simply
+ * drop the log's RUI reference now that the log is done with it.
+ */
+STATIC void
+xfs_rui_item_unpin(
+ struct xfs_log_item *lip,
+ int remove)
+{
+ struct xfs_rui_log_item *ruip = RUI_ITEM(lip);
+
+ xfs_rui_release(ruip);
+}
+
+/*
+ * RUI items have no locking or pushing. However, since RUIs are pulled from
+ * the AIL when their corresponding RUDs are committed to disk, their situation
+ * is very similar to being pinned. Return XFS_ITEM_PINNED so that the caller
+ * will eventually flush the log. This should help in getting the RUI out of
+ * the AIL.
+ */
+STATIC uint
+xfs_rui_item_push(
+ struct xfs_log_item *lip,
+ struct list_head *buffer_list)
+{
+ return XFS_ITEM_PINNED;
+}
+
+/*
+ * The RUI has been either committed or aborted if the transaction has been
+ * cancelled. If the transaction was cancelled, an RUD isn't going to be
+ * constructed and thus we free the RUI here directly.
+ */
+STATIC void
+xfs_rui_item_unlock(
+ struct xfs_log_item *lip)
+{
+ if (lip->li_flags & XFS_LI_ABORTED)
+ xfs_rui_item_free(RUI_ITEM(lip));
+}
+
+/*
+ * The RUI is logged only once and cannot be moved in the log, so simply return
+ * the lsn at which it's been logged.
+ */
+STATIC xfs_lsn_t
+xfs_rui_item_committed(
+ struct xfs_log_item *lip,
+ xfs_lsn_t lsn)
+{
+ return lsn;
+}
+
+/*
+ * The RUI dependency tracking op doesn't do squat. It can't because
+ * it doesn't know where the extent being remapped is coming from. The
+ * dependency tracking has to be handled by the "enclosing" metadata
+ * object. For example, for inodes, the inode is locked throughout the
+ * rmap update so the dependency should be recorded there.
+ */
+STATIC void
+xfs_rui_item_committing(
+ struct xfs_log_item *lip,
+ xfs_lsn_t lsn)
+{
+}
+
+/*
+ * This is the ops vector shared by all rui log items.
+ */
+static const struct xfs_item_ops xfs_rui_item_ops = {
+ .iop_size = xfs_rui_item_size,
+ .iop_format = xfs_rui_item_format,
+ .iop_pin = xfs_rui_item_pin,
+ .iop_unpin = xfs_rui_item_unpin,
+ .iop_unlock = xfs_rui_item_unlock,
+ .iop_committed = xfs_rui_item_committed,
+ .iop_push = xfs_rui_item_push,
+ .iop_committing = xfs_rui_item_committing,
+};
+
+/*
+ * Allocate and initialize an rui item with the given number of extents.
+ */
+struct xfs_rui_log_item *
+xfs_rui_init(
+ struct xfs_mount *mp,
+ uint nextents)
+{
+ struct xfs_rui_log_item *ruip;
+ uint size;
+
+ ASSERT(nextents > 0);
+ if (nextents > XFS_RUI_MAX_FAST_EXTENTS) {
+ size = (uint)(sizeof(struct xfs_rui_log_item) +
+ ((nextents - 1) * sizeof(struct xfs_map_extent)));
+ ruip = kmem_zalloc(size, KM_SLEEP);
+ } else {
+ ruip = kmem_zone_zalloc(xfs_rui_zone, KM_SLEEP);
+ }
+
+ xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops);
+ ruip->rui_format.rui_nextents = nextents;
+ ruip->rui_format.rui_id = (uintptr_t)(void *)ruip;
+ atomic_set(&ruip->rui_next_extent, 0);
+ atomic_set(&ruip->rui_refcount, 2);
+
+ return ruip;
+}
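
Note the item comes back holding two references: one for the log, dropped by xfs_rui_item_unpin(), and one for the RUD side, dropped through xfs_rui_release() when the update finishes or aborts. A brief sketch of the two allocation paths, with hypothetical extent counts (each item would normally be joined to a transaction rather than left dangling):

	struct xfs_rui_log_item *ruip;

	ruip = xfs_rui_init(mp, 4);	/* <= 16 extents: zone allocation */
	ruip = xfs_rui_init(mp, 32);	/* > 16: kmem_zalloc() sized for
					 * 31 extents past the embedded one */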
+
+/*
+ * Copy an RUI format buffer from the given buf, and into the destination
+ * RUI format structure. The RUI/RUD items were designed not to need any
+ * special alignment handling.
+ */
+int
+xfs_rui_copy_format(
+ struct xfs_log_iovec *buf,
+ struct xfs_rui_log_format *dst_rui_fmt)
+{
+ struct xfs_rui_log_format *src_rui_fmt;
+ uint len;
+
+ src_rui_fmt = buf->i_addr;
+ len = sizeof(struct xfs_rui_log_format) +
+ (src_rui_fmt->rui_nextents - 1) *
+ sizeof(struct xfs_map_extent);
+
+ if (buf->i_len != len)
+ return -EFSCORRUPTED;
+
+ memcpy((char *)dst_rui_fmt, (char *)src_rui_fmt, len);
+ return 0;
+}
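
Since struct xfs_rui_log_format embeds one xfs_map_extent, the only length this check accepts for a hypothetical recovered RUI describing three extents is:

	sizeof(struct xfs_rui_log_format) + (3 - 1) * sizeof(struct xfs_map_extent)

anything else is treated as corruption and fails with -EFSCORRUPTED.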
+
+/*
+ * Freeing the RUI requires that we remove it from the AIL if it has already
+ * been placed there. However, the RUI may not yet have been placed in the AIL
+ * when called by xfs_rui_release() from RUD processing due to the ordering of
+ * committed vs unpin operations in bulk insert operations. Hence the reference
+ * count to ensure only the last caller frees the RUI.
+ */
+void
+xfs_rui_release(
+ struct xfs_rui_log_item *ruip)
+{
+ if (atomic_dec_and_test(&ruip->rui_refcount)) {
+ xfs_trans_ail_remove(&ruip->rui_item, SHUTDOWN_LOG_IO_ERROR);
+ xfs_rui_item_free(ruip);
+ }
+}
+
+static inline struct xfs_rud_log_item *RUD_ITEM(struct xfs_log_item *lip)
+{
+ return container_of(lip, struct xfs_rud_log_item, rud_item);
+}
+
+STATIC void
+xfs_rud_item_size(
+ struct xfs_log_item *lip,
+ int *nvecs,
+ int *nbytes)
+{
+ *nvecs += 1;
+ *nbytes += sizeof(struct xfs_rud_log_format);
+}
+
+/*
+ * This is called to fill in the vector of log iovecs for the
+ * given rud log item. We use only 1 iovec, and we point that
+ * at the rud_log_format structure embedded in the rud item.
+ * It is at this point that we assert that all of the extent
+ * slots in the rud item have been filled.
+ */
+STATIC void
+xfs_rud_item_format(
+ struct xfs_log_item *lip,
+ struct xfs_log_vec *lv)
+{
+ struct xfs_rud_log_item *rudp = RUD_ITEM(lip);
+ struct xfs_log_iovec *vecp = NULL;
+
+ rudp->rud_format.rud_type = XFS_LI_RUD;
+ rudp->rud_format.rud_size = 1;
+
+ xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_RUD_FORMAT, &rudp->rud_format,
+ sizeof(struct xfs_rud_log_format));
+}
+
+/*
+ * Pinning has no meaning for an rud item, so just return.
+ */
+STATIC void
+xfs_rud_item_pin(
+ struct xfs_log_item *lip)
+{
+}
+
+/*
+ * Since pinning has no meaning for an rud item, unpinning does
+ * not either.
+ */
+STATIC void
+xfs_rud_item_unpin(
+ struct xfs_log_item *lip,
+ int remove)
+{
+}
+
+/*
+ * There isn't much you can do to push on an rud item. It is simply stuck
+ * waiting for the log to be flushed to disk.
+ */
+STATIC uint
+xfs_rud_item_push(
+ struct xfs_log_item *lip,
+ struct list_head *buffer_list)
+{
+ return XFS_ITEM_PINNED;
+}
+
+/*
+ * By the time this is called, the RUD has either been committed or
+ * aborted. If the transaction was cancelled, drop our reference to the
+ * RUI and free the RUD.
+ */
+STATIC void
+xfs_rud_item_unlock(
+ struct xfs_log_item *lip)
+{
+ struct xfs_rud_log_item *rudp = RUD_ITEM(lip);
+
+ if (lip->li_flags & XFS_LI_ABORTED) {
+ xfs_rui_release(rudp->rud_ruip);
+ kmem_zone_free(xfs_rud_zone, rudp);
+ }
+}
+
+/*
+ * When the rud item is committed to disk, all we need to do is delete our
+ * reference to our partner rui item and then free ourselves. Since we're
+ * freeing ourselves we must return -1 to keep the transaction code from
+ * further referencing this item.
+ */
+STATIC xfs_lsn_t
+xfs_rud_item_committed(
+ struct xfs_log_item *lip,
+ xfs_lsn_t lsn)
+{
+ struct xfs_rud_log_item *rudp = RUD_ITEM(lip);
+
+ /*
+ * Drop the RUI reference regardless of whether the RUD has been
+ * aborted. Once the RUD transaction is constructed, it is the sole
+ * responsibility of the RUD to release the RUI (even if the RUI is
+ * aborted due to log I/O error).
+ */
+ xfs_rui_release(rudp->rud_ruip);
+ kmem_zone_free(xfs_rud_zone, rudp);
+
+ return (xfs_lsn_t)-1;
+}
+
+/*
+ * The RUD dependency tracking op doesn't do squat. It can't because
+ * it doesn't know where the extent being remapped is coming from. The
+ * dependency tracking has to be handled by the "enclosing" metadata
+ * object. For example, for inodes, the inode is locked throughout the
+ * rmap update so the dependency should be recorded there.
+ */
+STATIC void
+xfs_rud_item_committing(
+ struct xfs_log_item *lip,
+ xfs_lsn_t lsn)
+{
+}
+
+/*
+ * This is the ops vector shared by all rud log items.
+ */
+static const struct xfs_item_ops xfs_rud_item_ops = {
+ .iop_size = xfs_rud_item_size,
+ .iop_format = xfs_rud_item_format,
+ .iop_pin = xfs_rud_item_pin,
+ .iop_unpin = xfs_rud_item_unpin,
+ .iop_unlock = xfs_rud_item_unlock,
+ .iop_committed = xfs_rud_item_committed,
+ .iop_push = xfs_rud_item_push,
+ .iop_committing = xfs_rud_item_committing,
+};
+
+/*
+ * Allocate and initialize an rud item with the given number of extents.
+ */
+struct xfs_rud_log_item *
+xfs_rud_init(
+ struct xfs_mount *mp,
+ struct xfs_rui_log_item *ruip)
+{
+ struct xfs_rud_log_item *rudp;
+
+ rudp = kmem_zone_zalloc(xfs_rud_zone, KM_SLEEP);
+ xfs_log_item_init(mp, &rudp->rud_item, XFS_LI_RUD, &xfs_rud_item_ops);
+ rudp->rud_ruip = ruip;
+ rudp->rud_format.rud_rui_id = ruip->rui_format.rui_id;
+
+ return rudp;
+}
+
+/*
+ * Process an rmap update intent item that was recovered from the log.
+ * We need to update the rmapbt.
+ */
+int
+xfs_rui_recover(
+ struct xfs_mount *mp,
+ struct xfs_rui_log_item *ruip)
+{
+ int i;
+ int error = 0;
+ struct xfs_map_extent *rmap;
+ xfs_fsblock_t startblock_fsb;
+ bool op_ok;
+ struct xfs_rud_log_item *rudp;
+ enum xfs_rmap_intent_type type;
+ int whichfork;
+ xfs_exntst_t state;
+ struct xfs_trans *tp;
+ struct xfs_btree_cur *rcur = NULL;
+
+ ASSERT(!test_bit(XFS_RUI_RECOVERED, &ruip->rui_flags));
+
+ /*
+ * First check the validity of the extents described by the
+ * RUI. If any are bad, then assume that all are bad and
+ * just toss the RUI.
+ */
+ for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
+ rmap = &ruip->rui_format.rui_extents[i];
+ startblock_fsb = XFS_BB_TO_FSB(mp,
+ XFS_FSB_TO_DADDR(mp, rmap->me_startblock));
+ switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
+ case XFS_RMAP_EXTENT_MAP:
+ case XFS_RMAP_EXTENT_UNMAP:
+ case XFS_RMAP_EXTENT_CONVERT:
+ case XFS_RMAP_EXTENT_ALLOC:
+ case XFS_RMAP_EXTENT_FREE:
+ op_ok = true;
+ break;
+ default:
+ op_ok = false;
+ break;
+ }
+ if (!op_ok || startblock_fsb == 0 ||
+ rmap->me_len == 0 ||
+ startblock_fsb >= mp->m_sb.sb_dblocks ||
+ rmap->me_len >= mp->m_sb.sb_agblocks ||
+ (rmap->me_flags & ~XFS_RMAP_EXTENT_FLAGS)) {
+ /*
+ * This will pull the RUI from the AIL and
+ * free the memory associated with it.
+ */
+ set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
+ xfs_rui_release(ruip);
+ return -EIO;
+ }
+ }
+
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
+ if (error)
+ return error;
+ rudp = xfs_trans_get_rud(tp, ruip);
+
+ for (i = 0; i < ruip->rui_format.rui_nextents; i++) {
+ rmap = &ruip->rui_format.rui_extents[i];
+ state = (rmap->me_flags & XFS_RMAP_EXTENT_UNWRITTEN) ?
+ XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
+ whichfork = (rmap->me_flags & XFS_RMAP_EXTENT_ATTR_FORK) ?
+ XFS_ATTR_FORK : XFS_DATA_FORK;
+ switch (rmap->me_flags & XFS_RMAP_EXTENT_TYPE_MASK) {
+ case XFS_RMAP_EXTENT_MAP:
+ type = XFS_RMAP_MAP;
+ break;
+ case XFS_RMAP_EXTENT_UNMAP:
+ type = XFS_RMAP_UNMAP;
+ break;
+ case XFS_RMAP_EXTENT_CONVERT:
+ type = XFS_RMAP_CONVERT;
+ break;
+ case XFS_RMAP_EXTENT_ALLOC:
+ type = XFS_RMAP_ALLOC;
+ break;
+ case XFS_RMAP_EXTENT_FREE:
+ type = XFS_RMAP_FREE;
+ break;
+ default:
+ error = -EFSCORRUPTED;
+ goto abort_error;
+ }
+ error = xfs_trans_log_finish_rmap_update(tp, rudp, type,
+ rmap->me_owner, whichfork,
+ rmap->me_startoff, rmap->me_startblock,
+ rmap->me_len, state, &rcur);
+ if (error)
+ goto abort_error;
+ }
+
+ xfs_rmap_finish_one_cleanup(tp, rcur, error);
+ set_bit(XFS_RUI_RECOVERED, &ruip->rui_flags);
+ error = xfs_trans_commit(tp);
+ return error;
+
+abort_error:
+ xfs_rmap_finish_one_cleanup(tp, rcur, error);
+ xfs_trans_cancel(tp);
+ return error;
+}
diff --git a/fs/xfs/xfs_rmap_item.h b/fs/xfs/xfs_rmap_item.h
new file mode 100644
index 000000000000..aefcc3a318a5
--- /dev/null
+++ b/fs/xfs/xfs_rmap_item.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2016 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#ifndef __XFS_RMAP_ITEM_H__
+#define __XFS_RMAP_ITEM_H__
+
+/*
+ * There are (currently) three pairs of rmap btree redo item types: map, unmap,
+ * and convert. The common abbreviations for these are RUI (rmap update
+ * intent) and RUD (rmap update done). The redo item type is encoded in the
+ * flags field of each xfs_map_extent.
+ *
+ * *I items should be recorded in the *first* of a series of rolled
+ * transactions, and the *D items should be recorded in the same transaction
+ * that records the associated rmapbt updates. Typically, the first
+ * transaction will record a bmbt update, followed by some number of
+ * transactions containing rmapbt updates, and finally transactions with any
+ * bnobt/cntbt updates.
+ *
+ * Should the system crash after the commit of the first transaction but
+ * before the commit of the final transaction in a series, log recovery will
+ * use the redo information recorded by the intent items to replay the
+ * (rmapbt/bnobt/cntbt) metadata updates in the non-first transaction.
+ */
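
Condensed, that ordering looks roughly like the sketch below on the commit side. Illustrative only: the deferred-ops machinery normally drives these calls, error handling is elided, and owner, startoff, startblock and len stand in for real values.

	struct xfs_rui_log_item *ruip;
	struct xfs_rud_log_item *rudp;
	struct xfs_btree_cur *rcur = NULL;
	int error;

	/* First transaction: log the intent, then roll to a new one. */
	ruip = xfs_rui_init(mp, 1);
	/* ...fill rui_format.rui_extents[0], bump rui_next_extent... */
	xfs_trans_add_item(tp, &ruip->rui_item);
	error = xfs_trans_roll(&tp, NULL);

	/* Later transaction: do the rmapbt update and log the done item. */
	rudp = xfs_trans_get_rud(tp, ruip);
	error = xfs_trans_log_finish_rmap_update(tp, rudp, XFS_RMAP_MAP,
			owner, XFS_DATA_FORK, startoff, startblock, len,
			XFS_EXT_NORM, &rcur);
	xfs_rmap_finish_one_cleanup(tp, rcur, error);
	error = xfs_trans_commit(tp);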
+
+/* kernel only RUI/RUD definitions */
+
+struct xfs_mount;
+struct kmem_zone;
+
+/*
+ * Max number of extents in fast allocation path.
+ */
+#define XFS_RUI_MAX_FAST_EXTENTS 16
+
+/*
+ * Define RUI flag bits. Manipulated by set/clear/test_bit operators.
+ */
+#define XFS_RUI_RECOVERED 1
+
+/*
+ * This is the "rmap update intent" log item. It is used to log the fact that
+ * some reverse mappings need to change. It is used in conjunction with the
+ * "rmap update done" log item described below.
+ *
+ * These log items follow the same rules as struct xfs_efi_log_item; see the
+ * comments about that structure (in xfs_extfree_item.h) for more details.
+ */
+struct xfs_rui_log_item {
+ struct xfs_log_item rui_item;
+ atomic_t rui_refcount;
+ atomic_t rui_next_extent;
+ unsigned long rui_flags; /* misc flags */
+ struct xfs_rui_log_format rui_format;
+};
+
+/*
+ * This is the "rmap update done" log item. It is used to log the fact that
+ * some rmapbt updates mentioned in an earlier rui item have been performed.
+ */
+struct xfs_rud_log_item {
+ struct xfs_log_item rud_item;
+ struct xfs_rui_log_item *rud_ruip;
+ struct xfs_rud_log_format rud_format;
+};
+
+extern struct kmem_zone *xfs_rui_zone;
+extern struct kmem_zone *xfs_rud_zone;
+
+struct xfs_rui_log_item *xfs_rui_init(struct xfs_mount *, uint);
+struct xfs_rud_log_item *xfs_rud_init(struct xfs_mount *,
+ struct xfs_rui_log_item *);
+int xfs_rui_copy_format(struct xfs_log_iovec *buf,
+ struct xfs_rui_log_format *dst_rui_fmt);
+void xfs_rui_item_free(struct xfs_rui_log_item *);
+void xfs_rui_release(struct xfs_rui_log_item *);
+int xfs_rui_recover(struct xfs_mount *mp, struct xfs_rui_log_item *ruip);
+
+#endif /* __XFS_RMAP_ITEM_H__ */
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 3938b37d1043..802bcc326d9f 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -23,6 +23,7 @@
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
@@ -769,7 +770,7 @@ xfs_growfs_rt_alloc(
xfs_daddr_t d; /* disk block address */
int error; /* error return value */
xfs_fsblock_t firstblock;/* first block allocated in xaction */
- struct xfs_bmap_free flist; /* list of freed blocks */
+ struct xfs_defer_ops dfops; /* list of freed blocks */
xfs_fsblock_t fsbno; /* filesystem block for bno */
struct xfs_bmbt_irec map; /* block map output */
int nmap; /* number of block maps */
@@ -794,14 +795,14 @@ xfs_growfs_rt_alloc(
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_bmap_init(&flist, &firstblock);
+ xfs_defer_init(&dfops, &firstblock);
/*
* Allocate blocks to the bitmap file.
*/
nmap = 1;
error = xfs_bmapi_write(tp, ip, oblocks, nblocks - oblocks,
XFS_BMAPI_METADATA, &firstblock,
- resblks, &map, &nmap, &flist);
+ resblks, &map, &nmap, &dfops);
if (!error && nmap < 1)
error = -ENOSPC;
if (error)
@@ -809,7 +810,7 @@ xfs_growfs_rt_alloc(
/*
* Free any blocks freed up in the transaction, then commit.
*/
- error = xfs_bmap_finish(&tp, &flist, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto out_bmap_cancel;
error = xfs_trans_commit(tp);
@@ -862,7 +863,7 @@ xfs_growfs_rt_alloc(
return 0;
out_bmap_cancel:
- xfs_bmap_cancel(&flist);
+ xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
return error;
diff --git a/fs/xfs/xfs_rtalloc.h b/fs/xfs/xfs_rtalloc.h
index 76c0a4a9bb17..355dd9e1cb64 100644
--- a/fs/xfs/xfs_rtalloc.h
+++ b/fs/xfs/xfs_rtalloc.h
@@ -98,8 +98,6 @@ xfs_growfs_rt(
/*
* From xfs_rtbitmap.c
*/
-int xfs_rtbuf_get(struct xfs_mount *mp, struct xfs_trans *tp,
- xfs_rtblock_t block, int issum, struct xfs_buf **bpp);
int xfs_rtcheck_range(struct xfs_mount *mp, struct xfs_trans *tp,
xfs_rtblock_t start, xfs_extlen_t len, int val,
xfs_rtblock_t *new, int *stat);
diff --git a/fs/xfs/xfs_stats.c b/fs/xfs/xfs_stats.c
index 8686df6c7609..6e812fe0fd43 100644
--- a/fs/xfs/xfs_stats.c
+++ b/fs/xfs/xfs_stats.c
@@ -61,6 +61,7 @@ int xfs_stats_format(struct xfsstats __percpu *stats, char *buf)
{ "bmbt2", XFSSTAT_END_BMBT_V2 },
{ "ibt2", XFSSTAT_END_IBT_V2 },
{ "fibt2", XFSSTAT_END_FIBT_V2 },
+ { "rmapbt", XFSSTAT_END_RMAP_V2 },
/* we print both series of quota information together */
{ "qm", XFSSTAT_END_QM },
};
@@ -128,7 +129,6 @@ static int xqm_proc_open(struct inode *inode, struct file *file)
}
static const struct file_operations xqm_proc_fops = {
- .owner = THIS_MODULE,
.open = xqm_proc_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/fs/xfs/xfs_stats.h b/fs/xfs/xfs_stats.h
index 483b0eff1988..657865f51e78 100644
--- a/fs/xfs/xfs_stats.h
+++ b/fs/xfs/xfs_stats.h
@@ -197,7 +197,23 @@ struct xfsstats {
__uint32_t xs_fibt_2_alloc;
__uint32_t xs_fibt_2_free;
__uint32_t xs_fibt_2_moves;
-#define XFSSTAT_END_XQMSTAT (XFSSTAT_END_FIBT_V2+6)
+#define XFSSTAT_END_RMAP_V2 (XFSSTAT_END_FIBT_V2+15)
+ __uint32_t xs_rmap_2_lookup;
+ __uint32_t xs_rmap_2_compare;
+ __uint32_t xs_rmap_2_insrec;
+ __uint32_t xs_rmap_2_delrec;
+ __uint32_t xs_rmap_2_newroot;
+ __uint32_t xs_rmap_2_killroot;
+ __uint32_t xs_rmap_2_increment;
+ __uint32_t xs_rmap_2_decrement;
+ __uint32_t xs_rmap_2_lshift;
+ __uint32_t xs_rmap_2_rshift;
+ __uint32_t xs_rmap_2_split;
+ __uint32_t xs_rmap_2_join;
+ __uint32_t xs_rmap_2_alloc;
+ __uint32_t xs_rmap_2_free;
+ __uint32_t xs_rmap_2_moves;
+#define XFSSTAT_END_XQMSTAT (XFSSTAT_END_RMAP_V2+6)
__uint32_t xs_qm_dqreclaims;
__uint32_t xs_qm_dqreclaim_misses;
__uint32_t xs_qm_dquot_dups;
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 11ea5d51db56..24ef83ef04de 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -46,6 +46,7 @@
#include "xfs_quota.h"
#include "xfs_sysfs.h"
#include "xfs_ondisk.h"
+#include "xfs_rmap_item.h"
#include <linux/namei.h>
#include <linux/init.h>
@@ -546,7 +547,7 @@ xfs_showargs(
return 0;
}
-__uint64_t
+static __uint64_t
xfs_max_file_offset(
unsigned int blockshift)
{
@@ -1075,7 +1076,7 @@ xfs_fs_statfs(
statp->f_blocks = sbp->sb_dblocks - lsize;
spin_unlock(&mp->m_sb_lock);
- statp->f_bfree = fdblocks - XFS_ALLOC_SET_ASIDE(mp);
+ statp->f_bfree = fdblocks - mp->m_alloc_set_aside;
statp->f_bavail = statp->f_bfree;
fakeinos = statp->f_bfree << sbp->sb_inopblog;
@@ -1294,6 +1295,7 @@ xfs_fs_remount(
*/
xfs_restore_resvblks(mp);
xfs_log_work_queue(mp);
+ xfs_queue_eofblocks(mp);
}
/* rw -> ro */
@@ -1306,6 +1308,13 @@ xfs_fs_remount(
* return it to the same size.
*/
xfs_save_resvblks(mp);
+
+ /*
+ * Cancel background eofb scanning so it cannot race with the
+ * final log force+buftarg wait and deadlock the remount.
+ */
+ cancel_delayed_work_sync(&mp->m_eofblocks_work);
+
xfs_quiesce_attr(mp);
mp->m_flags |= XFS_MOUNT_RDONLY;
}
@@ -1565,9 +1574,9 @@ xfs_fs_fill_super(
}
}
- if (xfs_sb_version_hassparseinodes(&mp->m_sb))
+ if (xfs_sb_version_hasrmapbt(&mp->m_sb))
xfs_alert(mp,
- "EXPERIMENTAL sparse inode feature enabled. Use at your own risk!");
+ "EXPERIMENTAL reverse mapping btree feature enabled. Use at your own risk!");
error = xfs_mountfs(mp);
if (error)
@@ -1692,8 +1701,9 @@ xfs_init_zones(void)
if (!xfs_log_ticket_zone)
goto out_free_ioend_bioset;
- xfs_bmap_free_item_zone = kmem_zone_init(sizeof(xfs_bmap_free_item_t),
- "xfs_bmap_free_item");
+ xfs_bmap_free_item_zone = kmem_zone_init(
+ sizeof(struct xfs_extent_free_item),
+ "xfs_bmap_free_item");
if (!xfs_bmap_free_item_zone)
goto out_destroy_log_ticket_zone;
@@ -1760,8 +1770,24 @@ xfs_init_zones(void)
if (!xfs_icreate_zone)
goto out_destroy_ili_zone;
+ xfs_rud_zone = kmem_zone_init(sizeof(struct xfs_rud_log_item),
+ "xfs_rud_item");
+ if (!xfs_rud_zone)
+ goto out_destroy_icreate_zone;
+
+ xfs_rui_zone = kmem_zone_init((sizeof(struct xfs_rui_log_item) +
+ ((XFS_RUI_MAX_FAST_EXTENTS - 1) *
+ sizeof(struct xfs_map_extent))),
+ "xfs_rui_item");
+ if (!xfs_rui_zone)
+ goto out_destroy_rud_zone;
+
return 0;
+ out_destroy_rud_zone:
+ kmem_zone_destroy(xfs_rud_zone);
+ out_destroy_icreate_zone:
+ kmem_zone_destroy(xfs_icreate_zone);
out_destroy_ili_zone:
kmem_zone_destroy(xfs_ili_zone);
out_destroy_inode_zone:
@@ -1800,6 +1826,8 @@ xfs_destroy_zones(void)
* destroy caches.
*/
rcu_barrier();
+ kmem_zone_destroy(xfs_rui_zone);
+ kmem_zone_destroy(xfs_rud_zone);
kmem_zone_destroy(xfs_icreate_zone);
kmem_zone_destroy(xfs_ili_zone);
kmem_zone_destroy(xfs_inode_zone);
@@ -1849,6 +1877,9 @@ init_xfs_fs(void)
printk(KERN_INFO XFS_VERSION_STRING " with "
XFS_BUILD_OPTIONS " enabled\n");
+ xfs_extent_free_init_defer_op();
+ xfs_rmap_update_init_defer_op();
+
xfs_dir_startup();
error = xfs_init_zones();
diff --git a/fs/xfs/xfs_super.h b/fs/xfs/xfs_super.h
index 2dfb1ce4585f..529bce9fc37e 100644
--- a/fs/xfs/xfs_super.h
+++ b/fs/xfs/xfs_super.h
@@ -61,8 +61,6 @@ struct xfs_mount;
struct xfs_buftarg;
struct block_device;
-extern __uint64_t xfs_max_file_offset(unsigned int);
-
extern void xfs_flush_inodes(struct xfs_mount *mp);
extern void xfs_blkdev_issue_flush(struct xfs_buftarg *);
extern xfs_agnumber_t xfs_set_inode_alloc(struct xfs_mount *,
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 08a46c6181fd..58142aeeeea6 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -26,6 +26,7 @@
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
+#include "xfs_defer.h"
#include "xfs_dir2.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
@@ -172,7 +173,7 @@ xfs_symlink(
struct xfs_inode *ip = NULL;
int error = 0;
int pathlen;
- struct xfs_bmap_free free_list;
+ struct xfs_defer_ops dfops;
xfs_fsblock_t first_block;
bool unlock_dp_on_error = false;
xfs_fileoff_t first_fsb;
@@ -269,7 +270,7 @@ xfs_symlink(
* Initialize the bmap freelist prior to calling either
* bmapi or the directory create code.
*/
- xfs_bmap_init(&free_list, &first_block);
+ xfs_defer_init(&dfops, &first_block);
/*
* Allocate an inode for the symlink.
@@ -313,7 +314,7 @@ xfs_symlink(
error = xfs_bmapi_write(tp, ip, first_fsb, fs_blocks,
XFS_BMAPI_METADATA, &first_block, resblks,
- mval, &nmaps, &free_list);
+ mval, &nmaps, &dfops);
if (error)
goto out_bmap_cancel;
@@ -361,7 +362,7 @@ xfs_symlink(
* Create the directory entry for the symlink.
*/
error = xfs_dir_createname(tp, dp, link_name, ip->i_ino,
- &first_block, &free_list, resblks);
+ &first_block, &dfops, resblks);
if (error)
goto out_bmap_cancel;
xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
@@ -376,7 +377,7 @@ xfs_symlink(
xfs_trans_set_sync(tp);
}
- error = xfs_bmap_finish(&tp, &free_list, NULL);
+ error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto out_bmap_cancel;
@@ -392,7 +393,7 @@ xfs_symlink(
return 0;
out_bmap_cancel:
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
out_trans_cancel:
xfs_trans_cancel(tp);
out_release_inode:
@@ -426,7 +427,7 @@ xfs_inactive_symlink_rmt(
int done;
int error;
xfs_fsblock_t first_block;
- xfs_bmap_free_t free_list;
+ struct xfs_defer_ops dfops;
int i;
xfs_mount_t *mp;
xfs_bmbt_irec_t mval[XFS_SYMLINK_MAPS];
@@ -465,7 +466,7 @@ xfs_inactive_symlink_rmt(
* Find the block(s) so we can inval and unmap them.
*/
done = 0;
- xfs_bmap_init(&free_list, &first_block);
+ xfs_defer_init(&dfops, &first_block);
nmaps = ARRAY_SIZE(mval);
error = xfs_bmapi_read(ip, 0, xfs_symlink_blocks(mp, size),
mval, &nmaps, 0);
@@ -485,17 +486,17 @@ xfs_inactive_symlink_rmt(
xfs_trans_binval(tp, bp);
}
/*
- * Unmap the dead block(s) to the free_list.
+ * Unmap the dead block(s) to the dfops.
*/
error = xfs_bunmapi(tp, ip, 0, size, 0, nmaps,
- &first_block, &free_list, &done);
+ &first_block, &dfops, &done);
if (error)
goto error_bmap_cancel;
ASSERT(done);
/*
* Commit the first transaction. This logs the EFI and the inode.
*/
- error = xfs_bmap_finish(&tp, &free_list, ip);
+ error = xfs_defer_finish(&tp, &dfops, ip);
if (error)
goto error_bmap_cancel;
/*
@@ -525,7 +526,7 @@ xfs_inactive_symlink_rmt(
return 0;
error_bmap_cancel:
- xfs_bmap_cancel(&free_list);
+ xfs_defer_cancel(&dfops);
error_trans_cancel:
xfs_trans_cancel(tp);
error_unlock:
diff --git a/fs/xfs/xfs_sysfs.c b/fs/xfs/xfs_sysfs.c
index 4c2c55086208..79cfd3fc5324 100644
--- a/fs/xfs/xfs_sysfs.c
+++ b/fs/xfs/xfs_sysfs.c
@@ -634,6 +634,9 @@ xfs_error_get_cfg(
{
struct xfs_error_cfg *cfg;
+ if (error < 0)
+ error = -error;
+
switch (error) {
case EIO:
cfg = &mp->m_error_cfg[error_class][XFS_ERR_EIO];
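
With the sign normalized, callers may pass the error code either way; an illustrative pair of calls (assuming the XFS_ERR_METADATA error class):

	cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, EIO);
	cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, -EIO);	/* same entry */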
diff --git a/fs/xfs/xfs_trace.c b/fs/xfs/xfs_trace.c
index 13a029806805..7f17ae6d709a 100644
--- a/fs/xfs/xfs_trace.c
+++ b/fs/xfs/xfs_trace.c
@@ -22,7 +22,9 @@
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_da_format.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_da_btree.h"
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index ea94ee0fe5ea..551b7e26980c 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -38,6 +38,7 @@ struct xlog_recover_item;
struct xfs_buf_log_format;
struct xfs_inode_log_format;
struct xfs_bmbt_irec;
+struct xfs_btree_cur;
DECLARE_EVENT_CLASS(xfs_attr_list_class,
TP_PROTO(struct xfs_attr_list_context *ctx),
@@ -354,6 +355,7 @@ DEFINE_BUF_EVENT(xfs_buf_submit_wait);
DEFINE_BUF_EVENT(xfs_buf_bawrite);
DEFINE_BUF_EVENT(xfs_buf_lock);
DEFINE_BUF_EVENT(xfs_buf_lock_done);
+DEFINE_BUF_EVENT(xfs_buf_trylock_fail);
DEFINE_BUF_EVENT(xfs_buf_trylock);
DEFINE_BUF_EVENT(xfs_buf_unlock);
DEFINE_BUF_EVENT(xfs_buf_iowait);
@@ -1134,15 +1136,14 @@ TRACE_EVENT(xfs_log_assign_tail_lsn,
)
DECLARE_EVENT_CLASS(xfs_file_class,
- TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags),
- TP_ARGS(ip, count, offset, flags),
+ TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset),
+ TP_ARGS(ip, count, offset),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_ino_t, ino)
__field(xfs_fsize_t, size)
__field(loff_t, offset)
__field(size_t, count)
- __field(int, flags)
),
TP_fast_assign(
__entry->dev = VFS_I(ip)->i_sb->s_dev;
@@ -1150,25 +1151,25 @@ DECLARE_EVENT_CLASS(xfs_file_class,
__entry->size = ip->i_d.di_size;
__entry->offset = offset;
__entry->count = count;
- __entry->flags = flags;
),
- TP_printk("dev %d:%d ino 0x%llx size 0x%llx "
- "offset 0x%llx count 0x%zx ioflags %s",
+ TP_printk("dev %d:%d ino 0x%llx size 0x%llx offset 0x%llx count 0x%zx",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ino,
__entry->size,
__entry->offset,
- __entry->count,
- __print_flags(__entry->flags, "|", XFS_IO_FLAGS))
+ __entry->count)
)
#define DEFINE_RW_EVENT(name) \
DEFINE_EVENT(xfs_file_class, name, \
- TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset, int flags), \
- TP_ARGS(ip, count, offset, flags))
-DEFINE_RW_EVENT(xfs_file_read);
+ TP_PROTO(struct xfs_inode *ip, size_t count, loff_t offset), \
+ TP_ARGS(ip, count, offset))
+DEFINE_RW_EVENT(xfs_file_buffered_read);
+DEFINE_RW_EVENT(xfs_file_direct_read);
+DEFINE_RW_EVENT(xfs_file_dax_read);
DEFINE_RW_EVENT(xfs_file_buffered_write);
DEFINE_RW_EVENT(xfs_file_direct_write);
+DEFINE_RW_EVENT(xfs_file_dax_write);
DEFINE_RW_EVENT(xfs_file_splice_read);
DECLARE_EVENT_CLASS(xfs_page_class,
@@ -1295,6 +1296,9 @@ DEFINE_IOMAP_EVENT(xfs_map_blocks_alloc);
DEFINE_IOMAP_EVENT(xfs_get_blocks_found);
DEFINE_IOMAP_EVENT(xfs_get_blocks_alloc);
DEFINE_IOMAP_EVENT(xfs_get_blocks_map_direct);
+DEFINE_IOMAP_EVENT(xfs_iomap_alloc);
+DEFINE_IOMAP_EVENT(xfs_iomap_found);
+DEFINE_IOMAP_EVENT(xfs_iomap_not_found);
DECLARE_EVENT_CLASS(xfs_simple_io_class,
TP_PROTO(struct xfs_inode *ip, xfs_off_t offset, ssize_t count),
@@ -2182,6 +2186,379 @@ DEFINE_DISCARD_EVENT(xfs_discard_toosmall);
DEFINE_DISCARD_EVENT(xfs_discard_exclude);
DEFINE_DISCARD_EVENT(xfs_discard_busy);
+/* btree cursor events */
+DECLARE_EVENT_CLASS(xfs_btree_cur_class,
+ TP_PROTO(struct xfs_btree_cur *cur, int level, struct xfs_buf *bp),
+ TP_ARGS(cur, level, bp),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_btnum_t, btnum)
+ __field(int, level)
+ __field(int, nlevels)
+ __field(int, ptr)
+ __field(xfs_daddr_t, daddr)
+ ),
+ TP_fast_assign(
+ __entry->dev = cur->bc_mp->m_super->s_dev;
+ __entry->btnum = cur->bc_btnum;
+ __entry->level = level;
+ __entry->nlevels = cur->bc_nlevels;
+ __entry->ptr = cur->bc_ptrs[level];
+ __entry->daddr = bp ? bp->b_bn : -1;
+ ),
+ TP_printk("dev %d:%d btnum %d level %d/%d ptr %d daddr 0x%llx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->btnum,
+ __entry->level,
+ __entry->nlevels,
+ __entry->ptr,
+ (unsigned long long)__entry->daddr)
+)
+
+#define DEFINE_BTREE_CUR_EVENT(name) \
+DEFINE_EVENT(xfs_btree_cur_class, name, \
+ TP_PROTO(struct xfs_btree_cur *cur, int level, struct xfs_buf *bp), \
+ TP_ARGS(cur, level, bp))
+DEFINE_BTREE_CUR_EVENT(xfs_btree_updkeys);
+DEFINE_BTREE_CUR_EVENT(xfs_btree_overlapped_query_range);
+
+/* deferred ops */
+struct xfs_defer_pending;
+struct xfs_defer_intake;
+struct xfs_defer_ops;
+
+DECLARE_EVENT_CLASS(xfs_defer_class,
+ TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop),
+ TP_ARGS(mp, dop),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(void *, dop)
+ __field(bool, committed)
+ __field(bool, low)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp ? mp->m_super->s_dev : 0;
+ __entry->dop = dop;
+ __entry->committed = dop->dop_committed;
+ __entry->low = dop->dop_low;
+ ),
+ TP_printk("dev %d:%d ops %p committed %d low %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->dop,
+ __entry->committed,
+ __entry->low)
+)
+#define DEFINE_DEFER_EVENT(name) \
+DEFINE_EVENT(xfs_defer_class, name, \
+ TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop), \
+ TP_ARGS(mp, dop))
+
+DECLARE_EVENT_CLASS(xfs_defer_error_class,
+ TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop, int error),
+ TP_ARGS(mp, dop, error),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(void *, dop)
+ __field(bool, committed)
+ __field(bool, low)
+ __field(int, error)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp ? mp->m_super->s_dev : 0;
+ __entry->dop = dop;
+ __entry->committed = dop->dop_committed;
+ __entry->low = dop->dop_low;
+ __entry->error = error;
+ ),
+ TP_printk("dev %d:%d ops %p committed %d low %d err %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->dop,
+ __entry->committed,
+ __entry->low,
+ __entry->error)
+)
+#define DEFINE_DEFER_ERROR_EVENT(name) \
+DEFINE_EVENT(xfs_defer_error_class, name, \
+ TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop, int error), \
+ TP_ARGS(mp, dop, error))
+
+DECLARE_EVENT_CLASS(xfs_defer_pending_class,
+ TP_PROTO(struct xfs_mount *mp, struct xfs_defer_pending *dfp),
+ TP_ARGS(mp, dfp),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, type)
+ __field(void *, intent)
+ __field(bool, committed)
+ __field(int, nr)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp ? mp->m_super->s_dev : 0;
+ __entry->type = dfp->dfp_type->type;
+ __entry->intent = dfp->dfp_intent;
+ __entry->committed = dfp->dfp_committed;
+ __entry->nr = dfp->dfp_count;
+ ),
+ TP_printk("dev %d:%d optype %d intent %p committed %d nr %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->type,
+ __entry->intent,
+ __entry->committed,
+ __entry->nr)
+)
+#define DEFINE_DEFER_PENDING_EVENT(name) \
+DEFINE_EVENT(xfs_defer_pending_class, name, \
+ TP_PROTO(struct xfs_mount *mp, struct xfs_defer_pending *dfp), \
+ TP_ARGS(mp, dfp))
+
+DECLARE_EVENT_CLASS(xfs_phys_extent_deferred_class,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+ int type, xfs_agblock_t agbno, xfs_extlen_t len),
+ TP_ARGS(mp, agno, type, agbno, len),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(int, type)
+ __field(xfs_agblock_t, agbno)
+ __field(xfs_extlen_t, len)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = agno;
+ __entry->type = type;
+ __entry->agbno = agbno;
+ __entry->len = len;
+ ),
+ TP_printk("dev %d:%d op %d agno %u agbno %u len %u",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->type,
+ __entry->agno,
+ __entry->agbno,
+ __entry->len)
+);
+#define DEFINE_PHYS_EXTENT_DEFERRED_EVENT(name) \
+DEFINE_EVENT(xfs_phys_extent_deferred_class, name, \
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
+ int type, \
+ xfs_agblock_t bno, \
+ xfs_extlen_t len), \
+ TP_ARGS(mp, agno, type, bno, len))
+
+DECLARE_EVENT_CLASS(xfs_map_extent_deferred_class,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+ int op,
+ xfs_agblock_t agbno,
+ xfs_ino_t ino,
+ int whichfork,
+ xfs_fileoff_t offset,
+ xfs_filblks_t len,
+ xfs_exntst_t state),
+ TP_ARGS(mp, agno, op, agbno, ino, whichfork, offset, len, state),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_ino_t, ino)
+ __field(xfs_agblock_t, agbno)
+ __field(int, whichfork)
+ __field(xfs_fileoff_t, l_loff)
+ __field(xfs_filblks_t, l_len)
+ __field(xfs_exntst_t, l_state)
+ __field(int, op)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = agno;
+ __entry->ino = ino;
+ __entry->agbno = agbno;
+ __entry->whichfork = whichfork;
+ __entry->l_loff = offset;
+ __entry->l_len = len;
+ __entry->l_state = state;
+ __entry->op = op;
+ ),
+ TP_printk("dev %d:%d op %d agno %u agbno %u owner %lld %s offset %llu len %llu state %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->op,
+ __entry->agno,
+ __entry->agbno,
+ __entry->ino,
+ __entry->whichfork == XFS_ATTR_FORK ? "attr" : "data",
+ __entry->l_loff,
+ __entry->l_len,
+ __entry->l_state)
+);
+#define DEFINE_MAP_EXTENT_DEFERRED_EVENT(name) \
+DEFINE_EVENT(xfs_map_extent_deferred_class, name, \
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
+ int op, \
+ xfs_agblock_t agbno, \
+ xfs_ino_t ino, \
+ int whichfork, \
+ xfs_fileoff_t offset, \
+ xfs_filblks_t len, \
+ xfs_exntst_t state), \
+ TP_ARGS(mp, agno, op, agbno, ino, whichfork, offset, len, state))
+
+DEFINE_DEFER_EVENT(xfs_defer_init);
+DEFINE_DEFER_EVENT(xfs_defer_cancel);
+DEFINE_DEFER_EVENT(xfs_defer_trans_roll);
+DEFINE_DEFER_EVENT(xfs_defer_trans_abort);
+DEFINE_DEFER_EVENT(xfs_defer_finish);
+DEFINE_DEFER_EVENT(xfs_defer_finish_done);
+
+DEFINE_DEFER_ERROR_EVENT(xfs_defer_trans_roll_error);
+DEFINE_DEFER_ERROR_EVENT(xfs_defer_finish_error);
+DEFINE_DEFER_ERROR_EVENT(xfs_defer_op_finish_error);
+
+DEFINE_DEFER_PENDING_EVENT(xfs_defer_intake_work);
+DEFINE_DEFER_PENDING_EVENT(xfs_defer_intake_cancel);
+DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_commit);
+DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_cancel);
+DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_finish);
+DEFINE_DEFER_PENDING_EVENT(xfs_defer_pending_abort);
+
+#define DEFINE_BMAP_FREE_DEFERRED_EVENT DEFINE_PHYS_EXTENT_DEFERRED_EVENT
+DEFINE_BMAP_FREE_DEFERRED_EVENT(xfs_bmap_free_defer);
+DEFINE_BMAP_FREE_DEFERRED_EVENT(xfs_bmap_free_deferred);
+
+/* rmap tracepoints */
+DECLARE_EVENT_CLASS(xfs_rmap_class,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+ xfs_agblock_t agbno, xfs_extlen_t len, bool unwritten,
+ struct xfs_owner_info *oinfo),
+ TP_ARGS(mp, agno, agbno, len, unwritten, oinfo),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agblock_t, agbno)
+ __field(xfs_extlen_t, len)
+ __field(uint64_t, owner)
+ __field(uint64_t, offset)
+ __field(unsigned long, flags)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = agno;
+ __entry->agbno = agbno;
+ __entry->len = len;
+ __entry->owner = oinfo->oi_owner;
+ __entry->offset = oinfo->oi_offset;
+ __entry->flags = oinfo->oi_flags;
+ if (unwritten)
+ __entry->flags |= XFS_RMAP_UNWRITTEN;
+ ),
+ TP_printk("dev %d:%d agno %u agbno %u len %u owner %lld offset %llu flags 0x%lx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->agbno,
+ __entry->len,
+ __entry->owner,
+ __entry->offset,
+ __entry->flags)
+);
+#define DEFINE_RMAP_EVENT(name) \
+DEFINE_EVENT(xfs_rmap_class, name, \
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
+ xfs_agblock_t agbno, xfs_extlen_t len, bool unwritten, \
+ struct xfs_owner_info *oinfo), \
+ TP_ARGS(mp, agno, agbno, len, unwritten, oinfo))
+
+/* simple AG-based error/%ip tracepoint class */
+DECLARE_EVENT_CLASS(xfs_ag_error_class,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int error,
+ unsigned long caller_ip),
+ TP_ARGS(mp, agno, error, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(int, error)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = agno;
+ __entry->error = error;
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d agno %u error %d caller %ps",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->error,
+ (char *)__entry->caller_ip)
+);
+
+#define DEFINE_AG_ERROR_EVENT(name) \
+DEFINE_EVENT(xfs_ag_error_class, name, \
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int error, \
+ unsigned long caller_ip), \
+ TP_ARGS(mp, agno, error, caller_ip))
+
+DEFINE_RMAP_EVENT(xfs_rmap_unmap);
+DEFINE_RMAP_EVENT(xfs_rmap_unmap_done);
+DEFINE_AG_ERROR_EVENT(xfs_rmap_unmap_error);
+DEFINE_RMAP_EVENT(xfs_rmap_map);
+DEFINE_RMAP_EVENT(xfs_rmap_map_done);
+DEFINE_AG_ERROR_EVENT(xfs_rmap_map_error);
+DEFINE_RMAP_EVENT(xfs_rmap_convert);
+DEFINE_RMAP_EVENT(xfs_rmap_convert_done);
+DEFINE_AG_ERROR_EVENT(xfs_rmap_convert_error);
+DEFINE_AG_ERROR_EVENT(xfs_rmap_convert_state);
+
+DECLARE_EVENT_CLASS(xfs_rmapbt_class,
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno,
+ xfs_agblock_t agbno, xfs_extlen_t len,
+ uint64_t owner, uint64_t offset, unsigned int flags),
+ TP_ARGS(mp, agno, agbno, len, owner, offset, flags),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agblock_t, agbno)
+ __field(xfs_extlen_t, len)
+ __field(uint64_t, owner)
+ __field(uint64_t, offset)
+ __field(unsigned int, flags)
+ ),
+ TP_fast_assign(
+ __entry->dev = mp->m_super->s_dev;
+ __entry->agno = agno;
+ __entry->agbno = agbno;
+ __entry->len = len;
+ __entry->owner = owner;
+ __entry->offset = offset;
+ __entry->flags = flags;
+ ),
+ TP_printk("dev %d:%d agno %u agbno %u len %u owner %lld offset %llu flags 0x%x",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->agbno,
+ __entry->len,
+ __entry->owner,
+ __entry->offset,
+ __entry->flags)
+);
+#define DEFINE_RMAPBT_EVENT(name) \
+DEFINE_EVENT(xfs_rmapbt_class, name, \
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, \
+ xfs_agblock_t agbno, xfs_extlen_t len, \
+ uint64_t owner, uint64_t offset, unsigned int flags), \
+ TP_ARGS(mp, agno, agbno, len, owner, offset, flags))
+
+#define DEFINE_RMAP_DEFERRED_EVENT DEFINE_MAP_EXTENT_DEFERRED_EVENT
+DEFINE_RMAP_DEFERRED_EVENT(xfs_rmap_defer);
+DEFINE_RMAP_DEFERRED_EVENT(xfs_rmap_deferred);
+
+DEFINE_BUSY_EVENT(xfs_rmapbt_alloc_block);
+DEFINE_BUSY_EVENT(xfs_rmapbt_free_block);
+DEFINE_RMAPBT_EVENT(xfs_rmap_update);
+DEFINE_RMAPBT_EVENT(xfs_rmap_insert);
+DEFINE_RMAPBT_EVENT(xfs_rmap_delete);
+DEFINE_AG_ERROR_EVENT(xfs_rmap_insert_error);
+DEFINE_AG_ERROR_EVENT(xfs_rmap_delete_error);
+DEFINE_AG_ERROR_EVENT(xfs_rmap_update_error);
+DEFINE_RMAPBT_EVENT(xfs_rmap_lookup_le_range_result);
+DEFINE_RMAPBT_EVENT(xfs_rmap_find_right_neighbor_result);
+DEFINE_RMAPBT_EVENT(xfs_rmap_find_left_neighbor_result);
+
#endif /* _TRACE_XFS_H */
#undef TRACE_INCLUDE_PATH
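
For context, each DEFINE_RMAP_EVENT() instantiation above expands to a trace_<name>() call site taking the xfs_rmap_class prototype. A minimal sketch of how such a tracepoint would be fired from rmap code (the wrapper function is illustrative, not part of this patch):

/*
 * Illustrative only: firing one of the DEFINE_RMAP_EVENT tracepoints.
 * This compiles to a near-no-op unless the tracepoint is enabled at runtime.
 */
static void
example_trace_rmap_map(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len,
	bool			unwritten,
	struct xfs_owner_info	*oinfo)
{
	trace_xfs_rmap_map(mp, agno, agbno, len, unwritten, oinfo);
}
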
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 9a462e892e4f..e2bf86aad33d 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -33,6 +33,9 @@ struct xfs_trans;
struct xfs_trans_res;
struct xfs_dquot_acct;
struct xfs_busy_extent;
+struct xfs_rud_log_item;
+struct xfs_rui_log_item;
+struct xfs_btree_cur;
typedef struct xfs_log_item {
struct list_head li_ail; /* AIL pointers */
@@ -52,6 +55,7 @@ typedef struct xfs_log_item {
/* delayed logging */
struct list_head li_cil; /* CIL pointers */
struct xfs_log_vec *li_lv; /* active log vector */
+ struct xfs_log_vec *li_lv_shadow; /* standby vector */
xfs_lsn_t li_seq; /* CIL commit seq */
} xfs_log_item_t;
@@ -209,17 +213,14 @@ void xfs_trans_ichgtime(struct xfs_trans *, struct xfs_inode *, int);
void xfs_trans_ijoin(struct xfs_trans *, struct xfs_inode *, uint);
void xfs_trans_log_buf(xfs_trans_t *, struct xfs_buf *, uint, uint);
void xfs_trans_log_inode(xfs_trans_t *, struct xfs_inode *, uint);
-struct xfs_efi_log_item *xfs_trans_get_efi(xfs_trans_t *, uint);
-void xfs_trans_log_efi_extent(xfs_trans_t *,
- struct xfs_efi_log_item *,
- xfs_fsblock_t,
- xfs_extlen_t);
-struct xfs_efd_log_item *xfs_trans_get_efd(xfs_trans_t *,
+
+void xfs_extent_free_init_defer_op(void);
+struct xfs_efd_log_item *xfs_trans_get_efd(struct xfs_trans *,
struct xfs_efi_log_item *,
uint);
int xfs_trans_free_extent(struct xfs_trans *,
struct xfs_efd_log_item *, xfs_fsblock_t,
- xfs_extlen_t);
+ xfs_extlen_t, struct xfs_owner_info *);
int xfs_trans_commit(struct xfs_trans *);
int __xfs_trans_roll(struct xfs_trans **, struct xfs_inode *, int *);
int xfs_trans_roll(struct xfs_trans **, struct xfs_inode *);
@@ -235,4 +236,16 @@ void xfs_trans_buf_copy_type(struct xfs_buf *dst_bp,
extern kmem_zone_t *xfs_trans_zone;
extern kmem_zone_t *xfs_log_item_desc_zone;
+/* rmap updates */
+enum xfs_rmap_intent_type;
+
+void xfs_rmap_update_init_defer_op(void);
+struct xfs_rud_log_item *xfs_trans_get_rud(struct xfs_trans *tp,
+ struct xfs_rui_log_item *ruip);
+int xfs_trans_log_finish_rmap_update(struct xfs_trans *tp,
+ struct xfs_rud_log_item *rudp, enum xfs_rmap_intent_type type,
+ __uint64_t owner, int whichfork, xfs_fileoff_t startoff,
+ xfs_fsblock_t startblock, xfs_filblks_t blockcount,
+ xfs_exntst_t state, struct xfs_btree_cur **pcur);
+
#endif /* __XFS_TRANS_H__ */
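
To illustrate the widened xfs_trans_free_extent() prototype above, callers now pass an owner descriptor along with the extent. A minimal sketch, assuming the XFS_RMAP_OWN_NULL owner constant from the rmap series; the helper itself is hypothetical:

/* Sketch: free one extent through an EFD, supplying rmap owner info. */
static int
example_free_extent(
	struct xfs_trans	*tp,
	struct xfs_efd_log_item	*efdp,
	xfs_fsblock_t		fsbno,
	xfs_extlen_t		len)
{
	struct xfs_owner_info	oinfo;

	oinfo.oi_owner = XFS_RMAP_OWN_NULL;	/* assumed "no owner" case */
	oinfo.oi_offset = 0;
	oinfo.oi_flags = 0;
	return xfs_trans_free_extent(tp, efdp, fsbno, len, &oinfo);
}
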
diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c
index a96ae540eb62..459ddec137a4 100644
--- a/fs/xfs/xfs_trans_extfree.c
+++ b/fs/xfs/xfs_trans_extfree.c
@@ -21,66 +21,15 @@
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
+#include "xfs_bit.h"
#include "xfs_mount.h"
+#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_extfree_item.h"
#include "xfs_alloc.h"
-
-/*
- * This routine is called to allocate an "extent free intention"
- * log item that will hold nextents worth of extents. The
- * caller must use all nextents extents, because we are not
- * flexible about this at all.
- */
-xfs_efi_log_item_t *
-xfs_trans_get_efi(xfs_trans_t *tp,
- uint nextents)
-{
- xfs_efi_log_item_t *efip;
-
- ASSERT(tp != NULL);
- ASSERT(nextents > 0);
-
- efip = xfs_efi_init(tp->t_mountp, nextents);
- ASSERT(efip != NULL);
-
- /*
- * Get a log_item_desc to point at the new item.
- */
- xfs_trans_add_item(tp, &efip->efi_item);
- return efip;
-}
-
-/*
- * This routine is called to indicate that the described
- * extent is to be logged as needing to be freed. It should
- * be called once for each extent to be freed.
- */
-void
-xfs_trans_log_efi_extent(xfs_trans_t *tp,
- xfs_efi_log_item_t *efip,
- xfs_fsblock_t start_block,
- xfs_extlen_t ext_len)
-{
- uint next_extent;
- xfs_extent_t *extp;
-
- tp->t_flags |= XFS_TRANS_DIRTY;
- efip->efi_item.li_desc->lid_flags |= XFS_LID_DIRTY;
-
- /*
- * atomic_inc_return gives us the value after the increment;
- * we want to use it as an array index so we need to subtract 1 from
- * it.
- */
- next_extent = atomic_inc_return(&efip->efi_next_extent) - 1;
- ASSERT(next_extent < efip->efi_format.efi_nextents);
- extp = &(efip->efi_format.efi_extents[next_extent]);
- extp->ext_start = start_block;
- extp->ext_len = ext_len;
-}
-
+#include "xfs_bmap.h"
+#include "xfs_trace.h"
/*
* This routine is called to allocate an "extent free done"
@@ -88,12 +37,12 @@ xfs_trans_log_efi_extent(xfs_trans_t *tp,
* caller must use all nextents extents, because we are not
* flexible about this at all.
*/
-xfs_efd_log_item_t *
-xfs_trans_get_efd(xfs_trans_t *tp,
- xfs_efi_log_item_t *efip,
- uint nextents)
+struct xfs_efd_log_item *
+xfs_trans_get_efd(struct xfs_trans *tp,
+ struct xfs_efi_log_item *efip,
+ uint nextents)
{
- xfs_efd_log_item_t *efdp;
+ struct xfs_efd_log_item *efdp;
ASSERT(tp != NULL);
ASSERT(nextents > 0);
@@ -118,13 +67,19 @@ xfs_trans_free_extent(
struct xfs_trans *tp,
struct xfs_efd_log_item *efdp,
xfs_fsblock_t start_block,
- xfs_extlen_t ext_len)
+ xfs_extlen_t ext_len,
+ struct xfs_owner_info *oinfo)
{
+ struct xfs_mount *mp = tp->t_mountp;
uint next_extent;
+ xfs_agnumber_t agno = XFS_FSB_TO_AGNO(mp, start_block);
+ xfs_agblock_t agbno = XFS_FSB_TO_AGBNO(mp, start_block);
struct xfs_extent *extp;
int error;
- error = xfs_free_extent(tp, start_block, ext_len);
+ trace_xfs_bmap_free_deferred(mp, agno, 0, agbno, ext_len);
+
+ error = xfs_free_extent(tp, start_block, ext_len, oinfo);
/*
* Mark the transaction dirty, even on error. This ensures the
@@ -145,3 +100,139 @@ xfs_trans_free_extent(
return error;
}
+
+/* Sort extent free items by AG. */
+static int
+xfs_extent_free_diff_items(
+ void *priv,
+ struct list_head *a,
+ struct list_head *b)
+{
+ struct xfs_mount *mp = priv;
+ struct xfs_extent_free_item *ra;
+ struct xfs_extent_free_item *rb;
+
+ ra = container_of(a, struct xfs_extent_free_item, xefi_list);
+ rb = container_of(b, struct xfs_extent_free_item, xefi_list);
+ return XFS_FSB_TO_AGNO(mp, ra->xefi_startblock) -
+ XFS_FSB_TO_AGNO(mp, rb->xefi_startblock);
+}
+
+/* Get an EFI. */
+STATIC void *
+xfs_extent_free_create_intent(
+ struct xfs_trans *tp,
+ unsigned int count)
+{
+ struct xfs_efi_log_item *efip;
+
+ ASSERT(tp != NULL);
+ ASSERT(count > 0);
+
+ efip = xfs_efi_init(tp->t_mountp, count);
+ ASSERT(efip != NULL);
+
+ /*
+ * Get a log_item_desc to point at the new item.
+ */
+ xfs_trans_add_item(tp, &efip->efi_item);
+ return efip;
+}
+
+/* Log a free extent to the intent item. */
+STATIC void
+xfs_extent_free_log_item(
+ struct xfs_trans *tp,
+ void *intent,
+ struct list_head *item)
+{
+ struct xfs_efi_log_item *efip = intent;
+ struct xfs_extent_free_item *free;
+ uint next_extent;
+ struct xfs_extent *extp;
+
+ free = container_of(item, struct xfs_extent_free_item, xefi_list);
+
+ tp->t_flags |= XFS_TRANS_DIRTY;
+ efip->efi_item.li_desc->lid_flags |= XFS_LID_DIRTY;
+
+ /*
+ * atomic_inc_return gives us the value after the increment;
+ * we want to use it as an array index so we need to subtract 1 from
+ * it.
+ */
+ next_extent = atomic_inc_return(&efip->efi_next_extent) - 1;
+ ASSERT(next_extent < efip->efi_format.efi_nextents);
+ extp = &efip->efi_format.efi_extents[next_extent];
+ extp->ext_start = free->xefi_startblock;
+ extp->ext_len = free->xefi_blockcount;
+}
+
+/* Get an EFD so we can process all the free extents. */
+STATIC void *
+xfs_extent_free_create_done(
+ struct xfs_trans *tp,
+ void *intent,
+ unsigned int count)
+{
+ return xfs_trans_get_efd(tp, intent, count);
+}
+
+/* Process a free extent. */
+STATIC int
+xfs_extent_free_finish_item(
+ struct xfs_trans *tp,
+ struct xfs_defer_ops *dop,
+ struct list_head *item,
+ void *done_item,
+ void **state)
+{
+ struct xfs_extent_free_item *free;
+ int error;
+
+ free = container_of(item, struct xfs_extent_free_item, xefi_list);
+ error = xfs_trans_free_extent(tp, done_item,
+ free->xefi_startblock,
+ free->xefi_blockcount,
+ &free->xefi_oinfo);
+ kmem_free(free);
+ return error;
+}
+
+/* Abort all pending EFIs. */
+STATIC void
+xfs_extent_free_abort_intent(
+ void *intent)
+{
+ xfs_efi_release(intent);
+}
+
+/* Cancel a free extent. */
+STATIC void
+xfs_extent_free_cancel_item(
+ struct list_head *item)
+{
+ struct xfs_extent_free_item *free;
+
+ free = container_of(item, struct xfs_extent_free_item, xefi_list);
+ kmem_free(free);
+}
+
+static const struct xfs_defer_op_type xfs_extent_free_defer_type = {
+ .type = XFS_DEFER_OPS_TYPE_FREE,
+ .max_items = XFS_EFI_MAX_FAST_EXTENTS,
+ .diff_items = xfs_extent_free_diff_items,
+ .create_intent = xfs_extent_free_create_intent,
+ .abort_intent = xfs_extent_free_abort_intent,
+ .log_item = xfs_extent_free_log_item,
+ .create_done = xfs_extent_free_create_done,
+ .finish_item = xfs_extent_free_finish_item,
+ .cancel_item = xfs_extent_free_cancel_item,
+};
+
+/* Register the deferred op type. */
+void
+xfs_extent_free_init_defer_op(void)
+{
+ xfs_defer_init_op_type(&xfs_extent_free_defer_type);
+}
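
The xfs_defer_op_type vector above only takes effect once it is hooked into the deferred-ops dispatcher. A sketch of the one-time registration, assuming it runs from the XFS module init path (the enclosing function name is illustrative):

/* Sketch: register the EFI/EFD deferred-op handlers once at startup. */
static int __init
example_xfs_init_defer_ops(void)
{
	xfs_extent_free_init_defer_op();
	return 0;
}

After registration, the deferred-ops core drives each pending free through create_intent, then log_item per list entry, then create_done and finish_item, with abort_intent and cancel_item covering the error paths.
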
diff --git a/fs/xfs/xfs_trans_rmap.c b/fs/xfs/xfs_trans_rmap.c
new file mode 100644
index 000000000000..5a50ef881568
--- /dev/null
+++ b/fs/xfs/xfs_trans_rmap.c
@@ -0,0 +1,271 @@
+/*
+ * Copyright (C) 2016 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it would be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include "xfs.h"
+#include "xfs_fs.h"
+#include "xfs_shared.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_mount.h"
+#include "xfs_defer.h"
+#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
+#include "xfs_rmap_item.h"
+#include "xfs_alloc.h"
+#include "xfs_rmap.h"
+
+/* Set the map extent flags for this reverse mapping. */
+static void
+xfs_trans_set_rmap_flags(
+ struct xfs_map_extent *rmap,
+ enum xfs_rmap_intent_type type,
+ int whichfork,
+ xfs_exntst_t state)
+{
+ rmap->me_flags = 0;
+ if (state == XFS_EXT_UNWRITTEN)
+ rmap->me_flags |= XFS_RMAP_EXTENT_UNWRITTEN;
+ if (whichfork == XFS_ATTR_FORK)
+ rmap->me_flags |= XFS_RMAP_EXTENT_ATTR_FORK;
+ switch (type) {
+ case XFS_RMAP_MAP:
+ rmap->me_flags |= XFS_RMAP_EXTENT_MAP;
+ break;
+ case XFS_RMAP_UNMAP:
+ rmap->me_flags |= XFS_RMAP_EXTENT_UNMAP;
+ break;
+ case XFS_RMAP_CONVERT:
+ rmap->me_flags |= XFS_RMAP_EXTENT_CONVERT;
+ break;
+ case XFS_RMAP_ALLOC:
+ rmap->me_flags |= XFS_RMAP_EXTENT_ALLOC;
+ break;
+ case XFS_RMAP_FREE:
+ rmap->me_flags |= XFS_RMAP_EXTENT_FREE;
+ break;
+ default:
+ ASSERT(0);
+ }
+}
+
+struct xfs_rud_log_item *
+xfs_trans_get_rud(
+ struct xfs_trans *tp,
+ struct xfs_rui_log_item *ruip)
+{
+ struct xfs_rud_log_item *rudp;
+
+ rudp = xfs_rud_init(tp->t_mountp, ruip);
+ xfs_trans_add_item(tp, &rudp->rud_item);
+ return rudp;
+}
+
+/*
+ * Finish an rmap update and log it to the RUD. Note that the transaction is
+ * marked dirty regardless of whether the rmap update succeeds or fails, in
+ * order to satisfy the RUI/RUD lifecycle rules.
+ */
+int
+xfs_trans_log_finish_rmap_update(
+ struct xfs_trans *tp,
+ struct xfs_rud_log_item *rudp,
+ enum xfs_rmap_intent_type type,
+ __uint64_t owner,
+ int whichfork,
+ xfs_fileoff_t startoff,
+ xfs_fsblock_t startblock,
+ xfs_filblks_t blockcount,
+ xfs_exntst_t state,
+ struct xfs_btree_cur **pcur)
+{
+ int error;
+
+ error = xfs_rmap_finish_one(tp, type, owner, whichfork, startoff,
+ startblock, blockcount, state, pcur);
+
+ /*
+ * Mark the transaction dirty, even on error. This ensures the
+ * transaction is aborted, which:
+ *
+ * 1.) releases the RUI and frees the RUD
+ * 2.) shuts down the filesystem
+ */
+ tp->t_flags |= XFS_TRANS_DIRTY;
+ rudp->rud_item.li_desc->lid_flags |= XFS_LID_DIRTY;
+
+ return error;
+}
+
+/* Sort rmap intents by AG. */
+static int
+xfs_rmap_update_diff_items(
+ void *priv,
+ struct list_head *a,
+ struct list_head *b)
+{
+ struct xfs_mount *mp = priv;
+ struct xfs_rmap_intent *ra;
+ struct xfs_rmap_intent *rb;
+
+ ra = container_of(a, struct xfs_rmap_intent, ri_list);
+ rb = container_of(b, struct xfs_rmap_intent, ri_list);
+ return XFS_FSB_TO_AGNO(mp, ra->ri_bmap.br_startblock) -
+ XFS_FSB_TO_AGNO(mp, rb->ri_bmap.br_startblock);
+}
+
+/* Get an RUI. */
+STATIC void *
+xfs_rmap_update_create_intent(
+ struct xfs_trans *tp,
+ unsigned int count)
+{
+ struct xfs_rui_log_item *ruip;
+
+ ASSERT(tp != NULL);
+ ASSERT(count > 0);
+
+ ruip = xfs_rui_init(tp->t_mountp, count);
+ ASSERT(ruip != NULL);
+
+ /*
+ * Get a log_item_desc to point at the new item.
+ */
+ xfs_trans_add_item(tp, &ruip->rui_item);
+ return ruip;
+}
+
+/* Log rmap updates in the intent item. */
+STATIC void
+xfs_rmap_update_log_item(
+ struct xfs_trans *tp,
+ void *intent,
+ struct list_head *item)
+{
+ struct xfs_rui_log_item *ruip = intent;
+ struct xfs_rmap_intent *rmap;
+ uint next_extent;
+ struct xfs_map_extent *map;
+
+ rmap = container_of(item, struct xfs_rmap_intent, ri_list);
+
+ tp->t_flags |= XFS_TRANS_DIRTY;
+ ruip->rui_item.li_desc->lid_flags |= XFS_LID_DIRTY;
+
+ /*
+ * atomic_inc_return gives us the value after the increment;
+ * we want to use it as an array index so we need to subtract 1 from
+ * it.
+ */
+ next_extent = atomic_inc_return(&ruip->rui_next_extent) - 1;
+ ASSERT(next_extent < ruip->rui_format.rui_nextents);
+ map = &ruip->rui_format.rui_extents[next_extent];
+ map->me_owner = rmap->ri_owner;
+ map->me_startblock = rmap->ri_bmap.br_startblock;
+ map->me_startoff = rmap->ri_bmap.br_startoff;
+ map->me_len = rmap->ri_bmap.br_blockcount;
+ xfs_trans_set_rmap_flags(map, rmap->ri_type, rmap->ri_whichfork,
+ rmap->ri_bmap.br_state);
+}
+
+/* Get an RUD so we can process all the deferred rmap updates. */
+STATIC void *
+xfs_rmap_update_create_done(
+ struct xfs_trans *tp,
+ void *intent,
+ unsigned int count)
+{
+ return xfs_trans_get_rud(tp, intent);
+}
+
+/* Process a deferred rmap update. */
+STATIC int
+xfs_rmap_update_finish_item(
+ struct xfs_trans *tp,
+ struct xfs_defer_ops *dop,
+ struct list_head *item,
+ void *done_item,
+ void **state)
+{
+ struct xfs_rmap_intent *rmap;
+ int error;
+
+ rmap = container_of(item, struct xfs_rmap_intent, ri_list);
+ error = xfs_trans_log_finish_rmap_update(tp, done_item,
+ rmap->ri_type,
+ rmap->ri_owner, rmap->ri_whichfork,
+ rmap->ri_bmap.br_startoff,
+ rmap->ri_bmap.br_startblock,
+ rmap->ri_bmap.br_blockcount,
+ rmap->ri_bmap.br_state,
+ (struct xfs_btree_cur **)state);
+ kmem_free(rmap);
+ return error;
+}
+
+/* Clean up after processing deferred rmaps. */
+STATIC void
+xfs_rmap_update_finish_cleanup(
+ struct xfs_trans *tp,
+ void *state,
+ int error)
+{
+ struct xfs_btree_cur *rcur = state;
+
+ xfs_rmap_finish_one_cleanup(tp, rcur, error);
+}
+
+/* Abort all pending RUIs. */
+STATIC void
+xfs_rmap_update_abort_intent(
+ void *intent)
+{
+ xfs_rui_release(intent);
+}
+
+/* Cancel a deferred rmap update. */
+STATIC void
+xfs_rmap_update_cancel_item(
+ struct list_head *item)
+{
+ struct xfs_rmap_intent *rmap;
+
+ rmap = container_of(item, struct xfs_rmap_intent, ri_list);
+ kmem_free(rmap);
+}
+
+static const struct xfs_defer_op_type xfs_rmap_update_defer_type = {
+ .type = XFS_DEFER_OPS_TYPE_RMAP,
+ .max_items = XFS_RUI_MAX_FAST_EXTENTS,
+ .diff_items = xfs_rmap_update_diff_items,
+ .create_intent = xfs_rmap_update_create_intent,
+ .abort_intent = xfs_rmap_update_abort_intent,
+ .log_item = xfs_rmap_update_log_item,
+ .create_done = xfs_rmap_update_create_done,
+ .finish_item = xfs_rmap_update_finish_item,
+ .finish_cleanup = xfs_rmap_update_finish_cleanup,
+ .cancel_item = xfs_rmap_update_cancel_item,
+};
+
+/* Register the deferred op type. */
+void
+xfs_rmap_update_init_defer_op(void)
+{
+ xfs_defer_init_op_type(&xfs_rmap_update_defer_type);
+}
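
For reference, a sketch of what xfs_trans_set_rmap_flags() computes for one concrete case; the wrapper is illustrative, but the flag algebra follows directly from the switch statement above:

/* Sketch: an unwritten map into the attribute fork sets all three flags. */
static void
example_set_rmap_flags(void)
{
	struct xfs_map_extent	rmap;

	xfs_trans_set_rmap_flags(&rmap, XFS_RMAP_MAP, XFS_ATTR_FORK,
			XFS_EXT_UNWRITTEN);
	ASSERT(rmap.me_flags == (XFS_RMAP_EXTENT_MAP |
				 XFS_RMAP_EXTENT_ATTR_FORK |
				 XFS_RMAP_EXTENT_UNWRITTEN));
}

As with the extent-free type, xfs_rmap_update_init_defer_op() must be called once at startup before any rmap intents are deferred.
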