author     Linus Torvalds <torvalds@linux-foundation.org>  2020-04-02 13:02:07 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-04-02 13:02:07 -0700
commit     7be97138e7276c71cc9ad1752dcb502d28f4400d (patch)
tree       c8e5139e55f01a2932f48d9fd8f832a4b02112d3  /fs/xfs/libxfs/xfs_refcount_btree.c
parent     7db83c070bd29e73c8bb42d4b48c976be76f1dbe (diff)
parent     27fb5a72f50aa770dd38b0478c07acacef97e3e7 (diff)
Merge tag 'xfs-5.7-merge-8' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux
Pull xfs updates from Darrick Wong:
 "There's a lot going on this cycle with cleanups in the log code, the
  btree code, and the xattr code. We're tightening metadata validation
  and online fsck checking, and introducing a common btree rebuilding
  library so that we can refactor xfs_repair and introduce online
  repair in a future cycle.

  We also fixed a few visible bugs -- most notably one in getdents that
  we introduced in 5.6, and a fix for hangs when disabling quotas.

  This series has been running fstests & other QA in the background for
  over a week and looks good so far.

  I anticipate sending a second pull request next week. That batch will
  change how xfs interacts with memory reclaim; how the log batches and
  throttles log items; how hard writes near ENOSPC will try to squeeze
  more space out of the filesystem; and hopefully fix the last of the
  umount hangs after a catastrophic failure. That should ease a lot of
  problems when running at the limits, but for now I'm leaving that in
  for-next for another week to make sure we got all the subtleties
  right.

  Summary:

   - Fix a hard to trigger race between iclog error checking and log
     shutdown.

   - Strengthen the AGF verifier.

   - Ratelimit some of the more spammy error messages.

   - Remove the icdinode uid/gid members and just use the ones in the
     vfs inode.

   - Hold ILOCK across insert/collapse range.

   - Clean up the extended attribute interfaces.

   - Clean up the attr flags mess.

   - Restore PF_MEMALLOC after exiting xfsaild thread to avoid
     triggering warnings in the process accounting code.

   - Remove the flexibly-sized array from struct xfs_agfl to eliminate
     compiler warnings about unaligned pointers and packed structures.

   - Various macro and typedef removals.

   - Stale metadata buffers if we decide they're corrupt outside of a
     verifier.

   - Check directory data/block/free block owners.

   - Fix a UAF when aborting inactivation of a corrupt xattr fork.

   - Teach online scrub to report failed directory and attr name
     lookups as a metadata corruption instead of a runtime error.

   - Avoid potential buffer overflows in sysfs files by using
     scnprintf.

   - Fix a regression in getdents lookups due to a mistake in pointer
     arithmetic.

   - Refactor btree cursor private data structures to use anonymous
     unions (a cursor layout sketch follows the shortlog below).

   - Cleanups in the log unmounting code.

   - Fix a potential mishandling of ENOMEM errors on multi-block
     directory buffer lookups.

   - Fix an incorrect test in the block allocation code.

   - Cleanups and name prefix shortening in the scrub code.

   - Introduce btree bulk loading code for online repair and scrub.

   - Fix a quotaoff log item leak (and hang) when the fs goes down
     midway through a quotaoff operation.

   - Remove di_version from the incore inode.

   - Refactor some of the log shutdown checking code.

   - Record the forcing of the log unmount records in the log force
     counters.

   - Fix a longstanding bug where quotacheck would purge the
     administrator's default quota grace interval and warning limits.

   - Reduce memory usage when scrubbing directory and xattr trees.

   - Don't let fsfreeze race with GETFSMAP or online scrub.

   - Handle bio_add_page failures more gracefully in xlog_write_iclog"

* tag 'xfs-5.7-merge-8' of git://git.kernel.org/pub/scm/fs/xfs/xfs-linux: (108 commits)
  xfs: prohibit fs freezing when using empty transactions
  xfs: shutdown on failure to add page to log bio
  xfs: directory bestfree check should release buffers
  xfs: drop all altpath buffers at the end of the sibling check
  xfs: preserve default grace interval during quotacheck
  xfs: remove xlog_state_want_sync
  xfs: move the ioerror check out of xlog_state_clean_iclog
  xfs: refactor xlog_state_clean_iclog
  xfs: remove the aborted parameter to xlog_state_done_syncing
  xfs: simplify log shutdown checking in xfs_log_release_iclog
  xfs: simplify the xfs_log_release_iclog calling convention
  xfs: factor out a xlog_wait_on_iclog helper
  xfs: merge xlog_cil_push into xlog_cil_push_work
  xfs: remove the di_version field from struct icdinode
  xfs: simplify a check in xfs_ioctl_setattr_check_cowextsize
  xfs: simplify di_flags2 inheritance in xfs_ialloc
  xfs: only check the superblock version for dinode size calculation
  xfs: add a new xfs_sb_version_has_v3inode helper
  xfs: fix unmount hang and memory leak on shutdown during quotaoff
  xfs: factor out quotaoff intent AIL removal and memory free
  ...
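For readers following the "anonymous unions" item above: the refactor turns the
old cur->bc_private.a.* accesses into the cur->bc_ag.* accesses seen throughout
the diff below. The following is a minimal sketch of that layout, not the
kernel's real struct xfs_btree_cur; the bc_ag member names are taken from this
diff, while the struct name and the inode-rooted arm are assumptions added only
for contrast.

/* Illustrative sketch of the anonymous-union cursor private area. */
struct xfs_buf;
struct xfs_mount;
struct xfs_trans;
struct xbtree_afakeroot;			/* fake root used while staging */

struct xfs_btree_cur_sketch {
	struct xfs_trans		*bc_tp;
	struct xfs_mount		*bc_mp;
	unsigned int			bc_flags;
	union {
		struct {			/* per-AG btrees (refcount, rmap, ...) */
			struct xfs_buf		*agbp;
			struct xbtree_afakeroot	*afake;	/* non-NULL only while staging */
			unsigned int		agno;
			struct {
				unsigned int	nr_ops;
				int		shape_changes;
			} refc;
		} bc_ag;
		struct {			/* inode-rooted btrees; assumed, not in this diff */
			int			whichfork;
		} bc_ino;
	};
};

With that shape, the accesses in the hunks below read as cur->bc_ag.agbp and
cur->bc_ag.agno rather than cur->bc_private.a.agbp and cur->bc_private.a.agno.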
Diffstat (limited to 'fs/xfs/libxfs/xfs_refcount_btree.c')
-rw-r--r--  fs/xfs/libxfs/xfs_refcount_btree.c  104
1 file changed, 77 insertions(+), 27 deletions(-)
diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c
index 38529dbacd55..7fd6044a4f78 100644
--- a/fs/xfs/libxfs/xfs_refcount_btree.c
+++ b/fs/xfs/libxfs/xfs_refcount_btree.c
@@ -12,6 +12,7 @@
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
+#include "xfs_btree_staging.h"
#include "xfs_refcount_btree.h"
#include "xfs_alloc.h"
#include "xfs_error.h"
@@ -25,7 +26,7 @@ xfs_refcountbt_dup_cursor(
struct xfs_btree_cur *cur)
{
return xfs_refcountbt_init_cursor(cur->bc_mp, cur->bc_tp,
- cur->bc_private.a.agbp, cur->bc_private.a.agno);
+ cur->bc_ag.agbp, cur->bc_ag.agno);
}
STATIC void
@@ -34,8 +35,8 @@ xfs_refcountbt_set_root(
union xfs_btree_ptr *ptr,
int inc)
{
- struct xfs_buf *agbp = cur->bc_private.a.agbp;
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ struct xfs_buf *agbp = cur->bc_ag.agbp;
+ struct xfs_agf *agf = agbp->b_addr;
xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
struct xfs_perag *pag = xfs_perag_get(cur->bc_mp, seqno);
@@ -57,8 +58,8 @@ xfs_refcountbt_alloc_block(
union xfs_btree_ptr *new,
int *stat)
{
- struct xfs_buf *agbp = cur->bc_private.a.agbp;
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ struct xfs_buf *agbp = cur->bc_ag.agbp;
+ struct xfs_agf *agf = agbp->b_addr;
struct xfs_alloc_arg args; /* block allocation args */
int error; /* error return value */
@@ -66,7 +67,7 @@ xfs_refcountbt_alloc_block(
args.tp = cur->bc_tp;
args.mp = cur->bc_mp;
args.type = XFS_ALLOCTYPE_NEAR_BNO;
- args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
+ args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_ag.agno,
xfs_refc_block(args.mp));
args.oinfo = XFS_RMAP_OINFO_REFC;
args.minlen = args.maxlen = args.prod = 1;
@@ -75,13 +76,13 @@ xfs_refcountbt_alloc_block(
error = xfs_alloc_vextent(&args);
if (error)
goto out_error;
- trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_private.a.agno,
+ trace_xfs_refcountbt_alloc_block(cur->bc_mp, cur->bc_ag.agno,
args.agbno, 1);
if (args.fsbno == NULLFSBLOCK) {
*stat = 0;
return 0;
}
- ASSERT(args.agno == cur->bc_private.a.agno);
+ ASSERT(args.agno == cur->bc_ag.agno);
ASSERT(args.len == 1);
new->s = cpu_to_be32(args.agbno);
@@ -101,12 +102,12 @@ xfs_refcountbt_free_block(
struct xfs_buf *bp)
{
struct xfs_mount *mp = cur->bc_mp;
- struct xfs_buf *agbp = cur->bc_private.a.agbp;
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
+ struct xfs_buf *agbp = cur->bc_ag.agbp;
+ struct xfs_agf *agf = agbp->b_addr;
xfs_fsblock_t fsbno = XFS_DADDR_TO_FSB(mp, XFS_BUF_ADDR(bp));
int error;
- trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_private.a.agno,
+ trace_xfs_refcountbt_free_block(cur->bc_mp, cur->bc_ag.agno,
XFS_FSB_TO_AGBNO(cur->bc_mp, fsbno), 1);
be32_add_cpu(&agf->agf_refcount_blocks, -1);
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);
@@ -169,9 +170,9 @@ xfs_refcountbt_init_ptr_from_cur(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *ptr)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
+ struct xfs_agf *agf = cur->bc_ag.agbp->b_addr;
- ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
+ ASSERT(cur->bc_ag.agno == be32_to_cpu(agf->agf_seqno));
ptr->s = agf->agf_refcount_root;
}
@@ -311,41 +312,90 @@ static const struct xfs_btree_ops xfs_refcountbt_ops = {
};
/*
- * Allocate a new refcount btree cursor.
+ * Initialize a new refcount btree cursor.
*/
-struct xfs_btree_cur *
-xfs_refcountbt_init_cursor(
+static struct xfs_btree_cur *
+xfs_refcountbt_init_common(
struct xfs_mount *mp,
struct xfs_trans *tp,
- struct xfs_buf *agbp,
xfs_agnumber_t agno)
{
- struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
struct xfs_btree_cur *cur;
ASSERT(agno != NULLAGNUMBER);
ASSERT(agno < mp->m_sb.sb_agcount);
- cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
+ cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS);
cur->bc_tp = tp;
cur->bc_mp = mp;
cur->bc_btnum = XFS_BTNUM_REFC;
cur->bc_blocklog = mp->m_sb.sb_blocklog;
- cur->bc_ops = &xfs_refcountbt_ops;
cur->bc_statoff = XFS_STATS_CALC_INDEX(xs_refcbt_2);
- cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);
-
- cur->bc_private.a.agbp = agbp;
- cur->bc_private.a.agno = agno;
+ cur->bc_ag.agno = agno;
cur->bc_flags |= XFS_BTREE_CRC_BLOCKS;
- cur->bc_private.a.priv.refc.nr_ops = 0;
- cur->bc_private.a.priv.refc.shape_changes = 0;
+ cur->bc_ag.refc.nr_ops = 0;
+ cur->bc_ag.refc.shape_changes = 0;
+ cur->bc_ops = &xfs_refcountbt_ops;
+ return cur;
+}
+/* Create a btree cursor. */
+struct xfs_btree_cur *
+xfs_refcountbt_init_cursor(
+ struct xfs_mount *mp,
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp,
+ xfs_agnumber_t agno)
+{
+ struct xfs_agf *agf = agbp->b_addr;
+ struct xfs_btree_cur *cur;
+
+ cur = xfs_refcountbt_init_common(mp, tp, agno);
+ cur->bc_nlevels = be32_to_cpu(agf->agf_refcount_level);
+ cur->bc_ag.agbp = agbp;
return cur;
}
+/* Create a btree cursor with a fake root for staging. */
+struct xfs_btree_cur *
+xfs_refcountbt_stage_cursor(
+ struct xfs_mount *mp,
+ struct xbtree_afakeroot *afake,
+ xfs_agnumber_t agno)
+{
+ struct xfs_btree_cur *cur;
+
+ cur = xfs_refcountbt_init_common(mp, NULL, agno);
+ xfs_btree_stage_afakeroot(cur, afake);
+ return cur;
+}
+
+/*
+ * Swap in the new btree root. Once we pass this point the newly rebuilt btree
+ * is in place and we have to kill off all the old btree blocks.
+ */
+void
+xfs_refcountbt_commit_staged_btree(
+ struct xfs_btree_cur *cur,
+ struct xfs_trans *tp,
+ struct xfs_buf *agbp)
+{
+ struct xfs_agf *agf = agbp->b_addr;
+ struct xbtree_afakeroot *afake = cur->bc_ag.afake;
+
+ ASSERT(cur->bc_flags & XFS_BTREE_STAGING);
+
+ agf->agf_refcount_root = cpu_to_be32(afake->af_root);
+ agf->agf_refcount_level = cpu_to_be32(afake->af_levels);
+ agf->agf_refcount_blocks = cpu_to_be32(afake->af_blocks);
+ xfs_alloc_log_agf(tp, agbp, XFS_AGF_REFCOUNT_BLOCKS |
+ XFS_AGF_REFCOUNT_ROOT |
+ XFS_AGF_REFCOUNT_LEVEL);
+ xfs_btree_commit_afakeroot(cur, tp, agbp, &xfs_refcountbt_ops);
+}
+
/*
* Calculate the number of records in a refcount btree block.
*/
@@ -420,7 +470,7 @@ xfs_refcountbt_calc_reserves(
if (error)
return error;
- agf = XFS_BUF_TO_AGF(agbp);
+ agf = agbp->b_addr;
agblocks = be32_to_cpu(agf->agf_length);
tree_len = be32_to_cpu(agf->agf_refcount_blocks);
xfs_trans_brelse(tp, agbp);
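
To close, a hedged sketch of how the new staging entry points above are meant
to be used together. Only xfs_refcountbt_stage_cursor(),
xfs_refcountbt_commit_staged_btree(), and xfs_btree_del_cursor() are real
interfaces visible in or around this diff; the caller's name, the omitted
bulk-load step, and the error handling are assumptions about the repair code
this series is preparing for, not something defined in this file.

/* Hypothetical caller; function name and middle step are assumptions. */
STATIC int
xfs_refcountbt_rebuild_sketch(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_buf		*agbp,
	xfs_agnumber_t		agno)
{
	struct xbtree_afakeroot	afake = { 0 };
	struct xfs_btree_cur	*cur;
	int			error = 0;

	/* Cursor rooted in the fake root instead of the on-disk AGF. */
	cur = xfs_refcountbt_stage_cursor(mp, &afake, agno);

	/*
	 * Bulk-load the replacement records into freshly allocated blocks
	 * here using the xfs_btree_staging helpers (outside this diff);
	 * they fill in afake.af_root, af_levels, and af_blocks as they go,
	 * setting error on failure.
	 */

	if (error) {
		xfs_btree_del_cursor(cur, error);
		return error;
	}

	/* Point the AGF at the new root, log it, and convert the cursor. */
	xfs_refcountbt_commit_staged_btree(cur, tp, agbp);
	xfs_btree_del_cursor(cur, 0);

	/* The old btree's blocks still have to be reaped by the caller. */
	return 0;
}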