author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-01-16 11:57:53 +0100
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2017-01-16 11:57:53 +0100
commit		a47b66da0380dd1ebcf2fbfa5fc41c4b6af8a460 (patch)
tree		3e1e18183f2041d534a1a763af95df1ec460e044 /fs/btrfs/extent-tree.c
parent		1129d270cbfbb7e2b1ec3dede4a13930bdd10e41 (diff)
parent		49def1853334396f948dcb4cedb9347abb318df5 (diff)
Merge 4.10-rc4 into usb-next
We need the USB fixes in here to make merges easier/possible with the other
sub-maintainer USB trees.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'fs/btrfs/extent-tree.c')
-rw-r--r--	fs/btrfs/extent-tree.c	|	8	++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index e97302f437a1..dcd2e798767e 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2522,11 +2522,11 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 		if (ref && ref->seq &&
 		    btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
 			spin_unlock(&locked_ref->lock);
-			btrfs_delayed_ref_unlock(locked_ref);
 			spin_lock(&delayed_refs->lock);
 			locked_ref->processing = 0;
 			delayed_refs->num_heads_ready++;
 			spin_unlock(&delayed_refs->lock);
+			btrfs_delayed_ref_unlock(locked_ref);
 			locked_ref = NULL;
 			cond_resched();
 			count++;
@@ -2572,7 +2572,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 					 */
 					if (must_insert_reserved)
 						locked_ref->must_insert_reserved = 1;
+					spin_lock(&delayed_refs->lock);
 					locked_ref->processing = 0;
+					delayed_refs->num_heads_ready++;
+					spin_unlock(&delayed_refs->lock);
 					btrfs_debug(fs_info,
 						    "run_delayed_extent_op returned %d",
 						    ret);
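Both hunks above enforce the same ordering in __btrfs_run_delayed_refs: locked_ref->processing and delayed_refs->num_heads_ready are updated while delayed_refs->lock is held, and the ref head is only unlocked (btrfs_delayed_ref_unlock) once that bookkeeping is done, so another worker that grabs the head cannot see it half-requeued. A minimal kernel-style sketch of that ordering, using hypothetical work_queue/work_head types and a hypothetical requeue_head() helper rather than the btrfs structures:

/*
 * Sketch only -- hypothetical types, not the btrfs delayed-ref code.
 * The point is the ordering: finish the shared accounting under the
 * queue lock, and only then release the per-head lock.
 */
#include <linux/mutex.h>
#include <linux/spinlock.h>

struct work_queue {
	spinlock_t lock;		/* plays the role of delayed_refs->lock */
	int num_heads_ready;
};

struct work_head {
	struct mutex mutex;		/* plays the role of the ref head's mutex */
	int processing;
};

static void requeue_head(struct work_queue *q, struct work_head *head)
{
	/* Put the head back and publish it under the queue lock... */
	spin_lock(&q->lock);
	head->processing = 0;
	q->num_heads_ready++;
	spin_unlock(&q->lock);

	/* ...and only now let other workers lock and process it again. */
	mutex_unlock(&head->mutex);
}

Reversing the last two steps would unlock the head while it is still being updated, which is exactly the window the first hunk closes.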
@@ -7384,7 +7387,8 @@ btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
 
 		spin_unlock(&cluster->refill_lock);
 
-		down_read(&used_bg->data_rwsem);
+		/* We should only have one-level nested. */
+		down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);
 
 		spin_lock(&cluster->refill_lock);
 		if (used_bg == cluster->block_group)
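The last hunk only changes a lockdep annotation: btrfs_lock_cluster() can end up read-locking a second data_rwsem (a different block group, but the same lock class) while one is already held, which plain down_read() would make lockdep report as possible recursive locking. down_read_nested() with SINGLE_DEPTH_NESTING tells lockdep the one-level nesting is intentional. A minimal kernel-style sketch, with a hypothetical struct group and lock_two_groups() helper rather than the btrfs block-group code:

/*
 * Sketch only -- hypothetical types, not fs/btrfs/extent-tree.c.
 * Two rwsems of the same lock class are read-locked in a known,
 * bounded order; the _nested variant keeps lockdep quiet about it.
 */
#include <linux/lockdep.h>
#include <linux/rwsem.h>

struct group {
	struct rw_semaphore data_rwsem;
};

static void lock_two_groups(struct group *a, struct group *b)
{
	down_read(&a->data_rwsem);
	/* Same class as a->data_rwsem, one level deep: annotate it. */
	down_read_nested(&b->data_rwsem, SINGLE_DEPTH_NESTING);
}

Without CONFIG_DEBUG_LOCK_ALLOC, down_read_nested() falls back to down_read(), so the change affects only lockdep, not the locking itself.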