Diffstat (limited to 'fs/btrfs/extent-tree.c')
 fs/btrfs/extent-tree.c | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 0867c5cd6e01..4157ecc27d4b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3832,7 +3832,7 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
 	       block_group->start == fs_info->data_reloc_bg ||
 	       fs_info->data_reloc_bg == 0);
 
-	if (block_group->ro) {
+	if (block_group->ro || block_group->zoned_data_reloc_ongoing) {
 		ret = 1;
 		goto out;
 	}
@@ -3894,8 +3894,24 @@ static int do_allocation_zoned(struct btrfs_block_group *block_group,
 out:
 	if (ret && ffe_ctl->for_treelog)
 		fs_info->treelog_bg = 0;
-	if (ret && ffe_ctl->for_data_reloc)
+	if (ret && ffe_ctl->for_data_reloc &&
+	    fs_info->data_reloc_bg == block_group->start) {
+		/*
+		 * Do not allow further allocations from this block group.
+		 * Compared to increasing the ->ro, setting the
+		 * ->zoned_data_reloc_ongoing flag still allows nocow
+		 * writers to come in. See btrfs_inc_nocow_writers().
+		 *
+		 * We need to disable an allocation to avoid an allocation of
+		 * regular (non-relocation data) extent. With mix of relocation
+		 * extents and regular extents, we can dispatch WRITE commands
+		 * (for relocation extents) and ZONE APPEND commands (for
+		 * regular extents) at the same time to the same zone, which
+		 * easily break the write pointer.
+		 */
+		block_group->zoned_data_reloc_ongoing = 1;
 		fs_info->data_reloc_bg = 0;
+	}
 	spin_unlock(&fs_info->relocation_bg_lock);
 	spin_unlock(&fs_info->treelog_bg_lock);
 	spin_unlock(&block_group->lock);
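
For reference, the gating added above can be modeled outside the kernel as a small standalone sketch. The struct and helper below (bg_model, can_allocate_from) are hypothetical names that only mirror the diff: a block group is refused for fresh zoned allocations while it is read-only or still finishing data relocation, whereas nocow overwrites are judged separately, as the comment in the hunk notes via btrfs_inc_nocow_writers().

/*
 * Simplified, userspace model of the allocation gate added above.
 * This is NOT btrfs code; field names mirror the diff for readability only.
 */
#include <stdbool.h>
#include <stdio.h>

struct bg_model {
	bool ro;                        /* block group is read-only */
	bool zoned_data_reloc_ongoing;  /* relocation extents not fully written yet */
};

/*
 * Mirrors the new check in do_allocation_zoned(): refuse fresh (ZONE APPEND)
 * allocations from a block group that is still finishing data relocation, so
 * relocation WRITEs and regular ZONE APPENDs never target the same zone at
 * the same time.
 */
static bool can_allocate_from(const struct bg_model *bg)
{
	return !(bg->ro || bg->zoned_data_reloc_ongoing);
}

int main(void)
{
	struct bg_model bg = { .ro = false, .zoned_data_reloc_ongoing = true };

	printf("allocation allowed: %s\n", can_allocate_from(&bg) ? "yes" : "no");
	return 0;
}

In the complete commit, the zoned_data_reloc_ongoing flag is presumably cleared again once the relocation writes to that block group have finished; that part lies outside the extent-tree.c hunk shown here.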
