Diffstat (limited to 'fs/btrfs/file.c')
 fs/btrfs/file.c | 43 ++++++++++++++++---------------------------
 1 file changed, 20 insertions(+), 23 deletions(-)
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 27e5b269e729..435a502a3226 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1692,7 +1692,7 @@ again:
 				    force_page_uptodate);
 		if (ret) {
 			btrfs_delalloc_release_extents(BTRFS_I(inode),
-						       reserve_bytes, true);
+						       reserve_bytes);
 			break;
 		}
 
@@ -1704,7 +1704,7 @@ again:
 			if (extents_locked == -EAGAIN)
 				goto again;
 			btrfs_delalloc_release_extents(BTRFS_I(inode),
-						       reserve_bytes, true);
+						       reserve_bytes);
 			ret = extents_locked;
 			break;
 		}
@@ -1772,8 +1772,7 @@ again:
 		else
 			free_extent_state(cached_state);
 
-		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes,
-					       true);
+		btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
 		if (ret) {
 			btrfs_drop_pages(pages, num_pages);
 			break;
@@ -2068,25 +2067,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	struct btrfs_trans_handle *trans;
 	struct btrfs_log_ctx ctx;
 	int ret = 0, err;
-	u64 len;
-	/*
-	 * If the inode needs a full sync, make sure we use a full range to
-	 * avoid log tree corruption, due to hole detection racing with ordered
-	 * extent completion for adjacent ranges, and assertion failures during
-	 * hole detection.
-	 */
-	if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
-		     &BTRFS_I(inode)->runtime_flags)) {
-		start = 0;
-		end = LLONG_MAX;
-	}
-
-	/*
-	 * The range length can be represented by u64, we have to do the typecasts
-	 * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
-	 */
-	len = (u64)end - (u64)start + 1;
 
 	trace_btrfs_sync_file(file, datasync);
 
 	btrfs_init_log_ctx(&ctx, inode);
@@ -2113,6 +2094,19 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	atomic_inc(&root->log_batch);
 
 	/*
+	 * If the inode needs a full sync, make sure we use a full range to
+	 * avoid log tree corruption, due to hole detection racing with ordered
+	 * extent completion for adjacent ranges, and assertion failures during
+	 * hole detection. Do this while holding the inode lock, to avoid races
+	 * with other tasks.
+	 */
+	if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+		     &BTRFS_I(inode)->runtime_flags)) {
+		start = 0;
+		end = LLONG_MAX;
+	}
+
+	/*
 	 * Before we acquired the inode's lock, someone may have dirtied more
 	 * pages in the target range. We need to make sure that writeback for
 	 * any such pages does not start while we are logging the inode, because
@@ -2139,8 +2133,11 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 	/*
 	 * We have to do this here to avoid the priority inversion of waiting on
 	 * IO of a lower priority task while holding a transaction open.
+	 *
+	 * Also, the range length can be represented by u64, we have to do the
+	 * typecasts to avoid signed overflow if it's [0, LLONG_MAX].
 	 */
-	ret = btrfs_wait_ordered_range(inode, start, len);
+	ret = btrfs_wait_ordered_range(inode, start, (u64)end - (u64)start + 1);
 	if (ret) {
 		up_write(&BTRFS_I(inode)->dio_sem);
 		inode_unlock(inode);
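Note on the btrfs_sync_file() hunks: the patch's new comment says the full-sync
check must happen "while holding the inode lock, to avoid races with other
tasks", i.e. BTRFS_INODE_NEEDS_FULL_SYNC could be set by another task after an
early check but before inode_lock() is taken, leaving start/end at the caller's
narrow range. The following is a hypothetical userspace sketch of that
check-then-act pattern using pthreads; names like need_full_sync and
sync_range() are illustrative only, not btrfs API.

	#include <limits.h>
	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;
	static bool need_full_sync;	/* stands in for BTRFS_INODE_NEEDS_FULL_SYNC */

	static void sync_range(long long start, long long end)
	{
		pthread_mutex_lock(&inode_lock);
		/*
		 * Test the flag only after the lock is held. A test done
		 * before pthread_mutex_lock() could miss a concurrent setter
		 * and keep the caller's narrow [start, end] range.
		 */
		if (need_full_sync) {
			start = 0;
			end = LLONG_MAX;
		}
		/* ... write back and log [start, end] here ... */
		pthread_mutex_unlock(&inode_lock);
	}

	int main(void)
	{
		need_full_sync = true;	/* pretend another task set the flag */
		sync_range(4096, 8191);	/* widened to [0, LLONG_MAX] under the lock */
		return 0;
	}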
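Note on the typecast inlined into btrfs_wait_ordered_range(): for a full-range
fsync the range is [0, LLONG_MAX], so computing end - start + 1 in signed
arithmetic would evaluate LLONG_MAX + 1, which is signed overflow (undefined
behavior); casting both operands to u64 first keeps the computation in unsigned
64-bit arithmetic, where the result 2^63 is representable. A minimal userspace
sketch of the same arithmetic (plain C, not kernel code):

	#include <limits.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* The range a whole-file fsync() uses: [0, LLONG_MAX]. */
		long long start = 0, end = LLONG_MAX;

		/*
		 * end - start + 1 done in signed arithmetic would be
		 * LLONG_MAX + 1: undefined behavior. Casting first keeps the
		 * whole computation in unsigned 64-bit arithmetic.
		 */
		uint64_t len = (uint64_t)end - (uint64_t)start + 1;

		/* Prints 9223372036854775808 (2^63), which fits in a u64. */
		printf("len = %llu\n", (unsigned long long)len);
		return 0;
	}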
