author    Josef Bacik <josef@toxicpanda.com>    2022-10-14 10:00:39 -0400
committer David Sterba <dsterba@suse.com>       2022-12-05 18:00:40 +0100
commit    48acc47d7813a0e650754845161f04b0b27ff8ac (patch)
tree      51d3cda2115945fd40621293963d9c9794f3e6a5 /fs
parent    7248e0cebbefaba94c0c37f708a134dad3acba0e (diff)
btrfs: do not use GFP_ATOMIC in the read endio
We have done read endio in an async thread for a very, very long time,
which makes the use of GFP_ATOMIC and unlock_extent_atomic() unneeded in
our read endio path.  We've noticed under heavy memory pressure in our
fleet that we can fail these allocations, and then often trip a
BUG_ON(!allocation), which isn't an ideal outcome.  Begin to address
this by simply not using GFP_ATOMIC, which will allow us to do things
like actually allocate an extent state when doing
set_extent_bits(UPTODATE) in the endio handler.

End io handlers are not called in atomic context; besides, we have been
allocating the failrec with GFP_NOFS, so we would have noticed if there
were a problem.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
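For context on the gfp flags involved, here is a minimal, self-contained
sketch (hypothetical helper names, not btrfs code) of the behavioral
difference the commit relies on: GFP_ATOMIC never sleeps and can fail
under memory pressure, while GFP_NOFS may block and reclaim (short of
recursing into filesystem code), which is only permissible because read
endio runs in process context.

#include <linux/slab.h>
#include <linux/types.h>

/* Hypothetical record an endio handler might need to allocate. */
struct demo_record {
	u64 start;
	u64 end;
};

/*
 * GFP_ATOMIC: never sleeps, cannot enter reclaim.  Under heavy memory
 * pressure this returns NULL, and a BUG_ON(!rec) afterwards crashes
 * the box -- the failure mode the commit message describes.
 */
static struct demo_record *demo_alloc_atomic(void)
{
	return kmalloc(sizeof(struct demo_record), GFP_ATOMIC);
}

/*
 * GFP_NOFS: may sleep and reclaim memory (while avoiding recursion
 * into filesystem code), so it is far less likely to fail.  Safe here
 * only because read endio runs in an async thread (process context),
 * never in interrupt context.
 */
static struct demo_record *demo_alloc_nofs(void)
{
	return kmalloc(sizeof(struct demo_record), GFP_NOFS);
}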
Diffstat (limited to 'fs')
 fs/btrfs/extent_io.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 4e4f28387ace..78d7ea10621d 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -897,9 +897,9 @@ static void end_sector_io(struct page *page, u64 offset, bool uptodate)
 	end_page_read(page, uptodate, offset, sectorsize);
 	if (uptodate)
 		set_extent_uptodate(&inode->io_tree, offset,
-				    offset + sectorsize - 1, &cached, GFP_ATOMIC);
-	unlock_extent_atomic(&inode->io_tree, offset, offset + sectorsize - 1,
-			     &cached);
+				    offset + sectorsize - 1, &cached, GFP_NOFS);
+	unlock_extent(&inode->io_tree, offset, offset + sectorsize - 1,
+		      &cached);
 }
 
 static void submit_data_read_repair(struct inode *inode,
@@ -1103,7 +1103,7 @@ static void endio_readpage_release_extent(struct processed_extent *processed,
 	 * Now we don't have range contiguous to the processed range, release
 	 * the processed range now.
 	 */
-	unlock_extent_atomic(tree, processed->start, processed->end, &cached);
+	unlock_extent(tree, processed->start, processed->end, &cached);
 update:
 	/* Update processed to current range */
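As context for the unlock_extent_atomic() -> unlock_extent() change
above: the two helpers differ only in the gfp mask they pass down for
any extent state allocation the unlock may need.  A hedged sketch of
that pattern, using hypothetical names rather than the real
extent-io-tree API:

#include <linux/gfp.h>
#include <linux/types.h>

struct demo_tree;
struct demo_state;

/* Hypothetical worker; may allocate a state record with @mask. */
int demo_clear_locked(struct demo_tree *tree, u64 start, u64 end,
		      struct demo_state **cached, gfp_t mask);

/* Process-context variant: may sleep, rarely fails. */
static inline int demo_unlock(struct demo_tree *tree, u64 start, u64 end,
			      struct demo_state **cached)
{
	return demo_clear_locked(tree, start, end, cached, GFP_NOFS);
}

/*
 * Atomic variant: never sleeps, can fail under pressure.  Once every
 * caller is known to run in process context, this variant is dead
 * weight, which is why the patch drops it.
 */
static inline int demo_unlock_atomic(struct demo_tree *tree, u64 start,
				     u64 end, struct demo_state **cached)
{
	return demo_clear_locked(tree, start, end, cached, GFP_ATOMIC);
}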