author     Matthew Wilcox (Oracle) <willy@infradead.org>   2022-12-15 21:43:52 +0000
committer  Andrew Morton <akpm@linux-foundation.org>       2023-01-18 17:12:39 -0800
commit     abc8a8a2c7dc7b557619befa8fb29be60ed481bc (patch)
tree       cbf1b71e62d8bc6168b372b2748e66bb8e9e7792 /fs
parent     d685c668b0695dff927c85e27ef27d4f404f16a3 (diff)
buffer: replace obvious uses of b_page with b_folio
These cases just check if it's NULL, or use b_page to get to the page's
address space.  They are assumptions that b_page never points to a tail
page.

Link: https://lkml.kernel.org/r/20221215214402.3522366-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
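The conversion at each site is mechanical: anywhere a buffer_head
dereferenced b_page only to reach the owning address_space (or merely
tested the pointer for NULL), b_folio can stand in directly.  A minimal
before/after sketch of the pattern; the wrapper names are hypothetical
and not part of the patch:

#include <linux/buffer_head.h>

/* Before: dereferencing bh->b_page silently assumes it is never a tail
 * page, since a tail page's ->mapping holds poison, not an address_space. */
static struct address_space *bh_mapping_before(struct buffer_head *bh)
{
	return bh->b_page ? bh->b_page->mapping : NULL;
}

/* After: a folio is by definition never a tail page, so the same access
 * carries no hidden assumption. */
static struct address_space *bh_mapping_after(struct buffer_head *bh)
{
	return bh->b_folio ? bh->b_folio->mapping : NULL;
}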
Diffstat (limited to 'fs')
-rw-r--r--	fs/buffer.c	12
1 file changed, 6 insertions, 6 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index d9c6d1fbb6dd..e1055fe0b366 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -321,7 +321,7 @@ static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate)
 {
 	/* Decrypt if needed */
 	if (uptodate &&
-	    fscrypt_inode_uses_fs_layer_crypto(bh->b_page->mapping->host)) {
+	    fscrypt_inode_uses_fs_layer_crypto(bh->b_folio->mapping->host)) {
 		struct decrypt_bh_ctx *ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);

 		if (ctx) {
@@ -570,7 +570,7 @@ void write_boundary_block(struct block_device *bdev,
 void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
 {
 	struct address_space *mapping = inode->i_mapping;
-	struct address_space *buffer_mapping = bh->b_page->mapping;
+	struct address_space *buffer_mapping = bh->b_folio->mapping;

 	mark_buffer_dirty(bh);
 	if (!mapping->private_data) {
@@ -1073,7 +1073,7 @@ __getblk_slow(struct block_device *bdev, sector_t block,
  * and then attach the address_space's inode to its superblock's dirty
  * inode list.
  *
- * mark_buffer_dirty() is atomic.  It takes bh->b_page->mapping->private_lock,
+ * mark_buffer_dirty() is atomic.  It takes bh->b_folio->mapping->private_lock,
  * i_pages lock and mapping->host->i_lock.
  */
 void mark_buffer_dirty(struct buffer_head *bh)
@@ -1117,8 +1117,8 @@ void mark_buffer_write_io_error(struct buffer_head *bh)
 {
 	set_buffer_write_io_error(bh);
 	/* FIXME: do we need to set this in both places? */
-	if (bh->b_page && bh->b_page->mapping)
-		mapping_set_error(bh->b_page->mapping, -EIO);
+	if (bh->b_folio && bh->b_folio->mapping)
+		mapping_set_error(bh->b_folio->mapping, -EIO);
 	if (bh->b_assoc_map)
 		mapping_set_error(bh->b_assoc_map, -EIO);
 	rcu_read_lock();
@@ -1154,7 +1154,7 @@ void __bforget(struct buffer_head *bh)
 {
 	clear_buffer_dirty(bh);
 	if (bh->b_assoc_map) {
-		struct address_space *buffer_mapping = bh->b_page->mapping;
+		struct address_space *buffer_mapping = bh->b_folio->mapping;

 		spin_lock(&buffer_mapping->private_lock);
 		list_del_init(&bh->b_assoc_buffers);
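The mark_buffer_write_io_error() hunk is the NULL-check case from the
commit message; the other hunks reach the address_space through
->mapping.  The safety argument for the latter is that a folio is never
a tail page, so no head-page resolution is needed.  A sketch of that
general idiom, assuming only the in-tree page_folio() helper; the
wrapper name resolve_mapping is hypothetical:

#include <linux/mm.h>

/* Illustrative only: given an arbitrary page (possibly a tail page of a
 * large folio), page_folio() resolves to the containing folio, whose
 * ->mapping is the real address_space.  bh->b_folio stores this folio
 * directly, which is why the converted sites need no such resolution. */
static struct address_space *resolve_mapping(struct page *page)
{
	struct folio *folio = page_folio(page);

	return folio->mapping;
}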