author    Linus Torvalds <torvalds@linux-foundation.org>    2023-06-26 11:00:18 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2023-06-26 11:00:18 -0700
commit    098c5dd9cf96fc6d7f35429561ef58cd7c5fcecf (patch)
tree      dbd52fe5200f3d2c031400747eab2bbb703818c5 /fs
parent    74774e243c5ff0903df22dff67be01f2d4a7f00c (diff)
parent    8241fdd3cdfe88e31a3de09a72b5bff661e4534a (diff)
Merge tag 'erofs-for-6.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs
Pull erofs updates from Gao Xiang:
 "No outstanding new feature for this cycle. Most of these commits are
  decompression cleanups which are part of the ongoing development for
  subpage/folio compression support as well as xattr cleanups for the
  upcoming xattr bloom filter optimization [1].

  In addition, there are bugfixes to address some corner cases of
  compressed images due to global data de-duplication and arm64 16k
  pages.

  Summary:

   - Fix rare I/O hang on deduplicated compressed images due to loop
     hooked chains

   - Fix compact compression layout of 16k blocks on arm64 devices

   - Fix atomic context detection of async decompression

   - Decompression/Xattr code cleanups"

Link: https://lore.kernel.org/r/20230621083209.116024-1-jefflexu@linux.alibaba.com [1]

* tag 'erofs-for-6.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: clean up zmap.c
  erofs: remove unnecessary goto
  erofs: Fix detection of atomic context
  erofs: use separate xattr parsers for listxattr/getxattr
  erofs: unify inline/shared xattr iterators for listxattr/getxattr
  erofs: make the size of read data stored in buffer_ofs
  erofs: unify xattr_iter structures
  erofs: use absolute position in xattr iterator
  erofs: fix compact 4B support for 16k block size
  erofs: convert erofs_read_metabuf() to erofs_bread() for xattr
  erofs: use poison pointer to replace the hard-coded address
  erofs: use struct lockref to replace handcrafted approach
  erofs: adapt managed inode operations into folios
  erofs: kill hooked chains to avoid loops on deduplicated compressed images
  erofs: avoid on-stack pagepool directly passed by arguments
  erofs: allocate extra bvec pages directly instead of retrying
  erofs: clean up z_erofs_pcluster_readmore()
  erofs: remove the member readahead from struct z_erofs_decompress_frontend
  erofs: fold in z_erofs_decompress()
Diffstat (limited to 'fs')
-rw-r--r--  fs/erofs/compress.h      |   3
-rw-r--r--  fs/erofs/decompressor.c  |   8
-rw-r--r--  fs/erofs/internal.h      |  41
-rw-r--r--  fs/erofs/super.c         |  69
-rw-r--r--  fs/erofs/utils.c         |  86
-rw-r--r--  fs/erofs/xattr.c         | 670
-rw-r--r--  fs/erofs/zdata.c         | 269
-rw-r--r--  fs/erofs/zmap.c          |  75
8 files changed, 438 insertions, 783 deletions
diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h
index 26fa170090b8..b1b846504027 100644
--- a/fs/erofs/compress.h
+++ b/fs/erofs/compress.h
@@ -89,8 +89,7 @@ static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
unsigned int padbufsize);
-int z_erofs_decompress(struct z_erofs_decompress_req *rq,
- struct page **pagepool);
+extern const struct z_erofs_decompressor erofs_decompressors[];
/* prototypes for specific algorithms */
int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
index 7021e2cf6146..2a29943fa5cc 100644
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -363,7 +363,7 @@ static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq,
return 0;
}
-static struct z_erofs_decompressor decompressors[] = {
+const struct z_erofs_decompressor erofs_decompressors[] = {
[Z_EROFS_COMPRESSION_SHIFTED] = {
.decompress = z_erofs_transform_plain,
.name = "shifted"
@@ -383,9 +383,3 @@ static struct z_erofs_decompressor decompressors[] = {
},
#endif
};
-
-int z_erofs_decompress(struct z_erofs_decompress_req *rq,
- struct page **pagepool)
-{
- return decompressors[rq->alg].decompress(rq, pagepool);
-}
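
[Editor's note] The hunks above un-static the decompressor table and delete the z_erofs_decompress() wrapper; callers now index the exported array directly (see the z_erofs_decompress_pcluster() hunk in zdata.c below). A minimal sketch of the ops-table dispatch this enables — all demo_* names are illustrative stand-ins, not the kernel identifiers:

struct page;

struct demo_decompress_req {
	int alg;			/* index into the table below */
};

struct demo_decompressor {
	int (*decompress)(struct demo_decompress_req *rq,
			  struct page **pagepool);
	const char *name;
};

/* one slot per on-disk algorithm id, defined next to the algorithms */
extern const struct demo_decompressor demo_decompressors[];

static int demo_do_decompress(struct demo_decompress_req *rq,
			      struct page **pagepool)
{
	/* what the deleted wrapper did; now open-coded at the caller */
	return demo_decompressors[rq->alg].decompress(rq, pagepool);
}
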
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 1e39c03357d1..36e32fa542f0 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -208,46 +208,12 @@ enum {
EROFS_ZIP_CACHE_READAROUND
};
-#define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL)
-
/* basic unit of the workstation of a super_block */
struct erofs_workgroup {
- /* the workgroup index in the workstation */
pgoff_t index;
-
- /* overall workgroup reference count */
- atomic_t refcount;
+ struct lockref lockref;
};
-static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
- int val)
-{
- preempt_disable();
- if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
- preempt_enable();
- return false;
- }
- return true;
-}
-
-static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
- int orig_val)
-{
- /*
- * other observers should notice all modifications
- * in the freezing period.
- */
- smp_mb();
- atomic_set(&grp->refcount, orig_val);
- preempt_enable();
-}
-
-static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
-{
- return atomic_cond_read_relaxed(&grp->refcount,
- VAL != EROFS_LOCKED_MAGIC);
-}
-
enum erofs_kmap_type {
EROFS_NO_KMAP, /* don't map the buffer */
EROFS_KMAP, /* use kmap_local_page() to map the buffer */
@@ -486,7 +452,7 @@ static inline void erofs_pagepool_add(struct page **pagepool, struct page *page)
void erofs_release_pages(struct page **pagepool);
#ifdef CONFIG_EROFS_FS_ZIP
-int erofs_workgroup_put(struct erofs_workgroup *grp);
+void erofs_workgroup_put(struct erofs_workgroup *grp);
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
pgoff_t index);
struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
@@ -500,7 +466,6 @@ int __init z_erofs_init_zip_subsystem(void);
void z_erofs_exit_zip_subsystem(void);
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
struct erofs_workgroup *egrp);
-int erofs_try_to_free_cached_page(struct page *page);
int z_erofs_load_lz4_config(struct super_block *sb,
struct erofs_super_block *dsb,
struct z_erofs_lz4_cfgs *lz4, int len);
@@ -511,6 +476,7 @@ void erofs_put_pcpubuf(void *ptr);
int erofs_pcpubuf_growsize(unsigned int nrpages);
void __init erofs_pcpubuf_init(void);
void erofs_pcpubuf_exit(void);
+int erofs_init_managed_cache(struct super_block *sb);
#else
static inline void erofs_shrinker_register(struct super_block *sb) {}
static inline void erofs_shrinker_unregister(struct super_block *sb) {}
@@ -530,6 +496,7 @@ static inline int z_erofs_load_lz4_config(struct super_block *sb,
}
static inline void erofs_pcpubuf_init(void) {}
static inline void erofs_pcpubuf_exit(void) {}
+static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; }
#endif /* !CONFIG_EROFS_FS_ZIP */
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
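
[Editor's note] Since erofs_init_managed_cache() moves from super.c into the ZIP code (see the zdata.c hunks below), internal.h grows the usual CONFIG-gated declaration plus a static-inline no-op for !CONFIG_EROFS_FS_ZIP builds. A minimal sketch of that idiom, with hypothetical demo_* names:

struct super_block;

#ifdef CONFIG_DEMO_ZIP
int demo_init_managed_cache(struct super_block *sb);	/* real version */
#else
static inline int demo_init_managed_cache(struct super_block *sb)
{
	return 0;	/* compiled out: succeed without doing anything */
}
#endif
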
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index 811ab66d805e..ff18f6b14de5 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -599,68 +599,6 @@ static int erofs_fc_parse_param(struct fs_context *fc,
return 0;
}
-#ifdef CONFIG_EROFS_FS_ZIP
-static const struct address_space_operations managed_cache_aops;
-
-static bool erofs_managed_cache_release_folio(struct folio *folio, gfp_t gfp)
-{
- bool ret = true;
- struct address_space *const mapping = folio->mapping;
-
- DBG_BUGON(!folio_test_locked(folio));
- DBG_BUGON(mapping->a_ops != &managed_cache_aops);
-
- if (folio_test_private(folio))
- ret = erofs_try_to_free_cached_page(&folio->page);
-
- return ret;
-}
-
-/*
- * It will be called only on inode eviction. In case that there are still some
- * decompression requests in progress, wait with rescheduling for a bit here.
- * We could introduce an extra locking instead but it seems unnecessary.
- */
-static void erofs_managed_cache_invalidate_folio(struct folio *folio,
- size_t offset, size_t length)
-{
- const size_t stop = length + offset;
-
- DBG_BUGON(!folio_test_locked(folio));
-
- /* Check for potential overflow in debug mode */
- DBG_BUGON(stop > folio_size(folio) || stop < length);
-
- if (offset == 0 && stop == folio_size(folio))
- while (!erofs_managed_cache_release_folio(folio, GFP_NOFS))
- cond_resched();
-}
-
-static const struct address_space_operations managed_cache_aops = {
- .release_folio = erofs_managed_cache_release_folio,
- .invalidate_folio = erofs_managed_cache_invalidate_folio,
-};
-
-static int erofs_init_managed_cache(struct super_block *sb)
-{
- struct erofs_sb_info *const sbi = EROFS_SB(sb);
- struct inode *const inode = new_inode(sb);
-
- if (!inode)
- return -ENOMEM;
-
- set_nlink(inode, 1);
- inode->i_size = OFFSET_MAX;
-
- inode->i_mapping->a_ops = &managed_cache_aops;
- mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
- sbi->managed_cache = inode;
- return 0;
-}
-#else
-static int erofs_init_managed_cache(struct super_block *sb) { return 0; }
-#endif
-
static struct inode *erofs_nfs_get_inode(struct super_block *sb,
u64 ino, u32 generation)
{
@@ -1016,10 +954,8 @@ static int __init erofs_module_init(void)
sizeof(struct erofs_inode), 0,
SLAB_RECLAIM_ACCOUNT,
erofs_inode_init_once);
- if (!erofs_inode_cachep) {
- err = -ENOMEM;
- goto icache_err;
- }
+ if (!erofs_inode_cachep)
+ return -ENOMEM;
err = erofs_init_shrinker();
if (err)
@@ -1054,7 +990,6 @@ lzma_err:
erofs_exit_shrinker();
shrinker_err:
kmem_cache_destroy(erofs_inode_cachep);
-icache_err:
return err;
}
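
[Editor's note] Besides relocating the managed-cache setup, the super.c hunks simplify erofs_module_init(): the very first allocation has nothing to unwind, so it can return -ENOMEM directly and drop the icache_err label. A minimal sketch of the pattern, under hypothetical demo_* names:

#include <linux/init.h>
#include <linux/slab.h>

static struct kmem_cache *demo_cachep;

static int __init demo_module_init(void)
{
	demo_cachep = kmem_cache_create("demo_inode", 128, 0,
					SLAB_RECLAIM_ACCOUNT, NULL);
	if (!demo_cachep)
		return -ENOMEM;	/* first step: direct return, no goto */

	return 0;		/* later steps would goto unwind labels */
}
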
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index 46627cb69abe..cc6fb9e98899 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -4,7 +4,6 @@
* https://www.huawei.com/
*/
#include "internal.h"
-#include <linux/pagevec.h>
struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp)
{
@@ -33,22 +32,21 @@ void erofs_release_pages(struct page **pagepool)
/* global shrink count (for all mounted EROFS instances) */
static atomic_long_t erofs_global_shrink_cnt;
-static int erofs_workgroup_get(struct erofs_workgroup *grp)
+static bool erofs_workgroup_get(struct erofs_workgroup *grp)
{
- int o;
+ if (lockref_get_not_zero(&grp->lockref))
+ return true;
-repeat:
- o = erofs_wait_on_workgroup_freezed(grp);
- if (o <= 0)
- return -1;
-
- if (atomic_cmpxchg(&grp->refcount, o, o + 1) != o)
- goto repeat;
+ spin_lock(&grp->lockref.lock);
+ if (__lockref_is_dead(&grp->lockref)) {
+ spin_unlock(&grp->lockref.lock);
+ return false;
+ }
- /* decrease refcount paired by erofs_workgroup_put */
- if (o == 1)
+ if (!grp->lockref.count++)
atomic_long_dec(&erofs_global_shrink_cnt);
- return 0;
+ spin_unlock(&grp->lockref.lock);
+ return true;
}
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
@@ -61,7 +59,7 @@ repeat:
rcu_read_lock();
grp = xa_load(&sbi->managed_pslots, index);
if (grp) {
- if (erofs_workgroup_get(grp)) {
+ if (!erofs_workgroup_get(grp)) {
/* prefer to relax rcu read side */
rcu_read_unlock();
goto repeat;
@@ -80,11 +78,10 @@ struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
struct erofs_workgroup *pre;
/*
- * Bump up a reference count before making this visible
- * to others for the XArray in order to avoid potential
- * UAF without serialized by xa_lock.
+ * Bump up before making this visible to others for the XArray in order
+ * to avoid a potential UAF unless serialized by xa_lock.
*/
- atomic_inc(&grp->refcount);
+ lockref_get(&grp->lockref);
repeat:
xa_lock(&sbi->managed_pslots);
@@ -93,13 +90,13 @@ repeat:
if (pre) {
if (xa_is_err(pre)) {
pre = ERR_PTR(xa_err(pre));
- } else if (erofs_workgroup_get(pre)) {
+ } else if (!erofs_workgroup_get(pre)) {
/* try to legitimize the current in-tree one */
xa_unlock(&sbi->managed_pslots);
cond_resched();
goto repeat;
}
- atomic_dec(&grp->refcount);
+ lockref_put_return(&grp->lockref);
grp = pre;
}
xa_unlock(&sbi->managed_pslots);
@@ -112,38 +109,34 @@ static void __erofs_workgroup_free(struct erofs_workgroup *grp)
erofs_workgroup_free_rcu(grp);
}
-int erofs_workgroup_put(struct erofs_workgroup *grp)
+void erofs_workgroup_put(struct erofs_workgroup *grp)
{
- int count = atomic_dec_return(&grp->refcount);
+ if (lockref_put_or_lock(&grp->lockref))
+ return;
- if (count == 1)
+ DBG_BUGON(__lockref_is_dead(&grp->lockref));
+ if (grp->lockref.count == 1)
atomic_long_inc(&erofs_global_shrink_cnt);
- else if (!count)
- __erofs_workgroup_free(grp);
- return count;
+ --grp->lockref.count;
+ spin_unlock(&grp->lockref.lock);
}
static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
struct erofs_workgroup *grp)
{
- /*
- * If managed cache is on, refcount of workgroups
- * themselves could be < 0 (freezed). In other words,
- * there is no guarantee that all refcounts > 0.
- */
- if (!erofs_workgroup_try_to_freeze(grp, 1))
- return false;
+ int free = false;
+
+ spin_lock(&grp->lockref.lock);
+ if (grp->lockref.count)
+ goto out;
/*
- * Note that all cached pages should be unattached
- * before deleted from the XArray. Otherwise some
- * cached pages could be still attached to the orphan
- * old workgroup when the new one is available in the tree.
+ * Note that all cached pages should be detached before deleted from
+ * the XArray. Otherwise some cached pages could be still attached to
+ * the orphan old workgroup when the new one is available in the tree.
*/
- if (erofs_try_to_free_all_cached_pages(sbi, grp)) {
- erofs_workgroup_unfreeze(grp, 1);
- return false;
- }
+ if (erofs_try_to_free_all_cached_pages(sbi, grp))
+ goto out;
/*
* It's impossible to fail after the workgroup is freezed,
@@ -152,10 +145,13 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
*/
DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp);
- /* last refcount should be connected with its managed pslot. */
- erofs_workgroup_unfreeze(grp, 0);
- __erofs_workgroup_free(grp);
- return true;
+ lockref_mark_dead(&grp->lockref);
+ free = true;
+out:
+ spin_unlock(&grp->lockref.lock);
+ if (free)
+ __erofs_workgroup_free(grp);
+ return free;
}
static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
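
[Editor's note] utils.c is where the handcrafted freeze protocol (EROFS_LOCKED_MAGIC plus atomic_cmpxchg) is replaced by struct lockref: count == 0 now means "cached but reclaimable", and the teardown path marks the lockref dead under the lock to fence concurrent lookups. A condensed sketch of the get/put pair, assuming only the <linux/lockref.h> API; demo_* names and the shrink counter are stand-ins mirroring the shape of the erofs code:

#include <linux/atomic.h>
#include <linux/lockref.h>

static atomic_long_t demo_shrink_cnt;	/* counts zero-ref cached objects */

struct demo_workgroup {
	struct lockref lockref;
};

static bool demo_workgroup_get(struct demo_workgroup *grp)
{
	if (lockref_get_not_zero(&grp->lockref))
		return true;			/* fast path, lock-free */

	spin_lock(&grp->lockref.lock);
	if (__lockref_is_dead(&grp->lockref)) {
		spin_unlock(&grp->lockref.lock);
		return false;			/* lost the race to teardown */
	}
	if (!grp->lockref.count++)		/* revive a cached object */
		atomic_long_dec(&demo_shrink_cnt);
	spin_unlock(&grp->lockref.lock);
	return true;
}

static void demo_workgroup_put(struct demo_workgroup *grp)
{
	if (lockref_put_or_lock(&grp->lockref))
		return;				/* count was > 1 */

	/* count == 1 and the lock is now held: 1 -> 0 keeps it cached */
	if (grp->lockref.count == 1)
		atomic_long_inc(&demo_shrink_cnt);
	--grp->lockref.count;
	spin_unlock(&grp->lockref.lock);
}

The shrinker side (erofs_try_to_release_workgroup() above) takes the same spinlock, bails out if count != 0, and otherwise calls lockref_mark_dead() before freeing.
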
diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
index bbfe7ce170d2..40178b6e0688 100644
--- a/fs/erofs/xattr.c
+++ b/fs/erofs/xattr.c
@@ -7,32 +7,27 @@
#include <linux/security.h>
#include "xattr.h"
-static inline erofs_blk_t erofs_xattr_blkaddr(struct super_block *sb,
- unsigned int xattr_id)
-{
- return EROFS_SB(sb)->xattr_blkaddr +
- erofs_blknr(sb, xattr_id * sizeof(__u32));
-}
-
-static inline unsigned int erofs_xattr_blkoff(struct super_block *sb,
- unsigned int xattr_id)
-{
- return erofs_blkoff(sb, xattr_id * sizeof(__u32));
-}
-
-struct xattr_iter {
+struct erofs_xattr_iter {
struct super_block *sb;
struct erofs_buf buf;
+ erofs_off_t pos;
void *kaddr;
- erofs_blk_t blkaddr;
- unsigned int ofs;
+ char *buffer;
+ int buffer_size, buffer_ofs;
+
+ /* getxattr */
+ int index, infix_len;
+ struct qstr name;
+
+ /* listxattr */
+ struct dentry *dentry;
};
static int erofs_init_inode_xattrs(struct inode *inode)
{
struct erofs_inode *const vi = EROFS_I(inode);
- struct xattr_iter it;
+ struct erofs_xattr_iter it;
unsigned int i;
struct erofs_xattr_ibody_header *ih;
struct super_block *sb = inode->i_sb;
@@ -81,17 +76,17 @@ static int erofs_init_inode_xattrs(struct inode *inode)
}
it.buf = __EROFS_BUF_INITIALIZER;
- it.blkaddr = erofs_blknr(sb, erofs_iloc(inode) + vi->inode_isize);
- it.ofs = erofs_blkoff(sb, erofs_iloc(inode) + vi->inode_isize);
+ erofs_init_metabuf(&it.buf, sb);
+ it.pos = erofs_iloc(inode) + vi->inode_isize;
/* read in shared xattr array (non-atomic, see kmalloc below) */
- it.kaddr = erofs_read_metabuf(&it.buf, sb, it.blkaddr, EROFS_KMAP);
+ it.kaddr = erofs_bread(&it.buf, erofs_blknr(sb, it.pos), EROFS_KMAP);
if (IS_ERR(it.kaddr)) {
ret = PTR_ERR(it.kaddr);
goto out_unlock;
}
- ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
+ ih = it.kaddr + erofs_blkoff(sb, it.pos);
vi->xattr_shared_count = ih->h_shared_count;
vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
sizeof(uint), GFP_KERNEL);
@@ -102,26 +97,20 @@ static int erofs_init_inode_xattrs(struct inode *inode)
}
/* let's skip ibody header */
- it.ofs += sizeof(struct erofs_xattr_ibody_header);
+ it.pos += sizeof(struct erofs_xattr_ibody_header);
for (i = 0; i < vi->xattr_shared_count; ++i) {
- if (it.ofs >= sb->s_blocksize) {
- /* cannot be unaligned */
- DBG_BUGON(it.ofs != sb->s_blocksize);
-
- it.kaddr = erofs_read_metabuf(&it.buf, sb, ++it.blkaddr,
- EROFS_KMAP);
- if (IS_ERR(it.kaddr)) {
- kfree(vi->xattr_shared_xattrs);
- vi->xattr_shared_xattrs = NULL;
- ret = PTR_ERR(it.kaddr);
- goto out_unlock;
- }
- it.ofs = 0;
+ it.kaddr = erofs_bread(&it.buf, erofs_blknr(sb, it.pos),
+ EROFS_KMAP);
+ if (IS_ERR(it.kaddr)) {
+ kfree(vi->xattr_shared_xattrs);
+ vi->xattr_shared_xattrs = NULL;
+ ret = PTR_ERR(it.kaddr);
+ goto out_unlock;
}
- vi->xattr_shared_xattrs[i] =
- le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
- it.ofs += sizeof(__le32);
+ vi->xattr_shared_xattrs[i] = le32_to_cpu(*(__le32 *)
+ (it.kaddr + erofs_blkoff(sb, it.pos)));
+ it.pos += sizeof(__le32);
}
erofs_put_metabuf(&it.buf);
@@ -134,287 +123,6 @@ out_unlock:
return ret;
}
-/*
- * the general idea for these return values is
- * if 0 is returned, go on processing the current xattr;
- * 1 (> 0) is returned, skip this round to process the next xattr;
- * -err (< 0) is returned, an error (maybe ENOXATTR) occurred
- * and need to be handled
- */
-struct xattr_iter_handlers {
- int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry);
- int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf,
- unsigned int len);
- int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz);
- void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf,
- unsigned int len);
-};
-
-static inline int xattr_iter_fixup(struct xattr_iter *it)
-{
- if (it->ofs < it->sb->s_blocksize)
- return 0;
-
- it->blkaddr += erofs_blknr(it->sb, it->ofs);
- it->kaddr = erofs_read_metabuf(&it->buf, it->sb, it->blkaddr,
- EROFS_KMAP);
- if (IS_ERR(it->kaddr))
- return PTR_ERR(it->kaddr);
- it->ofs = erofs_blkoff(it->sb, it->ofs);
- return 0;
-}
-
-static int inline_xattr_iter_begin(struct xattr_iter *it,
- struct inode *inode)
-{
- struct erofs_inode *const vi = EROFS_I(inode);
- unsigned int xattr_header_sz, inline_xattr_ofs;
-
- xattr_header_sz = sizeof(struct erofs_xattr_ibody_header) +
- sizeof(u32) * vi->xattr_shared_count;
- if (xattr_header_sz >= vi->xattr_isize) {
- DBG_BUGON(xattr_header_sz > vi->xattr_isize);
- return -ENOATTR;
- }
-
- inline_xattr_ofs = vi->inode_isize + xattr_header_sz;
-
- it->blkaddr = erofs_blknr(it->sb, erofs_iloc(inode) + inline_xattr_ofs);
- it->ofs = erofs_blkoff(it->sb, erofs_iloc(inode) + inline_xattr_ofs);
- it->kaddr = erofs_read_metabuf(&it->buf, inode->i_sb, it->blkaddr,
- EROFS_KMAP);
- if (IS_ERR(it->kaddr))
- return PTR_ERR(it->kaddr);
- return vi->xattr_isize - xattr_header_sz;
-}
-
-/*
- * Regardless of success or failure, `xattr_foreach' will end up with
- * `ofs' pointing to the next xattr item rather than an arbitrary position.
- */
-static int xattr_foreach(struct xattr_iter *it,
- const struct xattr_iter_handlers *op,
- unsigned int *tlimit)
-{
- struct erofs_xattr_entry entry;
- unsigned int value_sz, processed, slice;
- int err;
-
- /* 0. fixup blkaddr, ofs, ipage */
- err = xattr_iter_fixup(it);
- if (err)
- return err;
-
- /*
- * 1. read xattr entry to the memory,
- * since we do EROFS_XATTR_ALIGN
- * therefore entry should be in the page
- */
- entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
- if (tlimit) {
- unsigned int entry_sz = erofs_xattr_entry_size(&entry);
-
- /* xattr on-disk corruption: xattr entry beyond xattr_isize */
- if (*tlimit < entry_sz) {
- DBG_BUGON(1);
- return -EFSCORRUPTED;
- }
- *tlimit -= entry_sz;
- }
-
- it->ofs += sizeof(struct erofs_xattr_entry);
- value_sz = le16_to_cpu(entry.e_value_size);
-
- /* handle entry */
- err = op->entry(it, &entry);
- if (err) {
- it->ofs += entry.e_name_len + value_sz;
- goto out;
- }
-
- /* 2. handle xattr name (ofs will finally be at the end of name) */
- processed = 0;
-
- while (processed < entry.e_name_len) {
- if (it->ofs >= it->sb->s_blocksize) {
- DBG_BUGON(it->ofs > it->sb->s_blocksize);
-
- err = xattr_iter_fixup(it);
- if (err)
- goto out;
- it->ofs = 0;
- }
-
- slice = min_t(unsigned int, it->sb->s_blocksize - it->ofs,
- entry.e_name_len - processed);
-
- /* handle name */
- err = op->name(it, processed, it->kaddr + it->ofs, slice);
- if (err) {
- it->ofs += entry.e_name_len - processed + value_sz;
- goto out;
- }
-
- it->ofs += slice;
- processed += slice;
- }
-
- /* 3. handle xattr value */
- processed = 0;
-
- if (op->alloc_buffer) {
- err = op->alloc_buffer(it, value_sz);
- if (err) {
- it->ofs += value_sz;
- goto out;
- }
- }
-
- while (processed < value_sz) {
- if (it->ofs >= it->sb->s_blocksize) {
- DBG_BUGON(it->ofs > it->sb->s_blocksize);
-
- err = xattr_iter_fixup(it);
- if (err)
- goto out;
- it->ofs = 0;
- }
-
- slice = min_t(unsigned int, it->sb->s_blocksize - it->ofs,
- value_sz - processed);
- op->value(it, processed, it->kaddr + it->ofs, slice);
- it->ofs += slice;
- processed += slice;
- }
-
-out:
- /* xattrs should be 4-byte aligned (on-disk constraint) */
- it->ofs = EROFS_XATTR_ALIGN(it->ofs);
- return err < 0 ? err : 0;
-}
-
-struct getxattr_iter {
- struct xattr_iter it;
-
- char *buffer;
- int buffer_size, index, infix_len;
- struct qstr name;
-};
-
-static int erofs_xattr_long_entrymatch(struct getxattr_iter *it,
- struct erofs_xattr_entry *entry)
-{
- struct erofs_sb_info *sbi = EROFS_SB(it->it.sb);
- struct erofs_xattr_prefix_item *pf = sbi->xattr_prefixes +
- (entry->e_name_index & EROFS_XATTR_LONG_PREFIX_MASK);
-
- if (pf >= sbi->xattr_prefixes + sbi->xattr_prefix_count)
- return -ENOATTR;
-
- if (it->index != pf->prefix->base_index ||
- it->name.len != entry->e_name_len + pf->infix_len)
- return -ENOATTR;
-
- if (memcmp(it->name.name, pf->prefix->infix, pf->infix_len))
- return -ENOATTR;
-
- it->infix_len = pf->infix_len;
- return 0;
-}
-
-static int xattr_entrymatch(struct xattr_iter *_it,
- struct erofs_xattr_entry *entry)
-{
- struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
-
- /* should also match the infix for long name prefixes */
- if (entry->e_name_index & EROFS_XATTR_LONG_PREFIX)
- return erofs_xattr_long_entrymatch(it, entry);
-
- if (it->index != entry->e_name_index ||
- it->name.len != entry->e_name_len)
- return -ENOATTR;
- it->infix_len = 0;
- return 0;
-}
-
-static int xattr_namematch(struct xattr_iter *_it,
- unsigned int processed, char *buf, unsigned int len)
-{
- struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
-
- if (memcmp(buf, it->name.name + it->infix_len + processed, len))
- return -ENOATTR;
- return 0;
-}
-
-static int xattr_checkbuffer(struct xattr_iter *_it,
- unsigned int value_sz)
-{
- struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
- int err = it->buffer_size < value_sz ? -ERANGE : 0;
-
- it->buffer_size = value_sz;
- return !it->buffer ? 1 : err;
-}
-
-static void xattr_copyvalue(struct xattr_iter *_it,
- unsigned int processed,
- char *buf, unsigned int len)
-{
- struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
-
- memcpy(it->buffer + processed, buf, len);
-}
-
-static const struct xattr_iter_handlers find_xattr_handlers = {
- .entry = xattr_entrymatch,
- .name = xattr_namematch,
- .alloc_buffer = xattr_checkbuffer,
- .value = xattr_copyvalue
-};
-
-static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
-{
- int ret;
- unsigned int remaining;
-
- ret = inline_xattr_iter_begin(&it->it, inode);
- if (ret < 0)
- return ret;
-
- remaining = ret;
- while (remaining) {
- ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
- if (ret != -ENOATTR)
- break;
- }
- return ret ? ret : it->buffer_size;
-}
-
-static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
-{
- struct erofs_inode *const vi = EROFS_I(inode);
- struct super_block *const sb = it->it.sb;
- unsigned int i, xsid;
- int ret = -ENOATTR;
-
- for (i = 0; i < vi->xattr_shared_count; ++i) {
- xsid = vi->xattr_shared_xattrs[i];
- it->it.blkaddr = erofs_xattr_blkaddr(sb, xsid);
- it->it.ofs = erofs_xattr_blkoff(sb, xsid);
- it->it.kaddr = erofs_read_metabuf(&it->it.buf, sb,
- it->it.blkaddr, EROFS_KMAP);
- if (IS_ERR(it->it.kaddr))
- return PTR_ERR(it->it.kaddr);
-
- ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
- if (ret != -ENOATTR)
- break;
- }
- return ret ? ret : it->buffer_size;
-}
-
static bool erofs_xattr_user_list(struct dentry *dentry)
{
return test_opt(&EROFS_SB(dentry->d_sb)->opt, XATTR_USER);
@@ -425,39 +133,6 @@ static bool erofs_xattr_trusted_list(struct dentry *dentry)
return capable(CAP_SYS_ADMIN);
}
-int erofs_getxattr(struct inode *inode, int index,
- const char *name,
- void *buffer, size_t buffer_size)
-{
- int ret;
- struct getxattr_iter it;
-
- if (!name)
- return -EINVAL;
-
- ret = erofs_init_inode_xattrs(inode);
- if (ret)
- return ret;
-
- it.index = index;
- it.name.len = strlen(name);
- if (it.name.len > EROFS_NAME_LEN)
- return -ERANGE;
-
- it.it.buf = __EROFS_BUF_INITIALIZER;
- it.name.name = name;
-
- it.buffer = buffer;
- it.buffer_size = buffer_size;
-
- it.it.sb = inode->i_sb;
- ret = inline_getxattr(inode, &it);
- if (ret == -ENOATTR)
- ret = shared_getxattr(inode, &it);
- erofs_put_metabuf(&it.it.buf);
- return ret;
-}
-
static int erofs_xattr_generic_get(const struct xattr_handler *handler,
struct dentry *unused, struct inode *inode,
const char *name, void *buffer, size_t size)
@@ -500,30 +175,49 @@ const struct xattr_handler *erofs_xattr_handlers[] = {
NULL,
};
-struct listxattr_iter {
- struct xattr_iter it;
-
- struct dentry *dentry;
- char *buffer;
- int buffer_size, buffer_ofs;
-};
+static int erofs_xattr_copy_to_buffer(struct erofs_xattr_iter *it,
+ unsigned int len)
+{
+ unsigned int slice, processed;
+ struct super_block *sb = it->sb;
+ void *src;
+
+ for (processed = 0; processed < len; processed += slice) {
+ it->kaddr = erofs_bread(&it->buf, erofs_blknr(sb, it->pos),
+ EROFS_KMAP);
+ if (IS_ERR(it->kaddr))
+ return PTR_ERR(it->kaddr);
+
+ src = it->kaddr + erofs_blkoff(sb, it->pos);
+ slice = min_t(unsigned int, sb->s_blocksize -
+ erofs_blkoff(sb, it->pos), len - processed);
+ memcpy(it->buffer + it->buffer_ofs, src, slice);
+ it->buffer_ofs += slice;
+ it->pos += slice;
+ }
+ return 0;
+}
-static int xattr_entrylist(struct xattr_iter *_it,
- struct erofs_xattr_entry *entry)
+static int erofs_listxattr_foreach(struct erofs_xattr_iter *it)
{
- struct listxattr_iter *it =
- container_of(_it, struct listxattr_iter, it);
- unsigned int base_index = entry->e_name_index;
- unsigned int prefix_len, infix_len = 0;
+ struct erofs_xattr_entry entry;
+ unsigned int base_index, name_total, prefix_len, infix_len = 0;
const char *prefix, *infix = NULL;
+ int err;
+
+ /* 1. handle xattr entry */
+ entry = *(struct erofs_xattr_entry *)
+ (it->kaddr + erofs_blkoff(it->sb, it->pos));
+ it->pos += sizeof(struct erofs_xattr_entry);
- if (entry->e_name_index & EROFS_XATTR_LONG_PREFIX) {
- struct erofs_sb_info *sbi = EROFS_SB(_it->sb);
+ base_index = entry.e_name_index;
+ if (entry.e_name_index & EROFS_XATTR_LONG_PREFIX) {
+ struct erofs_sb_info *sbi = EROFS_SB(it->sb);
struct erofs_xattr_prefix_item *pf = sbi->xattr_prefixes +
- (entry->e_name_index & EROFS_XATTR_LONG_PREFIX_MASK);
+ (entry.e_name_index & EROFS_XATTR_LONG_PREFIX_MASK);
if (pf >= sbi->xattr_prefixes + sbi->xattr_prefix_count)
- return 1;
+ return 0;
infix = pf->prefix->infix;
infix_len = pf->infix_len;
base_index = pf->prefix->base_index;
@@ -531,120 +225,228 @@ static int xattr_entrylist(struct xattr_iter *_it,
prefix = erofs_xattr_prefix(base_index, it->dentry);
if (!prefix)
- return 1;
+ return 0;
prefix_len = strlen(prefix);
+ name_total = prefix_len + infix_len + entry.e_name_len + 1;
if (!it->buffer) {
- it->buffer_ofs += prefix_len + infix_len +
- entry->e_name_len + 1;
- return 1;
+ it->buffer_ofs += name_total;
+ return 0;
}
- if (it->buffer_ofs + prefix_len + infix_len +
- + entry->e_name_len + 1 > it->buffer_size)
+ if (it->buffer_ofs + name_total > it->buffer_size)
return -ERANGE;
memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
memcpy(it->buffer + it->buffer_ofs + prefix_len, infix, infix_len);
it->buffer_ofs += prefix_len + infix_len;
- return 0;
-}
-static int xattr_namelist(struct xattr_iter *_it,
- unsigned int processed, char *buf, unsigned int len)
-{
- struct listxattr_iter *it =
- container_of(_it, struct listxattr_iter, it);
+ /* 2. handle xattr name */
+ err = erofs_xattr_copy_to_buffer(it, entry.e_name_len);
+ if (err)
+ return err;
- memcpy(it->buffer + it->buffer_ofs, buf, len);
- it->buffer_ofs += len;
+ it->buffer[it->buffer_ofs++] = '\0';
return 0;
}
-static int xattr_skipvalue(struct xattr_iter *_it,
- unsigned int value_sz)
+static int erofs_getxattr_foreach(struct erofs_xattr_iter *it)
{
- struct listxattr_iter *it =
- container_of(_it, struct listxattr_iter, it);
+ struct super_block *sb = it->sb;
+ struct erofs_xattr_entry entry;
+ unsigned int slice, processed, value_sz;
- it->buffer[it->buffer_ofs++] = '\0';
- return 1;
-}
+ /* 1. handle xattr entry */
+ entry = *(struct erofs_xattr_entry *)
+ (it->kaddr + erofs_blkoff(sb, it->pos));
+ it->pos += sizeof(struct erofs_xattr_entry);
+ value_sz = le16_to_cpu(entry.e_value_size);
-static const struct xattr_iter_handlers list_xattr_handlers = {
- .entry = xattr_entrylist,
- .name = xattr_namelist,
- .alloc_buffer = xattr_skipvalue,
- .value = NULL
-};
+ /* should also match the infix for long name prefixes */
+ if (entry.e_name_index & EROFS_XATTR_LONG_PREFIX) {
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ struct erofs_xattr_prefix_item *pf = sbi->xattr_prefixes +
+ (entry.e_name_index & EROFS_XATTR_LONG_PREFIX_MASK);
+
+ if (pf >= sbi->xattr_prefixes + sbi->xattr_prefix_count)
+ return -ENOATTR;
+
+ if (it->index != pf->prefix->base_index ||
+ it->name.len != entry.e_name_len + pf->infix_len)
+ return -ENOATTR;
+
+ if (memcmp(it->name.name, pf->prefix->infix, pf->infix_len))
+ return -ENOATTR;
+
+ it->infix_len = pf->infix_len;
+ } else {
+ if (it->index != entry.e_name_index ||
+ it->name.len != entry.e_name_len)
+ return -ENOATTR;
-static int inline_listxattr(struct listxattr_iter *it)
+ it->infix_len = 0;
+ }
+
+ /* 2. handle xattr name */
+ for (processed = 0; processed < entry.e_name_len; processed += slice) {
+ it->kaddr = erofs_bread(&it->buf, erofs_blknr(sb, it->pos),
+ EROFS_KMAP);
+ if (IS_ERR(it->kaddr))
+ return PTR_ERR(it->kaddr);
+
+ slice = min_t(unsigned int,
+ sb->s_blocksize - erofs_blkoff(sb, it->pos),
+ entry.e_name_len - processed);
+ if (memcmp(it->name.name + it->infix_len + processed,
+ it->kaddr + erofs_blkoff(sb, it->pos), slice))
+ return -ENOATTR;
+ it->pos += slice;
+ }
+
+ /* 3. handle xattr value */
+ if (!it->buffer) {
+ it->buffer_ofs = value_sz;
+ return 0;
+ }
+
+ if (it->buffer_size < value_sz)
+ return -ERANGE;
+
+ return erofs_xattr_copy_to_buffer(it, value_sz);
+}
+
+static int erofs_xattr_iter_inline(struct erofs_xattr_iter *it,
+ struct inode *inode, bool getxattr)
{
+ struct erofs_inode *const vi = EROFS_I(inode);
+ unsigned int xattr_header_sz, remaining, entry_sz;
+ erofs_off_t next_pos;
int ret;
- unsigned int remaining;
- ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
- if (ret < 0)
- return ret;
+ xattr_header_sz = sizeof(struct erofs_xattr_ibody_header) +
+ sizeof(u32) * vi->xattr_shared_count;
+ if (xattr_header_sz >= vi->xattr_isize) {
+ DBG_BUGON(xattr_header_sz > vi->xattr_isize);
+ return -ENOATTR;
+ }
+
+ remaining = vi->xattr_isize - xattr_header_sz;
+ it->pos = erofs_iloc(inode) + vi->inode_isize + xattr_header_sz;
- remaining = ret;
while (remaining) {
- ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
- if (ret)
+ it->kaddr = erofs_bread(&it->buf, erofs_blknr(it->sb, it->pos),
+ EROFS_KMAP);
+ if (IS_ERR(it->kaddr))
+ return PTR_ERR(it->kaddr);
+
+ entry_sz = erofs_xattr_entry_size(it->kaddr +
+ erofs_blkoff(it->sb, it->pos));
+ /* xattr on-disk corruption: xattr entry beyond xattr_isize */
+ if (remaining < entry_sz) {
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
+ }
+ remaining -= entry_sz;
+ next_pos = it->pos + entry_sz;
+
+ if (getxattr)
+ ret = erofs_getxattr_foreach(it);
+ else
+ ret = erofs_listxattr_foreach(it);
+ if ((getxattr && ret != -ENOATTR) || (!getxattr && ret))
break;
+
+ it->pos = next_pos;
}
- return ret ? ret : it->buffer_ofs;
+ return ret;
}
-static int shared_listxattr(struct listxattr_iter *it)
+static int erofs_xattr_iter_shared(struct erofs_xattr_iter *it,
+ struct inode *inode, bool getxattr)
{
- struct inode *const inode = d_inode(it->dentry);
struct erofs_inode *const vi = EROFS_I(inode);
- struct super_block *const sb = it->it.sb;
- unsigned int i, xsid;
- int ret = 0;
+ struct super_block *const sb = it->sb;
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ unsigned int i;
+ int ret = -ENOATTR;
for (i = 0; i < vi->xattr_shared_count; ++i) {
- xsid = vi->xattr_shared_xattrs[i];
- it->it.blkaddr = erofs_xattr_blkaddr(sb, xsid);
- it->it.ofs = erofs_xattr_blkoff(sb, xsid);
- it->it.kaddr = erofs_read_metabuf(&it->it.buf, sb,
- it->it.blkaddr, EROFS_KMAP);
- if (IS_ERR(it->it.kaddr))
- return PTR_ERR(it->it.kaddr);
-
- ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
- if (ret)
+ it->pos = erofs_pos(sb, sbi->xattr_blkaddr) +
+ vi->xattr_shared_xattrs[i] * sizeof(__le32);
+ it->kaddr = erofs_bread(&it->buf, erofs_blknr(sb, it->pos),
+ EROFS_KMAP);
+ if (IS_ERR(it->kaddr))
+ return PTR_ERR(it->kaddr);
+
+ if (getxattr)
+ ret = erofs_getxattr_foreach(it);
+ else
+ ret = erofs_listxattr_foreach(it);
+ if ((getxattr && ret != -ENOATTR) || (!getxattr && ret))
break;
}
- return ret ? ret : it->buffer_ofs;
+ return ret;
+}
+
+int erofs_getxattr(struct inode *inode, int index, const char *name,
+ void *buffer, size_t buffer_size)
+{
+ int ret;
+ struct erofs_xattr_iter it;
+
+ if (!name)
+ return -EINVAL;
+
+ ret = erofs_init_inode_xattrs(inode);
+ if (ret)
+ return ret;
+
+ it.index = index;
+ it.name = (struct qstr)QSTR_INIT(name, strlen(name));
+ if (it.name.len > EROFS_NAME_LEN)
+ return -ERANGE;
+
+ it.sb = inode->i_sb;
+ it.buf = __EROFS_BUF_INITIALIZER;
+ erofs_init_metabuf(&it.buf, it.sb);
+ it.buffer = buffer;
+ it.buffer_size = buffer_size;
+ it.buffer_ofs = 0;
+
+ ret = erofs_xattr_iter_inline(&it, inode, true);
+ if (ret == -ENOATTR)
+ ret = erofs_xattr_iter_shared(&it, inode, true);
+ erofs_put_metabuf(&it.buf);
+ return ret ? ret : it.buffer_ofs;
}
-ssize_t erofs_listxattr(struct dentry *dentry,
- char *buffer, size_t buffer_size)
+ssize_t erofs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
int ret;
- struct listxattr_iter it;
+ struct erofs_xattr_iter it;
+ struct inode *inode = d_inode(dentry);
- ret = erofs_init_inode_xattrs(d_inode(dentry));
+ ret = erofs_init_inode_xattrs(inode);
if (ret == -ENOATTR)
return 0;
if (ret)
return ret;
- it.it.buf = __EROFS_BUF_INITIALIZER;
+ it.sb = dentry->d_sb;
+ it.buf = __EROFS_BUF_INITIALIZER;
+ erofs_init_metabuf(&it.buf, it.sb);
it.dentry = dentry;
it.buffer = buffer;
it.buffer_size = buffer_size;
it.buffer_ofs = 0;
- it.it.sb = dentry->d_sb;
-
- ret = inline_listxattr(&it);
- if (ret >= 0 || ret == -ENOATTR)
- ret = shared_listxattr(&it);
- erofs_put_metabuf(&it.it.buf);
- return ret;
+ ret = erofs_xattr_iter_inline(&it, inode, false);
+ if (!ret || ret == -ENOATTR)
+ ret = erofs_xattr_iter_shared(&it, inode, false);
+ if (ret == -ENOATTR)
+ ret = 0;
+ erofs_put_metabuf(&it.buf);
+ return ret ? ret : it.buffer_ofs;
}
void erofs_xattr_prefixes_cleanup(struct super_block *sb)
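
[Editor's note] The rewritten xattr.c collapses the four handler callbacks and two per-operation iterator types into one erofs_xattr_iter keyed by an absolute byte position; block number and in-block offset are re-derived on every access, so a span may cross block boundaries without explicit fixup. A minimal sketch of that copy loop — demo_bread()/demo_blknr()/demo_blkoff() are stand-ins that only mirror the shape of the erofs helpers:

#include <linux/err.h>
#include <linux/fs.h>

void *demo_bread(struct super_block *sb, unsigned long blkaddr);
unsigned long demo_blknr(struct super_block *sb, unsigned long long pos);
unsigned int demo_blkoff(struct super_block *sb, unsigned long long pos);

struct demo_iter {
	struct super_block *sb;
	unsigned long long pos;		/* absolute byte position */
	void *kaddr;			/* mapping of the block under pos */
	char *buffer;
	int buffer_ofs;
};

static int demo_copy_to_buffer(struct demo_iter *it, unsigned int len)
{
	unsigned int slice, processed;

	for (processed = 0; processed < len; processed += slice) {
		it->kaddr = demo_bread(it->sb, demo_blknr(it->sb, it->pos));
		if (IS_ERR(it->kaddr))
			return PTR_ERR(it->kaddr);

		/* copy at most up to the end of the current block */
		slice = min_t(unsigned int,
			      it->sb->s_blocksize - demo_blkoff(it->sb, it->pos),
			      len - processed);
		memcpy(it->buffer + it->buffer_ofs,
		       it->kaddr + demo_blkoff(it->sb, it->pos), slice);
		it->buffer_ofs += slice;
		it->pos += slice;	/* crossing a block is now implicit */
	}
	return 0;
}
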
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 160b3da43aec..5f1890e309c6 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -5,7 +5,6 @@
* Copyright (C) 2022 Alibaba Cloud
*/
#include "compress.h"
-#include <linux/prefetch.h>
#include <linux/psi.h>
#include <linux/cpuhotplug.h>
#include <trace/events/erofs.h>
@@ -92,13 +91,8 @@ struct z_erofs_pcluster {
struct z_erofs_bvec compressed_bvecs[];
};
-/* let's avoid the valid 32-bit kernel addresses */
-
-/* the chained workgroup hasn't submitted io (still open) */
-#define Z_EROFS_PCLUSTER_TAIL ((void *)0x5F0ECAFE)
-/* the chained workgroup has already submitted io */
-#define Z_EROFS_PCLUSTER_TAIL_CLOSED ((void *)0x5F0EDEAD)
-
+/* the end of a chain of pclusters */
+#define Z_EROFS_PCLUSTER_TAIL ((void *) 0x700 + POISON_POINTER_DELTA)
#define Z_EROFS_PCLUSTER_NIL (NULL)
struct z_erofs_decompressqueue {
@@ -241,14 +235,20 @@ static void z_erofs_bvec_iter_begin(struct z_erofs_bvec_iter *iter,
static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
struct z_erofs_bvec *bvec,
- struct page **candidate_bvpage)
+ struct page **candidate_bvpage,
+ struct page **pagepool)
{
- if (iter->cur == iter->nr) {
- if (!*candidate_bvpage)
- return -EAGAIN;
-
+ if (iter->cur >= iter->nr) {
+ struct page *nextpage = *candidate_bvpage;
+
+ if (!nextpage) {
+ nextpage = erofs_allocpage(pagepool, GFP_NOFS);
+ if (!nextpage)
+ return -ENOMEM;
+ set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE);
+ }
DBG_BUGON(iter->bvset->nextpage);
- iter->bvset->nextpage = *candidate_bvpage;
+ iter->bvset->nextpage = nextpage;
z_erofs_bvset_flip(iter);
iter->bvset->nextpage = NULL;
@@ -500,20 +500,6 @@ out_error_pcluster_pool:
enum z_erofs_pclustermode {
Z_EROFS_PCLUSTER_INFLIGHT,
/*
- * The current pclusters was the tail of an exist chain, in addition
- * that the previous processed chained pclusters are all decided to
- * be hooked up to it.
- * A new chain will be created for the remaining pclusters which are
- * not processed yet, so different from Z_EROFS_PCLUSTER_FOLLOWED,
- * the next pcluster cannot reuse the whole page safely for inplace I/O
- * in the following scenario:
- * ________________________________________________________________
- * | tail (partial) page | head (partial) page |
- * | (belongs to the next pcl) | (belongs to the current pcl) |
- * |_______PCLUSTER_FOLLOWED______|________PCLUSTER_HOOKED__________|
- */
- Z_EROFS_PCLUSTER_HOOKED,
- /*
* a weak form of Z_EROFS_PCLUSTER_FOLLOWED, the difference is that it
* could be dispatched into bypass queue later due to uptodated managed
* pages. All related online pages cannot be reused for inplace I/O (or
@@ -530,8 +516,8 @@ enum z_erofs_pclustermode {
* ________________________________________________________________
* | tail (partial) page | head (partial) page |
* | (of the current cl) | (of the previous collection) |
- * | PCLUSTER_FOLLOWED or | |
- * |_____PCLUSTER_HOOKED__|___________PCLUSTER_FOLLOWED____________|
+ * | | |
+ * |__PCLUSTER_FOLLOWED___|___________PCLUSTER_FOLLOWED____________|
*
* [ (*) the above page can be used as inplace I/O. ]
*/
@@ -543,12 +529,12 @@ struct z_erofs_decompress_frontend {
struct erofs_map_blocks map;
struct z_erofs_bvec_iter biter;
+ struct page *pagepool;
struct page *candidate_bvpage;
- struct z_erofs_pcluster *pcl, *tailpcl;
+ struct z_erofs_pcluster *pcl;
z_erofs_next_pcluster_t owned_head;
enum z_erofs_pclustermode mode;
- bool readahead;
/* used for applying cache strategy on the fly */
bool backmost;
erofs_off_t headoffset;
@@ -578,8 +564,7 @@ static bool z_erofs_should_alloc_cache(struct z_erofs_decompress_frontend *fe)
return false;
}
-static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
- struct page **pagepool)
+static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
{
struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode));
struct z_erofs_pcluster *pcl = fe->pcl;
@@ -620,7 +605,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
* succeeds or fallback to in-place I/O instead
* to avoid any direct reclaim.
*/
- newpage = erofs_allocpage(pagepool, gfp);
+ newpage = erofs_allocpage(&fe->pagepool, gfp);
if (!newpage)
continue;
set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
@@ -633,7 +618,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
if (page)
put_page(page);
else if (newpage)
- erofs_pagepool_add(pagepool, newpage);
+ erofs_pagepool_add(&fe->pagepool, newpage);
}
/*
@@ -654,7 +639,7 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
/*
- * refcount of workgroup is now freezed as 1,
+ * refcount of workgroup is now frozen as 0,
* therefore no need to worry about available decompression users.
*/
for (i = 0; i < pcl->pclusterpages; ++i) {
@@ -678,29 +663,73 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
return 0;
}
-int erofs_try_to_free_cached_page(struct page *page)
+static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp)
{
- struct z_erofs_pcluster *const pcl = (void *)page_private(page);
- int ret, i;
+ struct z_erofs_pcluster *pcl = folio_get_private(folio);
+ bool ret;
+ int i;
- if (!erofs_workgroup_try_to_freeze(&pcl->obj, 1))
- return 0;
+ if (!folio_test_private(folio))
+ return true;
+
+ ret = false;
+ spin_lock(&pcl->obj.lockref.lock);
+ if (pcl->obj.lockref.count > 0)
+ goto out;
- ret = 0;
DBG_BUGON(z_erofs_is_inline_pcluster(pcl));
for (i = 0; i < pcl->pclusterpages; ++i) {
- if (pcl->compressed_bvecs[i].page == page) {
+ if (pcl->compressed_bvecs[i].page == &folio->page) {
WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
- ret = 1;
+ ret = true;
break;
}
}
- erofs_workgroup_unfreeze(&pcl->obj, 1);
if (ret)
- detach_page_private(page);
+ folio_detach_private(folio);
+out:
+ spin_unlock(&pcl->obj.lockref.lock);
return ret;
}
+/*
+ * It will be called only on inode eviction. In case that there are still some
+ * decompression requests in progress, wait with rescheduling for a bit here.
+ * An extra lock could be introduced instead but it seems unnecessary.
+ */
+static void z_erofs_cache_invalidate_folio(struct folio *folio,
+ size_t offset, size_t length)
+{
+ const size_t stop = length + offset;
+
+ /* Check for potential overflow in debug mode */
+ DBG_BUGON(stop > folio_size(folio) || stop < length);
+
+ if (offset == 0 && stop == folio_size(folio))
+ while (!z_erofs_cache_release_folio(folio, GFP_NOFS))
+ cond_resched();
+}
+
+static const struct address_space_operations z_erofs_cache_aops = {
+ .release_folio = z_erofs_cache_release_folio,
+ .invalidate_folio = z_erofs_cache_invalidate_folio,
+};
+
+int erofs_init_managed_cache(struct super_block *sb)
+{
+ struct inode *const inode = new_inode(sb);
+
+ if (!inode)
+ return -ENOMEM;
+
+ set_nlink(inode, 1);
+ inode->i_size = OFFSET_MAX;
+ inode->i_mapping->a_ops = &z_erofs_cache_aops;
+ mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
+ EROFS_SB(sb)->managed_cache = inode;
+ return 0;
+}
+
static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
struct z_erofs_bvec *bvec)
{
@@ -731,7 +760,8 @@ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
!fe->candidate_bvpage)
fe->candidate_bvpage = bvec->page;
}
- ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage);
+ ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage,
+ &fe->pagepool);
fe->pcl->vcnt += (ret >= 0);
return ret;
}
@@ -750,19 +780,7 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
return;
}
- /*
- * type 2, link to the end of an existing open chain, be careful
- * that its submission is controlled by the original attached chain.
- */
- if (*owned_head != &pcl->next && pcl != f->tailpcl &&
- cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
- *owned_head) == Z_EROFS_PCLUSTER_TAIL) {
- *owned_head = Z_EROFS_PCLUSTER_TAIL;
- f->mode = Z_EROFS_PCLUSTER_HOOKED;
- f->tailpcl = NULL;
- return;
- }
- /* type 3, it belongs to a chain, but it isn't the end of the chain */
+ /* type 2, it belongs to an ongoing chain */
f->mode = Z_EROFS_PCLUSTER_INFLIGHT;
}
@@ -786,7 +804,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
if (IS_ERR(pcl))
return PTR_ERR(pcl);
- atomic_set(&pcl->obj.refcount, 1);
+ spin_lock_init(&pcl->obj.lockref.lock);
pcl->algorithmformat = map->m_algorithmformat;
pcl->length = 0;
pcl->partial = true;
@@ -823,9 +841,6 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
goto err_out;
}
}
- /* used to check tail merging loop due to corrupted images */
- if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
- fe->tailpcl = pcl;
fe->owned_head = &pcl->next;
fe->pcl = pcl;
return 0;
@@ -846,7 +861,6 @@ static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe)
/* must be Z_EROFS_PCLUSTER_TAIL or pointed to previous pcluster */
DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);
- DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
if (!(map->m_flags & EROFS_MAP_META)) {
grp = erofs_find_workgroup(fe->inode->i_sb,
@@ -865,10 +879,6 @@ static int z_erofs_collector_begin(struct z_erofs_decompress_frontend *fe)
if (ret == -EEXIST) {
mutex_lock(&fe->pcl->lock);
- /* used to check tail merging loop due to corrupted images */
- if (fe->owned_head == Z_EROFS_PCLUSTER_TAIL)
- fe->tailpcl = fe->pcl;
-
z_erofs_try_to_claim_pcluster(fe);
} else if (ret) {
return ret;
@@ -908,10 +918,8 @@ static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
z_erofs_bvec_iter_end(&fe->biter);
mutex_unlock(&pcl->lock);
- if (fe->candidate_bvpage) {
- DBG_BUGON(z_erofs_is_shortlived_page(fe->candidate_bvpage));
+ if (fe->candidate_bvpage)
fe->candidate_bvpage = NULL;
- }
/*
* if all pending pages are added, don't hold its reference
@@ -958,7 +966,7 @@ static int z_erofs_read_fragment(struct inode *inode, erofs_off_t pos,
}
static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
- struct page *page, struct page **pagepool)
+ struct page *page)
{
struct inode *const inode = fe->inode;
struct erofs_map_blocks *const map = &fe->map;
@@ -1016,7 +1024,7 @@ repeat:
fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
} else {
/* bind cache first when cached decompression is preferred */
- z_erofs_bind_cache(fe, pagepool);
+ z_erofs_bind_cache(fe);
}
hitted:
/*
@@ -1025,8 +1033,7 @@ hitted:
* those chains are handled asynchronously thus the page cannot be used
* for inplace I/O or bvpage (should be processed in a strict order.)
*/
- tight &= (fe->mode >= Z_EROFS_PCLUSTER_HOOKED &&
- fe->mode != Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
+ tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
cur = end - min_t(unsigned int, offset + end - map->m_la, end);
if (!(map->m_flags & EROFS_MAP_MAPPED)) {
@@ -1056,24 +1063,13 @@ hitted:
if (cur)
tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED);
-retry:
err = z_erofs_attach_page(fe, &((struct z_erofs_bvec) {
.page = page,
.offset = offset - map->m_la,
.end = end,
}), exclusive);
- /* should allocate an additional short-lived page for bvset */
- if (err == -EAGAIN && !fe->candidate_bvpage) {
- fe->candidate_bvpage = alloc_page(GFP_NOFS | __GFP_NOFAIL);
- set_page_private(fe->candidate_bvpage,
- Z_EROFS_SHORTLIVED_PAGE);
- goto retry;
- }
-
- if (err) {
- DBG_BUGON(err == -EAGAIN && fe->candidate_bvpage);
+ if (err)
goto out;
- }
z_erofs_onlinepage_split(page);
/* bump up the number of split parts of a page */
@@ -1104,7 +1100,7 @@ out:
return err;
}
-static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi,
+static bool z_erofs_is_sync_decompress(struct erofs_sb_info *sbi,
unsigned int readahead_pages)
{
/* auto: enable for read_folio, disable for readahead */
@@ -1283,6 +1279,8 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
struct z_erofs_pcluster *pcl = be->pcl;
unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
+ const struct z_erofs_decompressor *decompressor =
+ &erofs_decompressors[pcl->algorithmformat];
unsigned int i, inputsize;
int err2;
struct page *page;
@@ -1326,7 +1324,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
else
inputsize = pclusterpages * PAGE_SIZE;
- err = z_erofs_decompress(&(struct z_erofs_decompress_req) {
+ err = decompressor->decompress(&(struct z_erofs_decompress_req) {
.sb = be->sb,
.in = be->compressed_pages,
.out = be->decompressed_pages,
@@ -1404,10 +1402,7 @@ static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
};
z_erofs_next_pcluster_t owned = io->head;
- while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) {
- /* impossible that 'owned' equals Z_EROFS_WORK_TPTR_TAIL */
- DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL);
- /* impossible that 'owned' equals Z_EROFS_PCLUSTER_NIL */
+ while (owned != Z_EROFS_PCLUSTER_TAIL) {
DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
be.pcl = container_of(owned, struct z_erofs_pcluster, next);
@@ -1424,7 +1419,7 @@ static void z_erofs_decompressqueue_work(struct work_struct *work)
container_of(work, struct z_erofs_decompressqueue, u.work);
struct page *pagepool = NULL;
- DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+ DBG_BUGON(bgq->head == Z_EROFS_PCLUSTER_TAIL);
z_erofs_decompress_queue(bgq, &pagepool);
erofs_release_pages(&pagepool);
kvfree(bgq);
@@ -1452,7 +1447,7 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
if (atomic_add_return(bios, &io->pending_bios))
return;
/* Use (kthread_)work and sync decompression for atomic contexts only */
- if (in_atomic() || irqs_disabled()) {
+ if (!in_task() || irqs_disabled() || rcu_read_lock_any_held()) {
#ifdef CONFIG_EROFS_FS_PCPU_KTHREAD
struct kthread_worker *worker;
@@ -1612,7 +1607,7 @@ fg_out:
q->sync = true;
}
q->sb = sb;
- q->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
+ q->head = Z_EROFS_PCLUSTER_TAIL;
return q;
}
@@ -1630,11 +1625,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];
- DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
- if (owned_head == Z_EROFS_PCLUSTER_TAIL)
- owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
-
- WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED);
+ WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL);
WRITE_ONCE(*submit_qtail, owned_head);
WRITE_ONCE(*bypass_qtail, &pcl->next);
@@ -1668,9 +1659,8 @@ static void z_erofs_decompressqueue_endio(struct bio *bio)
}
static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
- struct page **pagepool,
struct z_erofs_decompressqueue *fgq,
- bool *force_fg)
+ bool *force_fg, bool readahead)
{
struct super_block *sb = f->inode->i_sb;
struct address_space *mc = MNGD_MAPPING(EROFS_SB(sb));
@@ -1705,15 +1695,10 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
unsigned int i = 0;
bool bypass = true;
- /* no possible 'owned_head' equals the following */
- DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
-
pcl = container_of(owned_head, struct z_erofs_pcluster, next);
+ owned_head = READ_ONCE(pcl->next);
- /* close the main owned chain at first */
- owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
- Z_EROFS_PCLUSTER_TAIL_CLOSED);
if (z_erofs_is_inline_pcluster(pcl)) {
move_to_bypass_jobqueue(pcl, qtail, owned_head);
continue;
@@ -1731,8 +1716,8 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f,
do {
struct page *page;
- page = pickup_page_for_submission(pcl, i++, pagepool,
- mc);
+ page = pickup_page_for_submission(pcl, i++,
+ &f->pagepool, mc);
if (!page)
continue;
@@ -1761,7 +1746,7 @@ submit_bio_retry:
bio->bi_iter.bi_sector = (sector_t)cur <<
(sb->s_blocksize_bits - 9);
bio->bi_private = q[JQ_SUBMIT];
- if (f->readahead)
+ if (readahead)
bio->bi_opf |= REQ_RAHEAD;
++nr_bios;
}
@@ -1797,16 +1782,16 @@ submit_bio_retry:
}
static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
- struct page **pagepool, bool force_fg)
+ bool force_fg, bool ra)
{
struct z_erofs_decompressqueue io[NR_JOBQUEUES];
if (f->owned_head == Z_EROFS_PCLUSTER_TAIL)
return;
- z_erofs_submit_queue(f, pagepool, io, &force_fg);
+ z_erofs_submit_queue(f, io, &force_fg, ra);
/* handle bypass queue (no i/o pclusters) immediately */
- z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
+ z_erofs_decompress_queue(&io[JQ_BYPASS], &f->pagepool);
if (!force_fg)
return;
@@ -1815,7 +1800,7 @@ static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
wait_for_completion_io(&io[JQ_SUBMIT].u.done);
/* handle synchronous decompress queue in the caller context */
- z_erofs_decompress_queue(&io[JQ_SUBMIT], pagepool);
+ z_erofs_decompress_queue(&io[JQ_SUBMIT], &f->pagepool);
}
/*
@@ -1823,29 +1808,28 @@ static void z_erofs_runqueue(struct z_erofs_decompress_frontend *f,
* approximate readmore strategies as a start.
*/
static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
- struct readahead_control *rac,
- erofs_off_t end,
- struct page **pagepool,
- bool backmost)
+ struct readahead_control *rac, bool backmost)
{
struct inode *inode = f->inode;
struct erofs_map_blocks *map = &f->map;
- erofs_off_t cur;
+ erofs_off_t cur, end, headoffset = f->headoffset;
int err;
if (backmost) {
+ if (rac)
+ end = headoffset + readahead_length(rac) - 1;
+ else
+ end = headoffset + PAGE_SIZE - 1;
map->m_la = end;
err = z_erofs_map_blocks_iter(inode, map,
EROFS_GET_BLOCKS_READMORE);
if (err)
return;
- /* expend ra for the trailing edge if readahead */
+ /* expand ra for the trailing edge if readahead */
if (rac) {
- loff_t newstart = readahead_pos(rac);
-
cur = round_up(map->m_la + map->m_llen, PAGE_SIZE);
- readahead_expand(rac, newstart, cur - newstart);
+ readahead_expand(rac, headoffset, cur - headoffset);
return;
}
end = round_up(end, PAGE_SIZE);
@@ -1866,7 +1850,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
if (PageUptodate(page)) {
unlock_page(page);
} else {
- err = z_erofs_do_read_page(f, page, pagepool);
+ err = z_erofs_do_read_page(f, page);
if (err)
erofs_err(inode->i_sb,
"readmore error at page %lu @ nid %llu",
@@ -1887,28 +1871,24 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio)
struct inode *const inode = page->mapping->host;
struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
- struct page *pagepool = NULL;
int err;
trace_erofs_readpage(page, false);
f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
- z_erofs_pcluster_readmore(&f, NULL, f.headoffset + PAGE_SIZE - 1,
- &pagepool, true);
- err = z_erofs_do_read_page(&f, page, &pagepool);
- z_erofs_pcluster_readmore(&f, NULL, 0, &pagepool, false);
-
+ z_erofs_pcluster_readmore(&f, NULL, true);
+ err = z_erofs_do_read_page(&f, page);
+ z_erofs_pcluster_readmore(&f, NULL, false);
(void)z_erofs_collector_end(&f);
/* if some compressed cluster ready, need submit them anyway */
- z_erofs_runqueue(&f, &pagepool,
- z_erofs_get_sync_decompress_policy(sbi, 0));
+ z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, 0), false);
if (err)
erofs_err(inode->i_sb, "failed to read, err [%d]", err);
erofs_put_metabuf(&f.map.buf);
- erofs_release_pages(&pagepool);
+ erofs_release_pages(&f.pagepool);
return err;
}
@@ -1917,14 +1897,12 @@ static void z_erofs_readahead(struct readahead_control *rac)
struct inode *const inode = rac->mapping->host;
struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
- struct page *pagepool = NULL, *head = NULL, *page;
+ struct page *head = NULL, *page;
unsigned int nr_pages;
- f.readahead = true;
f.headoffset = readahead_pos(rac);
- z_erofs_pcluster_readmore(&f, rac, f.headoffset +
- readahead_length(rac) - 1, &pagepool, true);
+ z_erofs_pcluster_readmore(&f, rac, true);
nr_pages = readahead_count(rac);
trace_erofs_readpages(inode, readahead_index(rac), nr_pages, false);
@@ -1940,20 +1918,19 @@ static void z_erofs_readahead(struct readahead_control *rac)
/* traversal in reverse order */
head = (void *)page_private(page);
- err = z_erofs_do_read_page(&f, page, &pagepool);
+ err = z_erofs_do_read_page(&f, page);
if (err)
erofs_err(inode->i_sb,
"readahead error at page %lu @ nid %llu",
page->index, EROFS_I(inode)->nid);
put_page(page);
}
- z_erofs_pcluster_readmore(&f, rac, 0, &pagepool, false);
+ z_erofs_pcluster_readmore(&f, rac, false);
(void)z_erofs_collector_end(&f);
- z_erofs_runqueue(&f, &pagepool,
- z_erofs_get_sync_decompress_policy(sbi, nr_pages));
+ z_erofs_runqueue(&f, z_erofs_is_sync_decompress(sbi, nr_pages), true);
erofs_put_metabuf(&f.map.buf);
- erofs_release_pages(&pagepool);
+ erofs_release_pages(&f.pagepool);
}
const struct address_space_operations z_erofs_aops = {
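
[Editor's note] With hooked chains gone, a pcluster's next field has exactly two non-pointer states, and the TAIL sentinel moves from a hand-picked "unlikely" address (0x5F0ECAFE) into the kernel's poison range, so a buggy dereference faults immediately; the separate TAIL_CLOSED state disappears because chains can no longer be reopened and hooked onto. A minimal sketch of the sentinel scheme — demo_* names are stand-ins for the z_erofs_pcluster machinery:

#include <linux/kernel.h>
#include <linux/poison.h>

#define DEMO_PCLUSTER_NIL	(NULL)			/* no chain at all */
#define DEMO_PCLUSTER_TAIL	((void *)0x700 + POISON_POINTER_DELTA)

struct demo_pcluster {
	void *next;	/* DEMO_PCLUSTER_TAIL or &next_pcl->next */
};

static void demo_walk_chain(void *owned_head,
			    void (*process)(struct demo_pcluster *))
{
	/* single forward walk; no cmpxchg "close the chain" step needed */
	while (owned_head != DEMO_PCLUSTER_TAIL) {
		struct demo_pcluster *pcl =
			container_of(owned_head, struct demo_pcluster, next);

		owned_head = READ_ONCE(pcl->next);
		process(pcl);	/* decompress, or move to the bypass queue */
	}
}
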
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index d37c5c89c728..1909ddafd9c7 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -22,8 +22,8 @@ struct z_erofs_maprecorder {
bool partialref;
};
-static int legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
- unsigned long lcn)
+static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
+ unsigned long lcn)
{
struct inode *const inode = m->inode;
struct erofs_inode *const vi = EROFS_I(inode);
@@ -129,7 +129,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
u8 *in, type;
bool big_pcluster;
- if (1 << amortizedshift == 4)
+ if (1 << amortizedshift == 4 && lclusterbits <= 14)
vcnt = 2;
else if (1 << amortizedshift == 2 && lclusterbits == 12)
vcnt = 16;
@@ -226,12 +226,11 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
return 0;
}
-static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
- unsigned long lcn, bool lookahead)
+static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
+ unsigned long lcn, bool lookahead)
{
struct inode *const inode = m->inode;
struct erofs_inode *const vi = EROFS_I(inode);
- const unsigned int lclusterbits = vi->z_logical_clusterbits;
const erofs_off_t ebase = sizeof(struct z_erofs_map_header) +
ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
unsigned int totalidx = erofs_iblks(inode);
@@ -239,9 +238,6 @@ static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
unsigned int amortizedshift;
erofs_off_t pos;
- if (lclusterbits != 12)
- return -EOPNOTSUPP;
-
if (lcn >= totalidx)
return -EINVAL;
@@ -281,23 +277,23 @@ out:
return unpack_compacted_index(m, amortizedshift, pos, lookahead);
}
-static int z_erofs_load_cluster_from_disk(struct z_erofs_maprecorder *m,
- unsigned int lcn, bool lookahead)
+static int z_erofs_load_lcluster_from_disk(struct z_erofs_maprecorder *m,
+ unsigned int lcn, bool lookahead)
{
- const unsigned int datamode = EROFS_I(m->inode)->datalayout;
-
- if (datamode == EROFS_INODE_COMPRESSED_FULL)
- return legacy_load_cluster_from_disk(m, lcn);
-
- if (datamode == EROFS_INODE_COMPRESSED_COMPACT)
- return compacted_load_cluster_from_disk(m, lcn, lookahead);
-
- return -EINVAL;
+ switch (EROFS_I(m->inode)->datalayout) {
+ case EROFS_INODE_COMPRESSED_FULL:
+ return z_erofs_load_full_lcluster(m, lcn);
+ case EROFS_INODE_COMPRESSED_COMPACT:
+ return z_erofs_load_compact_lcluster(m, lcn, lookahead);
+ default:
+ return -EINVAL;
+ }
}
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
unsigned int lookback_distance)
{
+ struct super_block *sb = m->inode->i_sb;
struct erofs_inode *const vi = EROFS_I(m->inode);
const unsigned int lclusterbits = vi->z_logical_clusterbits;
@@ -305,21 +301,15 @@ static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
unsigned long lcn = m->lcn - lookback_distance;
int err;
- /* load extent head logical cluster if needed */
- err = z_erofs_load_cluster_from_disk(m, lcn, false);
+ err = z_erofs_load_lcluster_from_disk(m, lcn, false);
if (err)
return err;
switch (m->type) {
case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
- if (!m->delta[0]) {
- erofs_err(m->inode->i_sb,
- "invalid lookback distance 0 @ nid %llu",
- vi->nid);
- DBG_BUGON(1);
- return -EFSCORRUPTED;
- }
lookback_distance = m->delta[0];
+ if (!lookback_distance)
+ goto err_bogus;
continue;
case Z_EROFS_LCLUSTER_TYPE_PLAIN:
case Z_EROFS_LCLUSTER_TYPE_HEAD1:
@@ -328,16 +318,15 @@ static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
return 0;
default:
- erofs_err(m->inode->i_sb,
- "unknown type %u @ lcn %lu of nid %llu",
+ erofs_err(sb, "unknown type %u @ lcn %lu of nid %llu",
m->type, lcn, vi->nid);
DBG_BUGON(1);
return -EOPNOTSUPP;
}
}
-
- erofs_err(m->inode->i_sb, "bogus lookback distance @ nid %llu",
- vi->nid);
+err_bogus:
+ erofs_err(sb, "bogus lookback distance %u @ lcn %lu of nid %llu",
+ lookback_distance, m->lcn, vi->nid);
DBG_BUGON(1);
return -EFSCORRUPTED;
}
@@ -369,7 +358,7 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
if (m->compressedblks)
goto out;
- err = z_erofs_load_cluster_from_disk(m, lcn, false);
+ err = z_erofs_load_lcluster_from_disk(m, lcn, false);
if (err)
return err;
@@ -401,9 +390,8 @@ static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
break;
fallthrough;
default:
- erofs_err(m->inode->i_sb,
- "cannot found CBLKCNT @ lcn %lu of nid %llu",
- lcn, vi->nid);
+ erofs_err(sb, "cannot found CBLKCNT @ lcn %lu of nid %llu", lcn,
+ vi->nid);
DBG_BUGON(1);
return -EFSCORRUPTED;
}
@@ -411,9 +399,7 @@ out:
map->m_plen = erofs_pos(sb, m->compressedblks);
return 0;
err_bonus_cblkcnt:
- erofs_err(m->inode->i_sb,
- "bogus CBLKCNT @ lcn %lu of nid %llu",
- lcn, vi->nid);
+ erofs_err(sb, "bogus CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid);
DBG_BUGON(1);
return -EFSCORRUPTED;
}
@@ -434,7 +420,7 @@ static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
return 0;
}
- err = z_erofs_load_cluster_from_disk(m, lcn, true);
+ err = z_erofs_load_lcluster_from_disk(m, lcn, true);
if (err)
return err;
@@ -481,7 +467,7 @@ static int z_erofs_do_map_blocks(struct inode *inode,
initial_lcn = ofs >> lclusterbits;
endoff = ofs & ((1 << lclusterbits) - 1);
- err = z_erofs_load_cluster_from_disk(&m, initial_lcn, false);
+ err = z_erofs_load_lcluster_from_disk(&m, initial_lcn, false);
if (err)
goto unmap_out;
@@ -539,8 +525,7 @@ static int z_erofs_do_map_blocks(struct inode *inode,
if (flags & EROFS_GET_BLOCKS_FINDTAIL) {
vi->z_tailextent_headlcn = m.lcn;
/* for non-compact indexes, fragmentoff is 64 bits */
- if (fragment &&
- vi->datalayout == EROFS_INODE_COMPRESSED_FULL)
+ if (fragment && vi->datalayout == EROFS_INODE_COMPRESSED_FULL)
vi->z_fragmentoff |= (u64)m.pblk << 32;
}
if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
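
[Editor's note] In zmap.c the loaders get accurate names and the if-ladder becomes an explicit switch; for the 16k fix specifically, the compact-4B path is now keyed on lclusterbits <= 14 instead of the blanket lclusterbits != 12 rejection, which is what lets 16k-block (lclusterbits == 14) arm64 images map correctly, and the lookback error report gains the offending distance and lcn. A minimal sketch of the dispatch shape — demo_* names only mirror the kernel functions:

#include <linux/errno.h>
#include <linux/types.h>

enum demo_datalayout {
	DEMO_INODE_COMPRESSED_FULL,	/* full 8-byte lcluster indexes */
	DEMO_INODE_COMPRESSED_COMPACT,	/* 2B/4B compacted indexes */
};

struct demo_maprecorder {
	enum demo_datalayout datalayout;
};

int demo_load_full_lcluster(struct demo_maprecorder *m, unsigned long lcn);
int demo_load_compact_lcluster(struct demo_maprecorder *m, unsigned long lcn,
			       bool lookahead);

static int demo_load_lcluster(struct demo_maprecorder *m, unsigned long lcn,
			      bool lookahead)
{
	switch (m->datalayout) {
	case DEMO_INODE_COMPRESSED_FULL:
		return demo_load_full_lcluster(m, lcn);
	case DEMO_INODE_COMPRESSED_COMPACT:
		return demo_load_compact_lcluster(m, lcn, lookahead);
	default:
		return -EINVAL;		/* corrupted or unknown layout */
	}
}
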