author		Gao Xiang <hsiangkao@linux.alibaba.com>	2024-03-05 17:14:45 +0800
committer	Gao Xiang <hsiangkao@linux.alibaba.com>	2024-03-10 18:40:49 +0800
commit		19fb9070c2cd9aa6d4bd368985918d7200ec1722 (patch)
tree		aec5b96ed1c9865ea8d14e3a24dfa03f8cefb410
parent		0e25a788ea2c4a6f5f971279396fcf79f4fecd7b (diff)
erofs: get rid of `justfound` debugging tag
`justfound` is introduced to identify cached folios that are just added
to compressed bvecs so that more checks can be applied in the I/O
submission path.
EROFS is now quite stable compared to the codebase at that stage, and
`justfound` has become a burden for upcoming features. Drop it.
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240305091448.1384242-3-hsiangkao@linux.alibaba.com
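
For background, `justfound` was not a separate field: the removed code encoded it by setting the low bit of the page pointer stored in `compressed_bvecs[]` (see `(void *)((unsigned long)page | 1)` in the diff below) and stripping it again in the I/O path. The following userspace sketch only illustrates that pointer-tagging idiom in isolation; it is a simplified stand-in, not the EROFS code, and `struct page` here is a dummy type.

	#include <stdint.h>
	#include <stdio.h>

	/* Stand-in for the kernel's struct page; for illustration only. */
	struct page { int dummy; };

	/* Tag the low bit of an aligned pointer, as the removed code did
	 * with `t = (void *)((unsigned long)page | 1);`. */
	static void *tag_justfound(struct page *page)
	{
		return (void *)((uintptr_t)page | 1UL);
	}

	/* Split the stored value back into the flag and the real pointer,
	 * mirroring `justfound = (unsigned long)page & 1UL;` plus the mask. */
	static struct page *untag(void *stored, int *justfound)
	{
		*justfound = (int)((uintptr_t)stored & 1UL);
		return (struct page *)((uintptr_t)stored & ~1UL);
	}

	int main(void)
	{
		static struct page p;	/* aligned, so bit 0 is free for the tag */
		void *stored = tag_justfound(&p);
		int justfound;
		struct page *page = untag(stored, &justfound);

		printf("justfound=%d, pointer restored=%d\n", justfound, page == &p);
		return 0;
	}

After this patch, no tag bit is needed: the cached or preallocated page pointer is stored directly (`page ? page : newpage`), so readers of `compressed_bvecs[]` no longer have to mask it.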
-rw-r--r--	fs/erofs/zdata.c	20
1 file changed, 3 insertions, 17 deletions
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index c25074657708..75b05990b571 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -565,17 +565,13 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 
 	for (i = 0; i < pclusterpages; ++i) {
 		struct page *page, *newpage;
-		void *t;	/* mark pages just found for debugging */
 
 		/* Inaccurate check w/o locking to avoid unneeded lookups */
 		if (READ_ONCE(pcl->compressed_bvecs[i].page))
 			continue;
 
 		page = find_get_page(mc, pcl->obj.index + i);
-		if (page) {
-			t = (void *)((unsigned long)page | 1);
-			newpage = NULL;
-		} else {
+		if (!page) {
 			/* I/O is needed, no possible to decompress directly */
 			standalone = false;
 			if (!shouldalloc)
@@ -589,11 +585,10 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
 			if (!newpage)
 				continue;
 			set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
-			t = (void *)((unsigned long)newpage | 1);
 		}
 		spin_lock(&pcl->obj.lockref.lock);
 		if (!pcl->compressed_bvecs[i].page) {
-			pcl->compressed_bvecs[i].page = t;
+			pcl->compressed_bvecs[i].page = page ? page : newpage;
 			spin_unlock(&pcl->obj.lockref.lock);
 			continue;
 		}
@@ -1423,7 +1418,7 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
 	struct z_erofs_bvec zbv;
 	struct address_space *mapping;
 	struct page *page;
-	int justfound, bs = i_blocksize(f->inode);
+	int bs = i_blocksize(f->inode);
 
 	/* Except for inplace pages, the entire page can be used for I/Os */
 	bvec->bv_offset = 0;
@@ -1432,9 +1427,6 @@ repeat:
 	spin_lock(&pcl->obj.lockref.lock);
 	zbv = pcl->compressed_bvecs[nr];
 	page = zbv.page;
-	justfound = (unsigned long)page & 1UL;
-	page = (struct page *)((unsigned long)page & ~1UL);
-	pcl->compressed_bvecs[nr].page = page;
 	spin_unlock(&pcl->obj.lockref.lock);
 	if (!page)
 		goto out_allocpage;
@@ -1465,9 +1457,6 @@ repeat:
 	}
 
 	lock_page(page);
-	/* only true if page reclaim goes wrong, should never happen */
-	DBG_BUGON(justfound && PagePrivate(page));
-
 	/* the cached page is still in managed cache */
 	if (page->mapping == mc) {
 		/*
@@ -1475,7 +1464,6 @@ repeat:
 		 * `->private` pcluster hint. Let's reconnect them.
 		 */
 		if (!PagePrivate(page)) {
-			DBG_BUGON(!justfound);
 			/* compressed_bvecs[] already takes a ref */
 			attach_page_private(page, pcl);
 			put_page(page);
@@ -1494,8 +1482,6 @@ repeat:
 	 * allocate a new page for compressed data.
 	 */
 	DBG_BUGON(page->mapping);
-	DBG_BUGON(!justfound);
-
 	tocache = true;
 	unlock_page(page);
 	put_page(page);