author      Vivek Goyal <vgoyal@redhat.com>          2020-02-28 11:34:55 -0500
committer   Dan Williams <dan.j.williams@intel.com>  2020-04-02 19:15:03 -0700
commit      0a23f9ffa5ac20d3c95a6e850f4ee68e4034f781 (patch)
tree        60d1e8e0819b3fb1c58ef8992794e1ba526873f0 /fs
parent      cdf6cdcd3b99a99ea9ecc1b05d1d040d5a69a134 (diff)
dax: Use new dax zero page method for zeroing a page
Use the new dax native zero page method for zeroing a page if the I/O is page aligned. Otherwise fall back to direct_access() + memcpy(). This gets rid of one of the dependencies on the block device in the dax path.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Link: https://lore.kernel.org/r/20200228163456.1587-6-vgoyal@redhat.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
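For orientation, here is a minimal caller-side sketch (illustrative only; the helper example_zero_bytes() and the byte-to-sector math below are assumptions, not code from this patch) of how a byte range maps onto __dax_zero_page_range()'s arguments: sector must address the start of the page containing the range, offset is the byte offset inside that page, and size is the number of bytes to zero.

/*
 * Illustrative sketch -- not from this patch. Zero the byte range
 * [pos, pos + len) on a dax-capable device via __dax_zero_page_range().
 */
static int example_zero_bytes(struct block_device *bdev,
                              struct dax_device *dax_dev,
                              loff_t pos, unsigned int len)
{
        /* 512-byte sector of the page that contains 'pos' */
        sector_t sector = (pos & PAGE_MASK) >> SECTOR_SHIFT;
        /* byte offset of 'pos' within that page */
        unsigned int offset = offset_in_page(pos);

        /*
         * Page-aligned requests (offset == 0, len a multiple of PAGE_SIZE)
         * now take the dax_zero_page_range() path; anything else falls back
         * to dax_direct_access() + memset() + dax_flush(), so an unaligned
         * range is expected to stay within a single page.
         */
        return __dax_zero_page_range(bdev, dax_dev, sector, offset, len);
}

A fully page-aligned call (offset == 0 and len a multiple of PAGE_SIZE) is the case the new native zeroing path below is meant to serve.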
Diffstat (limited to 'fs')
-rw-r--r--   fs/dax.c   53
1 file changed, 23 insertions(+), 30 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
index 35da144375a0..98ba3756163a 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1038,47 +1038,40 @@ static vm_fault_t dax_load_hole(struct xa_state *xas,
 	return ret;
 }
 
-static bool dax_range_is_aligned(struct block_device *bdev,
-				 unsigned int offset, unsigned int length)
-{
-	unsigned short sector_size = bdev_logical_block_size(bdev);
-
-	if (!IS_ALIGNED(offset, sector_size))
-		return false;
-	if (!IS_ALIGNED(length, sector_size))
-		return false;
-
-	return true;
-}
-
 int __dax_zero_page_range(struct block_device *bdev,
 		struct dax_device *dax_dev, sector_t sector,
 		unsigned int offset, unsigned int size)
 {
-	if (dax_range_is_aligned(bdev, offset, size)) {
-		sector_t start_sector = sector + (offset >> 9);
+	pgoff_t pgoff;
+	long rc, id;
+	void *kaddr;
+	bool page_aligned = false;
 
-		return blkdev_issue_zeroout(bdev, start_sector,
-				size >> 9, GFP_NOFS, 0);
-	} else {
-		pgoff_t pgoff;
-		long rc, id;
-		void *kaddr;
 
-		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
-		if (rc)
-			return rc;
+	if (IS_ALIGNED(sector << SECTOR_SHIFT, PAGE_SIZE) &&
+	    IS_ALIGNED(size, PAGE_SIZE))
+		page_aligned = true;
+
+	rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
+	if (rc)
+		return rc;
 
-		id = dax_read_lock();
+	id = dax_read_lock();
+
+	if (page_aligned)
+		rc = dax_zero_page_range(dax_dev, pgoff, size >> PAGE_SHIFT);
+	else
 		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
-		if (rc < 0) {
-			dax_read_unlock(id);
-			return rc;
-		}
+	if (rc < 0) {
+		dax_read_unlock(id);
+		return rc;
+	}
+
+	if (!page_aligned) {
 		memset(kaddr + offset, 0, size);
 		dax_flush(dax_dev, kaddr + offset, size);
-		dax_read_unlock(id);
 	}
+	dax_read_unlock(id);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
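For completeness, the new page_aligned test can be exercised on its own. The stand-alone program below is a sketch that assumes 512-byte logical sectors and 4 KiB pages and redefines IS_ALIGNED with the usual power-of-two semantics; it shows which (sector, size) pairs take the native dax_zero_page_range() path and which fall back to dax_direct_access().

/* Stand-alone illustration of the page_aligned test introduced above. */
#include <stdbool.h>
#include <stdio.h>

#define SECTOR_SHIFT    9
#define PAGE_SIZE       4096ULL
#define IS_ALIGNED(x, a)        (((x) & ((a) - 1)) == 0)

static bool takes_native_zeroing_path(unsigned long long sector,
                                      unsigned long long size)
{
        return IS_ALIGNED(sector << SECTOR_SHIFT, PAGE_SIZE) &&
               IS_ALIGNED(size, PAGE_SIZE);
}

int main(void)
{
        /* sector 8 = byte 4096, one full page -> native zeroing */
        printf("%d\n", takes_native_zeroing_path(8, 4096));    /* 1 */
        /* sector 1 = byte 512 -> start not page aligned, fallback */
        printf("%d\n", takes_native_zeroing_path(1, 4096));    /* 0 */
        /* page-aligned start but partial length -> fallback */
        printf("%d\n", takes_native_zeroing_path(8, 1024));    /* 0 */
        return 0;
}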