author	Linus Torvalds <torvalds@linux-foundation.org>	2023-02-22 14:11:54 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-02-22 14:11:54 -0800
commit	232dd599068ff228a29a4a1a6ab81e6b55198bb0 (patch)
tree	2ad06f4c489b5bd2d54d508b665f203a3e3a1c7a
parent	b7ee88128242b3460b0016a7e42207c9799f73b7 (diff)
parent	2b188a2cfc4d8f319ad23832ec1390bdae52daf6 (diff)
Merge tag 'zonefs-6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/zonefs
Pull zonefs updates from Damien Le Moal:

 - Reorganize zonefs code to split file related operations to a new
   fs/zonefs/file.c file (me)

 - Modify zonefs to use dynamically allocated inodes and dentries (using
   the inode and dentry caches) instead of statically allocating
   everything on mount. This saves a significant amount of memory for
   very large zoned block devices with 10s of thousands of zones (me)

 - Make zonefs_sb_ktype a const struct kobj_type (Thomas)

* tag 'zonefs-6.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/zonefs:
  zonefs: make kobj_type structure constant
  zonefs: Cache zone group directory inodes
  zonefs: Dynamically create file inodes when needed
  zonefs: Separate zone information from inode information
  zonefs: Reduce struct zonefs_inode_info size
  zonefs: Simplify IO error handling
  zonefs: Reorganize code
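[Editor's note: as a rough illustration of the second item above — not code from this pull — allocating inodes dynamically through the inode cache typically follows the standard VFS iget_locked() pattern sketched here. The function name example_get_inode() and the initialization details are placeholders, not zonefs symbols.]

#include <linux/fs.h>
#include <linux/err.h>

/*
 * Hedged sketch of on-demand inode creation via the VFS inode cache:
 * look up or allocate the inode, initialize it only if it is new,
 * then publish it. Placeholder names, not from the patch itself.
 */
static struct inode *example_get_inode(struct super_block *sb,
				       unsigned long ino)
{
	struct inode *inode = iget_locked(sb, ino);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	/* Already in the inode cache and fully set up: reuse it. */
	if (!(inode->i_state & I_NEW))
		return inode;

	/* New inode: initialize it here (mode, size, ops, ...). */
	inode->i_mode = S_IFREG | 0640;

	/* Clear I_NEW and wake up anyone waiting on the inode. */
	unlock_new_inode(inode);
	return inode;
}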
-rw-r--r--	fs/zonefs/Makefile	2
-rw-r--r--	fs/zonefs/file.c	878
-rw-r--r--	fs/zonefs/super.c	1887
-rw-r--r--	fs/zonefs/sysfs.c	2
-rw-r--r--	fs/zonefs/trace.h	20
-rw-r--r--	fs/zonefs/zonefs.h	110
6 files changed, 1656 insertions, 1243 deletions
diff --git a/fs/zonefs/Makefile b/fs/zonefs/Makefile
index 9fe54f5319f2..645f7229de4a 100644
--- a/fs/zonefs/Makefile
+++ b/fs/zonefs/Makefile
@@ -3,4 +3,4 @@ ccflags-y += -I$(src)
obj-$(CONFIG_ZONEFS_FS) += zonefs.o
-zonefs-y := super.o sysfs.o
+zonefs-y := super.o file.o sysfs.o
diff --git a/fs/zonefs/file.c b/fs/zonefs/file.c
new file mode 100644
index 000000000000..738b0e28d74b
--- /dev/null
+++ b/fs/zonefs/file.c
@@ -0,0 +1,878 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Simple file system for zoned block devices exposing zones as files.
+ *
+ * Copyright (C) 2022 Western Digital Corporation or its affiliates.
+ */
+#include <linux/module.h>
+#include <linux/pagemap.h>
+#include <linux/iomap.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/blkdev.h>
+#include <linux/statfs.h>
+#include <linux/writeback.h>
+#include <linux/quotaops.h>
+#include <linux/seq_file.h>
+#include <linux/parser.h>
+#include <linux/uio.h>
+#include <linux/mman.h>
+#include <linux/sched/mm.h>
+#include <linux/task_io_accounting_ops.h>
+
+#include "zonefs.h"
+
+#include "trace.h"
+
+static int zonefs_read_iomap_begin(struct inode *inode, loff_t offset,
+ loff_t length, unsigned int flags,
+ struct iomap *iomap, struct iomap *srcmap)
+{
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ struct super_block *sb = inode->i_sb;
+ loff_t isize;
+
+ /*
+ * All blocks are always mapped below EOF. If reading past EOF,
+ * act as if there is a hole up to the file maximum size.
+ */
+ mutex_lock(&zi->i_truncate_mutex);
+ iomap->bdev = inode->i_sb->s_bdev;
+ iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
+ isize = i_size_read(inode);
+ if (iomap->offset >= isize) {
+ iomap->type = IOMAP_HOLE;
+ iomap->addr = IOMAP_NULL_ADDR;
+ iomap->length = length;
+ } else {
+ iomap->type = IOMAP_MAPPED;
+ iomap->addr = (z->z_sector << SECTOR_SHIFT) + iomap->offset;
+ iomap->length = isize - iomap->offset;
+ }
+ mutex_unlock(&zi->i_truncate_mutex);
+
+ trace_zonefs_iomap_begin(inode, iomap);
+
+ return 0;
+}
+
+static const struct iomap_ops zonefs_read_iomap_ops = {
+ .iomap_begin = zonefs_read_iomap_begin,
+};
+
+static int zonefs_write_iomap_begin(struct inode *inode, loff_t offset,
+ loff_t length, unsigned int flags,
+ struct iomap *iomap, struct iomap *srcmap)
+{
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ struct super_block *sb = inode->i_sb;
+ loff_t isize;
+
+ /* All write I/Os should always be within the file maximum size */
+ if (WARN_ON_ONCE(offset + length > z->z_capacity))
+ return -EIO;
+
+ /*
+ * Sequential zones can only accept direct writes. This is already
+ * checked when writes are issued, so warn if we see a page writeback
+ * operation.
+ */
+ if (WARN_ON_ONCE(zonefs_zone_is_seq(z) && !(flags & IOMAP_DIRECT)))
+ return -EIO;
+
+ /*
+ * For conventional zones, all blocks are always mapped. For sequential
+ * zones, all blocks are always mapped below the inode size (zone
+ * write pointer) and unwritten beyond.
+ */
+ mutex_lock(&zi->i_truncate_mutex);
+ iomap->bdev = inode->i_sb->s_bdev;
+ iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
+ iomap->addr = (z->z_sector << SECTOR_SHIFT) + iomap->offset;
+ isize = i_size_read(inode);
+ if (iomap->offset >= isize) {
+ iomap->type = IOMAP_UNWRITTEN;
+ iomap->length = z->z_capacity - iomap->offset;
+ } else {
+ iomap->type = IOMAP_MAPPED;
+ iomap->length = isize - iomap->offset;
+ }
+ mutex_unlock(&zi->i_truncate_mutex);
+
+ trace_zonefs_iomap_begin(inode, iomap);
+
+ return 0;
+}
+
+static const struct iomap_ops zonefs_write_iomap_ops = {
+ .iomap_begin = zonefs_write_iomap_begin,
+};
+
+static int zonefs_read_folio(struct file *unused, struct folio *folio)
+{
+ return iomap_read_folio(folio, &zonefs_read_iomap_ops);
+}
+
+static void zonefs_readahead(struct readahead_control *rac)
+{
+ iomap_readahead(rac, &zonefs_read_iomap_ops);
+}
+
+/*
+ * Map blocks for page writeback. This is used only on conventional zone files,
+ * which implies that the page range can only be within the fixed inode size.
+ */
+static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc,
+ struct inode *inode, loff_t offset)
+{
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+
+ if (WARN_ON_ONCE(zonefs_zone_is_seq(z)))
+ return -EIO;
+ if (WARN_ON_ONCE(offset >= i_size_read(inode)))
+ return -EIO;
+
+ /* If the mapping is already OK, nothing needs to be done */
+ if (offset >= wpc->iomap.offset &&
+ offset < wpc->iomap.offset + wpc->iomap.length)
+ return 0;
+
+ return zonefs_write_iomap_begin(inode, offset,
+ z->z_capacity - offset,
+ IOMAP_WRITE, &wpc->iomap, NULL);
+}
+
+static const struct iomap_writeback_ops zonefs_writeback_ops = {
+ .map_blocks = zonefs_write_map_blocks,
+};
+
+static int zonefs_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ struct iomap_writepage_ctx wpc = { };
+
+ return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
+}
+
+static int zonefs_swap_activate(struct swap_info_struct *sis,
+ struct file *swap_file, sector_t *span)
+{
+ struct inode *inode = file_inode(swap_file);
+
+ if (zonefs_inode_is_seq(inode)) {
+ zonefs_err(inode->i_sb,
+ "swap file: not a conventional zone file\n");
+ return -EINVAL;
+ }
+
+ return iomap_swapfile_activate(sis, swap_file, span,
+ &zonefs_read_iomap_ops);
+}
+
+const struct address_space_operations zonefs_file_aops = {
+ .read_folio = zonefs_read_folio,
+ .readahead = zonefs_readahead,
+ .writepages = zonefs_writepages,
+ .dirty_folio = filemap_dirty_folio,
+ .release_folio = iomap_release_folio,
+ .invalidate_folio = iomap_invalidate_folio,
+ .migrate_folio = filemap_migrate_folio,
+ .is_partially_uptodate = iomap_is_partially_uptodate,
+ .error_remove_page = generic_error_remove_page,
+ .direct_IO = noop_direct_IO,
+ .swap_activate = zonefs_swap_activate,
+};
+
+int zonefs_file_truncate(struct inode *inode, loff_t isize)
+{
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ loff_t old_isize;
+ enum req_op op;
+ int ret = 0;
+
+ /*
+ * Only sequential zone files can be truncated and truncation is allowed
+ * only down to a 0 size, which is equivalent to a zone reset, and to
+ * the maximum file size, which is equivalent to a zone finish.
+ */
+ if (!zonefs_zone_is_seq(z))
+ return -EPERM;
+
+ if (!isize)
+ op = REQ_OP_ZONE_RESET;
+ else if (isize == z->z_capacity)
+ op = REQ_OP_ZONE_FINISH;
+ else
+ return -EPERM;
+
+ inode_dio_wait(inode);
+
+ /* Serialize against page faults */
+ filemap_invalidate_lock(inode->i_mapping);
+
+ /* Serialize against zonefs_iomap_begin() */
+ mutex_lock(&zi->i_truncate_mutex);
+
+ old_isize = i_size_read(inode);
+ if (isize == old_isize)
+ goto unlock;
+
+ ret = zonefs_inode_zone_mgmt(inode, op);
+ if (ret)
+ goto unlock;
+
+ /*
+ * If the mount option ZONEFS_MNTOPT_EXPLICIT_OPEN is set,
+ * take care of open zones.
+ */
+ if (z->z_flags & ZONEFS_ZONE_OPEN) {
+ /*
+ * Truncating a zone to EMPTY or FULL is the equivalent of
+ * closing the zone. For a truncation to 0, we need to
+ * re-open the zone to ensure new writes can be processed.
+ * For a truncation to the maximum file size, the zone is
+ * closed and writes cannot be accepted anymore, so clear
+ * the open flag.
+ */
+ if (!isize)
+ ret = zonefs_inode_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
+ else
+ z->z_flags &= ~ZONEFS_ZONE_OPEN;
+ }
+
+ zonefs_update_stats(inode, isize);
+ truncate_setsize(inode, isize);
+ z->z_wpoffset = isize;
+ zonefs_inode_account_active(inode);
+
+unlock:
+ mutex_unlock(&zi->i_truncate_mutex);
+ filemap_invalidate_unlock(inode->i_mapping);
+
+ return ret;
+}
+
+static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end,
+ int datasync)
+{
+ struct inode *inode = file_inode(file);
+ int ret = 0;
+
+ if (unlikely(IS_IMMUTABLE(inode)))
+ return -EPERM;
+
+ /*
+ * Since only direct writes are allowed in sequential files, page cache
+ * flush is needed only for conventional zone files.
+ */
+ if (zonefs_inode_is_cnv(inode))
+ ret = file_write_and_wait_range(file, start, end);
+ if (!ret)
+ ret = blkdev_issue_flush(inode->i_sb->s_bdev);
+
+ if (ret)
+ zonefs_io_error(inode, true);
+
+ return ret;
+}
+
+static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
+{
+ struct inode *inode = file_inode(vmf->vma->vm_file);
+ vm_fault_t ret;
+
+ if (unlikely(IS_IMMUTABLE(inode)))
+ return VM_FAULT_SIGBUS;
+
+ /*
+ * Sanity check: only conventional zone files can have shared
+ * writeable mappings.
+ */
+ if (zonefs_inode_is_seq(inode))
+ return VM_FAULT_NOPAGE;
+
+ sb_start_pagefault(inode->i_sb);
+ file_update_time(vmf->vma->vm_file);
+
+ /* Serialize against truncates */
+ filemap_invalidate_lock_shared(inode->i_mapping);
+ ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops);
+ filemap_invalidate_unlock_shared(inode->i_mapping);
+
+ sb_end_pagefault(inode->i_sb);
+ return ret;
+}
+
+static const struct vm_operations_struct zonefs_file_vm_ops = {
+ .fault = filemap_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = zonefs_filemap_page_mkwrite,
+};
+
+static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ /*
+ * Conventional zones accept random writes, so their files can support
+ * shared writable mappings. For sequential zone files, only read
+ * mappings are possible since there are no guarantees for write
+ * ordering between msync() and page cache writeback.
+ */
+ if (zonefs_inode_is_seq(file_inode(file)) &&
+ (vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
+ return -EINVAL;
+
+ file_accessed(file);
+ vma->vm_ops = &zonefs_file_vm_ops;
+
+ return 0;
+}
+
+static loff_t zonefs_file_llseek(struct file *file, loff_t offset, int whence)
+{
+ loff_t isize = i_size_read(file_inode(file));
+
+ /*
+ * Seeks are limited to below the zone size for conventional zones
+ * and below the zone write pointer for sequential zones. In both
+ * cases, this limit is the inode size.
+ */
+ return generic_file_llseek_size(file, offset, whence, isize, isize);
+}
+
+static int zonefs_file_write_dio_end_io(struct kiocb *iocb, ssize_t size,
+ int error, unsigned int flags)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+
+ if (error) {
+ zonefs_io_error(inode, true);
+ return error;
+ }
+
+ if (size && zonefs_inode_is_seq(inode)) {
+ /*
+ * Note that we may be seeing completions out of order,
+ * but that is not a problem since a write completed
+ * successfully necessarily means that all preceding writes
+ * were also successful. So we can safely increase the inode
+ * size to the write end location.
+ */
+ mutex_lock(&zi->i_truncate_mutex);
+ if (i_size_read(inode) < iocb->ki_pos + size) {
+ zonefs_update_stats(inode, iocb->ki_pos + size);
+ zonefs_i_size_write(inode, iocb->ki_pos + size);
+ }
+ mutex_unlock(&zi->i_truncate_mutex);
+ }
+
+ return 0;
+}
+
+static const struct iomap_dio_ops zonefs_write_dio_ops = {
+ .end_io = zonefs_file_write_dio_end_io,
+};
+
+static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ struct block_device *bdev = inode->i_sb->s_bdev;
+ unsigned int max = bdev_max_zone_append_sectors(bdev);
+ struct bio *bio;
+ ssize_t size;
+ int nr_pages;
+ ssize_t ret;
+
+ max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
+ iov_iter_truncate(from, max);
+
+ nr_pages = iov_iter_npages(from, BIO_MAX_VECS);
+ if (!nr_pages)
+ return 0;
+
+ bio = bio_alloc(bdev, nr_pages,
+ REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE, GFP_NOFS);
+ bio->bi_iter.bi_sector = z->z_sector;
+ bio->bi_ioprio = iocb->ki_ioprio;
+ if (iocb_is_dsync(iocb))
+ bio->bi_opf |= REQ_FUA;
+
+ ret = bio_iov_iter_get_pages(bio, from);
+ if (unlikely(ret))
+ goto out_release;
+
+ size = bio->bi_iter.bi_size;
+ task_io_account_write(size);
+
+ if (iocb->ki_flags & IOCB_HIPRI)
+ bio_set_polled(bio, iocb);
+
+ ret = submit_bio_wait(bio);
+
+ /*
+ * If the file zone was written underneath the file system, the zone
+ * write pointer may not be where we expect it to be, but the zone
+ * append write can still succeed. So check manually that we wrote where
+ * we intended to, that is, at z->z_wpoffset.
+ */
+ if (!ret) {
+ sector_t wpsector =
+ z->z_sector + (z->z_wpoffset >> SECTOR_SHIFT);
+
+ if (bio->bi_iter.bi_sector != wpsector) {
+ zonefs_warn(inode->i_sb,
+ "Corrupted write pointer %llu for zone at %llu\n",
+ wpsector, z->z_sector);
+ ret = -EIO;
+ }
+ }
+
+ zonefs_file_write_dio_end_io(iocb, size, ret, 0);
+ trace_zonefs_file_dio_append(inode, size, ret);
+
+out_release:
+ bio_release_pages(bio, false);
+ bio_put(bio);
+
+ if (ret >= 0) {
+ iocb->ki_pos += size;
+ return size;
+ }
+
+ return ret;
+}
+
+/*
+ * Do not exceed the LFS limits nor the file zone size. If pos is under the
+ * limit it becomes a short access. If it exceeds the limit, return -EFBIG.
+ */
+static loff_t zonefs_write_check_limits(struct file *file, loff_t pos,
+ loff_t count)
+{
+ struct inode *inode = file_inode(file);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ loff_t limit = rlimit(RLIMIT_FSIZE);
+ loff_t max_size = z->z_capacity;
+
+ if (limit != RLIM_INFINITY) {
+ if (pos >= limit) {
+ send_sig(SIGXFSZ, current, 0);
+ return -EFBIG;
+ }
+ count = min(count, limit - pos);
+ }
+
+ if (!(file->f_flags & O_LARGEFILE))
+ max_size = min_t(loff_t, MAX_NON_LFS, max_size);
+
+ if (unlikely(pos >= max_size))
+ return -EFBIG;
+
+ return min(count, max_size - pos);
+}
+
+static ssize_t zonefs_write_checks(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ loff_t count;
+
+ if (IS_SWAPFILE(inode))
+ return -ETXTBSY;
+
+ if (!iov_iter_count(from))
+ return 0;
+
+ if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
+ return -EINVAL;
+
+ if (iocb->ki_flags & IOCB_APPEND) {
+ if (zonefs_zone_is_cnv(z))
+ return -EINVAL;
+ mutex_lock(&zi->i_truncate_mutex);
+ iocb->ki_pos = z->z_wpoffset;
+ mutex_unlock(&zi->i_truncate_mutex);
+ }
+
+ count = zonefs_write_check_limits(file, iocb->ki_pos,
+ iov_iter_count(from));
+ if (count < 0)
+ return count;
+
+ iov_iter_truncate(from, count);
+ return iov_iter_count(from);
+}
+
+/*
+ * Handle direct writes. For sequential zone files, this is the only possible
+ * write path. For these files, check that the user is issuing writes
+ * sequentially from the end of the file. This code assumes that the block layer
+ * delivers write requests to the device in sequential order. This is always the
+ * case if a block IO scheduler implementing the ELEVATOR_F_ZBD_SEQ_WRITE
+ * elevator feature is being used (e.g. mq-deadline). The block layer always
+ * automatically select such an elevator for zoned block devices during the
+ * device initialization.
+ */
+static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ struct super_block *sb = inode->i_sb;
+ bool sync = is_sync_kiocb(iocb);
+ bool append = false;
+ ssize_t ret, count;
+
+ /*
+ * For async direct IOs to sequential zone files, refuse IOCB_NOWAIT
+ * as this can cause write reordering (e.g. the first aio gets EAGAIN
+ * on the inode lock but the second goes through but is now unaligned).
+ */
+ if (zonefs_zone_is_seq(z) && !sync && (iocb->ki_flags & IOCB_NOWAIT))
+ return -EOPNOTSUPP;
+
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!inode_trylock(inode))
+ return -EAGAIN;
+ } else {
+ inode_lock(inode);
+ }
+
+ count = zonefs_write_checks(iocb, from);
+ if (count <= 0) {
+ ret = count;
+ goto inode_unlock;
+ }
+
+ if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
+ ret = -EINVAL;
+ goto inode_unlock;
+ }
+
+ /* Enforce sequential writes (append only) in sequential zones */
+ if (zonefs_zone_is_seq(z)) {
+ mutex_lock(&zi->i_truncate_mutex);
+ if (iocb->ki_pos != z->z_wpoffset) {
+ mutex_unlock(&zi->i_truncate_mutex);
+ ret = -EINVAL;
+ goto inode_unlock;
+ }
+ mutex_unlock(&zi->i_truncate_mutex);
+ append = sync;
+ }
+
+ if (append)
+ ret = zonefs_file_dio_append(iocb, from);
+ else
+ ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops,
+ &zonefs_write_dio_ops, 0, NULL, 0);
+ if (zonefs_zone_is_seq(z) &&
+ (ret > 0 || ret == -EIOCBQUEUED)) {
+ if (ret > 0)
+ count = ret;
+
+ /*
+ * Update the zone write pointer offset assuming the write
+ * operation succeeded. If it did not, the error recovery path
+ * will correct it. Also do active seq file accounting.
+ */
+ mutex_lock(&zi->i_truncate_mutex);
+ z->z_wpoffset += count;
+ zonefs_inode_account_active(inode);
+ mutex_unlock(&zi->i_truncate_mutex);
+ }
+
+inode_unlock:
+ inode_unlock(inode);
+
+ return ret;
+}
+
+static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ ssize_t ret;
+
+ /*
+ * Direct IO writes are mandatory for sequential zone files so that the
+ * write IO issuing order is preserved.
+ */
+ if (zonefs_inode_is_seq(inode))
+ return -EIO;
+
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!inode_trylock(inode))
+ return -EAGAIN;
+ } else {
+ inode_lock(inode);
+ }
+
+ ret = zonefs_write_checks(iocb, from);
+ if (ret <= 0)
+ goto inode_unlock;
+
+ ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops);
+ if (ret > 0)
+ iocb->ki_pos += ret;
+ else if (ret == -EIO)
+ zonefs_io_error(inode, true);
+
+inode_unlock:
+ inode_unlock(inode);
+ if (ret > 0)
+ ret = generic_write_sync(iocb, ret);
+
+ return ret;
+}
+
+static ssize_t zonefs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+
+ if (unlikely(IS_IMMUTABLE(inode)))
+ return -EPERM;
+
+ if (sb_rdonly(inode->i_sb))
+ return -EROFS;
+
+ /* Write operations beyond the zone capacity are not allowed */
+ if (iocb->ki_pos >= z->z_capacity)
+ return -EFBIG;
+
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ ssize_t ret = zonefs_file_dio_write(iocb, from);
+
+ if (ret != -ENOTBLK)
+ return ret;
+ }
+
+ return zonefs_file_buffered_write(iocb, from);
+}
+
+static int zonefs_file_read_dio_end_io(struct kiocb *iocb, ssize_t size,
+ int error, unsigned int flags)
+{
+ if (error) {
+ zonefs_io_error(file_inode(iocb->ki_filp), false);
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct iomap_dio_ops zonefs_read_dio_ops = {
+ .end_io = zonefs_file_read_dio_end_io,
+};
+
+static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ struct super_block *sb = inode->i_sb;
+ loff_t isize;
+ ssize_t ret;
+
+ /* Offline zones cannot be read */
+ if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777)))
+ return -EPERM;
+
+ if (iocb->ki_pos >= z->z_capacity)
+ return 0;
+
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!inode_trylock_shared(inode))
+ return -EAGAIN;
+ } else {
+ inode_lock_shared(inode);
+ }
+
+ /* Limit read operations to written data */
+ mutex_lock(&zi->i_truncate_mutex);
+ isize = i_size_read(inode);
+ if (iocb->ki_pos >= isize) {
+ mutex_unlock(&zi->i_truncate_mutex);
+ ret = 0;
+ goto inode_unlock;
+ }
+ iov_iter_truncate(to, isize - iocb->ki_pos);
+ mutex_unlock(&zi->i_truncate_mutex);
+
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ size_t count = iov_iter_count(to);
+
+ if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
+ ret = -EINVAL;
+ goto inode_unlock;
+ }
+ file_accessed(iocb->ki_filp);
+ ret = iomap_dio_rw(iocb, to, &zonefs_read_iomap_ops,
+ &zonefs_read_dio_ops, 0, NULL, 0);
+ } else {
+ ret = generic_file_read_iter(iocb, to);
+ if (ret == -EIO)
+ zonefs_io_error(inode, false);
+ }
+
+inode_unlock:
+ inode_unlock_shared(inode);
+
+ return ret;
+}
+
+/*
+ * Write open accounting is done only for sequential files.
+ */
+static inline bool zonefs_seq_file_need_wro(struct inode *inode,
+ struct file *file)
+{
+ if (zonefs_inode_is_cnv(inode))
+ return false;
+
+ if (!(file->f_mode & FMODE_WRITE))
+ return false;
+
+ return true;
+}
+
+static int zonefs_seq_file_write_open(struct inode *inode)
+{
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ int ret = 0;
+
+ mutex_lock(&zi->i_truncate_mutex);
+
+ if (!zi->i_wr_refcnt) {
+ struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
+ unsigned int wro = atomic_inc_return(&sbi->s_wro_seq_files);
+
+ if (sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
+
+ if (sbi->s_max_wro_seq_files
+ && wro > sbi->s_max_wro_seq_files) {
+ atomic_dec(&sbi->s_wro_seq_files);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (i_size_read(inode) < z->z_capacity) {
+ ret = zonefs_inode_zone_mgmt(inode,
+ REQ_OP_ZONE_OPEN);
+ if (ret) {
+ atomic_dec(&sbi->s_wro_seq_files);
+ goto unlock;
+ }
+ z->z_flags |= ZONEFS_ZONE_OPEN;
+ zonefs_inode_account_active(inode);
+ }
+ }
+ }
+
+ zi->i_wr_refcnt++;
+
+unlock:
+ mutex_unlock(&zi->i_truncate_mutex);
+
+ return ret;
+}
+
+static int zonefs_file_open(struct inode *inode, struct file *file)
+{
+ int ret;
+
+ ret = generic_file_open(inode, file);
+ if (ret)
+ return ret;
+
+ if (zonefs_seq_file_need_wro(inode, file))
+ return zonefs_seq_file_write_open(inode);
+
+ return 0;
+}
+
+static void zonefs_seq_file_write_close(struct inode *inode)
+{
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ struct super_block *sb = inode->i_sb;
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ int ret = 0;
+
+ mutex_lock(&zi->i_truncate_mutex);
+
+ zi->i_wr_refcnt--;
+ if (zi->i_wr_refcnt)
+ goto unlock;
+
+ /*
+ * The file zone may not be open anymore (e.g. the file was truncated to
+ * its maximum size or it was fully written). For this case, we only
+ * need to decrement the write open count.
+ */
+ if (z->z_flags & ZONEFS_ZONE_OPEN) {
+ ret = zonefs_inode_zone_mgmt(inode, REQ_OP_ZONE_CLOSE);
+ if (ret) {
+ __zonefs_io_error(inode, false);
+ /*
+ * Leaving zones explicitly open may lead to a state
+ * where most zones cannot be written (zone resources
+ * exhausted). So take preventive action by remounting
+ * read-only.
+ */
+ if (z->z_flags & ZONEFS_ZONE_OPEN &&
+ !(sb->s_flags & SB_RDONLY)) {
+ zonefs_warn(sb,
+ "closing zone at %llu failed %d\n",
+ z->z_sector, ret);
+ zonefs_warn(sb,
+ "remounting filesystem read-only\n");
+ sb->s_flags |= SB_RDONLY;
+ }
+ goto unlock;
+ }
+
+ z->z_flags &= ~ZONEFS_ZONE_OPEN;
+ zonefs_inode_account_active(inode);
+ }
+
+ atomic_dec(&sbi->s_wro_seq_files);
+
+unlock:
+ mutex_unlock(&zi->i_truncate_mutex);
+}
+
+static int zonefs_file_release(struct inode *inode, struct file *file)
+{
+ /*
+ * If we explicitly open a zone we must close it again as well, but the
+ * zone management operation can fail (either due to an IO error or as
+ * the zone has gone offline or read-only). Make sure we don't fail the
+ * close(2) for user-space.
+ */
+ if (zonefs_seq_file_need_wro(inode, file))
+ zonefs_seq_file_write_close(inode);
+
+ return 0;
+}
+
+const struct file_operations zonefs_file_operations = {
+ .open = zonefs_file_open,
+ .release = zonefs_file_release,
+ .fsync = zonefs_file_fsync,
+ .mmap = zonefs_file_mmap,
+ .llseek = zonefs_file_llseek,
+ .read_iter = zonefs_file_read_iter,
+ .write_iter = zonefs_file_write_iter,
+ .splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
+ .iopoll = iocb_bio_iopoll,
+};
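[Editor's note: taken together, the file operations above enforce a simple userspace contract for sequential zone files: writes must use O_DIRECT, must start at the current write pointer (exposed as the file size), and truncation is only valid to 0 (zone reset) or to the zone capacity (zone finish). A minimal userspace sketch follows, assuming a zonefs mount at /mnt/zonefs and a 4096-byte logical block size — both hypothetical.]

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical sequential zone file; path depends on the mount. */
	const char *path = "/mnt/zonefs/seq/0";
	struct stat st;
	void *buf;
	int fd;

	/* Sequential zone files only accept direct IO writes. */
	fd = open(path, O_WRONLY | O_DIRECT);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* The file size is the zone write pointer: writes must land there. */
	if (fstat(fd, &st) < 0) {
		perror("fstat");
		return 1;
	}

	/* Direct IO buffers must be block aligned (4096 assumed here). */
	if (posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 0xab, 4096);

	/* A write at any offset other than st.st_size fails with EINVAL. */
	if (pwrite(fd, buf, 4096, st.st_size) != 4096)
		perror("pwrite");

	/* Truncating to 0 resets the zone; to the capacity, finishes it. */
	if (ftruncate(fd, 0) < 0)
		perror("ftruncate");

	free(buf);
	close(fd);
	return 0;
}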
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index 72ef97320b99..23b8b299c64e 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -28,33 +28,47 @@
#include "trace.h"
/*
- * Manage the active zone count. Called with zi->i_truncate_mutex held.
+ * Get the name of a zone group directory.
*/
-static void zonefs_account_active(struct inode *inode)
+static const char *zonefs_zgroup_name(enum zonefs_ztype ztype)
{
- struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ switch (ztype) {
+ case ZONEFS_ZTYPE_CNV:
+ return "cnv";
+ case ZONEFS_ZTYPE_SEQ:
+ return "seq";
+ default:
+ WARN_ON_ONCE(1);
+ return "???";
+ }
+}
- lockdep_assert_held(&zi->i_truncate_mutex);
+/*
+ * Manage the active zone count.
+ */
+static void zonefs_account_active(struct super_block *sb,
+ struct zonefs_zone *z)
+{
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
- if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
+ if (zonefs_zone_is_cnv(z))
return;
/*
* For zones that transitioned to the offline or readonly condition,
* we only need to clear the active state.
*/
- if (zi->i_flags & (ZONEFS_ZONE_OFFLINE | ZONEFS_ZONE_READONLY))
+ if (z->z_flags & (ZONEFS_ZONE_OFFLINE | ZONEFS_ZONE_READONLY))
goto out;
/*
* If the zone is active, that is, if it is explicitly open or
* partially written, check if it was already accounted as active.
*/
- if ((zi->i_flags & ZONEFS_ZONE_OPEN) ||
- (zi->i_wpoffset > 0 && zi->i_wpoffset < zi->i_max_size)) {
- if (!(zi->i_flags & ZONEFS_ZONE_ACTIVE)) {
- zi->i_flags |= ZONEFS_ZONE_ACTIVE;
+ if ((z->z_flags & ZONEFS_ZONE_OPEN) ||
+ (z->z_wpoffset > 0 && z->z_wpoffset < z->z_capacity)) {
+ if (!(z->z_flags & ZONEFS_ZONE_ACTIVE)) {
+ z->z_flags |= ZONEFS_ZONE_ACTIVE;
atomic_inc(&sbi->s_active_seq_files);
}
return;
@@ -62,18 +76,29 @@ static void zonefs_account_active(struct inode *inode)
out:
/* The zone is not active. If it was, update the active count */
- if (zi->i_flags & ZONEFS_ZONE_ACTIVE) {
- zi->i_flags &= ~ZONEFS_ZONE_ACTIVE;
+ if (z->z_flags & ZONEFS_ZONE_ACTIVE) {
+ z->z_flags &= ~ZONEFS_ZONE_ACTIVE;
atomic_dec(&sbi->s_active_seq_files);
}
}
-static inline int zonefs_zone_mgmt(struct inode *inode, enum req_op op)
+/*
+ * Manage the active zone count. Called with zi->i_truncate_mutex held.
+ */
+void zonefs_inode_account_active(struct inode *inode)
{
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
- int ret;
+ lockdep_assert_held(&ZONEFS_I(inode)->i_truncate_mutex);
- lockdep_assert_held(&zi->i_truncate_mutex);
+ return zonefs_account_active(inode->i_sb, zonefs_inode_zone(inode));
+}
+
+/*
+ * Execute a zone management operation.
+ */
+static int zonefs_zone_mgmt(struct super_block *sb,
+ struct zonefs_zone *z, enum req_op op)
+{
+ int ret;
/*
* With ZNS drives, closing an explicitly open zone that has not been
@@ -83,201 +108,49 @@ static inline int zonefs_zone_mgmt(struct inode *inode, enum req_op op)
* are exceeded, make sure that the zone does not remain active by
* resetting it.
*/
- if (op == REQ_OP_ZONE_CLOSE && !zi->i_wpoffset)
+ if (op == REQ_OP_ZONE_CLOSE && !z->z_wpoffset)
op = REQ_OP_ZONE_RESET;
- trace_zonefs_zone_mgmt(inode, op);
- ret = blkdev_zone_mgmt(inode->i_sb->s_bdev, op, zi->i_zsector,
- zi->i_zone_size >> SECTOR_SHIFT, GFP_NOFS);
+ trace_zonefs_zone_mgmt(sb, z, op);
+ ret = blkdev_zone_mgmt(sb->s_bdev, op, z->z_sector,
+ z->z_size >> SECTOR_SHIFT, GFP_NOFS);
if (ret) {
- zonefs_err(inode->i_sb,
+ zonefs_err(sb,
"Zone management operation %s at %llu failed %d\n",
- blk_op_str(op), zi->i_zsector, ret);
+ blk_op_str(op), z->z_sector, ret);
return ret;
}
return 0;
}
-static inline void zonefs_i_size_write(struct inode *inode, loff_t isize)
-{
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
-
- i_size_write(inode, isize);
- /*
- * A full zone is no longer open/active and does not need
- * explicit closing.
- */
- if (isize >= zi->i_max_size) {
- struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
-
- if (zi->i_flags & ZONEFS_ZONE_ACTIVE)
- atomic_dec(&sbi->s_active_seq_files);
- zi->i_flags &= ~(ZONEFS_ZONE_OPEN | ZONEFS_ZONE_ACTIVE);
- }
-}
-
-static int zonefs_read_iomap_begin(struct inode *inode, loff_t offset,
- loff_t length, unsigned int flags,
- struct iomap *iomap, struct iomap *srcmap)
+int zonefs_inode_zone_mgmt(struct inode *inode, enum req_op op)
{
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
- struct super_block *sb = inode->i_sb;
- loff_t isize;
+ lockdep_assert_held(&ZONEFS_I(inode)->i_truncate_mutex);
- /*
- * All blocks are always mapped below EOF. If reading past EOF,
- * act as if there is a hole up to the file maximum size.
- */
- mutex_lock(&zi->i_truncate_mutex);
- iomap->bdev = inode->i_sb->s_bdev;
- iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
- isize = i_size_read(inode);
- if (iomap->offset >= isize) {
- iomap->type = IOMAP_HOLE;
- iomap->addr = IOMAP_NULL_ADDR;
- iomap->length = length;
- } else {
- iomap->type = IOMAP_MAPPED;
- iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
- iomap->length = isize - iomap->offset;
- }
- mutex_unlock(&zi->i_truncate_mutex);
-
- trace_zonefs_iomap_begin(inode, iomap);
-
- return 0;
+ return zonefs_zone_mgmt(inode->i_sb, zonefs_inode_zone(inode), op);
}
-static const struct iomap_ops zonefs_read_iomap_ops = {
- .iomap_begin = zonefs_read_iomap_begin,
-};
-
-static int zonefs_write_iomap_begin(struct inode *inode, loff_t offset,
- loff_t length, unsigned int flags,
- struct iomap *iomap, struct iomap *srcmap)
+void zonefs_i_size_write(struct inode *inode, loff_t isize)
{
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
- struct super_block *sb = inode->i_sb;
- loff_t isize;
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
- /* All write I/Os should always be within the file maximum size */
- if (WARN_ON_ONCE(offset + length > zi->i_max_size))
- return -EIO;
-
- /*
- * Sequential zones can only accept direct writes. This is already
- * checked when writes are issued, so warn if we see a page writeback
- * operation.
- */
- if (WARN_ON_ONCE(zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
- !(flags & IOMAP_DIRECT)))
- return -EIO;
+ i_size_write(inode, isize);
/*
- * For conventional zones, all blocks are always mapped. For sequential
- * zones, all blocks after always mapped below the inode size (zone
- * write pointer) and unwriten beyond.
+ * A full zone is no longer open/active and does not need
+ * explicit closing.
*/
- mutex_lock(&zi->i_truncate_mutex);
- iomap->bdev = inode->i_sb->s_bdev;
- iomap->offset = ALIGN_DOWN(offset, sb->s_blocksize);
- iomap->addr = (zi->i_zsector << SECTOR_SHIFT) + iomap->offset;
- isize = i_size_read(inode);
- if (iomap->offset >= isize) {
- iomap->type = IOMAP_UNWRITTEN;
- iomap->length = zi->i_max_size - iomap->offset;
- } else {
- iomap->type = IOMAP_MAPPED;
- iomap->length = isize - iomap->offset;
- }
- mutex_unlock(&zi->i_truncate_mutex);
-
- trace_zonefs_iomap_begin(inode, iomap);
-
- return 0;
-}
-
-static const struct iomap_ops zonefs_write_iomap_ops = {
- .iomap_begin = zonefs_write_iomap_begin,
-};
-
-static int zonefs_read_folio(struct file *unused, struct folio *folio)
-{
- return iomap_read_folio(folio, &zonefs_read_iomap_ops);
-}
-
-static void zonefs_readahead(struct readahead_control *rac)
-{
- iomap_readahead(rac, &zonefs_read_iomap_ops);
-}
-
-/*
- * Map blocks for page writeback. This is used only on conventional zone files,
- * which implies that the page range can only be within the fixed inode size.
- */
-static int zonefs_write_map_blocks(struct iomap_writepage_ctx *wpc,
- struct inode *inode, loff_t offset)
-{
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
-
- if (WARN_ON_ONCE(zi->i_ztype != ZONEFS_ZTYPE_CNV))
- return -EIO;
- if (WARN_ON_ONCE(offset >= i_size_read(inode)))
- return -EIO;
-
- /* If the mapping is already OK, nothing needs to be done */
- if (offset >= wpc->iomap.offset &&
- offset < wpc->iomap.offset + wpc->iomap.length)
- return 0;
-
- return zonefs_write_iomap_begin(inode, offset, zi->i_max_size - offset,
- IOMAP_WRITE, &wpc->iomap, NULL);
-}
-
-static const struct iomap_writeback_ops zonefs_writeback_ops = {
- .map_blocks = zonefs_write_map_blocks,
-};
-
-static int zonefs_writepages(struct address_space *mapping,
- struct writeback_control *wbc)
-{
- struct iomap_writepage_ctx wpc = { };
-
- return iomap_writepages(mapping, wbc, &wpc, &zonefs_writeback_ops);
-}
-
-static int zonefs_swap_activate(struct swap_info_struct *sis,
- struct file *swap_file, sector_t *span)
-{
- struct inode *inode = file_inode(swap_file);
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ if (isize >= z->z_capacity) {
+ struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
- if (zi->i_ztype != ZONEFS_ZTYPE_CNV) {
- zonefs_err(inode->i_sb,
- "swap file: not a conventional zone file\n");
- return -EINVAL;
+ if (z->z_flags & ZONEFS_ZONE_ACTIVE)
+ atomic_dec(&sbi->s_active_seq_files);
+ z->z_flags &= ~(ZONEFS_ZONE_OPEN | ZONEFS_ZONE_ACTIVE);
}
-
- return iomap_swapfile_activate(sis, swap_file, span,
- &zonefs_read_iomap_ops);
}
-static const struct address_space_operations zonefs_file_aops = {
- .read_folio = zonefs_read_folio,
- .readahead = zonefs_readahead,
- .writepages = zonefs_writepages,
- .dirty_folio = filemap_dirty_folio,
- .release_folio = iomap_release_folio,
- .invalidate_folio = iomap_invalidate_folio,
- .migrate_folio = filemap_migrate_folio,
- .is_partially_uptodate = iomap_is_partially_uptodate,
- .error_remove_page = generic_error_remove_page,
- .direct_IO = noop_direct_IO,
- .swap_activate = zonefs_swap_activate,
-};
-
-static void zonefs_update_stats(struct inode *inode, loff_t new_isize)
+void zonefs_update_stats(struct inode *inode, loff_t new_isize)
{
struct super_block *sb = inode->i_sb;
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
@@ -310,63 +183,69 @@ static void zonefs_update_stats(struct inode *inode, loff_t new_isize)
}
/*
- * Check a zone condition and adjust its file inode access permissions for
- * offline and readonly zones. Return the inode size corresponding to the
- * amount of readable data in the zone.
+ * Check a zone condition. Return the amount of written (and still readable)
+ * data in the zone.
*/
-static loff_t zonefs_check_zone_condition(struct inode *inode,
- struct blk_zone *zone, bool warn,
- bool mount)
+static loff_t zonefs_check_zone_condition(struct super_block *sb,
+ struct zonefs_zone *z,
+ struct blk_zone *zone)
{
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
-
switch (zone->cond) {
case BLK_ZONE_COND_OFFLINE:
- /*
- * Dead zone: make the inode immutable, disable all accesses
- * and set the file size to 0 (zone wp set to zone start).
- */
- if (warn)
- zonefs_warn(inode->i_sb, "inode %lu: offline zone\n",
- inode->i_ino);
- inode->i_flags |= S_IMMUTABLE;
- inode->i_mode &= ~0777;
- zone->wp = zone->start;
- zi->i_flags |= ZONEFS_ZONE_OFFLINE;
+ zonefs_warn(sb, "Zone %llu: offline zone\n",
+ z->z_sector);
+ z->z_flags |= ZONEFS_ZONE_OFFLINE;
return 0;
case BLK_ZONE_COND_READONLY:
/*
- * The write pointer of read-only zones is invalid. If such a
- * zone is found during mount, the file size cannot be retrieved
- * so we treat the zone as offline (mount == true case).
- * Otherwise, keep the file size as it was when last updated
- * so that the user can recover data. In both cases, writes are
- * always disabled for the zone.
+ * The write pointer of read-only zones is invalid, so we cannot
+ * determine the zone wpoffset (inode size). We thus keep the
+ * zone wpoffset as is, which leads to an empty file
+ * (wpoffset == 0) on mount. For a runtime error, this keeps
+ * the inode size as it was when last updated so that the user
+ * can recover data.
*/
- if (warn)
- zonefs_warn(inode->i_sb, "inode %lu: read-only zone\n",
- inode->i_ino);
- inode->i_flags |= S_IMMUTABLE;
- if (mount) {
- zone->cond = BLK_ZONE_COND_OFFLINE;
- inode->i_mode &= ~0777;
- zone->wp = zone->start;
- zi->i_flags |= ZONEFS_ZONE_OFFLINE;
- return 0;
- }
- zi->i_flags |= ZONEFS_ZONE_READONLY;
- inode->i_mode &= ~0222;
- return i_size_read(inode);
+ zonefs_warn(sb, "Zone %llu: read-only zone\n",
+ z->z_sector);
+ z->z_flags |= ZONEFS_ZONE_READONLY;
+ if (zonefs_zone_is_cnv(z))
+ return z->z_capacity;
+ return z->z_wpoffset;
case BLK_ZONE_COND_FULL:
/* The write pointer of full zones is invalid. */
- return zi->i_max_size;
+ return z->z_capacity;
default:
- if (zi->i_ztype == ZONEFS_ZTYPE_CNV)
- return zi->i_max_size;
+ if (zonefs_zone_is_cnv(z))
+ return z->z_capacity;
return (zone->wp - zone->start) << SECTOR_SHIFT;
}
}
+/*
+ * Check a zone condition and adjust its inode access permissions for
+ * offline and readonly zones.
+ */
+static void zonefs_inode_update_mode(struct inode *inode)
+{
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+
+ if (z->z_flags & ZONEFS_ZONE_OFFLINE) {
+ /* Offline zones cannot be read nor written */
+ inode->i_flags |= S_IMMUTABLE;
+ inode->i_mode &= ~0777;
+ } else if (z->z_flags & ZONEFS_ZONE_READONLY) {
+ /* Readonly zones cannot be written */
+ inode->i_flags |= S_IMMUTABLE;
+ if (z->z_flags & ZONEFS_ZONE_INIT_MODE)
+ inode->i_mode &= ~0777;
+ else
+ inode->i_mode &= ~0222;
+ }
+
+ z->z_flags &= ~ZONEFS_ZONE_INIT_MODE;
+ z->z_mode = inode->i_mode;
+}
+
struct zonefs_ioerr_data {
struct inode *inode;
bool write;
@@ -377,7 +256,7 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
{
struct zonefs_ioerr_data *err = data;
struct inode *inode = err->inode;
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
struct super_block *sb = inode->i_sb;
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
loff_t isize, data_size;
@@ -388,10 +267,9 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
* as there is no inconsistency between the inode size and the amount of
 * data written in the zone (data_size).
*/
- data_size = zonefs_check_zone_condition(inode, zone, true, false);
+ data_size = zonefs_check_zone_condition(sb, z, zone);
isize = i_size_read(inode);
- if (zone->cond != BLK_ZONE_COND_OFFLINE &&
- zone->cond != BLK_ZONE_COND_READONLY &&
+ if (!(z->z_flags & (ZONEFS_ZONE_READONLY | ZONEFS_ZONE_OFFLINE)) &&
!err->write && isize == data_size)
return 0;
@@ -414,8 +292,9 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
* In all cases, warn about inode size inconsistency and handle the
* IO error according to the zone condition and to the mount options.
*/
- if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && isize != data_size)
- zonefs_warn(sb, "inode %lu: invalid size %lld (should be %lld)\n",
+ if (zonefs_zone_is_seq(z) && isize != data_size)
+ zonefs_warn(sb,
+ "inode %lu: invalid size %lld (should be %lld)\n",
inode->i_ino, isize, data_size);
/*
@@ -424,24 +303,22 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
* zone condition to read-only and offline respectively, as if the
* condition was signaled by the hardware.
*/
- if (zone->cond == BLK_ZONE_COND_OFFLINE ||
- sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL) {
+ if ((z->z_flags & ZONEFS_ZONE_OFFLINE) ||
+ (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZOL)) {
zonefs_warn(sb, "inode %lu: read/write access disabled\n",
inode->i_ino);
- if (zone->cond != BLK_ZONE_COND_OFFLINE) {
- zone->cond = BLK_ZONE_COND_OFFLINE;
- data_size = zonefs_check_zone_condition(inode, zone,
- false, false);
- }
- } else if (zone->cond == BLK_ZONE_COND_READONLY ||
- sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO) {
+ if (!(z->z_flags & ZONEFS_ZONE_OFFLINE))
+ z->z_flags |= ZONEFS_ZONE_OFFLINE;
+ zonefs_inode_update_mode(inode);
+ data_size = 0;
+ } else if ((z->z_flags & ZONEFS_ZONE_READONLY) ||
+ (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_ZRO)) {
zonefs_warn(sb, "inode %lu: write access disabled\n",
inode->i_ino);
- if (zone->cond != BLK_ZONE_COND_READONLY) {
- zone->cond = BLK_ZONE_COND_READONLY;
- data_size = zonefs_check_zone_condition(inode, zone,
- false, false);
- }
+ if (!(z->z_flags & ZONEFS_ZONE_READONLY))
+ z->z_flags |= ZONEFS_ZONE_READONLY;
+ zonefs_inode_update_mode(inode);
+ data_size = isize;
} else if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO &&
data_size > isize) {
/* Do not expose garbage data */
@@ -455,9 +332,8 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
* close of the zone when the inode file is closed.
*/
if ((sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) &&
- (zone->cond == BLK_ZONE_COND_OFFLINE ||
- zone->cond == BLK_ZONE_COND_READONLY))
- zi->i_flags &= ~ZONEFS_ZONE_OPEN;
+ (z->z_flags & (ZONEFS_ZONE_READONLY | ZONEFS_ZONE_OFFLINE)))
+ z->z_flags &= ~ZONEFS_ZONE_OPEN;
/*
* If error=remount-ro was specified, any error result in remounting
@@ -474,8 +350,8 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
*/
zonefs_update_stats(inode, data_size);
zonefs_i_size_write(inode, data_size);
- zi->i_wpoffset = data_size;
- zonefs_account_active(inode);
+ z->z_wpoffset = data_size;
+ zonefs_inode_account_active(inode);
return 0;
}
@@ -487,9 +363,9 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
* eventually correct the file size and zonefs inode write pointer offset
* (which can be out of sync with the drive due to partial write failures).
*/
-static void __zonefs_io_error(struct inode *inode, bool write)
+void __zonefs_io_error(struct inode *inode, bool write)
{
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
struct super_block *sb = inode->i_sb;
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
unsigned int noio_flag;
@@ -505,8 +381,8 @@ static void __zonefs_io_error(struct inode *inode, bool write)
* files with aggregated conventional zones, for which the inode zone
* size is always larger than the device zone size.
*/
- if (zi->i_zone_size > bdev_zone_sectors(sb->s_bdev))
- nr_zones = zi->i_zone_size >>
+ if (z->z_size > bdev_zone_sectors(sb->s_bdev))
+ nr_zones = z->z_size >>
(sbi->s_zone_sectors_shift + SECTOR_SHIFT);
/*
@@ -518,7 +394,7 @@ static void __zonefs_io_error(struct inode *inode, bool write)
* the GFP_NOIO context avoids both problems.
*/
noio_flag = memalloc_noio_save();
- ret = blkdev_report_zones(sb->s_bdev, zi->i_zsector, nr_zones,
+ ret = blkdev_report_zones(sb->s_bdev, z->z_sector, nr_zones,
zonefs_io_error_cb, &err);
if (ret != nr_zones)
zonefs_err(sb, "Get inode %lu zone information failed %d\n",
@@ -526,749 +402,6 @@ static void __zonefs_io_error(struct inode *inode, bool write)
memalloc_noio_restore(noio_flag);
}
-static void zonefs_io_error(struct inode *inode, bool write)
-{
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
-
- mutex_lock(&zi->i_truncate_mutex);
- __zonefs_io_error(inode, write);
- mutex_unlock(&zi->i_truncate_mutex);
-}
-
-static int zonefs_file_truncate(struct inode *inode, loff_t isize)
-{
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
- loff_t old_isize;
- enum req_op op;
- int ret = 0;
-
- /*
- * Only sequential zone files can be truncated and truncation is allowed
- * only down to a 0 size, which is equivalent to a zone reset, and to
- * the maximum file size, which is equivalent to a zone finish.
- */
- if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
- return -EPERM;
-
- if (!isize)
- op = REQ_OP_ZONE_RESET;
- else if (isize == zi->i_max_size)
- op = REQ_OP_ZONE_FINISH;
- else
- return -EPERM;
-
- inode_dio_wait(inode);
-
- /* Serialize against page faults */
- filemap_invalidate_lock(inode->i_mapping);
-
- /* Serialize against zonefs_iomap_begin() */
- mutex_lock(&zi->i_truncate_mutex);
-
- old_isize = i_size_read(inode);
- if (isize == old_isize)
- goto unlock;
-
- ret = zonefs_zone_mgmt(inode, op);
- if (ret)
- goto unlock;
-
- /*
- * If the mount option ZONEFS_MNTOPT_EXPLICIT_OPEN is set,
- * take care of open zones.
- */
- if (zi->i_flags & ZONEFS_ZONE_OPEN) {
- /*
- * Truncating a zone to EMPTY or FULL is the equivalent of
- * closing the zone. For a truncation to 0, we need to
- * re-open the zone to ensure new writes can be processed.
- * For a truncation to the maximum file size, the zone is
- * closed and writes cannot be accepted anymore, so clear
- * the open flag.
- */
- if (!isize)
- ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
- else
- zi->i_flags &= ~ZONEFS_ZONE_OPEN;
- }
-
- zonefs_update_stats(inode, isize);
- truncate_setsize(inode, isize);
- zi->i_wpoffset = isize;
- zonefs_account_active(inode);
-
-unlock:
- mutex_unlock(&zi->i_truncate_mutex);
- filemap_invalidate_unlock(inode->i_mapping);
-
- return ret;
-}
-
-static int zonefs_inode_setattr(struct mnt_idmap *idmap,
- struct dentry *dentry, struct iattr *iattr)
-{
- struct inode *inode = d_inode(dentry);
- int ret;
-
- if (unlikely(IS_IMMUTABLE(inode)))
- return -EPERM;
-
- ret = setattr_prepare(&nop_mnt_idmap, dentry, iattr);
- if (ret)
- return ret;
-
- /*
- * Since files and directories cannot be created nor deleted, do not
- * allow setting any write attributes on the sub-directories grouping
- * files by zone type.
- */
- if ((iattr->ia_valid & ATTR_MODE) && S_ISDIR(inode->i_mode) &&
- (iattr->ia_mode & 0222))
- return -EPERM;
-
- if (((iattr->ia_valid & ATTR_UID) &&
- !uid_eq(iattr->ia_uid, inode->i_uid)) ||
- ((iattr->ia_valid & ATTR_GID) &&
- !gid_eq(iattr->ia_gid, inode->i_gid))) {
- ret = dquot_transfer(&nop_mnt_idmap, inode, iattr);
- if (ret)
- return ret;
- }
-
- if (iattr->ia_valid & ATTR_SIZE) {
- ret = zonefs_file_truncate(inode, iattr->ia_size);
- if (ret)
- return ret;
- }
-
- setattr_copy(&nop_mnt_idmap, inode, iattr);
-
- return 0;
-}
-
-static const struct inode_operations zonefs_file_inode_operations = {
- .setattr = zonefs_inode_setattr,
-};
-
-static int zonefs_file_fsync(struct file *file, loff_t start, loff_t end,
- int datasync)
-{
- struct inode *inode = file_inode(file);
- int ret = 0;
-
- if (unlikely(IS_IMMUTABLE(inode)))
- return -EPERM;
-
- /*
- * Since only direct writes are allowed in sequential files, page cache
- * flush is needed only for conventional zone files.
- */
- if (ZONEFS_I(inode)->i_ztype == ZONEFS_ZTYPE_CNV)
- ret = file_write_and_wait_range(file, start, end);
- if (!ret)
- ret = blkdev_issue_flush(inode->i_sb->s_bdev);
-
- if (ret)
- zonefs_io_error(inode, true);
-
- return ret;
-}
-
-static vm_fault_t zonefs_filemap_page_mkwrite(struct vm_fault *vmf)
-{
- struct inode *inode = file_inode(vmf->vma->vm_file);
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
- vm_fault_t ret;
-
- if (unlikely(IS_IMMUTABLE(inode)))
- return VM_FAULT_SIGBUS;
-
- /*
- * Sanity check: only conventional zone files can have shared
- * writeable mappings.
- */
- if (WARN_ON_ONCE(zi->i_ztype != ZONEFS_ZTYPE_CNV))
- return VM_FAULT_NOPAGE;
-
- sb_start_pagefault(inode->i_sb);
- file_update_time(vmf->vma->vm_file);
-
- /* Serialize against truncates */
- filemap_invalidate_lock_shared(inode->i_mapping);
- ret = iomap_page_mkwrite(vmf, &zonefs_write_iomap_ops);
- filemap_invalidate_unlock_shared(inode->i_mapping);
-
- sb_end_pagefault(inode->i_sb);
- return ret;
-}
-
-static const struct vm_operations_struct zonefs_file_vm_ops = {
- .fault = filemap_fault,
- .map_pages = filemap_map_pages,
- .page_mkwrite = zonefs_filemap_page_mkwrite,
-};
-
-static int zonefs_file_mmap(struct file *file, struct vm_area_struct *vma)
-{
- /*
- * Conventional zones accept random writes, so their files can support
- * shared writable mappings. For sequential zone files, only read
- * mappings are possible since there are no guarantees for write
- * ordering between msync() and page cache writeback.
- */
- if (ZONEFS_I(file_inode(file))->i_ztype == ZONEFS_ZTYPE_SEQ &&
- (vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
- return -EINVAL;
-
- file_accessed(file);
- vma->vm_ops = &zonefs_file_vm_ops;
-
- return 0;
-}
-
-static loff_t zonefs_file_llseek(struct file *file, loff_t offset, int whence)
-{
- loff_t isize = i_size_read(file_inode(file));
-
- /*
- * Seeks are limited to below the zone size for conventional zones
- * and below the zone write pointer for sequential zones. In both
- * cases, this limit is the inode size.
- */
- return generic_file_llseek_size(file, offset, whence, isize, isize);
-}
-
-static int zonefs_file_write_dio_end_io(struct kiocb *iocb, ssize_t size,
- int error, unsigned int flags)
-{
- struct inode *inode = file_inode(iocb->ki_filp);
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
-
- if (error) {
- zonefs_io_error(inode, true);
- return error;
- }
-
- if (size && zi->i_ztype != ZONEFS_ZTYPE_CNV) {
- /*
- * Note that we may be seeing completions out of order,
- * but that is not a problem since a write completed
- * successfully necessarily means that all preceding writes
- * were also successful. So we can safely increase the inode
- * size to the write end location.
- */
- mutex_lock(&zi->i_truncate_mutex);
- if (i_size_read(inode) < iocb->ki_pos + size) {
- zonefs_update_stats(inode, iocb->ki_pos + size);
- zonefs_i_size_write(inode, iocb->ki_pos + size);
- }
- mutex_unlock(&zi->i_truncate_mutex);
- }
-
- return 0;
-}
-
-static const struct iomap_dio_ops zonefs_write_dio_ops = {
- .end_io = zonefs_file_write_dio_end_io,
-};
-
-static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
-{
- struct inode *inode = file_inode(iocb->ki_filp);
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
- struct block_device *bdev = inode->i_sb->s_bdev;
- unsigned int max = bdev_max_zone_append_sectors(bdev);
- struct bio *bio;
- ssize_t size;
- int nr_pages;
- ssize_t ret;
-
- max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
- iov_iter_truncate(from, max);
-
- nr_pages = iov_iter_npages(from, BIO_MAX_VECS);
- if (!nr_pages)
- return 0;
-
- bio = bio_alloc(bdev, nr_pages,
- REQ_OP_ZONE_APPEND | REQ_SYNC | REQ_IDLE, GFP_NOFS);
- bio->bi_iter.bi_sector = zi->i_zsector;
- bio->bi_ioprio = iocb->ki_ioprio;
- if (iocb_is_dsync(iocb))
- bio->bi_opf |= REQ_FUA;
-
- ret = bio_iov_iter_get_pages(bio, from);
- if (unlikely(ret))
- goto out_release;
-
- size = bio->bi_iter.bi_size;
- task_io_account_write(size);
-
- if (iocb->ki_flags & IOCB_HIPRI)
- bio_set_polled(bio, iocb);
-
- ret = submit_bio_wait(bio);
-
- /*
- * If the file zone was written underneath the file system, the zone
- * write pointer may not be where we expect it to be, but the zone
- * append write can still succeed. So check manually that we wrote where
- * we intended to, that is, at zi->i_wpoffset.
- */
- if (!ret) {
- sector_t wpsector =
- zi->i_zsector + (zi->i_wpoffset >> SECTOR_SHIFT);
-
- if (bio->bi_iter.bi_sector != wpsector) {
- zonefs_warn(inode->i_sb,
- "Corrupted write pointer %llu for zone at %llu\n",
- wpsector, zi->i_zsector);
- ret = -EIO;
- }
- }
-
- zonefs_file_write_dio_end_io(iocb, size, ret, 0);
- trace_zonefs_file_dio_append(inode, size, ret);
-
-out_release:
- bio_release_pages(bio, false);
- bio_put(bio);
-
- if (ret >= 0) {
- iocb->ki_pos += size;
- return size;
- }
-
- return ret;
-}
-
-/*
- * Do not exceed the LFS limits nor the file zone size. If pos is under the
- * limit it becomes a short access. If it exceeds the limit, return -EFBIG.
- */
-static loff_t zonefs_write_check_limits(struct file *file, loff_t pos,
- loff_t count)
-{
- struct inode *inode = file_inode(file);
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
- loff_t limit = rlimit(RLIMIT_FSIZE);
- loff_t max_size = zi->i_max_size;
-
- if (limit != RLIM_INFINITY) {
- if (pos >= limit) {
- send_sig(SIGXFSZ, current, 0);
- return -EFBIG;
- }
- count = min(count, limit - pos);
- }
-
- if (!(file->f_flags & O_LARGEFILE))
- max_size = min_t(loff_t, MAX_NON_LFS, max_size);
-
- if (unlikely(pos >= max_size))
- return -EFBIG;
-
- return min(count, max_size - pos);
-}
-
-static ssize_t zonefs_write_checks(struct kiocb *iocb, struct iov_iter *from)
-{
- struct file *file = iocb->ki_filp;
- struct inode *inode = file_inode(file);
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
- loff_t count;
-
- if (IS_SWAPFILE(inode))
- return -ETXTBSY;
-
- if (!iov_iter_count(from))
- return 0;
-
- if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT))
- return -EINVAL;
-
- if (iocb->ki_flags & IOCB_APPEND) {
- if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
- return -EINVAL;
- mutex_lock(&zi->i_truncate_mutex);
- iocb->ki_pos = zi->i_wpoffset;
- mutex_unlock(&zi->i_truncate_mutex);
- }
-
- count = zonefs_write_check_limits(file, iocb->ki_pos,
- iov_iter_count(from));
- if (count < 0)
- return count;
-
- iov_iter_truncate(from, count);
- return iov_iter_count(from);
-}
-
-/*
- * Handle direct writes. For sequential zone files, this is the only possible
- * write path. For these files, check that the user is issuing writes
- * sequentially from the end of the file. This code assumes that the block layer
- * delivers write requests to the device in sequential order. This is always the
- * case if a block IO scheduler implementing the ELEVATOR_F_ZBD_SEQ_WRITE
- * elevator feature is being used (e.g. mq-deadline). The block layer always
- * automatically select such an elevator for zoned block devices during the
- * device initialization.
- */
-static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
-{
- struct inode *inode = file_inode(iocb->ki_filp);
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
- struct super_block *sb = inode->i_sb;
- bool sync = is_sync_kiocb(iocb);
- bool append = false;
- ssize_t ret, count;
-
- /*
- * For async direct IOs to sequential zone files, refuse IOCB_NOWAIT
- * as this can cause write reordering (e.g. the first aio gets EAGAIN
- * on the inode lock but the second goes through but is now unaligned).
- */
- if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !sync &&
- (iocb->ki_flags & IOCB_NOWAIT))
- return -EOPNOTSUPP;
-
- if (iocb->ki_flags & IOCB_NOWAIT) {
- if (!inode_trylock(inode))
- return -EAGAIN;
- } else {
- inode_lock(inode);
- }
-
- count = zonefs_write_checks(iocb, from);
- if (count <= 0) {
- ret = count;
- goto inode_unlock;
- }
-
- if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
- ret = -EINVAL;
- goto inode_unlock;
- }
-
- /* Enforce sequential writes (append only) in sequential zones */
- if (zi->i_ztype == ZONEFS_ZTYPE_SEQ) {
- mutex_lock(&zi->i_truncate_mutex);
- if (iocb->ki_pos != zi->i_wpoffset) {
- mutex_unlock(&zi->i_truncate_mutex);
- ret = -EINVAL;
- goto inode_unlock;
- }
- mutex_unlock(&zi->i_truncate_mutex);
- append = sync;
- }
-
- if (append)
- ret = zonefs_file_dio_append(iocb, from);
- else
- ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops,
- &zonefs_write_dio_ops, 0, NULL, 0);
- if (zi->i_ztype == ZONEFS_ZTYPE_SEQ &&
- (ret > 0 || ret == -EIOCBQUEUED)) {
- if (ret > 0)
- count = ret;
-
- /*
- * Update the zone write pointer offset assuming the write
- * operation succeeded. If it did not, the error recovery path
- * will correct it. Also do active seq file accounting.
- */
- mutex_lock(&zi->i_truncate_mutex);
- zi->i_wpoffset += count;
- zonefs_account_active(inode);
- mutex_unlock(&zi->i_truncate_mutex);
- }
-
-inode_unlock:
- inode_unlock(inode);
-
- return ret;
-}
-
-static ssize_t zonefs_file_buffered_write(struct kiocb *iocb,
- struct iov_iter *from)
-{
- struct inode *inode = file_inode(iocb->ki_filp);
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
- ssize_t ret;
-
- /*
- * Direct IO writes are mandatory for sequential zone files so that the
- * write IO issuing order is preserved.
- */
- if (zi->i_ztype != ZONEFS_ZTYPE_CNV)
- return -EIO;
-
- if (iocb->ki_flags & IOCB_NOWAIT) {
- if (!inode_trylock(inode))
- return -EAGAIN;
- } else {
- inode_lock(inode);
- }
-
- ret = zonefs_write_checks(iocb, from);
- if (ret <= 0)
- goto inode_unlock;
-
- ret = iomap_file_buffered_write(iocb, from, &zonefs_write_iomap_ops);
- if (ret > 0)
- iocb->ki_pos += ret;
- else if (ret == -EIO)
- zonefs_io_error(inode, true);
-
-inode_unlock:
- inode_unlock(inode);
- if (ret > 0)
- ret = generic_write_sync(iocb, ret);
-
- return ret;
-}
-
-static ssize_t zonefs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
-{
- struct inode *inode = file_inode(iocb->ki_filp);
-
- if (unlikely(IS_IMMUTABLE(inode)))
- return -EPERM;
-
- if (sb_rdonly(inode->i_sb))
- return -EROFS;
-
- /* Write operations beyond the zone size are not allowed */
- if (iocb->ki_pos >= ZONEFS_I(inode)->i_max_size)
- return -EFBIG;
-
- if (iocb->ki_flags & IOCB_DIRECT) {
- ssize_t ret = zonefs_file_dio_write(iocb, from);
- if (ret != -ENOTBLK)
- return ret;
- }
-
- return zonefs_file_buffered_write(iocb, from);
-}
-
-static int zonefs_file_read_dio_end_io(struct kiocb *iocb, ssize_t size,
- int error, unsigned int flags)
-{
- if (error) {
- zonefs_io_error(file_inode(iocb->ki_filp), false);
- return error;
- }
-
- return 0;
-}
-
-static const struct iomap_dio_ops zonefs_read_dio_ops = {
- .end_io = zonefs_file_read_dio_end_io,
-};
-
-static ssize_t zonefs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
-{
- struct inode *inode = file_inode(iocb->ki_filp);
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
- struct super_block *sb = inode->i_sb;
- loff_t isize;
- ssize_t ret;
-
- /* Offline zones cannot be read */
- if (unlikely(IS_IMMUTABLE(inode) && !(inode->i_mode & 0777)))
- return -EPERM;
-
- if (iocb->ki_pos >= zi->i_max_size)
- return 0;
-
- if (iocb->ki_flags & IOCB_NOWAIT) {
- if (!inode_trylock_shared(inode))
- return -EAGAIN;
- } else {
- inode_lock_shared(inode);
- }
-
- /* Limit read operations to written data */
- mutex_lock(&zi->i_truncate_mutex);
- isize = i_size_read(inode);
- if (iocb->ki_pos >= isize) {
- mutex_unlock(&zi->i_truncate_mutex);
- ret = 0;
- goto inode_unlock;
- }
- iov_iter_truncate(to, isize - iocb->ki_pos);
- mutex_unlock(&zi->i_truncate_mutex);
-
- if (iocb->ki_flags & IOCB_DIRECT) {
- size_t count = iov_iter_count(to);
-
- if ((iocb->ki_pos | count) & (sb->s_blocksize - 1)) {
- ret = -EINVAL;
- goto inode_unlock;
- }
- file_accessed(iocb->ki_filp);
- ret = iomap_dio_rw(iocb, to, &zonefs_read_iomap_ops,
- &zonefs_read_dio_ops, 0, NULL, 0);
- } else {
- ret = generic_file_read_iter(iocb, to);
- if (ret == -EIO)
- zonefs_io_error(inode, false);
- }
-
-inode_unlock:
- inode_unlock_shared(inode);
-
- return ret;
-}
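
The same alignment rule applies on the read side, and reads are capped
at the written data. A minimal user-space sketch (path and block size
are assumptions): a block-aligned O_DIRECT read below the write pointer
succeeds, while reading at or beyond it returns 0.

/* Hedged sketch: block-aligned O_DIRECT read of a zone file. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/mnt/zonefs/seq/0";	/* hypothetical mount */
	const size_t bs = 4096;			/* assumed block size */
	void *buf;
	ssize_t ret;
	int fd;

	fd = open(path, O_RDONLY | O_DIRECT);
	if (fd < 0)
		return 1;
	if (posix_memalign(&buf, bs, bs))
		return 1;

	/* Offset and count must both be block aligned, else EINVAL. */
	ret = pread(fd, buf, bs, 0);
	if (ret == 0)
		printf("nothing written yet at offset 0\n");
	else if (ret < 0)
		perror("pread");

	free(buf);
	close(fd);
	return 0;
}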
-
-/*
- * Write open accounting is done only for sequential files.
- */
-static inline bool zonefs_seq_file_need_wro(struct inode *inode,
- struct file *file)
-{
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
-
- if (zi->i_ztype != ZONEFS_ZTYPE_SEQ)
- return false;
-
- if (!(file->f_mode & FMODE_WRITE))
- return false;
-
- return true;
-}
-
-static int zonefs_seq_file_write_open(struct inode *inode)
-{
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
- int ret = 0;
-
- mutex_lock(&zi->i_truncate_mutex);
-
- if (!zi->i_wr_refcnt) {
- struct zonefs_sb_info *sbi = ZONEFS_SB(inode->i_sb);
- unsigned int wro = atomic_inc_return(&sbi->s_wro_seq_files);
-
- if (sbi->s_mount_opts & ZONEFS_MNTOPT_EXPLICIT_OPEN) {
-
- if (sbi->s_max_wro_seq_files
- && wro > sbi->s_max_wro_seq_files) {
- atomic_dec(&sbi->s_wro_seq_files);
- ret = -EBUSY;
- goto unlock;
- }
-
- if (i_size_read(inode) < zi->i_max_size) {
- ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_OPEN);
- if (ret) {
- atomic_dec(&sbi->s_wro_seq_files);
- goto unlock;
- }
- zi->i_flags |= ZONEFS_ZONE_OPEN;
- zonefs_account_active(inode);
- }
- }
- }
-
- zi->i_wr_refcnt++;
-
-unlock:
- mutex_unlock(&zi->i_truncate_mutex);
-
- return ret;
-}
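
When zonefs is mounted with -o explicit-open, this accounting means that
opening one more sequential file for writing than the device's open zone
limit allows fails with EBUSY. A hedged sketch, where the mount point,
the file names and the limit of 8 open zones are all assumptions:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	char path[64];
	int i, fd;

	for (i = 0; i < 9; i++) {
		snprintf(path, sizeof(path), "/mnt/zonefs/seq/%d", i);
		fd = open(path, O_WRONLY | O_DIRECT);
		if (fd < 0) {
			/* Expected for the 9th file: EBUSY */
			printf("open(%s): %s\n", path, strerror(errno));
			return 0;
		}
		/* Keep fd open so the zone stays write-open. */
	}
	return 0;
}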
-
-static int zonefs_file_open(struct inode *inode, struct file *file)
-{
- int ret;
-
- ret = generic_file_open(inode, file);
- if (ret)
- return ret;
-
- if (zonefs_seq_file_need_wro(inode, file))
- return zonefs_seq_file_write_open(inode);
-
- return 0;
-}
-
-static void zonefs_seq_file_write_close(struct inode *inode)
-{
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
- struct super_block *sb = inode->i_sb;
- struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
- int ret = 0;
-
- mutex_lock(&zi->i_truncate_mutex);
-
- zi->i_wr_refcnt--;
- if (zi->i_wr_refcnt)
- goto unlock;
-
- /*
- * The file zone may not be open anymore (e.g. the file was truncated to
-	 * its maximum size or it was fully written). In that case, we only
- * need to decrement the write open count.
- */
- if (zi->i_flags & ZONEFS_ZONE_OPEN) {
- ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_CLOSE);
- if (ret) {
- __zonefs_io_error(inode, false);
- /*
- * Leaving zones explicitly open may lead to a state
- * where most zones cannot be written (zone resources
- * exhausted). So take preventive action by remounting
- * read-only.
- */
- if (zi->i_flags & ZONEFS_ZONE_OPEN &&
- !(sb->s_flags & SB_RDONLY)) {
- zonefs_warn(sb,
- "closing zone at %llu failed %d\n",
- zi->i_zsector, ret);
- zonefs_warn(sb,
- "remounting filesystem read-only\n");
- sb->s_flags |= SB_RDONLY;
- }
- goto unlock;
- }
-
- zi->i_flags &= ~ZONEFS_ZONE_OPEN;
- zonefs_account_active(inode);
- }
-
- atomic_dec(&sbi->s_wro_seq_files);
-
-unlock:
- mutex_unlock(&zi->i_truncate_mutex);
-}
-
-static int zonefs_file_release(struct inode *inode, struct file *file)
-{
- /*
- * If we explicitly open a zone we must close it again as well, but the
-	 * zone management operation can fail (either due to an IO error or because
- * the zone has gone offline or read-only). Make sure we don't fail the
- * close(2) for user-space.
- */
- if (zonefs_seq_file_need_wro(inode, file))
- zonefs_seq_file_write_close(inode);
-
- return 0;
-}
-
-static const struct file_operations zonefs_file_operations = {
- .open = zonefs_file_open,
- .release = zonefs_file_release,
- .fsync = zonefs_file_fsync,
- .mmap = zonefs_file_mmap,
- .llseek = zonefs_file_llseek,
- .read_iter = zonefs_file_read_iter,
- .write_iter = zonefs_file_write_iter,
- .splice_read = generic_file_splice_read,
- .splice_write = iter_file_splice_write,
- .iopoll = iocb_bio_iopoll,
-};
-
static struct kmem_cache *zonefs_inode_cachep;
static struct inode *zonefs_alloc_inode(struct super_block *sb)
@@ -1282,7 +415,6 @@ static struct inode *zonefs_alloc_inode(struct super_block *sb)
inode_init_once(&zi->i_vnode);
mutex_init(&zi->i_truncate_mutex);
zi->i_wr_refcnt = 0;
- zi->i_flags = 0;
return &zi->i_vnode;
}
@@ -1315,8 +447,8 @@ static int zonefs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_bavail = buf->f_bfree;
for (t = 0; t < ZONEFS_ZTYPE_MAX; t++) {
- if (sbi->s_nr_files[t])
- buf->f_files += sbi->s_nr_files[t] + 1;
+ if (sbi->s_zgroup[t].g_nr_zones)
+ buf->f_files += sbi->s_zgroup[t].g_nr_zones + 1;
}
buf->f_ffree = 0;
@@ -1408,185 +540,440 @@ static int zonefs_remount(struct super_block *sb, int *flags, char *data)
return zonefs_parse_options(sb, data);
}
-static const struct super_operations zonefs_sops = {
- .alloc_inode = zonefs_alloc_inode,
- .free_inode = zonefs_free_inode,
- .statfs = zonefs_statfs,
- .remount_fs = zonefs_remount,
- .show_options = zonefs_show_options,
-};
+static int zonefs_inode_setattr(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct iattr *iattr)
+{
+ struct inode *inode = d_inode(dentry);
+ int ret;
+
+ if (unlikely(IS_IMMUTABLE(inode)))
+ return -EPERM;
+
+ ret = setattr_prepare(&nop_mnt_idmap, dentry, iattr);
+ if (ret)
+ return ret;
+
+ /*
+ * Since files and directories cannot be created nor deleted, do not
+ * allow setting any write attributes on the sub-directories grouping
+ * files by zone type.
+ */
+ if ((iattr->ia_valid & ATTR_MODE) && S_ISDIR(inode->i_mode) &&
+ (iattr->ia_mode & 0222))
+ return -EPERM;
+
+ if (((iattr->ia_valid & ATTR_UID) &&
+ !uid_eq(iattr->ia_uid, inode->i_uid)) ||
+ ((iattr->ia_valid & ATTR_GID) &&
+ !gid_eq(iattr->ia_gid, inode->i_gid))) {
+ ret = dquot_transfer(&nop_mnt_idmap, inode, iattr);
+ if (ret)
+ return ret;
+ }
+
+ if (iattr->ia_valid & ATTR_SIZE) {
+ ret = zonefs_file_truncate(inode, iattr->ia_size);
+ if (ret)
+ return ret;
+ }
-static const struct inode_operations zonefs_dir_inode_operations = {
- .lookup = simple_lookup,
+ setattr_copy(&nop_mnt_idmap, inode, iattr);
+
+ if (S_ISREG(inode->i_mode)) {
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+
+ z->z_mode = inode->i_mode;
+ z->z_uid = inode->i_uid;
+ z->z_gid = inode->i_gid;
+ }
+
+ return 0;
+}
+
+static const struct inode_operations zonefs_file_inode_operations = {
.setattr = zonefs_inode_setattr,
};
-static void zonefs_init_dir_inode(struct inode *parent, struct inode *inode,
- enum zonefs_ztype type)
+static long zonefs_fname_to_fno(const struct qstr *fname)
{
- struct super_block *sb = parent->i_sb;
+ const char *name = fname->name;
+ unsigned int len = fname->len;
+ long fno = 0, shift = 1;
+ const char *rname;
+ char c = *name;
+ unsigned int i;
- inode->i_ino = bdev_nr_zones(sb->s_bdev) + type + 1;
- inode_init_owner(&nop_mnt_idmap, inode, parent, S_IFDIR | 0555);
- inode->i_op = &zonefs_dir_inode_operations;
- inode->i_fop = &simple_dir_operations;
- set_nlink(inode, 2);
- inc_nlink(parent);
+ /*
+ * File names are always a base-10 number string without any
+ * leading 0s.
+ */
+ if (!isdigit(c))
+ return -ENOENT;
+
+ if (len > 1 && c == '0')
+ return -ENOENT;
+
+ if (len == 1)
+ return c - '0';
+
+ for (i = 0, rname = name + len - 1; i < len; i++, rname--) {
+ c = *rname;
+ if (!isdigit(c))
+ return -ENOENT;
+ fno += (c - '0') * shift;
+ shift *= 10;
+ }
+
+ return fno;
}
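
The naming rules enforced here (decimal digits only, no leading zeros)
can be mirrored in a small user-space harness; the kernel function
returns -ENOENT where this hedged sketch returns -1:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Illustrative mirror of the zonefs file name parsing rules. */
static long fname_to_fno(const char *name)
{
	size_t i, len = strlen(name);
	long fno = 0;

	if (!len || (len > 1 && name[0] == '0'))
		return -1;	/* empty name or leading zero */
	for (i = 0; i < len; i++) {
		if (!isdigit((unsigned char)name[i]))
			return -1;	/* digits only */
		fno = fno * 10 + (name[i] - '0');
	}
	return fno;
}

int main(void)
{
	const char *names[] = { "0", "12", "012", "x1" };
	size_t i;

	for (i = 0; i < 4; i++)
		printf("%-4s -> %ld\n", names[i], fname_to_fno(names[i]));
	return 0;	/* expect: 0, 12, -1, -1 */
}
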
-static int zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone,
- enum zonefs_ztype type)
+static struct inode *zonefs_get_file_inode(struct inode *dir,
+ struct dentry *dentry)
{
- struct super_block *sb = inode->i_sb;
+ struct zonefs_zone_group *zgroup = dir->i_private;
+ struct super_block *sb = dir->i_sb;
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
- struct zonefs_inode_info *zi = ZONEFS_I(inode);
- int ret = 0;
+ struct zonefs_zone *z;
+ struct inode *inode;
+ ino_t ino;
+ long fno;
- inode->i_ino = zone->start >> sbi->s_zone_sectors_shift;
- inode->i_mode = S_IFREG | sbi->s_perm;
+ /* Get the file number from the file name */
+ fno = zonefs_fname_to_fno(&dentry->d_name);
+ if (fno < 0)
+ return ERR_PTR(fno);
- zi->i_ztype = type;
- zi->i_zsector = zone->start;
- zi->i_zone_size = zone->len << SECTOR_SHIFT;
- if (zi->i_zone_size > bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT &&
- !(sbi->s_features & ZONEFS_F_AGGRCNV)) {
- zonefs_err(sb,
- "zone size %llu doesn't match device's zone sectors %llu\n",
- zi->i_zone_size,
- bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT);
- return -EINVAL;
- }
+ if (!zgroup->g_nr_zones || fno >= zgroup->g_nr_zones)
+ return ERR_PTR(-ENOENT);
- zi->i_max_size = min_t(loff_t, MAX_LFS_FILESIZE,
- zone->capacity << SECTOR_SHIFT);
- zi->i_wpoffset = zonefs_check_zone_condition(inode, zone, true, true);
+ z = &zgroup->g_zones[fno];
+ ino = z->z_sector >> sbi->s_zone_sectors_shift;
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW)) {
+ WARN_ON_ONCE(inode->i_private != z);
+ return inode;
+ }
- inode->i_uid = sbi->s_uid;
- inode->i_gid = sbi->s_gid;
- inode->i_size = zi->i_wpoffset;
- inode->i_blocks = zi->i_max_size >> SECTOR_SHIFT;
+ inode->i_ino = ino;
+ inode->i_mode = z->z_mode;
+ inode->i_ctime = inode->i_mtime = inode->i_atime = dir->i_ctime;
+ inode->i_uid = z->z_uid;
+ inode->i_gid = z->z_gid;
+ inode->i_size = z->z_wpoffset;
+ inode->i_blocks = z->z_capacity >> SECTOR_SHIFT;
+ inode->i_private = z;
inode->i_op = &zonefs_file_inode_operations;
inode->i_fop = &zonefs_file_operations;
inode->i_mapping->a_ops = &zonefs_file_aops;
- sb->s_maxbytes = max(zi->i_max_size, sb->s_maxbytes);
- sbi->s_blocks += zi->i_max_size >> sb->s_blocksize_bits;
- sbi->s_used_blocks += zi->i_wpoffset >> sb->s_blocksize_bits;
+ /* Update the inode access rights depending on the zone condition */
+ zonefs_inode_update_mode(inode);
+
+ unlock_new_inode(inode);
+
+ return inode;
+}
+
+static struct inode *zonefs_get_zgroup_inode(struct super_block *sb,
+ enum zonefs_ztype ztype)
+{
+ struct inode *root = d_inode(sb->s_root);
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ struct inode *inode;
+ ino_t ino = bdev_nr_zones(sb->s_bdev) + ztype + 1;
+
+ inode = iget_locked(sb, ino);
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+ if (!(inode->i_state & I_NEW))
+ return inode;
+
+ inode->i_ino = ino;
+ inode_init_owner(&nop_mnt_idmap, inode, root, S_IFDIR | 0555);
+ inode->i_size = sbi->s_zgroup[ztype].g_nr_zones;
+ inode->i_ctime = inode->i_mtime = inode->i_atime = root->i_ctime;
+ inode->i_private = &sbi->s_zgroup[ztype];
+ set_nlink(inode, 2);
+
+ inode->i_op = &zonefs_dir_inode_operations;
+ inode->i_fop = &zonefs_dir_operations;
+
+ unlock_new_inode(inode);
+
+ return inode;
+}
- mutex_lock(&zi->i_truncate_mutex);
+
+static struct inode *zonefs_get_dir_inode(struct inode *dir,
+ struct dentry *dentry)
+{
+ struct super_block *sb = dir->i_sb;
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ const char *name = dentry->d_name.name;
+ enum zonefs_ztype ztype;
/*
- * For sequential zones, make sure that any open zone is closed first
- * to ensure that the initial number of open zones is 0, in sync with
- * the open zone accounting done when the mount option
- * ZONEFS_MNTOPT_EXPLICIT_OPEN is used.
+ * We only need to check for the "seq" directory and
+ * the "cnv" directory if we have conventional zones.
*/
- if (type == ZONEFS_ZTYPE_SEQ &&
- (zone->cond == BLK_ZONE_COND_IMP_OPEN ||
- zone->cond == BLK_ZONE_COND_EXP_OPEN)) {
- ret = zonefs_zone_mgmt(inode, REQ_OP_ZONE_CLOSE);
- if (ret)
- goto unlock;
+ if (dentry->d_name.len != 3)
+ return ERR_PTR(-ENOENT);
+
+ for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
+ if (sbi->s_zgroup[ztype].g_nr_zones &&
+ memcmp(name, zonefs_zgroup_name(ztype), 3) == 0)
+ break;
}
+ if (ztype == ZONEFS_ZTYPE_MAX)
+ return ERR_PTR(-ENOENT);
- zonefs_account_active(inode);
+ return zonefs_get_zgroup_inode(sb, ztype);
+}
-unlock:
- mutex_unlock(&zi->i_truncate_mutex);
+static struct dentry *zonefs_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
+{
+ struct inode *inode;
- return ret;
+ if (dentry->d_name.len > ZONEFS_NAME_MAX)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ if (dir == d_inode(dir->i_sb->s_root))
+ inode = zonefs_get_dir_inode(dir, dentry);
+ else
+ inode = zonefs_get_file_inode(dir, dentry);
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+
+ return d_splice_alias(inode, dentry);
}
-static struct dentry *zonefs_create_inode(struct dentry *parent,
- const char *name, struct blk_zone *zone,
- enum zonefs_ztype type)
+static int zonefs_readdir_root(struct file *file, struct dir_context *ctx)
{
- struct inode *dir = d_inode(parent);
- struct dentry *dentry;
- struct inode *inode;
- int ret = -ENOMEM;
+ struct inode *inode = file_inode(file);
+ struct super_block *sb = inode->i_sb;
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ enum zonefs_ztype ztype = ZONEFS_ZTYPE_CNV;
+ ino_t base_ino = bdev_nr_zones(sb->s_bdev) + 1;
- dentry = d_alloc_name(parent, name);
- if (!dentry)
- return ERR_PTR(ret);
+ if (ctx->pos >= inode->i_size)
+ return 0;
- inode = new_inode(parent->d_sb);
- if (!inode)
- goto dput;
+ if (!dir_emit_dots(file, ctx))
+ return 0;
- inode->i_ctime = inode->i_mtime = inode->i_atime = dir->i_ctime;
- if (zone) {
- ret = zonefs_init_file_inode(inode, zone, type);
- if (ret) {
- iput(inode);
- goto dput;
- }
- } else {
- zonefs_init_dir_inode(dir, inode, type);
+ if (ctx->pos == 2) {
+ if (!sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones)
+ ztype = ZONEFS_ZTYPE_SEQ;
+
+ if (!dir_emit(ctx, zonefs_zgroup_name(ztype), 3,
+ base_ino + ztype, DT_DIR))
+ return 0;
+ ctx->pos++;
}
- d_add(dentry, inode);
- dir->i_size++;
+ if (ctx->pos == 3 && ztype != ZONEFS_ZTYPE_SEQ) {
+ ztype = ZONEFS_ZTYPE_SEQ;
+ if (!dir_emit(ctx, zonefs_zgroup_name(ztype), 3,
+ base_ino + ztype, DT_DIR))
+ return 0;
+ ctx->pos++;
+ }
- return dentry;
+ return 0;
+}
+
+static int zonefs_readdir_zgroup(struct file *file,
+ struct dir_context *ctx)
+{
+ struct inode *inode = file_inode(file);
+ struct zonefs_zone_group *zgroup = inode->i_private;
+ struct super_block *sb = inode->i_sb;
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ struct zonefs_zone *z;
+ int fname_len;
+ char *fname;
+ ino_t ino;
+ int f;
+
+ /*
+ * The size of zone group directories is equal to the number
+	 * of zone files in the group and does not include the "." and
+ * ".." entries. Hence the "+ 2" here.
+ */
+ if (ctx->pos >= inode->i_size + 2)
+ return 0;
+
+ if (!dir_emit_dots(file, ctx))
+ return 0;
+
+ fname = kmalloc(ZONEFS_NAME_MAX, GFP_KERNEL);
+ if (!fname)
+ return -ENOMEM;
+
+ for (f = ctx->pos - 2; f < zgroup->g_nr_zones; f++) {
+ z = &zgroup->g_zones[f];
+ ino = z->z_sector >> sbi->s_zone_sectors_shift;
+ fname_len = snprintf(fname, ZONEFS_NAME_MAX - 1, "%u", f);
+ if (!dir_emit(ctx, fname, fname_len, ino, DT_REG))
+ break;
+ ctx->pos++;
+ }
-dput:
- dput(dentry);
+ kfree(fname);
- return ERR_PTR(ret);
+ return 0;
+}
+
+static int zonefs_readdir(struct file *file, struct dir_context *ctx)
+{
+ struct inode *inode = file_inode(file);
+
+ if (inode == d_inode(inode->i_sb->s_root))
+ return zonefs_readdir_root(file, ctx);
+
+ return zonefs_readdir_zgroup(file, ctx);
}
+const struct inode_operations zonefs_dir_inode_operations = {
+ .lookup = zonefs_lookup,
+ .setattr = zonefs_inode_setattr,
+};
+
+const struct file_operations zonefs_dir_operations = {
+ .llseek = generic_file_llseek,
+ .read = generic_read_dir,
+ .iterate_shared = zonefs_readdir,
+};
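
With these operations in place, the dynamically built hierarchy looks
the same to user space as the old statically populated one. A hedged
sketch listing the zone group directories emitted by
zonefs_readdir_root(), with the mount point assumed:

#include <dirent.h>
#include <stdio.h>

int main(void)
{
	DIR *d = opendir("/mnt/zonefs");	/* hypothetical mount */
	struct dirent *de;

	if (!d)
		return 1;
	while ((de = readdir(d)) != NULL)
		printf("%s\n", de->d_name);	/* ".", "..", "cnv", "seq" */
	closedir(d);
	return 0;
}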
+
struct zonefs_zone_data {
struct super_block *sb;
unsigned int nr_zones[ZONEFS_ZTYPE_MAX];
+ sector_t cnv_zone_start;
struct blk_zone *zones;
};
+static int zonefs_get_zone_info_cb(struct blk_zone *zone, unsigned int idx,
+ void *data)
+{
+ struct zonefs_zone_data *zd = data;
+ struct super_block *sb = zd->sb;
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+
+ /*
+ * We do not care about the first zone: it contains the super block
+	 * and is not exposed as a file.
+ */
+ if (!idx)
+ return 0;
+
+ /*
+ * Count the number of zones that will be exposed as files.
+ * For sequential zones, we always have as many files as zones.
+	 * For conventional zones, the number of files depends on whether
+	 * conventional zone aggregation is enabled.
+ */
+ switch (zone->type) {
+ case BLK_ZONE_TYPE_CONVENTIONAL:
+ if (sbi->s_features & ZONEFS_F_AGGRCNV) {
+ /* One file per set of contiguous conventional zones */
+ if (!(sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones) ||
+ zone->start != zd->cnv_zone_start)
+ sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones++;
+ zd->cnv_zone_start = zone->start + zone->len;
+ } else {
+ /* One file per zone */
+ sbi->s_zgroup[ZONEFS_ZTYPE_CNV].g_nr_zones++;
+ }
+ break;
+ case BLK_ZONE_TYPE_SEQWRITE_REQ:
+ case BLK_ZONE_TYPE_SEQWRITE_PREF:
+ sbi->s_zgroup[ZONEFS_ZTYPE_SEQ].g_nr_zones++;
+ break;
+ default:
+ zonefs_err(zd->sb, "Unsupported zone type 0x%x\n",
+ zone->type);
+ return -EIO;
+ }
+
+ memcpy(&zd->zones[idx], zone, sizeof(struct blk_zone));
+
+ return 0;
+}
+
+static int zonefs_get_zone_info(struct zonefs_zone_data *zd)
+{
+ struct block_device *bdev = zd->sb->s_bdev;
+ int ret;
+
+ zd->zones = kvcalloc(bdev_nr_zones(bdev), sizeof(struct blk_zone),
+ GFP_KERNEL);
+ if (!zd->zones)
+ return -ENOMEM;
+
+ /* Get zones information from the device */
+ ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
+ zonefs_get_zone_info_cb, zd);
+ if (ret < 0) {
+ zonefs_err(zd->sb, "Zone report failed %d\n", ret);
+ return ret;
+ }
+
+ if (ret != bdev_nr_zones(bdev)) {
+ zonefs_err(zd->sb, "Invalid zone report (%d/%u zones)\n",
+ ret, bdev_nr_zones(bdev));
+ return -EIO;
+ }
+
+ return 0;
+}
+
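The in-kernel blkdev_report_zones() walk used here has a user-space
analogue in the blkzoned ioctls. A hedged sketch counting the zones of
a zoned block device (the device path is an assumption):

#include <fcntl.h>
#include <linux/blkzoned.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	unsigned int nr_zones = 0;
	int fd = open("/dev/nullb0", O_RDONLY);	/* hypothetical device */

	if (fd < 0)
		return 1;
	if (ioctl(fd, BLKGETNRZONES, &nr_zones) < 0)
		perror("BLKGETNRZONES");
	else
		printf("%u zones\n", nr_zones);	/* zone 0 holds the sb */
	close(fd);
	return 0;
}
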
+static inline void zonefs_free_zone_info(struct zonefs_zone_data *zd)
+{
+ kvfree(zd->zones);
+}
+
/*
* Create a zone group and populate it with zone files.
*/
-static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
- enum zonefs_ztype type)
+static int zonefs_init_zgroup(struct super_block *sb,
+ struct zonefs_zone_data *zd,
+ enum zonefs_ztype ztype)
{
- struct super_block *sb = zd->sb;
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ struct zonefs_zone_group *zgroup = &sbi->s_zgroup[ztype];
struct blk_zone *zone, *next, *end;
- const char *zgroup_name;
- char *file_name;
- struct dentry *dir, *dent;
+ struct zonefs_zone *z;
unsigned int n = 0;
int ret;
- /* If the group is empty, there is nothing to do */
- if (!zd->nr_zones[type])
+ /* Allocate the zone group. If it is empty, we have nothing to do. */
+ if (!zgroup->g_nr_zones)
return 0;
- file_name = kmalloc(ZONEFS_NAME_MAX, GFP_KERNEL);
- if (!file_name)
+ zgroup->g_zones = kvcalloc(zgroup->g_nr_zones,
+ sizeof(struct zonefs_zone), GFP_KERNEL);
+ if (!zgroup->g_zones)
return -ENOMEM;
- if (type == ZONEFS_ZTYPE_CNV)
- zgroup_name = "cnv";
- else
- zgroup_name = "seq";
-
- dir = zonefs_create_inode(sb->s_root, zgroup_name, NULL, type);
- if (IS_ERR(dir)) {
- ret = PTR_ERR(dir);
- goto free;
- }
-
/*
- * The first zone contains the super block: skip it.
+ * Initialize the zone groups using the device zone information.
+ * We always skip the first zone as it contains the super block
+	 * and is not used to back a file.
*/
end = zd->zones + bdev_nr_zones(sb->s_bdev);
for (zone = &zd->zones[1]; zone < end; zone = next) {
next = zone + 1;
- if (zonefs_zone_type(zone) != type)
+ if (zonefs_zone_type(zone) != ztype)
continue;
+ if (WARN_ON_ONCE(n >= zgroup->g_nr_zones))
+ return -EINVAL;
+
/*
* For conventional zones, contiguous zones can be aggregated
* together to form larger files. Note that this overwrites the
@@ -1595,10 +982,10 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
* found, assume that all zones aggregated have the same
* condition.
*/
- if (type == ZONEFS_ZTYPE_CNV &&
+ if (ztype == ZONEFS_ZTYPE_CNV &&
(sbi->s_features & ZONEFS_F_AGGRCNV)) {
for (; next < end; next++) {
- if (zonefs_zone_type(next) != type)
+ if (zonefs_zone_type(next) != ztype)
break;
zone->len += next->len;
zone->capacity += next->capacity;
@@ -1608,99 +995,118 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd,
else if (next->cond == BLK_ZONE_COND_OFFLINE)
zone->cond = BLK_ZONE_COND_OFFLINE;
}
- if (zone->capacity != zone->len) {
- zonefs_err(sb, "Invalid conventional zone capacity\n");
- ret = -EINVAL;
- goto free;
- }
}
+ z = &zgroup->g_zones[n];
+ if (ztype == ZONEFS_ZTYPE_CNV)
+ z->z_flags |= ZONEFS_ZONE_CNV;
+ z->z_sector = zone->start;
+ z->z_size = zone->len << SECTOR_SHIFT;
+ if (z->z_size > bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT &&
+ !(sbi->s_features & ZONEFS_F_AGGRCNV)) {
+ zonefs_err(sb,
+ "Invalid zone size %llu (device zone sectors %llu)\n",
+ z->z_size,
+ bdev_zone_sectors(sb->s_bdev) << SECTOR_SHIFT);
+ return -EINVAL;
+ }
+
+ z->z_capacity = min_t(loff_t, MAX_LFS_FILESIZE,
+ zone->capacity << SECTOR_SHIFT);
+ z->z_wpoffset = zonefs_check_zone_condition(sb, z, zone);
+
+ z->z_mode = S_IFREG | sbi->s_perm;
+ z->z_uid = sbi->s_uid;
+ z->z_gid = sbi->s_gid;
+
/*
- * Use the file number within its group as file name.
+ * Let zonefs_inode_update_mode() know that we will need
+ * special initialization of the inode mode the first time
+ * it is accessed.
*/
- snprintf(file_name, ZONEFS_NAME_MAX - 1, "%u", n);
- dent = zonefs_create_inode(dir, file_name, zone, type);
- if (IS_ERR(dent)) {
- ret = PTR_ERR(dent);
- goto free;
+ z->z_flags |= ZONEFS_ZONE_INIT_MODE;
+
+ sb->s_maxbytes = max(z->z_capacity, sb->s_maxbytes);
+ sbi->s_blocks += z->z_capacity >> sb->s_blocksize_bits;
+ sbi->s_used_blocks += z->z_wpoffset >> sb->s_blocksize_bits;
+
+ /*
+ * For sequential zones, make sure that any open zone is closed
+ * first to ensure that the initial number of open zones is 0,
+ * in sync with the open zone accounting done when the mount
+ * option ZONEFS_MNTOPT_EXPLICIT_OPEN is used.
+ */
+ if (ztype == ZONEFS_ZTYPE_SEQ &&
+ (zone->cond == BLK_ZONE_COND_IMP_OPEN ||
+ zone->cond == BLK_ZONE_COND_EXP_OPEN)) {
+ ret = zonefs_zone_mgmt(sb, z, REQ_OP_ZONE_CLOSE);
+ if (ret)
+ return ret;
}
+ zonefs_account_active(sb, z);
+
n++;
}
- zonefs_info(sb, "Zone group \"%s\" has %u file%s\n",
- zgroup_name, n, n > 1 ? "s" : "");
-
- sbi->s_nr_files[type] = n;
- ret = 0;
+ if (WARN_ON_ONCE(n != zgroup->g_nr_zones))
+ return -EINVAL;
-free:
- kfree(file_name);
+ zonefs_info(sb, "Zone group \"%s\" has %u file%s\n",
+ zonefs_zgroup_name(ztype),
+ zgroup->g_nr_zones,
+ zgroup->g_nr_zones > 1 ? "s" : "");
- return ret;
+ return 0;
}
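
Under ZONEFS_F_AGGRCNV the loop above folds a run of contiguous
conventional zones into a single file whose size is the sum of the zone
lengths, keeping the worst zone condition seen (offline wins over
read-only). A hedged sketch of that arithmetic with made-up zone sizes
and conditions:

#include <stdio.h>

enum cond { COND_OK, COND_READONLY, COND_OFFLINE };

int main(void)
{
	/* Four contiguous 256 MiB conventional zones, one read-only. */
	unsigned long long len[4] = { 256ULL << 20, 256ULL << 20,
				      256ULL << 20, 256ULL << 20 };
	enum cond cond[4] = { COND_OK, COND_READONLY, COND_OK, COND_OK };
	unsigned long long total = 0;
	enum cond worst = COND_OK;
	int i;

	for (i = 0; i < 4; i++) {
		total += len[i];
		if (cond[i] > worst)
			worst = cond[i];
	}
	printf("aggregated file: %llu bytes, condition %d\n", total, worst);
	return 0;
}
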
-static int zonefs_get_zone_info_cb(struct blk_zone *zone, unsigned int idx,
- void *data)
+static void zonefs_free_zgroups(struct super_block *sb)
{
- struct zonefs_zone_data *zd = data;
-
- /*
- * Count the number of usable zones: the first zone at index 0 contains
- * the super block and is ignored.
- */
- switch (zone->type) {
- case BLK_ZONE_TYPE_CONVENTIONAL:
- zone->wp = zone->start + zone->len;
- if (idx)
- zd->nr_zones[ZONEFS_ZTYPE_CNV]++;
- break;
- case BLK_ZONE_TYPE_SEQWRITE_REQ:
- case BLK_ZONE_TYPE_SEQWRITE_PREF:
- if (idx)
- zd->nr_zones[ZONEFS_ZTYPE_SEQ]++;
- break;
- default:
- zonefs_err(zd->sb, "Unsupported zone type 0x%x\n",
- zone->type);
- return -EIO;
- }
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ enum zonefs_ztype ztype;
- memcpy(&zd->zones[idx], zone, sizeof(struct blk_zone));
+ if (!sbi)
+ return;
- return 0;
+ for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
+ kvfree(sbi->s_zgroup[ztype].g_zones);
+ sbi->s_zgroup[ztype].g_zones = NULL;
+ }
}
-static int zonefs_get_zone_info(struct zonefs_zone_data *zd)
+/*
+ * Create a zone group and populate it with zone files.
+ */
+static int zonefs_init_zgroups(struct super_block *sb)
{
- struct block_device *bdev = zd->sb->s_bdev;
+ struct zonefs_zone_data zd;
+ enum zonefs_ztype ztype;
int ret;
- zd->zones = kvcalloc(bdev_nr_zones(bdev), sizeof(struct blk_zone),
- GFP_KERNEL);
- if (!zd->zones)
- return -ENOMEM;
-
- /* Get zones information from the device */
- ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
- zonefs_get_zone_info_cb, zd);
- if (ret < 0) {
- zonefs_err(zd->sb, "Zone report failed %d\n", ret);
- return ret;
- }
+ /* First get the device zone information */
+ memset(&zd, 0, sizeof(struct zonefs_zone_data));
+ zd.sb = sb;
+ ret = zonefs_get_zone_info(&zd);
+ if (ret)
+ goto cleanup;
- if (ret != bdev_nr_zones(bdev)) {
- zonefs_err(zd->sb, "Invalid zone report (%d/%u zones)\n",
- ret, bdev_nr_zones(bdev));
- return -EIO;
+ /* Allocate and initialize the zone groups */
+ for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
+ ret = zonefs_init_zgroup(sb, &zd, ztype);
+ if (ret) {
+ zonefs_info(sb,
+ "Zone group \"%s\" initialization failed\n",
+ zonefs_zgroup_name(ztype));
+ break;
+ }
}
- return 0;
-}
+cleanup:
+ zonefs_free_zone_info(&zd);
+ if (ret)
+ zonefs_free_zgroups(sb);
-static inline void zonefs_cleanup_zone_info(struct zonefs_zone_data *zd)
-{
- kvfree(zd->zones);
+ return ret;
}
/*
@@ -1785,6 +1191,50 @@ free_page:
return ret;
}
+static const struct super_operations zonefs_sops = {
+ .alloc_inode = zonefs_alloc_inode,
+ .free_inode = zonefs_free_inode,
+ .statfs = zonefs_statfs,
+ .remount_fs = zonefs_remount,
+ .show_options = zonefs_show_options,
+};
+
+static int zonefs_get_zgroup_inodes(struct super_block *sb)
+{
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ struct inode *dir_inode;
+ enum zonefs_ztype ztype;
+
+ for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
+ if (!sbi->s_zgroup[ztype].g_nr_zones)
+ continue;
+
+ dir_inode = zonefs_get_zgroup_inode(sb, ztype);
+ if (IS_ERR(dir_inode))
+ return PTR_ERR(dir_inode);
+
+ sbi->s_zgroup[ztype].g_inode = dir_inode;
+ }
+
+ return 0;
+}
+
+static void zonefs_release_zgroup_inodes(struct super_block *sb)
+{
+ struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ enum zonefs_ztype ztype;
+
+ if (!sbi)
+ return;
+
+ for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
+ if (sbi->s_zgroup[ztype].g_inode) {
+ iput(sbi->s_zgroup[ztype].g_inode);
+ sbi->s_zgroup[ztype].g_inode = NULL;
+ }
+ }
+}
+
/*
* Check that the device is zoned. If it is, get the list of zones and create
* sub-directories and files according to the device zone configuration and
@@ -1792,10 +1242,9 @@ free_page:
*/
static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
{
- struct zonefs_zone_data zd;
struct zonefs_sb_info *sbi;
struct inode *inode;
- enum zonefs_ztype t;
+ enum zonefs_ztype ztype;
int ret;
if (!bdev_is_zoned(sb->s_bdev)) {
@@ -1845,16 +1294,6 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
if (ret)
return ret;
- memset(&zd, 0, sizeof(struct zonefs_zone_data));
- zd.sb = sb;
- ret = zonefs_get_zone_info(&zd);
- if (ret)
- goto cleanup;
-
- ret = zonefs_sysfs_register(sb);
- if (ret)
- goto cleanup;
-
zonefs_info(sb, "Mounting %u zones", bdev_nr_zones(sb->s_bdev));
if (!sbi->s_max_wro_seq_files &&
@@ -1865,7 +1304,12 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
sbi->s_mount_opts &= ~ZONEFS_MNTOPT_EXPLICIT_OPEN;
}
- /* Create root directory inode */
+ /* Initialize the zone groups */
+ ret = zonefs_init_zgroups(sb);
+ if (ret)
+ goto cleanup;
+
+ /* Create the root directory inode */
ret = -ENOMEM;
inode = new_inode(sb);
if (!inode)
@@ -1875,22 +1319,37 @@ static int zonefs_fill_super(struct super_block *sb, void *data, int silent)
inode->i_mode = S_IFDIR | 0555;
inode->i_ctime = inode->i_mtime = inode->i_atime = current_time(inode);
inode->i_op = &zonefs_dir_inode_operations;
- inode->i_fop = &simple_dir_operations;
+ inode->i_fop = &zonefs_dir_operations;
+ inode->i_size = 2;
set_nlink(inode, 2);
+ for (ztype = 0; ztype < ZONEFS_ZTYPE_MAX; ztype++) {
+ if (sbi->s_zgroup[ztype].g_nr_zones) {
+ inc_nlink(inode);
+ inode->i_size++;
+ }
+ }
sb->s_root = d_make_root(inode);
if (!sb->s_root)
goto cleanup;
- /* Create and populate files in zone groups directories */
- for (t = 0; t < ZONEFS_ZTYPE_MAX; t++) {
- ret = zonefs_create_zgroup(&zd, t);
- if (ret)
- break;
- }
+ /*
+ * Take a reference on the zone groups directory inodes
+ * to keep them in the inode cache.
+ */
+ ret = zonefs_get_zgroup_inodes(sb);
+ if (ret)
+ goto cleanup;
+
+ ret = zonefs_sysfs_register(sb);
+ if (ret)
+ goto cleanup;
+
+ return 0;
cleanup:
- zonefs_cleanup_zone_info(&zd);
+ zonefs_release_zgroup_inodes(sb);
+ zonefs_free_zgroups(sb);
return ret;
}
@@ -1905,11 +1364,13 @@ static void zonefs_kill_super(struct super_block *sb)
{
struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
- if (sb->s_root)
- d_genocide(sb->s_root);
+ /* Release the reference on the zone group directory inodes */
+ zonefs_release_zgroup_inodes(sb);
- zonefs_sysfs_unregister(sb);
kill_block_super(sb);
+
+ zonefs_sysfs_unregister(sb);
+ zonefs_free_zgroups(sb);
kfree(sbi);
}
diff --git a/fs/zonefs/sysfs.c b/fs/zonefs/sysfs.c
index 9920689dc098..8ccb65c2b419 100644
--- a/fs/zonefs/sysfs.c
+++ b/fs/zonefs/sysfs.c
@@ -79,7 +79,7 @@ static const struct sysfs_ops zonefs_sysfs_attr_ops = {
.show = zonefs_sysfs_attr_show,
};
-static struct kobj_type zonefs_sb_ktype = {
+static const struct kobj_type zonefs_sb_ktype = {
.default_groups = zonefs_sysfs_groups,
.sysfs_ops = &zonefs_sysfs_attr_ops,
.release = zonefs_sysfs_sb_release,
diff --git a/fs/zonefs/trace.h b/fs/zonefs/trace.h
index 42edcfd393ed..9969db3a9c7d 100644
--- a/fs/zonefs/trace.h
+++ b/fs/zonefs/trace.h
@@ -20,8 +20,9 @@
#define show_dev(dev) MAJOR(dev), MINOR(dev)
TRACE_EVENT(zonefs_zone_mgmt,
- TP_PROTO(struct inode *inode, enum req_op op),
- TP_ARGS(inode, op),
+ TP_PROTO(struct super_block *sb, struct zonefs_zone *z,
+ enum req_op op),
+ TP_ARGS(sb, z, op),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
@@ -30,12 +31,12 @@ TRACE_EVENT(zonefs_zone_mgmt,
__field(sector_t, nr_sectors)
),
TP_fast_assign(
- __entry->dev = inode->i_sb->s_dev;
- __entry->ino = inode->i_ino;
+ __entry->dev = sb->s_dev;
+ __entry->ino =
+ z->z_sector >> ZONEFS_SB(sb)->s_zone_sectors_shift;
__entry->op = op;
- __entry->sector = ZONEFS_I(inode)->i_zsector;
- __entry->nr_sectors =
- ZONEFS_I(inode)->i_zone_size >> SECTOR_SHIFT;
+ __entry->sector = z->z_sector;
+ __entry->nr_sectors = z->z_size >> SECTOR_SHIFT;
),
TP_printk("bdev=(%d,%d), ino=%lu op=%s, sector=%llu, nr_sectors=%llu",
show_dev(__entry->dev), (unsigned long)__entry->ino,
@@ -58,9 +59,10 @@ TRACE_EVENT(zonefs_file_dio_append,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->sector = ZONEFS_I(inode)->i_zsector;
+ __entry->sector = zonefs_inode_zone(inode)->z_sector;
__entry->size = size;
- __entry->wpoffset = ZONEFS_I(inode)->i_wpoffset;
+ __entry->wpoffset =
+ zonefs_inode_zone(inode)->z_wpoffset;
__entry->ret = ret;
),
TP_printk("bdev=(%d, %d), ino=%lu, sector=%llu, size=%zu, wpoffset=%llu, ret=%zu",
diff --git a/fs/zonefs/zonefs.h b/fs/zonefs/zonefs.h
index 1dbe78119ff1..8175652241b5 100644
--- a/fs/zonefs/zonefs.h
+++ b/fs/zonefs/zonefs.h
@@ -39,31 +39,53 @@ static inline enum zonefs_ztype zonefs_zone_type(struct blk_zone *zone)
return ZONEFS_ZTYPE_SEQ;
}
-#define ZONEFS_ZONE_OPEN (1U << 0)
-#define ZONEFS_ZONE_ACTIVE (1U << 1)
-#define ZONEFS_ZONE_OFFLINE (1U << 2)
-#define ZONEFS_ZONE_READONLY (1U << 3)
+#define ZONEFS_ZONE_INIT_MODE (1U << 0)
+#define ZONEFS_ZONE_OPEN (1U << 1)
+#define ZONEFS_ZONE_ACTIVE (1U << 2)
+#define ZONEFS_ZONE_OFFLINE (1U << 3)
+#define ZONEFS_ZONE_READONLY (1U << 4)
+#define ZONEFS_ZONE_CNV (1U << 31)
/*
- * In-memory inode data.
+ * In-memory per-file inode zone data.
*/
-struct zonefs_inode_info {
- struct inode i_vnode;
+struct zonefs_zone {
+ /* Zone state flags */
+ unsigned int z_flags;
- /* File zone type */
- enum zonefs_ztype i_ztype;
+ /* Zone start sector (512B unit) */
+ sector_t z_sector;
- /* File zone start sector (512B unit) */
- sector_t i_zsector;
+ /* Zone size (bytes) */
+ loff_t z_size;
- /* File zone write pointer position (sequential zones only) */
- loff_t i_wpoffset;
+ /* Zone capacity (file maximum size, bytes) */
+ loff_t z_capacity;
- /* File maximum size */
- loff_t i_max_size;
+ /* Write pointer offset in the zone (sequential zones only, bytes) */
+ loff_t z_wpoffset;
+
+ /* Saved inode uid, gid and access rights */
+ umode_t z_mode;
+ kuid_t z_uid;
+ kgid_t z_gid;
+};
+
+/*
+ * In-memory zone group information: all zones of a group are exposed
+ * as files, one file per zone.
+ */
+struct zonefs_zone_group {
+ struct inode *g_inode;
+ unsigned int g_nr_zones;
+ struct zonefs_zone *g_zones;
+};
- /* File zone size */
- loff_t i_zone_size;
+/*
+ * In-memory inode data.
+ */
+struct zonefs_inode_info {
+ struct inode i_vnode;
/*
* To serialise fully against both syscall and mmap based IO and
@@ -82,7 +104,6 @@ struct zonefs_inode_info {
/* guarded by i_truncate_mutex */
unsigned int i_wr_refcnt;
- unsigned int i_flags;
};
static inline struct zonefs_inode_info *ZONEFS_I(struct inode *inode)
@@ -90,6 +111,31 @@ static inline struct zonefs_inode_info *ZONEFS_I(struct inode *inode)
return container_of(inode, struct zonefs_inode_info, i_vnode);
}
+static inline bool zonefs_zone_is_cnv(struct zonefs_zone *z)
+{
+ return z->z_flags & ZONEFS_ZONE_CNV;
+}
+
+static inline bool zonefs_zone_is_seq(struct zonefs_zone *z)
+{
+ return !zonefs_zone_is_cnv(z);
+}
+
+static inline struct zonefs_zone *zonefs_inode_zone(struct inode *inode)
+{
+ return inode->i_private;
+}
+
+static inline bool zonefs_inode_is_cnv(struct inode *inode)
+{
+ return zonefs_zone_is_cnv(zonefs_inode_zone(inode));
+}
+
+static inline bool zonefs_inode_is_seq(struct inode *inode)
+{
+ return zonefs_zone_is_seq(zonefs_inode_zone(inode));
+}
+
/*
* On-disk super block (block 0).
*/
@@ -181,7 +227,7 @@ struct zonefs_sb_info {
uuid_t s_uuid;
unsigned int s_zone_sectors_shift;
- unsigned int s_nr_files[ZONEFS_ZTYPE_MAX];
+ struct zonefs_zone_group s_zgroup[ZONEFS_ZTYPE_MAX];
loff_t s_blocks;
loff_t s_used_blocks;
@@ -209,6 +255,32 @@ static inline struct zonefs_sb_info *ZONEFS_SB(struct super_block *sb)
#define zonefs_warn(sb, format, args...) \
pr_warn("zonefs (%s) WARNING: " format, sb->s_id, ## args)
+/* In super.c */
+void zonefs_inode_account_active(struct inode *inode);
+int zonefs_inode_zone_mgmt(struct inode *inode, enum req_op op);
+void zonefs_i_size_write(struct inode *inode, loff_t isize);
+void zonefs_update_stats(struct inode *inode, loff_t new_isize);
+void __zonefs_io_error(struct inode *inode, bool write);
+
+static inline void zonefs_io_error(struct inode *inode, bool write)
+{
+ struct zonefs_inode_info *zi = ZONEFS_I(inode);
+
+ mutex_lock(&zi->i_truncate_mutex);
+ __zonefs_io_error(inode, write);
+ mutex_unlock(&zi->i_truncate_mutex);
+}
+
+/* In super.c */
+extern const struct inode_operations zonefs_dir_inode_operations;
+extern const struct file_operations zonefs_dir_operations;
+
+/* In file.c */
+extern const struct address_space_operations zonefs_file_aops;
+extern const struct file_operations zonefs_file_operations;
+int zonefs_file_truncate(struct inode *inode, loff_t isize);
+
+/* In sysfs.c */
int zonefs_sysfs_register(struct super_block *sb);
void zonefs_sysfs_unregister(struct super_block *sb);
int zonefs_sysfs_init(void);