author    Linus Torvalds <torvalds@linux-foundation.org>  2024-10-03 09:22:50 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2024-10-03 09:22:50 -0700
commit    20c2474fa515ea3ce39b92a37fc5d03cdfc509b8 (patch)
tree      a8855f3f914645e13276d089318b29cfe40c5a86 /fs
parent    7ec462100ef9142344ddbf86f2c3008b97acddbe (diff)
parent    a311a08a4237241fb5b9d219d3e33346de6e83e0 (diff)
Merge tag 'vfs-6.12-rc2.fixes.2' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs
Pull vfs fixes from Christian Brauner:

 "vfs:

   - Ensure that iter_folioq_get_pages() advances to the next slot,
     otherwise it will end up using the same folio with an out-of-bound
     offset.

  iomap:

   - Don't unshare delalloc extents which can't be reflinked, and thus
     can't be shared.

   - Constrain the file range passed to iomap_file_unshare() directly in
     iomap instead of requiring the callers to do it.

  netfs:

   - Use folioq_count instead of folioq_nr_slots to prevent an
     uninitialized value warning in netfs_clear_buffer().

   - Fix missing wakeup after issuing writes by scheduling the write
     collector only if all the subrequest queues are empty and thus no
     writes are pending.

   - Fix two minor documentation bugs"

* tag 'vfs-6.12-rc2.fixes.2' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
  iomap: constrain the file range passed to iomap_file_unshare
  iomap: don't bother unsharing delalloc extents
  netfs: Fix missing wakeup after issuing writes
  Documentation: add missing folio_queue entry
  folio_queue: fix documentation
  netfs: Fix a KMSAN uninit-value error in netfs_clear_buffer
  iov_iter: fix advancing slot in iter_folioq_get_pages()
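As an aside for readers of this pull: the range-constraining change amounts to clamping the requested (pos, len) window against the current i_size before iterating. A minimal user-space sketch of that arithmetic follows; clamp_unshare_range is an illustrative name, not a kernel function.

/* Sketch of the clamping now done inside the unshare helpers;
 * this is not kernel code, just the same arithmetic in isolation. */
#include <stdio.h>

typedef long long loff_t;

static loff_t clamp_unshare_range(loff_t pos, loff_t len, loff_t i_size)
{
        /* Nothing to do for a negative offset or one at/after EOF. */
        if (pos < 0 || pos >= i_size)
                return 0;

        /* Trim the request so it never extends past end-of-file. */
        return len < i_size - pos ? len : i_size - pos;
}

int main(void)
{
        printf("%lld\n", clamp_unshare_range(60, 4096, 100));  /* 40: trimmed to EOF */
        printf("%lld\n", clamp_unshare_range(200, 4096, 100)); /* 0: starts past EOF */
        return 0;
}

The same check also turns an unshare that starts at or beyond EOF into a no-op, which is what the fs/dax.c and fs/iomap/buffered-io.c hunks below implement.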
Diffstat (limited to 'fs')
-rw-r--r--  fs/dax.c                 6
-rw-r--r--  fs/iomap/buffered-io.c   9
-rw-r--r--  fs/netfs/misc.c          2
-rw-r--r--  fs/netfs/write_issue.c  42
4 files changed, 40 insertions(+), 19 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
index becb4a6920c6..c62acd2812f8 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1305,11 +1305,15 @@ int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
         struct iomap_iter iter = {
                 .inode = inode,
                 .pos = pos,
-                .len = len,
                 .flags = IOMAP_WRITE | IOMAP_UNSHARE | IOMAP_DAX,
         };
+        loff_t size = i_size_read(inode);
         int ret;
 
+        if (pos < 0 || pos >= size)
+                return 0;
+
+        iter.len = min(len, size - pos);
         while ((ret = iomap_iter(&iter, ops)) > 0)
                 iter.processed = dax_unshare_iter(&iter);
         return ret;
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 11ea747228ae..78ebd265f425 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1321,7 +1321,7 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
                 return length;
 
         /*
-         * Don't bother with holes or unwritten extents.
+         * Don't bother with delalloc reservations, holes or unwritten extents.
          *
          * Note that we use srcmap directly instead of iomap_iter_srcmap as
          * unsharing requires providing a separate source map, and the presence
@@ -1330,6 +1330,7 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
          * fork for XFS.
          */
         if (iter->srcmap.type == IOMAP_HOLE ||
+            iter->srcmap.type == IOMAP_DELALLOC ||
             iter->srcmap.type == IOMAP_UNWRITTEN)
                 return length;
@@ -1374,11 +1375,15 @@ iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
         struct iomap_iter iter = {
                 .inode = inode,
                 .pos = pos,
-                .len = len,
                 .flags = IOMAP_WRITE | IOMAP_UNSHARE,
         };
+        loff_t size = i_size_read(inode);
         int ret;
 
+        if (pos < 0 || pos >= size)
+                return 0;
+
+        iter.len = min(len, size - pos);
         while ((ret = iomap_iter(&iter, ops)) > 0)
                 iter.processed = iomap_unshare_iter(&iter);
         return ret;
diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index 63280791de3b..78fe5796b2b2 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -102,7 +102,7 @@ void netfs_clear_buffer(struct netfs_io_request *rreq)
 
         while ((p = rreq->buffer)) {
                 rreq->buffer = p->next;
-                for (int slot = 0; slot < folioq_nr_slots(p); slot++) {
+                for (int slot = 0; slot < folioq_count(p); slot++) {
                         struct folio *folio = folioq_folio(p, slot);
                         if (!folio)
                                 continue;
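For context on the loop-bound change above, here is a simplified stand-in (not the kernel's struct folio_queue, and assuming folioq_nr_slots() reports a segment's fixed capacity while folioq_count() reports how many slots are occupied): iterating to the capacity reads slots that were never written, which is the kind of access KMSAN flags.

/* Simplified model, not the kernel's folio_queue definitions. */
#define SEGMENT_SLOTS 64                /* fixed capacity of a segment */

struct fake_folioq {
        void *slots[SEGMENT_SLOTS];     /* only the first 'count' entries are set */
        unsigned int count;             /* slots actually occupied */
};

/* Bounding the loop by the occupancy only touches initialized entries;
 * looping all the way to SEGMENT_SLOTS would read slots that were
 * never written. */
static void segment_clear(struct fake_folioq *q)
{
        for (unsigned int i = 0; i < q->count; i++)
                q->slots[i] = NULL;
        q->count = 0;
}

int main(void)
{
        struct fake_folioq q = { .count = 0 };
        segment_clear(&q);      /* no occupied slots, so nothing is touched */
        return 0;
}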
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index 6293f547e4c3..bf6d507578e5 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -509,6 +509,30 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
 }
 
 /*
+ * End the issuing of writes, letting the collector know we're done.
+ */
+static void netfs_end_issue_write(struct netfs_io_request *wreq)
+{
+        bool needs_poke = true;
+
+        smp_wmb(); /* Write subreq lists before ALL_QUEUED. */
+        set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+
+        for (int s = 0; s < NR_IO_STREAMS; s++) {
+                struct netfs_io_stream *stream = &wreq->io_streams[s];
+
+                if (!stream->active)
+                        continue;
+                if (!list_empty(&stream->subrequests))
+                        needs_poke = false;
+                netfs_issue_write(wreq, stream);
+        }
+
+        if (needs_poke)
+                netfs_wake_write_collector(wreq, false);
+}
+
+/*
  * Write some of the pending data back to the server
  */
 int netfs_writepages(struct address_space *mapping,
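Restating the decision netfs_end_issue_write() makes as a stand-alone sketch (struct and helper names here are invented, not netfs API): the issuer wakes the collector itself only when every active stream's queue is already empty, because otherwise a completing subrequest will perform the wakeup once ALL_QUEUED is visible.

#include <stdbool.h>

struct fake_stream {
        bool active;            /* stream participates in this write */
        bool queue_empty;       /* no subrequests currently queued */
};

/* True when the issuer must wake the collector itself: nothing is in
 * flight, so no completion handler will ever do it. */
static bool issuer_must_wake(const struct fake_stream *streams, int n)
{
        for (int i = 0; i < n; i++)
                if (streams[i].active && !streams[i].queue_empty)
                        return false;   /* a pending subrequest will wake it */
        return true;
}

int main(void)
{
        struct fake_stream s[2] = {
                { .active = true,  .queue_empty = true },
                { .active = false, .queue_empty = true },
        };
        return issuer_must_wake(s, 2) ? 0 : 1;  /* 0: issuer must wake */
}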
@@ -559,10 +583,7 @@ int netfs_writepages(struct address_space *mapping,
                         break;
         } while ((folio = writeback_iter(mapping, wbc, folio, &error)));
 
-        for (int s = 0; s < NR_IO_STREAMS; s++)
-                netfs_issue_write(wreq, &wreq->io_streams[s]);
-        smp_wmb(); /* Write lists before ALL_QUEUED. */
-        set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+        netfs_end_issue_write(wreq);
 
         mutex_unlock(&ictx->wb_lock);
@@ -650,10 +671,7 @@ int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_contr
         if (writethrough_cache)
                 netfs_write_folio(wreq, wbc, writethrough_cache);
 
-        netfs_issue_write(wreq, &wreq->io_streams[0]);
-        netfs_issue_write(wreq, &wreq->io_streams[1]);
-        smp_wmb(); /* Write lists before ALL_QUEUED. */
-        set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+        netfs_end_issue_write(wreq);
 
         mutex_unlock(&ictx->wb_lock);
@@ -699,13 +717,7 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
                         break;
         }
 
-        netfs_issue_write(wreq, upload);
-
-        smp_wmb(); /* Write lists before ALL_QUEUED. */
-        set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
-        if (list_empty(&upload->subrequests))
-                netfs_wake_write_collector(wreq, false);
-
+        netfs_end_issue_write(wreq);
         _leave(" = %d", error);
         return error;
 }