path: root/fs/netfs/read_pgpriv2.c
// SPDX-License-Identifier: GPL-2.0-only
/* Read with PG_private_2 [DEPRECATED].
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * [DEPRECATED] Mark a folio as requiring copy-to-cache using PG_private_2.  The
 * third mark in the folio queue is used to indicate that this folio needs
 * writing.
 */
void netfs_pgpriv2_mark_copy_to_cache(struct netfs_io_subrequest *subreq,
				      struct netfs_io_request *rreq,
				      struct folio_queue *folioq,
				      int slot)
{
	struct folio *folio = folioq_folio(folioq, slot);

	trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
	folio_start_private_2(folio);
	folioq_mark3(folioq, slot);
}

/*
 * [DEPRECATED] Cancel PG_private_2 on all marked folios in the event of an
 * unrecoverable error.
 */
static void netfs_pgpriv2_cancel(struct folio_queue *folioq)
{
	struct folio *folio;
	int slot;

	while (folioq) {
		if (!folioq->marks3) {
			folioq = folioq->next;
			continue;
		}

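		/* Clear the lowest-numbered remaining mark in this segment and
		 * drop PG_private_2 on that folio; the loop repeats until the
		 * segment has no marks left.
		 */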
		slot = __ffs(folioq->marks3);
		folio = folioq_folio(folioq, slot);

		trace_netfs_folio(folio, netfs_folio_trace_cancel_copy);
		folio_end_private_2(folio);
		folioq_unmark3(folioq, slot);
	}
}

/*
 * [DEPRECATED] Copy a folio to the cache with PG_private_2 set.
 */
static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio *folio)
{
	struct netfs_io_stream *cache  = &wreq->io_streams[1];
	size_t fsize = folio_size(folio), flen = fsize;
	loff_t fpos = folio_pos(folio), i_size;
	bool to_eof = false;

	_enter("");

	/* netfs_perform_write() may shift i_size around the page, or move it
	 * from within the page out to beyond it, but it cannot move i_size
	 * into or through the page since we have it locked.
	 */
	i_size = i_size_read(wreq->inode);

	if (fpos >= i_size) {
		/* mmap beyond eof. */
		_debug("beyond eof");
		folio_end_private_2(folio);
		return 0;
	}

	if (fpos + fsize > wreq->i_size)
		wreq->i_size = i_size;

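	/* Trim the length to be written if the folio straddles the EOF and
	 * note whether this write reaches the EOF.
	 */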
	if (flen > i_size - fpos) {
		flen = i_size - fpos;
		to_eof = true;
	} else if (flen == i_size - fpos) {
		to_eof = true;
	}

	_debug("folio %zx %zx", flen, fsize);

	trace_netfs_folio(folio, netfs_folio_trace_store_copy);

	/* Attach the folio to the rolling buffer. */
	if (netfs_buffer_append_folio(wreq, folio, false) < 0)
		return -ENOMEM;

	cache->submit_extendable_to = fsize;
	cache->submit_off = 0;
	cache->submit_len = flen;

	/* Attach the folio to one or more subrequests.  For a big folio, we
	 * could end up with thousands of subrequests if the wsize is small -
	 * but we might need to wait during the creation of subrequests for
	 * network resources (eg. SMB credits).
	 */
	do {
		ssize_t part;

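		/* Point the write iterator at the part of the folio that still
		 * needs submitting and note how far issuing has reached.
		 */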
		wreq->io_iter.iov_offset = cache->submit_off;

		atomic64_set(&wreq->issued_to, fpos + cache->submit_off);
		cache->submit_extendable_to = fsize - cache->submit_off;
		part = netfs_advance_write(wreq, cache, fpos + cache->submit_off,
					   cache->submit_len, to_eof);
		cache->submit_off += part;
		if (part > cache->submit_len)
			cache->submit_len = 0;
		else
			cache->submit_len -= part;
	} while (cache->submit_len > 0);

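	/* Advance the iterator over the whole folio, including any tail that
	 * was trimmed off beyond the EOF.
	 */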
	wreq->io_iter.iov_offset = 0;
	iov_iter_advance(&wreq->io_iter, fsize);
	atomic64_set(&wreq->issued_to, fpos + fsize);

	if (flen < fsize)
		netfs_issue_write(wreq, cache);

	_leave(" = 0");
	return 0;
}

/*
 * [DEPRECATED] Go through the buffer and write to the cache any folios that
 * are marked with the third mark.
 */
void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq)
{
	struct netfs_io_request *wreq;
	struct folio_queue *folioq;
	struct folio *folio;
	int error = 0;
	int slot = 0;

	_enter("");

	if (!fscache_resources_valid(&rreq->cache_resources))
		goto couldnt_start;

	/* Need the first folio to be able to set up the op. */
	for (folioq = rreq->buffer; folioq; folioq = folioq->next) {
		if (folioq->marks3) {
			slot = __ffs(folioq->marks3);
			break;
		}
	}
	if (!folioq)
		return;
	folio = folioq_folio(folioq, slot);

	wreq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio),
				      NETFS_PGPRIV2_COPY_TO_CACHE);
	if (IS_ERR(wreq)) {
		kleave(" [create %ld]", PTR_ERR(wreq));
		goto couldnt_start;
	}

	trace_netfs_write(wreq, netfs_write_trace_copy_to_cache);
	netfs_stat(&netfs_n_wh_copy_to_cache);

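	/* Copy each marked folio to the cache, clearing its mark as we go,
	 * stopping on error or when no marked folios remain.
	 */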
	for (;;) {
		error = netfs_pgpriv2_copy_folio(wreq, folio);
		if (error < 0)
			break;

		folioq_unmark3(folioq, slot);
		if (!folioq->marks3) {
			folioq = folioq->next;
			if (!folioq)
				break;
		}

		slot = __ffs(folioq->marks3);
		folio = folioq_folio(folioq, slot);
	}

	netfs_issue_write(wreq, &wreq->io_streams[1]);
	smp_wmb(); /* Write lists before ALL_QUEUED. */
	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);

	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
	_leave(" = %d", error);
couldnt_start:
	netfs_pgpriv2_cancel(rreq->buffer);
}

/*
 * [DEPRECATED] Remove the PG_private_2 mark from any folios we've finished
 * copying.
 */
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
{
	struct folio_queue *folioq = wreq->buffer;
	unsigned long long collected_to = wreq->collected_to;
	unsigned int slot = wreq->buffer_head_slot;
	bool made_progress = false;

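	/* If the head slot has run off the end of its segment, dispose of that
	 * segment and move on to the next one.
	 */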
	if (slot >= folioq_nr_slots(folioq)) {
		folioq = netfs_delete_buffer_head(wreq);
		slot = 0;
	}

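	/* Work through the buffer, ending PG_private_2 on each folio that has
	 * been written to the cache up to the collection point.
	 */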
	for (;;) {
		struct folio *folio;
		unsigned long long fpos, fend;
		size_t fsize, flen;

		folio = folioq_folio(folioq, slot);
		if (WARN_ONCE(!folio_test_private_2(folio),
			      "R=%08x: folio %lx is not marked private_2\n",
			      wreq->debug_id, folio->index))
			trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);

		fpos = folio_pos(folio);
		fsize = folio_size(folio);
		flen = fsize;

		fend = min_t(unsigned long long, fpos + flen, wreq->i_size);

		trace_netfs_collect_folio(wreq, folio, fend, collected_to);

		/* Unlock any folio we've transferred all of. */
		if (collected_to < fend)
			break;

		trace_netfs_folio(folio, netfs_folio_trace_end_copy);
		folio_end_private_2(folio);
		wreq->cleaned_to = fpos + fsize;
		made_progress = true;

		/* Clean up the head folioq.  If we clear an entire folioq, then
		 * we can get rid of it provided it's not also the tail folioq
		 * being filled by the issuer.
		 */
		folioq_clear(folioq, slot);
		slot++;
		if (slot >= folioq_nr_slots(folioq)) {
			if (READ_ONCE(wreq->buffer_tail) == folioq)
				break;
			folioq = netfs_delete_buffer_head(wreq);
			slot = 0;
		}

		if (fpos + fsize >= collected_to)
			break;
	}

	wreq->buffer = folioq;
	wreq->buffer_head_slot = slot;
	return made_progress;
}