Skip to content

Commit cd0277e

Browse files
dhowells authored and brauner committed
netfs: Use new folio_queue data type and iterator instead of xarray iter
Make the netfs write-side routines use the new folio_queue struct to hold a rolling buffer of folios, with the issuer adding folios at the tail and the collector removing them from the head as they're processed instead of using an xarray. This will allow a subsequent patch to simplify the write collector. The primary mark (as tested by folioq_is_marked()) is used to note if the corresponding folio needs putting. Signed-off-by: David Howells <[email protected]> cc: Jeff Layton <[email protected]> cc: [email protected] cc: [email protected] Link: https://lore.kernel.org/r/[email protected]/ # v2 Signed-off-by: Christian Brauner <[email protected]>
1 parent c45ebd6 commit cd0277e

File tree

8 files changed

+150
-61
lines changed

8 files changed

+150
-61
lines changed

fs/netfs/internal.h

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@
77

88
#include <linux/slab.h>
99
#include <linux/seq_file.h>
10+
#include <linux/folio_queue.h>
1011
#include <linux/netfs.h>
1112
#include <linux/fscache.h>
1213
#include <linux/fscache-cache.h>
@@ -64,6 +65,10 @@ static inline void netfs_proc_del_rreq(struct netfs_io_request *rreq) {}
6465
/*
6566
* misc.c
6667
*/
68+
int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
69+
bool needs_put);
70+
struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq);
71+
void netfs_clear_buffer(struct netfs_io_request *rreq);
6772

6873
/*
6974
* objects.c
@@ -120,6 +125,7 @@ extern atomic_t netfs_n_wh_write_done;
120125
extern atomic_t netfs_n_wh_write_failed;
121126
extern atomic_t netfs_n_wb_lock_skip;
122127
extern atomic_t netfs_n_wb_lock_wait;
128+
extern atomic_t netfs_n_folioq;
123129

124130
int netfs_stats_show(struct seq_file *m, void *v);
125131

@@ -153,7 +159,8 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
153159
loff_t start,
154160
enum netfs_io_origin origin);
155161
void netfs_reissue_write(struct netfs_io_stream *stream,
156-
struct netfs_io_subrequest *subreq);
162+
struct netfs_io_subrequest *subreq,
163+
struct iov_iter *source);
157164
int netfs_advance_write(struct netfs_io_request *wreq,
158165
struct netfs_io_stream *stream,
159166
loff_t start, size_t len, bool to_eof);

fs/netfs/misc.c

Lines changed: 76 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -8,6 +8,82 @@
88
#include <linux/swap.h>
99
#include "internal.h"
1010

11+
/*
12+
* Append a folio to the rolling queue.
13+
*/
14+
int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
15+
bool needs_put)
16+
{
17+
struct folio_queue *tail = rreq->buffer_tail;
18+
unsigned int slot, order = folio_order(folio);
19+
20+
if (WARN_ON_ONCE(!rreq->buffer && tail) ||
21+
WARN_ON_ONCE(rreq->buffer && !tail))
22+
return -EIO;
23+
24+
if (!tail || folioq_full(tail)) {
25+
tail = kmalloc(sizeof(*tail), GFP_NOFS);
26+
if (!tail)
27+
return -ENOMEM;
28+
netfs_stat(&netfs_n_folioq);
29+
folioq_init(tail);
30+
tail->prev = rreq->buffer_tail;
31+
if (tail->prev)
32+
tail->prev->next = tail;
33+
rreq->buffer_tail = tail;
34+
if (!rreq->buffer) {
35+
rreq->buffer = tail;
36+
iov_iter_folio_queue(&rreq->io_iter, ITER_SOURCE, tail, 0, 0, 0);
37+
}
38+
rreq->buffer_tail_slot = 0;
39+
}
40+
41+
rreq->io_iter.count += PAGE_SIZE << order;
42+
43+
slot = folioq_append(tail, folio);
44+
/* Store the counter after setting the slot. */
45+
smp_store_release(&rreq->buffer_tail_slot, slot);
46+
return 0;
47+
}
48+
49+
/*
50+
* Delete the head of a rolling queue.
51+
*/
52+
struct folio_queue *netfs_delete_buffer_head(struct netfs_io_request *wreq)
53+
{
54+
struct folio_queue *head = wreq->buffer, *next = head->next;
55+
56+
if (next)
57+
next->prev = NULL;
58+
netfs_stat_d(&netfs_n_folioq);
59+
kfree(head);
60+
wreq->buffer = next;
61+
return next;
62+
}
63+
64+
/*
65+
* Clear out a rolling queue.
66+
*/
67+
void netfs_clear_buffer(struct netfs_io_request *rreq)
68+
{
69+
struct folio_queue *p;
70+
71+
while ((p = rreq->buffer)) {
72+
rreq->buffer = p->next;
73+
for (int slot = 0; slot < folioq_nr_slots(p); slot++) {
74+
struct folio *folio = folioq_folio(p, slot);
75+
if (!folio)
76+
continue;
77+
if (folioq_is_marked(p, slot)) {
78+
trace_netfs_folio(folio, netfs_folio_trace_put);
79+
folio_put(folio);
80+
}
81+
}
82+
netfs_stat_d(&netfs_n_folioq);
83+
kfree(p);
84+
}
85+
}
86+
1187
/**
1288
* netfs_dirty_folio - Mark folio dirty and pin a cache object for writeback
1389
* @mapping: The mapping the folio belongs to.

fs/netfs/objects.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -141,6 +141,7 @@ static void netfs_free_request(struct work_struct *work)
141141
}
142142
kvfree(rreq->direct_bv);
143143
}
144+
netfs_clear_buffer(rreq);
144145

145146
if (atomic_dec_and_test(&ictx->io_count))
146147
wake_up_var(&ictx->io_count);

fs/netfs/stats.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,6 +41,7 @@ atomic_t netfs_n_wh_write_done;
4141
atomic_t netfs_n_wh_write_failed;
4242
atomic_t netfs_n_wb_lock_skip;
4343
atomic_t netfs_n_wb_lock_wait;
44+
atomic_t netfs_n_folioq;
4445

4546
int netfs_stats_show(struct seq_file *m, void *v)
4647
{
@@ -76,9 +77,10 @@ int netfs_stats_show(struct seq_file *m, void *v)
7677
atomic_read(&netfs_n_wh_write),
7778
atomic_read(&netfs_n_wh_write_done),
7879
atomic_read(&netfs_n_wh_write_failed));
79-
seq_printf(m, "Objs : rr=%u sr=%u wsc=%u\n",
80+
seq_printf(m, "Objs : rr=%u sr=%u foq=%u wsc=%u\n",
8081
atomic_read(&netfs_n_rh_rreq),
8182
atomic_read(&netfs_n_rh_sreq),
83+
atomic_read(&netfs_n_folioq),
8284
atomic_read(&netfs_n_wh_wstream_conflict));
8385
seq_printf(m, "WbLock : skip=%u wait=%u\n",
8486
atomic_read(&netfs_n_wb_lock_skip),

fs/netfs/write_collect.c

Lines changed: 44 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -81,56 +81,32 @@ int netfs_folio_written_back(struct folio *folio)
8181
return gcount;
8282
}
8383

84-
/*
85-
* Get hold of a folio we have under writeback. We don't want to get the
86-
* refcount on it.
87-
*/
88-
static struct folio *netfs_writeback_lookup_folio(struct netfs_io_request *wreq, loff_t pos)
89-
{
90-
XA_STATE(xas, &wreq->mapping->i_pages, pos / PAGE_SIZE);
91-
struct folio *folio;
92-
93-
rcu_read_lock();
94-
95-
for (;;) {
96-
xas_reset(&xas);
97-
folio = xas_load(&xas);
98-
if (xas_retry(&xas, folio))
99-
continue;
100-
101-
if (!folio || xa_is_value(folio))
102-
kdebug("R=%08x: folio %lx (%llx) not present",
103-
wreq->debug_id, xas.xa_index, pos / PAGE_SIZE);
104-
BUG_ON(!folio || xa_is_value(folio));
105-
106-
if (folio == xas_reload(&xas))
107-
break;
108-
}
109-
110-
rcu_read_unlock();
111-
112-
if (WARN_ONCE(!folio_test_writeback(folio),
113-
"R=%08x: folio %lx is not under writeback\n",
114-
wreq->debug_id, folio->index)) {
115-
trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);
116-
}
117-
return folio;
118-
}
119-
12084
/*
12185
* Unlock any folios we've finished with.
12286
*/
12387
static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
12488
unsigned long long collected_to,
12589
unsigned int *notes)
12690
{
91+
struct folio_queue *folioq = wreq->buffer;
92+
unsigned int slot = wreq->buffer_head_slot;
93+
94+
if (slot >= folioq_nr_slots(folioq)) {
95+
folioq = netfs_delete_buffer_head(wreq);
96+
slot = 0;
97+
}
98+
12799
for (;;) {
128100
struct folio *folio;
129101
struct netfs_folio *finfo;
130102
unsigned long long fpos, fend;
131103
size_t fsize, flen;
132104

133-
folio = netfs_writeback_lookup_folio(wreq, wreq->cleaned_to);
105+
folio = folioq_folio(folioq, slot);
106+
if (WARN_ONCE(!folio_test_writeback(folio),
107+
"R=%08x: folio %lx is not under writeback\n",
108+
wreq->debug_id, folio->index))
109+
trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);
134110

135111
fpos = folio_pos(folio);
136112
fsize = folio_size(folio);
@@ -155,9 +131,25 @@ static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
155131
wreq->cleaned_to = fpos + fsize;
156132
*notes |= MADE_PROGRESS;
157133

134+
/* Clean up the head folioq. If we clear an entire folioq, then
135+
* we can get rid of it provided it's not also the tail folioq
136+
* being filled by the issuer.
137+
*/
138+
folioq_clear(folioq, slot);
139+
slot++;
140+
if (slot >= folioq_nr_slots(folioq)) {
141+
if (READ_ONCE(wreq->buffer_tail) == folioq)
142+
break;
143+
folioq = netfs_delete_buffer_head(wreq);
144+
slot = 0;
145+
}
146+
158147
if (fpos + fsize >= collected_to)
159148
break;
160149
}
150+
151+
wreq->buffer = folioq;
152+
wreq->buffer_head_slot = slot;
161153
}
162154

163155
/*
@@ -188,9 +180,12 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
188180
if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
189181
break;
190182
if (__test_and_clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
183+
struct iov_iter source = subreq->io_iter;
184+
185+
iov_iter_revert(&source, subreq->len - source.count);
191186
__set_bit(NETFS_SREQ_RETRYING, &subreq->flags);
192187
netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
193-
netfs_reissue_write(stream, subreq);
188+
netfs_reissue_write(stream, subreq, &source);
194189
}
195190
}
196191
return;
@@ -200,6 +195,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
200195

201196
do {
202197
struct netfs_io_subrequest *subreq = NULL, *from, *to, *tmp;
198+
struct iov_iter source;
203199
unsigned long long start, len;
204200
size_t part;
205201
bool boundary = false;
@@ -227,6 +223,14 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
227223
len += to->len;
228224
}
229225

226+
/* Determine the set of buffers we're going to use. Each
227+
* subreq gets a subset of a single overall contiguous buffer.
228+
*/
229+
source = from->io_iter;
230+
iov_iter_revert(&source, subreq->len - source.count);
231+
iov_iter_advance(&source, from->transferred);
232+
source.count = len;
233+
230234
/* Work through the sublist. */
231235
subreq = from;
232236
list_for_each_entry_from(subreq, &stream->subrequests, rreq_link) {
@@ -249,7 +253,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
249253
boundary = true;
250254

251255
netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
252-
netfs_reissue_write(stream, subreq);
256+
netfs_reissue_write(stream, subreq, &source);
253257
if (subreq == to)
254258
break;
255259
}
@@ -316,7 +320,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
316320
boundary = false;
317321
}
318322

319-
netfs_reissue_write(stream, subreq);
323+
netfs_reissue_write(stream, subreq, &source);
320324
if (!len)
321325
break;
322326

fs/netfs/write_issue.c

Lines changed: 13 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -213,37 +213,32 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
213213
* netfs_write_subrequest_terminated() when complete.
214214
*/
215215
static void netfs_do_issue_write(struct netfs_io_stream *stream,
216-
struct netfs_io_subrequest *subreq)
216+
struct netfs_io_subrequest *subreq,
217+
struct iov_iter *source)
217218
{
218219
struct netfs_io_request *wreq = subreq->rreq;
220+
size_t size = subreq->len - subreq->transferred;
219221

220222
_enter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len);
221223

222224
if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
223225
return netfs_write_subrequest_terminated(subreq, subreq->error, false);
224226

225227
// TODO: Use encrypted buffer
226-
if (test_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags)) {
227-
subreq->io_iter = wreq->io_iter;
228-
iov_iter_advance(&subreq->io_iter,
229-
subreq->start + subreq->transferred - wreq->start);
230-
iov_iter_truncate(&subreq->io_iter,
231-
subreq->len - subreq->transferred);
232-
} else {
233-
iov_iter_xarray(&subreq->io_iter, ITER_SOURCE, &wreq->mapping->i_pages,
234-
subreq->start + subreq->transferred,
235-
subreq->len - subreq->transferred);
236-
}
228+
subreq->io_iter = *source;
229+
iov_iter_advance(source, size);
230+
iov_iter_truncate(&subreq->io_iter, size);
237231

238232
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
239233
stream->issue_write(subreq);
240234
}
241235

242236
void netfs_reissue_write(struct netfs_io_stream *stream,
243-
struct netfs_io_subrequest *subreq)
237+
struct netfs_io_subrequest *subreq,
238+
struct iov_iter *source)
244239
{
245240
__set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
246-
netfs_do_issue_write(stream, subreq);
241+
netfs_do_issue_write(stream, subreq, source);
247242
}
248243

249244
static void netfs_issue_write(struct netfs_io_request *wreq,
@@ -257,7 +252,7 @@ static void netfs_issue_write(struct netfs_io_request *wreq,
257252

258253
if (subreq->start + subreq->len > wreq->start + wreq->submitted)
259254
WRITE_ONCE(wreq->submitted, subreq->start + subreq->len - wreq->start);
260-
netfs_do_issue_write(stream, subreq);
255+
netfs_do_issue_write(stream, subreq, &wreq->io_iter);
261256
}
262257

263258
/*
@@ -422,6 +417,9 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
422417
trace_netfs_folio(folio, netfs_folio_trace_store_plus);
423418
}
424419

420+
/* Attach the folio to the rolling buffer. */
421+
netfs_buffer_append_folio(wreq, folio, false);
422+
425423
/* Move the submission point forward to allow for write-streaming data
426424
* not starting at the front of the page. We don't do write-streaming
427425
* with the cache as the cache requires DIO alignment.

include/linux/netfs.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -38,10 +38,6 @@ static inline void folio_start_private_2(struct folio *folio)
3838
folio_set_private_2(folio);
3939
}
4040

41-
/* Marks used on xarray-based buffers */
42-
#define NETFS_BUF_PUT_MARK XA_MARK_0 /* - Page needs putting */
43-
#define NETFS_BUF_PAGECACHE_MARK XA_MARK_1 /* - Page needs wb/dirty flag wrangling */
44-
4541
enum netfs_io_source {
4642
NETFS_SOURCE_UNKNOWN,
4743
NETFS_FILL_WITH_ZEROES,
@@ -233,6 +229,8 @@ struct netfs_io_request {
233229
struct netfs_io_stream io_streams[2]; /* Streams of parallel I/O operations */
234230
#define NR_IO_STREAMS 2 //wreq->nr_io_streams
235231
struct netfs_group *group; /* Writeback group being written back */
232+
struct folio_queue *buffer; /* Head of I/O buffer */
233+
struct folio_queue *buffer_tail; /* Tail of I/O buffer */
236234
struct iov_iter iter; /* Unencrypted-side iterator */
237235
struct iov_iter io_iter; /* I/O (Encrypted-side) iterator */
238236
void *netfs_priv; /* Private data for the netfs */
@@ -254,6 +252,8 @@ struct netfs_io_request {
254252
short error; /* 0 or error that occurred */
255253
enum netfs_io_origin origin; /* Origin of the request */
256254
bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */
255+
u8 buffer_head_slot; /* First slot in ->buffer */
256+
u8 buffer_tail_slot; /* Next slot in ->buffer_tail */
257257
unsigned long long i_size; /* Size of the file */
258258
unsigned long long start; /* Start position */
259259
atomic64_t issued_to; /* Write issuer folio cursor */

0 commit comments

Comments
 (0)