 #include "internal.h"
 
 /*
- * Append a folio to the rolling queue.
+ * Make sure there's space in the rolling queue.
  */
-int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
-			      bool needs_put)
+struct folio_queue *netfs_buffer_make_space(struct netfs_io_request *rreq)
 {
-	struct folio_queue *tail = rreq->buffer_tail;
-	unsigned int slot, order = folio_order(folio);
+	struct folio_queue *tail = rreq->buffer_tail, *prev;
+	unsigned int prev_nr_slots = 0;
 
 	if (WARN_ON_ONCE(!rreq->buffer && tail) ||
 	    WARN_ON_ONCE(rreq->buffer && !tail))
-		return -EIO;
-
-	if (!tail || folioq_full(tail)) {
-		tail = kmalloc(sizeof(*tail), GFP_NOFS);
-		if (!tail)
-			return -ENOMEM;
-		netfs_stat(&netfs_n_folioq);
-		folioq_init(tail);
-		tail->prev = rreq->buffer_tail;
-		if (tail->prev)
-			tail->prev->next = tail;
-		rreq->buffer_tail = tail;
-		if (!rreq->buffer) {
-			rreq->buffer = tail;
-			iov_iter_folio_queue(&rreq->io_iter, ITER_SOURCE, tail, 0, 0, 0);
+		return ERR_PTR(-EIO);
+
+	prev = tail;
+	if (prev) {
+		if (!folioq_full(tail))
+			return tail;
+		prev_nr_slots = folioq_nr_slots(tail);
+	}
+
+	tail = kmalloc(sizeof(*tail), GFP_NOFS);
+	if (!tail)
+		return ERR_PTR(-ENOMEM);
+	netfs_stat(&netfs_n_folioq);
+	folioq_init(tail);
+	tail->prev = prev;
+	if (prev)
+		/* [!] NOTE: After we set prev->next, the consumer is entirely
+		 * at liberty to delete prev.
+		 */
+		WRITE_ONCE(prev->next, tail);
+
+	rreq->buffer_tail = tail;
+	if (!rreq->buffer) {
+		rreq->buffer = tail;
+		iov_iter_folio_queue(&rreq->io_iter, ITER_SOURCE, tail, 0, 0, 0);
+	} else {
+		/* Make sure we don't leave the master iterator pointing to a
+		 * block that might get immediately consumed.
+		 */
+		if (rreq->io_iter.folioq == prev &&
+		    rreq->io_iter.folioq_slot == prev_nr_slots) {
+			rreq->io_iter.folioq = tail;
+			rreq->io_iter.folioq_slot = 0;
 		}
-		rreq->buffer_tail_slot = 0;
 	}
+	rreq->buffer_tail_slot = 0;
+	return tail;
+}
+
+/*
+ * Append a folio to the rolling queue.
+ */
+int netfs_buffer_append_folio(struct netfs_io_request *rreq, struct folio *folio,
+			      bool needs_put)
+{
+	struct folio_queue *tail;
+	unsigned int slot, order = folio_order(folio);
+
+	tail = netfs_buffer_make_space(rreq);
+	if (IS_ERR(tail))
+		return PTR_ERR(tail);
 
 	rreq->io_iter.count += PAGE_SIZE << order;
 
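The diff splits the queue-extension logic out of netfs_buffer_append_folio() into netfs_buffer_make_space(), which returns the usable tail folio_queue (or an ERR_PTR-encoded error), leaving the append path trivial. As a rough illustration of that "make space, then fill" pattern only, here is a minimal userspace sketch; it is not the kernel code, and all demo_* names, the segment size, and the error-pointer helpers are hypothetical stand-ins for folio_queue, folioq_full() and the kernel's ERR_PTR()/IS_ERR() machinery.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_SEG_SLOTS 4

/* One fixed-size segment of the rolling queue. */
struct demo_seg {
	struct demo_seg *prev, *next;
	int nr_used;
	int slots[DEMO_SEG_SLOTS];
};

struct demo_queue {
	struct demo_seg *head, *tail;
};

/* Poor man's ERR_PTR()/IS_ERR()/PTR_ERR() so the error handling mirrors
 * the kernel idiom used above. */
static void *demo_err_ptr(long err) { return (void *)err; }
static int demo_is_err(const void *p) { return (uintptr_t)p >= (uintptr_t)-4095; }
static long demo_ptr_err(const void *p) { return (long)(intptr_t)p; }

/* Ensure the tail segment has a free slot, allocating and linking in a
 * new segment only when the current tail is full. */
static struct demo_seg *demo_make_space(struct demo_queue *q)
{
	struct demo_seg *tail = q->tail, *seg;

	if (tail && tail->nr_used < DEMO_SEG_SLOTS)
		return tail;

	seg = calloc(1, sizeof(*seg));
	if (!seg)
		return demo_err_ptr(-ENOMEM);
	seg->prev = tail;
	if (tail)
		tail->next = seg;	/* publish the new segment last */
	else
		q->head = seg;
	q->tail = seg;
	return seg;
}

/* Append one value; the append path stays trivial because all of the
 * "grow the queue" logic lives in demo_make_space(). */
static int demo_append(struct demo_queue *q, int value)
{
	struct demo_seg *tail = demo_make_space(q);

	if (demo_is_err(tail))
		return demo_ptr_err(tail);
	tail->slots[tail->nr_used++] = value;
	return 0;
}

int main(void)
{
	struct demo_queue q = { NULL, NULL };

	/* Ten appends spill across three segments of four slots each
	 * (segments are leaked on exit; frees omitted for brevity). */
	for (int i = 0; i < 10; i++)
		if (demo_append(&q, i) < 0)
			return 1;
	for (struct demo_seg *s = q.head; s; s = s->next)
		printf("segment holding %d entries\n", s->nr_used);
	return 0;
}

The sketch deliberately ignores the concurrency concern the real code handles: in the kernel version the new block is published with WRITE_ONCE(prev->next, tail), after which a consumer may free prev at any time, which is why the master io_iter is repointed to the new tail before the function returns.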