@@ -19,12 +19,48 @@
 #include <linux/netfs.h>
 #include <net/9p/9p.h>
 #include <net/9p/client.h>
+#include <trace/events/netfs.h>
 
 #include "v9fs.h"
 #include "v9fs_vfs.h"
 #include "cache.h"
 #include "fid.h"
 
+static void v9fs_upload_to_server(struct netfs_io_subrequest *subreq)
+{
+	struct inode *inode = subreq->rreq->inode;
+	struct v9fs_inode __maybe_unused *v9inode = V9FS_I(inode);
+	struct p9_fid *fid = subreq->rreq->netfs_priv;
+	int err;
+
+	trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
+	p9_client_write(fid, subreq->start, &subreq->io_iter, &err);
+	netfs_write_subrequest_terminated(subreq, err < 0 ? err : subreq->len,
+					  false);
+}
+
+static void v9fs_upload_to_server_worker(struct work_struct *work)
+{
+	struct netfs_io_subrequest *subreq =
+		container_of(work, struct netfs_io_subrequest, work);
+
+	v9fs_upload_to_server(subreq);
+}
+
+/*
+ * Set up write requests for a writeback slice.  We need to add a write request
+ * for each write we want to make.
+ */
+static void v9fs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size_t len)
+{
+	struct netfs_io_subrequest *subreq;
+
+	subreq = netfs_create_write_request(wreq, NETFS_UPLOAD_TO_SERVER,
+					    start, len, v9fs_upload_to_server_worker);
+	if (subreq)
+		netfs_queue_write_request(subreq);
+}
+
 /**
  * v9fs_issue_read - Issue a read from 9P
  * @subreq: The read to make
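[Annotation] The *_worker() wrapper above exists because the queued write subrequest is invoked via the work_struct embedded in it, so the callback has to map that work_struct back to the enclosing netfs_io_subrequest. A self-contained user-space sketch of the container_of() pattern it relies on, using stand-in types rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

/* User-space stand-in for the kernel macro: map a pointer to an
 * embedded member back to the structure that contains it. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };		/* stand-in for struct work_struct */
struct subrequest {			/* stand-in for netfs_io_subrequest */
	long long start;
	struct work work;		/* embedded, as in the kernel struct */
};

static void worker(struct work *w)
{
	/* Recover the enclosing subrequest, exactly as
	 * v9fs_upload_to_server_worker() does with its work_struct. */
	struct subrequest *sub = container_of(w, struct subrequest, work);

	printf("subrequest starts at %lld\n", sub->start);
}

int main(void)
{
	struct subrequest sub = { .start = 4096 };

	worker(&sub.work);	/* prints: subrequest starts at 4096 */
	return 0;
}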
@@ -33,14 +69,10 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
 {
 	struct netfs_io_request *rreq = subreq->rreq;
 	struct p9_fid *fid = rreq->netfs_priv;
-	struct iov_iter to;
-	loff_t pos = subreq->start + subreq->transferred;
-	size_t len = subreq->len - subreq->transferred;
 	int total, err;
 
-	iov_iter_xarray(&to, ITER_DEST, &rreq->mapping->i_pages, pos, len);
-
-	total = p9_client_read(fid, pos, &to, &err);
+	total = p9_client_read(fid, subreq->start + subreq->transferred,
+			       &subreq->io_iter, &err);
 
 	/* if we just extended the file size, any portion not in
 	 * cache won't be on server and is zeroes */
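[Annotation] The read side shrinks rather than grows: netfs now hands each subrequest a preset subreq->io_iter describing the buffer slice to fill, so v9fs no longer builds its own iov_iter_xarray() over the mapping's pages and simply passes the prepared iterator to p9_client_read().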
@@ -50,23 +82,37 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
 }
 
 /**
- * v9fs_init_request - Initialise a read request
+ * v9fs_init_request - Initialise a request
  * @rreq: The read request
  * @file: The file being read from
  */
 static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
 {
-	struct p9_fid *fid = file->private_data;
-
-	BUG_ON(!fid);
+	struct p9_fid *fid;
+	bool writing = (rreq->origin == NETFS_READ_FOR_WRITE ||
+			rreq->origin == NETFS_WRITEBACK ||
+			rreq->origin == NETFS_WRITETHROUGH ||
+			rreq->origin == NETFS_LAUNDER_WRITE ||
+			rreq->origin == NETFS_UNBUFFERED_WRITE ||
+			rreq->origin == NETFS_DIO_WRITE);
+
+	if (file) {
+		fid = file->private_data;
+		BUG_ON(!fid);
+		p9_fid_get(fid);
+	} else {
+		fid = v9fs_fid_find_inode(rreq->inode, writing, INVALID_UID, true);
+		if (!fid) {
+			WARN_ONCE(1, "folio expected an open fid inode->i_private=%p\n",
+				  rreq->inode->i_private);
+			return -EINVAL;
+		}
+	}
 
 	/* we might need to read from a fid that was opened write-only
 	 * for read-modify-write of page cache, use the writeback fid
 	 * for that */
-	WARN_ON(rreq->origin == NETFS_READ_FOR_WRITE &&
-		!(fid->mode & P9_ORDWR));
-
-	p9_fid_get(fid);
+	WARN_ON(rreq->origin == NETFS_READ_FOR_WRITE && !(fid->mode & P9_ORDWR));
 	rreq->netfs_priv = fid;
 	return 0;
 }
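[Annotation] The chain of origin checks that computes "writing" reads as a predicate: any request whose origin implies data flowing to the server needs a fid opened for writing. A hypothetical helper, not part of the patch (enum netfs_io_origin and the constants below come from <linux/netfs.h>), could express the same test:

/* Hypothetical refactor of the "writing" test in v9fs_init_request();
 * the origin values are exactly those named in the hunk above. */
static bool v9fs_origin_needs_write_fid(enum netfs_io_origin origin)
{
	switch (origin) {
	case NETFS_READ_FOR_WRITE:	/* read-modify-write of page cache */
	case NETFS_WRITEBACK:
	case NETFS_WRITETHROUGH:
	case NETFS_LAUNDER_WRITE:
	case NETFS_UNBUFFERED_WRITE:
	case NETFS_DIO_WRITE:
		return true;
	default:
		return false;
	}
}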
@@ -86,217 +132,16 @@ const struct netfs_request_ops v9fs_req_ops = {
 	.init_request		= v9fs_init_request,
 	.free_request		= v9fs_free_request,
 	.issue_read		= v9fs_issue_read,
+	.create_write_requests	= v9fs_create_write_requests,
 };
 
-#ifdef CONFIG_9P_FSCACHE
-static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
-				     bool was_async)
-{
-	struct v9fs_inode *v9inode = priv;
-	__le32 version;
-
-	if (IS_ERR_VALUE(transferred_or_error) &&
-	    transferred_or_error != -ENOBUFS) {
-		version = cpu_to_le32(v9inode->qid.version);
-		fscache_invalidate(v9fs_inode_cookie(v9inode), &version,
-				   i_size_read(&v9inode->netfs.inode), 0);
-	}
-}
-#endif
-
-static int v9fs_vfs_write_folio_locked(struct folio *folio)
-{
-	struct inode *inode = folio_inode(folio);
-	loff_t start = folio_pos(folio);
-	loff_t i_size = i_size_read(inode);
-	struct iov_iter from;
-	size_t len = folio_size(folio);
-	struct p9_fid *writeback_fid;
-	int err;
-	struct v9fs_inode __maybe_unused *v9inode = V9FS_I(inode);
-	struct fscache_cookie __maybe_unused *cookie = v9fs_inode_cookie(v9inode);
-
-	if (start >= i_size)
-		return 0; /* Simultaneous truncation occurred */
-
-	len = min_t(loff_t, i_size - start, len);
-
-	iov_iter_xarray(&from, ITER_SOURCE, &folio_mapping(folio)->i_pages, start, len);
-
-	writeback_fid = v9fs_fid_find_inode(inode, true, INVALID_UID, true);
-	if (!writeback_fid) {
-		WARN_ONCE(1, "folio expected an open fid inode->i_private=%p\n",
-			  inode->i_private);
-		return -EINVAL;
-	}
-
-	folio_wait_fscache(folio);
-	folio_start_writeback(folio);
-
-	p9_client_write(writeback_fid, start, &from, &err);
-
-#ifdef CONFIG_9P_FSCACHE
-	if (err == 0 &&
-	    fscache_cookie_enabled(cookie) &&
-	    test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) {
-		folio_start_fscache(folio);
-		fscache_write_to_cache(v9fs_inode_cookie(v9inode),
-				       folio_mapping(folio), start, len, i_size,
-				       v9fs_write_to_cache_done, v9inode,
-				       true);
-	}
-#endif
-
-	folio_end_writeback(folio);
-	p9_fid_put(writeback_fid);
-
-	return err;
-}
-
-static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
-{
-	struct folio *folio = page_folio(page);
-	int retval;
-
-	p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);
-
-	retval = v9fs_vfs_write_folio_locked(folio);
-	if (retval < 0) {
-		if (retval == -EAGAIN) {
-			folio_redirty_for_writepage(wbc, folio);
-			retval = 0;
-		} else {
-			mapping_set_error(folio_mapping(folio), retval);
-		}
-	} else
-		retval = 0;
-
-	folio_unlock(folio);
-	return retval;
-}
-
-static int v9fs_launder_folio(struct folio *folio)
-{
-	int retval;
-
-	if (folio_clear_dirty_for_io(folio)) {
-		retval = v9fs_vfs_write_folio_locked(folio);
-		if (retval)
-			return retval;
-	}
-	folio_wait_fscache(folio);
-	return 0;
-}
-
-/**
- * v9fs_direct_IO - 9P address space operation for direct I/O
- * @iocb: target I/O control block
- * @iter: The data/buffer to use
- *
- * The presence of v9fs_direct_IO() in the address space ops vector
- * allowes open() O_DIRECT flags which would have failed otherwise.
- *
- * In the non-cached mode, we shunt off direct read and write requests before
- * the VFS gets them, so this method should never be called.
- *
- * Direct IO is not 'yet' supported in the cached mode. Hence when
- * this routine is called through generic_file_aio_read(), the read/write fails
- * with an error.
- *
- */
-static ssize_t
-v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
-{
-	struct file *file = iocb->ki_filp;
-	loff_t pos = iocb->ki_pos;
-	ssize_t n;
-	int err = 0;
-
-	if (iov_iter_rw(iter) == WRITE) {
-		n = p9_client_write(file->private_data, pos, iter, &err);
-		if (n) {
-			struct inode *inode = file_inode(file);
-			loff_t i_size = i_size_read(inode);
-
-			if (pos + n > i_size)
-				inode_add_bytes(inode, pos + n - i_size);
-		}
-	} else {
-		n = p9_client_read(file->private_data, pos, iter, &err);
-	}
-	return n ? n : err;
-}
-
-static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
-			    loff_t pos, unsigned int len,
-			    struct page **subpagep, void **fsdata)
-{
-	int retval;
-	struct folio *folio;
-	struct v9fs_inode *v9inode = V9FS_I(mapping->host);
-
-	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
-
-	/* Prefetch area to be written into the cache if we're caching this
-	 * file.  We need to do this before we get a lock on the page in case
-	 * there's more than one writer competing for the same cache block.
-	 */
-	retval = netfs_write_begin(&v9inode->netfs, filp, mapping, pos, len, &folio, fsdata);
-	if (retval < 0)
-		return retval;
-
-	*subpagep = &folio->page;
-	return retval;
-}
-
-static int v9fs_write_end(struct file *filp, struct address_space *mapping,
-			  loff_t pos, unsigned int len, unsigned int copied,
-			  struct page *subpage, void *fsdata)
-{
-	loff_t last_pos = pos + copied;
-	struct folio *folio = page_folio(subpage);
-	struct inode *inode = mapping->host;
-
-	p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
-
-	if (!folio_test_uptodate(folio)) {
-		if (unlikely(copied < len)) {
-			copied = 0;
-			goto out;
-		}
-
-		folio_mark_uptodate(folio);
-	}
-
-	/*
-	 * No need to use i_size_read() here, the i_size
-	 * cannot change under us because we hold the i_mutex.
-	 */
-	if (last_pos > inode->i_size) {
-		inode_add_bytes(inode, last_pos - inode->i_size);
-		i_size_write(inode, last_pos);
-#ifdef CONFIG_9P_FSCACHE
-		fscache_update_cookie(v9fs_inode_cookie(V9FS_I(inode)), NULL,
-				      &last_pos);
-#endif
-	}
-	folio_mark_dirty(folio);
-out:
-	folio_unlock(folio);
-	folio_put(folio);
-
-	return copied;
-}
-
 const struct address_space_operations v9fs_addr_operations = {
-	.read_folio = netfs_read_folio,
-	.readahead = netfs_readahead,
-	.dirty_folio = netfs_dirty_folio,
-	.writepage = v9fs_vfs_writepage,
-	.write_begin = v9fs_write_begin,
-	.write_end = v9fs_write_end,
-	.release_folio = netfs_release_folio,
-	.invalidate_folio = netfs_invalidate_folio,
-	.launder_folio = v9fs_launder_folio,
-	.direct_IO = v9fs_direct_IO,
+	.read_folio		= netfs_read_folio,
+	.readahead		= netfs_readahead,
+	.dirty_folio		= netfs_dirty_folio,
+	.release_folio		= netfs_release_folio,
+	.invalidate_folio	= netfs_invalidate_folio,
+	.launder_folio		= netfs_launder_folio,
+	.direct_IO		= noop_direct_IO,
+	.writepages		= netfs_writepages,
 };
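[Annotation] After this hunk, every 9p write path funnels through netfslib: the bespoke .writepage/.write_begin/.write_end/.launder_folio implementations are deleted in favour of the generic netfs ops, and .direct_IO becomes noop_direct_IO, presumably because netfs now drives direct I/O itself (note the NETFS_UNBUFFERED_WRITE and NETFS_DIO_WRITE origins handled above). A minimal user-space smoke test of the buffered write/writeback path, assuming a 9p filesystem mounted at /mnt/9p (mount point and filename are placeholders):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char buf[] = "hello via netfs writeback\n";
	int fd = open("/mnt/9p/test.txt", O_CREAT | O_WRONLY | O_TRUNC, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, buf, strlen(buf)) < 0)	/* dirties the page cache */
		perror("write");
	if (fsync(fd) < 0)			/* forces writeback to the 9p server */
		perror("fsync");
	close(fd);
	return 0;
}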