 */

 #include <linux/slab.h>
+#include <linux/mempool.h>
+#include <linux/delay.h>
 #include "internal.h"

 /*
@@ -20,17 +22,22 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
 	struct inode *inode = file ? file_inode(file) : mapping->host;
 	struct netfs_inode *ctx = netfs_inode(inode);
 	struct netfs_io_request *rreq;
+	mempool_t *mempool = ctx->ops->request_pool ?: &netfs_request_pool;
+	struct kmem_cache *cache = mempool->pool_data;
 	bool is_unbuffered = (origin == NETFS_UNBUFFERED_WRITE ||
 			      origin == NETFS_DIO_READ ||
 			      origin == NETFS_DIO_WRITE);
 	bool cached = !is_unbuffered && netfs_is_cache_enabled(ctx);
 	int ret;

-	rreq = kzalloc(ctx->ops->io_request_size ?: sizeof(struct netfs_io_request),
-		       GFP_KERNEL);
-	if (!rreq)
-		return ERR_PTR(-ENOMEM);
+	for (;;) {
+		rreq = mempool_alloc(mempool, GFP_KERNEL);
+		if (rreq)
+			break;
+		msleep(10);
+	}

+	memset(rreq, 0, kmem_cache_size(cache));
 	rreq->start	= start;
 	rreq->len	= len;
 	rreq->upper_len	= len;
@@ -56,7 +63,7 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
 	if (rreq->netfs_ops->init_request) {
 		ret = rreq->netfs_ops->init_request(rreq, file);
 		if (ret < 0) {
-			kfree(rreq);
+			mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool);
 			return ERR_PTR(ret);
 		}
 	}
@@ -88,6 +95,14 @@ void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
 	}
 }

+static void netfs_free_request_rcu(struct rcu_head *rcu)
+{
+	struct netfs_io_request *rreq = container_of(rcu, struct netfs_io_request, rcu);
+
+	mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool);
+	netfs_stat_d(&netfs_n_rh_rreq);
+}
+
 static void netfs_free_request(struct work_struct *work)
 {
 	struct netfs_io_request *rreq =
@@ -110,8 +125,7 @@ static void netfs_free_request(struct work_struct *work)
 		}
 		kvfree(rreq->direct_bv);
 	}
-	kfree_rcu(rreq, rcu);
-	netfs_stat_d(&netfs_n_rh_rreq);
+	call_rcu(&rreq->rcu, netfs_free_request_rcu);
 }

 void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
@@ -143,20 +157,25 @@ void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
 struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq)
 {
 	struct netfs_io_subrequest *subreq;
-
-	subreq = kzalloc(rreq->netfs_ops->io_subrequest_size ?:
-			 sizeof(struct netfs_io_subrequest),
-			 GFP_KERNEL);
-	if (subreq) {
-		INIT_WORK(&subreq->work, NULL);
-		INIT_LIST_HEAD(&subreq->rreq_link);
-		refcount_set(&subreq->ref, 2);
-		subreq->rreq = rreq;
-		subreq->debug_index = atomic_inc_return(&rreq->subreq_counter);
-		netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
-		netfs_stat(&netfs_n_rh_sreq);
+	mempool_t *mempool = rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool;
+	struct kmem_cache *cache = mempool->pool_data;
+
+	for (;;) {
+		subreq = mempool_alloc(rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool,
+				       GFP_KERNEL);
+		if (subreq)
+			break;
+		msleep(10);
 	}

+	memset(subreq, 0, kmem_cache_size(cache));
+	INIT_WORK(&subreq->work, NULL);
+	INIT_LIST_HEAD(&subreq->rreq_link);
+	refcount_set(&subreq->ref, 2);
+	subreq->rreq = rreq;
+	subreq->debug_index = atomic_inc_return(&rreq->subreq_counter);
+	netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
+	netfs_stat(&netfs_n_rh_sreq);
 	return subreq;
 }

@@ -178,7 +197,7 @@ static void netfs_free_subrequest(struct netfs_io_subrequest *subreq,
 	trace_netfs_sreq(subreq, netfs_sreq_trace_free);
 	if (rreq->netfs_ops->free_subrequest)
 		rreq->netfs_ops->free_subrequest(subreq);
-	kfree(subreq);
+	mempool_free(subreq, rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool);
 	netfs_stat_d(&netfs_n_rh_sreq);
 	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq);
 }
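
Note on the new hooks used above: a filesystem that embeds struct netfs_io_request in a larger structure can now back the allocation with its own mempool via ops->request_pool (and likewise ops->subrequest_pool), falling back to the shared netfs_request_pool otherwise. Because netfs_alloc_request() zeroes kmem_cache_size(mempool->pool_data) bytes, such a pool has to be built on a kmem_cache sized for the wrapper structure. A minimal sketch of that setup, assuming a hypothetical myfs_io_request wrapper and pool names that are not part of this patch:

	/* Hypothetical illustration only: backing netfs request allocation
	 * with a filesystem-private mempool built on a kmem_cache.
	 */
	#include <linux/mempool.h>
	#include <linux/slab.h>
	#include <linux/netfs.h>

	struct myfs_io_request {
		struct netfs_io_request	netfs;		/* must be first */
		unsigned long		myfs_private;	/* fs-specific state */
	};

	static struct kmem_cache *myfs_request_slab;
	static mempool_t myfs_request_pool;

	static int __init myfs_init_request_pool(void)
	{
		/* The cache object size is what netfs_alloc_request() memset()s. */
		myfs_request_slab = KMEM_CACHE(myfs_io_request, SLAB_HWCACHE_ALIGN);
		if (!myfs_request_slab)
			return -ENOMEM;

		/* Keep a small reserve so allocation can always make progress. */
		return mempool_init_slab_pool(&myfs_request_pool, 10, myfs_request_slab);
	}

	/* The netfs ops would then point at the pool instead of setting
	 * io_request_size, e.g.  .request_pool = &myfs_request_pool,
	 */

With a reserve of pre-allocated objects behind mempool_alloc(), the retry loop with msleep(10) replaces the old -ENOMEM failure path: allocation may stall under memory pressure, but it no longer fails outright.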