@@ -12,16 +12,6 @@ int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs)
 	return page_count(bs->page_info.page) - bs->page_info.pagecnt_bias;
 }
 
-void gve_free_page_dqo(struct gve_priv *priv, struct gve_rx_buf_state_dqo *bs,
-		       bool free_page)
-{
-	page_ref_sub(bs->page_info.page, bs->page_info.pagecnt_bias - 1);
-	if (free_page)
-		gve_free_page(&priv->pdev->dev, bs->page_info.page, bs->addr,
-			      DMA_FROM_DEVICE);
-	bs->page_info.page = NULL;
-}
-
 struct gve_rx_buf_state_dqo *gve_alloc_buf_state(struct gve_rx_ring *rx)
 {
 	struct gve_rx_buf_state_dqo *buf_state;
@@ -128,56 +118,28 @@ struct gve_rx_buf_state_dqo *gve_get_recycled_buf_state(struct gve_rx_ring *rx)
 		gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
 	}
 
-	/* For QPL, we cannot allocate any new buffers and must
-	 * wait for the existing ones to be available.
-	 */
-	if (rx->dqo.qpl)
-		return NULL;
-
-	/* If there are no free buf states discard an entry from
-	 * `used_buf_states` so it can be used.
-	 */
-	if (unlikely(rx->dqo.free_buf_states == -1)) {
-		buf_state = gve_dequeue_buf_state(rx, &rx->dqo.used_buf_states);
-		if (gve_buf_ref_cnt(buf_state) == 0)
-			return buf_state;
-
-		gve_free_page_dqo(rx->gve, buf_state, true);
-		gve_free_buf_state(rx, buf_state);
-	}
-
 	return NULL;
 }
 
-int gve_alloc_page_dqo(struct gve_rx_ring *rx,
-		       struct gve_rx_buf_state_dqo *buf_state)
+int gve_alloc_qpl_page_dqo(struct gve_rx_ring *rx,
+			   struct gve_rx_buf_state_dqo *buf_state)
 {
 	struct gve_priv *priv = rx->gve;
 	u32 idx;
 
-	if (!rx->dqo.qpl) {
-		int err;
-
-		err = gve_alloc_page(priv, &priv->pdev->dev,
-				     &buf_state->page_info.page,
-				     &buf_state->addr,
-				     DMA_FROM_DEVICE, GFP_ATOMIC);
-		if (err)
-			return err;
-	} else {
-		idx = rx->dqo.next_qpl_page_idx;
-		if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
-			net_err_ratelimited("%s: Out of QPL pages\n",
-					    priv->dev->name);
-			return -ENOMEM;
-		}
-		buf_state->page_info.page = rx->dqo.qpl->pages[idx];
-		buf_state->addr = rx->dqo.qpl->page_buses[idx];
-		rx->dqo.next_qpl_page_idx++;
+	idx = rx->dqo.next_qpl_page_idx;
+	if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
+		net_err_ratelimited("%s: Out of QPL pages\n",
+				    priv->dev->name);
+		return -ENOMEM;
 	}
+	buf_state->page_info.page = rx->dqo.qpl->pages[idx];
+	buf_state->addr = rx->dqo.qpl->page_buses[idx];
+	rx->dqo.next_qpl_page_idx++;
 	buf_state->page_info.page_offset = 0;
 	buf_state->page_info.page_address =
 		page_address(buf_state->page_info.page);
+	buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
 	buf_state->last_single_ref_offset = 0;
 
 	/* The page already has 1 ref. */
@@ -187,6 +149,16 @@ int gve_alloc_page_dqo(struct gve_rx_ring *rx,
 	return 0;
 }
 
+void gve_free_qpl_page_dqo(struct gve_rx_buf_state_dqo *buf_state)
+{
+	if (!buf_state->page_info.page)
+		return;
+
+	page_ref_sub(buf_state->page_info.page,
+		     buf_state->page_info.pagecnt_bias - 1);
+	buf_state->page_info.page = NULL;
+}
+
 void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
 			 struct gve_rx_buf_state_dqo *buf_state)
 {
@@ -228,3 +200,113 @@ void gve_try_recycle_buf(struct gve_priv *priv, struct gve_rx_ring *rx,
 	gve_enqueue_buf_state(rx, &rx->dqo.used_buf_states, buf_state);
 	rx->dqo.used_buf_states_cnt++;
 }
+
+void gve_free_to_page_pool(struct gve_rx_ring *rx,
+			   struct gve_rx_buf_state_dqo *buf_state,
+			   bool allow_direct)
+{
+	struct page *page = buf_state->page_info.page;
+
+	if (!page)
+		return;
+
+	page_pool_put_page(page->pp, page, buf_state->page_info.buf_size,
+			   allow_direct);
+	buf_state->page_info.page = NULL;
+}
+
+static int gve_alloc_from_page_pool(struct gve_rx_ring *rx,
+				    struct gve_rx_buf_state_dqo *buf_state)
+{
+	struct gve_priv *priv = rx->gve;
+	struct page *page;
+
+	buf_state->page_info.buf_size = priv->data_buffer_size_dqo;
+	page = page_pool_alloc(rx->dqo.page_pool,
+			       &buf_state->page_info.page_offset,
+			       &buf_state->page_info.buf_size, GFP_ATOMIC);
+
+	if (!page)
+		return -ENOMEM;
+
+	buf_state->page_info.page = page;
+	buf_state->page_info.page_address = page_address(page);
+	buf_state->addr = page_pool_get_dma_addr(page);
+
+	return 0;
+}
+
+struct page_pool *gve_rx_create_page_pool(struct gve_priv *priv,
+					  struct gve_rx_ring *rx)
+{
+	u32 ntfy_id = gve_rx_idx_to_ntfy(priv, rx->q_num);
+	struct page_pool_params pp = {
+		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+		.order = 0,
+		.pool_size = GVE_PAGE_POOL_SIZE_MULTIPLIER * priv->rx_desc_cnt,
+		.dev = &priv->pdev->dev,
+		.netdev = priv->dev,
+		.napi = &priv->ntfy_blocks[ntfy_id].napi,
+		.max_len = PAGE_SIZE,
+		.dma_dir = DMA_FROM_DEVICE,
+	};
+
+	return page_pool_create(&pp);
+}
+
+void gve_free_buffer(struct gve_rx_ring *rx,
+		     struct gve_rx_buf_state_dqo *buf_state)
+{
+	if (rx->dqo.page_pool) {
+		gve_free_to_page_pool(rx, buf_state, true);
+		gve_free_buf_state(rx, buf_state);
+	} else {
+		gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
+				      buf_state);
+	}
+}
+
+void gve_reuse_buffer(struct gve_rx_ring *rx,
+		      struct gve_rx_buf_state_dqo *buf_state)
+{
+	if (rx->dqo.page_pool) {
+		buf_state->page_info.page = NULL;
+		gve_free_buf_state(rx, buf_state);
+	} else {
+		gve_dec_pagecnt_bias(&buf_state->page_info);
+		gve_try_recycle_buf(rx->gve, rx, buf_state);
+	}
+}
+
+int gve_alloc_buffer(struct gve_rx_ring *rx, struct gve_rx_desc_dqo *desc)
+{
+	struct gve_rx_buf_state_dqo *buf_state;
+
+	if (rx->dqo.page_pool) {
+		buf_state = gve_alloc_buf_state(rx);
+		if (WARN_ON_ONCE(!buf_state))
+			return -ENOMEM;
+
+		if (gve_alloc_from_page_pool(rx, buf_state))
+			goto free_buf_state;
+	} else {
+		buf_state = gve_get_recycled_buf_state(rx);
+		if (unlikely(!buf_state)) {
+			buf_state = gve_alloc_buf_state(rx);
+			if (unlikely(!buf_state))
+				return -ENOMEM;
+
+			if (unlikely(gve_alloc_qpl_page_dqo(rx, buf_state)))
+				goto free_buf_state;
+		}
+	}
+	desc->buf_id = cpu_to_le16(buf_state - rx->dqo.buf_states);
+	desc->buf_addr = cpu_to_le64(buf_state->addr +
+				     buf_state->page_info.page_offset);
+
+	return 0;
+
+free_buf_state:
+	gve_free_buf_state(rx, buf_state);
+	return -ENOMEM;
+}
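
Note on the new path: with this change, DQO RX buffer management uses the kernel's generic page_pool API (page_pool_create(), page_pool_alloc(), page_pool_get_dma_addr(), page_pool_put_page()) whenever rx->dqo.page_pool is set, and falls back to the pre-existing QPL buffer states otherwise. Below is a minimal, self-contained sketch of how those page_pool calls fit together outside the driver; example_create_pool() and example_fill_and_release() are illustrative names only, not part of this patch, and the pool parameters merely mirror gve_rx_create_page_pool() above.

#include <linux/netdevice.h>
#include <linux/dma-mapping.h>
#include <net/page_pool/helpers.h>

/* Illustrative only: build a DMA-mapping page pool roughly the way
 * gve_rx_create_page_pool() does. page_pool_create() returns ERR_PTR()
 * on failure.
 */
static struct page_pool *example_create_pool(struct device *dev,
					     struct napi_struct *napi,
					     u32 ring_size)
{
	struct page_pool_params pp = {
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order = 0,
		.pool_size = ring_size,
		.dev = dev,
		.napi = napi,
		.max_len = PAGE_SIZE,
		.dma_dir = DMA_FROM_DEVICE,
	};

	return page_pool_create(&pp);
}

/* Illustrative only: allocate one RX fragment, derive the DMA address a
 * descriptor would carry, then hand the fragment back to the pool. This is
 * the per-buffer pattern that gve_alloc_from_page_pool() and
 * gve_free_to_page_pool() follow.
 */
static int example_fill_and_release(struct page_pool *pool, u32 buf_size)
{
	unsigned int offset;
	unsigned int size = buf_size;
	struct page *page;
	dma_addr_t dma;

	/* May hand out a sub-page fragment; offset and size are updated. */
	page = page_pool_alloc(pool, &offset, &size, GFP_ATOMIC);
	if (!page)
		return -ENOMEM;

	/* This is the address that would be posted to the device. */
	dma = page_pool_get_dma_addr(page) + offset;
	(void)dma;

	/* allow_direct = true uses the lockless per-NAPI recycle path. */
	page_pool_put_page(pool, page, size, true);

	return 0;
}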