@@ -104,6 +104,25 @@ void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
 }
 EXPORT_SYMBOL(xp_set_rxq_info);
 
+static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
+{
+	struct netdev_bpf bpf;
+	int err;
+
+	ASSERT_RTNL();
+
+	if (pool->umem->zc) {
+		bpf.command = XDP_SETUP_XSK_POOL;
+		bpf.xsk.pool = NULL;
+		bpf.xsk.queue_id = pool->queue_id;
+
+		err = pool->netdev->netdev_ops->ndo_bpf(pool->netdev, &bpf);
+
+		if (err)
+			WARN(1, "Failed to disable zero-copy!\n");
+	}
+}
+
 int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *netdev,
 		  u16 queue_id, u16 flags)
 {
@@ -122,6 +141,8 @@ int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *netdev,
 	if (xsk_get_pool_from_qid(netdev, queue_id))
 		return -EBUSY;
 
+	pool->netdev = netdev;
+	pool->queue_id = queue_id;
 	err = xsk_reg_pool_at_qid(netdev, pool, queue_id);
 	if (err)
 		return err;
@@ -155,11 +176,15 @@ int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *netdev,
 	if (err)
 		goto err_unreg_pool;
 
-	pool->netdev = netdev;
-	pool->queue_id = queue_id;
+	if (!pool->dma_pages) {
+		WARN(1, "Driver did not DMA map zero-copy buffers");
+		goto err_unreg_xsk;
+	}
 	pool->umem->zc = true;
 	return 0;
 
+err_unreg_xsk:
+	xp_disable_drv_zc(pool);
 err_unreg_pool:
 	if (!force_zc)
 		err = 0; /* fallback to copy mode */
@@ -170,25 +195,10 @@ int xp_assign_dev(struct xsk_buff_pool *pool, struct net_device *netdev,
 
 void xp_clear_dev(struct xsk_buff_pool *pool)
 {
-	struct netdev_bpf bpf;
-	int err;
-
-	ASSERT_RTNL();
-
 	if (!pool->netdev)
 		return;
 
-	if (pool->umem->zc) {
-		bpf.command = XDP_SETUP_XSK_POOL;
-		bpf.xsk.pool = NULL;
-		bpf.xsk.queue_id = pool->queue_id;
-
-		err = pool->netdev->netdev_ops->ndo_bpf(pool->netdev, &bpf);
-
-		if (err)
-			WARN(1, "Failed to disable zero-copy!\n");
-	}
-
+	xp_disable_drv_zc(pool);
 	xsk_clear_pool_at_qid(pool->netdev, pool->queue_id);
 	dev_put(pool->netdev);
 	pool->netdev = NULL;
@@ -233,70 +243,159 @@ void xp_put_pool(struct xsk_buff_pool *pool)
 	}
 }
 
-void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
+static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)
+{
+	struct xsk_dma_map *dma_map;
+
+	list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) {
+		if (dma_map->netdev == pool->netdev)
+			return dma_map;
+	}
+
+	return NULL;
+}
+
+static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_device *netdev,
+					     u32 nr_pages, struct xdp_umem *umem)
+{
+	struct xsk_dma_map *dma_map;
+
+	dma_map = kzalloc(sizeof(*dma_map), GFP_KERNEL);
+	if (!dma_map)
+		return NULL;
+
+	dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL);
+	if (!dma_map->dma_pages) {
+		kfree(dma_map);
+		return NULL;
+	}
+
+	dma_map->netdev = netdev;
+	dma_map->dev = dev;
+	dma_map->dma_need_sync = false;
+	dma_map->dma_pages_cnt = nr_pages;
+	refcount_set(&dma_map->users, 0);
+	list_add(&dma_map->list, &umem->xsk_dma_list);
+	return dma_map;
+}
+
+static void xp_destroy_dma_map(struct xsk_dma_map *dma_map)
+{
+	list_del(&dma_map->list);
+	kvfree(dma_map->dma_pages);
+	kfree(dma_map);
+}
+
+static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
 {
 	dma_addr_t *dma;
 	u32 i;
 
-	if (pool->dma_pages_cnt == 0)
-		return;
-
-	for (i = 0; i < pool->dma_pages_cnt; i++) {
-		dma = &pool->dma_pages[i];
+	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
+		dma = &dma_map->dma_pages[i];
 		if (*dma) {
-			dma_unmap_page_attrs(pool->dev, *dma, PAGE_SIZE,
+			dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
 					     DMA_BIDIRECTIONAL, attrs);
 			*dma = 0;
 		}
 	}
 
+	xp_destroy_dma_map(dma_map);
+}
+
+void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
+{
+	struct xsk_dma_map *dma_map;
+
+	if (pool->dma_pages_cnt == 0)
+		return;
+
+	dma_map = xp_find_dma_map(pool);
+	if (!dma_map) {
+		WARN(1, "Could not find dma_map for device");
+		return;
+	}
+
+	if (!refcount_dec_and_test(&dma_map->users))
+		return;
+
+	__xp_dma_unmap(dma_map, attrs);
 	kvfree(pool->dma_pages);
 	pool->dma_pages_cnt = 0;
 	pool->dev = NULL;
 }
 EXPORT_SYMBOL(xp_dma_unmap);
 
-static void xp_check_dma_contiguity(struct xsk_buff_pool *pool)
+static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
 {
 	u32 i;
 
-	for (i = 0; i < pool->dma_pages_cnt - 1; i++) {
-		if (pool->dma_pages[i] + PAGE_SIZE == pool->dma_pages[i + 1])
-			pool->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
+	for (i = 0; i < dma_map->dma_pages_cnt - 1; i++) {
+		if (dma_map->dma_pages[i] + PAGE_SIZE == dma_map->dma_pages[i + 1])
+			dma_map->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
 		else
-			pool->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
+			dma_map->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
 	}
 }
 
+static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
+{
+	pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL);
+	if (!pool->dma_pages)
+		return -ENOMEM;
+
+	pool->dev = dma_map->dev;
+	pool->dma_pages_cnt = dma_map->dma_pages_cnt;
+	pool->dma_need_sync = dma_map->dma_need_sync;
+	refcount_inc(&dma_map->users);
+	memcpy(pool->dma_pages, dma_map->dma_pages,
+	       pool->dma_pages_cnt * sizeof(*pool->dma_pages));
+
+	return 0;
+}
+
 int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
 	       unsigned long attrs, struct page **pages, u32 nr_pages)
 {
+	struct xsk_dma_map *dma_map;
 	dma_addr_t dma;
+	int err;
 	u32 i;
 
-	pool->dma_pages = kvcalloc(nr_pages, sizeof(*pool->dma_pages),
-				   GFP_KERNEL);
-	if (!pool->dma_pages)
-		return -ENOMEM;
+	dma_map = xp_find_dma_map(pool);
+	if (dma_map) {
+		err = xp_init_dma_info(pool, dma_map);
+		if (err)
+			return err;
 
-	pool->dev = dev;
-	pool->dma_pages_cnt = nr_pages;
-	pool->dma_need_sync = false;
+		return 0;
+	}
 
-	for (i = 0; i < pool->dma_pages_cnt; i++) {
+	dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
+	if (!dma_map)
+		return -ENOMEM;
+
+	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
 		dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
 					 DMA_BIDIRECTIONAL, attrs);
 		if (dma_mapping_error(dev, dma)) {
-			xp_dma_unmap(pool, attrs);
+			__xp_dma_unmap(dma_map, attrs);
 			return -ENOMEM;
 		}
 		if (dma_need_sync(dev, dma))
-			pool->dma_need_sync = true;
-		pool->dma_pages[i] = dma;
+			dma_map->dma_need_sync = true;
+		dma_map->dma_pages[i] = dma;
 	}
 
 	if (pool->unaligned)
-		xp_check_dma_contiguity(pool);
+		xp_check_dma_contiguity(dma_map);
+
+	err = xp_init_dma_info(pool, dma_map);
+	if (err) {
+		__xp_dma_unmap(dma_map, attrs);
+		return err;
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL(xp_dma_map);
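
For illustration only, and not part of the patch: a minimal userspace C sketch of the refcounted, per-device DMA-map sharing that xp_find_dma_map(), xp_create_dma_map() and the users refcount above implement. The names below (dma_map, get_map, put_map, netdev_id) are hypothetical stand-ins; the kernel code keys struct xsk_dma_map off the umem's xsk_dma_list and struct net_device rather than an integer id.

#include <stdio.h>
#include <stdlib.h>

struct dma_map {
	int netdev_id;		/* stand-in for dma_map->netdev */
	unsigned int users;	/* stand-in for refcount_t users */
	struct dma_map *next;	/* stand-in for the umem's xsk_dma_list */
};

/* Look up an existing mapping for this device, as xp_find_dma_map() does. */
static struct dma_map *find_map(struct dma_map *head, int netdev_id)
{
	for (; head; head = head->next)
		if (head->netdev_id == netdev_id)
			return head;
	return NULL;
}

/* Reuse an existing mapping when one exists, otherwise create one:
 * the xp_dma_map() reuse path vs. xp_create_dma_map().
 */
static struct dma_map *get_map(struct dma_map **head, int netdev_id)
{
	struct dma_map *map = find_map(*head, netdev_id);

	if (!map) {
		map = calloc(1, sizeof(*map));
		if (!map)
			return NULL;
		map->netdev_id = netdev_id;
		map->next = *head;
		*head = map;
	}
	map->users++;
	return map;
}

/* Drop one reference; only the last user tears the mapping down,
 * mirroring refcount_dec_and_test() in the new xp_dma_unmap().
 */
static void put_map(struct dma_map **head, struct dma_map *map)
{
	struct dma_map **p;

	if (--map->users)
		return;
	for (p = head; *p; p = &(*p)->next) {
		if (*p == map) {
			*p = map->next;
			break;
		}
	}
	free(map);
}

int main(void)
{
	struct dma_map *maps = NULL;
	struct dma_map *a = get_map(&maps, 1);	/* first pool on the device */
	struct dma_map *b = get_map(&maps, 1);	/* second pool shares the mapping */

	printf("shared: %s, users: %u\n", a == b ? "yes" : "no", a->users);
	put_map(&maps, b);
	put_map(&maps, a);	/* last put frees the mapping */
	return 0;
}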