@@ -36,19 +36,28 @@ static struct xdp_sock *xdp_sk(struct sock *sk)
 
 bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
 {
-	return !!xs->rx;
+	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
+		READ_ONCE(xs->umem->fq);
 }
 
-static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
+u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
+{
+	return xskq_peek_addr(umem->fq, addr);
+}
+EXPORT_SYMBOL(xsk_umem_peek_addr);
+
+void xsk_umem_discard_addr(struct xdp_umem *umem)
+{
+	xskq_discard_addr(umem->fq);
+}
+EXPORT_SYMBOL(xsk_umem_discard_addr);
+
+static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 {
-	u32 len = xdp->data_end - xdp->data;
 	void *buffer;
 	u64 addr;
 	int err;
 
-	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
-		return -EINVAL;
-
 	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
 	    len > xs->umem->chunk_size_nohr) {
 		xs->rx_dropped++;
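
The two helpers added above are exported so that zero-copy capable drivers can pull chunk addresses straight from the umem fill queue when they post buffers to their hardware Rx rings. A minimal sketch of that usage, assuming a made-up driver ring structure (my_rx_ring, its buffers[] array and next_to_use index are illustrative; only xsk_umem_peek_addr(), xsk_umem_discard_addr() and xdp_umem_get_data() come from the patch):

/* Hypothetical driver helper: refill 'count' HW Rx descriptors from the
 * umem fill queue. Returns -ENOSPC when the fill queue runs dry.
 */
static int my_rx_ring_refill(struct my_rx_ring *ring, u16 count)
{
	struct xdp_umem *umem = ring->umem;
	u64 addr;

	while (count--) {
		/* Peek the next chunk address userspace placed on the fill queue. */
		if (!xsk_umem_peek_addr(umem, &addr))
			return -ENOSPC;

		ring->buffers[ring->next_to_use].data = xdp_umem_get_data(umem, addr);
		ring->buffers[ring->next_to_use].handle = addr;

		/* Consume the fill queue entry and advance the HW ring. */
		xsk_umem_discard_addr(umem);
		ring->next_to_use = (ring->next_to_use + 1) % ring->count;
	}

	return 0;
}
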
@@ -60,25 +69,41 @@ static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 	buffer = xdp_umem_get_data(xs->umem, addr);
 	memcpy(buffer, xdp->data, len);
 	err = xskq_produce_batch_desc(xs->rx, addr, len);
-	if (!err)
+	if (!err) {
 		xskq_discard_addr(xs->umem->fq);
-	else
-		xs->rx_dropped++;
+		xdp_return_buff(xdp);
+		return 0;
+	}
 
+	xs->rx_dropped++;
 	return err;
 }
 
-int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
+static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 {
-	int err;
+	int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);
 
-	err = __xsk_rcv(xs, xdp);
-	if (likely(!err))
+	if (err) {
 		xdp_return_buff(xdp);
+		xs->rx_dropped++;
+	}
 
 	return err;
 }
 
+int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
+{
+	u32 len;
+
+	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
+		return -EINVAL;
+
+	len = xdp->data_end - xdp->data;
+
+	return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
+		__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
+}
+
 void xsk_flush(struct xdp_sock *xs)
 {
 	xskq_produce_flush_desc(xs->rx);
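
xsk_rcv() now dispatches on the Rx queue's registered memory model: with MEM_TYPE_ZERO_COPY the frame data already lives inside the umem, so __xsk_rcv_zc() only publishes a descriptor built from xdp->handle, while the default path still copies the packet into a fill-queue chunk. A driver would opt a queue into the zero-copy path roughly as below; struct my_rxq and my_zca_free() are placeholders, and xdp_rxq_info_reg_mem_model()/struct zero_copy_allocator are assumed to come from the companion xdp core changes in this series:

/* Sketch: register the zero-copy memory model for one driver Rx queue. */
static int my_rxq_enable_zc(struct my_rxq *rxq)
{
	/* ->free() recycles buffers the socket layer has finished with. */
	rxq->zca.free = my_zca_free;

	return xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
					  MEM_TYPE_ZERO_COPY, &rxq->zca);
}
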
@@ -87,12 +112,29 @@ void xsk_flush(struct xdp_sock *xs)
 
 int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
+	u32 len = xdp->data_end - xdp->data;
+	void *buffer;
+	u64 addr;
 	int err;
 
-	err = __xsk_rcv(xs, xdp);
-	if (!err)
+	if (!xskq_peek_addr(xs->umem->fq, &addr) ||
+	    len > xs->umem->chunk_size_nohr) {
+		xs->rx_dropped++;
+		return -ENOSPC;
+	}
+
+	addr += xs->umem->headroom;
+
+	buffer = xdp_umem_get_data(xs->umem, addr);
+	memcpy(buffer, xdp->data, len);
+	err = xskq_produce_batch_desc(xs->rx, addr, len);
+	if (!err) {
+		xskq_discard_addr(xs->umem->fq);
 		xsk_flush(xs);
+		return 0;
+	}
 
+	xs->rx_dropped++;
 	return err;
 }
 
@@ -291,6 +333,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 	struct sock *sk = sock->sk;
 	struct xdp_sock *xs = xdp_sk(sk);
 	struct net_device *dev;
+	u32 flags, qid;
 	int err = 0;
 
 	if (addr_len < sizeof(struct sockaddr_xdp))
@@ -315,16 +358,26 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 		goto out_unlock;
 	}
 
-	if ((xs->rx && sxdp->sxdp_queue_id >= dev->real_num_rx_queues) ||
-	    (xs->tx && sxdp->sxdp_queue_id >= dev->real_num_tx_queues)) {
+	qid = sxdp->sxdp_queue_id;
+
+	if ((xs->rx && qid >= dev->real_num_rx_queues) ||
+	    (xs->tx && qid >= dev->real_num_tx_queues)) {
 		err = -EINVAL;
 		goto out_unlock;
 	}
 
-	if (sxdp->sxdp_flags & XDP_SHARED_UMEM) {
+	flags = sxdp->sxdp_flags;
+
+	if (flags & XDP_SHARED_UMEM) {
 		struct xdp_sock *umem_xs;
 		struct socket *sock;
 
+		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY)) {
+			/* Cannot specify flags for shared sockets. */
+			err = -EINVAL;
+			goto out_unlock;
+		}
+
 		if (xs->umem) {
 			/* We have already our own. */
 			err = -EINVAL;
@@ -343,8 +396,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 			err = -EBADF;
 			sockfd_put(sock);
 			goto out_unlock;
-		} else if (umem_xs->dev != dev ||
-			   umem_xs->queue_id != sxdp->sxdp_queue_id) {
+		} else if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
 			err = -EINVAL;
 			sockfd_put(sock);
 			goto out_unlock;
@@ -360,6 +412,10 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 		/* This xsk has its own umem. */
 		xskq_set_umem(xs->umem->fq, &xs->umem->props);
 		xskq_set_umem(xs->umem->cq, &xs->umem->props);
+
+		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
+		if (err)
+			goto out_unlock;
 	}
 
 	xs->dev = dev;
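
From userspace, the new XDP_COPY and XDP_ZEROCOPY bind flags let an application pick the Rx mode explicitly (and, as the shared-umem hunk above shows, they are rejected together with XDP_SHARED_UMEM). A hedged sketch of a bind call forcing zero-copy; umem registration and ring setup are assumed to have happened already, and the bind is expected to fail rather than silently fall back if the driver cannot do zero-copy on that queue:

#include <linux/if_xdp.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>

/* Bind an AF_XDP socket to queue 'qid' of 'ifname', requesting zero-copy.
 * Use XDP_COPY instead to force the copy path, or no flag to let the
 * kernel pick.
 */
static int bind_xsk_zerocopy(int xsk_fd, const char *ifname, __u32 qid)
{
	struct sockaddr_xdp sxdp;

	memset(&sxdp, 0, sizeof(sxdp));
	sxdp.sxdp_family = AF_XDP;
	sxdp.sxdp_ifindex = if_nametoindex(ifname);
	sxdp.sxdp_queue_id = qid;
	sxdp.sxdp_flags = XDP_ZEROCOPY;

	return bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
}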