@@ -53,6 +53,20 @@ static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
 	tx_buf->action = XDP_TX;
 }
 
+static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
+				     struct bnxt_tx_ring_info *txr,
+				     dma_addr_t mapping, u32 len,
+				     struct xdp_frame *xdpf)
+{
+	struct bnxt_sw_tx_bd *tx_buf;
+
+	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
+	tx_buf->action = XDP_REDIRECT;
+	tx_buf->xdpf = xdpf;
+	dma_unmap_addr_set(tx_buf, mapping, mapping);
+	dma_unmap_len_set(tx_buf, len, 0);
+}
+
 void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
 {
 	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
@@ -66,7 +80,17 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
 	for (i = 0; i < nr_pkts; i++) {
 		tx_buf = &txr->tx_buf_ring[tx_cons];
 
-		if (tx_buf->action == XDP_TX) {
+		if (tx_buf->action == XDP_REDIRECT) {
+			struct pci_dev *pdev = bp->pdev;
+
+			dma_unmap_single(&pdev->dev,
+					 dma_unmap_addr(tx_buf, mapping),
+					 dma_unmap_len(tx_buf, len),
+					 PCI_DMA_TODEVICE);
+			xdp_return_frame(tx_buf->xdpf);
+			tx_buf->action = 0;
+			tx_buf->xdpf = NULL;
+		} else if (tx_buf->action == XDP_TX) {
 			rx_doorbell_needed = true;
 			last_tx_cons = tx_cons;
 		}
@@ -101,19 +125,19 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 		return false;
 
 	pdev = bp->pdev;
-	txr = rxr->bnapi->tx_ring;
 	rx_buf = &rxr->rx_buf_ring[cons];
 	offset = bp->rx_offset;
 
+	mapping = rx_buf->mapping - bp->rx_dma_offset;
+	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
+
+	txr = rxr->bnapi->tx_ring;
 	xdp.data_hard_start = *data_ptr - offset;
 	xdp.data = *data_ptr;
 	xdp_set_data_meta_invalid(&xdp);
 	xdp.data_end = *data_ptr + *len;
 	xdp.rxq = &rxr->xdp_rxq;
 	orig_data = xdp.data;
-	mapping = rx_buf->mapping - bp->rx_dma_offset;
-
-	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
 
 	rcu_read_lock();
 	act = bpf_prog_run_xdp(xdp_prog, &xdp);
@@ -149,6 +173,30 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 				NEXT_RX(rxr->rx_prod));
 		bnxt_reuse_rx_data(rxr, cons, page);
 		return true;
+	case XDP_REDIRECT:
+		/* if we are calling this here then we know that the
+		 * redirect is coming from a frame received by the
+		 * bnxt_en driver.
+		 */
+		dma_unmap_page_attrs(&pdev->dev, mapping,
+				     PAGE_SIZE, bp->rx_dir,
+				     DMA_ATTR_WEAK_ORDERING);
+
+		/* if we are unable to allocate a new buffer, abort and reuse */
+		if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
+			trace_xdp_exception(bp->dev, xdp_prog, act);
+			bnxt_reuse_rx_data(rxr, cons, page);
+			return true;
+		}
+
+		if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
+			trace_xdp_exception(bp->dev, xdp_prog, act);
+			__free_page(page);
+			return true;
+		}
+
+		*event |= BNXT_REDIRECT_EVENT;
+		break;
 	default:
 		bpf_warn_invalid_xdp_action(act);
 		/* Fall thru */
@@ -162,6 +210,56 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 	return true;
 }
 
+int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
+		  struct xdp_frame **frames, u32 flags)
+{
+	struct bnxt *bp = netdev_priv(dev);
+	struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog);
+	struct pci_dev *pdev = bp->pdev;
+	struct bnxt_tx_ring_info *txr;
+	dma_addr_t mapping;
+	int drops = 0;
+	int ring;
+	int i;
+
+	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
+	    !bp->tx_nr_rings_xdp ||
+	    !xdp_prog)
+		return -EINVAL;
+
+	ring = smp_processor_id() % bp->tx_nr_rings_xdp;
+	txr = &bp->tx_ring[ring];
+
+	for (i = 0; i < num_frames; i++) {
+		struct xdp_frame *xdp = frames[i];
+
+		if (!txr || !bnxt_tx_avail(bp, txr) ||
+		    !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP)) {
+			xdp_return_frame_rx_napi(xdp);
+			drops++;
+			continue;
+		}
+
+		mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
+					 DMA_TO_DEVICE);
+
+		if (dma_mapping_error(&pdev->dev, mapping)) {
+			xdp_return_frame_rx_napi(xdp);
+			drops++;
+			continue;
+		}
+		__bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
+	}
+
+	if (flags & XDP_XMIT_FLUSH) {
+		/* Sync BD data before updating doorbell */
+		wmb();
+		bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
+	}
+
+	return num_frames - drops;
+}
+
 /* Under rtnl_lock */
 static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
 {
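
Note: the hunks above cover bnxt_xdp.c only. For the networking core to actually invoke bnxt_xdp_xmit(), the handler still has to be declared in bnxt_xdp.h and registered as the driver's ndo_xdp_xmit callback in bnxt.c; those hunks are not shown here, so the sketch below is only an assumption about how that wiring would look, not part of this diff.

/* bnxt_xdp.h: prototype for the new handler (assumed, not shown in this diff) */
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
		  struct xdp_frame **frames, u32 flags);

/* bnxt.c: hook the handler into the driver's existing net_device_ops (assumed) */
static const struct net_device_ops bnxt_netdev_ops = {
	/* ... existing callbacks ... */
	.ndo_xdp_xmit		= bnxt_xdp_xmit,
};

Design-wise, bnxt_xdp_xmit() picks a ring with smp_processor_id() % bp->tx_nr_rings_xdp, so redirected frames are spread across the dedicated XDP TX rings and their buffers are reclaimed by the XDP_REDIRECT branch added to bnxt_tx_int_xdp() above.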