@@ -468,46 +468,46 @@ static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
 	return veth_xdp_xmit(dev, n, frames, flags, true);
 }
 
-static void veth_xdp_flush_bq(struct net_device *dev, struct veth_xdp_tx_bq *bq)
+static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
 {
 	int sent, i, err = 0;
 
-	sent = veth_xdp_xmit(dev, bq->count, bq->q, 0, false);
+	sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
 	if (sent < 0) {
 		err = sent;
 		sent = 0;
 		for (i = 0; i < bq->count; i++)
 			xdp_return_frame(bq->q[i]);
 	}
-	trace_xdp_bulk_tx(dev, sent, bq->count - sent, err);
+	trace_xdp_bulk_tx(rq->dev, sent, bq->count - sent, err);
 
 	bq->count = 0;
 }
 
-static void veth_xdp_flush(struct net_device *dev, struct veth_xdp_tx_bq *bq)
+static void veth_xdp_flush(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
 {
-	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
+	struct veth_priv *rcv_priv, *priv = netdev_priv(rq->dev);
 	struct net_device *rcv;
-	struct veth_rq *rq;
+	struct veth_rq *rcv_rq;
 
 	rcu_read_lock();
-	veth_xdp_flush_bq(dev, bq);
+	veth_xdp_flush_bq(rq, bq);
 	rcv = rcu_dereference(priv->peer);
 	if (unlikely(!rcv))
 		goto out;
 
 	rcv_priv = netdev_priv(rcv);
-	rq = &rcv_priv->rq[veth_select_rxq(rcv)];
+	rcv_rq = &rcv_priv->rq[veth_select_rxq(rcv)];
 	/* xdp_ring is initialized on receive side? */
-	if (unlikely(!rcu_access_pointer(rq->xdp_prog)))
+	if (unlikely(!rcu_access_pointer(rcv_rq->xdp_prog)))
 		goto out;
 
-	__veth_xdp_flush(rq);
+	__veth_xdp_flush(rcv_rq);
 out:
 	rcu_read_unlock();
 }
 
-static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp,
+static int veth_xdp_tx(struct veth_rq *rq, struct xdp_buff *xdp,
 		       struct veth_xdp_tx_bq *bq)
 {
 	struct xdp_frame *frame = convert_to_xdp_frame(xdp);
@@ -516,7 +516,7 @@ static int veth_xdp_tx(struct net_device *dev, struct xdp_buff *xdp,
 		return -EOVERFLOW;
 
 	if (unlikely(bq->count == VETH_XDP_TX_BULK_SIZE))
-		veth_xdp_flush_bq(dev, bq);
+		veth_xdp_flush_bq(rq, bq);
 
 	bq->q[bq->count++] = frame;
 
@@ -559,7 +559,7 @@ static struct sk_buff *veth_xdp_rcv_one(struct veth_rq *rq,
 			orig_frame = *frame;
 			xdp.data_hard_start = head;
 			xdp.rxq->mem = frame->mem;
-			if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) {
+			if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
 				trace_xdp_exception(rq->dev, xdp_prog, act);
 				frame = &orig_frame;
 				stats->rx_drops++;
@@ -692,7 +692,7 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq,
 		get_page(virt_to_page(xdp.data));
 		consume_skb(skb);
 		xdp.rxq->mem = rq->xdp_mem;
-		if (unlikely(veth_xdp_tx(rq->dev, &xdp, bq) < 0)) {
+		if (unlikely(veth_xdp_tx(rq, &xdp, bq) < 0)) {
 			trace_xdp_exception(rq->dev, xdp_prog, act);
 			stats->rx_drops++;
 			goto err_xdp;
@@ -817,7 +817,7 @@ static int veth_poll(struct napi_struct *napi, int budget)
 	}
 
 	if (stats.xdp_tx > 0)
-		veth_xdp_flush(rq->dev, &bq);
+		veth_xdp_flush(rq, &bq);
 	if (stats.xdp_redirect > 0)
 		xdp_do_flush();
 	xdp_clear_return_frame_no_direct();
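
The rq-based signatures work because each per-queue context already carries a back-pointer to its owning net_device, so the helpers can still reach the device through rq->dev (as the trace_xdp_bulk_tx and trace_xdp_exception calls above do). As a rough orientation aid only, not part of this diff, here is a sketch of the relevant structures, abbreviated from struct veth_rq and struct veth_priv in drivers/net/veth.c with unrelated fields trimmed:

/* Sketch: abbreviated per-queue and per-device state of the veth driver. */
struct veth_rq {
	struct napi_struct	xdp_napi;
	struct net_device	*dev;		/* back-pointer used as rq->dev above */
	struct bpf_prog __rcu	*xdp_prog;	/* XDP program attached to this rx queue */
	struct xdp_mem_info	xdp_mem;
	struct ptr_ring		xdp_ring;	/* frames queued toward this queue */
	struct xdp_rxq_info	xdp_rxq;
	/* ... other fields trimmed ... */
};

struct veth_priv {
	struct net_device __rcu	*peer;		/* the other end of the veth pair */
	atomic64_t		dropped;
	struct bpf_prog		*_xdp_prog;
	struct veth_rq		*rq;		/* array, one veth_rq per rx queue */
	unsigned int		requested_headroom;
};

With this layout, passing the veth_rq rather than the net_device gives veth_xdp_flush_bq(), veth_xdp_flush() and veth_xdp_tx() direct access to the originating queue while losing nothing, since the device is recoverable via rq->dev.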