@@ -295,6 +295,10 @@ struct send_queue {
 
 	/* Record whether sq is in reset state. */
 	bool reset;
+
+	struct xsk_buff_pool *xsk_pool;
+
+	dma_addr_t xsk_hdr_dma_addr;
 };
 
 /* Internal representation of a receive virtqueue */
@@ -495,6 +499,8 @@ struct virtio_net_common_hdr {
 	};
 };
 
+static struct virtio_net_common_hdr xsk_hdr;
+
 static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
 static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
			       struct net_device *dev,
@@ -5561,6 +5567,29 @@ static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queu
 	return err;
 }
 
+static int virtnet_sq_bind_xsk_pool(struct virtnet_info *vi,
+				    struct send_queue *sq,
+				    struct xsk_buff_pool *pool)
+{
+	int err, qindex;
+
+	qindex = sq - vi->sq;
+
+	virtnet_tx_pause(vi, sq);
+
+	err = virtqueue_reset(sq->vq, virtnet_sq_free_unused_buf);
+	if (err) {
+		netdev_err(vi->dev, "reset tx fail: tx queue index: %d err: %d\n", qindex, err);
+		pool = NULL;
+	}
+
+	sq->xsk_pool = pool;
+
+	virtnet_tx_resume(vi, sq);
+
+	return err;
+}
+
 static int virtnet_xsk_pool_enable(struct net_device *dev,
				   struct xsk_buff_pool *pool,
				   u16 qid)
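
For orientation: virtnet_sq_bind_xsk_pool() above mirrors the existing virtnet_rq_bind_xsk_pool() used on the receive side. It pauses the TX queue, resets the virtqueue to drop in-flight buffers, swaps the pool pointer (falling back to NULL if the reset fails so the queue stays on the normal path), then resumes. Both the enable and disable paths are reached from the driver's ndo_bpf hook when user space binds or unbinds an AF_XDP socket; a minimal sketch of that dispatch follows. The helper name virtnet_xsk_pool_setup and its exact shape are assumptions for illustration, not part of this hunk.

/* Sketch only: how an XDP_SETUP_XSK_POOL request is expected to reach the
 * enable/disable paths touched by this patch. Not taken from this diff.
 */
static int virtnet_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp)
{
	if (xdp->xsk.pool)
		return virtnet_xsk_pool_enable(dev, xdp->xsk.pool,
					       xdp->xsk.queue_id);
	else
		return virtnet_xsk_pool_disable(dev, xdp->xsk.queue_id);
}
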
@@ -5569,6 +5598,7 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
 	struct receive_queue *rq;
 	struct device *dma_dev;
 	struct send_queue *sq;
+	dma_addr_t hdr_dma;
 	int err, size;
 
 	if (vi->hdr_len > xsk_pool_get_headroom(pool))
@@ -5606,6 +5636,11 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
 	if (!rq->xsk_buffs)
 		return -ENOMEM;
 
+	hdr_dma = virtqueue_dma_map_single_attrs(sq->vq, &xsk_hdr, vi->hdr_len,
+						 DMA_TO_DEVICE, 0);
+	if (virtqueue_dma_mapping_error(sq->vq, hdr_dma))
+		return -ENOMEM;
+
 	err = xsk_pool_dma_map(pool, dma_dev, 0);
 	if (err)
 		goto err_xsk_map;
@@ -5614,11 +5649,24 @@ static int virtnet_xsk_pool_enable(struct net_device *dev,
 	if (err)
 		goto err_rq;
 
+	err = virtnet_sq_bind_xsk_pool(vi, sq, pool);
+	if (err)
+		goto err_sq;
+
+	/* Now, we do not support tx offload(such as tx csum), so all the tx
+	 * virtnet hdr is zero. So all the tx packets can share a single hdr.
+	 */
+	sq->xsk_hdr_dma_addr = hdr_dma;
+
 	return 0;
 
+err_sq:
+	virtnet_rq_bind_xsk_pool(vi, rq, NULL);
 err_rq:
 	xsk_pool_dma_unmap(pool, 0);
 err_xsk_map:
+	virtqueue_dma_unmap_single_attrs(rq->vq, hdr_dma, vi->hdr_len,
+					 DMA_TO_DEVICE, 0);
 	return err;
 }
 
@@ -5627,19 +5675,24 @@ static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
 	struct virtnet_info *vi = netdev_priv(dev);
 	struct xsk_buff_pool *pool;
 	struct receive_queue *rq;
+	struct send_queue *sq;
 	int err;
 
 	if (qid >= vi->curr_queue_pairs)
 		return -EINVAL;
 
+	sq = &vi->sq[qid];
 	rq = &vi->rq[qid];
 
 	pool = rq->xsk_pool;
 
 	err = virtnet_rq_bind_xsk_pool(vi, rq, NULL);
+	err |= virtnet_sq_bind_xsk_pool(vi, sq, NULL);
 
 	xsk_pool_dma_unmap(pool, 0);
 
+	virtqueue_dma_unmap_single_attrs(sq->vq, sq->xsk_hdr_dma_addr,
+					 vi->hdr_len, DMA_TO_DEVICE, 0);
 	kvfree(rq->xsk_buffs);
 
 	return err;
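
Nothing in this diff consumes the new fields yet; the enable path only premaps the single all-zero virtio-net header and records it in sq->xsk_hdr_dma_addr. As a hedged illustration of why one shared header is sufficient (no TX offloads are supported, so every TX header is zero), a follow-up transmit helper could be shaped roughly like the sketch below. The function name, the per-frame cookie, and the assumption that the send virtqueue has been switched to premapped DMA mode are illustrative, not taken from this commit.

/* Sketch only, assuming the send virtqueue operates in premapped DMA mode
 * (not shown in this diff): post one AF_XDP frame, reusing the shared
 * premapped header for the virtio-net header slot.
 */
static int virtnet_xsk_xmit_one(struct send_queue *sq,
				struct xsk_buff_pool *pool,
				struct xdp_desc *desc,
				void *cookie)
{
	struct virtnet_info *vi = sq->vq->vdev->priv;
	dma_addr_t addr;

	/* The payload is already DMA-mapped via xsk_pool_dma_map(). */
	addr = xsk_buff_raw_get_dma(pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(pool, addr, desc->len);

	sg_init_table(sq->sg, 2);

	/* Every frame points at the same all-zero header mapped once in
	 * virtnet_xsk_pool_enable().
	 */
	sg_dma_address(sq->sg) = sq->xsk_hdr_dma_addr;
	sg_dma_len(sq->sg) = vi->hdr_len;

	sg_dma_address(sq->sg + 1) = addr;
	sg_dma_len(sq->sg + 1) = desc->len;

	/* 'cookie' is whatever token the completion path later gets back from
	 * virtqueue_get_buf().
	 */
	return virtqueue_add_outbuf(sq->vq, sq->sg, 2, cookie, GFP_ATOMIC);
}

Because the header never varies, every in-flight descriptor can reference the same hdr_dma mapping, which is why the enable path maps &xsk_hdr once instead of allocating a header per packet.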