@@ -400,7 +400,7 @@ static void fec_dump(struct net_device *ndev)
 			fec16_to_cpu(bdp->cbd_sc),
 			fec32_to_cpu(bdp->cbd_bufaddr),
 			fec16_to_cpu(bdp->cbd_datlen),
-			txq->tx_buf[index].skb);
+			txq->tx_buf[index].buf_p);
 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
 		index++;
 	} while (bdp != txq->bd.base);
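
Editor's note: every hunk in this commit replaces the separate skb/xdp members of the
per-descriptor tx bookkeeping with a single buf_p pointer that is interpreted according
to the existing type field. The structure itself is defined in fec.h, which is not part
of this diff, so the following is only a sketch of the layout the driver presumably
relies on after the change (treat the exact definition as an assumption):

/* Hypothetical shape of the tx buffer bookkeeping after this patch; the real
 * definition lives in drivers/net/ethernet/freescale/fec.h (not shown here).
 */
struct fec_tx_buffer {
	void *buf_p;		/* struct sk_buff *, struct xdp_frame *, or
				 * struct page *, depending on type
				 */
	unsigned int type;	/* FEC_TXBUF_T_SKB, FEC_TXBUF_T_XDP_NDO or
				 * FEC_TXBUF_T_XDP_TX
				 */
};
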
@@ -657,7 +657,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 
 	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
 	/* Save skb pointer */
-	txq->tx_buf[index].skb = skb;
+	txq->tx_buf[index].buf_p = skb;
 
 	/* Make sure the updates to rest of the descriptor are performed before
 	 * transferring ownership.
@@ -863,7 +863,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
 	}
 
 	/* Save skb pointer */
-	txq->tx_buf[index].skb = skb;
+	txq->tx_buf[index].buf_p = skb;
 
 	skb_tx_timestamp(skb);
 	txq->bd.cur = bdp;
@@ -960,27 +960,27 @@ static void fec_enet_bd_init(struct net_device *dev)
 							 fec32_to_cpu(bdp->cbd_bufaddr),
 							 fec16_to_cpu(bdp->cbd_datlen),
 							 DMA_TO_DEVICE);
-				if (txq->tx_buf[i].skb) {
-					dev_kfree_skb_any(txq->tx_buf[i].skb);
-					txq->tx_buf[i].skb = NULL;
-				}
-			} else {
-				if (bdp->cbd_bufaddr &&
-				    txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO)
+				if (txq->tx_buf[i].buf_p)
+					dev_kfree_skb_any(txq->tx_buf[i].buf_p);
+			} else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
+				if (bdp->cbd_bufaddr)
 					dma_unmap_single(&fep->pdev->dev,
 							 fec32_to_cpu(bdp->cbd_bufaddr),
 							 fec16_to_cpu(bdp->cbd_datlen),
 							 DMA_TO_DEVICE);
 
-				if (txq->tx_buf[i].xdp) {
-					xdp_return_frame(txq->tx_buf[i].xdp);
-					txq->tx_buf[i].xdp = NULL;
-				}
+				if (txq->tx_buf[i].buf_p)
+					xdp_return_frame(txq->tx_buf[i].buf_p);
+			} else {
+				struct page *page = txq->tx_buf[i].buf_p;
 
-				/* restore default tx buffer type: FEC_TXBUF_T_SKB */
-				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+				if (page)
+					page_pool_put_page(page->pp, page, 0, false);
 			}
 
+			txq->tx_buf[i].buf_p = NULL;
+			/* restore default tx buffer type: FEC_TXBUF_T_SKB */
+			txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
 			bdp->cbd_bufaddr = cpu_to_fec32(0);
 			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
 		}
@@ -1387,6 +1387,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 	struct netdev_queue *nq;
 	int	index = 0;
 	int	entries_free;
+	struct page *page;
+	int	frame_len;
 
 	fep = netdev_priv(ndev);
 
@@ -1408,8 +1410,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 		index = fec_enet_get_bd_index(bdp, &txq->bd);
 
 		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
-			skb = txq->tx_buf[index].skb;
-			txq->tx_buf[index].skb = NULL;
+			skb = txq->tx_buf[index].buf_p;
 			if (bdp->cbd_bufaddr &&
 			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
 				dma_unmap_single(&fep->pdev->dev,
@@ -1428,18 +1429,24 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 			if (unlikely(!budget))
 				break;
 
-			xdpf = txq->tx_buf[index].xdp;
-			if (bdp->cbd_bufaddr &&
-			    txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO)
-				dma_unmap_single(&fep->pdev->dev,
-						 fec32_to_cpu(bdp->cbd_bufaddr),
-						 fec16_to_cpu(bdp->cbd_datlen),
-						 DMA_TO_DEVICE);
+			if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
+				xdpf = txq->tx_buf[index].buf_p;
+				if (bdp->cbd_bufaddr)
+					dma_unmap_single(&fep->pdev->dev,
+							 fec32_to_cpu(bdp->cbd_bufaddr),
+							 fec16_to_cpu(bdp->cbd_datlen),
+							 DMA_TO_DEVICE);
+			} else {
+				page = txq->tx_buf[index].buf_p;
+			}
+
 			bdp->cbd_bufaddr = cpu_to_fec32(0);
-			if (unlikely(!xdpf)) {
+			if (unlikely(!txq->tx_buf[index].buf_p)) {
 				txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
 				goto tx_buf_done;
 			}
+
+			frame_len = fec16_to_cpu(bdp->cbd_datlen);
 		}
 
 		/* Check for errors. */
@@ -1463,7 +1470,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 			if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
 				ndev->stats.tx_bytes += skb->len;
 			else
-				ndev->stats.tx_bytes += xdpf->len;
+				ndev->stats.tx_bytes += frame_len;
 		}
 
 		/* Deferred means some collisions occurred during transmit,
@@ -1488,23 +1495,17 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 
 			/* Free the sk buffer associated with this last transmit */
 			dev_kfree_skb_any(skb);
-		} else {
-			if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
-				xdp_return_frame_rx_napi(xdpf);
-			} else { /* recycle pages of XDP_TX frames */
-				struct page *page = virt_to_head_page(xdpf->data);
-
-				/* The dma_sync_size = 0 as XDP_TX has already
-				 * synced DMA for_device.
-				 */
-				page_pool_put_page(page->pp, page, 0, true);
-			}
-
-			txq->tx_buf[index].xdp = NULL;
-			/* restore default tx buffer type: FEC_TXBUF_T_SKB */
-			txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+		} else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
+			xdp_return_frame_rx_napi(xdpf);
+		} else { /* recycle pages of XDP_TX frames */
+			/* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
+			page_pool_put_page(page->pp, page, 0, true);
 		}
 
+		txq->tx_buf[index].buf_p = NULL;
+		/* restore default tx buffer type: FEC_TXBUF_T_SKB */
+		txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+
tx_buf_done:
 		/* Make sure the update to bdp and tx_buf are performed
 		 * before dirty_tx
@@ -3234,7 +3235,6 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	unsigned int i;
-	struct sk_buff *skb;
 	struct fec_enet_priv_tx_q *txq;
 	struct fec_enet_priv_rx_q *rxq;
 	unsigned int q;
@@ -3259,18 +3259,23 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 			kfree(txq->tx_bounce[i]);
 			txq->tx_bounce[i] = NULL;
 
+			if (!txq->tx_buf[i].buf_p) {
+				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+				continue;
+			}
+
 			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
-				skb = txq->tx_buf[i].skb;
-				txq->tx_buf[i].skb = NULL;
-				dev_kfree_skb(skb);
+				dev_kfree_skb(txq->tx_buf[i].buf_p);
+			} else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
+				xdp_return_frame(txq->tx_buf[i].buf_p);
 			} else {
-				if (txq->tx_buf[i].xdp) {
-					xdp_return_frame(txq->tx_buf[i].xdp);
-					txq->tx_buf[i].xdp = NULL;
-				}
+				struct page *page = txq->tx_buf[i].buf_p;
 
-				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+				page_pool_put_page(page->pp, page, 0, false);
 			}
+
+			txq->tx_buf[i].buf_p = NULL;
+			txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
 		}
 	}
 }
@@ -3793,13 +3798,14 @@ fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
 
 static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 				   struct fec_enet_priv_tx_q *txq,
-				   struct xdp_frame *frame,
-				   u32 dma_sync_len, bool ndo_xmit)
+				   void *frame, u32 dma_sync_len,
+				   bool ndo_xmit)
 {
 	unsigned int index, status, estatus;
 	struct bufdesc *bdp;
 	dma_addr_t dma_addr;
 	int entries_free;
+	u16 frame_len;
 
 	entries_free = fec_enet_get_free_txdesc_num(txq);
 	if (entries_free < MAX_SKB_FRAGS + 1) {
@@ -3815,30 +3821,36 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 	index = fec_enet_get_bd_index(bdp, &txq->bd);
 
 	if (ndo_xmit) {
-		dma_addr = dma_map_single(&fep->pdev->dev, frame->data,
-					  frame->len, DMA_TO_DEVICE);
+		struct xdp_frame *xdpf = frame;
+
+		dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data,
+					  xdpf->len, DMA_TO_DEVICE);
 		if (dma_mapping_error(&fep->pdev->dev, dma_addr))
 			return -ENOMEM;
 
+		frame_len = xdpf->len;
+		txq->tx_buf[index].buf_p = xdpf;
 		txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
 	} else {
-		struct page *page = virt_to_page(frame->data);
+		struct xdp_buff *xdpb = frame;
+		struct page *page;
 
-		dma_addr = page_pool_get_dma_addr(page) + sizeof(*frame) +
-			   frame->headroom;
+		page = virt_to_page(xdpb->data);
+		dma_addr = page_pool_get_dma_addr(page) +
+			   (xdpb->data - xdpb->data_hard_start);
 		dma_sync_single_for_device(&fep->pdev->dev, dma_addr,
 					   dma_sync_len, DMA_BIDIRECTIONAL);
+		frame_len = xdpb->data_end - xdpb->data;
+		txq->tx_buf[index].buf_p = page;
 		txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX;
 	}
 
-	txq->tx_buf[index].xdp = frame;
-
 	status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
 	if (fep->bufdesc_ex)
 		estatus = BD_ENET_TX_INT;
 
 	bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
-	bdp->cbd_datlen = cpu_to_fec16(frame->len);
+	bdp->cbd_datlen = cpu_to_fec16(frame_len);
 
 	if (fep->bufdesc_ex) {
 		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
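
Editor's note: in the XDP_TX branch above the buffer stays an xdp_buff backed by a
DMA-mapped page_pool page, so the device address of the payload is the page's pre-mapped
DMA base plus the offset of the packet inside that page (data minus data_hard_start),
rather than being reconstructed from an xdp_frame header as before. A minimal,
illustrative helper showing that arithmetic (the function name and standalone form are
mine, not the driver's; it assumes the buffer area starts at the page boundary, as with
this driver's Rx page pool):

#include <linux/mm.h>			/* virt_to_page() */
#include <net/page_pool/helpers.h>	/* page_pool_get_dma_addr(); older kernels: <net/page_pool.h> */
#include <net/xdp.h>			/* struct xdp_buff */

/* Illustrative only: device address of an xdp_buff's payload when the buffer
 * sits in a page_pool page that was DMA-mapped at allocation time. Mirrors
 * the computation added in fec_enet_txq_xmit_frame() for the XDP_TX path.
 */
static dma_addr_t xdp_tx_payload_dma(struct xdp_buff *xdpb)
{
	struct page *page = virt_to_page(xdpb->data);

	/* data_hard_start is where the page's buffer area begins, so the
	 * difference is the packet's offset within the mapped page.
	 */
	return page_pool_get_dma_addr(page) + (xdpb->data - xdpb->data_hard_start);
}
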
@@ -3879,14 +3891,10 @@ static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
 				int cpu, struct xdp_buff *xdp,
 				u32 dma_sync_len)
 {
-	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
 	struct fec_enet_priv_tx_q *txq;
 	struct netdev_queue *nq;
 	int queue, ret;
 
-	if (unlikely(!xdpf))
-		return -EFAULT;
-
 	queue = fec_enet_xdp_get_tx_queue(fep, cpu);
 	txq = fep->tx_queue[queue];
 	nq = netdev_get_tx_queue(fep->netdev, queue);
@@ -3895,7 +3903,7 @@ static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
 
 	/* Avoid tx timeout as XDP shares the queue with kernel stack */
 	txq_trans_cond_update(nq);
-	ret = fec_enet_txq_xmit_frame(fep, txq, xdpf, dma_sync_len, false);
+	ret = fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false);
 
 	__netif_tx_unlock(nq);
 