@@ -328,7 +328,6 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
 	struct bufdesc *bdp = txq->bd.cur;
 	struct bufdesc_ex *ebdp;
 	int nr_frags = skb_shinfo(skb)->nr_frags;
-	unsigned short queue = skb_get_queue_mapping(skb);
 	int frag, frag_len;
 	unsigned short status;
 	unsigned int estatus = 0;
@@ -361,7 +360,7 @@ fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
 
 		if (fep->bufdesc_ex) {
 			if (fep->quirks & FEC_QUIRK_HAS_AVB)
-				estatus |= FEC_TX_BD_FTYPE(queue);
+				estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
 			if (skb->ip_summed == CHECKSUM_PARTIAL)
 				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
 			ebdp->cbd_bdu = 0;
@@ -415,7 +414,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 	dma_addr_t addr;
 	unsigned short status;
 	unsigned short buflen;
-	unsigned short queue;
 	unsigned int estatus = 0;
 	unsigned int index;
 	int entries_free;
@@ -444,7 +442,6 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 	bufaddr = skb->data;
 	buflen = skb_headlen(skb);
 
-	queue = skb_get_queue_mapping(skb);
 	index = fec_enet_get_bd_index(bdp, &txq->bd);
 	if (((unsigned long) bufaddr) & fep->tx_align ||
 	    fep->quirks & FEC_QUIRK_SWAP_FRAME) {
@@ -487,7 +484,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 
 		if (fep->quirks & FEC_QUIRK_HAS_AVB)
-			estatus |= FEC_TX_BD_FTYPE(queue);
+			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
 
 		if (skb->ip_summed == CHECKSUM_PARTIAL)
 			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
@@ -521,7 +518,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 	txq->bd.cur = bdp;
 
 	/* Trigger transmission start */
-	writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
+	writel(0, txq->bd.reg_desc_active);
 
 	return 0;
 }
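
Note (not part of the diff): the txq->bd / rxq->bd members used above and below live in the driver's per-ring descriptor bookkeeping struct, whose definition sits in fec.h and is not shown in this commit. A minimal sketch of the fields this patch relies on; the struct name and any member not visible in this diff are assumptions:

/* Sketch only -- the real definition lives in fec.h and may differ. */
struct bufdesc_prop {			/* assumed name */
	int qid;			/* ring index; replaces skb_get_queue_mapping() */
	struct bufdesc *base;		/* assumed: first descriptor of the ring */
	struct bufdesc *last;		/* last descriptor of the ring */
	struct bufdesc *cur;		/* next descriptor to hand to hardware */
	void __iomem *reg_desc_active;	/* cached *_DES_ACTIVE register address */
	dma_addr_t dma;			/* bus address of the descriptor ring */
	unsigned char dsize;		/* descriptor size in bytes */
	unsigned char dsize_log2;	/* log2(dsize), for index arithmetic */
};

With qid and reg_desc_active carried per ring, the transmit and receive hot paths no longer recompute the queue index from the skb or the register offset from the queue number.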
@@ -534,7 +531,6 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
-	unsigned short queue = skb_get_queue_mapping(skb);
 	unsigned short status;
 	unsigned int estatus = 0;
 	dma_addr_t addr;
@@ -566,7 +562,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
 
 	if (fep->bufdesc_ex) {
 		if (fep->quirks & FEC_QUIRK_HAS_AVB)
-			estatus |= FEC_TX_BD_FTYPE(queue);
+			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
 		if (skb->ip_summed == CHECKSUM_PARTIAL)
 			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
 		ebdp->cbd_bdu = 0;
@@ -595,7 +591,6 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
-	unsigned short queue = skb_get_queue_mapping(skb);
 	void *bufaddr;
 	unsigned long dmabuf;
 	unsigned short status;
@@ -630,7 +625,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
 
 	if (fep->bufdesc_ex) {
 		if (fep->quirks & FEC_QUIRK_HAS_AVB)
-			estatus |= FEC_TX_BD_FTYPE(queue);
+			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
 		if (skb->ip_summed == CHECKSUM_PARTIAL)
 			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
 		ebdp->cbd_bdu = 0;
@@ -650,7 +645,6 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
 	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
 	int total_len, data_left;
 	struct bufdesc *bdp = txq->bd.cur;
-	unsigned short queue = skb_get_queue_mapping(skb);
 	struct tso_t tso;
 	unsigned int index = 0;
 	int ret;
@@ -715,11 +709,11 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
 
 	/* Trigger transmission start */
 	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
-	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
-	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
-	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) ||
-	    !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)))
-		writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue));
+	    !readl(txq->bd.reg_desc_active) ||
+	    !readl(txq->bd.reg_desc_active) ||
+	    !readl(txq->bd.reg_desc_active) ||
+	    !readl(txq->bd.reg_desc_active))
+		writel(0, txq->bd.reg_desc_active);
 
 	return 0;
 
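
The four back-to-back readl() calls above are the pre-existing ERR007885 erratum workaround; this hunk only changes where the register address comes from. An equivalent, more explicit form of the same check (a sketch for clarity, not part of the commit; the erratum description in the comment is an assumption drawn from the existing code):

/* Sketch: decide whether to trigger a descriptor scan. On affected
 * silicon a single read of the DES_ACTIVE register is assumed to be
 * unreliable, so up to four reads are tried and the ring is kicked if
 * any of them returns 0. Without the quirk, always kick.
 */
static bool fec_txq_needs_kick(struct fec_enet_private *fep,
			       struct fec_enet_priv_tx_q *txq)
{
	int i;

	if (!(fep->quirks & FEC_QUIRK_ERR007885))
		return true;

	for (i = 0; i < 4; i++)
		if (!readl(txq->bd.reg_desc_active))
			return true;

	return false;
}

Behaviour matches the short-circuit || chain above: the reads stop as soon as one of them observes 0.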
@@ -819,7 +813,7 @@ static void fec_enet_active_rxring(struct net_device *ndev)
 	int i;
 
 	for (i = 0; i < fep->num_rx_queues; i++)
-		writel(0, fep->hwp + FEC_R_DES_ACTIVE(i));
+		writel(0, fep->rx_queue[i]->bd.reg_desc_active);
 }
 
 static void fec_enet_enable_ring(struct net_device *ndev)
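
For context: the removed writel()/readl() calls in this and the surrounding hunks computed the register address through the FEC_R_DES_ACTIVE()/FEC_X_DES_ACTIVE() macros from fec.h, which are not shown in this commit. A hedged sketch of the mapping they presumably perform (queue index to per-queue register offset):

/* Sketch of the assumed old macros, for reference only. The new
 * offset_des_active_rxq[]/offset_des_active_txq[] tables added later in
 * this diff encode the same index-to-offset mapping as data, so
 * fec_enet_init() can cache the resulting address per ring.
 */
#define FEC_R_DES_ACTIVE(X)	(((X) == 1) ? FEC_R_DES_ACTIVE_1 :	\
				 ((X) == 2) ? FEC_R_DES_ACTIVE_2 :	\
					      FEC_R_DES_ACTIVE_0)

#define FEC_X_DES_ACTIVE(X)	(((X) == 1) ? FEC_X_DES_ACTIVE_1 :	\
				 ((X) == 2) ? FEC_X_DES_ACTIVE_2 :	\
					      FEC_X_DES_ACTIVE_0)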
@@ -1255,8 +1249,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 
 	/* ERR006538: Keep the transmitter going */
 	if (bdp != txq->bd.cur &&
-	    readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0)
-		writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id));
+	    readl(txq->bd.reg_desc_active) == 0)
+		writel(0, txq->bd.reg_desc_active);
 }
 
 static void
@@ -1498,7 +1492,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
 		 * incoming frames. On a heavily loaded network, we should be
 		 * able to keep up at the expense of system resources.
 		 */
-		writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id));
+		writel(0, rxq->bd.reg_desc_active);
 	}
 	rxq->bd.cur = bdp;
 	return pkt_received;
@@ -3061,6 +3055,14 @@ static const struct net_device_ops fec_netdev_ops = {
 	.ndo_set_features	= fec_set_features,
 };
 
+static const unsigned short offset_des_active_rxq[] = {
+	FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
+};
+
+static const unsigned short offset_des_active_txq[] = {
+	FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
+};
+
 /*
  * XXX: We need to clean up on failure exits here.
  *
@@ -3114,6 +3116,7 @@ static int fec_enet_init(struct net_device *ndev)
 		rxq->bd.dma = bd_dma;
 		rxq->bd.dsize = dsize;
 		rxq->bd.dsize_log2 = dsize_log2;
+		rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
 		bd_dma += size;
 		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
 		rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
@@ -3129,6 +3132,7 @@ static int fec_enet_init(struct net_device *ndev)
 		txq->bd.dma = bd_dma;
 		txq->bd.dsize = dsize;
 		txq->bd.dsize_log2 = dsize_log2;
+		txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
 		bd_dma += size;
 		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
 		txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
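
Once fec_enet_init() has filled in reg_desc_active for every RX and TX ring, kicking the controller to rescan a ring is a single write through the cached pointer, with no per-packet offset lookup. A hypothetical wrapper (not in the patch) just to show the resulting pattern:

/* Hypothetical helper, not part of this commit: restart the hardware's
 * scan of a TX ring. Writing any value to the ring's DES_ACTIVE
 * register serves as the trigger, matching the existing writel(0, ...)
 * usage in the hunks above.
 */
static inline void fec_txq_kick(struct fec_enet_priv_tx_q *txq)
{
	writel(0, txq->bd.reg_desc_active);
}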