@@ -513,149 +513,31 @@ static void liquidio_deinit_pci(void)
513
513
pci_unregister_driver (& liquidio_pci_driver );
514
514
}
515
515
516
- /**
517
- * \brief Stop Tx queues
518
- * @param netdev network device
519
- */
520
- static inline void txqs_stop (struct net_device * netdev )
521
- {
522
- if (netif_is_multiqueue (netdev )) {
523
- int i ;
524
-
525
- for (i = 0 ; i < netdev -> num_tx_queues ; i ++ )
526
- netif_stop_subqueue (netdev , i );
527
- } else {
528
- netif_stop_queue (netdev );
529
- }
530
- }
531
-
532
- /**
533
- * \brief Start Tx queues
534
- * @param netdev network device
535
- */
536
- static inline void txqs_start (struct net_device * netdev )
537
- {
538
- if (netif_is_multiqueue (netdev )) {
539
- int i ;
540
-
541
- for (i = 0 ; i < netdev -> num_tx_queues ; i ++ )
542
- netif_start_subqueue (netdev , i );
543
- } else {
544
- netif_start_queue (netdev );
545
- }
546
- }
547
-
548
- /**
549
- * \brief Wake Tx queues
550
- * @param netdev network device
551
- */
552
- static inline void txqs_wake (struct net_device * netdev )
553
- {
554
- struct lio * lio = GET_LIO (netdev );
555
-
556
- if (netif_is_multiqueue (netdev )) {
557
- int i ;
558
-
559
- for (i = 0 ; i < netdev -> num_tx_queues ; i ++ ) {
560
- int qno = lio -> linfo .txpciq [i %
561
- lio -> oct_dev -> num_iqs ].s .q_no ;
562
-
563
- if (__netif_subqueue_stopped (netdev , i )) {
564
- INCR_INSTRQUEUE_PKT_COUNT (lio -> oct_dev , qno ,
565
- tx_restart , 1 );
566
- netif_wake_subqueue (netdev , i );
567
- }
568
- }
569
- } else {
570
- INCR_INSTRQUEUE_PKT_COUNT (lio -> oct_dev , lio -> txq ,
571
- tx_restart , 1 );
572
- netif_wake_queue (netdev );
573
- }
574
- }
575
-
576
/**
 * \brief Stop Tx queue
 * @param netdev network device
 *
 * NOTE(review): one-line wrapper deleted by this commit; callers use
 * stop_txqs() directly instead.
 */
static void stop_txq(struct net_device *netdev)
{
	txqs_stop(netdev);
}
584
-
585
- /**
586
- * \brief Start Tx queue
587
- * @param netdev network device
588
- */
589
- static void start_txq (struct net_device * netdev )
590
- {
591
- struct lio * lio = GET_LIO (netdev );
592
-
593
- if (lio -> linfo .link .s .link_up ) {
594
- txqs_start (netdev );
595
- return ;
596
- }
597
- }
598
-
599
/**
 * \brief Wake a queue
 * @param netdev network device
 * @param q which queue to wake
 *
 * NOTE(review): deleted by this commit; callers invoke
 * netif_wake_subqueue() directly now that the single-queue
 * (non-multiqueue) path is gone.
 */
static inline void wake_q(struct net_device *netdev, int q)
{
	if (netif_is_multiqueue(netdev))
		netif_wake_subqueue(netdev, q);
	else
		netif_wake_queue(netdev);
}
611
-
612
/**
 * \brief Stop a queue
 * @param netdev network device
 * @param q which queue to stop
 *
 * NOTE(review): deleted by this commit; callers invoke
 * netif_stop_subqueue() directly now that the single-queue
 * (non-multiqueue) path is gone.
 */
static inline void stop_q(struct net_device *netdev, int q)
{
	if (netif_is_multiqueue(netdev))
		netif_stop_subqueue(netdev, q);
	else
		netif_stop_queue(netdev);
}
624
-
625
516
/**
626
517
* \brief Check Tx queue status, and take appropriate action
627
518
* @param lio per-network private data
628
519
* @returns 0 if full, number of queues woken up otherwise
629
520
*/
630
521
static inline int check_txq_status (struct lio * lio )
631
522
{
523
+ int numqs = lio -> netdev -> num_tx_queues ;
632
524
int ret_val = 0 ;
525
+ int q , iq ;
633
526
634
- if (netif_is_multiqueue (lio -> netdev )) {
635
- int numqs = lio -> netdev -> num_tx_queues ;
636
- int q , iq = 0 ;
637
-
638
- /* check each sub-queue state */
639
- for (q = 0 ; q < numqs ; q ++ ) {
640
- iq = lio -> linfo .txpciq [q %
641
- lio -> oct_dev -> num_iqs ].s .q_no ;
642
- if (octnet_iq_is_full (lio -> oct_dev , iq ))
643
- continue ;
644
- if (__netif_subqueue_stopped (lio -> netdev , q )) {
645
- wake_q (lio -> netdev , q );
646
- INCR_INSTRQUEUE_PKT_COUNT (lio -> oct_dev , iq ,
647
- tx_restart , 1 );
648
- ret_val ++ ;
649
- }
527
+ /* check each sub-queue state */
528
+ for (q = 0 ; q < numqs ; q ++ ) {
529
+ iq = lio -> linfo .txpciq [q %
530
+ lio -> oct_dev -> num_iqs ].s .q_no ;
531
+ if (octnet_iq_is_full (lio -> oct_dev , iq ))
532
+ continue ;
533
+ if (__netif_subqueue_stopped (lio -> netdev , q )) {
534
+ netif_wake_subqueue (lio -> netdev , q );
535
+ INCR_INSTRQUEUE_PKT_COUNT (lio -> oct_dev , iq ,
536
+ tx_restart , 1 );
537
+ ret_val ++ ;
650
538
}
651
- } else {
652
- if (octnet_iq_is_full (lio -> oct_dev , lio -> txq ))
653
- return 0 ;
654
- wake_q (lio -> netdev , lio -> txq );
655
- INCR_INSTRQUEUE_PKT_COUNT (lio -> oct_dev , lio -> txq ,
656
- tx_restart , 1 );
657
- ret_val = 1 ;
658
539
}
540
+
659
541
return ret_val ;
660
542
}
661
543
@@ -900,11 +782,11 @@ static inline void update_link_status(struct net_device *netdev,
900
782
if (lio -> linfo .link .s .link_up ) {
901
783
dev_dbg (& oct -> pci_dev -> dev , "%s: link_up" , __func__ );
902
784
netif_carrier_on (netdev );
903
- txqs_wake (netdev );
785
+ wake_txqs (netdev );
904
786
} else {
905
787
dev_dbg (& oct -> pci_dev -> dev , "%s: link_off" , __func__ );
906
788
netif_carrier_off (netdev );
907
- stop_txq (netdev );
789
+ stop_txqs (netdev );
908
790
}
909
791
if (lio -> linfo .link .s .mtu != current_max_mtu ) {
910
792
netif_info (lio , probe , lio -> netdev , "Max MTU changed from %d to %d\n" ,
@@ -1752,39 +1634,24 @@ static int octeon_pci_os_setup(struct octeon_device *oct)
1752
1634
return 0 ;
1753
1635
}
1754
1636
1755
- static inline int skb_iq (struct lio * lio , struct sk_buff * skb )
1756
- {
1757
- int q = 0 ;
1758
-
1759
- if (netif_is_multiqueue (lio -> netdev ))
1760
- q = skb -> queue_mapping % lio -> linfo .num_txpciq ;
1761
-
1762
- return q ;
1763
- }
1764
-
1765
1637
/**
1766
1638
* \brief Check Tx queue state for a given network buffer
1767
1639
* @param lio per-network private data
1768
1640
* @param skb network buffer
1769
1641
*/
1770
1642
static inline int check_txq_state (struct lio * lio , struct sk_buff * skb )
1771
1643
{
1772
- int q = 0 , iq = 0 ;
1644
+ int q , iq ;
1773
1645
1774
- if (netif_is_multiqueue (lio -> netdev )) {
1775
- q = skb -> queue_mapping ;
1776
- iq = lio -> linfo .txpciq [(q % lio -> oct_dev -> num_iqs )].s .q_no ;
1777
- } else {
1778
- iq = lio -> txq ;
1779
- q = iq ;
1780
- }
1646
+ q = skb -> queue_mapping ;
1647
+ iq = lio -> linfo .txpciq [(q % lio -> oct_dev -> num_iqs )].s .q_no ;
1781
1648
1782
1649
if (octnet_iq_is_full (lio -> oct_dev , iq ))
1783
1650
return 0 ;
1784
1651
1785
1652
if (__netif_subqueue_stopped (lio -> netdev , q )) {
1786
1653
INCR_INSTRQUEUE_PKT_COUNT (lio -> oct_dev , iq , tx_restart , 1 );
1787
- wake_q (lio -> netdev , q );
1654
+ netif_wake_subqueue (lio -> netdev , q );
1788
1655
}
1789
1656
return 1 ;
1790
1657
}
@@ -2224,7 +2091,7 @@ static int liquidio_open(struct net_device *netdev)
2224
2091
return -1 ;
2225
2092
}
2226
2093
2227
- start_txq (netdev );
2094
+ start_txqs (netdev );
2228
2095
2229
2096
/* tell Octeon to start forwarding packets to host */
2230
2097
send_rx_ctrl_cmd (lio , 1 );
@@ -2666,14 +2533,9 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2666
2533
lio = GET_LIO (netdev );
2667
2534
oct = lio -> oct_dev ;
2668
2535
2669
- if (netif_is_multiqueue (netdev )) {
2670
- q_idx = skb -> queue_mapping ;
2671
- q_idx = (q_idx % (lio -> linfo .num_txpciq ));
2672
- tag = q_idx ;
2673
- iq_no = lio -> linfo .txpciq [q_idx ].s .q_no ;
2674
- } else {
2675
- iq_no = lio -> txq ;
2676
- }
2536
+ q_idx = skb_iq (lio , skb );
2537
+ tag = q_idx ;
2538
+ iq_no = lio -> linfo .txpciq [q_idx ].s .q_no ;
2677
2539
2678
2540
stats = & oct -> instr_queue [iq_no ]-> stats ;
2679
2541
@@ -2704,23 +2566,14 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2704
2566
2705
2567
ndata .q_no = iq_no ;
2706
2568
2707
- if (netif_is_multiqueue (netdev )) {
2708
- if (octnet_iq_is_full (oct , ndata .q_no )) {
2709
- /* defer sending if queue is full */
2710
- netif_info (lio , tx_err , lio -> netdev , "Transmit failed iq:%d full\n" ,
2711
- ndata .q_no );
2712
- stats -> tx_iq_busy ++ ;
2713
- return NETDEV_TX_BUSY ;
2714
- }
2715
- } else {
2716
- if (octnet_iq_is_full (oct , lio -> txq )) {
2717
- /* defer sending if queue is full */
2718
- stats -> tx_iq_busy ++ ;
2719
- netif_info (lio , tx_err , lio -> netdev , "Transmit failed iq:%d full\n" ,
2720
- lio -> txq );
2721
- return NETDEV_TX_BUSY ;
2722
- }
2569
+ if (octnet_iq_is_full (oct , ndata .q_no )) {
2570
+ /* defer sending if queue is full */
2571
+ netif_info (lio , tx_err , lio -> netdev , "Transmit failed iq:%d full\n" ,
2572
+ ndata .q_no );
2573
+ stats -> tx_iq_busy ++ ;
2574
+ return NETDEV_TX_BUSY ;
2723
2575
}
2576
+
2724
2577
/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
2725
2578
* lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
2726
2579
*/
@@ -2876,7 +2729,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2876
2729
netif_info (lio , tx_queued , lio -> netdev , "Transmit queued successfully\n" );
2877
2730
2878
2731
if (status == IQ_SEND_STOP )
2879
- stop_q (netdev , q_idx );
2732
+ netif_stop_subqueue (netdev , q_idx );
2880
2733
2881
2734
netif_trans_update (netdev );
2882
2735
@@ -2915,7 +2768,7 @@ static void liquidio_tx_timeout(struct net_device *netdev)
2915
2768
"Transmit timeout tx_dropped:%ld, waking up queues now!!\n" ,
2916
2769
netdev -> stats .tx_dropped );
2917
2770
netif_trans_update (netdev );
2918
- txqs_wake (netdev );
2771
+ wake_txqs (netdev );
2919
2772
}
2920
2773
2921
2774
static int liquidio_vlan_rx_add_vid (struct net_device * netdev ,
0 commit comments