Skip to content

Commit ee7a60c

Browse files
committed
Merge branch 'liquidio-Tx-queue-cleanup'
Intiyaz Basha says: ==================== liquidio: Tx queue cleanup Moved some common functions to octeon_network.h. Removed some unwanted functions and checks. ==================== Signed-off-by: David S. Miller <[email protected]>
2 parents 94cb549 + c9614a1 commit ee7a60c

File tree

4 files changed

+109
-341
lines changed

4 files changed

+109
-341
lines changed

drivers/net/ethernet/cavium/liquidio/lio_core.c

Lines changed: 5 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -377,20 +377,12 @@ static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
377377
return;
378378

379379
lio = GET_LIO(netdev);
380-
if (netif_is_multiqueue(netdev)) {
381-
if (__netif_subqueue_stopped(netdev, iq->q_index) &&
382-
lio->linfo.link.s.link_up &&
383-
(!octnet_iq_is_full(oct, iq_num))) {
384-
netif_wake_subqueue(netdev, iq->q_index);
385-
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
386-
tx_restart, 1);
387-
}
388-
} else if (netif_queue_stopped(netdev) &&
389-
lio->linfo.link.s.link_up &&
390-
(!octnet_iq_is_full(oct, lio->txq))) {
391-
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
380+
if (__netif_subqueue_stopped(netdev, iq->q_index) &&
381+
lio->linfo.link.s.link_up &&
382+
(!octnet_iq_is_full(oct, iq_num))) {
383+
netif_wake_subqueue(netdev, iq->q_index);
384+
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
392385
tx_restart, 1);
393-
netif_wake_queue(netdev);
394386
}
395387
}
396388

drivers/net/ethernet/cavium/liquidio/lio_main.c

Lines changed: 33 additions & 180 deletions
Original file line numberDiff line numberDiff line change
@@ -513,149 +513,31 @@ static void liquidio_deinit_pci(void)
513513
pci_unregister_driver(&liquidio_pci_driver);
514514
}
515515

516-
/**
517-
* \brief Stop Tx queues
518-
* @param netdev network device
519-
*/
520-
static inline void txqs_stop(struct net_device *netdev)
521-
{
522-
if (netif_is_multiqueue(netdev)) {
523-
int i;
524-
525-
for (i = 0; i < netdev->num_tx_queues; i++)
526-
netif_stop_subqueue(netdev, i);
527-
} else {
528-
netif_stop_queue(netdev);
529-
}
530-
}
531-
532-
/**
533-
* \brief Start Tx queues
534-
* @param netdev network device
535-
*/
536-
static inline void txqs_start(struct net_device *netdev)
537-
{
538-
if (netif_is_multiqueue(netdev)) {
539-
int i;
540-
541-
for (i = 0; i < netdev->num_tx_queues; i++)
542-
netif_start_subqueue(netdev, i);
543-
} else {
544-
netif_start_queue(netdev);
545-
}
546-
}
547-
548-
/**
549-
* \brief Wake Tx queues
550-
* @param netdev network device
551-
*/
552-
static inline void txqs_wake(struct net_device *netdev)
553-
{
554-
struct lio *lio = GET_LIO(netdev);
555-
556-
if (netif_is_multiqueue(netdev)) {
557-
int i;
558-
559-
for (i = 0; i < netdev->num_tx_queues; i++) {
560-
int qno = lio->linfo.txpciq[i %
561-
lio->oct_dev->num_iqs].s.q_no;
562-
563-
if (__netif_subqueue_stopped(netdev, i)) {
564-
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
565-
tx_restart, 1);
566-
netif_wake_subqueue(netdev, i);
567-
}
568-
}
569-
} else {
570-
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
571-
tx_restart, 1);
572-
netif_wake_queue(netdev);
573-
}
574-
}
575-
576-
/**
577-
* \brief Stop Tx queue
578-
* @param netdev network device
579-
*/
580-
static void stop_txq(struct net_device *netdev)
581-
{
582-
txqs_stop(netdev);
583-
}
584-
585-
/**
586-
* \brief Start Tx queue
587-
* @param netdev network device
588-
*/
589-
static void start_txq(struct net_device *netdev)
590-
{
591-
struct lio *lio = GET_LIO(netdev);
592-
593-
if (lio->linfo.link.s.link_up) {
594-
txqs_start(netdev);
595-
return;
596-
}
597-
}
598-
599-
/**
600-
* \brief Wake a queue
601-
* @param netdev network device
602-
* @param q which queue to wake
603-
*/
604-
static inline void wake_q(struct net_device *netdev, int q)
605-
{
606-
if (netif_is_multiqueue(netdev))
607-
netif_wake_subqueue(netdev, q);
608-
else
609-
netif_wake_queue(netdev);
610-
}
611-
612-
/**
613-
* \brief Stop a queue
614-
* @param netdev network device
615-
* @param q which queue to stop
616-
*/
617-
static inline void stop_q(struct net_device *netdev, int q)
618-
{
619-
if (netif_is_multiqueue(netdev))
620-
netif_stop_subqueue(netdev, q);
621-
else
622-
netif_stop_queue(netdev);
623-
}
624-
625516
/**
626517
* \brief Check Tx queue status, and take appropriate action
627518
* @param lio per-network private data
628519
* @returns 0 if full, number of queues woken up otherwise
629520
*/
630521
static inline int check_txq_status(struct lio *lio)
631522
{
523+
int numqs = lio->netdev->num_tx_queues;
632524
int ret_val = 0;
525+
int q, iq;
633526

634-
if (netif_is_multiqueue(lio->netdev)) {
635-
int numqs = lio->netdev->num_tx_queues;
636-
int q, iq = 0;
637-
638-
/* check each sub-queue state */
639-
for (q = 0; q < numqs; q++) {
640-
iq = lio->linfo.txpciq[q %
641-
lio->oct_dev->num_iqs].s.q_no;
642-
if (octnet_iq_is_full(lio->oct_dev, iq))
643-
continue;
644-
if (__netif_subqueue_stopped(lio->netdev, q)) {
645-
wake_q(lio->netdev, q);
646-
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
647-
tx_restart, 1);
648-
ret_val++;
649-
}
527+
/* check each sub-queue state */
528+
for (q = 0; q < numqs; q++) {
529+
iq = lio->linfo.txpciq[q %
530+
lio->oct_dev->num_iqs].s.q_no;
531+
if (octnet_iq_is_full(lio->oct_dev, iq))
532+
continue;
533+
if (__netif_subqueue_stopped(lio->netdev, q)) {
534+
netif_wake_subqueue(lio->netdev, q);
535+
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
536+
tx_restart, 1);
537+
ret_val++;
650538
}
651-
} else {
652-
if (octnet_iq_is_full(lio->oct_dev, lio->txq))
653-
return 0;
654-
wake_q(lio->netdev, lio->txq);
655-
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
656-
tx_restart, 1);
657-
ret_val = 1;
658539
}
540+
659541
return ret_val;
660542
}
661543

@@ -900,11 +782,11 @@ static inline void update_link_status(struct net_device *netdev,
900782
if (lio->linfo.link.s.link_up) {
901783
dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
902784
netif_carrier_on(netdev);
903-
txqs_wake(netdev);
785+
wake_txqs(netdev);
904786
} else {
905787
dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
906788
netif_carrier_off(netdev);
907-
stop_txq(netdev);
789+
stop_txqs(netdev);
908790
}
909791
if (lio->linfo.link.s.mtu != current_max_mtu) {
910792
netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
@@ -1752,39 +1634,24 @@ static int octeon_pci_os_setup(struct octeon_device *oct)
17521634
return 0;
17531635
}
17541636

1755-
static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
1756-
{
1757-
int q = 0;
1758-
1759-
if (netif_is_multiqueue(lio->netdev))
1760-
q = skb->queue_mapping % lio->linfo.num_txpciq;
1761-
1762-
return q;
1763-
}
1764-
17651637
/**
17661638
* \brief Check Tx queue state for a given network buffer
17671639
* @param lio per-network private data
17681640
* @param skb network buffer
17691641
*/
17701642
static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
17711643
{
1772-
int q = 0, iq = 0;
1644+
int q, iq;
17731645

1774-
if (netif_is_multiqueue(lio->netdev)) {
1775-
q = skb->queue_mapping;
1776-
iq = lio->linfo.txpciq[(q % lio->oct_dev->num_iqs)].s.q_no;
1777-
} else {
1778-
iq = lio->txq;
1779-
q = iq;
1780-
}
1646+
q = skb->queue_mapping;
1647+
iq = lio->linfo.txpciq[(q % lio->oct_dev->num_iqs)].s.q_no;
17811648

17821649
if (octnet_iq_is_full(lio->oct_dev, iq))
17831650
return 0;
17841651

17851652
if (__netif_subqueue_stopped(lio->netdev, q)) {
17861653
INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
1787-
wake_q(lio->netdev, q);
1654+
netif_wake_subqueue(lio->netdev, q);
17881655
}
17891656
return 1;
17901657
}
@@ -2224,7 +2091,7 @@ static int liquidio_open(struct net_device *netdev)
22242091
return -1;
22252092
}
22262093

2227-
start_txq(netdev);
2094+
start_txqs(netdev);
22282095

22292096
/* tell Octeon to start forwarding packets to host */
22302097
send_rx_ctrl_cmd(lio, 1);
@@ -2666,14 +2533,9 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
26662533
lio = GET_LIO(netdev);
26672534
oct = lio->oct_dev;
26682535

2669-
if (netif_is_multiqueue(netdev)) {
2670-
q_idx = skb->queue_mapping;
2671-
q_idx = (q_idx % (lio->linfo.num_txpciq));
2672-
tag = q_idx;
2673-
iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2674-
} else {
2675-
iq_no = lio->txq;
2676-
}
2536+
q_idx = skb_iq(lio, skb);
2537+
tag = q_idx;
2538+
iq_no = lio->linfo.txpciq[q_idx].s.q_no;
26772539

26782540
stats = &oct->instr_queue[iq_no]->stats;
26792541

@@ -2704,23 +2566,14 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
27042566

27052567
ndata.q_no = iq_no;
27062568

2707-
if (netif_is_multiqueue(netdev)) {
2708-
if (octnet_iq_is_full(oct, ndata.q_no)) {
2709-
/* defer sending if queue is full */
2710-
netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2711-
ndata.q_no);
2712-
stats->tx_iq_busy++;
2713-
return NETDEV_TX_BUSY;
2714-
}
2715-
} else {
2716-
if (octnet_iq_is_full(oct, lio->txq)) {
2717-
/* defer sending if queue is full */
2718-
stats->tx_iq_busy++;
2719-
netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2720-
lio->txq);
2721-
return NETDEV_TX_BUSY;
2722-
}
2569+
if (octnet_iq_is_full(oct, ndata.q_no)) {
2570+
/* defer sending if queue is full */
2571+
netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2572+
ndata.q_no);
2573+
stats->tx_iq_busy++;
2574+
return NETDEV_TX_BUSY;
27232575
}
2576+
27242577
/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
27252578
* lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
27262579
*/
@@ -2876,7 +2729,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
28762729
netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
28772730

28782731
if (status == IQ_SEND_STOP)
2879-
stop_q(netdev, q_idx);
2732+
netif_stop_subqueue(netdev, q_idx);
28802733

28812734
netif_trans_update(netdev);
28822735

@@ -2915,7 +2768,7 @@ static void liquidio_tx_timeout(struct net_device *netdev)
29152768
"Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
29162769
netdev->stats.tx_dropped);
29172770
netif_trans_update(netdev);
2918-
txqs_wake(netdev);
2771+
wake_txqs(netdev);
29192772
}
29202773

29212774
static int liquidio_vlan_rx_add_vid(struct net_device *netdev,

0 commit comments

Comments (0)