@@ -479,23 +479,37 @@ mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
 }
 
 static int
-mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_sw_queue *sq)
+mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
 {
+	struct mt76_sw_queue *sq = &dev->q_tx[qid];
 	struct mt76_queue *hwq = sq->q;
-	struct mt76_txq *mtxq, *mtxq_last;
-	int len = 0;
+	struct ieee80211_txq *txq;
+	struct mt76_txq *mtxq;
+	struct mt76_wcid *wcid;
+	int ret = 0;
 
-restart:
-	mtxq_last = list_last_entry(&sq->swq, struct mt76_txq, list);
-	while (!list_empty(&sq->swq)) {
+	spin_lock_bh(&hwq->lock);
+	while (1) {
 		bool empty = false;
-		int cur;
+
+		if (sq->swq_queued >= 4)
+			break;
 
 		if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
-		    test_bit(MT76_RESET, &dev->state))
-			return -EBUSY;
+		    test_bit(MT76_RESET, &dev->state)) {
+			ret = -EBUSY;
+			break;
+		}
+
+		txq = ieee80211_next_txq(dev->hw, qid);
+		if (!txq)
+			break;
+
+		mtxq = (struct mt76_txq *)txq->drv_priv;
+		wcid = mtxq->wcid;
+		if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
+			continue;
 
-		mtxq = list_first_entry(&sq->swq, struct mt76_txq, list);
 		if (mtxq->send_bar && mtxq->aggr) {
 			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
 			struct ieee80211_sta *sta = txq->sta;
@@ -507,38 +521,37 @@ mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_sw_queue *sq)
 			spin_unlock_bh(&hwq->lock);
 			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
 			spin_lock_bh(&hwq->lock);
-			goto restart;
 		}
 
-		list_del_init(&mtxq->list);
-
-		cur = mt76_txq_send_burst(dev, sq, mtxq, &empty);
-		if (!empty)
-			list_add_tail(&mtxq->list, &sq->swq);
-
-		if (cur < 0)
-			return cur;
-
-		len += cur;
-
-		if (mtxq == mtxq_last)
-			break;
+		ret += mt76_txq_send_burst(dev, sq, mtxq, &empty);
+		if (skb_queue_empty(&mtxq->retry_q))
+			empty = true;
+		ieee80211_return_txq(dev->hw, txq, !empty);
 	}
+	spin_unlock_bh(&hwq->lock);
 
-	return len;
+	return ret;
 }
 
-void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_sw_queue *sq)
+void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid)
 {
+	struct mt76_sw_queue *sq = &dev->q_tx[qid];
 	int len;
 
+	if (qid >= 4)
+		return;
+
+	if (sq->swq_queued >= 4)
+		return;
+
 	rcu_read_lock();
-	do {
-		if (sq->swq_queued >= 4 || list_empty(&sq->swq))
-			break;
 
-		len = mt76_txq_schedule_list(dev, sq);
+	do {
+		ieee80211_txq_schedule_start(dev->hw, qid);
+		len = mt76_txq_schedule_list(dev, qid);
+		ieee80211_txq_schedule_end(dev->hw, qid);
 	} while (len > 0);
+
 	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(mt76_txq_schedule);
@@ -547,13 +560,8 @@ void mt76_txq_schedule_all(struct mt76_dev *dev)
 {
 	int i;
 
-	for (i = 0; i <= MT_TXQ_BK; i++) {
-		struct mt76_queue *q = dev->q_tx[i].q;
-
-		spin_lock_bh(&q->lock);
-		mt76_txq_schedule(dev, &dev->q_tx[i]);
-		spin_unlock_bh(&q->lock);
-	}
+	for (i = 0; i <= MT_TXQ_BK; i++)
+		mt76_txq_schedule(dev, i);
 }
 EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
 
@@ -575,8 +583,6 @@ void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
 
 		spin_lock_bh(&hwq->lock);
 		mtxq->send_bar = mtxq->aggr && send_bar;
-		if (!list_empty(&mtxq->list))
-			list_del_init(&mtxq->list);
 		spin_unlock_bh(&hwq->lock);
 	}
 }
@@ -585,37 +591,23 @@ EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);
 void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
 {
 	struct mt76_dev *dev = hw->priv;
-	struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
-	struct mt76_sw_queue *sq = mtxq->swq;
-	struct mt76_queue *hwq = sq->q;
 
 	if (!test_bit(MT76_STATE_RUNNING, &dev->state))
 		return;
 
-	spin_lock_bh(&hwq->lock);
-	if (list_empty(&mtxq->list))
-		list_add_tail(&mtxq->list, &sq->swq);
-	mt76_txq_schedule(dev, sq);
-	spin_unlock_bh(&hwq->lock);
+	mt76_txq_schedule(dev, txq->ac);
 }
 EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);
 
 void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
 {
-	struct mt76_queue *hwq;
 	struct mt76_txq *mtxq;
 	struct sk_buff *skb;
 
 	if (!txq)
 		return;
 
 	mtxq = (struct mt76_txq *)txq->drv_priv;
-	hwq = mtxq->swq->q;
-
-	spin_lock_bh(&hwq->lock);
-	if (!list_empty(&mtxq->list))
-		list_del_init(&mtxq->list);
-	spin_unlock_bh(&hwq->lock);
 
 	while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
 		ieee80211_free_txskb(dev->hw, skb);
@@ -626,7 +618,6 @@ void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
 {
 	struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
 
-	INIT_LIST_HEAD(&mtxq->list);
 	skb_queue_head_init(&mtxq->retry_q);
 
 	mtxq->swq = &dev->q_tx[mt76_txq_get_qid(txq)];
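
For orientation, the hunks above drop mt76's private swq list scheduler and hand TXQ selection to mac80211's scheduling API (ieee80211_txq_schedule_start/end, ieee80211_next_txq, ieee80211_return_txq). The sketch below is not part of the patch: it only shows the generic driver-side shape of that API, assuming a hypothetical drv_push_frame() helper in place of the driver's own hardware push (mt76 instead uses mt76_txq_send_burst() and a per-TXQ retry_q, as seen above).

/*
 * Minimal orientation sketch of the mac80211 TXQ scheduling pattern.
 * The ieee80211_* calls are the real mac80211 API used by the patch;
 * drv_push_frame() is a hypothetical placeholder.
 */
#include <net/mac80211.h>

/*
 * Hypothetical helper: queues one frame to hardware and takes ownership
 * of the skb either way (frees or stashes it on an internal retry queue).
 * Returns 0 on success, a negative value when the hardware ring is full.
 */
int drv_push_frame(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
		   struct sk_buff *skb);

static void drv_schedule_ac(struct ieee80211_hw *hw, u8 ac)
{
	struct ieee80211_txq *txq;

	/* Start a scheduling round for this access category. */
	ieee80211_txq_schedule_start(hw, ac);

	/* mac80211 picks the next TXQ according to its fairness policy. */
	while ((txq = ieee80211_next_txq(hw, ac)) != NULL) {
		struct sk_buff *skb;
		bool requeue = false;

		/* Pull a burst of frames from this station/TID queue. */
		while ((skb = ieee80211_tx_dequeue(hw, txq)) != NULL) {
			if (drv_push_frame(hw, txq, skb) < 0) {
				requeue = true;	/* hardware ring is full */
				break;
			}
		}

		/* Hand the TXQ back; force requeue if we stopped early. */
		ieee80211_return_txq(hw, txq, requeue);
	}

	ieee80211_txq_schedule_end(hw, ac);
}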