@@ -224,6 +224,7 @@
 #define MVNETA_TXQ_SENT_THRESH_MASK(coal)	((coal) << 16)
 #define MVNETA_TXQ_UPDATE_REG(q)		(0x3c60 + ((q) << 2))
 #define MVNETA_TXQ_DEC_SENT_SHIFT		16
+#define MVNETA_TXQ_DEC_SENT_MASK		0xff
 #define MVNETA_TXQ_STATUS_REG(q)		(0x3c40 + ((q) << 2))
 #define MVNETA_TXQ_SENT_DESC_SHIFT		16
 #define MVNETA_TXQ_SENT_DESC_MASK		0x3fff0000
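For context, the DEC_SENT macros describe the sent-descriptor-decrement field of the TXQ update register, and the new 0xff mask doubles as the cap on how many pending descriptors a single write can publish, which matches the "255 descriptors at once" comment further down. A speculative sketch of how a combined update value would be packed under that reading; the field layout is inferred from the macros above, not from the datasheet, and the names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define TXQ_DEC_SENT_SHIFT	16
#define TXQ_DEC_SENT_MASK	0xff	/* both fields appear to be 8-bit */

/* Pack one TXQ update: newly pending descriptors in the low byte,
 * sent descriptors to retire in bits 16..23 (assumed layout).
 */
static uint32_t txq_update_val(uint32_t pend, uint32_t dec_sent)
{
	return (pend & TXQ_DEC_SENT_MASK) |
	       ((dec_sent & TXQ_DEC_SENT_MASK) << TXQ_DEC_SENT_SHIFT);
}

int main(void)
{
	printf("0x%08x\n", txq_update_val(6, 2)); /* -> 0x00020006 */
	return 0;
}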
@@ -525,6 +526,7 @@ struct mvneta_tx_queue {
 	 * descriptor ring
 	 */
 	int count;
+	int pending;
 	int tx_stop_threshold;
 	int tx_wake_threshold;
 
@@ -818,8 +820,9 @@ static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
 	/* Only 255 descriptors can be added at once; assume the caller
 	 * processes TX descriptors in quanta of less than 256
 	 */
-	val = pend_desc;
+	val = pend_desc + txq->pending;
 	mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+	txq->pending = 0;
 }
 
 /* Get pointer to the next TX descriptor to be processed (sent) by HW */
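The rewritten flush path folds any previously deferred descriptors into the next doorbell write and then clears the counter, so one register write can publish several packets' worth of descriptors. A minimal stand-alone mirror of that arithmetic; the struct, function names, and the stub replacing the real mvreg_write() are hypothetical:

#include <stdio.h>

struct txq { int id; int pending; };

/* Stub standing in for the real mvreg_write() register access. */
static void mvreg_write_stub(int txq_id, int val)
{
	printf("txq %d doorbell: +%d descriptors\n", txq_id, val);
}

/* Mirror of the patched flush: publish new plus previously deferred
 * descriptors in one write, then reset the deferred count.
 */
static void pend_desc_add(struct txq *q, int pend_desc)
{
	mvreg_write_stub(q->id, pend_desc + q->pending);
	q->pending = 0;
}

int main(void)
{
	struct txq q = { .id = 0, .pending = 5 }; /* 5 deferred earlier */
	pend_desc_add(&q, 3);                     /* one write: +8 */
	return 0;
}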
@@ -2399,11 +2402,15 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
 		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
 
 		txq->count += frags;
-		mvneta_txq_pend_desc_add(pp, txq, frags);
-
 		if (txq->count >= txq->tx_stop_threshold)
 			netif_tx_stop_queue(nq);
 
+		if (!skb->xmit_more || netif_xmit_stopped(nq) ||
+		    txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
+			mvneta_txq_pend_desc_add(pp, txq, frags);
+		else
+			txq->pending += frags;
+
 		u64_stats_update_begin(&stats->syncp);
 		stats->tx_packets++;
 		stats->tx_bytes += len;
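The transmit path now defers the doorbell whenever the stack signals that more packets are about to follow (skb->xmit_more), and flushes when the queue is stopped or the deferred count would exceed the 255-descriptor budget a single register write can carry. A hedged sketch of just that decision, with plain bool flags standing in for skb->xmit_more and netif_xmit_stopped():

#include <stdbool.h>
#include <stdio.h>

#define DEC_SENT_MASK 0xff	/* one register write carries at most 255 */

/* Decide whether this packet's descriptors must be flushed to the
 * hardware now, or can ride along with a later doorbell write.
 */
static bool must_flush(bool xmit_more, bool queue_stopped,
		       int pending, int frags)
{
	return !xmit_more || queue_stopped ||
	       pending + frags > DEC_SENT_MASK;
}

int main(void)
{
	/* More packets queued behind us and budget left: defer. */
	printf("%d\n", must_flush(true, false, 10, 3));   /* 0 */
	/* Last packet of the burst: flush. */
	printf("%d\n", must_flush(false, false, 10, 3));  /* 1 */
	/* Budget would overflow the 8-bit field: flush. */
	printf("%d\n", must_flush(true, false, 250, 10)); /* 1 */
	return 0;
}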