Skip to content

Commit e22979d

Browse files
Yevgeny Petrilin authored and davem330 (David S. Miller) committed
mlx4_en: Moving to Interrupts for TX completions
Moving to interrupts instead of polling for TX completions. This avoids situations where an skb can be held by the driver for a long time (until the timer expires). The change is also necessary for supporting BQL. Removing comp_lock, which was required because TX completions could be handled from several contexts: interrupts, timer, and polling. Now there are only interrupts. Signed-off-by: Yevgeny Petrilin <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent a19a848 commit e22979d

File tree

4 files changed

+9
-74
lines changed

4 files changed

+9
-74
lines changed

drivers/net/ethernet/mellanox/mlx4/en_cq.c

Lines changed: 3 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -124,11 +124,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
124124
cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
125125
cq->mcq.event = mlx4_en_cq_event;
126126

127-
if (cq->is_tx) {
128-
init_timer(&cq->timer);
129-
cq->timer.function = mlx4_en_poll_tx_cq;
130-
cq->timer.data = (unsigned long) cq;
131-
} else {
127+
if (!cq->is_tx) {
132128
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
133129
napi_enable(&cq->napi);
134130
}
@@ -151,16 +147,12 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
151147

152148
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
153149
{
154-
struct mlx4_en_dev *mdev = priv->mdev;
155-
156-
if (cq->is_tx)
157-
del_timer(&cq->timer);
158-
else {
150+
if (!cq->is_tx) {
159151
napi_disable(&cq->napi);
160152
netif_napi_del(&cq->napi);
161153
}
162154

163-
mlx4_cq_free(mdev->dev, &cq->mcq);
155+
mlx4_cq_free(priv->mdev->dev, &cq->mcq);
164156
}
165157

166158
/* Set rx cq moderation parameters */

drivers/net/ethernet/mellanox/mlx4/en_netdev.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -667,6 +667,10 @@ int mlx4_en_start_port(struct net_device *dev)
667667
mlx4_en_deactivate_cq(priv, cq);
668668
goto tx_err;
669669
}
670+
671+
/* Arm CQ for TX completions */
672+
mlx4_en_arm_cq(priv, cq);
673+
670674
/* Set initial ownership of all Tx TXBBs to SW (1) */
671675
for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
672676
*((u32 *) (tx_ring->buf + j)) = 0xffffffff;

drivers/net/ethernet/mellanox/mlx4/en_tx.c

Lines changed: 1 addition & 58 deletions
Original file line numberDiff line numberDiff line change
@@ -67,8 +67,6 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
6767

6868
inline_thold = min(inline_thold, MAX_INLINE);
6969

70-
spin_lock_init(&ring->comp_lock);
71-
7270
tmp = size * sizeof(struct mlx4_en_tx_info);
7371
ring->tx_info = vmalloc(tmp);
7472
if (!ring->tx_info)
@@ -377,41 +375,12 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq)
377375
{
378376
struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
379377
struct mlx4_en_priv *priv = netdev_priv(cq->dev);
380-
struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
381378

382-
if (!spin_trylock(&ring->comp_lock))
383-
return;
384379
mlx4_en_process_tx_cq(cq->dev, cq);
385-
mod_timer(&cq->timer, jiffies + 1);
386-
spin_unlock(&ring->comp_lock);
380+
mlx4_en_arm_cq(priv, cq);
387381
}
388382

389383

390-
void mlx4_en_poll_tx_cq(unsigned long data)
391-
{
392-
struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
393-
struct mlx4_en_priv *priv = netdev_priv(cq->dev);
394-
struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
395-
u32 inflight;
396-
397-
INC_PERF_COUNTER(priv->pstats.tx_poll);
398-
399-
if (!spin_trylock_irq(&ring->comp_lock)) {
400-
mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
401-
return;
402-
}
403-
mlx4_en_process_tx_cq(cq->dev, cq);
404-
inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
405-
406-
/* If there are still packets in flight and the timer has not already
407-
* been scheduled by the Tx routine then schedule it here to guarantee
408-
* completion processing of these packets */
409-
if (inflight && priv->port_up)
410-
mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
411-
412-
spin_unlock_irq(&ring->comp_lock);
413-
}
414-
415384
static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
416385
struct mlx4_en_tx_ring *ring,
417386
u32 index,
@@ -440,25 +409,6 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
440409
return ring->buf + index * TXBB_SIZE;
441410
}
442411

443-
static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
444-
{
445-
struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
446-
struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
447-
unsigned long flags;
448-
449-
/* If we don't have a pending timer, set one up to catch our recent
450-
post in case the interface becomes idle */
451-
if (!timer_pending(&cq->timer))
452-
mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
453-
454-
/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
455-
if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
456-
if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
457-
mlx4_en_process_tx_cq(priv->dev, cq);
458-
spin_unlock_irqrestore(&ring->comp_lock, flags);
459-
}
460-
}
461-
462412
static int is_inline(struct sk_buff *skb, void **pfrag)
463413
{
464414
void *ptr;
@@ -590,7 +540,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
590540
struct mlx4_en_priv *priv = netdev_priv(dev);
591541
struct mlx4_en_dev *mdev = priv->mdev;
592542
struct mlx4_en_tx_ring *ring;
593-
struct mlx4_en_cq *cq;
594543
struct mlx4_en_tx_desc *tx_desc;
595544
struct mlx4_wqe_data_seg *data;
596545
struct skb_frag_struct *frag;
@@ -638,9 +587,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
638587
ring->blocked = 1;
639588
priv->port_stats.queue_stopped++;
640589

641-
/* Use interrupts to find out when queue opened */
642-
cq = &priv->tx_cq[tx_ind];
643-
mlx4_en_arm_cq(priv, cq);
644590
return NETDEV_TX_BUSY;
645591
}
646592

@@ -788,9 +734,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
788734
iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
789735
}
790736

791-
/* Poll CQ here */
792-
mlx4_en_xmit_poll(priv, tx_ind);
793-
794737
return NETDEV_TX_OK;
795738

796739
tx_drop:

drivers/net/ethernet/mellanox/mlx4/mlx4_en.h

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -122,7 +122,7 @@ enum {
122122
#define MLX4_EN_RX_COAL_TARGET 44
123123
#define MLX4_EN_RX_COAL_TIME 0x10
124124

125-
#define MLX4_EN_TX_COAL_PKTS 5
125+
#define MLX4_EN_TX_COAL_PKTS 16
126126
#define MLX4_EN_TX_COAL_TIME 0x80
127127

128128
#define MLX4_EN_RX_RATE_LOW 400000
@@ -255,7 +255,6 @@ struct mlx4_en_tx_ring {
255255
unsigned long bytes;
256256
unsigned long packets;
257257
unsigned long tx_csum;
258-
spinlock_t comp_lock;
259258
struct mlx4_bf bf;
260259
bool bf_enabled;
261260
};
@@ -308,8 +307,6 @@ struct mlx4_en_cq {
308307
spinlock_t lock;
309308
struct net_device *dev;
310309
struct napi_struct napi;
311-
/* Per-core Tx cq processing support */
312-
struct timer_list timer;
313310
int size;
314311
int buf_size;
315312
unsigned vector;
@@ -530,7 +527,6 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
530527
int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
531528
int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
532529

533-
void mlx4_en_poll_tx_cq(unsigned long data);
534530
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
535531
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb);
536532
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);

0 commit comments

Comments
 (0)