Skip to content

Commit 71ce391

Browse files
wojtas-marcin
authored and davem330 committed
net: mvpp2: enable proper per-CPU TX buffers unmapping
mvpp2 driver allows usage of per-CPU TX processing. Once the packets are prepared independetly on each CPU, the hardware enqueues the descriptors in common TX queue. After they are sent, the buffers and associated sk_buffs should be released on the corresponding CPU. This is why a special index is maintained in order to point to the right data to be released after transmission takes place. Each per-CPU TX queue comprise an array of sent sk_buffs, freed in mvpp2_txq_bufs_free function. However, the index was used there also for obtaining a descriptor (and therefore a buffer to be DMA-unmapped) from common TX queue, which was wrong, because it was not referring to the current CPU. This commit enables proper unmapping of sent data buffers by indexing them in per-CPU queues using a dedicated array for keeping their physical addresses. Signed-off-by: Marcin Wojtas <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent d53793c commit 71ce391

File tree

1 file changed

+37
-15
lines changed
  • drivers/net/ethernet/marvell

1 file changed

+37
-15
lines changed

drivers/net/ethernet/marvell/mvpp2.c

Lines changed: 37 additions & 15 deletions
Original file line number | Diff line number | Diff line change
@@ -776,6 +776,9 @@ struct mvpp2_txq_pcpu {
776776
/* Array of transmitted skb */
777777
struct sk_buff **tx_skb;
778778

779+
/* Array of transmitted buffers' physical addresses */
780+
dma_addr_t *tx_buffs;
781+
779782
/* Index of last TX DMA descriptor that was inserted */
780783
int txq_put_index;
781784

@@ -961,9 +964,13 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
961964
}
962965

963966
static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
964-
struct sk_buff *skb)
967+
struct sk_buff *skb,
968+
struct mvpp2_tx_desc *tx_desc)
965969
{
966970
txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
971+
if (skb)
972+
txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
973+
tx_desc->buf_phys_addr;
967974
txq_pcpu->txq_put_index++;
968975
if (txq_pcpu->txq_put_index == txq_pcpu->size)
969976
txq_pcpu->txq_put_index = 0;
@@ -4392,17 +4399,17 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
43924399
int i;
43934400

43944401
for (i = 0; i < num; i++) {
4395-
struct mvpp2_tx_desc *tx_desc = txq->descs +
4396-
txq_pcpu->txq_get_index;
4402+
dma_addr_t buf_phys_addr =
4403+
txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
43974404
struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
43984405

43994406
mvpp2_txq_inc_get(txq_pcpu);
44004407

44014408
if (!skb)
44024409
continue;
44034410

4404-
dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr,
4405-
tx_desc->data_size, DMA_TO_DEVICE);
4411+
dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
4412+
skb_headlen(skb), DMA_TO_DEVICE);
44064413
dev_kfree_skb_any(skb);
44074414
}
44084415
}
@@ -4634,12 +4641,13 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
46344641
txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
46354642
sizeof(*txq_pcpu->tx_skb),
46364643
GFP_KERNEL);
4637-
if (!txq_pcpu->tx_skb) {
4638-
dma_free_coherent(port->dev->dev.parent,
4639-
txq->size * MVPP2_DESC_ALIGNED_SIZE,
4640-
txq->descs, txq->descs_phys);
4641-
return -ENOMEM;
4642-
}
4644+
if (!txq_pcpu->tx_skb)
4645+
goto error;
4646+
4647+
txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
4648+
sizeof(dma_addr_t), GFP_KERNEL);
4649+
if (!txq_pcpu->tx_buffs)
4650+
goto error;
46434651

46444652
txq_pcpu->count = 0;
46454653
txq_pcpu->reserved_num = 0;
@@ -4648,6 +4656,19 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
46484656
}
46494657

46504658
return 0;
4659+
4660+
error:
4661+
for_each_present_cpu(cpu) {
4662+
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4663+
kfree(txq_pcpu->tx_skb);
4664+
kfree(txq_pcpu->tx_buffs);
4665+
}
4666+
4667+
dma_free_coherent(port->dev->dev.parent,
4668+
txq->size * MVPP2_DESC_ALIGNED_SIZE,
4669+
txq->descs, txq->descs_phys);
4670+
4671+
return -ENOMEM;
46514672
}
46524673

46534674
/* Free allocated TXQ resources */
@@ -4660,6 +4681,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
46604681
for_each_present_cpu(cpu) {
46614682
txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
46624683
kfree(txq_pcpu->tx_skb);
4684+
kfree(txq_pcpu->tx_buffs);
46634685
}
46644686

46654687
if (txq->descs)
@@ -5129,11 +5151,11 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
51295151
if (i == (skb_shinfo(skb)->nr_frags - 1)) {
51305152
/* Last descriptor */
51315153
tx_desc->command = MVPP2_TXD_L_DESC;
5132-
mvpp2_txq_inc_put(txq_pcpu, skb);
5154+
mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
51335155
} else {
51345156
/* Descriptor in the middle: Not First, Not Last */
51355157
tx_desc->command = 0;
5136-
mvpp2_txq_inc_put(txq_pcpu, NULL);
5158+
mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
51375159
}
51385160
}
51395161

@@ -5199,12 +5221,12 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
51995221
/* First and Last descriptor */
52005222
tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
52015223
tx_desc->command = tx_cmd;
5202-
mvpp2_txq_inc_put(txq_pcpu, skb);
5224+
mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
52035225
} else {
52045226
/* First but not Last */
52055227
tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
52065228
tx_desc->command = tx_cmd;
5207-
mvpp2_txq_inc_put(txq_pcpu, NULL);
5229+
mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
52085230

52095231
/* Continue with other skb fragments */
52105232
if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {

0 commit comments

Comments (0)