Commit 4a188a6

Jisheng Zhang authored and davem330 committed
net: mvneta: split rxq/txq init and txq deinit into SW and HW parts
This is to prepare for the suspend/resume improvement in the next patch. The SW parts can be optimized out during resume. As for rxq handling during suspend, we'd like to drop packets by calling mvneta_rxq_drop_pkts(), which is both a SW and a HW operation, so we don't split rxq deinit.

Signed-off-by: Jisheng Zhang <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 6e00f7d commit 4a188a6

File tree: 1 file changed (+66, -19 lines)

drivers/net/ethernet/marvell/mvneta.c

Lines changed: 66 additions & 19 deletions
@@ -2797,10 +2797,8 @@ static void mvneta_rx_reset(struct mvneta_port *pp)
 
 /* Rx/Tx queue initialization/cleanup methods */
 
-/* Create a specified RX queue */
-static int mvneta_rxq_init(struct mvneta_port *pp,
-                           struct mvneta_rx_queue *rxq)
-
+static int mvneta_rxq_sw_init(struct mvneta_port *pp,
+                              struct mvneta_rx_queue *rxq)
 {
         rxq->size = pp->rx_ring_size;
 
@@ -2813,6 +2811,12 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
 
         rxq->last_desc = rxq->size - 1;
 
+        return 0;
+}
+
+static void mvneta_rxq_hw_init(struct mvneta_port *pp,
+                               struct mvneta_rx_queue *rxq)
+{
         /* Set Rx descriptors queue starting address */
         mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
         mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
@@ -2836,6 +2840,20 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
                 mvneta_rxq_short_pool_set(pp, rxq);
                 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
         }
+}
+
+/* Create a specified RX queue */
+static int mvneta_rxq_init(struct mvneta_port *pp,
+                           struct mvneta_rx_queue *rxq)
+
+{
+        int ret;
+
+        ret = mvneta_rxq_sw_init(pp, rxq);
+        if (ret < 0)
+                return ret;
+
+        mvneta_rxq_hw_init(pp, rxq);
 
         return 0;
 }
@@ -2858,9 +2876,8 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp,
         rxq->descs_phys = 0;
 }
 
-/* Create and initialize a tx queue */
-static int mvneta_txq_init(struct mvneta_port *pp,
-                           struct mvneta_tx_queue *txq)
+static int mvneta_txq_sw_init(struct mvneta_port *pp,
+                              struct mvneta_tx_queue *txq)
 {
         int cpu;
 
@@ -2873,7 +2890,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
         txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
         txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
 
-
         /* Allocate memory for TX descriptors */
         txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
                                         txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2883,14 +2899,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 
         txq->last_desc = txq->size - 1;
 
-        /* Set maximum bandwidth for enabled TXQs */
-        mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
-        mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
-
-        /* Set Tx descriptors queue starting address */
-        mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
-        mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
-
         txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
                                     GFP_KERNEL);
         if (!txq->tx_skb) {
@@ -2911,7 +2919,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
                                   txq->descs, txq->descs_phys);
                 return -ENOMEM;
         }
-        mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
 
         /* Setup XPS mapping */
         if (txq_number > 1)
@@ -2924,9 +2931,38 @@ static int mvneta_txq_init(struct mvneta_port *pp,
         return 0;
 }
 
+static void mvneta_txq_hw_init(struct mvneta_port *pp,
+                               struct mvneta_tx_queue *txq)
+{
+        /* Set maximum bandwidth for enabled TXQs */
+        mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
+        mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
+
+        /* Set Tx descriptors queue starting address */
+        mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
+        mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
+
+        mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
+}
+
+/* Create and initialize a tx queue */
+static int mvneta_txq_init(struct mvneta_port *pp,
+                           struct mvneta_tx_queue *txq)
+{
+        int ret;
+
+        ret = mvneta_txq_sw_init(pp, txq);
+        if (ret < 0)
+                return ret;
+
+        mvneta_txq_hw_init(pp, txq);
+
+        return 0;
+}
+
 /* Free allocated resources when mvneta_txq_init() fails to allocate memory*/
-static void mvneta_txq_deinit(struct mvneta_port *pp,
-                              struct mvneta_tx_queue *txq)
+static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
+                                 struct mvneta_tx_queue *txq)
 {
         struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
 
@@ -2947,7 +2983,11 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
         txq->last_desc = 0;
         txq->next_desc_to_proc = 0;
         txq->descs_phys = 0;
+}
 
+static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
+                                 struct mvneta_tx_queue *txq)
+{
         /* Set minimum bandwidth for disabled TXQs */
         mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
         mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
@@ -2957,6 +2997,13 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
         mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
 }
 
+static void mvneta_txq_deinit(struct mvneta_port *pp,
+                              struct mvneta_tx_queue *txq)
+{
+        mvneta_txq_sw_deinit(pp, txq);
+        mvneta_txq_hw_deinit(pp, txq);
+}
+
 /* Cleanup all Tx queues */
 static void mvneta_cleanup_txqs(struct mvneta_port *pp)
 {
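Below is a minimal sketch, not part of this commit, of how a follow-up suspend/resume patch could take advantage of the split: the descriptor rings and tx_skb arrays created by the *_sw_init() helpers stay allocated across a suspend, so resume only needs to redo the register programming. The mvneta_resume_txqs() name and the loop are illustrative assumptions, not code from the series.

/* Hypothetical resume-side helper: the rings allocated by
 * mvneta_txq_sw_init() are assumed to have survived the suspend,
 * so only the controller registers need to be re-programmed.
 */
static void mvneta_resume_txqs(struct mvneta_port *pp)
{
        int queue;

        for (queue = 0; queue < txq_number; queue++) {
                struct mvneta_tx_queue *txq = &pp->txqs[queue];

                /* Redo base address, size, bandwidth and coalescing setup */
                mvneta_txq_hw_init(pp, txq);
        }
}

The RX side would use mvneta_rxq_hw_init() the same way, while RX teardown during suspend would still go through the unsplit mvneta_rxq_deinit(), since mvneta_rxq_drop_pkts() mixes SW and HW work as the commit message notes.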
