Commit 159f029

Merge branch 'net-mvneta-improve-suspend-resume'
Jisheng Zhang says:

====================
net: mvneta: improve suspend/resume

This series tries to optimize mvneta's suspend/resume implementation by
only taking the necessary actions.

Since v2:
- keep the rtnl lock held when calling mvneta_start_dev() and
  mvneta_stop_dev(); thanks to Russell for pointing this out

Since v1:
- unify the ret check
- try our best to keep the existing suspend/resume behavior
- split the txq deinit into sw/hw parts as well
- adjust the location of mvneta_stop_dev()

I didn't add Thomas's Ack tag to patch 1 because in v2 I added new code
to split the txq deinit into two parts.
====================

Signed-off-by: David S. Miller <[email protected]>
2 parents: 6e00f7d + 1799cdd
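The core idea of the series is to split each queue's init/deinit into a software half (descriptor rings and bookkeeping, which survive suspend in DRAM) and a hardware half (controller registers, which are volatile), so that suspend/resume only has to redo the hardware half instead of tearing the whole device down via mvneta_stop()/mvneta_open(). The sketch below is an illustrative, self-contained model of that pattern; the struct, field, and function names (struct queue, queue_sw_init(), and so on) are invented for the example and are not the driver's own.

/*
 * Minimal model of the sw/hw init split this series applies to mvneta's
 * queues.  The sw state (the descriptor ring) is allocated once and
 * survives suspend; the hw state (registers) is volatile and is the only
 * thing redone on resume.  All names here are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

struct queue {
        int size;
        int *descs;     /* stands in for the DMA descriptor ring */
        int hw_enabled; /* stands in for the controller's register state */
};

/* sw half: allocate state that persists across suspend */
static int queue_sw_init(struct queue *q, int size)
{
        q->size = size;
        q->descs = calloc(size, sizeof(*q->descs));
        return q->descs ? 0 : -1;
}

/* hw half: program volatile controller state; redone on every resume */
static void queue_hw_init(struct queue *q)
{
        q->hw_enabled = 1;
}

static void queue_hw_deinit(struct queue *q)
{
        q->hw_enabled = 0;
}

static void queue_sw_deinit(struct queue *q)
{
        free(q->descs);
        q->descs = NULL;
}

/* full init, as mvneta_rxq_init()/mvneta_txq_init() now do: sw then hw */
static int queue_init(struct queue *q, int size)
{
        int ret = queue_sw_init(q, size);

        if (ret < 0)
                return ret;
        queue_hw_init(q);
        return 0;
}

int main(void)
{
        struct queue q;

        if (queue_init(&q, 128))
                return 1;

        queue_hw_deinit(&q); /* suspend: hw half only, ring stays allocated */
        queue_hw_init(&q);   /* resume: reprogram hw from surviving sw state */

        queue_sw_deinit(&q);
        printf("suspend/resume cycle without reallocating the ring\n");
        return 0;
}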

File tree

1 file changed (+128, -26 lines)

drivers/net/ethernet/marvell/mvneta.c

Lines changed: 128 additions & 26 deletions
@@ -2797,10 +2797,8 @@ static void mvneta_rx_reset(struct mvneta_port *pp)
 
 /* Rx/Tx queue initialization/cleanup methods */
 
-/* Create a specified RX queue */
-static int mvneta_rxq_init(struct mvneta_port *pp,
-                           struct mvneta_rx_queue *rxq)
-
+static int mvneta_rxq_sw_init(struct mvneta_port *pp,
+                              struct mvneta_rx_queue *rxq)
 {
         rxq->size = pp->rx_ring_size;
 
@@ -2813,6 +2811,12 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
 
         rxq->last_desc = rxq->size - 1;
 
+        return 0;
+}
+
+static void mvneta_rxq_hw_init(struct mvneta_port *pp,
+                               struct mvneta_rx_queue *rxq)
+{
         /* Set Rx descriptors queue starting address */
         mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
         mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
@@ -2836,6 +2840,20 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
                 mvneta_rxq_short_pool_set(pp, rxq);
                 mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
         }
+}
+
+/* Create a specified RX queue */
+static int mvneta_rxq_init(struct mvneta_port *pp,
+                           struct mvneta_rx_queue *rxq)
+
+{
+        int ret;
+
+        ret = mvneta_rxq_sw_init(pp, rxq);
+        if (ret < 0)
+                return ret;
+
+        mvneta_rxq_hw_init(pp, rxq);
 
         return 0;
 }
@@ -2858,9 +2876,8 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp,
         rxq->descs_phys = 0;
 }
 
-/* Create and initialize a tx queue */
-static int mvneta_txq_init(struct mvneta_port *pp,
-                           struct mvneta_tx_queue *txq)
+static int mvneta_txq_sw_init(struct mvneta_port *pp,
+                              struct mvneta_tx_queue *txq)
 {
         int cpu;
 
@@ -2873,7 +2890,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
         txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
         txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
 
-
         /* Allocate memory for TX descriptors */
         txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
                                         txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2883,14 +2899,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 
         txq->last_desc = txq->size - 1;
 
-        /* Set maximum bandwidth for enabled TXQs */
-        mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
-        mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
-
-        /* Set Tx descriptors queue starting address */
-        mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
-        mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
-
         txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
                                     GFP_KERNEL);
         if (!txq->tx_skb) {
@@ -2911,7 +2919,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
                                   txq->descs, txq->descs_phys);
                 return -ENOMEM;
         }
-        mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
 
         /* Setup XPS mapping */
         if (txq_number > 1)
@@ -2924,9 +2931,38 @@ static int mvneta_txq_init(struct mvneta_port *pp,
         return 0;
 }
 
+static void mvneta_txq_hw_init(struct mvneta_port *pp,
+                               struct mvneta_tx_queue *txq)
+{
+        /* Set maximum bandwidth for enabled TXQs */
+        mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
+        mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
+
+        /* Set Tx descriptors queue starting address */
+        mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
+        mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
+
+        mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
+}
+
+/* Create and initialize a tx queue */
+static int mvneta_txq_init(struct mvneta_port *pp,
+                           struct mvneta_tx_queue *txq)
+{
+        int ret;
+
+        ret = mvneta_txq_sw_init(pp, txq);
+        if (ret < 0)
+                return ret;
+
+        mvneta_txq_hw_init(pp, txq);
+
+        return 0;
+}
+
 /* Free allocated resources when mvneta_txq_init() fails to allocate memory*/
-static void mvneta_txq_deinit(struct mvneta_port *pp,
-                              struct mvneta_tx_queue *txq)
+static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
+                                 struct mvneta_tx_queue *txq)
 {
         struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
 
@@ -2947,7 +2983,11 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
         txq->last_desc = 0;
         txq->next_desc_to_proc = 0;
         txq->descs_phys = 0;
+}
 
+static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
+                                 struct mvneta_tx_queue *txq)
+{
         /* Set minimum bandwidth for disabled TXQs */
         mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
         mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
@@ -2957,6 +2997,13 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
         mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
 }
 
+static void mvneta_txq_deinit(struct mvneta_port *pp,
+                              struct mvneta_tx_queue *txq)
+{
+        mvneta_txq_sw_deinit(pp, txq);
+        mvneta_txq_hw_deinit(pp, txq);
+}
+
 /* Cleanup all Tx queues */
 static void mvneta_cleanup_txqs(struct mvneta_port *pp)
 {
@@ -4524,16 +4571,45 @@ static int mvneta_remove(struct platform_device *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int mvneta_suspend(struct device *device)
 {
+        int queue;
         struct net_device *dev = dev_get_drvdata(device);
         struct mvneta_port *pp = netdev_priv(dev);
 
+        if (!netif_running(dev))
+                goto clean_exit;
+
+        if (!pp->neta_armada3700) {
+                spin_lock(&pp->lock);
+                pp->is_stopped = true;
+                spin_unlock(&pp->lock);
+
+                cpuhp_state_remove_instance_nocalls(online_hpstate,
+                                                    &pp->node_online);
+                cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+                                                    &pp->node_dead);
+        }
+
         rtnl_lock();
-        if (netif_running(dev))
-                mvneta_stop(dev);
+        mvneta_stop_dev(pp);
         rtnl_unlock();
+
+        for (queue = 0; queue < rxq_number; queue++) {
+                struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+
+                mvneta_rxq_drop_pkts(pp, rxq);
+        }
+
+        for (queue = 0; queue < txq_number; queue++) {
+                struct mvneta_tx_queue *txq = &pp->txqs[queue];
+
+                mvneta_txq_hw_deinit(pp, txq);
+        }
+
+clean_exit:
         netif_device_detach(dev);
         clk_disable_unprepare(pp->clk_bus);
         clk_disable_unprepare(pp->clk);
+
         return 0;
 }
 
@@ -4542,7 +4618,7 @@ static int mvneta_resume(struct device *device)
         struct platform_device *pdev = to_platform_device(device);
         struct net_device *dev = dev_get_drvdata(device);
         struct mvneta_port *pp = netdev_priv(dev);
-        int err;
+        int err, queue;
 
         clk_prepare_enable(pp->clk);
         if (!IS_ERR(pp->clk_bus))
@@ -4564,12 +4640,38 @@ static int mvneta_resume(struct device *device)
         }
 
         netif_device_attach(dev);
-        rtnl_lock();
-        if (netif_running(dev)) {
-                mvneta_open(dev);
-                mvneta_set_rx_mode(dev);
+
+        if (!netif_running(dev))
+                return 0;
+
+        for (queue = 0; queue < rxq_number; queue++) {
+                struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+
+                rxq->next_desc_to_proc = 0;
+                mvneta_rxq_hw_init(pp, rxq);
+        }
+
+        for (queue = 0; queue < txq_number; queue++) {
+                struct mvneta_tx_queue *txq = &pp->txqs[queue];
+
+                txq->next_desc_to_proc = 0;
+                mvneta_txq_hw_init(pp, txq);
         }
+
+        if (!pp->neta_armada3700) {
+                spin_lock(&pp->lock);
+                pp->is_stopped = false;
+                spin_unlock(&pp->lock);
+                cpuhp_state_add_instance_nocalls(online_hpstate,
+                                                 &pp->node_online);
+                cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+                                                 &pp->node_dead);
+        }
+
+        rtnl_lock();
+        mvneta_start_dev(pp);
         rtnl_unlock();
+        mvneta_set_rx_mode(dev);
 
         return 0;
 }
