Skip to content

Commit 3568d21

Browse files
rogerq authored and kuba-moo committed
net: ethernet: ti: am65-cpsw: streamline TX queue creation and cleanup
Introduce am65_cpsw_create_txqs() and am65_cpsw_destroy_txqs() and use them. Signed-off-by: Roger Quadros <[email protected]> Link: https://patch.msgid.link/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 66c1ae6 commit 3568d21

File tree

1 file changed

+77
-46
lines changed

1 file changed

+77
-46
lines changed

drivers/net/ethernet/ti/am65-cpsw-nuss.c

Lines changed: 77 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -502,6 +502,7 @@ static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
502502
struct page *page,
503503
bool allow_direct);
504504
static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma);
505+
static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma);
505506

506507
static void am65_cpsw_destroy_rxq(struct am65_cpsw_common *common, int id)
507508
{
@@ -656,6 +657,76 @@ static int am65_cpsw_create_rxqs(struct am65_cpsw_common *common)
656657
return ret;
657658
}
658659

660+
static void am65_cpsw_destroy_txq(struct am65_cpsw_common *common, int id)
661+
{
662+
struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[id];
663+
664+
napi_disable(&tx_chn->napi_tx);
665+
hrtimer_cancel(&tx_chn->tx_hrtimer);
666+
k3_udma_glue_reset_tx_chn(tx_chn->tx_chn, tx_chn,
667+
am65_cpsw_nuss_tx_cleanup);
668+
k3_udma_glue_disable_tx_chn(tx_chn->tx_chn);
669+
}
670+
671+
static void am65_cpsw_destroy_txqs(struct am65_cpsw_common *common)
672+
{
673+
struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
674+
int id;
675+
676+
/* shutdown tx channels */
677+
atomic_set(&common->tdown_cnt, common->tx_ch_num);
678+
/* ensure new tdown_cnt value is visible */
679+
smp_mb__after_atomic();
680+
reinit_completion(&common->tdown_complete);
681+
682+
for (id = 0; id < common->tx_ch_num; id++)
683+
k3_udma_glue_tdown_tx_chn(tx_chn[id].tx_chn, false);
684+
685+
id = wait_for_completion_timeout(&common->tdown_complete,
686+
msecs_to_jiffies(1000));
687+
if (!id)
688+
dev_err(common->dev, "tx teardown timeout\n");
689+
690+
for (id = common->tx_ch_num - 1; id >= 0; id--)
691+
am65_cpsw_destroy_txq(common, id);
692+
}
693+
694+
static int am65_cpsw_create_txq(struct am65_cpsw_common *common, int id)
695+
{
696+
struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[id];
697+
int ret;
698+
699+
ret = k3_udma_glue_enable_tx_chn(tx_chn->tx_chn);
700+
if (ret)
701+
return ret;
702+
703+
napi_enable(&tx_chn->napi_tx);
704+
705+
return 0;
706+
}
707+
708+
static int am65_cpsw_create_txqs(struct am65_cpsw_common *common)
709+
{
710+
int id, ret;
711+
712+
for (id = 0; id < common->tx_ch_num; id++) {
713+
ret = am65_cpsw_create_txq(common, id);
714+
if (ret) {
715+
dev_err(common->dev, "couldn't create txq %d: %d\n",
716+
id, ret);
717+
goto err;
718+
}
719+
}
720+
721+
return 0;
722+
723+
err:
724+
for (--id; id >= 0; id--)
725+
am65_cpsw_destroy_txq(common, id);
726+
727+
return ret;
728+
}
729+
659730
static int am65_cpsw_nuss_desc_idx(struct k3_cppi_desc_pool *desc_pool,
660731
void *desc,
661732
unsigned char dsize_log2)
@@ -790,9 +861,8 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
790861
static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
791862
{
792863
struct am65_cpsw_host *host_p = am65_common_get_host(common);
793-
struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
794-
int port_idx, ret, tx;
795864
u32 val, port_mask;
865+
int port_idx, ret;
796866

797867
if (common->usage_count)
798868
return 0;
@@ -856,67 +926,28 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
856926
if (ret)
857927
return ret;
858928

859-
for (tx = 0; tx < common->tx_ch_num; tx++) {
860-
ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn);
861-
if (ret) {
862-
dev_err(common->dev, "couldn't enable tx chn %d: %d\n",
863-
tx, ret);
864-
tx--;
865-
goto fail_tx;
866-
}
867-
napi_enable(&tx_chn[tx].napi_tx);
868-
}
929+
ret = am65_cpsw_create_txqs(common);
930+
if (ret)
931+
goto cleanup_rx;
869932

870933
dev_dbg(common->dev, "cpsw_nuss started\n");
871934
return 0;
872935

873-
fail_tx:
874-
while (tx >= 0) {
875-
napi_disable(&tx_chn[tx].napi_tx);
876-
k3_udma_glue_disable_tx_chn(tx_chn[tx].tx_chn);
877-
tx--;
878-
}
879-
936+
cleanup_rx:
880937
am65_cpsw_destroy_rxqs(common);
881938

882939
return ret;
883940
}
884941

885942
static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
886943
{
887-
struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
888-
int i;
889-
890944
if (common->usage_count != 1)
891945
return 0;
892946

893947
cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
894948
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
895949

896-
/* shutdown tx channels */
897-
atomic_set(&common->tdown_cnt, common->tx_ch_num);
898-
/* ensure new tdown_cnt value is visible */
899-
smp_mb__after_atomic();
900-
reinit_completion(&common->tdown_complete);
901-
902-
for (i = 0; i < common->tx_ch_num; i++)
903-
k3_udma_glue_tdown_tx_chn(tx_chn[i].tx_chn, false);
904-
905-
i = wait_for_completion_timeout(&common->tdown_complete,
906-
msecs_to_jiffies(1000));
907-
if (!i)
908-
dev_err(common->dev, "tx timeout\n");
909-
for (i = 0; i < common->tx_ch_num; i++) {
910-
napi_disable(&tx_chn[i].napi_tx);
911-
hrtimer_cancel(&tx_chn[i].tx_hrtimer);
912-
}
913-
914-
for (i = 0; i < common->tx_ch_num; i++) {
915-
k3_udma_glue_reset_tx_chn(tx_chn[i].tx_chn, &tx_chn[i],
916-
am65_cpsw_nuss_tx_cleanup);
917-
k3_udma_glue_disable_tx_chn(tx_chn[i].tx_chn);
918-
}
919-
950+
am65_cpsw_destroy_txqs(common);
920951
am65_cpsw_destroy_rxqs(common);
921952
cpsw_ale_stop(common->ale);
922953

0 commit comments

Comments
 (0)