
Commit 66c1ae6

rogerq authored and kuba-moo committed
net: ethernet: ti: am65-cpsw: streamline RX queue creation and cleanup
Introduce am65_cpsw_create_rxqs() and am65_cpsw_destroy_rxqs() and use them.

Signed-off-by: Roger Quadros <[email protected]>
Link: https://patch.msgid.link/[email protected]
Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 681eb2b commit 66c1ae6
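
The heart of the refactor is the new loop wrapper on the create side. Below is a condensed sketch of that wrapper (error prints trimmed, otherwise matching the hunks further down), showing how the new per-flow helper and its reverse-order unwind fit together:

static int am65_cpsw_create_rxqs(struct am65_cpsw_common *common)
{
        int id, ret;

        /* page pool, xdp_rxq registration and initial page push for each
         * RX flow are handled by the new per-flow helper
         */
        for (id = 0; id < common->rx_ch_num_flows; id++) {
                ret = am65_cpsw_create_rxq(common, id);
                if (ret)
                        goto err;
        }

        ret = k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
        if (ret)
                goto err;

        return 0;

err:
        /* unwind only the flows that were actually created */
        for (--id; id >= 0; id--)
                am65_cpsw_destroy_rxq(common, id);
        return ret;
}

With these helpers in place, am65_cpsw_nuss_common_open(), its error path and am65_cpsw_nuss_common_stop() reduce to single calls to am65_cpsw_create_rxqs() / am65_cpsw_destroy_rxqs(), as the diff below shows.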

File tree

1 file changed: +119 -124 lines changed

drivers/net/ethernet/ti/am65-cpsw-nuss.c

Lines changed: 119 additions & 124 deletions
@@ -498,35 +498,61 @@ static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common);
 static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
 static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
 static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
+static inline void am65_cpsw_put_page(struct am65_cpsw_rx_flow *flow,
+                                      struct page *page,
+                                      bool allow_direct);
+static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma);
 
-static void am65_cpsw_destroy_xdp_rxqs(struct am65_cpsw_common *common)
+static void am65_cpsw_destroy_rxq(struct am65_cpsw_common *common, int id)
 {
         struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
         struct am65_cpsw_rx_flow *flow;
         struct xdp_rxq_info *rxq;
-        int id, port;
+        int port;
 
-        for (id = 0; id < common->rx_ch_num_flows; id++) {
-                flow = &rx_chn->flows[id];
+        flow = &rx_chn->flows[id];
+        napi_disable(&flow->napi_rx);
+        hrtimer_cancel(&flow->rx_hrtimer);
+        k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, id, rx_chn,
+                                  am65_cpsw_nuss_rx_cleanup, !!id);
 
-                for (port = 0; port < common->port_num; port++) {
-                        if (!common->ports[port].ndev)
-                                continue;
+        for (port = 0; port < common->port_num; port++) {
+                if (!common->ports[port].ndev)
+                        continue;
 
-                        rxq = &common->ports[port].xdp_rxq[id];
+                rxq = &common->ports[port].xdp_rxq[id];
 
-                        if (xdp_rxq_info_is_reg(rxq))
-                                xdp_rxq_info_unreg(rxq);
-                }
+                if (xdp_rxq_info_is_reg(rxq))
+                        xdp_rxq_info_unreg(rxq);
+        }
 
-                if (flow->page_pool) {
-                        page_pool_destroy(flow->page_pool);
-                        flow->page_pool = NULL;
-                }
+        if (flow->page_pool) {
+                page_pool_destroy(flow->page_pool);
+                flow->page_pool = NULL;
         }
 }
 
-static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
+static void am65_cpsw_destroy_rxqs(struct am65_cpsw_common *common)
+{
+        struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+        int id;
+
+        reinit_completion(&common->tdown_complete);
+        k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true);
+
+        if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
+                id = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
+                if (!id)
+                        dev_err(common->dev, "rx teardown timeout\n");
+        }
+
+        for (id = common->rx_ch_num_flows - 1; id >= 0; id--)
+                am65_cpsw_destroy_rxq(common, id);
+
+        k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
+}
+
+static int am65_cpsw_create_rxq(struct am65_cpsw_common *common, int id)
 {
         struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
         struct page_pool_params pp_params = {
@@ -541,45 +567,92 @@ static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
         struct am65_cpsw_rx_flow *flow;
         struct xdp_rxq_info *rxq;
         struct page_pool *pool;
-        int id, port, ret;
+        struct page *page;
+        int port, ret, i;
 
-        for (id = 0; id < common->rx_ch_num_flows; id++) {
-                flow = &rx_chn->flows[id];
-                pp_params.napi = &flow->napi_rx;
-                pool = page_pool_create(&pp_params);
-                if (IS_ERR(pool)) {
-                        ret = PTR_ERR(pool);
+        flow = &rx_chn->flows[id];
+        pp_params.napi = &flow->napi_rx;
+        pool = page_pool_create(&pp_params);
+        if (IS_ERR(pool)) {
+                ret = PTR_ERR(pool);
+                return ret;
+        }
+
+        flow->page_pool = pool;
+
+        /* using same page pool is allowed as no running rx handlers
+         * simultaneously for both ndevs
+         */
+        for (port = 0; port < common->port_num; port++) {
+                if (!common->ports[port].ndev)
+                        /* FIXME should we BUG here? */
+                        continue;
+
+                rxq = &common->ports[port].xdp_rxq[id];
+                ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev,
+                                       id, flow->napi_rx.napi_id);
+                if (ret)
+                        goto err;
+
+                ret = xdp_rxq_info_reg_mem_model(rxq,
+                                                 MEM_TYPE_PAGE_POOL,
+                                                 pool);
+                if (ret)
+                        goto err;
+        }
+
+        for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) {
+                page = page_pool_dev_alloc_pages(flow->page_pool);
+                if (!page) {
+                        dev_err(common->dev, "cannot allocate page in flow %d\n",
+                                id);
+                        ret = -ENOMEM;
                         goto err;
                 }
 
-                flow->page_pool = pool;
+                ret = am65_cpsw_nuss_rx_push(common, page, id);
+                if (ret < 0) {
+                        dev_err(common->dev,
+                                "cannot submit page to rx channel flow %d, error %d\n",
+                                id, ret);
+                        am65_cpsw_put_page(flow, page, false);
+                        goto err;
+                }
+        }
 
-                /* using same page pool is allowed as no running rx handlers
-                 * simultaneously for both ndevs
-                 */
-                for (port = 0; port < common->port_num; port++) {
-                        if (!common->ports[port].ndev)
-                                continue;
+        napi_enable(&flow->napi_rx);
+        return 0;
 
-                        rxq = &common->ports[port].xdp_rxq[id];
+err:
+        am65_cpsw_destroy_rxq(common, id);
+        return ret;
+}
 
-                        ret = xdp_rxq_info_reg(rxq, common->ports[port].ndev,
-                                               id, flow->napi_rx.napi_id);
-                        if (ret)
-                                goto err;
+static int am65_cpsw_create_rxqs(struct am65_cpsw_common *common)
+{
+        int id, ret;
 
-                        ret = xdp_rxq_info_reg_mem_model(rxq,
-                                                         MEM_TYPE_PAGE_POOL,
-                                                         pool);
-                        if (ret)
-                                goto err;
+        for (id = 0; id < common->rx_ch_num_flows; id++) {
+                ret = am65_cpsw_create_rxq(common, id);
+                if (ret) {
+                        dev_err(common->dev, "couldn't create rxq %d: %d\n",
+                                id, ret);
+                        goto err;
                 }
         }
 
+        ret = k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
+        if (ret) {
+                dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
+                goto err;
+        }
+
         return 0;
 
 err:
-        am65_cpsw_destroy_xdp_rxqs(common);
+        for (--id; id >= 0; id--)
+                am65_cpsw_destroy_rxq(common, id);
+
         return ret;
 }
 
@@ -643,7 +716,6 @@ static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
         k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
         dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
         k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-
         am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false);
 }
 
@@ -718,12 +790,9 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
 static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
 {
         struct am65_cpsw_host *host_p = am65_common_get_host(common);
-        struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
         struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
-        int port_idx, i, ret, tx, flow_idx;
-        struct am65_cpsw_rx_flow *flow;
+        int port_idx, ret, tx;
         u32 val, port_mask;
-        struct page *page;
 
         if (common->usage_count)
                 return 0;
@@ -783,47 +852,9 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
 
         am65_cpsw_qos_tx_p0_rate_init(common);
 
-        ret = am65_cpsw_create_xdp_rxqs(common);
-        if (ret) {
-                dev_err(common->dev, "Failed to create XDP rx queues\n");
+        ret = am65_cpsw_create_rxqs(common);
+        if (ret)
                 return ret;
-        }
-
-        for (flow_idx = 0; flow_idx < common->rx_ch_num_flows; flow_idx++) {
-                flow = &rx_chn->flows[flow_idx];
-                for (i = 0; i < AM65_CPSW_MAX_RX_DESC; i++) {
-                        page = page_pool_dev_alloc_pages(flow->page_pool);
-                        if (!page) {
-                                dev_err(common->dev, "cannot allocate page in flow %d\n",
-                                        flow_idx);
-                                ret = -ENOMEM;
-                                goto fail_rx;
-                        }
-
-                        ret = am65_cpsw_nuss_rx_push(common, page, flow_idx);
-                        if (ret < 0) {
-                                dev_err(common->dev,
-                                        "cannot submit page to rx channel flow %d, error %d\n",
-                                        flow_idx, ret);
-                                am65_cpsw_put_page(flow, page, false);
-                                goto fail_rx;
-                        }
-                }
-        }
-
-        ret = k3_udma_glue_enable_rx_chn(rx_chn->rx_chn);
-        if (ret) {
-                dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
-                goto fail_rx;
-        }
-
-        for (i = 0; i < common->rx_ch_num_flows ; i++) {
-                napi_enable(&rx_chn->flows[i].napi_rx);
-                if (rx_chn->flows[i].irq_disabled) {
-                        rx_chn->flows[i].irq_disabled = false;
-                        enable_irq(rx_chn->flows[i].irq);
-                }
-        }
 
         for (tx = 0; tx < common->tx_ch_num; tx++) {
                 ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn);
@@ -846,30 +877,13 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
                 tx--;
         }
 
-        for (flow_idx = 0; i < common->rx_ch_num_flows; flow_idx++) {
-                flow = &rx_chn->flows[flow_idx];
-                if (!flow->irq_disabled) {
-                        disable_irq(flow->irq);
-                        flow->irq_disabled = true;
-                }
-                napi_disable(&flow->napi_rx);
-        }
-
-        k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
-
-fail_rx:
-        for (i = 0; i < common->rx_ch_num_flows; i++)
-                k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
-                                          am65_cpsw_nuss_rx_cleanup, !!i);
-
-        am65_cpsw_destroy_xdp_rxqs(common);
+        am65_cpsw_destroy_rxqs(common);
 
         return ret;
 }
 
 static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
 {
-        struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
         struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
         int i;
 
@@ -903,31 +917,12 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
                 k3_udma_glue_disable_tx_chn(tx_chn[i].tx_chn);
         }
 
-        reinit_completion(&common->tdown_complete);
-        k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true);
-
-        if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
-                i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
-                if (!i)
-                        dev_err(common->dev, "rx teardown timeout\n");
-        }
-
-        for (i = common->rx_ch_num_flows - 1; i >= 0; i--) {
-                napi_disable(&rx_chn->flows[i].napi_rx);
-                hrtimer_cancel(&rx_chn->flows[i].rx_hrtimer);
-                k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
-                                          am65_cpsw_nuss_rx_cleanup, !!i);
-        }
-
-        k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
-
+        am65_cpsw_destroy_rxqs(common);
         cpsw_ale_stop(common->ale);
 
         writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
         writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
 
-        am65_cpsw_destroy_xdp_rxqs(common);
-
         dev_dbg(common->dev, "cpsw_nuss stopped\n");
         return 0;
 }
