
Commit 4688f4f

Ryceancurry authored and davem330 committed
net: bcmasp: Keep buffers through power management
There is no advantage to freeing and re-allocating buffers across suspend and resume. This wastes cycles and makes suspend/resume take longer. We also open ourselves up to failed allocations on systems with heavy memory fragmentation.

Signed-off-by: Justin Chen <[email protected]>
Acked-by: Florian Fainelli <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 9112fc0 commit 4688f4f
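
For orientation, here is a minimal sketch of the buffer lifecycle this commit moves to, assuming the call flow implied by the diff below: buffers are allocated once at open, the init helpers only reprogram ring state, and buffers are released only at stop, so a suspend/resume cycle never re-allocates. The *_flow wrapper names are hypothetical and not part of the driver; bcmasp_alloc_buffers, bcmasp_init_tx/rx and bcmasp_reclaim_free_buffers are the helpers introduced below.

/*
 * Minimal sketch (not driver source): how the helpers introduced in this
 * commit are meant to be sequenced. Error paths, register writes and NAPI
 * setup are trimmed; the *_flow wrappers are hypothetical.
 */
static int open_flow(struct bcmasp_intf *intf)
{
	int ret;

	/* Allocate pages, DMA mappings and descriptor rings exactly once. */
	ret = bcmasp_alloc_buffers(intf);
	if (ret)
		return ret;

	/* The init helpers are now void: they only reset ring indices and
	 * program the hardware over the already-allocated buffers.
	 */
	bcmasp_init_tx(intf);
	bcmasp_init_rx(intf);
	return 0;
}

static void resume_flow(struct bcmasp_intf *intf)
{
	/* On resume, just re-run the allocation-free init helpers; nothing
	 * here can fail due to memory fragmentation.
	 */
	bcmasp_init_tx(intf);
	bcmasp_init_rx(intf);
}

static void stop_flow(struct bcmasp_intf *intf)
{
	/* Buffers are released only when the interface is torn down. */
	bcmasp_reclaim_free_buffers(intf);
}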

File tree

2 files changed: +85, -108 lines

drivers/net/ethernet/broadcom/asp2/bcmasp.h

Lines changed: 1 addition & 0 deletions
@@ -315,6 +315,7 @@ struct bcmasp_intf {
 	struct bcmasp_desc *rx_edpkt_cpu;
 	dma_addr_t rx_edpkt_dma_addr;
 	dma_addr_t rx_edpkt_dma_read;
+	dma_addr_t rx_edpkt_dma_valid;
 
 	/* RX buffer prefetcher ring*/
 	void *rx_ring_cpu;

drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c

Lines changed: 84 additions & 108 deletions
@@ -674,40 +674,78 @@ static void bcmasp_adj_link(struct net_device *dev)
 		phy_print_status(phydev);
 }
 
-static int bcmasp_init_rx(struct bcmasp_intf *intf)
+static int bcmasp_alloc_buffers(struct bcmasp_intf *intf)
 {
 	struct device *kdev = &intf->parent->pdev->dev;
 	struct page *buffer_pg;
-	dma_addr_t dma;
-	void *p;
-	u32 reg;
-	int ret;
 
+	/* Alloc RX */
 	intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
 	buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
 	if (!buffer_pg)
 		return -ENOMEM;
 
-	dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
-			   DMA_FROM_DEVICE);
-	if (dma_mapping_error(kdev, dma)) {
-		__free_pages(buffer_pg, intf->rx_buf_order);
-		return -ENOMEM;
-	}
 	intf->rx_ring_cpu = page_to_virt(buffer_pg);
-	intf->rx_ring_dma = dma;
-	intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
+	intf->rx_ring_dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
+					 DMA_FROM_DEVICE);
+	if (dma_mapping_error(kdev, intf->rx_ring_dma))
+		goto free_rx_buffer;
+
+	intf->rx_edpkt_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
+						&intf->rx_edpkt_dma_addr, GFP_KERNEL);
+	if (!intf->rx_edpkt_cpu)
+		goto free_rx_buffer_dma;
+
+	/* Alloc TX */
+	intf->tx_spb_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
+					      &intf->tx_spb_dma_addr, GFP_KERNEL);
+	if (!intf->tx_spb_cpu)
+		goto free_rx_edpkt_dma;
 
-	p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->rx_edpkt_dma_addr,
+	intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
 			       GFP_KERNEL);
-	if (!p) {
-		ret = -ENOMEM;
-		goto free_rx_ring;
-	}
-	intf->rx_edpkt_cpu = p;
+	if (!intf->tx_cbs)
+		goto free_tx_spb_dma;
 
-	netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
+	return 0;
+
+free_tx_spb_dma:
+	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
+			  intf->tx_spb_dma_addr);
+free_rx_edpkt_dma:
+	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
+			  intf->rx_edpkt_dma_addr);
+free_rx_buffer_dma:
+	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
+		       DMA_FROM_DEVICE);
+free_rx_buffer:
+	__free_pages(buffer_pg, intf->rx_buf_order);
+
+	return -ENOMEM;
+}
+
+static void bcmasp_reclaim_free_buffers(struct bcmasp_intf *intf)
+{
+	struct device *kdev = &intf->parent->pdev->dev;
+
+	/* RX buffers */
+	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
+			  intf->rx_edpkt_dma_addr);
+	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
+		       DMA_FROM_DEVICE);
+	__free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
+
+	/* TX buffers */
+	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
+			  intf->tx_spb_dma_addr);
+	kfree(intf->tx_cbs);
+}
 
+static void bcmasp_init_rx(struct bcmasp_intf *intf)
+{
+	/* Restart from index 0 */
+	intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
+	intf->rx_edpkt_dma_valid = intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1);
 	intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr;
 	intf->rx_edpkt_index = 0;
 
@@ -733,64 +771,23 @@ static int bcmasp_init_rx(struct bcmasp_intf *intf)
 	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE);
 	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ);
 	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE);
-	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1),
-			RX_EDPKT_DMA_END);
-	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1),
-			RX_EDPKT_DMA_VALID);
-
-	reg = UMAC2FB_CFG_DEFAULT_EN |
-	      ((intf->channel + 11) << UMAC2FB_CFG_CHID_SHIFT);
-	reg |= (0xd << UMAC2FB_CFG_OK_SEND_SHIFT);
-	umac2fb_wl(intf, reg, UMAC2FB_CFG);
+	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_END);
+	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_VALID);
 
-	return 0;
-
-free_rx_ring:
-	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
-		       DMA_FROM_DEVICE);
-	__free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
-
-	return ret;
+	umac2fb_wl(intf, UMAC2FB_CFG_DEFAULT_EN | ((intf->channel + 11) <<
+		   UMAC2FB_CFG_CHID_SHIFT) | (0xd << UMAC2FB_CFG_OK_SEND_SHIFT),
+		   UMAC2FB_CFG);
 }
 
-static void bcmasp_reclaim_free_all_rx(struct bcmasp_intf *intf)
-{
-	struct device *kdev = &intf->parent->pdev->dev;
 
-	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
-			  intf->rx_edpkt_dma_addr);
-	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
-		       DMA_FROM_DEVICE);
-	__free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
-}
-
-static int bcmasp_init_tx(struct bcmasp_intf *intf)
+static void bcmasp_init_tx(struct bcmasp_intf *intf)
 {
-	struct device *kdev = &intf->parent->pdev->dev;
-	void *p;
-	int ret;
-
-	p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->tx_spb_dma_addr,
-			       GFP_KERNEL);
-	if (!p)
-		return -ENOMEM;
-
-	intf->tx_spb_cpu = p;
+	/* Restart from index 0 */
 	intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1;
 	intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
-
-	intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
-			       GFP_KERNEL);
-	if (!intf->tx_cbs) {
-		ret = -ENOMEM;
-		goto free_tx_spb;
-	}
-
 	intf->tx_spb_index = 0;
 	intf->tx_spb_clean_index = 0;
 
-	netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
-
 	/* Make sure channels are disabled */
 	tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
 	tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
@@ -806,26 +803,6 @@ static int bcmasp_init_tx(struct bcmasp_intf *intf)
 	tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
 	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END);
 	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);
-
-	return 0;
-
-free_tx_spb:
-	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
-			  intf->tx_spb_dma_addr);
-
-	return ret;
-}
-
-static void bcmasp_reclaim_free_all_tx(struct bcmasp_intf *intf)
-{
-	struct device *kdev = &intf->parent->pdev->dev;
-
-	/* Free descriptors */
-	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
-			  intf->tx_spb_dma_addr);
-
-	/* Free cbs */
-	kfree(intf->tx_cbs);
 }
 
 static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable)
@@ -915,10 +892,7 @@ static void bcmasp_netif_deinit(struct net_device *dev)
 	bcmasp_enable_rx_irq(intf, 0);
 
 	netif_napi_del(&intf->tx_napi);
-	bcmasp_reclaim_free_all_tx(intf);
-
 	netif_napi_del(&intf->rx_napi);
-	bcmasp_reclaim_free_all_rx(intf);
 }
 
 static int bcmasp_stop(struct net_device *dev)
@@ -932,6 +906,8 @@ static int bcmasp_stop(struct net_device *dev)
 
 	bcmasp_netif_deinit(dev);
 
+	bcmasp_reclaim_free_buffers(intf);
+
 	phy_disconnect(dev->phydev);
 
 	/* Disable internal EPHY or external PHY */
@@ -1073,17 +1049,12 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
 	intf->old_link = -1;
 	intf->old_pause = -1;
 
-	ret = bcmasp_init_tx(intf);
-	if (ret)
-		goto err_phy_disconnect;
-
-	/* Turn on asp */
+	bcmasp_init_tx(intf);
+	netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
 	bcmasp_enable_tx(intf, 1);
 
-	ret = bcmasp_init_rx(intf);
-	if (ret)
-		goto err_reclaim_tx;
-
+	bcmasp_init_rx(intf);
+	netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
 	bcmasp_enable_rx(intf, 1);
 
 	/* Turn on UniMAC TX/RX */
@@ -1097,12 +1068,6 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
 
 	return 0;
 
-err_reclaim_tx:
-	netif_napi_del(&intf->tx_napi);
-	bcmasp_reclaim_free_all_tx(intf);
-err_phy_disconnect:
-	if (phydev)
-		phy_disconnect(phydev);
 err_phy_disable:
 	if (intf->internal_phy)
 		bcmasp_ephy_enable_set(intf, false);
@@ -1118,13 +1083,24 @@ static int bcmasp_open(struct net_device *dev)
 
 	netif_dbg(intf, ifup, dev, "bcmasp open\n");
 
-	ret = clk_prepare_enable(intf->parent->clk);
+	ret = bcmasp_alloc_buffers(intf);
 	if (ret)
 		return ret;
 
-	ret = bcmasp_netif_init(dev, true);
+	ret = clk_prepare_enable(intf->parent->clk);
 	if (ret)
+		goto err_free_mem;
+
+	ret = bcmasp_netif_init(dev, true);
+	if (ret) {
 		clk_disable_unprepare(intf->parent->clk);
+		goto err_free_mem;
+	}
+
+	return ret;
+
+err_free_mem:
+	bcmasp_reclaim_free_buffers(intf);
 
 	return ret;
 }
