Skip to content

Commit 7a6e288

Browse files
Daniel Borkmann authored and davem330 committed
pktgen: ipv6: numa: consolidate skb allocation to pktgen_alloc_skb
We currently allow for numa-node aware skb allocation only within the fill_packet_ipv4() path, but not in fill_packet_ipv6(). Consolidate that code into a common allocation helper to enable numa-node aware skb allocation for ipv6, and use it in both paths. This also makes both functions a bit more readable.

Signed-off-by: Daniel Borkmann <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent da5bab0 commit 7a6e288

File tree

1 file changed

+27
-25
lines changed

1 file changed

+27
-25
lines changed

net/core/pktgen.c

Lines changed: 27 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -2627,6 +2627,29 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
26272627
pgh->tv_usec = htonl(timestamp.tv_usec);
26282628
}
26292629

2630+
static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
2631+
struct pktgen_dev *pkt_dev,
2632+
unsigned int extralen)
2633+
{
2634+
struct sk_buff *skb = NULL;
2635+
unsigned int size = pkt_dev->cur_pkt_size + 64 + extralen +
2636+
pkt_dev->pkt_overhead;
2637+
2638+
if (pkt_dev->flags & F_NODE) {
2639+
int node = pkt_dev->node >= 0 ? pkt_dev->node : numa_node_id();
2640+
2641+
skb = __alloc_skb(NET_SKB_PAD + size, GFP_NOWAIT, 0, node);
2642+
if (likely(skb)) {
2643+
skb_reserve(skb, NET_SKB_PAD);
2644+
skb->dev = dev;
2645+
}
2646+
} else {
2647+
skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
2648+
}
2649+
2650+
return skb;
2651+
}
2652+
26302653
static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
26312654
struct pktgen_dev *pkt_dev)
26322655
{
@@ -2657,32 +2680,13 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
26572680

26582681
datalen = (odev->hard_header_len + 16) & ~0xf;
26592682

2660-
if (pkt_dev->flags & F_NODE) {
2661-
int node;
2662-
2663-
if (pkt_dev->node >= 0)
2664-
node = pkt_dev->node;
2665-
else
2666-
node = numa_node_id();
2667-
2668-
skb = __alloc_skb(NET_SKB_PAD + pkt_dev->cur_pkt_size + 64
2669-
+ datalen + pkt_dev->pkt_overhead, GFP_NOWAIT, 0, node);
2670-
if (likely(skb)) {
2671-
skb_reserve(skb, NET_SKB_PAD);
2672-
skb->dev = odev;
2673-
}
2674-
}
2675-
else
2676-
skb = __netdev_alloc_skb(odev,
2677-
pkt_dev->cur_pkt_size + 64
2678-
+ datalen + pkt_dev->pkt_overhead, GFP_NOWAIT);
2679-
2683+
skb = pktgen_alloc_skb(odev, pkt_dev, datalen);
26802684
if (!skb) {
26812685
sprintf(pkt_dev->result, "No memory");
26822686
return NULL;
26832687
}
2684-
prefetchw(skb->data);
26852688

2689+
prefetchw(skb->data);
26862690
skb_reserve(skb, datalen);
26872691

26882692
/* Reserve for ethernet and IP header */
@@ -2786,15 +2790,13 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
27862790
mod_cur_headers(pkt_dev);
27872791
queue_map = pkt_dev->cur_queue_map;
27882792

2789-
skb = __netdev_alloc_skb(odev,
2790-
pkt_dev->cur_pkt_size + 64
2791-
+ 16 + pkt_dev->pkt_overhead, GFP_NOWAIT);
2793+
skb = pktgen_alloc_skb(odev, pkt_dev, 16);
27922794
if (!skb) {
27932795
sprintf(pkt_dev->result, "No memory");
27942796
return NULL;
27952797
}
2796-
prefetchw(skb->data);
27972798

2799+
prefetchw(skb->data);
27982800
skb_reserve(skb, 16);
27992801

28002802
/* Reserve for ethernet and IP header */

0 commit comments

Comments
 (0)