
Commit da1b419

Merge branch 'bpf-csum-complete'
Daniel Borkmann says:

====================
Few BPF helper related checksum fixes

The set contains three fixes with regards to CHECKSUM_COMPLETE and BPF
helper functions. For details please see individual patches.

Thanks!

v1 -> v2:
  - Fixed make htmldocs issue reported by kbuild bot.
  - Rest as is.
====================

Signed-off-by: David S. Miller <[email protected]>
2 parents 66cf350 + 8065694 commit da1b419

2 files changed: +57 −24 lines changed

include/linux/skbuff.h

Lines changed: 33 additions & 19 deletions
@@ -2847,6 +2847,18 @@ static inline int skb_linearize_cow(struct sk_buff *skb)
 	       __skb_linearize(skb) : 0;
 }
 
+static __always_inline void
+__skb_postpull_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
+		     unsigned int off)
+{
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
+		skb->csum = csum_block_sub(skb->csum,
+					   csum_partial(start, len, 0), off);
+	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
+		 skb_checksum_start_offset(skb) < 0)
+		skb->ip_summed = CHECKSUM_NONE;
+}
+
 /**
  * skb_postpull_rcsum - update checksum for received skb after pull
  * @skb: buffer to update
@@ -2857,36 +2869,38 @@ static inline int skb_linearize_cow(struct sk_buff *skb)
  * update the CHECKSUM_COMPLETE checksum, or set ip_summed to
  * CHECKSUM_NONE so that it can be recomputed from scratch.
  */
-
 static inline void skb_postpull_rcsum(struct sk_buff *skb,
 				      const void *start, unsigned int len)
 {
-	if (skb->ip_summed == CHECKSUM_COMPLETE)
-		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
-	else if (skb->ip_summed == CHECKSUM_PARTIAL &&
-		 skb_checksum_start_offset(skb) < 0)
-		skb->ip_summed = CHECKSUM_NONE;
+	__skb_postpull_rcsum(skb, start, len, 0);
 }
 
-unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+static __always_inline void
+__skb_postpush_rcsum(struct sk_buff *skb, const void *start, unsigned int len,
+		     unsigned int off)
+{
+	if (skb->ip_summed == CHECKSUM_COMPLETE)
+		skb->csum = csum_block_add(skb->csum,
+					   csum_partial(start, len, 0), off);
+}
 
+/**
+ * skb_postpush_rcsum - update checksum for received skb after push
+ * @skb: buffer to update
+ * @start: start of data after push
+ * @len: length of data pushed
+ *
+ * After doing a push on a received packet, you need to call this to
+ * update the CHECKSUM_COMPLETE checksum.
+ */
 static inline void skb_postpush_rcsum(struct sk_buff *skb,
 				      const void *start, unsigned int len)
 {
-	/* For performing the reverse operation to skb_postpull_rcsum(),
-	 * we can instead of ...
-	 *
-	 *   skb->csum = csum_add(skb->csum, csum_partial(start, len, 0));
-	 *
-	 * ... just use this equivalent version here to save a few
-	 * instructions. Feeding csum of 0 in csum_partial() and later
-	 * on adding skb->csum is equivalent to feed skb->csum in the
-	 * first place.
-	 */
-	if (skb->ip_summed == CHECKSUM_COMPLETE)
-		skb->csum = csum_partial(start, len, skb->csum);
+	__skb_postpush_rcsum(skb, start, len, 0);
 }
 
+unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+
 /**
  * skb_push_rcsum - push skb and update receive checksum
  * @skb: buffer to update
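
Why the new helpers carry a byte offset: the Internet checksum is a 16-bit ones'-complement sum, so a partial sum taken over a block that starts at an odd byte offset lands in swapped byte lanes and must be rotated before it can be folded into or out of skb->csum. csum_block_add()/csum_block_sub() do that rotation, whereas the old skb_postpull_rcsum()/skb_postpush_rcsum() calls implicitly assumed offset 0. A minimal user-space sketch of the arithmetic (illustrative only, not kernel code; the helper names below mirror the kernel ones but are reimplemented here):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* 16-bit ones'-complement sum over buf[0..len), big-endian word order,
 * folded into 16 bits with end-around carry (RFC 1071 style). */
static uint32_t csum_partial(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)buf[i] << 8 | buf[i + 1];
	if (len & 1)
		sum += (uint32_t)buf[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

/* Fold a block's partial sum into a total sum, taking the block's byte
 * offset into account: an odd offset swaps the byte lanes, so the block
 * sum has to be rotated before it is added. */
static uint32_t csum_block_add(uint32_t total, uint32_t block, size_t off)
{
	if (off & 1)
		block = ((block & 0xff) << 8) | (block >> 8);
	total += block;
	while (total >> 16)
		total = (total & 0xffff) + (total >> 16);
	return total;
}

int main(void)
{
	uint8_t pkt[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
	uint8_t zeroed[8];
	const size_t off = 3, len = 4;	/* block at an odd offset */

	memcpy(zeroed, pkt, sizeof(pkt));
	memset(zeroed + off, 0, len);

	uint32_t full  = csum_partial(pkt, sizeof(pkt));
	uint32_t rest  = csum_partial(zeroed, sizeof(zeroed));
	uint32_t block = csum_partial(pkt + off, len);

	/* Correct: rotate the block sum because off is odd. */
	assert(csum_block_add(rest, block, off) == full);
	/* Ignoring the offset (what the old non-offset helpers effectively
	 * did for a block starting at an odd offset) gives a wrong sum. */
	assert(csum_block_add(rest, block, 0) != full);

	printf("full=0x%04x rest=0x%04x block=0x%04x\n", full, rest, block);
	return 0;
}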

net/core/filter.c

Lines changed: 24 additions & 5 deletions
@@ -1365,6 +1365,18 @@ static inline int bpf_try_make_writable(struct sk_buff *skb,
 	return err;
 }
 
+static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
+{
+	if (skb_at_tc_ingress(skb))
+		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
+}
+
+static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
+{
+	if (skb_at_tc_ingress(skb))
+		skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
+}
+
 static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 {
 	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
@@ -1395,7 +1407,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 		return -EFAULT;
 
 	if (flags & BPF_F_RECOMPUTE_CSUM)
-		skb_postpull_rcsum(skb, ptr, len);
+		__skb_postpull_rcsum(skb, ptr, len, offset);
 
 	memcpy(ptr, from, len);
 
@@ -1404,7 +1416,7 @@ static u64 bpf_skb_store_bytes(u64 r1, u64 r2, u64 r3, u64 r4, u64 flags)
 		skb_store_bits(skb, offset, ptr, len);
 
 	if (flags & BPF_F_RECOMPUTE_CSUM)
-		skb_postpush_rcsum(skb, ptr, len);
+		__skb_postpush_rcsum(skb, ptr, len, offset);
 	if (flags & BPF_F_INVALIDATE_HASH)
 		skb_clear_hash(skb);
 
@@ -1607,9 +1619,6 @@ static const struct bpf_func_proto bpf_csum_diff_proto = {
 
 static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
 {
-	if (skb_at_tc_ingress(skb))
-		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
-
 	return dev_forward_skb(dev, skb);
 }
 
@@ -1648,6 +1657,8 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
 	if (unlikely(!skb))
 		return -ENOMEM;
 
+	bpf_push_mac_rcsum(skb);
+
 	return flags & BPF_F_INGRESS ?
 	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
 }
@@ -1693,6 +1704,8 @@ int skb_do_redirect(struct sk_buff *skb)
 		return -EINVAL;
 	}
 
+	bpf_push_mac_rcsum(skb);
+
 	return ri->flags & BPF_F_INGRESS ?
 	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
 }
@@ -1756,7 +1769,10 @@ static u64 bpf_skb_vlan_push(u64 r1, u64 r2, u64 vlan_tci, u64 r4, u64 r5)
 		     vlan_proto != htons(ETH_P_8021AD)))
 		vlan_proto = htons(ETH_P_8021Q);
 
+	bpf_push_mac_rcsum(skb);
 	ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
+	bpf_pull_mac_rcsum(skb);
+
 	bpf_compute_data_end(skb);
 	return ret;
 }
@@ -1776,7 +1792,10 @@ static u64 bpf_skb_vlan_pop(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 	struct sk_buff *skb = (struct sk_buff *) (long) r1;
 	int ret;
 
+	bpf_push_mac_rcsum(skb);
 	ret = skb_vlan_pop(skb);
+	bpf_pull_mac_rcsum(skb);
+
 	bpf_compute_data_end(skb);
 	return ret;
 }
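
For context, a sketch of a tc ingress BPF program that exercises the helpers whose checksum handling this series adjusts: bpf_skb_vlan_push(), which the kernel now brackets with bpf_push_mac_rcsum()/bpf_pull_mac_rcsum(), and bpf_skb_store_bytes() with BPF_F_RECOMPUTE_CSUM, which now uses the offset-aware rcsum helpers. This is an assumed example, not part of the patch set: it is written against libbpf's bpf_helpers.h/bpf_endian.h, the section name and payload offset are arbitrary, and attachment via a clsact qdisc is just one way to load it.

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

/* Attach on ingress, e.g.:
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress bpf da obj prog.o sec classifier
 */
SEC("classifier")
int vlan_tag_and_mark(struct __sk_buff *skb)
{
	const __u8 mark[2] = { 0xde, 0xad };	/* arbitrary payload bytes */

	/* Push a VLAN tag; on tc ingress the kernel now folds the mac
	 * header into skb->csum around skb_vlan_push() so that
	 * CHECKSUM_COMPLETE stays consistent. */
	if (bpf_skb_vlan_push(skb, bpf_htons(ETH_P_8021Q), 42))
		return TC_ACT_SHOT;

	/* Rewrite two bytes at an (odd) packet offset; with
	 * BPF_F_RECOMPUTE_CSUM the kernel patches CHECKSUM_COMPLETE via
	 * the offset-aware __skb_postpull_rcsum()/__skb_postpush_rcsum(). */
	if (bpf_skb_store_bytes(skb, 63, mark, sizeof(mark),
				BPF_F_RECOMPUTE_CSUM))
		return TC_ACT_SHOT;

	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";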
