@@ -1658,9 +1658,12 @@ struct bpf_scratchpad {
 		__be32 diff[MAX_BPF_STACK / sizeof(__be32)];
 		u8     buff[MAX_BPF_STACK];
 	};
+	local_lock_t	bh_lock;
 };
 
-static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
+static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp) = {
+	.bh_lock	= INIT_LOCAL_LOCK(bh_lock),
+};
 
 static inline int __bpf_try_make_writable(struct sk_buff *skb,
 					   unsigned int write_len)
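The first hunk embeds a local_lock_t in the per-CPU scratchpad and initializes it statically with INIT_LOCAL_LOCK. As a minimal sketch of that pattern, assuming a hypothetical struct scratch_area and per-CPU variable scratch that are not part of this change:

#include <linux/local_lock.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU scratch area guarded by a nested-BH local lock,
 * mirroring the bpf_scratchpad change above.
 */
struct scratch_area {
	u8		buf[256];
	local_lock_t	bh_lock;
};

static DEFINE_PER_CPU(struct scratch_area, scratch) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};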
@@ -2021,6 +2024,7 @@ BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
 	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
 	u32 diff_size = from_size + to_size;
 	int i, j = 0;
+	__wsum ret;
 
 	/* This is quite flexible, some examples:
 	 *
@@ -2034,12 +2038,15 @@ BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
 		     diff_size > sizeof(sp->diff)))
 		return -EINVAL;
 
+	local_lock_nested_bh(&bpf_sp.bh_lock);
 	for (i = 0; i < from_size / sizeof(__be32); i++, j++)
 		sp->diff[j] = ~from[i];
 	for (i = 0; i < to_size / sizeof(__be32); i++, j++)
 		sp->diff[j] = to[i];
 
-	return csum_partial(sp->diff, diff_size, seed);
+	ret = csum_partial(sp->diff, diff_size, seed);
+	local_unlock_nested_bh(&bpf_sp.bh_lock);
+	return ret;
 }
 
 static const struct bpf_func_proto bpf_csum_diff_proto = {
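The remaining hunks take the new lock around every access to the per-CPU scratch memory and only return the checksum computed under it. A minimal usage sketch of that locking pattern, continuing the hypothetical scratch_area/scratch names from the sketch above (scratch_sum is likewise hypothetical):

/* Hypothetical helper: copy bytes through the per-CPU buffer and sum them. */
static u32 scratch_sum(const u8 *src, u32 len)
{
	struct scratch_area *sa = this_cpu_ptr(&scratch);
	u32 sum = 0;
	u32 i;

	if (len > sizeof(sa->buf))
		return 0;

	/* Guard the per-CPU buffer against concurrent softirq users on
	 * this CPU. On PREEMPT_RT, where BH-disabled sections are
	 * preemptible, the local lock provides the actual serialization
	 * rather than relying on local_bh_disable() alone.
	 */
	local_lock_nested_bh(&scratch.bh_lock);
	memcpy(sa->buf, src, len);
	for (i = 0; i < len; i++)
		sum += sa->buf[i];
	local_unlock_nested_bh(&scratch.bh_lock);

	return sum;
}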