@@ -1350,14 +1350,18 @@ struct bpf_scratchpad {
 
 static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
 
+static inline int __bpf_try_make_writable(struct sk_buff *skb,
+					  unsigned int write_len)
+{
+	return skb_ensure_writable(skb, write_len);
+}
+
 static inline int bpf_try_make_writable(struct sk_buff *skb,
 					unsigned int write_len)
 {
-	int err;
+	int err = __bpf_try_make_writable(skb, write_len);
 
-	err = skb_ensure_writable(skb, write_len);
 	bpf_compute_data_end(skb);
-
 	return err;
 }
 
@@ -1976,8 +1980,8 @@ static u64 bpf_skb_change_type(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 	u32 pkt_type = r2;
 
 	/* We only allow a restricted subset to be changed for now. */
-	if (unlikely(skb->pkt_type > PACKET_OTHERHOST ||
-		     pkt_type > PACKET_OTHERHOST))
+	if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
+		     !skb_pkt_type_ok(pkt_type)))
 		return -EINVAL;
 
 	skb->pkt_type = pkt_type;
@@ -1992,6 +1996,92 @@ static const struct bpf_func_proto bpf_skb_change_type_proto = {
 	.arg2_type	= ARG_ANYTHING,
 };
 
+static u32 __bpf_skb_min_len(const struct sk_buff *skb)
+{
+	u32 min_len = skb_network_offset(skb);
+
+	if (skb_transport_header_was_set(skb))
+		min_len = skb_transport_offset(skb);
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		min_len = skb_checksum_start_offset(skb) +
+			  skb->csum_offset + sizeof(__sum16);
+	return min_len;
+}
+
+static u32 __bpf_skb_max_len(const struct sk_buff *skb)
+{
+	return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
+			  65536;
+}
+
+static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
+{
+	unsigned int old_len = skb->len;
+	int ret;
+
+	ret = __skb_grow_rcsum(skb, new_len);
+	if (!ret)
+		memset(skb->data + old_len, 0, new_len - old_len);
+	return ret;
+}
+
+static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
+{
+	return __skb_trim_rcsum(skb, new_len);
+}
+
+static u64 bpf_skb_change_tail(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
+{
+	struct sk_buff *skb = (struct sk_buff *)(long) r1;
+	u32 max_len = __bpf_skb_max_len(skb);
+	u32 min_len = __bpf_skb_min_len(skb);
+	u32 new_len = (u32) r2;
+	int ret;
+
+	if (unlikely(flags || new_len > max_len || new_len < min_len))
+		return -EINVAL;
+	if (skb->encapsulation)
+		return -ENOTSUPP;
+
+	/* The basic idea of this helper is that it's performing the
+	 * needed work to either grow or trim an skb, and eBPF program
+	 * rewrites the rest via helpers like bpf_skb_store_bytes(),
+	 * bpf_lX_csum_replace() and others rather than passing a raw
+	 * buffer here. This one is a slow path helper and intended
+	 * for replies with control messages.
+	 *
+	 * Like in bpf_skb_change_proto(), we want to keep this rather
+	 * minimal and without protocol specifics so that we are able
+	 * to separate concerns as in bpf_skb_store_bytes() should only
+	 * be the one responsible for writing buffers.
+	 *
+	 * It's really expected to be a slow path operation here for
+	 * control message replies, so we're implicitly linearizing,
+	 * uncloning and drop offloads from the skb by this.
+	 */
+	ret = __bpf_try_make_writable(skb, skb->len);
+	if (!ret) {
+		if (new_len > skb->len)
+			ret = bpf_skb_grow_rcsum(skb, new_len);
+		else if (new_len < skb->len)
+			ret = bpf_skb_trim_rcsum(skb, new_len);
+		if (!ret && skb_is_gso(skb))
+			skb_gso_reset(skb);
+	}
+
+	bpf_compute_data_end(skb);
+	return ret;
+}
+
+static const struct bpf_func_proto bpf_skb_change_tail_proto = {
+	.func		= bpf_skb_change_tail,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_ANYTHING,
+	.arg3_type	= ARG_ANYTHING,
+};
+
 bool bpf_helper_changes_skb_data(void *func)
 {
 	if (func == bpf_skb_vlan_push)
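The comment in bpf_skb_change_tail() above spells out the contract: the helper only grows or trims the skb (zero-filling any grown area), and the program is expected to rewrite the payload itself via bpf_skb_store_bytes() and the checksum helpers. A minimal sketch of how a tc/BPF program might use it, assuming the usual samples/bpf-style includes and a hypothetical section/function name:

/* Hypothetical tc classifier: pad short replies to 128 bytes and
 * stamp a marker word at the end of the grown area.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"

SEC("classifier")
int pad_reply(struct __sk_buff *skb)
{
	const __u32 target_len = 128;
	__u32 marker = 0xfeedface;

	if (skb->len < target_len) {
		/* flags must be 0 for now; new_len is checked against
		 * __bpf_skb_min_len()/__bpf_skb_max_len() in the kernel
		 */
		if (bpf_skb_change_tail(skb, target_len, 0) < 0)
			return TC_ACT_SHOT;
		/* the grown tail is zeroed; rewrite it explicitly */
		bpf_skb_store_bytes(skb, target_len - sizeof(marker),
				    &marker, sizeof(marker), 0);
	}
	return TC_ACT_OK;
}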
@@ -2002,6 +2092,8 @@ bool bpf_helper_changes_skb_data(void *func)
 		return true;
 	if (func == bpf_skb_change_proto)
 		return true;
+	if (func == bpf_skb_change_tail)
+		return true;
 	if (func == bpf_l3_csum_replace)
 		return true;
 	if (func == bpf_l4_csum_replace)
@@ -2282,7 +2374,6 @@ bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
 	}
 }
 
-#ifdef CONFIG_SOCK_CGROUP_DATA
 static u64 bpf_skb_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
 	struct sk_buff *skb = (struct sk_buff *)(long)r1;
@@ -2303,7 +2394,7 @@ static u64 bpf_skb_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 	if (unlikely(!cgrp))
 		return -EAGAIN;
 
-	return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), cgrp);
+	return sk_under_cgroup_hierarchy(sk, cgrp);
 }
 
 static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
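With the cgroup test wrapped behind sk_under_cgroup_hierarchy(), the #ifdef CONFIG_SOCK_CGROUP_DATA guards around the helper can be dropped in the surrounding hunks. From the program side nothing changes; a rough sketch against a cgroup array map, with hypothetical map and function names:

struct bpf_map_def SEC("maps") cgroup_map = {
	.type        = BPF_MAP_TYPE_CGROUP_ARRAY,
	.key_size    = sizeof(__u32),
	.value_size  = sizeof(__u32),
	.max_entries = 1,
};

SEC("classifier")
int match_cgroup(struct __sk_buff *skb)
{
	/* 1 if the skb's socket sits below the cgroup at index 0,
	 * 0 if not, negative error e.g. when no socket is attached
	 */
	if (bpf_skb_under_cgroup(skb, &cgroup_map, 0) == 1)
		return TC_ACT_OK;
	return TC_ACT_SHOT;
}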
@@ -2314,7 +2405,41 @@ static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
 	.arg2_type	= ARG_CONST_MAP_PTR,
 	.arg3_type	= ARG_ANYTHING,
 };
-#endif
+
+static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
+				  unsigned long off, unsigned long len)
+{
+	memcpy(dst_buff, src_buff + off, len);
+	return 0;
+}
+
+static u64 bpf_xdp_event_output(u64 r1, u64 r2, u64 flags, u64 r4,
+				u64 meta_size)
+{
+	struct xdp_buff *xdp = (struct xdp_buff *)(long) r1;
+	struct bpf_map *map = (struct bpf_map *)(long) r2;
+	u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
+	void *meta = (void *)(long) r4;
+
+	if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
+		return -EINVAL;
+	if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
+		return -EFAULT;
+
+	return bpf_event_output(map, flags, meta, meta_size, xdp, xdp_size,
+				bpf_xdp_copy);
+}
+
+static const struct bpf_func_proto bpf_xdp_event_output_proto = {
+	.func		= bpf_xdp_event_output,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_CONST_MAP_PTR,
+	.arg3_type	= ARG_ANYTHING,
+	.arg4_type	= ARG_PTR_TO_STACK,
+	.arg5_type	= ARG_CONST_STACK_SIZE,
+};
 
 static const struct bpf_func_proto *
 sk_filter_func_proto(enum bpf_func_id func_id)
@@ -2368,6 +2493,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
 		return &bpf_skb_change_proto_proto;
 	case BPF_FUNC_skb_change_type:
 		return &bpf_skb_change_type_proto;
+	case BPF_FUNC_skb_change_tail:
+		return &bpf_skb_change_tail_proto;
 	case BPF_FUNC_skb_get_tunnel_key:
 		return &bpf_skb_get_tunnel_key_proto;
 	case BPF_FUNC_skb_set_tunnel_key:
@@ -2386,10 +2513,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
 		return &bpf_skb_event_output_proto;
 	case BPF_FUNC_get_smp_processor_id:
 		return &bpf_get_smp_processor_id_proto;
-#ifdef CONFIG_SOCK_CGROUP_DATA
 	case BPF_FUNC_skb_under_cgroup:
 		return &bpf_skb_under_cgroup_proto;
-#endif
 	default:
 		return sk_filter_func_proto(func_id);
 	}
@@ -2398,7 +2523,12 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
 static const struct bpf_func_proto *
 xdp_func_proto(enum bpf_func_id func_id)
 {
-	return sk_filter_func_proto(func_id);
+	switch (func_id) {
+	case BPF_FUNC_perf_event_output:
+		return &bpf_xdp_event_output_proto;
+	default:
+		return sk_filter_func_proto(func_id);
+	}
 }
 
 static bool __is_valid_access(int off, int size, enum bpf_access_type type)
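With xdp_func_proto() now routing BPF_FUNC_perf_event_output to bpf_xdp_event_output_proto, XDP programs can push events to a perf ring as well; the upper 32 bits of the flags argument (BPF_F_CTXLEN_MASK) ask the kernel to append that many bytes of packet data after the metadata. A hypothetical sketch, using samples/bpf-style map definitions and made-up names:

struct bpf_map_def SEC("maps") perf_map = {
	.type        = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size    = sizeof(int),
	.value_size  = sizeof(__u32),
	.max_entries = 64,	/* sized to cover possible CPUs */
};

struct event_meta {
	__u16 pkt_len;
};

SEC("xdp")
int xdp_sample(struct xdp_md *ctx)
{
	void *data     = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	struct event_meta meta = { .pkt_len = data_end - data };
	__u64 sample_len = data_end - data;
	__u64 flags;

	/* cap the sampled bytes so the kernel's data_end check passes */
	if (sample_len > 64)
		sample_len = 64;
	flags = BPF_F_CURRENT_CPU | (sample_len << 32);

	bpf_perf_event_output(ctx, &perf_map, flags, &meta, sizeof(meta));
	return XDP_PASS;
}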