Commit d0d106a
Merge tag 'bpf-next-6.14' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Pull bpf updates from Alexei Starovoitov:
 "A smaller than usual release cycle. The main changes are:

   - Prepare selftests to run with the GCC-BPF backend (Ihor Solodrai)

     In addition to the LLVM-BPF runs, the BPF CI now runs GCC-BPF in
     compile-only mode. Half of the tests are failing, since support
     for btf_decl_tag is still WIP, but this is a great milestone.

   - Convert various samples/bpf to the selftests/bpf/test_progs format
     (Alexis Lothoré and Bastien Curutchet)

   - Teach the verifier to recognize that an array lookup with a
     constant, in-range index will always succeed (Daniel Xu)

   - Clean up migrate-disable scope in BPF maps (Hou Tao)

   - Fix the bpf_timer destroy path on PREEMPT_RT (Hou Tao)

   - Always use bpf_mem_alloc in bpf_local_storage on PREEMPT_RT
     (Martin KaFai Lau)

   - Refactor verifier lock support (Kumar Kartikeya Dwivedi)

     This is a prerequisite for the upcoming resilient spin lock.

   - Remove, in the verifier, the excessive 'may_goto +0' instructions
     that LLVM leaves behind when it unrolls loops (Yonghong Song)

   - Remove the unhelpful bpf_probe_write_user() warning message
     (Marco Elver)

   - Add an fd_array_cnt attribute for the prog_load command
     (Anton Protopopov)

     This is a prerequisite for upcoming static_branch support"

* tag 'bpf-next-6.14' of git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (125 commits)
  selftests/bpf: Add some tests related to 'may_goto 0' insns
  bpf: Remove 'may_goto 0' instruction in opt_remove_nops()
  bpf: Allow 'may_goto 0' instruction in verifier
  selftests/bpf: Add test case for the freeing of bpf_timer
  bpf: Cancel the running bpf_timer through kworker for PREEMPT_RT
  bpf: Free element after unlock in __htab_map_lookup_and_delete_elem()
  bpf: Bail out early in __htab_map_lookup_and_delete_elem()
  bpf: Free special fields after unlock in htab_lru_map_delete_node()
  tools: Sync if_xdp.h uapi tooling header
  libbpf: Work around kernel inconsistently stripping '.llvm.' suffix
  bpf: selftests: verifier: Add nullness elision tests
  bpf: verifier: Support eliding map lookup nullness
  bpf: verifier: Refactor helper access type tracking
  bpf: tcp: Mark bpf_load_hdr_opt() arg2 as read-write
  bpf: verifier: Add missing newline on verbose() call
  selftests/bpf: Add distilled BTF test about marking BTF_IS_EMBEDDED
  libbpf: Fix incorrect traversal end type ID when marking BTF_IS_EMBEDDED
  libbpf: Fix return zero when elf_begin failed
  selftests/bpf: Fix btf leak on new btf alloc failure in btf_distill test
  veristat: Load struct_ops programs only once
  ...
2 parents 754916d + 3f3c2f0

161 files changed: +4995 additions, -2099 deletions
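Among the changes listed above, the map-lookup nullness elision is the easiest to picture with a small example. The following is a hedged sketch, not code from this merge: the map, section name, and counter logic are invented for illustration. Because the key is a constant that is provably below max_entries of an array map, the verifier can now prove the lookup result is non-NULL, so the direct dereference should pass verification:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical array map, invented for the example. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 4);
	__type(key, __u32);
	__type(value, __u64);
} counters SEC(".maps");

SEC("tracepoint/syscalls/sys_enter_getpid")
int count_getpid(void *ctx)
{
	__u32 key = 1;	/* constant and provably < max_entries */
	__u64 *val = bpf_map_lookup_elem(&counters, &key);

	/* With nullness elision the verifier can prove val != NULL here,
	 * so this direct increment no longer needs an explicit NULL
	 * check to be accepted. */
	__sync_fetch_and_add(val, 1);
	return 0;
}

char _license[] SEC("license") = "GPL";

An explicit NULL check remains good practice, and is still required when the key is not a provably in-range constant or the map is not an array.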

arch/arm64/net/bpf_jit_comp.c

Lines changed: 24 additions & 24 deletions

@@ -267,6 +267,19 @@ static bool is_addsub_imm(u32 imm)
 	return !(imm & ~0xfff) || !(imm & ~0xfff000);
 }
 
+static inline void emit_a64_add_i(const bool is64, const int dst, const int src,
+				  const int tmp, const s32 imm, struct jit_ctx *ctx)
+{
+	if (is_addsub_imm(imm)) {
+		emit(A64_ADD_I(is64, dst, src, imm), ctx);
+	} else if (is_addsub_imm(-imm)) {
+		emit(A64_SUB_I(is64, dst, src, -imm), ctx);
+	} else {
+		emit_a64_mov_i(is64, tmp, imm, ctx);
+		emit(A64_ADD(is64, dst, src, tmp), ctx);
+	}
+}
+
 /*
  * There are 3 types of AArch64 LDR/STR (immediate) instruction:
  * Post-index, Pre-index, Unsigned offset.
@@ -648,16 +661,13 @@ static int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	const s16 off = insn->off;
 	u8 reg = dst;
 
-	if (off || arena) {
-		if (off) {
-			emit_a64_mov_i(1, tmp, off, ctx);
-			emit(A64_ADD(1, tmp, tmp, dst), ctx);
-			reg = tmp;
-		}
-		if (arena) {
-			emit(A64_ADD(1, tmp, reg, arena_vm_base), ctx);
-			reg = tmp;
-		}
+	if (off) {
+		emit_a64_add_i(1, tmp, reg, tmp, off, ctx);
+		reg = tmp;
+	}
+	if (arena) {
+		emit(A64_ADD(1, tmp, reg, arena_vm_base), ctx);
+		reg = tmp;
 	}
 
 	switch (insn->imm) {
@@ -723,7 +733,7 @@ static int emit_ll_sc_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	const s32 imm = insn->imm;
 	const s16 off = insn->off;
 	const bool isdw = BPF_SIZE(code) == BPF_DW;
-	u8 reg;
+	u8 reg = dst;
 	s32 jmp_offset;
 
 	if (BPF_MODE(code) == BPF_PROBE_ATOMIC) {
@@ -732,11 +742,8 @@ static int emit_ll_sc_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		return -EINVAL;
 	}
 
-	if (!off) {
-		reg = dst;
-	} else {
-		emit_a64_mov_i(1, tmp, off, ctx);
-		emit(A64_ADD(1, tmp, tmp, dst), ctx);
+	if (off) {
+		emit_a64_add_i(1, tmp, reg, tmp, off, ctx);
 		reg = tmp;
 	}
 
@@ -1146,14 +1153,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 	/* dst = dst OP imm */
 	case BPF_ALU | BPF_ADD | BPF_K:
 	case BPF_ALU64 | BPF_ADD | BPF_K:
-		if (is_addsub_imm(imm)) {
-			emit(A64_ADD_I(is64, dst, dst, imm), ctx);
-		} else if (is_addsub_imm(-imm)) {
-			emit(A64_SUB_I(is64, dst, dst, -imm), ctx);
-		} else {
-			emit_a64_mov_i(is64, tmp, imm, ctx);
-			emit(A64_ADD(is64, dst, dst, tmp), ctx);
-		}
+		emit_a64_add_i(is64, dst, dst, tmp, imm, ctx);
 		break;
 	case BPF_ALU | BPF_SUB | BPF_K:
 	case BPF_ALU64 | BPF_SUB | BPF_K:
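The three-way branch that emit_a64_add_i() factors out hinges on an AArch64 encoding constraint: ADD/SUB (immediate) carries a 12-bit immediate, optionally shifted left by 12 bits, which is exactly what is_addsub_imm() tests. A standalone userspace sketch of that predicate, for illustration only (not part of the commit):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Same expression as the JIT's predicate: accept imm12, or imm12 << 12. */
static bool is_addsub_imm(uint32_t imm)
{
	return !(imm & ~0xfffu) || !(imm & ~0xfff000u);
}

int main(void)
{
	assert(is_addsub_imm(0xfff));	 /* plain 12-bit immediate */
	assert(is_addsub_imm(0x7ff000)); /* 12-bit immediate, LSL #12 */
	assert(!is_addsub_imm(0x1001));	 /* straddles both halves: the JIT
					  * falls back to mov-to-tmp + ADD */
	return 0;
}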

include/linux/bpf.h

Lines changed: 17 additions & 0 deletions

@@ -2299,6 +2299,14 @@ void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
 struct bpf_map *bpf_map_get(u32 ufd);
 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 
+/*
+ * The __bpf_map_get() and __btf_get_by_fd() functions parse a file
+ * descriptor and return a corresponding map or btf object.
+ * Their names are double underscored to emphasize the fact that they
+ * do not increase refcnt. To also increase refcnt use corresponding
+ * bpf_map_get() and btf_get_by_fd() functions.
+ */
+
 static inline struct bpf_map *__bpf_map_get(struct fd f)
 {
 	if (fd_empty(f))
@@ -2308,6 +2316,15 @@ static inline struct bpf_map *__bpf_map_get(struct fd f)
 	return fd_file(f)->private_data;
 }
 
+static inline struct btf *__btf_get_by_fd(struct fd f)
+{
+	if (fd_empty(f))
+		return ERR_PTR(-EBADF);
+	if (unlikely(fd_file(f)->f_op != &btf_fops))
+		return ERR_PTR(-EINVAL);
+	return fd_file(f)->private_data;
+}
+
 void bpf_map_inc(struct bpf_map *map);
 void bpf_map_inc_with_uref(struct bpf_map *map);
 struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);
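The practical consequence of the borrowed (non-refcounted) pointer is lifetime: it stays valid only while the struct fd reference is held. A hedged sketch of a hypothetical in-kernel caller (inspect_btf_fd() is invented; CLASS(fd, ...), btf_get(), and btf_nr_types() are pre-existing kernel APIs):

/* Hypothetical caller: btf is only borrowed, so it is valid exactly as
 * long as the fd reference f is held. */
static int inspect_btf_fd(int ufd)
{
	CLASS(fd, f)(ufd);	/* fdput() happens automatically on return */
	struct btf *btf = __btf_get_by_fd(f);

	if (IS_ERR(btf))
		return PTR_ERR(btf);

	/* To keep btf past this scope, take a reference: btf_get(btf). */
	return btf_nr_types(btf) ? 0 : -ENOENT;
}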

include/linux/bpf_verifier.h

Lines changed: 16 additions & 10 deletions

@@ -233,6 +233,7 @@ enum bpf_stack_slot_type {
 	 */
 	STACK_DYNPTR,
 	STACK_ITER,
+	STACK_IRQ_FLAG,
 };
 
 #define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
@@ -254,8 +255,9 @@ struct bpf_reference_state {
 	 * default to pointer reference on zero initialization of a state.
 	 */
 	enum ref_state_type {
-		REF_TYPE_PTR = 0,
-		REF_TYPE_LOCK,
+		REF_TYPE_PTR = 1,
+		REF_TYPE_IRQ = 2,
+		REF_TYPE_LOCK = 3,
 	} type;
 	/* Track each reference created with a unique id, even if the same
 	 * instruction creates the reference multiple times (eg, via CALL).
@@ -315,9 +317,6 @@ struct bpf_func_state {
 	u32 callback_depth;
 
 	/* The following fields should be last. See copy_func_state() */
-	int acquired_refs;
-	int active_locks;
-	struct bpf_reference_state *refs;
 	/* The state of the stack. Each element of the array describes BPF_REG_SIZE
 	 * (i.e. 8) bytes worth of stack memory.
 	 * stack[0] represents bytes [*(r10-8)..*(r10-1)]
@@ -370,6 +369,8 @@ struct bpf_verifier_state {
 	/* call stack tracking */
 	struct bpf_func_state *frame[MAX_CALL_FRAMES];
 	struct bpf_verifier_state *parent;
+	/* Acquired reference states */
+	struct bpf_reference_state *refs;
 	/*
 	 * 'branches' field is the number of branches left to explore:
 	 * 0 - all possible paths from this state reached bpf_exit or
@@ -419,9 +420,13 @@ struct bpf_verifier_state {
 	u32 insn_idx;
 	u32 curframe;
 
-	bool speculative;
+	u32 acquired_refs;
+	u32 active_locks;
+	u32 active_preempt_locks;
+	u32 active_irq_id;
 	bool active_rcu_lock;
-	u32 active_preempt_lock;
+
+	bool speculative;
 	/* If this state was ever pointed-to by other state's loop_entry field
 	 * this flag would be set to true. Used to avoid freeing such states
 	 * while they are still in use.
@@ -980,8 +985,9 @@ const char *dynptr_type_str(enum bpf_dynptr_type type);
 const char *iter_type_str(const struct btf *btf, u32 btf_id);
 const char *iter_state_str(enum bpf_iter_state state);
 
-void print_verifier_state(struct bpf_verifier_env *env,
-			  const struct bpf_func_state *state, bool print_all);
-void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state *state);
+void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
+			  u32 frameno, bool print_all);
+void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
+		      u32 frameno);
 
 #endif /* _LINUX_BPF_VERIFIER_H */

include/linux/btf.h

Lines changed: 5 additions & 0 deletions

@@ -353,6 +353,11 @@ static inline bool btf_type_is_scalar(const struct btf_type *t)
 	return btf_type_is_int(t) || btf_type_is_enum(t);
 }
 
+static inline bool btf_type_is_fwd(const struct btf_type *t)
+{
+	return BTF_INFO_KIND(t->info) == BTF_KIND_FWD;
+}
+
 static inline bool btf_type_is_typedef(const struct btf_type *t)
 {
 	return BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF;

include/uapi/linux/bpf.h

Lines changed: 10 additions & 0 deletions

@@ -1573,6 +1573,16 @@ union bpf_attr {
 		 * If provided, prog_flags should have BPF_F_TOKEN_FD flag set.
 		 */
 		__s32		prog_token_fd;
+		/* The fd_array_cnt can be used to pass the length of the
+		 * fd_array array. In this case all the [map] file descriptors
+		 * passed in this array will be bound to the program, even if
+		 * the maps are not referenced directly. The functionality is
+		 * similar to the BPF_PROG_BIND_MAP syscall, but maps can be
+		 * used by the verifier during the program load. If provided,
+		 * then the fd_array[0,...,fd_array_cnt-1] is expected to be
+		 * continuous.
+		 */
+		__u32		fd_array_cnt;
 	};
 
 	struct { /* anonymous struct used by BPF_OBJ_* commands */
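A hedged userspace sketch of the new attribute (prog_load_bound_maps() is invented for illustration; the fd_array field itself predates this change, and building the instruction array is the caller's concern):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Illustrative only: load a program and bind map_cnt map fds to it at
 * load time, even if the instructions never reference those maps. */
static int prog_load_bound_maps(const struct bpf_insn *insns, __u32 insn_cnt,
				const int *map_fds, __u32 map_cnt)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = (__u64)(unsigned long)insns;
	attr.insn_cnt = insn_cnt;
	attr.license = (__u64)(unsigned long)"GPL";
	attr.fd_array = (__u64)(unsigned long)map_fds;
	attr.fd_array_cnt = map_cnt;	/* fds [0, map_cnt-1] must be valid */

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}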

kernel/bpf/arena.c

Lines changed: 9 additions & 7 deletions

@@ -138,7 +138,11 @@ static struct bpf_map *arena_map_alloc(union bpf_attr *attr)
 	INIT_LIST_HEAD(&arena->vma_list);
 	bpf_map_init_from_attr(&arena->map, attr);
 	range_tree_init(&arena->rt);
-	range_tree_set(&arena->rt, 0, attr->max_entries);
+	err = range_tree_set(&arena->rt, 0, attr->max_entries);
+	if (err) {
+		bpf_map_area_free(arena);
+		goto err;
+	}
 	mutex_init(&arena->lock);
 
 	return &arena->map;
@@ -218,7 +222,7 @@ static u64 arena_map_mem_usage(const struct bpf_map *map)
 struct vma_list {
 	struct vm_area_struct *vma;
 	struct list_head head;
-	atomic_t mmap_count;
+	refcount_t mmap_count;
 };
 
 static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
@@ -228,7 +232,7 @@ static int remember_vma(struct bpf_arena *arena, struct vm_area_struct *vma)
 	vml = kmalloc(sizeof(*vml), GFP_KERNEL);
 	if (!vml)
 		return -ENOMEM;
-	atomic_set(&vml->mmap_count, 1);
+	refcount_set(&vml->mmap_count, 1);
 	vma->vm_private_data = vml;
 	vml->vma = vma;
 	list_add(&vml->head, &arena->vma_list);
@@ -239,7 +243,7 @@ static void arena_vm_open(struct vm_area_struct *vma)
 {
 	struct vma_list *vml = vma->vm_private_data;
 
-	atomic_inc(&vml->mmap_count);
+	refcount_inc(&vml->mmap_count);
 }
 
 static void arena_vm_close(struct vm_area_struct *vma)
@@ -248,7 +252,7 @@ static void arena_vm_close(struct vm_area_struct *vma)
 	struct bpf_arena *arena = container_of(map, struct bpf_arena, map);
 	struct vma_list *vml = vma->vm_private_data;
 
-	if (!atomic_dec_and_test(&vml->mmap_count))
+	if (!refcount_dec_and_test(&vml->mmap_count))
 		return;
 	guard(mutex)(&arena->lock);
 	/* update link list under lock */
@@ -257,8 +261,6 @@ static void arena_vm_close(struct vm_area_struct *vma)
 	kfree(vml);
 }
 
-#define MT_ENTRY ((void *)&arena_map_ops) /* unused. has to be valid pointer */
-
 static vm_fault_t arena_vm_fault(struct vm_fault *vmf)
 {
 	struct bpf_map *map = vmf->vma->vm_file->private_data;
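A design note on the mmap_count conversion above (general kernel convention, not stated in this diff): refcount_t saturates and WARNs on increment-from-zero, overflow, and underflow, where atomic_t silently wraps, so a miscounted arena mapping becomes a loud warning instead of a potential use-after-free of the vma_list entry. The range_tree_set() hunk likewise stops silently ignoring an allocation failure.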

kernel/bpf/arraymap.c

Lines changed: 2 additions & 4 deletions

@@ -735,13 +735,13 @@ static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback
 	u64 ret = 0;
 	void *val;
 
+	cant_migrate();
+
 	if (flags != 0)
 		return -EINVAL;
 
 	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
 	array = container_of(map, struct bpf_array, map);
-	if (is_percpu)
-		migrate_disable();
 	for (i = 0; i < map->max_entries; i++) {
 		if (is_percpu)
 			val = this_cpu_ptr(array->pptrs[i]);
@@ -756,8 +756,6 @@ static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback
 			break;
 	}
 
-	if (is_percpu)
-		migrate_enable();
 	return num_elems;
 }

kernel/bpf/bpf_cgrp_storage.c

Lines changed: 7 additions & 8 deletions

@@ -15,22 +15,20 @@ static DEFINE_PER_CPU(int, bpf_cgrp_storage_busy);
 
 static void bpf_cgrp_storage_lock(void)
 {
-	migrate_disable();
+	cant_migrate();
 	this_cpu_inc(bpf_cgrp_storage_busy);
 }
 
 static void bpf_cgrp_storage_unlock(void)
 {
 	this_cpu_dec(bpf_cgrp_storage_busy);
-	migrate_enable();
 }
 
 static bool bpf_cgrp_storage_trylock(void)
 {
-	migrate_disable();
+	cant_migrate();
 	if (unlikely(this_cpu_inc_return(bpf_cgrp_storage_busy) != 1)) {
 		this_cpu_dec(bpf_cgrp_storage_busy);
-		migrate_enable();
 		return false;
 	}
 	return true;
@@ -47,17 +45,18 @@ void bpf_cgrp_storage_free(struct cgroup *cgroup)
 {
 	struct bpf_local_storage *local_storage;
 
+	migrate_disable();
 	rcu_read_lock();
 	local_storage = rcu_dereference(cgroup->bpf_cgrp_storage);
-	if (!local_storage) {
-		rcu_read_unlock();
-		return;
-	}
+	if (!local_storage)
+		goto out;
 
 	bpf_cgrp_storage_lock();
 	bpf_local_storage_destroy(local_storage);
 	bpf_cgrp_storage_unlock();
+out:
 	rcu_read_unlock();
+	migrate_enable();
 }
 
 static struct bpf_local_storage_data *

kernel/bpf/bpf_inode_storage.c

Lines changed: 5 additions & 4 deletions

@@ -62,16 +62,17 @@ void bpf_inode_storage_free(struct inode *inode)
 	if (!bsb)
 		return;
 
+	migrate_disable();
 	rcu_read_lock();
 
 	local_storage = rcu_dereference(bsb->storage);
-	if (!local_storage) {
-		rcu_read_unlock();
-		return;
-	}
+	if (!local_storage)
+		goto out;
 
 	bpf_local_storage_destroy(local_storage);
+out:
 	rcu_read_unlock();
+	migrate_enable();
 }
 
 static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key)
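The arraymap.c, bpf_cgrp_storage.c, and bpf_inode_storage.c hunks above all apply the same migrate-disable-scope cleanup from the pull message: the migrate_disable()/migrate_enable() pair moves out to the outermost caller, and the inner lock helpers only assert the invariant with cant_migrate(). A minimal sketch of the resulting shape (function names invented for illustration; cant_migrate() is the existing kernel debug assertion):

static void storage_lock(void)
{
	cant_migrate();		/* debug-asserts migration is already disabled */
	/* ... take the per-CPU busy counter ... */
}

static void storage_unlock(void)
{
	/* ... drop the per-CPU busy counter; no migrate_enable() here ... */
}

void storage_free(void *owner)
{
	migrate_disable();	/* one scope covers RCU, lock, and destroy */
	rcu_read_lock();
	storage_lock();
	/* ... destroy the owner's local storage ... */
	storage_unlock();
	rcu_read_unlock();
	migrate_enable();
}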
