
Commit 6d641ca

ubizjak authored and Alexei Starovoitov committed
bpf: Fix percpu address space issues
In arraymap.c:

In bpf_array_map_seq_start() and bpf_array_map_seq_next() cast return
values from the __percpu address space to the generic address space via
uintptr_t [1].

Correct the declaration of pptr pointer in __bpf_array_map_seq_show() to
void __percpu * and cast the value from the generic address space to the
__percpu address space via uintptr_t [1].

In hashtab.c:

Assign the return value from bpf_mem_cache_alloc() to void pointer and
cast the value to void __percpu ** (void pointer to percpu void pointer)
before dereferencing.

In memalloc.c:

Explicitly declare __percpu variables.

Cast obj to void __percpu **.

In helpers.c:

Cast ptr in BPF_CALL_1 and BPF_CALL_2 from generic address space to
__percpu address space via const uintptr_t [1].

Found by GCC's named address space checks.

There were no changes in the resulting object files.

[1] https://sparse.docs.kernel.org/en/latest/annotations.html#address-space-name

Signed-off-by: Uros Bizjak <[email protected]>
Cc: Alexei Starovoitov <[email protected]>
Cc: Daniel Borkmann <[email protected]>
Cc: Andrii Nakryiko <[email protected]>
Cc: Martin KaFai Lau <[email protected]>
Cc: Eduard Zingerman <[email protected]>
Cc: Song Liu <[email protected]>
Cc: Yonghong Song <[email protected]>
Cc: John Fastabend <[email protected]>
Cc: KP Singh <[email protected]>
Cc: Stanislav Fomichev <[email protected]>
Cc: Hao Luo <[email protected]>
Cc: Jiri Olsa <[email protected]>
Acked-by: Eduard Zingerman <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
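The recurring pattern in this commit is moving a pointer between the __percpu address space and the generic address space by round-tripping it through uintptr_t, so the checker does not see a direct pointer conversion between address spaces. Below is a minimal userspace sketch of that pattern, not kernel code: __percpu is stubbed out (in the kernel it expands to a sparse/GCC address-space annotation), and struct bpf_array_stub plus the two helper functions are illustrative stand-ins, not kernel APIs.

/*
 * Sketch only: shows the (void *)(uintptr_t) and
 * (void __percpu *)(uintptr_t) round-trips applied in arraymap.c.
 */
#include <stdint.h>

#define __percpu        /* stand-in for the kernel annotation */

struct bpf_array_stub {
        void __percpu *pptrs[4];        /* per-CPU value pointers */
};

/* Hand a per-CPU pointer out as a generic void *, as in bpf_array_map_seq_start(). */
void *elem_as_generic(struct bpf_array_stub *array, unsigned int index)
{
        return (void *)(uintptr_t)array->pptrs[index];
}

/* Re-attach the __percpu qualifier on the way back, as in __bpf_array_map_seq_show(). */
void __percpu *elem_as_percpu(void *v)
{
        return (void __percpu *)(uintptr_t)v;
}

The integer cast keeps the bit pattern unchanged; only the address-space qualifier seen by the compiler differs, which is consistent with the commit's note that the generated object files did not change.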
1 parent 5148f19 commit 6d641ca

File tree

4 files changed, +17 -16 lines changed


kernel/bpf/arraymap.c

Lines changed: 4 additions & 4 deletions
@@ -600,7 +600,7 @@ static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
         array = container_of(map, struct bpf_array, map);
         index = info->index & array->index_mask;
         if (info->percpu_value_buf)
-                return array->pptrs[index];
+                return (void *)(uintptr_t)array->pptrs[index];
         return array_map_elem_ptr(array, index);
 }

@@ -619,7 +619,7 @@ static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
         array = container_of(map, struct bpf_array, map);
         index = info->index & array->index_mask;
         if (info->percpu_value_buf)
-                return array->pptrs[index];
+                return (void *)(uintptr_t)array->pptrs[index];
         return array_map_elem_ptr(array, index);
 }

@@ -632,7 +632,7 @@ static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
         struct bpf_iter_meta meta;
         struct bpf_prog *prog;
         int off = 0, cpu = 0;
-        void __percpu **pptr;
+        void __percpu *pptr;
         u32 size;

         meta.seq = seq;
@@ -648,7 +648,7 @@ static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
         if (!info->percpu_value_buf) {
                 ctx.value = v;
         } else {
-                pptr = v;
+                pptr = (void __percpu *)(uintptr_t)v;
                 size = array->elem_size;
                 for_each_possible_cpu(cpu) {
                         copy_map_value_long(map, info->percpu_value_buf + off,

kernel/bpf/hashtab.c

Lines changed: 5 additions & 4 deletions
@@ -1049,14 +1049,15 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
                 pptr = htab_elem_get_ptr(l_new, key_size);
         } else {
                 /* alloc_percpu zero-fills */
-                pptr = bpf_mem_cache_alloc(&htab->pcpu_ma);
-                if (!pptr) {
+                void *ptr = bpf_mem_cache_alloc(&htab->pcpu_ma);
+
+                if (!ptr) {
                         bpf_mem_cache_free(&htab->ma, l_new);
                         l_new = ERR_PTR(-ENOMEM);
                         goto dec_count;
                 }
-                l_new->ptr_to_pptr = pptr;
-                pptr = *(void **)pptr;
+                l_new->ptr_to_pptr = ptr;
+                pptr = *(void __percpu **)ptr;
         }

         pcpu_init_value(htab, pptr, value, onallcpus);

kernel/bpf/helpers.c

Lines changed: 2 additions & 2 deletions
@@ -715,7 +715,7 @@ BPF_CALL_2(bpf_per_cpu_ptr, const void *, ptr, u32, cpu)
         if (cpu >= nr_cpu_ids)
                 return (unsigned long)NULL;

-        return (unsigned long)per_cpu_ptr((const void __percpu *)ptr, cpu);
+        return (unsigned long)per_cpu_ptr((const void __percpu *)(const uintptr_t)ptr, cpu);
 }

 const struct bpf_func_proto bpf_per_cpu_ptr_proto = {
@@ -728,7 +728,7 @@ const struct bpf_func_proto bpf_per_cpu_ptr_proto = {

 BPF_CALL_1(bpf_this_cpu_ptr, const void *, percpu_ptr)
 {
-        return (unsigned long)this_cpu_ptr((const void __percpu *)percpu_ptr);
+        return (unsigned long)this_cpu_ptr((const void __percpu *)(const uintptr_t)percpu_ptr);
 }

 const struct bpf_func_proto bpf_this_cpu_ptr_proto = {

kernel/bpf/memalloc.c

Lines changed: 6 additions & 6 deletions
@@ -138,8 +138,8 @@ static struct llist_node notrace *__llist_del_first(struct llist_head *head)
 static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags)
 {
         if (c->percpu_size) {
-                void **obj = kmalloc_node(c->percpu_size, flags, node);
-                void *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);
+                void __percpu **obj = kmalloc_node(c->percpu_size, flags, node);
+                void __percpu *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);

                 if (!obj || !pptr) {
                         free_percpu(pptr);
@@ -253,7 +253,7 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node, bool atomic)
 static void free_one(void *obj, bool percpu)
 {
         if (percpu) {
-                free_percpu(((void **)obj)[1]);
+                free_percpu(((void __percpu **)obj)[1]);
                 kfree(obj);
                 return;
         }
@@ -509,8 +509,8 @@ static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
  */
 int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
 {
-        struct bpf_mem_caches *cc, __percpu *pcc;
-        struct bpf_mem_cache *c, __percpu *pc;
+        struct bpf_mem_caches *cc; struct bpf_mem_caches __percpu *pcc;
+        struct bpf_mem_cache *c; struct bpf_mem_cache __percpu *pc;
         struct obj_cgroup *objcg = NULL;
         int cpu, i, unit_size, percpu_size = 0;

@@ -591,7 +591,7 @@ int bpf_mem_alloc_percpu_init(struct bpf_mem_alloc *ma, struct obj_cgroup *objcg

 int bpf_mem_alloc_percpu_unit_init(struct bpf_mem_alloc *ma, int size)
 {
-        struct bpf_mem_caches *cc, __percpu *pcc;
+        struct bpf_mem_caches *cc; struct bpf_mem_caches __percpu *pcc;
         int cpu, i, unit_size, percpu_size;
         struct obj_cgroup *objcg;
         struct bpf_mem_cache *c;