Skip to content

Commit 81ed18a

Browse files
4ast authored and davem330 committed
bpf: add helper inlining infra and optimize map_array lookup
Optimize bpf_call -> bpf_map_lookup_elem() -> array_map_lookup_elem() into a sequence of bpf instructions. When JIT is on, the sequence of bpf instructions is the sequence of native cpu instructions with significantly faster performance than an indirect call and two functions' prologue/epilogue. Signed-off-by: Alexei Starovoitov <[email protected]> Acked-by: Daniel Borkmann <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 8041902 commit 81ed18a

File tree

5 files changed

+77
-4
lines changed

5 files changed

+77
-4
lines changed

include/linux/bpf.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@ struct bpf_map_ops {
3535
void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
3636
int fd);
3737
void (*map_fd_put_ptr)(void *ptr);
38+
u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
3839
};
3940

4041
struct bpf_map {

include/linux/bpf_verifier.h

Lines changed: 4 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,10 @@ struct bpf_verifier_state_list {
6666
};
6767

6868
struct bpf_insn_aux_data {
69-
enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
69+
union {
70+
enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
71+
struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
72+
};
7073
};
7174

7275
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */

include/linux/filter.h

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -693,6 +693,11 @@ static inline bool bpf_jit_is_ebpf(void)
693693
# endif
694694
}
695695

696+
static inline bool ebpf_jit_enabled(void)
697+
{
698+
return bpf_jit_enable && bpf_jit_is_ebpf();
699+
}
700+
696701
static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
697702
{
698703
return fp->jited && bpf_jit_is_ebpf();
@@ -753,6 +758,11 @@ void bpf_prog_kallsyms_del(struct bpf_prog *fp);
753758

754759
#else /* CONFIG_BPF_JIT */
755760

761+
static inline bool ebpf_jit_enabled(void)
762+
{
763+
return false;
764+
}
765+
756766
static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
757767
{
758768
return false;

kernel/bpf/arraymap.c

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2+
* Copyright (c) 2016,2017 Facebook
23
*
34
* This program is free software; you can redistribute it and/or
45
* modify it under the terms of version 2 of the GNU General Public
@@ -113,6 +114,33 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key)
113114
return array->value + array->elem_size * index;
114115
}
115116

117+
/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
118+
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
119+
{
120+
struct bpf_array *array = container_of(map, struct bpf_array, map);
121+
struct bpf_insn *insn = insn_buf;
122+
u32 elem_size = array->elem_size;
123+
const int ret = BPF_REG_0;
124+
const int map_ptr = BPF_REG_1;
125+
const int index = BPF_REG_2;
126+
127+
*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
128+
*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
129+
*insn++ = BPF_JMP_IMM(BPF_JGE, ret, array->map.max_entries,
130+
elem_size == 1 ? 2 : 3);
131+
if (elem_size == 1) {
132+
/* nop */
133+
} else if (is_power_of_2(elem_size)) {
134+
*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
135+
} else {
136+
*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
137+
}
138+
*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
139+
*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
140+
*insn++ = BPF_MOV64_IMM(ret, 0);
141+
return insn - insn_buf;
142+
}
143+
116144
/* Called from eBPF program */
117145
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
118146
{
@@ -267,6 +295,7 @@ static const struct bpf_map_ops array_ops = {
267295
.map_lookup_elem = array_map_lookup_elem,
268296
.map_update_elem = array_map_update_elem,
269297
.map_delete_elem = array_map_delete_elem,
298+
.map_gen_lookup = array_map_gen_lookup,
270299
};
271300

272301
static struct bpf_map_type_list array_type __ro_after_init = {

kernel/bpf/verifier.c

Lines changed: 33 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1273,7 +1273,7 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
12731273
}
12741274
}
12751275

1276-
static int check_call(struct bpf_verifier_env *env, int func_id)
1276+
static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
12771277
{
12781278
struct bpf_verifier_state *state = &env->cur_state;
12791279
const struct bpf_func_proto *fn = NULL;
@@ -1369,6 +1369,7 @@ static int check_call(struct bpf_verifier_env *env, int func_id)
13691369
}
13701370
regs[BPF_REG_0].map_ptr = meta.map_ptr;
13711371
regs[BPF_REG_0].id = ++env->id_gen;
1372+
env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr;
13721373
} else {
13731374
verbose("unknown return type %d of func %s#%d\n",
13741375
fn->ret_type, func_id_name(func_id), func_id);
@@ -2940,7 +2941,7 @@ static int do_check(struct bpf_verifier_env *env)
29402941
return -EINVAL;
29412942
}
29422943

2943-
err = check_call(env, insn->imm);
2944+
err = check_call(env, insn->imm, insn_idx);
29442945
if (err)
29452946
return err;
29462947

@@ -3268,6 +3269,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
32683269
}
32693270

32703271
/* fixup insn->imm field of bpf_call instructions
3272+
* and inline eligible helpers as explicit sequence of BPF instructions
32713273
*
32723274
* this function is called after eBPF program passed verification
32733275
*/
@@ -3277,7 +3279,10 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
32773279
struct bpf_insn *insn = prog->insnsi;
32783280
const struct bpf_func_proto *fn;
32793281
const int insn_cnt = prog->len;
3280-
int i;
3282+
struct bpf_insn insn_buf[16];
3283+
struct bpf_prog *new_prog;
3284+
struct bpf_map *map_ptr;
3285+
int i, cnt, delta = 0;
32813286

32823287
for (i = 0; i < insn_cnt; i++, insn++) {
32833288
if (insn->code != (BPF_JMP | BPF_CALL))
@@ -3300,6 +3305,31 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
33003305
continue;
33013306
}
33023307

3308+
if (ebpf_jit_enabled() && insn->imm == BPF_FUNC_map_lookup_elem) {
3309+
map_ptr = env->insn_aux_data[i + delta].map_ptr;
3310+
if (!map_ptr->ops->map_gen_lookup)
3311+
goto patch_call_imm;
3312+
3313+
cnt = map_ptr->ops->map_gen_lookup(map_ptr, insn_buf);
3314+
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
3315+
verbose("bpf verifier is misconfigured\n");
3316+
return -EINVAL;
3317+
}
3318+
3319+
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf,
3320+
cnt);
3321+
if (!new_prog)
3322+
return -ENOMEM;
3323+
3324+
delta += cnt - 1;
3325+
3326+
/* keep walking new program and skip insns we just inserted */
3327+
env->prog = prog = new_prog;
3328+
insn = new_prog->insnsi + i + delta;
3329+
continue;
3330+
}
3331+
3332+
patch_call_imm:
33033333
fn = prog->aux->ops->get_func_proto(insn->imm);
33043334
/* all functions that have prototype and verifier allowed
33053335
* programs to call them, must be real in-kernel functions

0 commit comments

Comments
 (0)