Skip to content

Commit b2ee6d2

Browse files
eddyz87 authored and Alexei Starovoitov committed
bpf: support bpf_fastcall patterns for kfuncs
Recognize bpf_fastcall patterns around kfunc calls. For example, suppose bpf_cast_to_kern_ctx() follows bpf_fastcall contract (which it does), in such a case allow verifier to rewrite BPF program below: r2 = 1; *(u64 *)(r10 - 32) = r2; call %[bpf_cast_to_kern_ctx]; r2 = *(u64 *)(r10 - 32); r0 = r2; By removing the spill/fill pair: r2 = 1; call %[bpf_cast_to_kern_ctx]; r0 = r2; Acked-by: Yonghong Song <[email protected]> Signed-off-by: Eduard Zingerman <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent adec67d commit b2ee6d2

File tree

1 file changed

+34
-1
lines changed

1 file changed

+34
-1
lines changed

kernel/bpf/verifier.c

Lines changed: 34 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -16125,7 +16125,7 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
1612516125
*/
1612616126
static u32 helper_fastcall_clobber_mask(const struct bpf_func_proto *fn)
1612716127
{
16128-
u8 mask;
16128+
u32 mask;
1612916129
int i;
1613016130

1613116131
mask = 0;
@@ -16153,6 +16153,26 @@ static bool verifier_inlines_helper_call(struct bpf_verifier_env *env, s32 imm)
1615316153
}
1615416154
}
1615516155

16156+
/* Same as helper_fastcall_clobber_mask() but for kfuncs, see comment above */
16157+
static u32 kfunc_fastcall_clobber_mask(struct bpf_kfunc_call_arg_meta *meta)
16158+
{
16159+
u32 vlen, i, mask;
16160+
16161+
vlen = btf_type_vlen(meta->func_proto);
16162+
mask = 0;
16163+
if (!btf_type_is_void(btf_type_by_id(meta->btf, meta->func_proto->type)))
16164+
mask |= BIT(BPF_REG_0);
16165+
for (i = 0; i < vlen; ++i)
16166+
mask |= BIT(BPF_REG_1 + i);
16167+
return mask;
16168+
}
16169+
16170+
/* Same as verifier_inlines_helper_call() but for kfuncs, see comment above */
16171+
static bool is_fastcall_kfunc_call(struct bpf_kfunc_call_arg_meta *meta)
16172+
{
16173+
return false;
16174+
}
16175+
1615616176
/* LLVM define a bpf_fastcall function attribute.
1615716177
* This attribute means that function scratches only some of
1615816178
* the caller saved registers defined by ABI.
@@ -16250,6 +16270,19 @@ static void mark_fastcall_pattern_for_call(struct bpf_verifier_env *env,
1625016270
bpf_jit_inlines_helper_call(call->imm));
1625116271
}
1625216272

16273+
if (bpf_pseudo_kfunc_call(call)) {
16274+
struct bpf_kfunc_call_arg_meta meta;
16275+
int err;
16276+
16277+
err = fetch_kfunc_meta(env, call, &meta, NULL);
16278+
if (err < 0)
16279+
/* error would be reported later */
16280+
return;
16281+
16282+
clobbered_regs_mask = kfunc_fastcall_clobber_mask(&meta);
16283+
can_be_inlined = is_fastcall_kfunc_call(&meta);
16284+
}
16285+
1625316286
if (clobbered_regs_mask == ALL_CALLER_SAVED_REGS)
1625416287
return;
1625516288

0 commit comments

Comments
 (0)