
Commit f1d5df8

iii-i authored and Alexei Starovoitov committed
s390/bpf: Implement bpf_arch_text_poke()
bpf_arch_text_poke() is used to hotpatch eBPF programs and trampolines.

s390x has a very strict hotpatching restriction: the only thing that is allowed to be hotpatched is the conditional branch mask.

Take the same approach as commit de5012b ("s390/ftrace: implement hotpatching"): create a conditional jump to a "plt", which loads the target address from memory and jumps to it; then patch this address first, and the mask second.

Trampolines (introduced in the next patch) respect the ftrace calling convention: the return address is in %r0, and %r1 is clobbered. With that in mind, bpf_arch_text_poke() does not differentiate between jumps and calls.

However, there is a simple optimization for jumps (for the epilogue_ip case): if a jump already points to the destination, then there is no "plt" and we can just flip the mask.

For simplicity, the "plt" template is defined in assembly, and its size is used to define C arrays. There doesn't seem to be a way to convey this size to C as a constant, so it is hardcoded and double-checked at runtime.

Signed-off-by: Ilya Leoshkevich <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
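The mechanism rests on the s390 brcl instruction (RIL-c format): a 6-byte relative branch whose 4-bit condition mask selects between "branch never" (mask 0, effectively a nop) and "branch always" (mask 0xf). A minimal C sketch of the patch site, using only the 0xc004/0xf0 constants and the halfword displacement visible in the diff below; the struct mirrors the one in bpf_arch_text_poke(), while the helper names are illustrative and not part of the patch:

        /* Sketch: the 6-byte "brcl <mask>,<target>" hotpatch site.
         * s390 is big-endian, so the condition mask ends up in the
         * 0x00f0 nibble of opc.
         */
        struct brcl_insn {
                unsigned short opc;     /* 0xc004 = brcl 0 (nop), 0xc0f4 = brcl 0xf */
                int disp;               /* signed displacement in halfwords */
        } __attribute__((packed));

        /* Does the branch currently fire? Mask 0 never branches, mask 0xf always does. */
        int brcl_is_active(const struct brcl_insn *insn)
        {
                return (insn->opc & 0x00f0) == 0x00f0;
        }

        /* Where does it point? The displacement is relative to ip, in 2-byte units. */
        void *brcl_target(void *ip, const struct brcl_insn *insn)
        {
                return (char *)ip + ((long)insn->disp << 1);
        }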
1 parent bb4ef8f · commit f1d5df8


arch/s390/net/bpf_jit_comp.c

Lines changed: 97 additions & 0 deletions
@@ -30,6 +30,7 @@
 #include <asm/facility.h>
 #include <asm/nospec-branch.h>
 #include <asm/set_memory.h>
+#include <asm/text-patching.h>
 #include "bpf_jit.h"
 
 struct bpf_jit {
@@ -50,6 +51,8 @@ struct bpf_jit {
        int r14_thunk_ip;       /* Address of expoline thunk for 'br %r14' */
        int tail_call_start;    /* Tail call start offset */
        int excnt;              /* Number of exception table entries */
+       int prologue_plt_ret;   /* Return address for prologue hotpatch PLT */
+       int prologue_plt;       /* Start of prologue hotpatch PLT */
 };
 
 #define SEEN_MEM       BIT(0)          /* use mem[] for temporary storage */
@@ -506,6 +509,36 @@ static void bpf_skip(struct bpf_jit *jit, int size)
        }
 }
 
+/*
+ * PLT for hotpatchable calls. The calling convention is the same as for the
+ * ftrace hotpatch trampolines: %r0 is return address, %r1 is clobbered.
+ */
+extern const char bpf_plt[];
+extern const char bpf_plt_ret[];
+extern const char bpf_plt_target[];
+extern const char bpf_plt_end[];
+#define BPF_PLT_SIZE 32
+asm(
+       ".pushsection .rodata\n"
+       "       .align 8\n"
+       "bpf_plt:\n"
+       "       lgrl %r0,bpf_plt_ret\n"
+       "       lgrl %r1,bpf_plt_target\n"
+       "       br %r1\n"
+       "       .align 8\n"
+       "bpf_plt_ret: .quad 0\n"
+       "bpf_plt_target: .quad 0\n"
+       "bpf_plt_end:\n"
+       "       .popsection\n"
+);
+
+static void bpf_jit_plt(void *plt, void *ret, void *target)
+{
+       memcpy(plt, bpf_plt, BPF_PLT_SIZE);
+       *(void **)((char *)plt + (bpf_plt_ret - bpf_plt)) = ret;
+       *(void **)((char *)plt + (bpf_plt_target - bpf_plt)) = target;
+}
+
 /*
  * Emit function prologue
  *
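The 32 in BPF_PLT_SIZE follows from the template: two 6-byte lgrl instructions plus a 2-byte br make 14 bytes of code, the .align 8 pads that to 16, and the two .quad slots add another 16. A hedged C overlay of the same layout, for illustration only (the patch deliberately keeps the template in assembly because the size cannot be conveyed to C as a constant):

        /* Illustrative view of one emitted PLT; not part of the patch. */
        struct bpf_plt_layout {
                char code[16];  /* lgrl %r0,<ret>; lgrl %r1,<target>; br %r1; padding */
                void *ret;      /* loaded into %r0: where the callee should return */
                void *target;   /* loaded into %r1: where the PLT branches to */
        };                      /* sizeof(struct bpf_plt_layout) == BPF_PLT_SIZE == 32 */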
@@ -514,6 +547,11 @@ static void bpf_skip(struct bpf_jit *jit, int size)
  */
 static void bpf_jit_prologue(struct bpf_jit *jit, u32 stack_depth)
 {
+       /* No-op for hotpatching */
+       /* brcl 0,prologue_plt */
+       EMIT6_PCREL_RILC(0xc0040000, 0, jit->prologue_plt);
+       jit->prologue_plt_ret = jit->prg;
+
        if (jit->seen & SEEN_TAIL_CALL) {
                /* xc STK_OFF_TCCNT(4,%r15),STK_OFF_TCCNT(%r15) */
                _EMIT6(0xd703f000 | STK_OFF_TCCNT, 0xf000 | STK_OFF_TCCNT);
@@ -589,6 +627,13 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth)
                /* br %r1 */
                _EMIT2(0x07f1);
        }
+
+       jit->prg = ALIGN(jit->prg, 8);
+       jit->prologue_plt = jit->prg;
+       if (jit->prg_buf)
+               bpf_jit_plt(jit->prg_buf + jit->prg,
+                           jit->prg_buf + jit->prologue_plt_ret, NULL);
+       jit->prg += BPF_PLT_SIZE;
 }
 
 static int get_probe_mem_regno(const u8 *insn)
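Putting the prologue and epilogue hunks together, a freshly JITed program roughly looks as sketched below (labels are symbolic; the ret slot value matches the (char *)ip + 6 check in bpf_arch_text_poke() further down):

        /*
         * Layout sketch of a JITed program after this patch:
         *
         *   ip:            brcl 0,prologue_plt      <- 6-byte nop, the hotpatch site
         *   ip + 6:        regular prologue, program body, epilogue
         *                  ...padding to 8-byte alignment...
         *   prologue_plt:  lgrl %r0,bpf_plt_ret     <- ret slot, filled with ip + 6
         *                  lgrl %r1,bpf_plt_target  <- target slot, NULL until patched
         *                  br   %r1
         */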
@@ -1776,6 +1821,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
        struct bpf_jit jit;
        int pass;
 
+       if (WARN_ON_ONCE(bpf_plt_end - bpf_plt != BPF_PLT_SIZE))
+               return orig_fp;
+
        if (!fp->jit_requested)
                return orig_fp;
 
@@ -1867,3 +1915,52 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
                       tmp : orig_fp);
        return fp;
 }
+
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
+                      void *old_addr, void *new_addr)
+{
+       struct {
+               u16 opc;
+               s32 disp;
+       } __packed insn;
+       char expected_plt[BPF_PLT_SIZE];
+       char current_plt[BPF_PLT_SIZE];
+       char *plt;
+       int err;
+
+       /* Verify the branch to be patched. */
+       err = copy_from_kernel_nofault(&insn, ip, sizeof(insn));
+       if (err < 0)
+               return err;
+       if (insn.opc != (0xc004 | (old_addr ? 0xf0 : 0)))
+               return -EINVAL;
+
+       if (t == BPF_MOD_JUMP &&
+           insn.disp == ((char *)new_addr - (char *)ip) >> 1) {
+               /*
+                * The branch already points to the destination,
+                * there is no PLT.
+                */
+       } else {
+               /* Verify the PLT. */
+               plt = (char *)ip + (insn.disp << 1);
+               err = copy_from_kernel_nofault(current_plt, plt, BPF_PLT_SIZE);
+               if (err < 0)
+                       return err;
+               bpf_jit_plt(expected_plt, (char *)ip + 6, old_addr);
+               if (memcmp(current_plt, expected_plt, BPF_PLT_SIZE))
+                       return -EINVAL;
+               /* Adjust the call address. */
+               s390_kernel_write(plt + (bpf_plt_target - bpf_plt),
+                                 &new_addr, sizeof(void *));
+       }
+
+       /* Adjust the mask of the branch. */
+       insn.opc = 0xc004 | (new_addr ? 0xf0 : 0);
+       s390_kernel_write((char *)ip + 1, (char *)&insn.opc + 1, 1);
+
+       /* Make the new code visible to the other CPUs. */
+       text_poke_sync_lock();
+
+       return 0;
+}
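For context, this hook is driven by the generic BPF core when programs or trampolines are attached and detached. A hedged usage sketch of the two typical transitions, assuming ip points at the prologue brcl of a JITed program and tramp_addr at a trampoline following the ftrace calling convention (both identifiers are illustrative):

        /* Attach: nop -> call. old_addr == NULL means "currently a nop",
         * so the expected opcode is 0xc004 (mask 0); the poke writes
         * tramp_addr into the PLT target slot, then flips the mask to 0xf.
         */
        err = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, tramp_addr);

        /* Detach: call -> nop. The current PLT is verified against old_addr,
         * NULL is written into the target slot, and the mask goes back to 0.
         */
        err = bpf_arch_text_poke(ip, BPF_MOD_CALL, tramp_addr, NULL);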
