Skip to content

Commit f3c2af7

Browse files
Alexei Starovoitov authored and davem330 committed
net: filter: x86: split bpf_jit_compile()
Split bpf_jit_compile() into two functions to improve readability of the for(pass++) loop. The change follows the similar style of JIT compilers for arm, powerpc, and s390. The body of the new do_jit() was not reformatted to reduce noise in this patch, since the following patch replaces most of it. Tested with the BPF testsuite. Signed-off-by: Alexei Starovoitov <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 9509b1c commit f3c2af7

File tree

1 file changed

+92
-65
lines changed

1 file changed

+92
-65
lines changed

arch/x86/net/bpf_jit_comp.c

Lines changed: 92 additions & 65 deletions
Original file line numberDiff line numberDiff line change
@@ -178,41 +178,26 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
178178
return header;
179179
}
180180

181-
void bpf_jit_compile(struct sk_filter *fp)
181+
struct jit_context {
182+
unsigned int cleanup_addr; /* epilogue code offset */
183+
int pc_ret0; /* bpf index of first RET #0 instruction (if any) */
184+
u8 seen;
185+
};
186+
187+
static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
188+
int oldproglen, struct jit_context *ctx)
182189
{
190+
const struct sock_filter *filter = bpf_prog->insns;
191+
int flen = bpf_prog->len;
183192
u8 temp[64];
184193
u8 *prog;
185-
unsigned int proglen, oldproglen = 0;
186-
int ilen, i;
194+
int ilen, i, proglen;
187195
int t_offset, f_offset;
188-
u8 t_op, f_op, seen = 0, pass;
189-
u8 *image = NULL;
190-
struct bpf_binary_header *header = NULL;
196+
u8 t_op, f_op, seen = 0;
191197
u8 *func;
192-
int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
193-
unsigned int cleanup_addr; /* epilogue code offset */
194-
unsigned int *addrs;
195-
const struct sock_filter *filter = fp->insns;
196-
int flen = fp->len;
197-
198-
if (!bpf_jit_enable)
199-
return;
200-
201-
addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
202-
if (addrs == NULL)
203-
return;
204-
205-
/* Before first pass, make a rough estimation of addrs[]
206-
* each bpf instruction is translated to less than 64 bytes
207-
*/
208-
for (proglen = 0, i = 0; i < flen; i++) {
209-
proglen += 64;
210-
addrs[i] = proglen;
211-
}
212-
cleanup_addr = proglen; /* epilogue address */
198+
unsigned int cleanup_addr = ctx->cleanup_addr;
199+
u8 seen_or_pass0 = ctx->seen;
213200

214-
for (pass = 0; pass < 10; pass++) {
215-
u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
216201
/* no prologue/epilogue for trivial filters (RET something) */
217202
proglen = 0;
218203
prog = temp;
@@ -325,12 +310,12 @@ void bpf_jit_compile(struct sk_filter *fp)
325310
case BPF_S_ALU_DIV_X: /* A /= X; */
326311
seen |= SEEN_XREG;
327312
EMIT2(0x85, 0xdb); /* test %ebx,%ebx */
328-
if (pc_ret0 > 0) {
313+
if (ctx->pc_ret0 > 0) {
329314
/* addrs[pc_ret0 - 1] is start address of target
330315
* (addrs[i] - 4) is the address following this jmp
331316
* ("xor %edx,%edx; div %ebx" being 4 bytes long)
332317
*/
333-
EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
318+
EMIT_COND_JMP(X86_JE, addrs[ctx->pc_ret0 - 1] -
334319
(addrs[i] - 4));
335320
} else {
336321
EMIT_COND_JMP(X86_JNE, 2 + 5);
@@ -342,12 +327,12 @@ void bpf_jit_compile(struct sk_filter *fp)
342327
case BPF_S_ALU_MOD_X: /* A %= X; */
343328
seen |= SEEN_XREG;
344329
EMIT2(0x85, 0xdb); /* test %ebx,%ebx */
345-
if (pc_ret0 > 0) {
330+
if (ctx->pc_ret0 > 0) {
346331
/* addrs[pc_ret0 - 1] is start address of target
347332
* (addrs[i] - 6) is the address following this jmp
348333
* ("xor %edx,%edx; div %ebx;mov %edx,%eax" being 6 bytes long)
349334
*/
350-
EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
335+
EMIT_COND_JMP(X86_JE, addrs[ctx->pc_ret0 - 1] -
351336
(addrs[i] - 6));
352337
} else {
353338
EMIT_COND_JMP(X86_JNE, 2 + 5);
@@ -441,8 +426,8 @@ void bpf_jit_compile(struct sk_filter *fp)
441426
break;
442427
case BPF_S_RET_K:
443428
if (!K) {
444-
if (pc_ret0 == -1)
445-
pc_ret0 = i;
429+
if (ctx->pc_ret0 == -1)
430+
ctx->pc_ret0 = i;
446431
CLEAR_A();
447432
} else {
448433
EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
@@ -603,7 +588,7 @@ void bpf_jit_compile(struct sk_filter *fp)
603588
int off = pkt_type_offset();
604589

605590
if (off < 0)
606-
goto out;
591+
return -EINVAL;
607592
if (is_imm8(off)) {
608593
/* movzbl off8(%rdi),%eax */
609594
EMIT4(0x0f, 0xb6, 0x47, off);
@@ -725,36 +710,79 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
725710
}
726711
EMIT_COND_JMP(f_op, f_offset);
727712
break;
728-
default:
729-
/* hmm, too complex filter, give up with jit compiler */
730-
goto out;
731-
}
732-
ilen = prog - temp;
733-
if (image) {
734-
if (unlikely(proglen + ilen > oldproglen)) {
735-
pr_err("bpb_jit_compile fatal error\n");
736-
kfree(addrs);
737-
module_free(NULL, header);
738-
return;
739-
}
740-
memcpy(image + proglen, temp, ilen);
713+
default:
714+
/* hmm, too complex filter, give up with jit compiler */
715+
return -EINVAL;
716+
}
717+
ilen = prog - temp;
718+
if (image) {
719+
if (unlikely(proglen + ilen > oldproglen)) {
720+
pr_err("bpb_jit_compile fatal error\n");
721+
return -EFAULT;
741722
}
742-
proglen += ilen;
743-
addrs[i] = proglen;
744-
prog = temp;
723+
memcpy(image + proglen, temp, ilen);
745724
}
746-
/* last bpf instruction is always a RET :
747-
* use it to give the cleanup instruction(s) addr
748-
*/
749-
cleanup_addr = proglen - 1; /* ret */
750-
if (seen_or_pass0)
751-
cleanup_addr -= 1; /* leaveq */
752-
if (seen_or_pass0 & SEEN_XREG)
753-
cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
725+
proglen += ilen;
726+
addrs[i] = proglen;
727+
prog = temp;
728+
}
729+
/* last bpf instruction is always a RET :
730+
* use it to give the cleanup instruction(s) addr
731+
*/
732+
ctx->cleanup_addr = proglen - 1; /* ret */
733+
if (seen_or_pass0)
734+
ctx->cleanup_addr -= 1; /* leaveq */
735+
if (seen_or_pass0 & SEEN_XREG)
736+
ctx->cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
737+
738+
ctx->seen = seen;
739+
740+
return proglen;
741+
}
742+
743+
void bpf_jit_compile(struct sk_filter *prog)
744+
{
745+
struct bpf_binary_header *header = NULL;
746+
int proglen, oldproglen = 0;
747+
struct jit_context ctx = {};
748+
u8 *image = NULL;
749+
int *addrs;
750+
int pass;
751+
int i;
752+
753+
if (!bpf_jit_enable)
754+
return;
754755

756+
if (!prog || !prog->len)
757+
return;
758+
759+
addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
760+
if (!addrs)
761+
return;
762+
763+
/* Before first pass, make a rough estimation of addrs[]
764+
* each bpf instruction is translated to less than 64 bytes
765+
*/
766+
for (proglen = 0, i = 0; i < prog->len; i++) {
767+
proglen += 64;
768+
addrs[i] = proglen;
769+
}
770+
ctx.cleanup_addr = proglen;
771+
ctx.seen = SEEN_XREG | SEEN_DATAREF | SEEN_MEM;
772+
ctx.pc_ret0 = -1;
773+
774+
for (pass = 0; pass < 10; pass++) {
775+
proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
776+
if (proglen <= 0) {
777+
image = NULL;
778+
if (header)
779+
module_free(NULL, header);
780+
goto out;
781+
}
755782
if (image) {
756783
if (proglen != oldproglen)
757-
pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
784+
pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
785+
proglen, oldproglen);
758786
break;
759787
}
760788
if (proglen == oldproglen) {
@@ -766,17 +794,16 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
766794
}
767795

768796
if (bpf_jit_enable > 1)
769-
bpf_jit_dump(flen, proglen, pass, image);
797+
bpf_jit_dump(prog->len, proglen, 0, image);
770798

771799
if (image) {
772800
bpf_flush_icache(header, image + proglen);
773801
set_memory_ro((unsigned long)header, header->pages);
774-
fp->bpf_func = (void *)image;
775-
fp->jited = 1;
802+
prog->bpf_func = (void *)image;
803+
prog->jited = 1;
776804
}
777805
out:
778806
kfree(addrs);
779-
return;
780807
}
781808

782809
static void bpf_jit_free_deferred(struct work_struct *work)

0 commit comments

Comments
 (0)