@@ -178,41 +178,26 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
 	return header;
 }
 
-void bpf_jit_compile(struct sk_filter *fp)
+struct jit_context {
+	unsigned int cleanup_addr; /* epilogue code offset */
+	int pc_ret0; /* bpf index of first RET #0 instruction (if any) */
+	u8 seen;
+};
+
+static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
+		  int oldproglen, struct jit_context *ctx)
 {
+	const struct sock_filter *filter = bpf_prog->insns;
+	int flen = bpf_prog->len;
 	u8 temp[64];
 	u8 *prog;
-	unsigned int proglen, oldproglen = 0;
-	int ilen, i;
+	int ilen, i, proglen;
 	int t_offset, f_offset;
-	u8 t_op, f_op, seen = 0, pass;
-	u8 *image = NULL;
-	struct bpf_binary_header *header = NULL;
+	u8 t_op, f_op, seen = 0;
 	u8 *func;
-	int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
-	unsigned int cleanup_addr; /* epilogue code offset */
-	unsigned int *addrs;
-	const struct sock_filter *filter = fp->insns;
-	int flen = fp->len;
-
-	if (!bpf_jit_enable)
-		return;
-
-	addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
-	if (addrs == NULL)
-		return;
-
-	/* Before first pass, make a rough estimation of addrs[]
-	 * each bpf instruction is translated to less than 64 bytes
-	 */
-	for (proglen = 0, i = 0; i < flen; i++) {
-		proglen += 64;
-		addrs[i] = proglen;
-	}
-	cleanup_addr = proglen; /* epilogue address */
+	unsigned int cleanup_addr = ctx->cleanup_addr;
+	u8 seen_or_pass0 = ctx->seen;
 
-	for (pass = 0; pass < 10; pass++) {
-		u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
 	/* no prologue/epilogue for trivial filters (RET something) */
 	proglen = 0;
 	prog = temp;
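The state that has to survive from one pass to the next (the epilogue offset, the index of the first RET #0, and the SEEN_* flags) now travels in struct jit_context instead of locals of one big function. A sketch of how the caller seeds it before pass 0, condensed from the bpf_jit_compile() hunk further down: pass 0 assumes every SEEN_* flag so the prologue and epilogue are sized for the worst case, and later passes reuse whatever do_jit() actually observed.

	struct jit_context ctx = {};

	ctx.cleanup_addr = proglen;			/* worst-case epilogue offset */
	ctx.seen = SEEN_XREG | SEEN_DATAREF | SEEN_MEM;	/* pessimistic pass 0 */
	ctx.pc_ret0 = -1;				/* no RET #0 instruction found yet */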
@@ -325,12 +310,12 @@ void bpf_jit_compile(struct sk_filter *fp)
 		case BPF_S_ALU_DIV_X: /* A /= X; */
 			seen |= SEEN_XREG;
 			EMIT2(0x85, 0xdb);	/* test %ebx,%ebx */
-			if (pc_ret0 > 0) {
+			if (ctx->pc_ret0 > 0) {
 				/* addrs[pc_ret0 - 1] is start address of target
 				 * (addrs[i] - 4) is the address following this jmp
 				 * ("xor %edx,%edx; div %ebx" being 4 bytes long)
 				 */
-				EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
+				EMIT_COND_JMP(X86_JE, addrs[ctx->pc_ret0 - 1] -
 					      (addrs[i] - 4));
 			} else {
 				EMIT_COND_JMP(X86_JNE, 2 + 5);
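The displacement arithmetic here is easy to misread: addrs[i] is the offset just past instruction i, and the conditional jump is followed by the 4-byte "xor %edx,%edx; div %ebx" pair, so the jump is resolved from addrs[i] - 4. A worked example with invented offsets:

	/* Suppose the first RET #0 is instruction 5, so its code starts at
	 * addrs[ctx->pc_ret0 - 1] == addrs[4] == 40, and addrs[i] == 100.
	 * The je ends at 100 - 4 = 96, so the emitted displacement is
	 * 40 - 96 = -56: when X is zero, jump back 56 bytes to the shared
	 * "return 0" code instead of emitting another copy of it. */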
@@ -342,12 +327,12 @@ void bpf_jit_compile(struct sk_filter *fp)
 		case BPF_S_ALU_MOD_X: /* A %= X; */
 			seen |= SEEN_XREG;
 			EMIT2(0x85, 0xdb);	/* test %ebx,%ebx */
-			if (pc_ret0 > 0) {
+			if (ctx->pc_ret0 > 0) {
 				/* addrs[pc_ret0 - 1] is start address of target
 				 * (addrs[i] - 6) is the address following this jmp
 				 * ("xor %edx,%edx; div %ebx;mov %edx,%eax" being 6 bytes long)
 				 */
-				EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
+				EMIT_COND_JMP(X86_JE, addrs[ctx->pc_ret0 - 1] -
 					      (addrs[i] - 6));
 			} else {
 				EMIT_COND_JMP(X86_JNE, 2 + 5);
@@ -441,8 +426,8 @@ void bpf_jit_compile(struct sk_filter *fp)
 			break;
 		case BPF_S_RET_K:
 			if (!K) {
-				if (pc_ret0 == -1)
-					pc_ret0 = i;
+				if (ctx->pc_ret0 == -1)
+					ctx->pc_ret0 = i;
 				CLEAR_A();
 			} else {
 				EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
@@ -603,7 +588,7 @@ void bpf_jit_compile(struct sk_filter *fp)
 			int off = pkt_type_offset();
 
 			if (off < 0)
-				goto out;
+				return -EINVAL;
 			if (is_imm8(off)) {
 				/* movzbl off8(%rdi),%eax */
 				EMIT4(0x0f, 0xb6, 0x47, off);
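With do_jit() returning either a length or a negative errno, conditions that used to bail out through a cleanup label inside the compiler are now reported to the caller. A condensed sketch of that contract, taken from the bpf_jit_compile() hunk below:

	proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
	if (proglen <= 0) {	/* -EINVAL or -EFAULT: give up on the JIT */
		image = NULL;
		if (header)
			module_free(NULL, header);
		goto out;	/* the interpreter handles this filter instead */
	}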
@@ -725,36 +710,79 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
 			}
 			EMIT_COND_JMP(f_op, f_offset);
 			break;
-		default:
-			/* hmm, too complex filter, give up with jit compiler */
-			goto out;
-		}
-		ilen = prog - temp;
-		if (image) {
-			if (unlikely(proglen + ilen > oldproglen)) {
-				pr_err("bpb_jit_compile fatal error\n");
-				kfree(addrs);
-				module_free(NULL, header);
-				return;
-			}
-			memcpy(image + proglen, temp, ilen);
+		default:
+			/* hmm, too complex filter, give up with jit compiler */
+			return -EINVAL;
+		}
+		ilen = prog - temp;
+		if (image) {
+			if (unlikely(proglen + ilen > oldproglen)) {
+				pr_err("bpb_jit_compile fatal error\n");
+				return -EFAULT;
 			}
-		proglen += ilen;
-		addrs[i] = proglen;
-		prog = temp;
+			memcpy(image + proglen, temp, ilen);
 		}
-	/* last bpf instruction is always a RET :
-	 * use it to give the cleanup instruction(s) addr
-	 */
-	cleanup_addr = proglen - 1; /* ret */
-	if (seen_or_pass0)
-		cleanup_addr -= 1; /* leaveq */
-	if (seen_or_pass0 & SEEN_XREG)
-		cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
+		proglen += ilen;
+		addrs[i] = proglen;
+		prog = temp;
+	}
+	/* last bpf instruction is always a RET :
+	 * use it to give the cleanup instruction(s) addr
+	 */
+	ctx->cleanup_addr = proglen - 1; /* ret */
+	if (seen_or_pass0)
+		ctx->cleanup_addr -= 1; /* leaveq */
+	if (seen_or_pass0 & SEEN_XREG)
+		ctx->cleanup_addr -= 4; /* mov -8(%rbp),%rbx */
+
+	ctx->seen = seen;
+
+	return proglen;
+}
+
+void bpf_jit_compile(struct sk_filter *prog)
+{
+	struct bpf_binary_header *header = NULL;
+	int proglen, oldproglen = 0;
+	struct jit_context ctx = {};
+	u8 *image = NULL;
+	int *addrs;
+	int pass;
+	int i;
+
+	if (!bpf_jit_enable)
+		return;
+
+	if (!prog || !prog->len)
+		return;
+
+	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
+	if (!addrs)
+		return;
+
+	/* Before first pass, make a rough estimation of addrs[]
+	 * each bpf instruction is translated to less than 64 bytes
+	 */
+	for (proglen = 0, i = 0; i < prog->len; i++) {
+		proglen += 64;
+		addrs[i] = proglen;
+	}
+	ctx.cleanup_addr = proglen;
+	ctx.seen = SEEN_XREG | SEEN_DATAREF | SEEN_MEM;
+	ctx.pc_ret0 = -1;
+
+	for (pass = 0; pass < 10; pass++) {
+		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
+		if (proglen <= 0) {
+			image = NULL;
+			if (header)
+				module_free(NULL, header);
+			goto out;
+		}
 		if (image) {
 			if (proglen != oldproglen)
-				pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
+				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
+				       proglen, oldproglen);
 			break;
 		}
 		if (proglen == oldproglen) {
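The driver keeps the old fixed-point scheme: addrs[] starts from a 64-bytes-per-instruction over-estimate, and each pass re-encodes against the previous offsets, which can only shrink (a jump that needed a rel32 may now fit in a rel8), until proglen stops changing. A standalone toy model of that convergence follows; everything in it is invented for illustration, with insn_len() standing in for the real instruction encoder.

	#include <stdio.h>

	#define FLEN 8

	static int insn_len(int i, const int *addrs)
	{
		/* toy rule: instruction 2 jumps forward past instruction 7;
		 * a displacement under 128 bytes encodes like jmp rel8
		 * (2 bytes), anything larger like jmp rel32 (5 bytes) */
		if (i == 2)
			return addrs[7] - addrs[1] < 128 ? 2 : 5;
		return 4;	/* every other instruction: fixed 4 bytes */
	}

	int main(void)
	{
		int addrs[FLEN], proglen, oldproglen = 0, pass, i;

		/* rough estimate: every instruction at most 64 bytes */
		for (proglen = 0, i = 0; i < FLEN; i++) {
			proglen += 64;
			addrs[i] = proglen;	/* offset just past insn i */
		}
		for (pass = 0; pass < 10; pass++) {
			for (proglen = 0, i = 0; i < FLEN; i++) {
				proglen += insn_len(i, addrs);
				addrs[i] = proglen;
			}
			printf("pass %d: proglen = %d\n", pass, proglen);
			if (proglen == oldproglen)
				break;	/* fixed point: safe to emit for real */
			oldproglen = proglen;
		}
		return 0;
	}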
@@ -766,17 +794,16 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
 	}
 
 	if (bpf_jit_enable > 1)
-		bpf_jit_dump(flen, proglen, pass, image);
+		bpf_jit_dump(prog->len, proglen, 0, image);
 
 	if (image) {
 		bpf_flush_icache(header, image + proglen);
 		set_memory_ro((unsigned long)header, header->pages);
-		fp->bpf_func = (void *)image;
-		fp->jited = 1;
+		prog->bpf_func = (void *)image;
+		prog->jited = 1;
 	}
 out:
 	kfree(addrs);
-	return;
 }
 
 static void bpf_jit_free_deferred(struct work_struct *work)