@@ -198,6 +198,8 @@ struct jit_context {
 /* Maximum number of bytes emitted while JITing one eBPF insn */
 #define BPF_MAX_INSN_SIZE	128
 #define BPF_INSN_SAFETY		64
+/* Number of bytes emit_call() needs to generate a call instruction */
+#define X86_CALL_SIZE		5
 
 #define PROLOGUE_SIZE		20
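
For context: X86_CALL_SIZE is 5 because an x86-64 near call is the single
opcode byte 0xE8 followed by a signed 32-bit displacement, measured from the
end of the call instruction. A hypothetical standalone sketch of that
encoding (sketch_call_rel32 is not from this patch; the kernel-side helper
is emit_call(), added below):

    /* Minimal sketch of a 5-byte near call: opcode + rel32 displacement. */
    #include <stdint.h>
    #include <string.h>

    static void sketch_call_rel32(uint8_t *ip, const void *target)
    {
            /* Displacement is relative to the *next* instruction (ip + 5). */
            int32_t rel = (int32_t)((const uint8_t *)target - (ip + 5));

            ip[0] = 0xE8;                      /* opcode: call rel32 */
            memcpy(&ip[1], &rel, sizeof(rel)); /* 4 displacement bytes -> 5 total */
    }
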
@@ -390,6 +392,99 @@ static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
 	*pprog = prog;
 }
 
+/* LDX: dst_reg = *(u8*)(src_reg + off) */
+static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+
+	switch (size) {
+	case BPF_B:
+		/* Emit 'movzx rax, byte ptr [rax + off]' */
+		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
+		break;
+	case BPF_H:
+		/* Emit 'movzx rax, word ptr [rax + off]' */
+		EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
+		break;
+	case BPF_W:
+		/* Emit 'mov eax, dword ptr [rax+0x14]' */
+		if (is_ereg(dst_reg) || is_ereg(src_reg))
+			EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
+		else
+			EMIT1(0x8B);
+		break;
+	case BPF_DW:
+		/* Emit 'mov rax, qword ptr [rax+0x14]' */
+		EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
+		break;
+	}
+	/*
+	 * If insn->off == 0 we can save one extra byte, but
+	 * special case of x86 R13 which always needs an offset
+	 * is not worth the hassle
+	 */
+	if (is_imm8(off))
+		EMIT2(add_2reg(0x40, src_reg, dst_reg), off);
+	else
+		EMIT1_off32(add_2reg(0x80, src_reg, dst_reg), off);
+	*pprog = prog;
+}
+
+/* STX: *(u8*)(dst_reg + off) = src_reg */
+static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+
+	switch (size) {
+	case BPF_B:
+		/* Emit 'mov byte ptr [rax + off], al' */
+		if (is_ereg(dst_reg) || is_ereg(src_reg) ||
+		    /* We have to add extra byte for x86 SIL, DIL regs */
+		    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
+			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
+		else
+			EMIT1(0x88);
+		break;
+	case BPF_H:
+		if (is_ereg(dst_reg) || is_ereg(src_reg))
+			EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
+		else
+			EMIT2(0x66, 0x89);
+		break;
+	case BPF_W:
+		if (is_ereg(dst_reg) || is_ereg(src_reg))
+			EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
+		else
+			EMIT1(0x89);
+		break;
+	case BPF_DW:
+		EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
+		break;
+	}
+	if (is_imm8(off))
+		EMIT2(add_2reg(0x40, dst_reg, src_reg), off);
+	else
+		EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), off);
+	*pprog = prog;
+}
+
+static int emit_call(u8 **pprog, void *func, void *ip)
+{
+	u8 *prog = *pprog;
+	int cnt = 0;
+	s64 offset;
+
+	offset = func - (ip + X86_CALL_SIZE);
+	if (!is_simm32(offset)) {
+		pr_err("Target call %p is out of range\n", func);
+		return -EINVAL;
+	}
+	EMIT1_off32(0xE8, offset);
+	*pprog = prog;
+	return 0;
+}
+
 static bool ex_handler_bpf(const struct exception_table_entry *x,
 			   struct pt_regs *regs, int trapnr,
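
For readers unfamiliar with the encoding helpers used above: add_2mod()
builds the REX prefix and add_2reg() the ModRM byte, with mod=01/disp8
chosen when the offset fits in a signed byte and mod=10/disp32 otherwise.
A worked example, assuming the JIT's usual BPF-to-x86 register map
(BPF_REG_0 = rax, BPF_REG_1 = rdi); the bytes are derived by hand, not
taken from the patch:

    /*
     * emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_1, 0x10), i.e.
     * 'mov rax, qword ptr [rdi + 0x10]', emits four bytes:
     *
     *   0x48   REX.W prefix from add_2mod(0x48, src_reg, dst_reg)
     *   0x8B   opcode: mov r64, r/m64
     *   0x47   ModRM from add_2reg(0x40, src_reg, dst_reg):
     *          mod=01 (disp8), reg=000 (rax), rm=111 (rdi)
     *   0x10   the 8-bit displacement, since is_imm8(0x10) holds
     */
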
@@ -773,68 +868,22 @@ st: if (is_imm8(insn->off))
 
 			/* STX: *(u8*)(dst_reg + off) = src_reg */
 		case BPF_STX | BPF_MEM | BPF_B:
-			/* Emit 'mov byte ptr [rax + off], al' */
-			if (is_ereg(dst_reg) || is_ereg(src_reg) ||
-			    /* We have to add extra byte for x86 SIL, DIL regs */
-			    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
-				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
-			else
-				EMIT1(0x88);
-			goto stx;
 		case BPF_STX | BPF_MEM | BPF_H:
-			if (is_ereg(dst_reg) || is_ereg(src_reg))
-				EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
-			else
-				EMIT2(0x66, 0x89);
-			goto stx;
 		case BPF_STX | BPF_MEM | BPF_W:
-			if (is_ereg(dst_reg) || is_ereg(src_reg))
-				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
-			else
-				EMIT1(0x89);
-			goto stx;
 		case BPF_STX | BPF_MEM | BPF_DW:
-			EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
-stx:			if (is_imm8(insn->off))
-				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
-			else
-				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
-					    insn->off);
+			emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
 			break;
 
 			/* LDX: dst_reg = *(u8*)(src_reg + off) */
 		case BPF_LDX | BPF_MEM | BPF_B:
 		case BPF_LDX | BPF_PROBE_MEM | BPF_B:
-			/* Emit 'movzx rax, byte ptr [rax + off]' */
-			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
-			goto ldx;
 		case BPF_LDX | BPF_MEM | BPF_H:
 		case BPF_LDX | BPF_PROBE_MEM | BPF_H:
-			/* Emit 'movzx rax, word ptr [rax + off]' */
-			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
-			goto ldx;
 		case BPF_LDX | BPF_MEM | BPF_W:
 		case BPF_LDX | BPF_PROBE_MEM | BPF_W:
-			/* Emit 'mov eax, dword ptr [rax+0x14]' */
-			if (is_ereg(dst_reg) || is_ereg(src_reg))
-				EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
-			else
-				EMIT1(0x8B);
-			goto ldx;
 		case BPF_LDX | BPF_MEM | BPF_DW:
 		case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
-			/* Emit 'mov rax, qword ptr [rax+0x14]' */
-			EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
-ldx:			/*
-			 * If insn->off == 0 we can save one extra byte, but
-			 * special case of x86 R13 which always needs an offset
-			 * is not worth the hassle
-			 */
-			if (is_imm8(insn->off))
-				EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
-			else
-				EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
-					    insn->off);
+			emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
 			if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
 				struct exception_table_entry *ex;
 				u8 *_insn = image + proglen;
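
With this hunk every store and load path funnels through one helper, and the
size class comes straight from the opcode via BPF_SIZE(), replacing the old
per-size goto chains. An illustrative call of the new helper (buffer name
and values invented for the example):

    /*
     * Emitting *(u32 *)(rdi + 8) = eax, i.e. dst_reg = BPF_REG_1 (rdi),
     * src_reg = BPF_REG_0 (eax). Neither register needs a REX prefix and
     * 8 fits in a disp8, so this produces three bytes: 89 47 08.
     */
    u8 *prog = some_image_buf;	/* hypothetical output buffer */

    emit_stx(&prog, BPF_W, BPF_REG_1, BPF_REG_0, 8);
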
@@ -899,13 +948,8 @@ xadd: if (is_imm8(insn->off))
 			/* call */
 		case BPF_JMP | BPF_CALL:
 			func = (u8 *) __bpf_call_base + imm32;
-			jmp_offset = func - (image + addrs[i]);
-			if (!imm32 || !is_simm32(jmp_offset)) {
-				pr_err("unsupported BPF func %d addr %p image %p\n",
-				       imm32, func, image);
+			if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
 				return -EINVAL;
-			}
-			EMIT1_off32(0xE8, jmp_offset);
 			break;
 
 		case BPF_JMP | BPF_TAIL_CALL:
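
A note on the new call site: addrs[i] records the end offset of JITed insn
i, so addrs[i - 1] is where insn i starts, and emit_call() folds the 5-byte
instruction length into the displacement itself. With made-up offsets:

    /*
     * Suppose the call instruction starts at image offset 0x40 and func
     * resolves to image + 0x100:
     *
     *   ip     = image + addrs[i - 1] = image + 0x40
     *   offset = func - (ip + X86_CALL_SIZE)
     *          = 0x100 - (0x40 + 5) = 0xBB
     *
     * emit_call() then writes E8 BB 00 00 00 (rel32 is little-endian).
     */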