@@ -647,6 +647,81 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 	return 0;
 }
 
+static int emit_atomic_ld_st(const struct bpf_insn *insn, struct jit_ctx *ctx)
+{
+	const s32 imm = insn->imm;
+	const s16 off = insn->off;
+	const u8 code = insn->code;
+	const bool arena = BPF_MODE(code) == BPF_PROBE_ATOMIC;
+	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
+	const u8 dst = bpf2a64[insn->dst_reg];
+	const u8 src = bpf2a64[insn->src_reg];
+	const u8 tmp = bpf2a64[TMP_REG_1];
+	u8 reg;
+
+	switch (imm) {
+	case BPF_LOAD_ACQ:
+		reg = src;
+		break;
+	case BPF_STORE_REL:
+		reg = dst;
+		break;
+	default:
+		pr_err_once("unknown atomic load/store op code %02x\n", imm);
+		return -EINVAL;
+	}
+
+	if (off) {
+		emit_a64_add_i(1, tmp, reg, tmp, off, ctx);
+		reg = tmp;
+	}
+	if (arena) {
+		emit(A64_ADD(1, tmp, reg, arena_vm_base), ctx);
+		reg = tmp;
+	}
+
+	switch (imm) {
+	case BPF_LOAD_ACQ:
+		switch (BPF_SIZE(code)) {
+		case BPF_B:
+			emit(A64_LDARB(dst, reg), ctx);
+			break;
+		case BPF_H:
+			emit(A64_LDARH(dst, reg), ctx);
+			break;
+		case BPF_W:
+			emit(A64_LDAR32(dst, reg), ctx);
+			break;
+		case BPF_DW:
+			emit(A64_LDAR64(dst, reg), ctx);
+			break;
+		}
+		break;
+	case BPF_STORE_REL:
+		switch (BPF_SIZE(code)) {
+		case BPF_B:
+			emit(A64_STLRB(src, reg), ctx);
+			break;
+		case BPF_H:
+			emit(A64_STLRH(src, reg), ctx);
+			break;
+		case BPF_W:
+			emit(A64_STLR32(src, reg), ctx);
+			break;
+		case BPF_DW:
+			emit(A64_STLR64(src, reg), ctx);
+			break;
+		}
+		break;
+	default:
+		pr_err_once("unexpected atomic load/store op code %02x\n",
+			    imm);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 #ifdef CONFIG_ARM64_LSE_ATOMICS
 static int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
 {
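The new helper maps BPF_LOAD_ACQ onto the AArch64 LDAR{B,H,32,64} family and BPF_STORE_REL onto STLR{B,H,32,64}, materializing the effective address in the temporary register first whenever an offset or an arena base fixup is needed. In plain C terms the emitted semantics are ordinary acquire/release atomics; a minimal user-space sketch with the GCC/Clang __atomic builtins (illustration only, not part of the patch, and the helper names are hypothetical):

#include <stdint.h>

/* Like A64_LDAR64: a load-acquire; no later memory access may be
 * reordered before it. */
static inline uint64_t load_acq64(const uint64_t *p)
{
	return __atomic_load_n(p, __ATOMIC_ACQUIRE);
}

/* Like A64_STLR64: a store-release; no earlier memory access may be
 * reordered after it. */
static inline void store_rel64(uint64_t *p, uint64_t v)
{
	__atomic_store_n(p, v, __ATOMIC_RELEASE);
}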
@@ -1641,11 +1716,17 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 			return ret;
 		break;
 
+	case BPF_STX | BPF_ATOMIC | BPF_B:
+	case BPF_STX | BPF_ATOMIC | BPF_H:
 	case BPF_STX | BPF_ATOMIC | BPF_W:
 	case BPF_STX | BPF_ATOMIC | BPF_DW:
+	case BPF_STX | BPF_PROBE_ATOMIC | BPF_B:
+	case BPF_STX | BPF_PROBE_ATOMIC | BPF_H:
 	case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
 	case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
-		if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
+		if (bpf_atomic_is_load_store(insn))
+			ret = emit_atomic_ld_st(insn, ctx);
+		else if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
 			ret = emit_lse_atomic(insn, ctx);
 		else
 			ret = emit_ll_sc_atomic(insn, ctx);
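In build_insn(), load-acquire/store-release is dispatched before the LSE capability check because LDAR/STLR belong to the base ARMv8 ISA, so neither FEAT_LSE nor the LL/SC fallback is involved. A sketch of an instruction that now takes the emit_atomic_ld_st() path (hypothetical construction for illustration; per the BPF ISA, the BPF_LOAD_ACQ/BPF_STORE_REL selector lives in insn->imm):

/* r0 = load_acquire((u32 *)(r1 + 0)) */
struct bpf_insn insn = {
	.code    = BPF_STX | BPF_ATOMIC | BPF_W,
	.dst_reg = BPF_REG_0,	/* value destination */
	.src_reg = BPF_REG_1,	/* address operand */
	.off     = 0,
	.imm     = BPF_LOAD_ACQ,
};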
@@ -2667,13 +2748,10 @@ bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
 	if (!in_arena)
 		return true;
 	switch (insn->code) {
-	case BPF_STX | BPF_ATOMIC | BPF_B:
-	case BPF_STX | BPF_ATOMIC | BPF_H:
 	case BPF_STX | BPF_ATOMIC | BPF_W:
 	case BPF_STX | BPF_ATOMIC | BPF_DW:
-		if (bpf_atomic_is_load_store(insn))
-			return false;
-		if (!cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
+		if (!bpf_atomic_is_load_store(insn) &&
+		    !cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
 			return false;
 	}
 	return true;
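The reworked bpf_jit_supports_insn() check folds the two rejections into one: arena programs can now always use acquire/release loads and stores at every size (which is why the BPF_B/BPF_H case labels can be dropped from the reject list), while the other, read-modify-write arena atomics still require FEAT_LSE as before. Restated as a standalone predicate (a sketch for clarity, not kernel code):

static bool arena_atomic_supported(const struct bpf_insn *insn)
{
	return bpf_atomic_is_load_store(insn) ||	/* LDAR/STLR: base ARMv8 */
	       cpus_have_cap(ARM64_HAS_LSE_ATOMICS);	/* RMW atomics need LSE */
}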