@@ -647,6 +647,82 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
 	return 0;
 }
 
+static int emit_atomic_load_store(const struct bpf_insn *insn,
+				  struct jit_ctx *ctx)
+{
+	const s32 imm = insn->imm;
+	const s16 off = insn->off;
+	const u8 code = insn->code;
+	const bool arena = BPF_MODE(code) == BPF_PROBE_ATOMIC;
+	const u8 arena_vm_base = bpf2a64[ARENA_VM_START];
+	const u8 dst = bpf2a64[insn->dst_reg];
+	const u8 src = bpf2a64[insn->src_reg];
+	const u8 tmp = bpf2a64[TMP_REG_1];
+	u8 reg;
+
+	switch (imm) {
+	case BPF_LOAD_ACQ:
+		reg = src;
+		break;
+	case BPF_STORE_REL:
+		reg = dst;
+		break;
+	default:
+		pr_err_once("unknown atomic load/store op code %02x\n", imm);
+		return -EINVAL;
+	}
+
+	if (off) {
+		emit_a64_add_i(1, tmp, reg, tmp, off, ctx);
+		reg = tmp;
+	}
+	if (arena) {
+		emit(A64_ADD(1, tmp, reg, arena_vm_base), ctx);
+		reg = tmp;
+	}
+
+	switch (imm) {
+	case BPF_LOAD_ACQ:
+		switch (BPF_SIZE(code)) {
+		case BPF_B:
+			emit(A64_LDARB(dst, reg), ctx);
+			break;
+		case BPF_H:
+			emit(A64_LDARH(dst, reg), ctx);
+			break;
+		case BPF_W:
+			emit(A64_LDAR32(dst, reg), ctx);
+			break;
+		case BPF_DW:
+			emit(A64_LDAR64(dst, reg), ctx);
+			break;
+		}
+		break;
+	case BPF_STORE_REL:
+		switch (BPF_SIZE(code)) {
+		case BPF_B:
+			emit(A64_STLRB(src, reg), ctx);
+			break;
+		case BPF_H:
+			emit(A64_STLRH(src, reg), ctx);
+			break;
+		case BPF_W:
+			emit(A64_STLR32(src, reg), ctx);
+			break;
+		case BPF_DW:
+			emit(A64_STLR64(src, reg), ctx);
+			break;
+		}
+		break;
+	default:
+		pr_err_once("unexpected atomic load/store op code %02x\n",
+			    imm);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 #ifdef CONFIG_ARM64_LSE_ATOMICS
 static int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
 {
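For context on what the new helper consumes: a BPF load-acquire or store-release arrives as a BPF_STX | BPF_ATOMIC (or BPF_STX | BPF_PROBE_ATOMIC, for arena) instruction whose imm field is BPF_LOAD_ACQ or BPF_STORE_REL. A minimal sketch of such an instruction, assuming the uapi definitions from <linux/bpf.h> that ship with kernels carrying this patch (the register numbers and offset below are illustrative only, not part of the patch):

	#include <linux/bpf.h>

	/* Illustrative only: a 16-bit load-acquire from (r1 + 8) into r0,
	 * i.e. the shape of insn that takes the BPF_H / BPF_LOAD_ACQ path
	 * in emit_atomic_load_store() above. dst_reg receives the loaded
	 * value, src_reg holds the base address, and the nonzero off is
	 * folded in via emit_a64_add_i(). */
	static const struct bpf_insn load_acq_example = {
		.code    = BPF_STX | BPF_ATOMIC | BPF_H,
		.dst_reg = BPF_REG_0,
		.src_reg = BPF_REG_1,
		.off     = 8,
		.imm     = BPF_LOAD_ACQ,
	};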
@@ -1641,11 +1717,17 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
 			return ret;
 		break;
 
+	case BPF_STX | BPF_ATOMIC | BPF_B:
+	case BPF_STX | BPF_ATOMIC | BPF_H:
 	case BPF_STX | BPF_ATOMIC | BPF_W:
 	case BPF_STX | BPF_ATOMIC | BPF_DW:
+	case BPF_STX | BPF_PROBE_ATOMIC | BPF_B:
+	case BPF_STX | BPF_PROBE_ATOMIC | BPF_H:
 	case BPF_STX | BPF_PROBE_ATOMIC | BPF_W:
 	case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW:
-		if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
+		if (bpf_atomic_is_load_store(insn))
+			ret = emit_atomic_load_store(insn, ctx);
+		else if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
 			ret = emit_lse_atomic(insn, ctx);
 		else
 			ret = emit_ll_sc_atomic(insn, ctx);
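A note on the dispatch order above: LDAR/STLR are base ARMv8.0 instructions, so load-acquire/store-release does not depend on FEAT_LSE, and the bpf_atomic_is_load_store() check deliberately runs before the ARM64_HAS_LSE_ATOMICS branch. The helper comes from the generic BPF headers; as used here it reduces to a check on insn->imm, roughly as follows (a sketch for reference, not part of this diff):

	/* Sketch of the generic helper: load-acquire/store-release are the
	 * only atomic ops whose imm field carries these two values. */
	static inline bool bpf_atomic_is_load_store(const struct bpf_insn *insn)
	{
		const s32 imm = insn->imm;

		return imm == BPF_LOAD_ACQ || imm == BPF_STORE_REL;
	}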
@@ -2667,13 +2749,10 @@ bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena)
 	if (!in_arena)
 		return true;
 	switch (insn->code) {
-	case BPF_STX | BPF_ATOMIC | BPF_B:
-	case BPF_STX | BPF_ATOMIC | BPF_H:
 	case BPF_STX | BPF_ATOMIC | BPF_W:
 	case BPF_STX | BPF_ATOMIC | BPF_DW:
-		if (bpf_atomic_is_load_store(insn))
-			return false;
-		if (!cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
+		if (!bpf_atomic_is_load_store(insn) &&
+		    !cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
 			return false;
 	}
 	return true;
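Net effect of this last hunk on arena support: the BPF_B and BPF_H atomic cases drop out of the switch entirely, because byte/halfword atomics can only be load-acquire/store-release and those are now always JITable, so they fall through to the final return true. For BPF_W/BPF_DW, only the read-modify-write atomics still require ARM64_HAS_LSE_ATOMICS; load-acquire/store-release is accepted regardless of LSE support.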