@@ -578,10 +578,16 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
 
 	switch (type) {
 	case AARCH64_INSN_LDST_LOAD_EX:
+	case AARCH64_INSN_LDST_LOAD_ACQ_EX:
 		insn = aarch64_insn_get_load_ex_value();
+		if (type == AARCH64_INSN_LDST_LOAD_ACQ_EX)
+			insn |= BIT(15);
 		break;
 	case AARCH64_INSN_LDST_STORE_EX:
+	case AARCH64_INSN_LDST_STORE_REL_EX:
 		insn = aarch64_insn_get_store_ex_value();
+		if (type == AARCH64_INSN_LDST_STORE_REL_EX)
+			insn |= BIT(15);
 		break;
 	default:
 		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
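Note: bit 15 is the ordering bit of the load/store-exclusive class, so the new types turn LDXR/STXR into their LDAXR/STLXR counterparts. A minimal usage sketch with hypothetical register choices, assuming the function's existing (reg, base, state, size, type) parameter order:

	/* Sketch: ldaxr w0, [x1], an exclusive load with acquire semantics.
	 * The exclusive-status register (Rs) is only used by the store
	 * forms, so XZR is passed here.
	 */
	u32 ldaxr = aarch64_insn_gen_load_store_ex(AARCH64_INSN_REG_0,  /* Rt */
						   AARCH64_INSN_REG_1,  /* Rn */
						   AARCH64_INSN_REG_ZR, /* Rs */
						   AARCH64_INSN_SIZE_32,
						   AARCH64_INSN_LDST_LOAD_ACQ_EX);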
@@ -603,12 +609,65 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
 					    state);
 }
 
-u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
-			   enum aarch64_insn_register address,
-			   enum aarch64_insn_register value,
-			   enum aarch64_insn_size_type size)
+#ifdef CONFIG_ARM64_LSE_ATOMICS
+static u32 aarch64_insn_encode_ldst_order(enum aarch64_insn_mem_order_type type,
+					  u32 insn)
 {
-	u32 insn = aarch64_insn_get_ldadd_value();
+	u32 order;
+
+	switch (type) {
+	case AARCH64_INSN_MEM_ORDER_NONE:
+		order = 0;
+		break;
+	case AARCH64_INSN_MEM_ORDER_ACQ:
+		order = 2;
+		break;
+	case AARCH64_INSN_MEM_ORDER_REL:
+		order = 1;
+		break;
+	case AARCH64_INSN_MEM_ORDER_ACQREL:
+		order = 3;
+		break;
+	default:
+		pr_err("%s: unknown mem order %d\n", __func__, type);
+		return AARCH64_BREAK_FAULT;
+	}
+
+	insn &= ~GENMASK(23, 22);
+	insn |= order << 22;
+
+	return insn;
+}
+
+u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
+				  enum aarch64_insn_register address,
+				  enum aarch64_insn_register value,
+				  enum aarch64_insn_size_type size,
+				  enum aarch64_insn_mem_atomic_op op,
+				  enum aarch64_insn_mem_order_type order)
+{
+	u32 insn;
+
+	switch (op) {
+	case AARCH64_INSN_MEM_ATOMIC_ADD:
+		insn = aarch64_insn_get_ldadd_value();
+		break;
+	case AARCH64_INSN_MEM_ATOMIC_CLR:
+		insn = aarch64_insn_get_ldclr_value();
+		break;
+	case AARCH64_INSN_MEM_ATOMIC_EOR:
+		insn = aarch64_insn_get_ldeor_value();
+		break;
+	case AARCH64_INSN_MEM_ATOMIC_SET:
+		insn = aarch64_insn_get_ldset_value();
+		break;
+	case AARCH64_INSN_MEM_ATOMIC_SWP:
+		insn = aarch64_insn_get_swp_value();
+		break;
+	default:
+		pr_err("%s: unimplemented mem atomic op %d\n", __func__, op);
+		return AARCH64_BREAK_FAULT;
+	}
 
 	switch (size) {
 	case AARCH64_INSN_SIZE_32:
@@ -621,6 +680,8 @@ u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
 
 	insn = aarch64_insn_encode_ldst_size(size, insn);
 
+	insn = aarch64_insn_encode_ldst_order(order, insn);
+
 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
 					    result);
 
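Note: in the LSE atomic encodings, bit 23 is the acquire (A) bit and bit 22 is the release (R) bit, so the order values above (ACQ = 2, REL = 1, ACQREL = 3) select the -A, -L and -AL instruction variants. A hypothetical usage sketch with illustrative register numbers:

	/* Sketch: ldaddal x2, x0, [x1], i.e. atomically add x2 to the
	 * doubleword at [x1], return the old value in x0, with
	 * acquire-release ordering.
	 */
	u32 ldaddal = aarch64_insn_gen_atomic_ld_op(AARCH64_INSN_REG_0, /* Rt: old value */
						    AARCH64_INSN_REG_1, /* Rn: address */
						    AARCH64_INSN_REG_2, /* Rs: operand */
						    AARCH64_INSN_SIZE_64,
						    AARCH64_INSN_MEM_ATOMIC_ADD,
						    AARCH64_INSN_MEM_ORDER_ACQREL);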
@@ -631,17 +692,68 @@ u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
 					    value);
 }
 
-u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
-			   enum aarch64_insn_register value,
-			   enum aarch64_insn_size_type size)
+static u32 aarch64_insn_encode_cas_order(enum aarch64_insn_mem_order_type type,
+					 u32 insn)
 {
-	/*
-	 * STADD is simply encoded as an alias for LDADD with XZR as
-	 * the destination register.
-	 */
-	return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
-				      value, size);
+	u32 order;
+
+	switch (type) {
+	case AARCH64_INSN_MEM_ORDER_NONE:
+		order = 0;
+		break;
+	case AARCH64_INSN_MEM_ORDER_ACQ:
+		order = BIT(22);
+		break;
+	case AARCH64_INSN_MEM_ORDER_REL:
+		order = BIT(15);
+		break;
+	case AARCH64_INSN_MEM_ORDER_ACQREL:
+		order = BIT(15) | BIT(22);
+		break;
+	default:
+		pr_err("%s: unknown mem order %d\n", __func__, type);
+		return AARCH64_BREAK_FAULT;
+	}
+
+	insn &= ~(BIT(15) | BIT(22));
+	insn |= order;
+
+	return insn;
+}
+
+u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
+			 enum aarch64_insn_register address,
+			 enum aarch64_insn_register value,
+			 enum aarch64_insn_size_type size,
+			 enum aarch64_insn_mem_order_type order)
+{
+	u32 insn;
+
+	switch (size) {
+	case AARCH64_INSN_SIZE_32:
+	case AARCH64_INSN_SIZE_64:
+		break;
+	default:
+		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
+		return AARCH64_BREAK_FAULT;
+	}
+
+	insn = aarch64_insn_get_cas_value();
+
+	insn = aarch64_insn_encode_ldst_size(size, insn);
+
+	insn = aarch64_insn_encode_cas_order(order, insn);
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
+					    result);
+
+	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
+					    address);
+
+	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
+					    value);
 }
+#endif
 
 static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
 					enum aarch64_insn_prfm_target target,
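Note: for the compare-and-swap class the ordering bits sit elsewhere: acquire is bit 22 (the L bit) and release is bit 15 (the o0 bit), hence the separate aarch64_insn_encode_cas_order() helper and its different clearing mask. A hypothetical caller sketch (register choices are illustrative only):

	/* Sketch: casal w2, w0, [x1], i.e. compare w2 with the word at
	 * [x1], store w0 if they match, return the old memory value in
	 * w2, with acquire-release semantics.
	 */
	u32 casal = aarch64_insn_gen_cas(AARCH64_INSN_REG_0, /* Rt: new value */
					 AARCH64_INSN_REG_1, /* Rn: address */
					 AARCH64_INSN_REG_2, /* Rs: compare value */
					 AARCH64_INSN_SIZE_32,
					 AARCH64_INSN_MEM_ORDER_ACQREL);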
@@ -1379,7 +1491,7 @@ static u32 aarch64_encode_immediate(u64 imm,
 		 * Compute the rotation to get a continuous set of
 		 * ones, with the first bit set at position 0
 		 */
-		ror = fls(~imm);
+		ror = fls64(~imm);
 	}
 
 	/*
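Note: this matters because imm is a u64 while fls() only examines a 32-bit value. As a worked illustration (hypothetical input, not from the patch): for a wrap-around pattern imm = 0xff000000000000ff, ~imm = 0x00ffffffffffff00, so fls64(~imm) = 56, whereas fls() would only see the low word 0xffffff00, return 32, and yield the wrong rotation.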
@@ -1456,3 +1568,48 @@ u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
 }
+
+u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
+{
+	u32 opt;
+	u32 insn;
+
+	switch (type) {
+	case AARCH64_INSN_MB_SY:
+		opt = 0xf;
+		break;
+	case AARCH64_INSN_MB_ST:
+		opt = 0xe;
+		break;
+	case AARCH64_INSN_MB_LD:
+		opt = 0xd;
+		break;
+	case AARCH64_INSN_MB_ISH:
+		opt = 0xb;
+		break;
+	case AARCH64_INSN_MB_ISHST:
+		opt = 0xa;
+		break;
+	case AARCH64_INSN_MB_ISHLD:
+		opt = 0x9;
+		break;
+	case AARCH64_INSN_MB_NSH:
+		opt = 0x7;
+		break;
+	case AARCH64_INSN_MB_NSHST:
+		opt = 0x6;
+		break;
+	case AARCH64_INSN_MB_NSHLD:
+		opt = 0x5;
+		break;
+	default:
+		pr_err("%s: unknown dmb type %d\n", __func__, type);
+		return AARCH64_BREAK_FAULT;
+	}
+
+	insn = aarch64_insn_get_dmb_value();
+	insn &= ~GENMASK(11, 8);
+	insn |= (opt << 8);
+
+	return insn;
+}
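Note: the barrier option lands in the CRm field (bits 11:8) of the DMB encoding. Illustrative usage, with the expected encoding noted as an assumption:

	u32 dmb_ish = aarch64_insn_gen_dmb(AARCH64_INSN_MB_ISH); /* should encode "dmb ish", 0xd5033bbf */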