@@ -699,7 +699,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
 
   switch (E->getOp()) {
   case AtomicExpr::AO__c11_atomic_init:
-    llvm_unreachable("Already handled!");
+    llvm_unreachable("Already handled above with EmitAtomicInit!");
 
   case AtomicExpr::AO__c11_atomic_load:
   case AtomicExpr::AO__atomic_load_n:
@@ -785,20 +785,43 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
   if (UseLibcall) {
     bool UseOptimizedLibcall = false;
     switch (E->getOp()) {
+    case AtomicExpr::AO__c11_atomic_init:
+      llvm_unreachable("Already handled above with EmitAtomicInit!");
+
     case AtomicExpr::AO__c11_atomic_fetch_add:
     case AtomicExpr::AO__atomic_fetch_add:
     case AtomicExpr::AO__c11_atomic_fetch_and:
     case AtomicExpr::AO__atomic_fetch_and:
     case AtomicExpr::AO__c11_atomic_fetch_or:
     case AtomicExpr::AO__atomic_fetch_or:
+    case AtomicExpr::AO__atomic_fetch_nand:
     case AtomicExpr::AO__c11_atomic_fetch_sub:
     case AtomicExpr::AO__atomic_fetch_sub:
     case AtomicExpr::AO__c11_atomic_fetch_xor:
     case AtomicExpr::AO__atomic_fetch_xor:
+    case AtomicExpr::AO__atomic_add_fetch:
+    case AtomicExpr::AO__atomic_and_fetch:
+    case AtomicExpr::AO__atomic_nand_fetch:
+    case AtomicExpr::AO__atomic_or_fetch:
+    case AtomicExpr::AO__atomic_sub_fetch:
+    case AtomicExpr::AO__atomic_xor_fetch:
       // For these, only library calls for certain sizes exist.
       UseOptimizedLibcall = true;
      break;
-    default:
+
+    case AtomicExpr::AO__c11_atomic_load:
+    case AtomicExpr::AO__c11_atomic_store:
+    case AtomicExpr::AO__c11_atomic_exchange:
+    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+    case AtomicExpr::AO__atomic_load_n:
+    case AtomicExpr::AO__atomic_load:
+    case AtomicExpr::AO__atomic_store_n:
+    case AtomicExpr::AO__atomic_store:
+    case AtomicExpr::AO__atomic_exchange_n:
+    case AtomicExpr::AO__atomic_exchange:
+    case AtomicExpr::AO__atomic_compare_exchange_n:
+    case AtomicExpr::AO__atomic_compare_exchange:
       // Only use optimized library calls for sizes for which they exist.
       if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
         UseOptimizedLibcall = true;
@@ -820,6 +843,9 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
     QualType RetTy;
     bool HaveRetTy = false;
     switch (E->getOp()) {
+    case AtomicExpr::AO__c11_atomic_init:
+      llvm_unreachable("Already handled!");
+
     // There is only one libcall for compare an exchange, because there is no
     // optimisation benefit possible from a libcall version of a weak compare
     // and exchange.
@@ -903,7 +929,49 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
       AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
                         E->getExprLoc(), sizeChars);
       break;
-    default: return EmitUnsupportedRValue(E, "atomic library call");
+    // T __atomic_fetch_nand_N(T *mem, T val, int order)
+    case AtomicExpr::AO__atomic_fetch_nand:
+      LibCallName = "__atomic_fetch_nand";
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+                        E->getExprLoc(), sizeChars);
+      break;
+
+    // T __atomic_add_fetch_N(T *mem, T val, int order)
+    case AtomicExpr::AO__atomic_add_fetch:
+      LibCallName = "__atomic_add_fetch";
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
+                        E->getExprLoc(), sizeChars);
+      break;
+    // T __atomic_and_fetch_N(T *mem, T val, int order)
+    case AtomicExpr::AO__atomic_and_fetch:
+      LibCallName = "__atomic_and_fetch";
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+                        E->getExprLoc(), sizeChars);
+      break;
+    // T __atomic_or_fetch_N(T *mem, T val, int order)
+    case AtomicExpr::AO__atomic_or_fetch:
+      LibCallName = "__atomic_or_fetch";
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+                        E->getExprLoc(), sizeChars);
+      break;
+    // T __atomic_sub_fetch_N(T *mem, T val, int order)
+    case AtomicExpr::AO__atomic_sub_fetch:
+      LibCallName = "__atomic_sub_fetch";
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
+                        E->getExprLoc(), sizeChars);
+      break;
+    // T __atomic_xor_fetch_N(T *mem, T val, int order)
+    case AtomicExpr::AO__atomic_xor_fetch:
+      LibCallName = "__atomic_xor_fetch";
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+                        E->getExprLoc(), sizeChars);
+      break;
+    // T __atomic_nand_fetch_N(T *mem, T val, int order)
+    case AtomicExpr::AO__atomic_nand_fetch:
+      LibCallName = "__atomic_nand_fetch";
+      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+                        E->getExprLoc(), sizeChars);
+      break;
     }
 
     // Optimized functions have the size in their name.
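
For context (not part of the commit): a minimal sketch of the GNU `__atomic` builtins whose libcall lowering this change adds. The `fetch_op` form returns the value held before the operation; the new `op_fetch` forms return the value after it.

```cpp
// Minimal sketch, not from the patch: exercises __atomic_fetch_nand and the
// __atomic_op_fetch builtins this commit teaches EmitAtomicExpr to lower.
#include <cstdio>

int main() {
  unsigned x = 0xF0;

  // fetch_op form: returns the value held *before* the operation.
  unsigned old = __atomic_fetch_nand(&x, 0xFFu, __ATOMIC_SEQ_CST);
  // old == 0xF0; x is now ~(0xF0 & 0xFF)

  // op_fetch forms: return the value held *after* the operation.
  x = 0;
  unsigned a = __atomic_add_fetch(&x, 5u, __ATOMIC_SEQ_CST);   // a == 5
  unsigned s = __atomic_sub_fetch(&x, 2u, __ATOMIC_SEQ_CST);   // s == 3
  unsigned n = __atomic_nand_fetch(&x, 1u, __ATOMIC_SEQ_CST);  // n == ~(3 & 1)

  std::printf("old=%#x a=%u s=%u n=%#x\n", old, a, s, n);
  return 0;
}
```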
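When the target cannot emit one of these operations inline, the lowering resolves to the size-suffixed libcalls described by the `T __atomic_fetch_nand_N(T *mem, T val, int order)` comments in the final hunk ("Optimized functions have the size in their name"). The declarations below are hypothetical, paraphrased from those comments for N == 4; the real entry points are provided by the atomic support library (e.g., libatomic). Note also that the add and sub cases pass `LoweredMemTy` rather than `MemTy`, presumably so atomic pointer arithmetic uses the lowered integer type for the value operand.

```cpp
// Hypothetical externals for a 4-byte operand, paraphrased from the diff's
// comments; exact parameter types are the support library's concern.
extern "C" {
unsigned __atomic_fetch_nand_4(volatile void *mem, unsigned val, int order);
unsigned __atomic_nand_fetch_4(volatile void *mem, unsigned val, int order);
unsigned __atomic_add_fetch_4(volatile void *mem, unsigned val, int order);
}
```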