@@ -6920,7 +6920,8 @@ pub unsafe fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(test, assert_instr(mov))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov))]
 pub unsafe fn vcombine_f16(low: float16x4_t, high: float16x4_t) -> float16x8_t {
     simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7])
 }
@@ -6930,7 +6931,8 @@ pub unsafe fn vcombine_f16(low: float16x4_t, high: float16x4_t) -> float16x8_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(test, assert_instr(mov))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcombine_f32(low: float32x2_t, high: float32x2_t) -> float32x4_t {
     simd_shuffle4!(low, high, [0, 1, 2, 3])
@@ -6940,7 +6942,8 @@ pub unsafe fn vcombine_f32(low: float32x2_t, high: float32x2_t) -> float32x4_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(test, assert_instr(mov))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov))]
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub unsafe fn vcombine_p8(low: poly8x8_t, high: poly8x8_t) -> poly8x16_t {
     simd_shuffle16!(
@@ -6964,7 +6967,8 @@ pub unsafe fn vcombine_p16(low: poly16x4_t, high: poly16x4_t) -> poly16x8_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(test, assert_instr(mov))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov))]
 #[cfg_attr(
     target_arch = "aarch64",
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -6981,7 +6985,8 @@ pub unsafe fn vcombine_s8(low: int8x8_t, high: int8x8_t) -> int8x16_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(test, assert_instr(mov))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov))]
 #[cfg_attr(
     target_arch = "aarch64",
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -6994,7 +6999,8 @@ pub unsafe fn vcombine_s16(low: int16x4_t, high: int16x4_t) -> int16x8_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(test, assert_instr(mov))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov))]
 #[cfg_attr(
     target_arch = "aarch64",
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -7007,7 +7013,8 @@ pub unsafe fn vcombine_s32(low: int32x2_t, high: int32x2_t) -> int32x4_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(test, assert_instr(mov))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov))]
 #[cfg_attr(
     target_arch = "aarch64",
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -7020,7 +7027,8 @@ pub unsafe fn vcombine_s64(low: int64x1_t, high: int64x1_t) -> int64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(test, assert_instr(mov))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov))]
 #[cfg_attr(
     target_arch = "aarch64",
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -7037,7 +7045,8 @@ pub unsafe fn vcombine_u8(low: uint8x8_t, high: uint8x8_t) -> uint8x16_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(test, assert_instr(mov))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov))]
 #[cfg_attr(
     target_arch = "aarch64",
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -7050,7 +7059,8 @@ pub unsafe fn vcombine_u16(low: uint16x4_t, high: uint16x4_t) -> uint16x8_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(test, assert_instr(mov))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov))]
 #[cfg_attr(
     target_arch = "aarch64",
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -7063,7 +7073,8 @@ pub unsafe fn vcombine_u32(low: uint32x2_t, high: uint32x2_t) -> uint32x4_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(test, assert_instr(mov))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov))]
 #[cfg_attr(
     target_arch = "aarch64",
     stable(feature = "neon_intrinsics", since = "1.59.0")
@@ -7076,7 +7087,8 @@ pub unsafe fn vcombine_u64(low: uint64x1_t, high: uint64x1_t) -> uint64x2_t {
 #[inline]
 #[target_feature(enable = "neon")]
 #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(test, assert_instr(mov))]
+#[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))]
+#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(mov))]
 #[cfg_attr(
     target_arch = "aarch64",
     stable(feature = "neon_intrinsics", since = "1.59.0")
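For context, a minimal usage sketch of one of the intrinsics touched by this diff, showing the aarch64 side where the `mov` assertion applies. The `combine_halves` wrapper is a hypothetical helper, not part of this change; only `vld1_u8`, `vcombine_u8`, and `vst1q_u8` are actual `core::arch::aarch64` APIs.

```rust
// Illustrative sketch only: join two 8-byte halves into one 16-byte vector.
#[cfg(target_arch = "aarch64")]
fn combine_halves(low: [u8; 8], high: [u8; 8]) -> [u8; 16] {
    use core::arch::aarch64::{vcombine_u8, vld1_u8, vst1q_u8};

    let mut out = [0u8; 16];
    // SAFETY: NEON is a baseline feature on aarch64 targets, and every
    // pointer refers to a properly sized, initialized buffer.
    unsafe {
        let combined = vcombine_u8(vld1_u8(low.as_ptr()), vld1_u8(high.as_ptr()));
        vst1q_u8(out.as_mut_ptr(), combined);
    }
    out
}
```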