@@ -6915,6 +6915,163 @@ pub unsafe fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4
6915
6915
vusmmlaq_s32_ ( a, b, c)
6916
6916
}
6917
6917
6918
/* FIXME: 16-bit float support
/// Vector combine
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(test, assert_instr(mov))]
pub unsafe fn vcombine_f16(low: float16x4_t, high: float16x4_t) -> float16x8_t {
    simd_shuffle8!(low, high, [0, 1, 2, 3, 4, 5, 6, 7])
}
*/
6927
+
6928
+ /// Vector combine
6929
+ #[ inline]
6930
+ #[ target_feature( enable = "neon" ) ]
6931
+ #[ cfg_attr( test, assert_instr( mov) ) ]
6932
+ #[ stable( feature = "neon_intrinsics" , since = "1.59.0" ) ]
6933
+ pub unsafe fn vcombine_f32 ( low : float32x2_t , high : float32x2_t ) -> float32x4_t {
6934
+ simd_shuffle4 ! ( low, high, [ 0 , 1 , 2 , 3 ] )
6935
+ }
6936
+
6937
+ /// Vector combine
6938
+ #[ inline]
6939
+ #[ target_feature( enable = "neon" ) ]
6940
+ #[ cfg_attr( test, assert_instr( mov) ) ]
6941
+ #[ stable( feature = "neon_intrinsics" , since = "1.59.0" ) ]
6942
+ pub unsafe fn vcombine_p8 ( low : poly8x8_t , high : poly8x8_t ) -> poly8x16_t {
6943
+ simd_shuffle16 ! (
6944
+ low,
6945
+ high,
6946
+ [ 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 ] ,
6947
+ )
6948
+ }
6949
+
6950
+ /// Vector combine
6951
+ #[ inline]
6952
+ #[ target_feature( enable = "neon" ) ]
6953
+ #[ cfg_attr( test, assert_instr( mov) ) ]
6954
+ #[ stable( feature = "neon_intrinsics" , since = "1.59.0" ) ]
6955
+ pub unsafe fn vcombine_p16 ( low : poly16x4_t , high : poly16x4_t ) -> poly16x8_t {
6956
+ simd_shuffle8 ! ( low, high, [ 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ] )
6957
+ }
6958
+
6959
+ /// Vector combine
6960
+ #[ inline]
6961
+ #[ target_feature( enable = "neon,v7" ) ]
6962
+ #[ cfg_attr( test, assert_instr( mov) ) ]
6963
+ #[ cfg_attr(
6964
+ target_arch = "aarch64" ,
6965
+ stable( feature = "neon_intrinsics" , since = "1.59.0" )
6966
+ ) ]
6967
+ pub unsafe fn vcombine_s8 ( low : int8x8_t , high : int8x8_t ) -> int8x16_t {
6968
+ simd_shuffle16 ! (
6969
+ low,
6970
+ high,
6971
+ [ 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 ] ,
6972
+ )
6973
+ }
6974
+
6975
+ /// Vector combine
6976
+ #[ inline]
6977
+ #[ target_feature( enable = "neon,v7" ) ]
6978
+ #[ cfg_attr( test, assert_instr( mov) ) ]
6979
+ #[ cfg_attr(
6980
+ target_arch = "aarch64" ,
6981
+ stable( feature = "neon_intrinsics" , since = "1.59.0" )
6982
+ ) ]
6983
+ pub unsafe fn vcombine_s16 ( low : int16x4_t , high : int16x4_t ) -> int16x8_t {
6984
+ simd_shuffle8 ! ( low, high, [ 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ] )
6985
+ }
6986
+
6987
+ /// Vector combine
6988
+ #[ inline]
6989
+ #[ target_feature( enable = "neon,v7" ) ]
6990
+ #[ cfg_attr( test, assert_instr( mov) ) ]
6991
+ #[ cfg_attr(
6992
+ target_arch = "aarch64" ,
6993
+ stable( feature = "neon_intrinsics" , since = "1.59.0" )
6994
+ ) ]
6995
+ pub unsafe fn vcombine_s32 ( low : int32x2_t , high : int32x2_t ) -> int32x4_t {
6996
+ simd_shuffle4 ! ( low, high, [ 0 , 1 , 2 , 3 ] )
6997
+ }
6998
+
6999
+ /// Vector combine
7000
+ #[ inline]
7001
+ #[ target_feature( enable = "neon,v7" ) ]
7002
+ #[ cfg_attr( test, assert_instr( mov) ) ]
7003
+ #[ cfg_attr(
7004
+ target_arch = "aarch64" ,
7005
+ stable( feature = "neon_intrinsics" , since = "1.59.0" )
7006
+ ) ]
7007
+ pub unsafe fn vcombine_s64 ( low : int64x1_t , high : int64x1_t ) -> int64x2_t {
7008
+ simd_shuffle2 ! ( low, high, [ 0 , 1 ] )
7009
+ }
7010
+
7011
+ /// Vector combine
7012
+ #[ inline]
7013
+ #[ target_feature( enable = "neon,v7" ) ]
7014
+ #[ cfg_attr( test, assert_instr( mov) ) ]
7015
+ #[ cfg_attr(
7016
+ target_arch = "aarch64" ,
7017
+ stable( feature = "neon_intrinsics" , since = "1.59.0" )
7018
+ ) ]
7019
+ pub unsafe fn vcombine_u8 ( low : uint8x8_t , high : uint8x8_t ) -> uint8x16_t {
7020
+ simd_shuffle16 ! (
7021
+ low,
7022
+ high,
7023
+ [ 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10 , 11 , 12 , 13 , 14 , 15 ] ,
7024
+ )
7025
+ }
7026
+
7027
+ /// Vector combine
7028
+ #[ inline]
7029
+ #[ target_feature( enable = "neon,v7" ) ]
7030
+ #[ cfg_attr( test, assert_instr( mov) ) ]
7031
+ #[ cfg_attr(
7032
+ target_arch = "aarch64" ,
7033
+ stable( feature = "neon_intrinsics" , since = "1.59.0" )
7034
+ ) ]
7035
+ pub unsafe fn vcombine_u16 ( low : uint16x4_t , high : uint16x4_t ) -> uint16x8_t {
7036
+ simd_shuffle8 ! ( low, high, [ 0 , 1 , 2 , 3 , 4 , 5 , 6 , 7 ] )
7037
+ }
7038
+
7039
+ /// Vector combine
7040
+ #[ inline]
7041
+ #[ target_feature( enable = "neon,v7" ) ]
7042
+ #[ cfg_attr( test, assert_instr( mov) ) ]
7043
+ #[ cfg_attr(
7044
+ target_arch = "aarch64" ,
7045
+ stable( feature = "neon_intrinsics" , since = "1.59.0" )
7046
+ ) ]
7047
+ pub unsafe fn vcombine_u32 ( low : uint32x2_t , high : uint32x2_t ) -> uint32x4_t {
7048
+ simd_shuffle4 ! ( low, high, [ 0 , 1 , 2 , 3 ] )
7049
+ }
7050
+
7051
+ /// Vector combine
7052
+ #[ inline]
7053
+ #[ target_feature( enable = "neon,v7" ) ]
7054
+ #[ cfg_attr( test, assert_instr( mov) ) ]
7055
+ #[ cfg_attr(
7056
+ target_arch = "aarch64" ,
7057
+ stable( feature = "neon_intrinsics" , since = "1.59.0" )
7058
+ ) ]
7059
+ pub unsafe fn vcombine_u64 ( low : uint64x1_t , high : uint64x1_t ) -> uint64x2_t {
7060
+ simd_shuffle2 ! ( low, high, [ 0 , 1 ] )
7061
+ }
7062
+
7063
+ /// Vector combine
7064
+ #[ inline]
7065
+ #[ target_feature( enable = "neon,v7" ) ]
7066
+ #[ cfg_attr( test, assert_instr( mov) ) ]
7067
+ #[ cfg_attr(
7068
+ target_arch = "aarch64" ,
7069
+ stable( feature = "neon_intrinsics" , since = "1.59.0" )
7070
+ ) ]
7071
+ pub unsafe fn vcombine_p64 ( low : poly64x1_t , high : poly64x1_t ) -> poly64x2_t {
7072
+ simd_shuffle2 ! ( low, high, [ 0 , 1 ] )
7073
+ }
7074
+
6918
7075
#[ cfg( test) ]
6919
7076
mod tests {
6920
7077
use super :: * ;
0 commit comments