
Commit 97be0ed

Remove boilerplate of over and over repeated impl
1 parent: bc9c941

File tree: 1 file changed (+59 −325 lines)


crates/core_arch/src/arm/neon.rs

Lines changed: 59 additions & 325 deletions
@@ -698,331 +698,65 @@ pub unsafe fn vmvnq_p8(a: poly8x16_t) -> poly8x16_t {
     simd_xor(a, b)
 }

-/// Vector bitwise and.
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
-pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
-    simd_and(a, b)
-}
-
-/// Vector bitwise and.
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
-pub unsafe fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
-    simd_and(a, b)
-}
-
-/// Vector bitwise and.
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
-pub unsafe fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
-    simd_and(a, b)
-}
-
-/// Vector bitwise and.
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
-pub unsafe fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
-    simd_and(a, b)
-}
-
-/// Vector bitwise and.
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
-pub unsafe fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
-    simd_and(a, b)
-}
-
-/// Vector bitwise and.
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
-pub unsafe fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
-    simd_and(a, b)
-}
-
-/// Vector bitwise and.
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
-pub unsafe fn vand_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
-    simd_and(a, b)
-}
-
-
-/// Vector bitwise and.
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
-pub unsafe fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
-    simd_and(a, b)
-}
-
-/// Vector bitwise and.
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
-pub unsafe fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
-    simd_and(a, b)
-}
-
-/// Vector bitwise and.
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
-pub unsafe fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
-    simd_and(a, b)
-}
-
-/// Vector bitwise and.
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
-pub unsafe fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
-    simd_and(a, b)
-}
-
-/// Vector bitwise and.
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
-pub unsafe fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
-    simd_and(a, b)
-}
-
-/// Vector bitwise and.
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
-pub unsafe fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
-    simd_and(a, b)
-}
-
-/// Vector bitwise and.
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
-pub unsafe fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
-    simd_and(a, b)
-
-}
-
-/// Vector bitwise and.
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
-pub unsafe fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
-    simd_and(a, b)
-}
-
-/// Vector bitwise and.
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
-pub unsafe fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
-    simd_and(a, b)
-
-}
-
-/// Vector bitwise or (immediate, inclusive).
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
-pub unsafe fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
-    simd_or(a, b)
-}
-
-/// Vector bitwise or (immediate, inclusive).
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
-pub unsafe fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t {
-    simd_or(a, b)
-}
-
-/// Vector bitwise or (immediate, inclusive).
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
-pub unsafe fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t {
-    simd_or(a, b)
-}
-
-/// Vector bitwise or (immediate, inclusive).
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
-pub unsafe fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t {
-    simd_or(a, b)
-}
-
-/// Vector bitwise or (immediate, inclusive).
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
-pub unsafe fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t {
-    simd_or(a, b)
-}
-
-/// Vector bitwise or (immediate, inclusive).
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
-pub unsafe fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t {
-    simd_or(a, b)
-}
-
-/// Vector bitwise or (immediate, inclusive).
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
-pub unsafe fn vorr_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t {
-    simd_or(a, b)
-}
-
-
-/// Vector bitwise or (immediate, inclusive).
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
-pub unsafe fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t {
-    simd_or(a, b)
-}
-
-/// Vector bitwise or (immediate, inclusive).
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
-pub unsafe fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t {
-    simd_or(a, b)
-}
-
-/// Vector bitwise or (immediate, inclusive).
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
-pub unsafe fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t {
-    simd_or(a, b)
-}
-
-/// Vector bitwise or (immediate, inclusive).
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
-pub unsafe fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t {
-    simd_or(a, b)
-}
-
-/// Vector bitwise or (immediate, inclusive).
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
-pub unsafe fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t {
-    simd_or(a, b)
-}
-
-/// Vector bitwise or (immediate, inclusive).
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
-pub unsafe fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t {
-    simd_or(a, b)
-}
-
-/// Vector bitwise or (immediate, inclusive).
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
-pub unsafe fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t {
-    simd_or(a, b)
-
-}
-
-/// Vector bitwise or (immediate, inclusive).
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
-pub unsafe fn vorr_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t {
-    simd_or(a, b)
-}
-
-/// Vector bitwise or (immediate, inclusive).
-#[inline]
-#[target_feature(enable = "neon")]
-#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
-#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))]
-#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(orr))]
-pub unsafe fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t {
-    simd_or(a, b)
-
-}
+macro_rules! arm_simd_2 {
+    ($name:ident, $type:ty, $simd_fn:ident, $intrarm:ident, $intraarch:ident) => {
+        #[inline]
+        #[target_feature(enable = "neon")]
+        #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
+        #[cfg_attr(all(test, target_arch = "arm"), assert_instr($intrarm))]
+        #[cfg_attr(all(test, target_arch = "aarch64"), assert_instr($intraarch))]
+        pub unsafe fn $name(a: $type, b: $type) -> $type {
+            $simd_fn(a, b)
+        }
+    };
+}
+
+macro_rules! arm_simd_and {
+    ($name:ident, $type:ty) => {
+        /// Vector bitwise and.
+        arm_simd_2!($name, $type, simd_and, vand, and);
+    };
+}
+arm_simd_and!(vand_s8, int8x8_t);
+arm_simd_and!(vandq_s8, int8x16_t);
+arm_simd_and!(vand_s16, int16x4_t);
+arm_simd_and!(vandq_s16, int16x8_t);
+arm_simd_and!(vand_s32, int32x2_t);
+arm_simd_and!(vandq_s32, int32x4_t);
+arm_simd_and!(vand_s64, int64x1_t);
+arm_simd_and!(vandq_s64, int64x2_t);
+arm_simd_and!(vand_u8, uint8x8_t);
+arm_simd_and!(vandq_u8, uint8x16_t);
+arm_simd_and!(vand_u16, uint16x4_t);
+arm_simd_and!(vandq_u16, uint16x8_t);
+arm_simd_and!(vand_u32, uint32x2_t);
+arm_simd_and!(vandq_u32, uint32x4_t);
+arm_simd_and!(vand_u64, uint64x1_t);
+arm_simd_and!(vandq_u64, uint64x2_t);
+
+macro_rules! arm_simd_orr {
+    ($name:ident, $type:ty) => {
+        /// Vector bitwise or (immediate, inclusive).
+        arm_simd_2!($name, $type, simd_or, vorr, orr);
+    };
+}
+
+arm_simd_orr!(vorr_s8, int8x8_t);
+arm_simd_orr!(vorrq_s8, int8x16_t);
+arm_simd_orr!(vorr_s16, int16x4_t);
+arm_simd_orr!(vorrq_s16, int16x8_t);
+arm_simd_orr!(vorr_s32, int32x2_t);
+arm_simd_orr!(vorrq_s32, int32x4_t);
+arm_simd_orr!(vorr_s64, int64x1_t);
+arm_simd_orr!(vorrq_s64, int64x2_t);
+arm_simd_orr!(vorr_u8, uint8x8_t);
+arm_simd_orr!(vorrq_u8, uint8x16_t);
+arm_simd_orr!(vorr_u16, uint16x4_t);
+arm_simd_orr!(vorrq_u16, uint16x8_t);
+arm_simd_orr!(vorr_u32, uint32x2_t);
+arm_simd_orr!(vorrq_u32, uint32x4_t);
+arm_simd_orr!(vorr_u64, uint64x1_t);
+arm_simd_orr!(vorrq_u64, uint64x2_t);

 /// Folding minimum of adjacent pairs
 #[inline]
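For reference, each new invocation is meant to expand back into one of the hand-written intrinsics deleted above. A minimal sketch of what `arm_simd_and!(vand_s8, int8x8_t)` produces once `arm_simd_2!` is applied; it relies on the crate-internal `simd_and` helper, the `int8x8_t` vector type, and the test-only `assert_instr` attribute, so it only compiles inside core_arch:

// Sketch: expansion of `arm_simd_and!(vand_s8, int8x8_t)`.
// `arm_simd_and!` forwards to `arm_simd_2!(vand_s8, int8x8_t, simd_and, vand, and)`,
// which emits the same item that was previously written out by hand.

/// Vector bitwise and.
#[inline]
#[target_feature(enable = "neon")]
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))]
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))]
#[cfg_attr(all(test, target_arch = "aarch64"), assert_instr(and))]
pub unsafe fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t {
    simd_and(a, b)
}

The only per-intrinsic inputs left are the function name, the vector type, the `simd_*` helper, and the two expected instructions, which is why the short table of `arm_simd_and!`/`arm_simd_orr!` invocations can replace roughly three hundred lines of repetition.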
