[X86] Rename AVX512 VEXTRACT/INSERT??x? to VEXTRACT/INSERT??X? #116826
Conversation
Use uppercase in the subvector description ("32x4" -> "32X4" etc.) - matches what we already do in VBROADCAST??X?, and we try to use uppercase for all x86 instruction mnemonics anyway (keeping lowercase just for the operand description suffix).
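As an aside, the rename rule is purely mechanical: only the 'x' inside the NNxM subvector-width part of the name is uppercased, while the operand-form suffix ("rri", "rmi", ...) stays lowercase. The standalone C++ sketch below is illustrative only and not part of the patch (the helper name is made up); it just captures that rule.

```cpp
#include <cctype>
#include <iostream>
#include <string>

// Illustrative helper (not part of this patch): uppercase an 'x' only when it
// sits between two digits, i.e. inside the "NNxM" subvector description, so
// operand-form suffixes like "rri"/"rmi" stay lowercase.
static std::string uppercaseSubvectorDesc(std::string Name) {
  for (size_t I = 1; I + 1 < Name.size(); ++I)
    if (Name[I] == 'x' && std::isdigit((unsigned char)Name[I - 1]) &&
        std::isdigit((unsigned char)Name[I + 1]))
      Name[I] = 'X';
  return Name;
}

int main() {
  // "VEXTRACTF32x4Z256rri" -> "VEXTRACTF32X4Z256rri"
  std::cout << uppercaseSubvectorDesc("VEXTRACTF32x4Z256rri") << '\n';
  // "VINSERTI64x2Zrmi" -> "VINSERTI64X2Zrmi"
  std::cout << uppercaseSubvectorDesc("VINSERTI64x2Zrmi") << '\n';
}
```

Running it prints VEXTRACTF32X4Z256rri and VINSERTI64X2Zrmi, matching the renames applied throughout the diff below.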
@llvm/pr-subscribers-llvm-globalisel @llvm/pr-subscribers-backend-x86

Author: Simon Pilgrim (RKSimon)

Changes: Use uppercase in the subvector description ("32x4" -> "32X4" etc.) - matches what we already do in VBROADCAST??X?, and we try to use uppercase for all x86 instruction mnemonics anyway (keeping lowercase just for the operand description suffix).

Patch is 55.67 KiB, truncated to 20.00 KiB below; full version: https://github.com/llvm/llvm-project/pull/116826.diff

11 Files Affected:
diff --git a/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp b/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp
index d425a0d507524a..ee456a11d58441 100644
--- a/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp
+++ b/llvm/lib/Target/X86/GISel/X86InstructionSelector.cpp
@@ -1254,16 +1254,16 @@ bool X86InstructionSelector::selectExtract(MachineInstr &I,
if (SrcTy.getSizeInBits() == 256 && DstTy.getSizeInBits() == 128) {
if (HasVLX)
- I.setDesc(TII.get(X86::VEXTRACTF32x4Z256rri));
+ I.setDesc(TII.get(X86::VEXTRACTF32X4Z256rri));
else if (HasAVX)
I.setDesc(TII.get(X86::VEXTRACTF128rri));
else
return false;
} else if (SrcTy.getSizeInBits() == 512 && HasAVX512) {
if (DstTy.getSizeInBits() == 128)
- I.setDesc(TII.get(X86::VEXTRACTF32x4Zrri));
+ I.setDesc(TII.get(X86::VEXTRACTF32X4Zrri));
else if (DstTy.getSizeInBits() == 256)
- I.setDesc(TII.get(X86::VEXTRACTF64x4Zrri));
+ I.setDesc(TII.get(X86::VEXTRACTF64X4Zrri));
else
return false;
} else
@@ -1387,16 +1387,16 @@ bool X86InstructionSelector::selectInsert(MachineInstr &I,
if (DstTy.getSizeInBits() == 256 && InsertRegTy.getSizeInBits() == 128) {
if (HasVLX)
- I.setDesc(TII.get(X86::VINSERTF32x4Z256rri));
+ I.setDesc(TII.get(X86::VINSERTF32X4Z256rri));
else if (HasAVX)
I.setDesc(TII.get(X86::VINSERTF128rri));
else
return false;
} else if (DstTy.getSizeInBits() == 512 && HasAVX512) {
if (InsertRegTy.getSizeInBits() == 128)
- I.setDesc(TII.get(X86::VINSERTF32x4Zrri));
+ I.setDesc(TII.get(X86::VINSERTF32X4Zrri));
else if (InsertRegTy.getSizeInBits() == 256)
- I.setDesc(TII.get(X86::VINSERTF64x4Zrri));
+ I.setDesc(TII.get(X86::VINSERTF64X4Zrri));
else
return false;
} else
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index 7adc72f8bc7d06..4094a1ba9b7903 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -427,24 +427,24 @@ multiclass vinsert_for_type<ValueType EltVT32, int Opcode128,
X86FoldableSchedWrite sched> {
let Predicates = [HasVLX] in
- defm NAME # "32x4Z256" : vinsert_for_size<Opcode128,
+ defm NAME # "32X4Z256" : vinsert_for_size<Opcode128,
X86VectorVTInfo< 4, EltVT32, VR128X>,
X86VectorVTInfo< 8, EltVT32, VR256X>,
vinsert128_insert, sched>, EVEX_V256;
- defm NAME # "32x4Z" : vinsert_for_size<Opcode128,
+ defm NAME # "32X4Z" : vinsert_for_size<Opcode128,
X86VectorVTInfo< 4, EltVT32, VR128X>,
X86VectorVTInfo<16, EltVT32, VR512>,
vinsert128_insert, sched>, EVEX_V512;
- defm NAME # "64x4Z" : vinsert_for_size<Opcode256,
+ defm NAME # "64X4Z" : vinsert_for_size<Opcode256,
X86VectorVTInfo< 4, EltVT64, VR256X>,
X86VectorVTInfo< 8, EltVT64, VR512>,
vinsert256_insert, sched>, REX_W, EVEX_V512;
// Even with DQI we'd like to only use these instructions for masking.
let Predicates = [HasVLX, HasDQI] in
- defm NAME # "64x2Z256" : vinsert_for_size_split<Opcode128,
+ defm NAME # "64X2Z256" : vinsert_for_size_split<Opcode128,
X86VectorVTInfo< 2, EltVT64, VR128X>,
X86VectorVTInfo< 4, EltVT64, VR256X>,
null_frag, vinsert128_insert, sched>,
@@ -452,13 +452,13 @@ multiclass vinsert_for_type<ValueType EltVT32, int Opcode128,
// Even with DQI we'd like to only use these instructions for masking.
let Predicates = [HasDQI] in {
- defm NAME # "64x2Z" : vinsert_for_size_split<Opcode128,
+ defm NAME # "64X2Z" : vinsert_for_size_split<Opcode128,
X86VectorVTInfo< 2, EltVT64, VR128X>,
X86VectorVTInfo< 8, EltVT64, VR512>,
null_frag, vinsert128_insert, sched>,
REX_W, EVEX_V512;
- defm NAME # "32x8Z" : vinsert_for_size_split<Opcode256,
+ defm NAME # "32X8Z" : vinsert_for_size_split<Opcode256,
X86VectorVTInfo< 8, EltVT32, VR256X>,
X86VectorVTInfo<16, EltVT32, VR512>,
null_frag, vinsert256_insert, sched>,
@@ -472,47 +472,47 @@ defm VINSERTI : vinsert_for_type<i32, 0x38, i64, 0x3a, WriteShuffle256>;
// Codegen pattern with the alternative types,
// Even with AVX512DQ we'll still use these for unmasked operations.
-defm : vinsert_for_size_lowering<"VINSERTF32x4Z256", v2f64x_info, v4f64x_info,
+defm : vinsert_for_size_lowering<"VINSERTF32X4Z256", v2f64x_info, v4f64x_info,
vinsert128_insert, INSERT_get_vinsert128_imm, [HasVLX]>;
-defm : vinsert_for_size_lowering<"VINSERTI32x4Z256", v2i64x_info, v4i64x_info,
+defm : vinsert_for_size_lowering<"VINSERTI32X4Z256", v2i64x_info, v4i64x_info,
vinsert128_insert, INSERT_get_vinsert128_imm, [HasVLX]>;
-defm : vinsert_for_size_lowering<"VINSERTF32x4Z", v2f64x_info, v8f64_info,
+defm : vinsert_for_size_lowering<"VINSERTF32X4Z", v2f64x_info, v8f64_info,
vinsert128_insert, INSERT_get_vinsert128_imm, [HasAVX512]>;
-defm : vinsert_for_size_lowering<"VINSERTI32x4Z", v2i64x_info, v8i64_info,
+defm : vinsert_for_size_lowering<"VINSERTI32X4Z", v2i64x_info, v8i64_info,
vinsert128_insert, INSERT_get_vinsert128_imm, [HasAVX512]>;
-defm : vinsert_for_size_lowering<"VINSERTF64x4Z", v8f32x_info, v16f32_info,
+defm : vinsert_for_size_lowering<"VINSERTF64X4Z", v8f32x_info, v16f32_info,
vinsert256_insert, INSERT_get_vinsert256_imm, [HasAVX512]>;
-defm : vinsert_for_size_lowering<"VINSERTI64x4Z", v8i32x_info, v16i32_info,
+defm : vinsert_for_size_lowering<"VINSERTI64X4Z", v8i32x_info, v16i32_info,
vinsert256_insert, INSERT_get_vinsert256_imm, [HasAVX512]>;
// Codegen pattern with the alternative types insert VEC128 into VEC256
-defm : vinsert_for_size_lowering<"VINSERTI32x4Z256", v8i16x_info, v16i16x_info,
+defm : vinsert_for_size_lowering<"VINSERTI32X4Z256", v8i16x_info, v16i16x_info,
vinsert128_insert, INSERT_get_vinsert128_imm, [HasVLX]>;
-defm : vinsert_for_size_lowering<"VINSERTI32x4Z256", v16i8x_info, v32i8x_info,
+defm : vinsert_for_size_lowering<"VINSERTI32X4Z256", v16i8x_info, v32i8x_info,
vinsert128_insert, INSERT_get_vinsert128_imm, [HasVLX]>;
-defm : vinsert_for_size_lowering<"VINSERTF32x4Z256", v8f16x_info, v16f16x_info,
+defm : vinsert_for_size_lowering<"VINSERTF32X4Z256", v8f16x_info, v16f16x_info,
vinsert128_insert, INSERT_get_vinsert128_imm, [HasVLX]>;
-defm : vinsert_for_size_lowering<"VINSERTF32x4Z256", v8bf16x_info, v16bf16x_info,
+defm : vinsert_for_size_lowering<"VINSERTF32X4Z256", v8bf16x_info, v16bf16x_info,
vinsert128_insert, INSERT_get_vinsert128_imm, [HasVLX]>;
// Codegen pattern with the alternative types insert VEC128 into VEC512
-defm : vinsert_for_size_lowering<"VINSERTI32x4Z", v8i16x_info, v32i16_info,
+defm : vinsert_for_size_lowering<"VINSERTI32X4Z", v8i16x_info, v32i16_info,
vinsert128_insert, INSERT_get_vinsert128_imm, [HasAVX512]>;
-defm : vinsert_for_size_lowering<"VINSERTI32x4Z", v16i8x_info, v64i8_info,
+defm : vinsert_for_size_lowering<"VINSERTI32X4Z", v16i8x_info, v64i8_info,
vinsert128_insert, INSERT_get_vinsert128_imm, [HasAVX512]>;
-defm : vinsert_for_size_lowering<"VINSERTF32x4Z", v8f16x_info, v32f16_info,
+defm : vinsert_for_size_lowering<"VINSERTF32X4Z", v8f16x_info, v32f16_info,
vinsert128_insert, INSERT_get_vinsert128_imm, [HasAVX512]>;
-defm : vinsert_for_size_lowering<"VINSERTF32x4Z", v8bf16x_info, v32bf16_info,
+defm : vinsert_for_size_lowering<"VINSERTF32X4Z", v8bf16x_info, v32bf16_info,
vinsert128_insert, INSERT_get_vinsert128_imm, [HasAVX512]>;
// Codegen pattern with the alternative types insert VEC256 into VEC512
-defm : vinsert_for_size_lowering<"VINSERTI64x4Z", v16i16x_info, v32i16_info,
+defm : vinsert_for_size_lowering<"VINSERTI64X4Z", v16i16x_info, v32i16_info,
vinsert256_insert, INSERT_get_vinsert256_imm, [HasAVX512]>;
-defm : vinsert_for_size_lowering<"VINSERTI64x4Z", v32i8x_info, v64i8_info,
+defm : vinsert_for_size_lowering<"VINSERTI64X4Z", v32i8x_info, v64i8_info,
vinsert256_insert, INSERT_get_vinsert256_imm, [HasAVX512]>;
-defm : vinsert_for_size_lowering<"VINSERTF64x4Z", v16f16x_info, v32f16_info,
+defm : vinsert_for_size_lowering<"VINSERTF64X4Z", v16f16x_info, v32f16_info,
vinsert256_insert, INSERT_get_vinsert256_imm, [HasAVX512]>;
-defm : vinsert_for_size_lowering<"VINSERTF64x4Z", v16bf16x_info, v32bf16_info,
+defm : vinsert_for_size_lowering<"VINSERTF64X4Z", v16bf16x_info, v32bf16_info,
vinsert256_insert, INSERT_get_vinsert256_imm, [HasAVX512]>;
@@ -568,81 +568,81 @@ let Predicates = p in {
}
}
-defm : vinsert_for_mask_cast<"VINSERTF32x4Z256", v2f64x_info, v4f64x_info,
+defm : vinsert_for_mask_cast<"VINSERTF32X4Z256", v2f64x_info, v4f64x_info,
v8f32x_info, vinsert128_insert,
INSERT_get_vinsert128_imm, [HasVLX]>;
-defm : vinsert_for_mask_cast<"VINSERTF64x2Z256", v4f32x_info, v8f32x_info,
+defm : vinsert_for_mask_cast<"VINSERTF64X2Z256", v4f32x_info, v8f32x_info,
v4f64x_info, vinsert128_insert,
INSERT_get_vinsert128_imm, [HasDQI, HasVLX]>;
-defm : vinsert_for_mask_cast<"VINSERTI32x4Z256", v2i64x_info, v4i64x_info,
+defm : vinsert_for_mask_cast<"VINSERTI32X4Z256", v2i64x_info, v4i64x_info,
v8i32x_info, vinsert128_insert,
INSERT_get_vinsert128_imm, [HasVLX]>;
-defm : vinsert_for_mask_cast<"VINSERTI32x4Z256", v8i16x_info, v16i16x_info,
+defm : vinsert_for_mask_cast<"VINSERTI32X4Z256", v8i16x_info, v16i16x_info,
v8i32x_info, vinsert128_insert,
INSERT_get_vinsert128_imm, [HasVLX]>;
-defm : vinsert_for_mask_cast<"VINSERTI32x4Z256", v16i8x_info, v32i8x_info,
+defm : vinsert_for_mask_cast<"VINSERTI32X4Z256", v16i8x_info, v32i8x_info,
v8i32x_info, vinsert128_insert,
INSERT_get_vinsert128_imm, [HasVLX]>;
-defm : vinsert_for_mask_cast<"VINSERTF64x2Z256", v4i32x_info, v8i32x_info,
+defm : vinsert_for_mask_cast<"VINSERTF64X2Z256", v4i32x_info, v8i32x_info,
v4i64x_info, vinsert128_insert,
INSERT_get_vinsert128_imm, [HasDQI, HasVLX]>;
-defm : vinsert_for_mask_cast<"VINSERTF64x2Z256", v8i16x_info, v16i16x_info,
+defm : vinsert_for_mask_cast<"VINSERTF64X2Z256", v8i16x_info, v16i16x_info,
v4i64x_info, vinsert128_insert,
INSERT_get_vinsert128_imm, [HasDQI, HasVLX]>;
-defm : vinsert_for_mask_cast<"VINSERTF64x2Z256", v16i8x_info, v32i8x_info,
+defm : vinsert_for_mask_cast<"VINSERTF64X2Z256", v16i8x_info, v32i8x_info,
v4i64x_info, vinsert128_insert,
INSERT_get_vinsert128_imm, [HasDQI, HasVLX]>;
-defm : vinsert_for_mask_cast<"VINSERTF32x4Z", v2f64x_info, v8f64_info,
+defm : vinsert_for_mask_cast<"VINSERTF32X4Z", v2f64x_info, v8f64_info,
v16f32_info, vinsert128_insert,
INSERT_get_vinsert128_imm, [HasAVX512]>;
-defm : vinsert_for_mask_cast<"VINSERTF64x2Z", v4f32x_info, v16f32_info,
+defm : vinsert_for_mask_cast<"VINSERTF64X2Z", v4f32x_info, v16f32_info,
v8f64_info, vinsert128_insert,
INSERT_get_vinsert128_imm, [HasDQI]>;
-defm : vinsert_for_mask_cast<"VINSERTI32x4Z", v2i64x_info, v8i64_info,
+defm : vinsert_for_mask_cast<"VINSERTI32X4Z", v2i64x_info, v8i64_info,
v16i32_info, vinsert128_insert,
INSERT_get_vinsert128_imm, [HasAVX512]>;
-defm : vinsert_for_mask_cast<"VINSERTI32x4Z", v8i16x_info, v32i16_info,
+defm : vinsert_for_mask_cast<"VINSERTI32X4Z", v8i16x_info, v32i16_info,
v16i32_info, vinsert128_insert,
INSERT_get_vinsert128_imm, [HasAVX512]>;
-defm : vinsert_for_mask_cast<"VINSERTI32x4Z", v16i8x_info, v64i8_info,
+defm : vinsert_for_mask_cast<"VINSERTI32X4Z", v16i8x_info, v64i8_info,
v16i32_info, vinsert128_insert,
INSERT_get_vinsert128_imm, [HasAVX512]>;
-defm : vinsert_for_mask_cast<"VINSERTI64x2Z", v4i32x_info, v16i32_info,
+defm : vinsert_for_mask_cast<"VINSERTI64X2Z", v4i32x_info, v16i32_info,
v8i64_info, vinsert128_insert,
INSERT_get_vinsert128_imm, [HasDQI]>;
-defm : vinsert_for_mask_cast<"VINSERTI64x2Z", v8i16x_info, v32i16_info,
+defm : vinsert_for_mask_cast<"VINSERTI64X2Z", v8i16x_info, v32i16_info,
v8i64_info, vinsert128_insert,
INSERT_get_vinsert128_imm, [HasDQI]>;
-defm : vinsert_for_mask_cast<"VINSERTI64x2Z", v16i8x_info, v64i8_info,
+defm : vinsert_for_mask_cast<"VINSERTI64X2Z", v16i8x_info, v64i8_info,
v8i64_info, vinsert128_insert,
INSERT_get_vinsert128_imm, [HasDQI]>;
-defm : vinsert_for_mask_cast<"VINSERTF32x8Z", v4f64x_info, v8f64_info,
+defm : vinsert_for_mask_cast<"VINSERTF32X8Z", v4f64x_info, v8f64_info,
v16f32_info, vinsert256_insert,
INSERT_get_vinsert256_imm, [HasDQI]>;
-defm : vinsert_for_mask_cast<"VINSERTF64x4Z", v8f32x_info, v16f32_info,
+defm : vinsert_for_mask_cast<"VINSERTF64X4Z", v8f32x_info, v16f32_info,
v8f64_info, vinsert256_insert,
INSERT_get_vinsert256_imm, [HasAVX512]>;
-defm : vinsert_for_mask_cast<"VINSERTI32x8Z", v4i64x_info, v8i64_info,
+defm : vinsert_for_mask_cast<"VINSERTI32X8Z", v4i64x_info, v8i64_info,
v16i32_info, vinsert256_insert,
INSERT_get_vinsert256_imm, [HasDQI]>;
-defm : vinsert_for_mask_cast<"VINSERTI32x8Z", v16i16x_info, v32i16_info,
+defm : vinsert_for_mask_cast<"VINSERTI32X8Z", v16i16x_info, v32i16_info,
v16i32_info, vinsert256_insert,
INSERT_get_vinsert256_imm, [HasDQI]>;
-defm : vinsert_for_mask_cast<"VINSERTI32x8Z", v32i8x_info, v64i8_info,
+defm : vinsert_for_mask_cast<"VINSERTI32X8Z", v32i8x_info, v64i8_info,
v16i32_info, vinsert256_insert,
INSERT_get_vinsert256_imm, [HasDQI]>;
-defm : vinsert_for_mask_cast<"VINSERTI64x4Z", v8i32x_info, v16i32_info,
+defm : vinsert_for_mask_cast<"VINSERTI64X4Z", v8i32x_info, v16i32_info,
v8i64_info, vinsert256_insert,
INSERT_get_vinsert256_imm, [HasAVX512]>;
-defm : vinsert_for_mask_cast<"VINSERTI64x4Z", v16i16x_info, v32i16_info,
+defm : vinsert_for_mask_cast<"VINSERTI64X4Z", v16i16x_info, v32i16_info,
v8i64_info, vinsert256_insert,
INSERT_get_vinsert256_imm, [HasAVX512]>;
-defm : vinsert_for_mask_cast<"VINSERTI64x4Z", v32i8x_info, v64i8_info,
+defm : vinsert_for_mask_cast<"VINSERTI64X4Z", v32i8x_info, v64i8_info,
v8i64_info, vinsert256_insert,
INSERT_get_vinsert256_imm, [HasAVX512]>;
@@ -732,19 +732,19 @@ multiclass vextract_for_type<ValueType EltVT32, int Opcode128,
ValueType EltVT64, int Opcode256,
SchedWrite SchedRR, SchedWrite SchedMR> {
let Predicates = [HasAVX512] in {
- defm NAME # "32x4Z" : vextract_for_size<Opcode128,
+ defm NAME # "32X4Z" : vextract_for_size<Opcode128,
X86VectorVTInfo<16, EltVT32, VR512>,
X86VectorVTInfo< 4, EltVT32, VR128X>,
vextract128_extract, SchedRR, SchedMR>,
EVEX_V512, EVEX_CD8<32, CD8VT4>;
- defm NAME # "64x4Z" : vextract_for_size<Opcode256,
+ defm NAME # "64X4Z" : vextract_for_size<Opcode256,
X86VectorVTInfo< 8, EltVT64, VR512>,
X86VectorVTInfo< 4, EltVT64, VR256X>,
vextract256_extract, SchedRR, SchedMR>,
REX_W, EVEX_V512, EVEX_CD8<64, CD8VT4>;
}
let Predicates = [HasVLX] in
- defm NAME # "32x4Z256" : vextract_for_size<Opcode128,
+ defm NAME # "32X4Z256" : vextract_for_size<Opcode128,
X86VectorVTInfo< 8, EltVT32, VR256X>,
X86VectorVTInfo< 4, EltVT32, VR128X>,
vextract128_extract, SchedRR, SchedMR>,
@@ -752,7 +752,7 @@ multiclass vextract_for_type<ValueType EltVT32, int Opcode128,
// Even with DQI we'd like to only use these instructions for masking.
let Predicates = [HasVLX, HasDQI] in
- defm NAME # "64x2Z256" : vextract_for_size_split<Opcode128,
+ defm NAME # "64X2Z256" : vextract_for_size_split<Opcode128,
X86VectorVTInfo< 4, EltVT64, VR256X>,
X86VectorVTInfo< 2, EltVT64, VR128X>,
null_frag, vextract128_extract, SchedRR, SchedMR>,
@@ -760,12 +760,12 @@ multiclass vextract_for_type<ValueType EltVT32, int Opcode128,
// Even with DQI we'd like to only use these instructions for masking.
let Predicates = [HasDQI] in {
- defm NAME # "64x2Z" : vextract_for_size_split<Opcode128,
+ defm NAME # "64X2Z" : vextract_for_size_split<Opcode128,
X86VectorVTInfo< 8, EltVT64, VR512>,
X86VectorVTInfo< 2, EltVT64, VR128X>,
null_frag, vextract128_extract, SchedRR, SchedMR>,
REX_W, EVEX_V512, EVEX_CD8<64, CD8VT2>;
- defm NAME # "32x8Z" : vextract_for_size_split<Opcode256,
+ defm NAME # "32X8Z" : vextract_for_size_split<Opcode256,
X86VectorVTInfo<16, EltVT32, VR512>,
X86VectorVTInfo< 8, EltVT32, VR256X>,
null_frag, vextract256_extract, SchedRR, SchedMR>,
@@ -779,48 +779,48 @@ defm VEXTRACTI : vextract_for_type<i32, 0x39, i64, 0x3b, WriteShuffle256, WriteV
// extract_subvector codegen patterns with the alternative types.
// Even with AVX512DQ we'll still use these for unmasked operations.
-defm : vextract_for_size_lowering<"VEXTRACTF32x4Z", v8f64_info, v2f64x_info,
+defm : vextract_for_size_lowering<"VEXTRACTF32X4Z", v8f64_info, v2f64x_info,
vextract128_extract, EXTRACT_get_vextract128_imm, [HasAVX512]>;
-defm : vextract_for_size_lowering<"VEXTRACTI32x4Z", v8i64_info, v2i64x_info,
+defm : vextract_for_size_lowering<"VEXTRACTI32X4Z", v8i64_info, v2i64x_info,
vextract128_extract, EXTRACT_get_vextract128_imm, [HasAVX512]>;
-defm : vextract_for_size_lowering<"VEXTRACTF64x4Z", v16f32_info, v8f32x_info,
+defm : vextract_for_size_lowering<"VEXTRACTF64X4Z", v16f32_info, v8f32x_info,
vextract256_extract, EXTRACT_get_vextract256_imm, [HasAVX512]>;
-defm : vextract_for_size_lowering<"VEXTRACTI64x4Z", v16i32_info, v8i32x_info,
+defm : vextract_for_size_lowering<"VEXTRACTI64X4Z", v16i32_info, v8i32x_info,
vextract256_extract, EXTRACT_get_vextract256_imm, [HasAVX512]>;
-defm : vextract_for_size_lowering<"VEXTRACTF32x4Z256", v4f64x_info, v2f64x_info,
+defm : vextract_for_size_lowering<"VEXTRACTF32X4Z256", v4f64x_info, v2f64x_info,
vextract128_extract, EXTRACT_get_vextract128_imm, [HasVLX]>;
-defm : vextract_for_size_lowering<"VEXTRACTI32x4Z256", v4i64x_info, v2i64x_info,
+defm...
[truncated]
LGTM.
LGTM