@@ -121,7 +121,7 @@ def SVLD1UW_GATHER_INDEX_S : MInst<"svld1uw_gather[_{2}base]_index_{d}", "dPul
 
 
 // First-faulting load one vector (scalar base)
-def SVLDFF1 : MInst<"svldff1[_{2}]", "dPc", "csilUcUsUiUlhfd", [IsLoad], MemEltTyDefault, "aarch64_sve_ldff1">;
+def SVLDFF1 : MInst<"svldff1[_{2}]", "dPc", "csilUcUsUiUlhfdm", [IsLoad], MemEltTyDefault, "aarch64_sve_ldff1">;
 def SVLDFF1SB : MInst<"svldff1sb_{d}", "dPS", "silUsUiUl", [IsLoad], MemEltTyInt8, "aarch64_sve_ldff1">;
 def SVLDFF1UB : MInst<"svldff1ub_{d}", "dPW", "silUsUiUl", [IsLoad, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ldff1">;
 def SVLDFF1SH : MInst<"svldff1sh_{d}", "dPT", "ilUiUl", [IsLoad], MemEltTyInt16, "aarch64_sve_ldff1">;
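Adding `m` to the type string extends the overload set with an `mfloat8_t` element type, so `svldff1` gains an FP8 form alongside the existing ones. A minimal usage sketch, assuming the generated intrinsic follows the existing naming scheme (`svldff1_mf8`) and that the toolchain already provides the FP8 ACLE types `mfloat8_t`/`svmfloat8_t`:

```c
// Sketch only: assumes a compiler that defines mfloat8_t/svmfloat8_t and
// emits svldff1_mf8 from this TableGen change.
#include <arm_sve.h>

svmfloat8_t load_first_faulting(const mfloat8_t *base) {
  svsetffr();                             // clear the first-fault register
  svbool_t pg = svptrue_b8();             // all 8-bit lanes active
  svmfloat8_t v = svldff1_mf8(pg, base);  // first-faulting load of FP8 data
  svbool_t ok = svrdffr();                // lanes that loaded without faulting
  (void)ok;                               // a real caller would test/loop on this
  return v;
}
```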
@@ -130,7 +130,7 @@ def SVLDFF1SW : MInst<"svldff1sw_{d}", "dPU", "lUl", [IsLoad],
 def SVLDFF1UW : MInst<"svldff1uw_{d}", "dPY", "lUl", [IsLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldff1">;
 
 // First-faulting load one vector (scalar base, VL displacement)
-def SVLDFF1_VNUM : MInst<"svldff1_vnum[_{2}]", "dPcl", "csilUcUsUiUlhfd", [IsLoad], MemEltTyDefault, "aarch64_sve_ldff1">;
+def SVLDFF1_VNUM : MInst<"svldff1_vnum[_{2}]", "dPcl", "csilUcUsUiUlhfdm", [IsLoad], MemEltTyDefault, "aarch64_sve_ldff1">;
 def SVLDFF1SB_VNUM : MInst<"svldff1sb_vnum_{d}", "dPSl", "silUsUiUl", [IsLoad], MemEltTyInt8, "aarch64_sve_ldff1">;
 def SVLDFF1UB_VNUM : MInst<"svldff1ub_vnum_{d}", "dPWl", "silUsUiUl", [IsLoad, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ldff1">;
 def SVLDFF1SH_VNUM : MInst<"svldff1sh_vnum_{d}", "dPTl", "ilUiUl", [IsLoad], MemEltTyInt16, "aarch64_sve_ldff1">;
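The `_vnum` form takes the same FP8 element type; for 8-bit elements the displacement is measured in whole vectors, i.e. `vnum * svcntb()` bytes. A hedged sketch assuming `svldff1_vnum_mf8` is generated exactly as for the other element types:

```c
#include <arm_sve.h>
#include <stdint.h>

// Sketch: load two consecutive vector-length chunks of FP8 data.
// svldff1_vnum_mf8 is assumed to exist once 'm' is in the type list.
void load_two_chunks(const mfloat8_t *base, svmfloat8_t *lo, svmfloat8_t *hi) {
  svbool_t pg = svptrue_b8();
  svsetffr();
  *lo = svldff1_vnum_mf8(pg, base, 0);  // base + 0 * svcntb() bytes
  *hi = svldff1_vnum_mf8(pg, base, 1);  // base + 1 * svcntb() bytes
}
```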
@@ -223,7 +223,7 @@ def SVLDFF1SW_GATHER_INDEX_S : MInst<"svldff1sw_gather[_{2}base]_index_{d}", "dP
 def SVLDFF1UW_GATHER_INDEX_S : MInst<"svldff1uw_gather[_{2}base]_index_{d}", "dPul", "lUl", [IsGatherLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldff1_gather_scalar_offset">;
 
 // Non-faulting load one vector (scalar base)
-def SVLDNF1 : MInst<"svldnf1[_{2}]", "dPc", "csilUcUsUiUlhfd", [IsLoad], MemEltTyDefault, "aarch64_sve_ldnf1">;
+def SVLDNF1 : MInst<"svldnf1[_{2}]", "dPc", "csilUcUsUiUlhfdm", [IsLoad], MemEltTyDefault, "aarch64_sve_ldnf1">;
 def SVLDNF1SB : MInst<"svldnf1sb_{d}", "dPS", "silUsUiUl", [IsLoad], MemEltTyInt8, "aarch64_sve_ldnf1">;
 def SVLDNF1UB : MInst<"svldnf1ub_{d}", "dPW", "silUsUiUl", [IsLoad, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ldnf1">;
 def SVLDNF1SH : MInst<"svldnf1sh_{d}", "dPT", "ilUiUl", [IsLoad], MemEltTyInt16, "aarch64_sve_ldnf1">;
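Non-faulting loads never take the fault: lanes that would have faulted are reported through the FFR instead, which is what makes them usable for speculative loops over FP8 buffers of unknown length. A sketch assuming `svldnf1_mf8` is emitted with the usual signature:

```c
#include <arm_sve.h>
#include <stdint.h>

// Sketch: speculative read that reports how many leading FP8 lanes were valid.
svmfloat8_t read_speculative(const mfloat8_t *base, uint64_t *valid_lanes) {
  svbool_t pg = svptrue_b8();
  svsetffr();
  svmfloat8_t v = svldnf1_mf8(pg, base);  // non-faulting load
  svbool_t ffr = svrdffr_z(pg);           // lanes that actually loaded
  *valid_lanes = svcntp_b8(pg, ffr);      // count of valid leading lanes
  return v;
}
```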
@@ -232,7 +232,7 @@ def SVLDNF1SW : MInst<"svldnf1sw_{d}", "dPU", "lUl", [IsLoad],
 def SVLDNF1UW : MInst<"svldnf1uw_{d}", "dPY", "lUl", [IsLoad, IsZExtReturn], MemEltTyInt32, "aarch64_sve_ldnf1">;
 
 // Non-faulting load one vector (scalar base, VL displacement)
-def SVLDNF1_VNUM : MInst<"svldnf1_vnum[_{2}]", "dPcl", "csilUcUsUiUlhfd", [IsLoad], MemEltTyDefault, "aarch64_sve_ldnf1">;
+def SVLDNF1_VNUM : MInst<"svldnf1_vnum[_{2}]", "dPcl", "csilUcUsUiUlhfdm", [IsLoad], MemEltTyDefault, "aarch64_sve_ldnf1">;
 def SVLDNF1SB_VNUM : MInst<"svldnf1sb_vnum_{d}", "dPSl", "silUsUiUl", [IsLoad], MemEltTyInt8, "aarch64_sve_ldnf1">;
 def SVLDNF1UB_VNUM : MInst<"svldnf1ub_vnum_{d}", "dPWl", "silUsUiUl", [IsLoad, IsZExtReturn], MemEltTyInt8, "aarch64_sve_ldnf1">;
 def SVLDNF1SH_VNUM : MInst<"svldnf1sh_vnum_{d}", "dPTl", "ilUiUl", [IsLoad], MemEltTyInt16, "aarch64_sve_ldnf1">;
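As with the first-faulting variant, the `_vnum` displacement is in whole vectors. A small sketch under the same naming assumption (`svldnf1_vnum_mf8`):

```c
#include <arm_sve.h>
#include <stdint.h>

// Sketch: non-faulting load of the FP8 chunk at a given vector index.
svmfloat8_t load_chunk(const mfloat8_t *base, int64_t vnum) {
  svbool_t pg = svptrue_b8();
  svsetffr();
  return svldnf1_vnum_mf8(pg, base, vnum);  // base + vnum * svcntb() bytes
}
```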
@@ -258,7 +258,7 @@ let SVETargetGuard = "sve,bf16", SMETargetGuard = "sme,bf16" in {
 }
 
 // Load one quadword and replicate (scalar base)
-def SVLD1RQ : SInst<"svld1rq[_{2}]", "dPc", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_ld1rq", [VerifyRuntimeMode]>;
+def SVLD1RQ : SInst<"svld1rq[_{2}]", "dPc", "csilUcUsUiUlhfdm", MergeNone, "aarch64_sve_ld1rq", [VerifyRuntimeMode]>;
 
 let SVETargetGuard = "sve,bf16", SMETargetGuard = "sme,bf16" in {
   def SVLD1RQ_BF : SInst<"svld1rq[_{2}]", "dPc", "b", MergeNone, "aarch64_sve_ld1rq", [VerifyRuntimeMode]>;
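`svld1rq` loads one 128-bit quadword and replicates it to every quadword of the vector, the usual way to broadcast a packed operand to matrix-multiply style instructions. A sketch assuming the FP8 overload comes out as `svld1rq_mf8`:

```c
#include <arm_sve.h>

// Sketch: broadcast 16 bytes of FP8 data across the whole vector.
svmfloat8_t broadcast_quadword(const mfloat8_t *p16) {
  svbool_t pg = svptrue_b8();
  return svld1rq_mf8(pg, p16);  // each 128-bit segment gets the same 16 bytes
}
```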
@@ -283,7 +283,7 @@ defm SVLD4_VNUM : StructLoad<"svld4_vnum[_{2}]", "4Pcl", "aarch64_sve_ld4_sret">
 
 // Load one octoword and replicate (scalar base)
 let SVETargetGuard = "sve,f64mm", SMETargetGuard = InvalidMode in {
-  def SVLD1RO : SInst<"svld1ro[_{2}]", "dPc", "csilUcUsUiUlhfd", MergeNone, "aarch64_sve_ld1ro">;
+  def SVLD1RO : SInst<"svld1ro[_{2}]", "dPc", "csilUcUsUiUlhfdm", MergeNone, "aarch64_sve_ld1ro">;
 }
 let SVETargetGuard = "sve,f64mm,bf16", SMETargetGuard = InvalidMode in {
   def SVLD1RO_BF16 : SInst<"svld1ro[_{2}]", "dPc", "b", MergeNone, "aarch64_sve_ld1ro">;
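`svld1ro` sits under the f64mm target guard, so an FP8 caller also needs that feature; it loads a 256-bit octoword and replicates it across the vector. A guarded sketch, assuming the overload is emitted as `svld1ro_mf8`:

```c
#include <arm_sve.h>

// Sketch: broadcast 32 bytes of FP8 data; requires sve+f64mm
// (__ARM_FEATURE_SVE_MATMUL_FP64 is the f64mm feature macro).
#if defined(__ARM_FEATURE_SVE_MATMUL_FP64)
svmfloat8_t broadcast_octoword(const mfloat8_t *p32) {
  svbool_t pg = svptrue_b8();
  return svld1ro_mf8(pg, p32);  // each 256-bit segment gets the same 32 bytes
}
#endif
```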