[RISCV] Update some of the RVV memory ops in SiFive P400 & P600 sched models #129575
Conversation
@llvm/pr-subscribers-backend-risc-v

Author: Min-Yih Hsu (mshockwave)

Changes: This patch updates the latencies as well as the occupancies of unit-stride, strided, and indexed load/store instructions in the SiFive P400 & P600 scheduling models.

Patch is 242.02 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/129575.diff

8 Files Affected:
diff --git a/llvm/lib/Target/RISCV/RISCVSchedSiFiveP400.td b/llvm/lib/Target/RISCV/RISCVSchedSiFiveP400.td
index e7f8f88e3909f..00edb32d954c0 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedSiFiveP400.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedSiFiveP400.td
@@ -22,6 +22,8 @@ class SiFiveP400IsWorstCaseMXSEW<string mx, int sew, list<string> MxList, bit is
bit c = !and(!eq(mx, LLMUL), !eq(sew, SSEW));
}
+defvar SiFiveP400VLEN = 128;
+
// 1 Micro-Op per cycle.
class SiFiveP400GetLMulCycles<string mx> {
int c = !cond(
@@ -35,19 +37,19 @@ class SiFiveP400GetLMulCycles<string mx> {
);
}
-// Latency for segmented loads and stores are calculated as vl * nf.
-class SiFiveP400GetCyclesSegmented<string mx, int sew, int nf> {
- defvar VLEN = 128;
- defvar VLUpperBound = !cond(
- !eq(mx, "M1") : !div(VLEN, sew),
- !eq(mx, "M2") : !div(!mul(VLEN, 2), sew),
- !eq(mx, "M4") : !div(!mul(VLEN, 4), sew),
- !eq(mx, "M8") : !div(!mul(VLEN, 8), sew),
- !eq(mx, "MF2") : !div(!div(VLEN, 2), sew),
- !eq(mx, "MF4") : !div(!div(VLEN, 4), sew),
- !eq(mx, "MF8") : !div(!div(VLEN, 8), sew),
+class SiFiveP400GetVLMAX<string mx, int sew> {
+ defvar LMUL = SiFiveP400GetLMulCycles<mx>.c;
+ int val = !cond(
+ !eq(mx, "MF2") : !div(!div(SiFiveP400VLEN, 2), sew),
+ !eq(mx, "MF4") : !div(!div(SiFiveP400VLEN, 4), sew),
+ !eq(mx, "MF8") : !div(!div(SiFiveP400VLEN, 8), sew),
+ true: !div(!mul(SiFiveP400VLEN, LMUL), sew)
);
- int c = !mul(VLUpperBound, nf);
+}
+
+// Latency for segmented loads and stores are calculated as vl * nf.
+class SiFiveP400SegmentedLdStCycles<string mx, int sew, int nf> {
+ int c = !mul(SiFiveP400GetVLMAX<mx, sew>.val, nf);
}
// Both variants of floating point vector reductions are based on numbers collected
@@ -368,57 +370,36 @@ def : WriteRes<WriteVSETIVLI, [SiFiveP400SYS]>;
def : WriteRes<WriteVSETVL, [SiFiveP400SYS]>;
// 7. Vector Loads and Stores
-// FIXME: This unit is still being improved, currently
-// it is based on stage numbers. Estimates are optimistic,
-// latency may be longer.
-foreach mx = SchedMxList in {
- defvar LMulLat = SiFiveP400GetLMulCycles<mx>.c;
- defvar IsWorstCase = SiFiveP400IsWorstCaseMX<mx, SchedMxList>.c;
- let Latency = 8, ReleaseAtCycles = [LMulLat] in {
- defm "" : LMULWriteResMX<"WriteVLDE", [SiFiveP400VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDM", [SiFiveP400VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDFF", [SiFiveP400VLD], mx, IsWorstCase>;
- }
- let Latency = 12, ReleaseAtCycles = [LMulLat] in {
- defm "" : LMULWriteResMX<"WriteVLDS8", [SiFiveP400VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDS16", [SiFiveP400VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDS32", [SiFiveP400VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDS64", [SiFiveP400VLD], mx, IsWorstCase>;
- }
- let Latency = 12, ReleaseAtCycles = [LMulLat] in {
- defm "" : LMULWriteResMX<"WriteVLDUX8", [SiFiveP400VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDUX16", [SiFiveP400VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDUX32", [SiFiveP400VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDUX64", [SiFiveP400VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDOX8", [SiFiveP400VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDOX16", [SiFiveP400VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDOX32", [SiFiveP400VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDOX64", [SiFiveP400VLD], mx, IsWorstCase>;
- }
-}
+// Note that the latency of vector loads are measured by consuming the loaded
+// value with vmv.x.s before subtracting the latency of vmv.x.s from the number.
foreach mx = SchedMxList in {
defvar LMulLat = SiFiveP400GetLMulCycles<mx>.c;
defvar IsWorstCase = SiFiveP400IsWorstCaseMX<mx, SchedMxList>.c;
- let Latency = 8, ReleaseAtCycles = [LMulLat] in {
- defm "" : LMULWriteResMX<"WriteVSTE", [SiFiveP400VST], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSTM", [SiFiveP400VST], mx, IsWorstCase>;
- }
- let Latency = 12, ReleaseAtCycles = [LMulLat] in {
- defm "" : LMULWriteResMX<"WriteVSTS8", [SiFiveP400VST], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSTS16", [SiFiveP400VST], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSTS32", [SiFiveP400VST], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSTS64", [SiFiveP400VST], mx, IsWorstCase>;
+ let Latency = 8 in {
+ let ReleaseAtCycles = [LMulLat] in {
+ defm "" : LMULWriteResMX<"WriteVLDE", [SiFiveP400VLD], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVLDFF", [SiFiveP400VLD], mx, IsWorstCase>;
+
+ defm "" : LMULWriteResMX<"WriteVSTE", [SiFiveP400VST], mx, IsWorstCase>;
+ }
+
+ // Mask load and store always have EMUL=1.
+ let ReleaseAtCycles = [SiFiveP400GetLMulCycles<"M1">.c] in {
+ defm "" : LMULWriteResMX<"WriteVLDM", [SiFiveP400VLD], mx, IsWorstCase=!eq(mx, "M1")>;
+ defm "" : LMULWriteResMX<"WriteVSTM", [SiFiveP400VST], mx, IsWorstCase=!eq(mx, "M1")>;
+ }
}
- let Latency = 12, ReleaseAtCycles = [LMulLat] in {
- defm "" : LMULWriteResMX<"WriteVSTUX8", [SiFiveP400VST], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSTUX16", [SiFiveP400VST], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSTUX32", [SiFiveP400VST], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSTUX64", [SiFiveP400VST], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSTOX8", [SiFiveP400VST], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSTOX16", [SiFiveP400VST], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSTOX32", [SiFiveP400VST], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSTOX64", [SiFiveP400VST], mx, IsWorstCase>;
+ foreach eew = [8, 16, 32, 64] in {
+ let Latency = 13, ReleaseAtCycles = [SiFiveP400GetVLMAX<mx, eew>.val] in {
+ defm "" : LMULWriteResMX<"WriteVLDS" # eew, [SiFiveP400VLD], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVLDUX" # eew, [SiFiveP400VLD], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVLDOX" # eew, [SiFiveP400VLD], mx, IsWorstCase>;
+
+ defm "" : LMULWriteResMX<"WriteVSTS" # eew, [SiFiveP400VST], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSTUX" # eew, [SiFiveP400VST], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSTOX" # eew, [SiFiveP400VST], mx, IsWorstCase>;
+ }
}
}
@@ -426,7 +407,7 @@ foreach mx = SchedMxList in {
foreach nf=2-8 in {
foreach eew = [8, 16, 32, 64] in {
defvar IsWorstCase = SiFiveP400IsWorstCaseMX<mx, SchedMxList>.c;
- defvar LMulLat = SiFiveP400GetCyclesSegmented<mx, eew, nf>.c;
+ defvar LMulLat = SiFiveP400SegmentedLdStCycles<mx, eew, nf>.c;
let Latency = !add(12, LMulLat), ReleaseAtCycles = [!add(12, LMulLat)] in {
defm "" : LMULWriteResMX<"WriteVLSEG" # nf # "e" #eew, [SiFiveP400VLD], mx, IsWorstCase>;
defm "" : LMULWriteResMX<"WriteVLSEGFF" # nf # "e" #eew, [SiFiveP400VLD], mx, IsWorstCase>;
diff --git a/llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td b/llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td
index 60d41b02f0e8a..a66ca3dcd9790 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td
@@ -22,6 +22,8 @@ class SiFiveP600IsWorstCaseMXSEW<string mx, int sew, list<string> MxList, bit is
bit c = !and(!eq(mx, LLMUL), !eq(sew, SSEW));
}
+defvar SiFiveP600VLEN = 128;
+
// 1 Micro-Op per cycle.
class SiFiveP600GetLMulCycles<string mx> {
int c = !cond(
@@ -35,19 +37,19 @@ class SiFiveP600GetLMulCycles<string mx> {
);
}
-// Latency for segmented loads and stores are calculated as vl * nf.
-class SiFiveP600GetCyclesSegmented<string mx, int sew, int nf> {
- defvar VLEN = 128;
- defvar VLUpperBound = !cond(
- !eq(mx, "M1") : !div(VLEN, sew),
- !eq(mx, "M2") : !div(!mul(VLEN, 2), sew),
- !eq(mx, "M4") : !div(!mul(VLEN, 4), sew),
- !eq(mx, "M8") : !div(!mul(VLEN, 8), sew),
- !eq(mx, "MF2") : !div(!div(VLEN, 2), sew),
- !eq(mx, "MF4") : !div(!div(VLEN, 4), sew),
- !eq(mx, "MF8") : !div(!div(VLEN, 8), sew),
+class SiFiveP600GetVLMAX<string mx, int sew> {
+ defvar LMUL = SiFiveP600GetLMulCycles<mx>.c;
+ int val = !cond(
+ !eq(mx, "MF2") : !div(!div(SiFiveP600VLEN, 2), sew),
+ !eq(mx, "MF4") : !div(!div(SiFiveP600VLEN, 4), sew),
+ !eq(mx, "MF8") : !div(!div(SiFiveP600VLEN, 8), sew),
+ true: !div(!mul(SiFiveP600VLEN, LMUL), sew)
);
- int c = !mul(VLUpperBound, nf);
+}
+
+// Latency for segmented loads and stores are calculated as vl * nf.
+class SiFiveP600SegmentedLdStCycles<string mx, int sew, int nf> {
+ int c = !mul(SiFiveP600GetVLMAX<mx, sew>.val, nf);
}
class SiFiveP600VSM3CCycles<string mx> {
@@ -544,64 +546,43 @@ def : WriteRes<WriteVSETIVLI, [SiFiveP600SYS]>;
def : WriteRes<WriteVSETVL, [SiFiveP600SYS]>;
// 7. Vector Loads and Stores
-// FIXME: This unit is still being improved, currently
-// it is based on stage numbers. Estimates are optimistic,
-// latency may be longer.
-foreach mx = SchedMxList in {
- defvar LMulLat = SiFiveP600GetLMulCycles<mx>.c;
- defvar IsWorstCase = SiFiveP600IsWorstCaseMX<mx, SchedMxList>.c;
- let Latency = 8, ReleaseAtCycles = [LMulLat] in {
- defm "" : LMULWriteResMX<"WriteVLDE", [SiFiveP600VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDM", [SiFiveP600VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDFF", [SiFiveP600VLD], mx, IsWorstCase>;
- }
- let Latency = 12, ReleaseAtCycles = [LMulLat] in {
- defm "" : LMULWriteResMX<"WriteVLDS8", [SiFiveP600VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDS16", [SiFiveP600VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDS32", [SiFiveP600VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDS64", [SiFiveP600VLD], mx, IsWorstCase>;
- }
- let Latency = 12, ReleaseAtCycles = [LMulLat] in {
- defm "" : LMULWriteResMX<"WriteVLDUX8", [SiFiveP600VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDUX16", [SiFiveP600VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDUX32", [SiFiveP600VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDUX64", [SiFiveP600VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDOX8", [SiFiveP600VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDOX16", [SiFiveP600VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDOX32", [SiFiveP600VLD], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVLDOX64", [SiFiveP600VLD], mx, IsWorstCase>;
- }
-}
+// Note that the latency of vector loads are measured by consuming the loaded
+// value with vmv.x.s before subtracting the latency of vmv.x.s from the number.
foreach mx = SchedMxList in {
defvar LMulLat = SiFiveP600GetLMulCycles<mx>.c;
defvar IsWorstCase = SiFiveP600IsWorstCaseMX<mx, SchedMxList>.c;
- let Latency = 8, ReleaseAtCycles = [LMulLat] in {
- defm "" : LMULWriteResMX<"WriteVSTE", [SiFiveP600VST], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSTM", [SiFiveP600VST], mx, IsWorstCase>;
- }
- let Latency = 12, ReleaseAtCycles = [LMulLat] in {
- defm "" : LMULWriteResMX<"WriteVSTS8", [SiFiveP600VST], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSTS16", [SiFiveP600VST], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSTS32", [SiFiveP600VST], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSTS64", [SiFiveP600VST], mx, IsWorstCase>;
+ let Latency = 8 in {
+ let ReleaseAtCycles = [LMulLat] in {
+ defm "" : LMULWriteResMX<"WriteVLDE", [SiFiveP600VLD], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVLDFF", [SiFiveP600VLD], mx, IsWorstCase>;
+
+ defm "" : LMULWriteResMX<"WriteVSTE", [SiFiveP600VST], mx, IsWorstCase>;
+ }
+
+ // Mask load and store always have EMUL=1.
+ let ReleaseAtCycles = [SiFiveP600GetLMulCycles<"M1">.c] in {
+ defm "" : LMULWriteResMX<"WriteVLDM", [SiFiveP600VLD], mx, IsWorstCase=!eq(mx,"M1")>;
+ defm "" : LMULWriteResMX<"WriteVSTM", [SiFiveP600VST], mx, IsWorstCase=!eq(mx,"M1")>;
+ }
}
- let Latency = 12, ReleaseAtCycles = [LMulLat] in {
- defm "" : LMULWriteResMX<"WriteVSTUX8", [SiFiveP600VST], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSTUX16", [SiFiveP600VST], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSTUX32", [SiFiveP600VST], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSTUX64", [SiFiveP600VST], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSTOX8", [SiFiveP600VST], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSTOX16", [SiFiveP600VST], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSTOX32", [SiFiveP600VST], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVSTOX64", [SiFiveP600VST], mx, IsWorstCase>;
+ foreach eew = [8, 16, 32, 64] in {
+ let Latency = 13, ReleaseAtCycles = [SiFiveP600GetVLMAX<mx, eew>.val] in {
+ defm "" : LMULWriteResMX<"WriteVLDS" # eew, [SiFiveP600VLD], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVLDUX" # eew, [SiFiveP600VLD], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVLDOX" # eew, [SiFiveP600VLD], mx, IsWorstCase>;
+
+ defm "" : LMULWriteResMX<"WriteVSTS" # eew, [SiFiveP600VST], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSTUX" # eew, [SiFiveP600VST], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSTOX" # eew, [SiFiveP600VST], mx, IsWorstCase>;
+ }
}
}
foreach mx = SchedMxList in {
foreach nf=2-8 in {
foreach eew = [8, 16, 32, 64] in {
- defvar LMulLat = SiFiveP600GetCyclesSegmented<mx, eew, nf>.c;
+ defvar LMulLat = SiFiveP600SegmentedLdStCycles<mx, eew, nf>.c;
defvar IsWorstCase = SiFiveP600IsWorstCaseMX<mx, SchedMxList>.c;
let Latency = !add(12, LMulLat), ReleaseAtCycles = [!add(12, LMulLat)] in {
defm "" : LMULWriteResMX<"WriteVLSEG" # nf # "e" # eew, [SiFiveP600VLD], mx, IsWorstCase>;
diff --git a/llvm/test/tools/llvm-mca/RISCV/SiFiveP400/vle-vse-vlm.s b/llvm/test/tools/llvm-mca/RISCV/SiFiveP400/vle-vse-vlm.s
new file mode 100644
index 0000000000000..0f6802a17e0c5
--- /dev/null
+++ b/llvm/test/tools/llvm-mca/RISCV/SiFiveP400/vle-vse-vlm.s
@@ -0,0 +1,542 @@
+# NOTE: Assertions have been autogenerated by utils/update_mca_test_checks.py
+# RUN: llvm-mca -mtriple=riscv64 -mcpu=sifive-p470 -iterations=1 < %s | FileCheck %s
+
+vsetvli zero, zero, e8, mf2, ta, ma
+vle8.v v8, (a0)
+vsetvli zero, zero, e8, mf4, ta, ma
+vle8.v v8, (a0)
+vsetvli zero, zero, e8, mf8, ta, ma
+vle8.v v8, (a0)
+vsetvli zero, zero, e8, m1, ta, ma
+vle8.v v8, (a0)
+vsetvli zero, zero, e8, m2, ta, ma
+vle8.v v8, (a0)
+vsetvli zero, zero, e8, m4, ta, ma
+vle8.v v8, (a0)
+vsetvli zero, zero, e8, m8, ta, ma
+vle8.v v8, (a0)
+
+vsetvli zero, zero, e16, mf2, ta, ma
+vle16.v v8, (a0)
+vsetvli zero, zero, e16, mf4, ta, ma
+vle16.v v8, (a0)
+vsetvli zero, zero, e16, m1, ta, ma
+vle16.v v8, (a0)
+vsetvli zero, zero, e16, m2, ta, ma
+vle16.v v8, (a0)
+vsetvli zero, zero, e16, m4, ta, ma
+vle16.v v8, (a0)
+vsetvli zero, zero, e16, m8, ta, ma
+vle16.v v8, (a0)
+
+vsetvli zero, zero, e32, mf2, ta, ma
+vle32.v v8, (a0)
+vsetvli zero, zero, e32, m1, ta, ma
+vle32.v v8, (a0)
+vsetvli zero, zero, e32, m2, ta, ma
+vle32.v v8, (a0)
+vsetvli zero, zero, e32, m4, ta, ma
+vle32.v v8, (a0)
+vsetvli zero, zero, e32, m8, ta, ma
+vle32.v v8, (a0)
+
+vsetvli zero, zero, e64, m1, ta, ma
+vle64.v v8, (a0)
+vsetvli zero, zero, e64, m2, ta, ma
+vle64.v v8, (a0)
+vsetvli zero, zero, e64, m4, ta, ma
+vle64.v v8, (a0)
+vsetvli zero, zero, e64, m8, ta, ma
+vle64.v v8, (a0)
+
+vsetvli zero, zero, e8, mf2, ta, ma
+vse8.v v8, (a0)
+vsetvli zero, zero, e8, mf4, ta, ma
+vse8.v v8, (a0)
+vsetvli zero, zero, e8, mf8, ta, ma
+vse8.v v8, (a0)
+vsetvli zero, zero, e8, m1, ta, ma
+vse8.v v8, (a0)
+vsetvli zero, zero, e8, m2, ta, ma
+vse8.v v8, (a0)
+vsetvli zero, zero, e8, m4, ta, ma
+vse8.v v8, (a0)
+vsetvli zero, zero, e8, m8, ta, ma
+vse8.v v8, (a0)
+
+vsetvli zero, zero, e16, mf2, ta, ma
+vse16.v v8, (a0)
+vsetvli zero, zero, e16, mf4, ta, ma
+vse16.v v8, (a0)
+vsetvli zero, zero, e16, m1, ta, ma
+vse16.v v8, (a0)
+vsetvli zero, zero, e16, m2, ta, ma
+vse16.v v8, (a0)
+vsetvli zero, zero, e16, m4, ta, ma
+vse16.v v8, (a0)
+vsetvli zero, zero, e16, m8, ta, ma
+vse16.v v8, (a0)
+
+vsetvli zero, zero, e32, mf2, ta, ma
+vse32.v v8, (a0)
+vsetvli zero, zero, e32, m1, ta, ma
+vse32.v v8, (a0)
+vsetvli zero, zero, e32, m2, ta, ma
+vse32.v v8, (a0)
+vsetvli zero, zero, e32, m4, ta, ma
+vse32.v v8, (a0)
+vsetvli zero, zero, e32, m8, ta, ma
+vse32.v v8, (a0)
+
+vsetvli zero, zero, e64, m1, ta, ma
+vse64.v v8, (a0)
+vsetvli zero, zero, e64, m2, ta, ma
+vse64.v v8, (a0)
+vsetvli zero, zero, e64, m4, ta, ma
+vse64.v v8, (a0)
+vsetvli zero, zero, e64, m8, ta, ma
+vse64.v v8, (a0)
+
+# Unit-stride mask load/store
+
+vsetvli zero, zero, e8, mf2, ta, ma
+vlm.v v8, (a0)
+vsetvli zero, zero, e8, mf4, ta, ma
+vlm.v v8, (a0)
+vsetvli zero, zero, e8, mf8, ta, ma
+vlm.v v8, (a0)
+vsetvli zero, zero, e8, m1, ta, ma
+vlm.v v8, (a0)
+vsetvli zero, zero, e8, m2, ta, ma
+vlm.v v8, (a0)
+vsetvli zero, zero, e8, m4, ta, ma
+vlm.v v8, (a0)
+vsetvli zero, zero, e8, m8, ta, ma
+vlm.v v8, (a0)
+
+vsetvli zero, zero, e8, mf2, ta, ma
+vsm.v v8, (a0)
+vsetvli zero, zero, e8, mf4, ta, ma
+vsm.v v8, (a0)
+vsetvli zero, zero, e8, mf8, ta, ma
+vsm.v v8, (a0)
+vsetvli zero, zero, e8, m1, ta, ma
+vsm.v v8, (a0)
+vsetvli zero, zero, e8, m2, ta, ma
+vsm.v v8, (a0)
+vsetvli zero, zero, e8, m4, ta, ma
+vsm.v v8, (a0)
+vsetvli zero, zero, e8, m8, ta, ma
+vsm.v v8, (a0)
+
+# Fault-only-first
+
+vsetvli zero, zero, e8, mf2, ta, ma
+vle8ff.v v8, (a0)
+vsetvli zero, zero, e8, mf4, ta, ma
+vle8ff.v v8, (a0)
+vsetvli zero, zero, e8, mf8, ta, ma
+vle8ff.v v8, (a0)
+vsetvli zero, zero, e8, m1, ta, ma
+vle8ff.v v8, (a0)
+vsetvli zero, zero, e8, m2, ta, ma
+vle8ff.v v8, (a0)
+vsetvli zero, zero, e8, m4, ta, ma
+vle8ff.v v8, (a0)
+vsetvli zero, zero, e8, m8, ta, ma
+vle8ff.v v8, (a0)
+
+vsetvli zero, zero, e16, mf2, ta, ma
+vle16ff.v v8, (a0)
+vsetvli zero, zero, e16, mf4, ta, ma
+vle16ff.v v8, (a0)
+vsetvli zero, zero, e16, m1, ta, ma
+vle16ff.v v8, (a0)
+vsetvli zero, zero, e16, m2, ta, ma
+vle16ff.v v8, (a0)
+vsetvli zero, zero, e16, m4, ta, ma
+vle16ff.v v8, (a0)
+vsetvli zero, zero, e16, m8, ta, ma
+vle16ff.v v8, (a0)
+
+vsetvli zero, zero, e32, mf2, ta, ma
+vle32ff.v v8, (a0)
+vsetvli zero, zero, e32, m1, ta, ma
+vle32ff.v v8, (a0)
+vsetvli zero, zero, e32, m2, ta, ma
+vle32ff.v v8, (a0)
+vsetvli zero, zero, e32, m4, ta, ma
+vle32ff.v v8, (a0)
+vsetvli zero, zero, e32, m8, ta, ma
+vle32ff.v v8, (a0)
+
+vsetvli zero, zero, e64, m1, ta, ma
+vle64ff.v v8, (a0)
+vsetvli zero, zero, e64, m2, ta, ma
+vle64ff.v v8, (a0)
+vsetvli zero, zero, e64, m4, ta, ma
+vle64ff.v v8, (a0)
+vsetvli zero, zero, e64, m8, ta, ma
+vle64ff.v v8, (a0)
+
+# CHECK: Iterations: 1
+# CHECK-NEXT: Instructions: 160
+# CHECK-NEXT: Total Cycles: 146
+# CHECK-NEXT: Total uOps: 160
+
+# CHECK: Dispatch Width: 3
+# CHECK-NEXT: uOps Per Cycle: 1.10
+# CHECK-NEXT: IPC: 1.10
+# CHECK-NEXT: Block RThroughput: 139.0
+
+# CHECK: Instruction Info:
+# CHECK-NEXT: [1]: #uOps
+# CHECK-NEXT: [2]: Latency
+# CHECK-NEXT: [3]: RThroughput
+# CHECK-NEXT: [4]: MayLoad
+# CHECK-NEXT: [5]: MayStore
+# CHECK-NEXT: [6]: HasSideEffects (U)
+
+# CHECK: [1] [2] [3] [4] [5] [6] Instructions:
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, mf2, ta, ma
+# CHECK-NEXT: 1 8 1.00 * vle8.v v8, (a0)
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, mf4, ta, ma
+# CHECK-NEXT: 1 8 1.00 * vle8.v v8, (a0)
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, mf8, ta, ma
+# CHECK-NEXT: 1 8 1.00 * vle8.v v8, (a0)
+# CHECK-NEXT: 1 1 1.00 U vsetvli zero, zero, e8, m1, ta, ma
+# CHECK-NEXT: 1 8 1.00 * vle8.v v8, (a0)
+# CHECK-NEXT: 1 ...
[truncated]
defm "" : LMULWriteResMX<"WriteVSTE", [SiFiveP400VST], mx, IsWorstCase>; | ||
} | ||
|
||
// Mask load and store always have EMUL=1. |
Going by the true meaning of EMUL, it's technically EMUL = [1/64, 1]. Only a whole register, or a fraction as small as 1/64 of a register, is used. It's calculated as EMUL = (1/SEW) * LMUL, where SEW and LMUL come from VTYPE.
I copied the EMUL=1 description from the spec, though I understand your point that EMUL can go as small as 1/64. I've updated the comment to say "Mask load and store have a maximum EMUL of 1".
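For concreteness, evaluating the formula above at the extremes of the SEW/LMUL combinations used in these models (this is just arithmetic on the reviewer's formula, not anything taken from the patch):

$$
\mathrm{EMUL} = \frac{1}{\mathrm{SEW}} \times \mathrm{LMUL}:
\qquad
\text{(e8, m8)}:\ \tfrac{8}{8} = 1,
\qquad
\text{(e64, m1)}:\ \tfrac{1}{64},
\qquad
\text{(e8, mf8)}:\ \tfrac{1/8}{8} = \tfrac{1}{64},
$$

which matches the stated range of [1/64, 1].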
defm "" : LMULWriteResMX<"WriteVSTOX32", [SiFiveP400VST], mx, IsWorstCase>; | ||
defm "" : LMULWriteResMX<"WriteVSTOX64", [SiFiveP400VST], mx, IsWorstCase>; | ||
foreach eew = [8, 16, 32, 64] in { | ||
let Latency = 13, ReleaseAtCycles = [SiFiveP400GetVLMAX<mx, eew>.val] in { |
If the throughput is related to the number of elements, shouldn't the latency also depend on the number of elements? A whole VLEN's worth of elements needs to be loaded before a dependent operation can start, and with larger elements there are fewer elements per VLEN, so I would expect lower latency for larger elements than for smaller ones.
I've re-measured and updated the latencies. The latencies of strided and indexed ops are indeed dependent on VL.
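Reading the updated entries concretely (just plugging the models' VLEN = 128 into the GetVLMAX helper from this diff; note the full patch is truncated above, so the fixed Latency = 13 shown here may not cover every combination):

$$
\mathrm{ReleaseAtCycles} = \mathrm{VLMAX} = \frac{\mathrm{VLEN} \times \mathrm{LMUL}}{\mathrm{SEW}},
\qquad
\text{e.g. } \frac{128 \times 8}{8} = 128 \text{ cycles for e8/m8, vs. } \frac{128 \times 1}{64} = 2 \text{ for e64/m1.}
$$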
LGTM
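For anyone who wants to spot-check the new numbers locally, here is a minimal sketch in the style of the tests added by this patch (the file name is hypothetical; the RUN invocation mirrors the one in the new vle-vse-vlm.s test, and the expected figures in the comments simply read off the scheduling entries visible in the truncated diff above):

```
# strided-load.s (hypothetical file name)
# RUN: llvm-mca -mtriple=riscv64 -mcpu=sifive-p470 -iterations=1 < %s

# e8/m8: VLMAX = (128 * 8) / 8 = 128, so per the entries above this strided
# load should report Latency = 13 and RThroughput = 128.00.
vsetvli zero, zero, e8, m8, ta, ma
vlse8.v v8, (a0), a1

# e64/m1: VLMAX = (128 * 1) / 64 = 2, so expect Latency = 13 and
# RThroughput = 2.00.
vsetvli zero, zero, e64, m1, ta, ma
vlse64.v v8, (a0), a1
```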