
Commit 09e6b30

James Morse authored and oupton committed
arm64: cpufeature: discover CPU support for MPAM
ARMv8.4 adds support for 'Memory Partitioning And Monitoring' (MPAM), which describes an interface to cache and bandwidth controls wherever they appear in the system.

Add support to detect MPAM. Like SVE, MPAM has an extra id register that describes some more properties, including the virtualisation support, which is optional. Detect this separately so we can detect mismatched/insane systems, but still use MPAM on the host even if the virtualisation support is missing.

MPAM needs enabling at the highest implemented exception level, otherwise the register accesses trap. The 'enabled' flag is accessible to lower exception levels, but it's in a register that traps when MPAM isn't enabled. The cpufeature 'matches' hook is extended to test this on one of the CPUs, so that firmware can emulate MPAM as disabled if it is reserved for use by the secure world.

Secondary CPUs that appear late could trip cpufeature's 'lower safe' behaviour after the MPAM properties have been advertised to user-space. Add a verify call to ensure late secondaries match the existing CPUs.

(If you have a boot failure that bisects here, it's likely your CPUs advertise MPAM in the id registers, but firmware failed to either enable MPAM or emulate the trap as if it were disabled.)

Signed-off-by: James Morse <[email protected]>
Signed-off-by: Joey Gouly <[email protected]>
Reviewed-by: Gavin Shan <[email protected]>
Tested-by: Shameer Kolothum <[email protected]>
Acked-by: Catalin Marinas <[email protected]>
Reviewed-by: Marc Zyngier <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Oliver Upton <[email protected]>
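For illustration, the detection described above combines the ID_AA64PFR0_EL1.MPAM field with the runtime MPAMEN bit. The condensed, standalone sketch below is not the exact hunk added in this patch (the real check is test_has_mpam() in the cpufeature.c diff); it only uses identifiers the patch itself introduces or relies on:

	/*
	 * Condensed sketch of the boot-time check: MPAM is only usable if
	 * the id register advertises it AND firmware enabled it at the
	 * highest implemented EL. Reading MPAM1_EL1 itself traps when MPAM
	 * is disabled, so firmware is expected to emulate that trap as
	 * "disabled".
	 */
	static bool mpam_usable(void)
	{
		u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

		if (!id_aa64pfr0_mpam(pfr0))
			return false;

		return read_sysreg_s(SYS_MPAM1_EL1) & MPAM1_EL1_MPAMEN;
	}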
1 parent 23b33d1 commit 09e6b30

7 files changed: +126 −0 lines changed


Documentation/arch/arm64/cpu-feature-registers.rst

Lines changed: 2 additions & 0 deletions
@@ -152,6 +152,8 @@ infrastructure:
      +------------------------------+---------+---------+
      | DIT                          | [51-48] |    y    |
      +------------------------------+---------+---------+
+     | MPAM                         | [43-40] |    n    |
+     +------------------------------+---------+---------+
      | SVE                          | [35-32] |    y    |
      +------------------------------+---------+---------+
      | GIC                          | [27-24] |    n    |

arch/arm64/include/asm/cpu.h

Lines changed: 1 addition & 0 deletions
@@ -46,6 +46,7 @@ struct cpuinfo_arm64 {
 	u64		reg_revidr;
 	u64		reg_gmid;
 	u64		reg_smidr;
+	u64		reg_mpamidr;
 
 	u64		reg_id_aa64dfr0;
 	u64		reg_id_aa64dfr1;

arch/arm64/include/asm/cpucaps.h

Lines changed: 5 additions & 0 deletions
@@ -60,6 +60,11 @@ cpucap_is_possible(const unsigned int cap)
 		return IS_ENABLED(CONFIG_ARM64_WORKAROUND_REPEAT_TLBI);
 	case ARM64_WORKAROUND_SPECULATIVE_SSBS:
 		return IS_ENABLED(CONFIG_ARM64_ERRATUM_3194386);
+	case ARM64_MPAM:
+		/*
+		 * KVM MPAM support doesn't rely on the host kernel supporting MPAM.
+		 */
+		return true;
 	}
 
 	return true;

arch/arm64/include/asm/cpufeature.h

Lines changed: 17 additions & 0 deletions
@@ -612,6 +612,13 @@ static inline bool id_aa64pfr1_sme(u64 pfr1)
 	return val > 0;
 }
 
+static inline bool id_aa64pfr0_mpam(u64 pfr0)
+{
+	u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_MPAM_SHIFT);
+
+	return val > 0;
+}
+
 static inline bool id_aa64pfr1_mte(u64 pfr1)
 {
 	u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT);
@@ -838,6 +845,16 @@ static inline bool system_supports_poe(void)
 	       alternative_has_cap_unlikely(ARM64_HAS_S1POE);
 }
 
+static inline bool system_supports_mpam(void)
+{
+	return alternative_has_cap_unlikely(ARM64_MPAM);
+}
+
+static __always_inline bool system_supports_mpam_hcr(void)
+{
+	return alternative_has_cap_unlikely(ARM64_MPAM_HCR);
+}
+
 int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt);
 bool try_emulate_mrs(struct pt_regs *regs, u32 isn);
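As a usage note (hypothetical caller, not part of this patch): later MPAM code is expected to gate every MPAM register access on the new helper, since those accesses trap whenever firmware left MPAM disabled. A minimal sketch:

	/* Hypothetical caller, for illustration only; not added by this patch. */
	static void example_reset_kernel_partid(void)
	{
		if (!system_supports_mpam())
			return;

		/* Kernel (EL1) accesses use the reserved, unrestricted PARTID. */
		write_sysreg_s(0, SYS_MPAM1_EL1);
	}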

arch/arm64/kernel/cpufeature.c

Lines changed: 96 additions & 0 deletions
@@ -684,6 +684,14 @@ static const struct arm64_ftr_bits ftr_id_dfr1[] = {
 	ARM64_FTR_END,
 };
 
+static const struct arm64_ftr_bits ftr_mpamidr[] = {
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_PMG_MAX_SHIFT, MPAMIDR_EL1_PMG_MAX_WIDTH, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_VPMR_MAX_SHIFT, MPAMIDR_EL1_VPMR_MAX_WIDTH, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_HAS_HCR_SHIFT, 1, 0),
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_PARTID_MAX_SHIFT, MPAMIDR_EL1_PARTID_MAX_WIDTH, 0),
+	ARM64_FTR_END,
+};
+
 /*
  * Common ftr bits for a 32bit register with all hidden, strict
  * attributes, with 4bit feature fields and a default safe value of
@@ -804,6 +812,9 @@ static const struct __ftr_reg_entry {
 	ARM64_FTR_REG(SYS_ID_AA64MMFR3_EL1, ftr_id_aa64mmfr3),
 	ARM64_FTR_REG(SYS_ID_AA64MMFR4_EL1, ftr_id_aa64mmfr4),
 
+	/* Op1 = 0, CRn = 10, CRm = 4 */
+	ARM64_FTR_REG(SYS_MPAMIDR_EL1, ftr_mpamidr),
+
 	/* Op1 = 1, CRn = 0, CRm = 0 */
 	ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid),
 
@@ -1163,6 +1174,9 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info)
 		cpacr_restore(cpacr);
 	}
 
+	if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0))
+		init_cpu_ftr_reg(SYS_MPAMIDR_EL1, info->reg_mpamidr);
+
 	if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
 		init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid);
 }
@@ -1419,6 +1433,11 @@ void update_cpu_features(int cpu,
 		cpacr_restore(cpacr);
 	}
 
+	if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0)) {
+		taint |= check_update_ftr_reg(SYS_MPAMIDR_EL1, cpu,
+					      info->reg_mpamidr, boot->reg_mpamidr);
+	}
+
 	/*
 	 * The kernel uses the LDGM/STGM instructions and the number of tags
 	 * they read/write depends on the GMID_EL1.BS field. Check that the
@@ -2377,6 +2396,36 @@ cpucap_panic_on_conflict(const struct arm64_cpu_capabilities *cap)
 	return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT);
 }
 
+static bool
+test_has_mpam(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	if (!has_cpuid_feature(entry, scope))
+		return false;
+
+	/* Check firmware actually enabled MPAM on this cpu. */
+	return (read_sysreg_s(SYS_MPAM1_EL1) & MPAM1_EL1_MPAMEN);
+}
+
+static void
+cpu_enable_mpam(const struct arm64_cpu_capabilities *entry)
+{
+	/*
+	 * Access by the kernel (at EL1) should use the reserved PARTID
+	 * which is configured unrestricted. This avoids priority-inversion
+	 * where latency sensitive tasks have to wait for a task that has
+	 * been throttled to release the lock.
+	 */
+	write_sysreg_s(0, SYS_MPAM1_EL1);
+}
+
+static bool
+test_has_mpam_hcr(const struct arm64_cpu_capabilities *entry, int scope)
+{
+	u64 idr = read_sanitised_ftr_reg(SYS_MPAMIDR_EL1);
+
+	return idr & MPAMIDR_EL1_HAS_HCR;
+}
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.capability = ARM64_ALWAYS_BOOT,
@@ -2873,6 +2922,20 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 #endif
 	},
 #endif
+	{
+		.desc = "Memory Partitioning And Monitoring",
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.capability = ARM64_MPAM,
+		.matches = test_has_mpam,
+		.cpu_enable = cpu_enable_mpam,
+		ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, MPAM, 1)
+	},
+	{
+		.desc = "Memory Partitioning And Monitoring Virtualisation",
+		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+		.capability = ARM64_MPAM_HCR,
+		.matches = test_has_mpam_hcr,
+	},
 	{
 		.desc = "NV1",
 		.capability = ARM64_HAS_HCR_NV1,
@@ -3396,6 +3459,36 @@ static void verify_hyp_capabilities(void)
 	}
 }
 
+static void verify_mpam_capabilities(void)
+{
+	u64 cpu_idr = read_cpuid(ID_AA64PFR0_EL1);
+	u64 sys_idr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+	u16 cpu_partid_max, cpu_pmg_max, sys_partid_max, sys_pmg_max;
+
+	if (FIELD_GET(ID_AA64PFR0_EL1_MPAM_MASK, cpu_idr) !=
+	    FIELD_GET(ID_AA64PFR0_EL1_MPAM_MASK, sys_idr)) {
+		pr_crit("CPU%d: MPAM version mismatch\n", smp_processor_id());
+		cpu_die_early();
+	}
+
+	cpu_idr = read_cpuid(MPAMIDR_EL1);
+	sys_idr = read_sanitised_ftr_reg(SYS_MPAMIDR_EL1);
+	if (FIELD_GET(MPAMIDR_EL1_HAS_HCR, cpu_idr) !=
+	    FIELD_GET(MPAMIDR_EL1_HAS_HCR, sys_idr)) {
+		pr_crit("CPU%d: Missing MPAM HCR\n", smp_processor_id());
+		cpu_die_early();
+	}
+
+	cpu_partid_max = FIELD_GET(MPAMIDR_EL1_PARTID_MAX, cpu_idr);
+	cpu_pmg_max = FIELD_GET(MPAMIDR_EL1_PMG_MAX, cpu_idr);
+	sys_partid_max = FIELD_GET(MPAMIDR_EL1_PARTID_MAX, sys_idr);
+	sys_pmg_max = FIELD_GET(MPAMIDR_EL1_PMG_MAX, sys_idr);
+	if (cpu_partid_max < sys_partid_max || cpu_pmg_max < sys_pmg_max) {
+		pr_crit("CPU%d: MPAM PARTID/PMG max values are mismatched\n", smp_processor_id());
+		cpu_die_early();
+	}
+}
+
 /*
  * Run through the enabled system capabilities and enable() it on this CPU.
  * The capabilities were decided based on the available CPUs at the boot time.
@@ -3422,6 +3515,9 @@ static void verify_local_cpu_capabilities(void)
 
 	if (is_hyp_mode_available())
 		verify_hyp_capabilities();
+
+	if (system_supports_mpam())
+		verify_mpam_capabilities();
 }
 
 void check_local_cpu_capabilities(void)
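For context (hypothetical, not in this patch): the separate ARM64_MPAM_HCR capability lets later hypervisor code decide whether the optional MPAMHCR_EL2 register is actually present before touching it. A minimal sketch, assuming a SYS_MPAMHCR_EL2 sysreg definition exists from the earlier sysreg conversion in this series:

	/*
	 * Hypothetical later user; MPAMHCR_EL2 only exists when
	 * MPAMIDR_EL1.HAS_HCR is set, which is what ARM64_MPAM_HCR tracks.
	 */
	static void example_clear_mpam_hcr(void)
	{
		if (system_supports_mpam_hcr())
			write_sysreg_s(0, SYS_MPAMHCR_EL2);
	}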

arch/arm64/kernel/cpuinfo.c

Lines changed: 3 additions & 0 deletions
@@ -478,6 +478,9 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
 		__cpuinfo_store_cpu_32bit(&info->aarch32);
 
+	if (id_aa64pfr0_mpam(info->reg_id_aa64pfr0))
+		info->reg_mpamidr = read_cpuid(MPAMIDR_EL1);
+
 	cpuinfo_detect_icache_policy(info);
 }
 

arch/arm64/tools/cpucaps

Lines changed: 2 additions & 0 deletions
@@ -60,6 +60,8 @@ HW_DBM
 KVM_HVHE
 KVM_PROTECTED_MODE
 MISMATCHED_CACHE_TYPE
+MPAM
+MPAM_HCR
 MTE
 MTE_ASYMM
 SME
