
Commit 82228e3

kvaneesh authored and mpe committed
powerpc/pseries: Skip using reserved virtual address range
Now that we use all of the available virtual address range, we need to make sure we don't generate a VSID that overlaps with the reserved VSID ranges. The reserved ranges include the virtual address range used by the adjunct partition and the VRMA virtual segment. We find the context value that can result in generating such a VSID and reserve it early in boot.

We don't look at the adjunct range, because for now we disable adjunct usage in a Linux LPAR via the CAS interface.

Signed-off-by: Aneesh Kumar K.V <[email protected]>
[mpe: Rewrite hash__reserve_context_id(), move the rest into pseries]
Signed-off-by: Michael Ellerman <[email protected]>
1 parent bb18322 commit 82228e3
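For background: on the hash MMU, a segment's VSID is derived from the task's context id by a multiplicative hash, so one particular context value maps onto the VRMA VSID. A minimal sketch of that forward mapping, assuming the 1T-segment constants from mmu-hash.h (the helper name vsid_of() is illustrative, not kernel code):

/*
 * Proto-VSID = (context << ESID_BITS_1T) | esid, then a multiplicative
 * scramble, per the description in mmu-hash.h. ea is assumed below the
 * maximum user address, so the ESID fits in ESID_BITS_1T bits.
 */
static unsigned long vsid_of(unsigned long context, unsigned long ea)
{
	unsigned long protovsid;

	protovsid = (context << ESID_BITS_1T) | (ea >> SID_SHIFT_1T);
	return (protovsid * VSID_MULTIPLIER_1T) % VSID_MODULUS_1T;
}
/*
 * If vsid_of(context, ea) == VRMA_VSID for some ea, handing out that
 * context would alias the reserved VRMA segment -- that is the collision
 * this commit prevents by reserving the context id at boot.
 */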

6 files changed (+85, −3 lines)

arch/powerpc/include/asm/book3s/64/mmu-hash.h (+7, −0)

@@ -589,11 +589,18 @@ extern void slb_set_size(u16 size);
 #define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
 #define VSID_BITS_256M		(VA_BITS - SID_SHIFT)
 #define VSID_BITS_65_256M	(65 - SID_SHIFT)
+/*
+ * Modular multiplicative inverse of VSID_MULTIPLIER under modulo VSID_MODULUS
+ */
+#define VSID_MULINV_256M	ASM_CONST(665548017062)
 
 #define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
 #define VSID_BITS_1T		(VA_BITS - SID_SHIFT_1T)
 #define VSID_BITS_65_1T		(65 - SID_SHIFT_1T)
+#define VSID_MULINV_1T		ASM_CONST(209034062)
 
+/* 1TB VSID reserved for VRMA */
+#define VRMA_VSID		0x1ffffffUL
 #define USER_VSID_RANGE		(1UL << (ESID_BITS + SID_SHIFT))
 
 /* 4 bits per slice and we have one slice per 1TB */
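As a sanity check, the two new constants really are the modular inverses of VSID_MULTIPLIER under the 68-bit VA moduli (2^40 − 1 for 256M segments, 2^28 − 1 for 1T). A standalone userspace check, with the constants copied from this header:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned __int128 mult = 12538073;		/* VSID_MULTIPLIER_{256M,1T} */
	unsigned long mod_256m = (1UL << 40) - 1;	/* VSID_BITS_256M = 68 - 28 */
	unsigned long mod_1t   = (1UL << 28) - 1;	/* VSID_BITS_1T   = 68 - 40 */

	/* multiplier * inverse == 1 (mod modulus) for both segment sizes */
	assert((unsigned long)((mult * 665548017062UL) % mod_256m) == 1);
	assert((unsigned long)((mult * 209034062UL) % mod_1t) == 1);
	printf("VSID_MULINV_256M and VSID_MULINV_1T check out\n");
	return 0;
}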

arch/powerpc/include/asm/kvm_book3s_64.h (+0, −2)

@@ -49,8 +49,6 @@ static inline bool kvm_is_radix(struct kvm *kvm)
 #define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
 #endif
 
-#define VRMA_VSID	0x1ffffffUL	/* 1TB VSID reserved for VRMA */
-
 /*
  * We use a lock bit in HPTE dword 0 to synchronize updates and
  * accesses to each HPTE, and another bit to indicate non-present

arch/powerpc/include/asm/mmu_context.h (+1, −0)

@@ -52,6 +52,7 @@ static inline void switch_mmu_context(struct mm_struct *prev,
 }
 
 extern int hash__alloc_context_id(void);
+extern void hash__reserve_context_id(int id);
 extern void __destroy_context(int context_id);
 static inline void mmu_context_init(void) { }
 #else

arch/powerpc/mm/hash_utils_64.c (+0, −1)

@@ -1868,5 +1868,4 @@ static int __init hash64_debugfs(void)
 	return 0;
 }
 machine_device_initcall(pseries, hash64_debugfs);
-
 #endif /* CONFIG_DEBUG_FS */

arch/powerpc/mm/mmu_context_book3s64.c (+16, −0)

@@ -57,6 +57,22 @@ static int alloc_context_id(int min_id, int max_id)
 	return index;
 }
 
+void hash__reserve_context_id(int id)
+{
+	int rc, result = 0;
+
+	do {
+		if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
+			break;
+
+		spin_lock(&mmu_context_lock);
+		rc = ida_get_new_above(&mmu_context_ida, id, &result);
+		spin_unlock(&mmu_context_lock);
+	} while (rc == -EAGAIN);
+
+	WARN(result != id, "mmu: Failed to reserve context id %d (rc %d)\n", id, result);
+}
+
 int hash__alloc_context_id(void)
 {
 	unsigned long max;
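One subtlety in hash__reserve_context_id(): ida_get_new_above() allocates the smallest free id greater than or equal to the requested one, so if the target id was already handed out the call quietly succeeds with a higher id. The WARN on result != id is what surfaces that case. An illustrative failure (the ids are made up):

/* Suppose context id 7 was already allocated to a task. Then: */
hash__reserve_context_id(7);
/*
 * ida_get_new_above(&mmu_context_ida, 7, &result) returns 0 with
 * result == 8 (the next free id), and the WARN fires because the
 * intended reservation of id 7 silently failed.
 */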

arch/powerpc/platforms/pseries/lpar.c (+61, −0)

@@ -958,3 +958,64 @@ int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
 
 	return rc;
 }
+
+static unsigned long vsid_unscramble(unsigned long vsid, int ssize)
+{
+	unsigned long protovsid;
+	unsigned long va_bits = VA_BITS;
+	unsigned long modinv, vsid_modulus;
+	unsigned long max_mod_inv, tmp_modinv;
+
+	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
+		va_bits = 65;
+
+	if (ssize == MMU_SEGSIZE_256M) {
+		modinv = VSID_MULINV_256M;
+		vsid_modulus = ((1UL << (va_bits - SID_SHIFT)) - 1);
+	} else {
+		modinv = VSID_MULINV_1T;
+		vsid_modulus = ((1UL << (va_bits - SID_SHIFT_1T)) - 1);
+	}
+
+	/*
+	 * vsid outside our range.
+	 */
+	if (vsid >= vsid_modulus)
+		return 0;
+
+	/*
+	 * If modinv is the modular multiplicative inverse of (x % vsid_modulus)
+	 * and vsid = (protovsid * x) % vsid_modulus, then we say:
+	 *	protovsid = (vsid * modinv) % vsid_modulus
+	 */
+
+	/* Check if (vsid * modinv) overflows (63 bits) */
+	max_mod_inv = 0x7fffffffffffffffull / vsid;
+	if (modinv < max_mod_inv)
+		return (vsid * modinv) % vsid_modulus;
+
+	tmp_modinv = modinv / max_mod_inv;
+	modinv %= max_mod_inv;
+
+	protovsid = (((vsid * max_mod_inv) % vsid_modulus) * tmp_modinv) % vsid_modulus;
+	protovsid = (protovsid + vsid * modinv) % vsid_modulus;
+
+	return protovsid;
+}
+
+static int __init reserve_vrma_context_id(void)
+{
+	unsigned long protovsid;
+
+	/*
+	 * Reserve context ids which map to reserved virtual addresses. For now
+	 * we only reserve the context id which maps to the VRMA VSID. We ignore
+	 * the addresses in "ibm,adjunct-virtual-addresses" because we don't
+	 * enable adjunct support via the "ibm,client-architecture-support"
+	 * interface.
+	 */
+	protovsid = vsid_unscramble(VRMA_VSID, MMU_SEGSIZE_1T);
+	hash__reserve_context_id(protovsid >> ESID_BITS_1T);
+	return 0;
+}
+machine_device_initcall(pseries, reserve_vrma_context_id);
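To see the unscramble in action, the arithmetic can be replayed in userspace: multiplying by VSID_MULINV_1T must invert the multiplicative scramble, so pushing the recovered proto-VSID back through the forward hash has to reproduce VRMA_VSID. A minimal round-trip check (it sidesteps the kernel's 63-bit overflow dance by using __int128; the constants assume the 68-bit VA layout):

#include <assert.h>
#include <stdio.h>

#define VSID_MULTIPLIER	12538073UL
#define VSID_MULINV_1T	209034062UL
#define VSID_MODULUS_1T	((1UL << 28) - 1)	/* 68-bit VA, 1T segments */
#define VRMA_VSID	0x1ffffffUL

int main(void)
{
	unsigned long protovsid, vsid;

	/* unscramble: multiply by the modular inverse */
	protovsid = (unsigned long)(((unsigned __int128)VRMA_VSID *
				     VSID_MULINV_1T) % VSID_MODULUS_1T);

	/* forward scramble, i.e. vsid = (protovsid * x) % vsid_modulus */
	vsid = (unsigned long)(((unsigned __int128)protovsid *
				VSID_MULTIPLIER) % VSID_MODULUS_1T);

	assert(vsid == VRMA_VSID);
	printf("proto-VSID 0x%lx scrambles back to VRMA_VSID\n", protovsid);
	return 0;
}

The context id the initcall reserves is then protovsid >> ESID_BITS_1T, exactly as reserve_vrma_context_id() does above.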
