
Commit a736143

Merge branch 'topic/ppc-kvm' into next
Pull in some more ppc KVM patches we are keeping in our topic branch. In particular this brings in the series to add H_RPT_INVALIDATE.
2 parents: 4a21192 + 51696f3

16 files changed: +492, -22 lines


Documentation/virt/kvm/api.rst

Lines changed: 18 additions & 0 deletions
@@ -6362,6 +6362,24 @@ default.
 
 See Documentation/x86/sgx/2.Kernel-internals.rst for more details.
 
+7.26 KVM_CAP_PPC_RPT_INVALIDATE
+-------------------------------
+
+:Capability: KVM_CAP_PPC_RPT_INVALIDATE
+:Architectures: ppc
+:Type: vm
+
+This capability indicates that the kernel is capable of handling
+H_RPT_INVALIDATE hcall.
+
+In order to enable the use of H_RPT_INVALIDATE in the guest,
+user space might have to advertise it for the guest. For example,
+IBM pSeries (sPAPR) guest starts using it if "hcall-rpt-invalidate" is
+present in the "ibm,hypertas-functions" device-tree property.
+
+This capability is enabled for hypervisors on platforms like POWER9
+that support radix MMU.
+
 8. Other capabilities.
 ======================
 

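For context, a userspace VMM running on a kernel with this series applied could probe the new capability with KVM_CHECK_EXTENSION before advertising "hcall-rpt-invalidate" in the guest's "ibm,hypertas-functions" property. A minimal, hypothetical sketch in C (error handling elided; assumes KVM_CAP_PPC_RPT_INVALIDATE is exported via <linux/kvm.h>):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);

	/* A return value > 0 means the kernel can handle H_RPT_INVALIDATE. */
	if (ioctl(vm, KVM_CHECK_EXTENSION, KVM_CAP_PPC_RPT_INVALIDATE) > 0)
		printf("advertise hcall-rpt-invalidate to the guest\n");
	return 0;
}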
arch/powerpc/include/asm/book3s/64/mmu.h

Lines changed: 1 addition & 0 deletions
@@ -19,6 +19,7 @@ struct mmu_psize_def {
 	int	penc[MMU_PAGE_COUNT];	/* HPTE encoding */
 	unsigned int	tlbiel;	/* tlbiel supported for that page size */
 	unsigned long	avpnm;	/* bits to mask out in AVPN in the HPTE */
+	unsigned long	h_rpt_pgsize; /* H_RPT_INVALIDATE page size encoding */
 	union {
 		unsigned long	sllp;	/* SLB L||LP (exact mask to use in slbmte) */
 		unsigned long ap;	/* Ap encoding used by PowerISA 3.0 */

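Elsewhere in the series (outside the files shown here), these per-page-size encodings feed psize_to_rpti_pgsize(), which is used below in book3s_64_mmu_radix.c. A hypothetical sketch of such a lookup, assuming h_rpt_pgsize holds the H_RPTI_PAGE_* value for each supported page size:

static inline unsigned long psize_to_rpti_pgsize(unsigned long psize)
{
	/* Fall back to "all page sizes" for anything not described. */
	if (psize < MMU_PAGE_COUNT)
		return mmu_psize_defs[psize].h_rpt_pgsize;
	return H_RPTI_PAGE_ALL;
}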
arch/powerpc/include/asm/book3s/64/tlbflush-radix.h

Lines changed: 4 additions & 0 deletions
@@ -4,6 +4,10 @@
 
 #include <asm/hvcall.h>
 
+#define RIC_FLUSH_TLB 0
+#define RIC_FLUSH_PWC 1
+#define RIC_FLUSH_ALL 2
+
 struct vm_area_struct;
 struct mm_struct;
 struct mmu_gather;

arch/powerpc/include/asm/cputhreads.h

Lines changed: 30 additions & 0 deletions
@@ -98,6 +98,36 @@ static inline int cpu_last_thread_sibling(int cpu)
 	return cpu | (threads_per_core - 1);
 }
 
+/*
+ * tlb_thread_siblings are siblings which share a TLB. This is not
+ * architected, is not something a hypervisor could emulate and a future
+ * CPU may change behaviour even in compat mode, so this should only be
+ * used on PowerNV, and only with care.
+ */
+static inline int cpu_first_tlb_thread_sibling(int cpu)
+{
+	if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
+		return cpu & ~0x6;	/* Big Core */
+	else
+		return cpu_first_thread_sibling(cpu);
+}
+
+static inline int cpu_last_tlb_thread_sibling(int cpu)
+{
+	if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
+		return cpu | 0x6;	/* Big Core */
+	else
+		return cpu_last_thread_sibling(cpu);
+}
+
+static inline int cpu_tlb_thread_sibling_step(void)
+{
+	if (cpu_has_feature(CPU_FTR_ARCH_300) && (threads_per_core == 8))
+		return 2;	/* Big Core */
+	else
+		return 1;
+}
+
 static inline u32 get_tensr(void)
 {
 #ifdef CONFIG_BOOKE

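Reading the masks above: on an SMT8 big core the even-numbered and odd-numbered threads fall into different TLB-sharing groups, hence the ~0x6 / 0x6 masks and the step of 2. A purely illustrative sketch of the iteration pattern these helpers enable, mirroring the loop added to radix_flush_cpu() in book3s_hv.c further down (visit_tlb_siblings() is not a real kernel function):

#include <asm/cputhreads.h>

/* Visit only the threads that share a TLB with "cpu". */
static void visit_tlb_siblings(int cpu, void (*fn)(int cpu))
{
	int i;

	for (i = cpu_first_tlb_thread_sibling(cpu);
	     i <= cpu_last_tlb_thread_sibling(cpu);
	     i += cpu_tlb_thread_sibling_step())
		fn(i);	/* e.g. send an IPI or set a per-CPU flag */
}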
arch/powerpc/include/asm/hvcall.h

Lines changed: 2 additions & 2 deletions
@@ -423,9 +423,9 @@
 #define H_RPTI_TYPE_NESTED	0x0001	/* Invalidate nested guest partition-scope */
 #define H_RPTI_TYPE_TLB		0x0002	/* Invalidate TLB */
 #define H_RPTI_TYPE_PWC		0x0004	/* Invalidate Page Walk Cache */
-/* Invalidate Process Table Entries if H_RPTI_TYPE_NESTED is clear */
+/* Invalidate caching of Process Table Entries if H_RPTI_TYPE_NESTED is clear */
 #define H_RPTI_TYPE_PRT		0x0008
-/* Invalidate Partition Table Entries if H_RPTI_TYPE_NESTED is set */
+/* Invalidate caching of Partition Table Entries if H_RPTI_TYPE_NESTED is set */
 #define H_RPTI_TYPE_PAT		0x0008
 #define H_RPTI_TYPE_ALL		(H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC | \
 				 H_RPTI_TYPE_PRT)

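Note that bit 0x0008 is overloaded: with H_RPTI_TYPE_NESTED clear it selects the process-table cache (PRT), with it set the partition-table cache (PAT). A hypothetical helper showing how a caller might compose the type argument (build_rpti_type() is illustrative only):

static unsigned long build_rpti_type(bool nested)
{
	unsigned long type = H_RPTI_TYPE_TLB | H_RPTI_TYPE_PWC;

	if (nested)
		type |= H_RPTI_TYPE_NESTED | H_RPTI_TYPE_PAT;
	else
		type |= H_RPTI_TYPE_PRT;	/* TLB | PWC | PRT == H_RPTI_TYPE_ALL */

	return type;
}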
arch/powerpc/include/asm/kvm_book3s.h

Lines changed: 3 additions & 0 deletions
@@ -307,6 +307,9 @@ void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
 void kvmhv_release_all_nested(struct kvm *kvm);
 long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
 long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
+long do_h_rpt_invalidate_pat(struct kvm_vcpu *vcpu, unsigned long lpid,
+			     unsigned long type, unsigned long pg_sizes,
+			     unsigned long start, unsigned long end);
 int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu,
 			  u64 time_limit, unsigned long lpcr);
 void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);

arch/powerpc/include/asm/mmu_context.h

Lines changed: 12 additions & 0 deletions
@@ -215,6 +215,18 @@ static inline void mm_context_add_copro(struct mm_struct *mm) { }
 static inline void mm_context_remove_copro(struct mm_struct *mm) { }
 #endif
 
+#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) && defined(CONFIG_PPC_RADIX_MMU)
+void do_h_rpt_invalidate_prt(unsigned long pid, unsigned long lpid,
+			     unsigned long type, unsigned long pg_sizes,
+			     unsigned long start, unsigned long end);
+#else
+static inline void do_h_rpt_invalidate_prt(unsigned long pid,
+					   unsigned long lpid,
+					   unsigned long type,
+					   unsigned long pg_sizes,
+					   unsigned long start,
+					   unsigned long end) { }
+#endif
 
 extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);

arch/powerpc/kvm/book3s_64_mmu_radix.c

Lines changed: 22 additions & 5 deletions
@@ -21,6 +21,7 @@
 #include <asm/pte-walk.h>
 #include <asm/ultravisor.h>
 #include <asm/kvm_book3s_uvmem.h>
+#include <asm/plpar_wrappers.h>
 
 /*
  * Supported radix tree geometry.
@@ -318,9 +319,19 @@ void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
 	}
 
 	psi = shift_to_mmu_psize(pshift);
-	rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
-	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
-				lpid, rb);
+
+	if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE)) {
+		rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
+		rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
+					lpid, rb);
+	} else {
+		rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
+					    H_RPTI_TYPE_NESTED |
+					    H_RPTI_TYPE_TLB,
+					    psize_to_rpti_pgsize(psi),
+					    addr, addr + psize);
+	}
+
 	if (rc)
 		pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
 }
@@ -334,8 +345,14 @@ static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
 		return;
 	}
 
-	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
-				lpid, TLBIEL_INVAL_SET_LPID);
+	if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE))
+		rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
+					lpid, TLBIEL_INVAL_SET_LPID);
+	else
+		rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
+					    H_RPTI_TYPE_NESTED |
+					    H_RPTI_TYPE_PWC, H_RPTI_PAGE_ALL,
+					    0, -1UL);
 	if (rc)
 		pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
 }

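Both branches forward the partition-scoped invalidation to the underlying hypervisor; the only difference is whether the older H_TLB_INVALIDATE hcall or the new H_RPT_INVALIDATE is used, the latter whenever firmware advertises FW_FEATURE_RPT_INVALIDATE. The pseries_rpt_invalidate() wrapper itself is added to asm/plpar_wrappers.h elsewhere in this series; a simplified sketch of what it reduces to (the real helper presumably also retries on H_BUSY, which is omitted here):

static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type,
					  u64 pg_sizes, u64 start, u64 end)
{
	/* Simplified: no H_BUSY retry loop. */
	return plpar_hcall_norets(H_RPT_INVALIDATE, pid, target, type,
				  pg_sizes, start, end);
}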
arch/powerpc/kvm/book3s_hv.c

Lines changed: 96 additions & 6 deletions
@@ -76,6 +76,7 @@
 #include <asm/kvm_book3s_uvmem.h>
 #include <asm/ultravisor.h>
 #include <asm/dtl.h>
+#include <asm/plpar_wrappers.h>
 
 #include "book3s.h"
 
@@ -922,6 +923,68 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
 	return yield_count;
 }
 
+/*
+ * H_RPT_INVALIDATE hcall handler for nested guests.
+ *
+ * Handles only nested process-scoped invalidation requests in L0.
+ */
+static int kvmppc_nested_h_rpt_invalidate(struct kvm_vcpu *vcpu)
+{
+	unsigned long type = kvmppc_get_gpr(vcpu, 6);
+	unsigned long pid, pg_sizes, start, end;
+
+	/*
+	 * The partition-scoped invalidations aren't handled here in L0.
+	 */
+	if (type & H_RPTI_TYPE_NESTED)
+		return RESUME_HOST;
+
+	pid = kvmppc_get_gpr(vcpu, 4);
+	pg_sizes = kvmppc_get_gpr(vcpu, 7);
+	start = kvmppc_get_gpr(vcpu, 8);
+	end = kvmppc_get_gpr(vcpu, 9);
+
+	do_h_rpt_invalidate_prt(pid, vcpu->arch.nested->shadow_lpid,
+				type, pg_sizes, start, end);
+
+	kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
+	return RESUME_GUEST;
+}
+
+static long kvmppc_h_rpt_invalidate(struct kvm_vcpu *vcpu,
+				    unsigned long id, unsigned long target,
+				    unsigned long type, unsigned long pg_sizes,
+				    unsigned long start, unsigned long end)
+{
+	if (!kvm_is_radix(vcpu->kvm))
+		return H_UNSUPPORTED;
+
+	if (end < start)
+		return H_P5;
+
+	/*
+	 * Partition-scoped invalidation for nested guests.
+	 */
+	if (type & H_RPTI_TYPE_NESTED) {
+		if (!nesting_enabled(vcpu->kvm))
+			return H_FUNCTION;
+
+		/* Support only cores as target */
+		if (target != H_RPTI_TARGET_CMMU)
+			return H_P2;
+
+		return do_h_rpt_invalidate_pat(vcpu, id, type, pg_sizes,
+					       start, end);
+	}
+
+	/*
+	 * Process-scoped invalidation for L1 guests.
+	 */
+	do_h_rpt_invalidate_prt(id, vcpu->kvm->arch.lpid,
+				type, pg_sizes, start, end);
+	return H_SUCCESS;
+}
+
 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -1105,6 +1168,14 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 		if (!powernv_get_random_long(&vcpu->arch.regs.gpr[4]))
 			ret = H_HARDWARE;
 		break;
+	case H_RPT_INVALIDATE:
+		ret = kvmppc_h_rpt_invalidate(vcpu, kvmppc_get_gpr(vcpu, 4),
+					      kvmppc_get_gpr(vcpu, 5),
+					      kvmppc_get_gpr(vcpu, 6),
+					      kvmppc_get_gpr(vcpu, 7),
+					      kvmppc_get_gpr(vcpu, 8),
+					      kvmppc_get_gpr(vcpu, 9));
+		break;
 
 	case H_SET_PARTITION_TABLE:
 		ret = H_FUNCTION;
@@ -1225,6 +1296,7 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
 	case H_XIRR_X:
 #endif
 	case H_PAGE_INIT:
+	case H_RPT_INVALIDATE:
 		return 1;
 	}
 
@@ -1748,6 +1820,23 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
 		if (!xics_on_xive())
 			kvmppc_xics_rm_complete(vcpu, 0);
 		break;
+	case BOOK3S_INTERRUPT_SYSCALL:
+	{
+		unsigned long req = kvmppc_get_gpr(vcpu, 3);
+
+		/*
+		 * The H_RPT_INVALIDATE hcalls issued by nested
+		 * guests for process-scoped invalidations when
+		 * GTSE=0, are handled here in L0.
+		 */
+		if (req == H_RPT_INVALIDATE) {
+			r = kvmppc_nested_h_rpt_invalidate(vcpu);
+			break;
+		}
+
+		r = RESUME_HOST;
+		break;
+	}
 	default:
 		r = RESUME_HOST;
 		break;
@@ -2820,7 +2909,7 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
 	cpumask_t *cpu_in_guest;
 	int i;
 
-	cpu = cpu_first_thread_sibling(cpu);
+	cpu = cpu_first_tlb_thread_sibling(cpu);
 	if (nested) {
 		cpumask_set_cpu(cpu, &nested->need_tlb_flush);
 		cpu_in_guest = &nested->cpu_in_guest;
@@ -2834,9 +2923,10 @@ static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
 	 * the other side is the first smp_mb() in kvmppc_run_core().
 	 */
 	smp_mb();
-	for (i = 0; i < threads_per_core; ++i)
-		if (cpumask_test_cpu(cpu + i, cpu_in_guest))
-			smp_call_function_single(cpu + i, do_nothing, NULL, 1);
+	for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
+	     i += cpu_tlb_thread_sibling_step())
+		if (cpumask_test_cpu(i, cpu_in_guest))
+			smp_call_function_single(i, do_nothing, NULL, 1);
 }
 
 static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
@@ -2867,8 +2957,8 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
 	 */
 	if (prev_cpu != pcpu) {
 		if (prev_cpu >= 0 &&
-		    cpu_first_thread_sibling(prev_cpu) !=
-		    cpu_first_thread_sibling(pcpu))
+		    cpu_first_tlb_thread_sibling(prev_cpu) !=
+		    cpu_first_tlb_thread_sibling(pcpu))
 			radix_flush_cpu(kvm, prev_cpu, vcpu);
 		if (nested)
 			nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;

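The dispatch above pulls the hcall arguments straight from the guest registers: r4 carries the PID or LPID, r5 the target, r6 the type flags, r7 the page-size mask, r8 and r9 the start and end of the effective-address range. A hypothetical guest-side call using the existing plpar_hcall_norets() wrapper follows the same convention (flush_process_scope() is illustrative only):

/*
 * Illustrative only: process-scoped H_RPT_INVALIDATE flushing TLB,
 * page-walk cache and process-table cache for one PID over the whole
 * effective-address range (0 .. -1UL).
 */
static long flush_process_scope(unsigned long pid, unsigned long pg_sizes)
{
	return plpar_hcall_norets(H_RPT_INVALIDATE, pid,
				  H_RPTI_TARGET_CMMU, H_RPTI_TYPE_ALL,
				  pg_sizes, 0UL, -1UL);
}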
arch/powerpc/kvm/book3s_hv_builtin.c

Lines changed: 1 addition & 1 deletion
@@ -721,7 +721,7 @@ void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
 	 * Thus we make all 4 threads use the same bit.
 	 */
 	if (cpu_has_feature(CPU_FTR_ARCH_300))
-		pcpu = cpu_first_thread_sibling(pcpu);
+		pcpu = cpu_first_tlb_thread_sibling(pcpu);
 
 	if (nested)
 		need_tlb_flush = &nested->need_tlb_flush;
