Skip to content

Commit 82f9133

Browse files
Ben Gardon authored and Paolo Bonzini committed
KVM: selftests: Add option to overlap vCPU memory access
Add an option to overlap the ranges of memory each vCPU accesses instead of partitioning them. This option will increase the probability of multiple vCPUs faulting on the same page at the same time, and causing interesting races, if there are bugs in the page fault handler or elsewhere in the kernel. Reviewed-by: Jacob Xu <[email protected]> Reviewed-by: Makarand Sonare <[email protected]> Signed-off-by: Ben Gardon <[email protected]> Message-Id: <[email protected]> Signed-off-by: Paolo Bonzini <[email protected]>
1 parent 86753bd commit 82f9133

File tree

4 files changed

+57
-18
lines changed

4 files changed

+57
-18
lines changed

tools/testing/selftests/kvm/demand_paging_test.c

Lines changed: 25 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -250,6 +250,7 @@ static int setup_demand_paging(struct kvm_vm *vm,
250250
struct test_params {
251251
bool use_uffd;
252252
useconds_t uffd_delay;
253+
bool partition_vcpu_memory_access;
253254
};
254255

255256
static void run_test(enum vm_guest_mode mode, void *arg)
@@ -277,7 +278,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
277278
vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
278279
TEST_ASSERT(vcpu_threads, "Memory allocation failed");
279280

280-
perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
281+
perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
282+
p->partition_vcpu_memory_access);
281283

282284
if (p->use_uffd) {
283285
uffd_handler_threads =
@@ -293,10 +295,19 @@ static void run_test(enum vm_guest_mode mode, void *arg)
293295
for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
294296
vm_paddr_t vcpu_gpa;
295297
void *vcpu_hva;
298+
uint64_t vcpu_mem_size;
296299

297-
vcpu_gpa = guest_test_phys_mem + (vcpu_id * guest_percpu_mem_size);
300+
301+
if (p->partition_vcpu_memory_access) {
302+
vcpu_gpa = guest_test_phys_mem +
303+
(vcpu_id * guest_percpu_mem_size);
304+
vcpu_mem_size = guest_percpu_mem_size;
305+
} else {
306+
vcpu_gpa = guest_test_phys_mem;
307+
vcpu_mem_size = guest_percpu_mem_size * nr_vcpus;
308+
}
298309
PER_VCPU_DEBUG("Added VCPU %d with test mem gpa [%lx, %lx)\n",
299-
vcpu_id, vcpu_gpa, vcpu_gpa + guest_percpu_mem_size);
310+
vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_mem_size);
300311

301312
/* Cache the HVA pointer of the region */
302313
vcpu_hva = addr_gpa2hva(vm, vcpu_gpa);
@@ -313,7 +324,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
313324
&uffd_handler_threads[vcpu_id],
314325
pipefds[vcpu_id * 2],
315326
p->uffd_delay, &uffd_args[vcpu_id],
316-
vcpu_hva, guest_percpu_mem_size);
327+
vcpu_hva, vcpu_mem_size);
317328
if (r < 0)
318329
exit(-r);
319330
}
@@ -376,7 +387,7 @@ static void help(char *name)
376387
{
377388
puts("");
378389
printf("usage: %s [-h] [-m mode] [-u] [-d uffd_delay_usec]\n"
379-
" [-b memory] [-v vcpus]\n", name);
390+
" [-b memory] [-v vcpus] [-o]\n", name);
380391
guest_modes_help();
381392
printf(" -u: use User Fault FD to handle vCPU page\n"
382393
" faults.\n");
@@ -387,19 +398,23 @@ static void help(char *name)
387398
" demand paged by each vCPU. e.g. 10M or 3G.\n"
388399
" Default: 1G\n");
389400
printf(" -v: specify the number of vCPUs to run.\n");
401+
printf(" -o: Overlap guest memory accesses instead of partitioning\n"
402+
" them into a separate region of memory for each vCPU.\n");
390403
puts("");
391404
exit(0);
392405
}
393406

394407
int main(int argc, char *argv[])
395408
{
396409
int max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
397-
struct test_params p = {};
410+
struct test_params p = {
411+
.partition_vcpu_memory_access = true,
412+
};
398413
int opt;
399414

400415
guest_modes_append_default();
401416

402-
while ((opt = getopt(argc, argv, "hm:ud:b:v:")) != -1) {
417+
while ((opt = getopt(argc, argv, "hm:ud:b:v:o")) != -1) {
403418
switch (opt) {
404419
case 'm':
405420
guest_modes_cmdline(optarg);
@@ -419,6 +434,9 @@ int main(int argc, char *argv[])
419434
TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
420435
"Invalid number of vcpus, must be between 1 and %d", max_vcpus);
421436
break;
437+
case 'o':
438+
p.partition_vcpu_memory_access = false;
439+
break;
422440
case 'h':
423441
default:
424442
help(argv[0]);

tools/testing/selftests/kvm/dirty_log_perf_test.c

Lines changed: 11 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -92,6 +92,7 @@ struct test_params {
9292
unsigned long iterations;
9393
uint64_t phys_offset;
9494
int wr_fract;
95+
bool partition_vcpu_memory_access;
9596
};
9697

9798
static void run_test(enum vm_guest_mode mode, void *arg)
@@ -129,7 +130,8 @@ static void run_test(enum vm_guest_mode mode, void *arg)
129130
vcpu_threads = malloc(nr_vcpus * sizeof(*vcpu_threads));
130131
TEST_ASSERT(vcpu_threads, "Memory allocation failed");
131132

132-
perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size);
133+
perf_test_setup_vcpus(vm, nr_vcpus, guest_percpu_mem_size,
134+
p->partition_vcpu_memory_access);
133135

134136
sync_global_to_guest(vm, perf_test_args);
135137

@@ -240,7 +242,7 @@ static void help(char *name)
240242
{
241243
puts("");
242244
printf("usage: %s [-h] [-i iterations] [-p offset] "
243-
"[-m mode] [-b vcpu bytes] [-v vcpus]\n", name);
245+
"[-m mode] [-b vcpu bytes] [-v vcpus] [-o]\n", name);
244246
puts("");
245247
printf(" -i: specify iteration counts (default: %"PRIu64")\n",
246248
TEST_HOST_LOOP_N);
@@ -255,6 +257,8 @@ static void help(char *name)
255257
" 1/<fraction of pages to write>.\n"
256258
" (default: 1 i.e. all pages are written to.)\n");
257259
printf(" -v: specify the number of vCPUs to run.\n");
260+
printf(" -o: Overlap guest memory accesses instead of partitioning\n"
261+
" them into a separate region of memory for each vCPU.\n");
258262
puts("");
259263
exit(0);
260264
}
@@ -265,6 +269,7 @@ int main(int argc, char *argv[])
265269
struct test_params p = {
266270
.iterations = TEST_HOST_LOOP_N,
267271
.wr_fract = 1,
272+
.partition_vcpu_memory_access = true,
268273
};
269274
int opt;
270275

@@ -275,7 +280,7 @@ int main(int argc, char *argv[])
275280

276281
guest_modes_append_default();
277282

278-
while ((opt = getopt(argc, argv, "hi:p:m:b:f:v:")) != -1) {
283+
while ((opt = getopt(argc, argv, "hi:p:m:b:f:v:o")) != -1) {
279284
switch (opt) {
280285
case 'i':
281286
p.iterations = atoi(optarg);
@@ -299,6 +304,9 @@ int main(int argc, char *argv[])
299304
TEST_ASSERT(nr_vcpus > 0 && nr_vcpus <= max_vcpus,
300305
"Invalid number of vcpus, must be between 1 and %d", max_vcpus);
301306
break;
307+
case 'o':
308+
p.partition_vcpu_memory_access = false;
309+
break;
302310
case 'h':
303311
default:
304312
help(argv[0]);

tools/testing/selftests/kvm/include/perf_test_util.h

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,8 @@ extern uint64_t guest_test_phys_mem;
4646
struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
4747
uint64_t vcpu_memory_bytes);
4848
void perf_test_destroy_vm(struct kvm_vm *vm);
49-
void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes);
49+
void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus,
50+
uint64_t vcpu_memory_bytes,
51+
bool partition_vcpu_memory_access);
5052

5153
#endif /* SELFTEST_KVM_PERF_TEST_UTIL_H */

tools/testing/selftests/kvm/lib/perf_test_util.c

Lines changed: 18 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -112,7 +112,9 @@ void perf_test_destroy_vm(struct kvm_vm *vm)
112112
kvm_vm_free(vm);
113113
}
114114

115-
void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_bytes)
115+
void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus,
116+
uint64_t vcpu_memory_bytes,
117+
bool partition_vcpu_memory_access)
116118
{
117119
vm_paddr_t vcpu_gpa;
118120
struct perf_test_vcpu_args *vcpu_args;
@@ -122,13 +124,22 @@ void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus, uint64_t vcpu_memory_by
122124
vcpu_args = &perf_test_args.vcpu_args[vcpu_id];
123125

124126
vcpu_args->vcpu_id = vcpu_id;
125-
vcpu_args->gva = guest_test_virt_mem +
126-
(vcpu_id * vcpu_memory_bytes);
127-
vcpu_args->pages = vcpu_memory_bytes /
128-
perf_test_args.guest_page_size;
127+
if (partition_vcpu_memory_access) {
128+
vcpu_args->gva = guest_test_virt_mem +
129+
(vcpu_id * vcpu_memory_bytes);
130+
vcpu_args->pages = vcpu_memory_bytes /
131+
perf_test_args.guest_page_size;
132+
vcpu_gpa = guest_test_phys_mem +
133+
(vcpu_id * vcpu_memory_bytes);
134+
} else {
135+
vcpu_args->gva = guest_test_virt_mem;
136+
vcpu_args->pages = (vcpus * vcpu_memory_bytes) /
137+
perf_test_args.guest_page_size;
138+
vcpu_gpa = guest_test_phys_mem;
139+
}
129140

130-
vcpu_gpa = guest_test_phys_mem + (vcpu_id * vcpu_memory_bytes);
131141
pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
132-
vcpu_id, vcpu_gpa, vcpu_gpa + vcpu_memory_bytes);
142+
vcpu_id, vcpu_gpa, vcpu_gpa +
143+
(vcpu_args->pages * perf_test_args.guest_page_size));
133144
}
134145
}

0 commit comments

Comments (0)