@@ -454,6 +454,98 @@ static void __init sev_map_percpu_data(void)
 }
 
 #ifdef CONFIG_SMP
+#define KVM_IPI_CLUSTER_SIZE    (2 * BITS_PER_LONG)
+
+static void __send_ipi_mask(const struct cpumask *mask, int vector)
+{
+        unsigned long flags;
+        int cpu, apic_id, icr;
+        int min = 0, max = 0;
+#ifdef CONFIG_X86_64
+        __uint128_t ipi_bitmap = 0;
+#else
+        u64 ipi_bitmap = 0;
+#endif
+
+        if (cpumask_empty(mask))
+                return;
+
+        local_irq_save(flags);
+
+        switch (vector) {
+        default:
+                icr = APIC_DM_FIXED | vector;
+                break;
+        case NMI_VECTOR:
+                icr = APIC_DM_NMI;
+                break;
+        }
+
+        for_each_cpu(cpu, mask) {
+                apic_id = per_cpu(x86_cpu_to_apicid, cpu);
+                if (!ipi_bitmap) {
+                        min = max = apic_id;
+                } else if (apic_id < min && max - apic_id < KVM_IPI_CLUSTER_SIZE) {
+                        ipi_bitmap <<= min - apic_id;
+                        min = apic_id;
+                } else if (apic_id < min + KVM_IPI_CLUSTER_SIZE) {
+                        max = apic_id < max ? max : apic_id;
+                } else {
+                        kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
+                                (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
+                        min = max = apic_id;
+                        ipi_bitmap = 0;
+                }
+                __set_bit(apic_id - min, (unsigned long *)&ipi_bitmap);
+        }
+
+        if (ipi_bitmap) {
+                kvm_hypercall4(KVM_HC_SEND_IPI, (unsigned long)ipi_bitmap,
+                        (unsigned long)(ipi_bitmap >> BITS_PER_LONG), min, icr);
+        }
+
+        local_irq_restore(flags);
+}
+
+static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
+{
+        __send_ipi_mask(mask, vector);
+}
+
+static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
+{
+        unsigned int this_cpu = smp_processor_id();
+        struct cpumask new_mask;
+        const struct cpumask *local_mask;
+
+        cpumask_copy(&new_mask, mask);
+        cpumask_clear_cpu(this_cpu, &new_mask);
+        local_mask = &new_mask;
+        __send_ipi_mask(local_mask, vector);
+}
+
+static void kvm_send_ipi_allbutself(int vector)
+{
+        kvm_send_ipi_mask_allbutself(cpu_online_mask, vector);
+}
+
+static void kvm_send_ipi_all(int vector)
+{
+        __send_ipi_mask(cpu_online_mask, vector);
+}
+
+/*
+ * Set the IPI entry points
+ */
+static void kvm_setup_pv_ipi(void)
+{
+        apic->send_IPI_mask = kvm_send_ipi_mask;
+        apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself;
+        apic->send_IPI_allbutself = kvm_send_ipi_allbutself;
+        apic->send_IPI_all = kvm_send_ipi_all;
+        pr_info("KVM setup pv IPIs\n");
+}
+
 static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
 {
         native_smp_prepare_cpus(max_cpus);
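
A note on the windowing in __send_ipi_mask() above: bit 0 of ipi_bitmap is always anchored at the window base min, the window can slide down (shifting the bitmap up) or grow up to KVM_IPI_CLUSTER_SIZE bits, and each KVM_HC_SEND_IPI hypercall passes the low word, the high word, the base APIC ID and the ICR value. The stand-alone user-space sketch below mirrors that logic so it can be compiled and traced on its own; it assumes GCC/Clang's unsigned __int128, and the APIC-ID list and the flush_window() stub are invented for illustration, not part of the patch:

#include <stdio.h>
#include <stddef.h>
#include <limits.h>

#define BITS_PER_LONG   (CHAR_BIT * sizeof(long))
#define CLUSTER_SIZE    (2 * BITS_PER_LONG)

/* Stand-in for kvm_hypercall4(KVM_HC_SEND_IPI, ...): prints the four
 * arguments the real hypercall would receive. */
static void flush_window(unsigned __int128 bitmap, int min)
{
        printf("KVM_HC_SEND_IPI lo=%#lx hi=%#lx min=%d\n",
               (unsigned long)bitmap,
               (unsigned long)(bitmap >> BITS_PER_LONG), min);
}

int main(void)
{
        /* Hypothetical APIC IDs; 130 falls outside the first 128-bit
         * window and forces an early flush. */
        int ids[] = { 3, 1, 60, 130, 131 };
        unsigned __int128 bitmap = 0;
        int min = 0, max = 0;
        size_t i;

        for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++) {
                int id = ids[i];

                if (!bitmap) {
                        min = max = id;         /* first ID opens the window */
                } else if (id < min && max - id < CLUSTER_SIZE) {
                        bitmap <<= min - id;    /* slide the base down to id */
                        min = id;
                } else if (id < min + CLUSTER_SIZE) {
                        max = id < max ? max : id;
                } else {
                        flush_window(bitmap, min);      /* id does not fit */
                        min = max = id;
                        bitmap = 0;
                }
                bitmap |= (unsigned __int128)1 << (id - min);
        }
        if (bitmap)
                flush_window(bitmap, min);
        return 0;
}

With that input, IDs 3, 1 and 60 batch into a single hypercall (base 1) and 130/131 into a second, which is exactly the multicast batching the patch is after: one exit per 128-wide APIC-ID cluster instead of one exit per destination CPU.
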
@@ -626,6 +718,10 @@ static uint32_t __init kvm_detect(void)
 
 static void __init kvm_apic_init(void)
 {
+#if defined(CONFIG_SMP)
+        if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI))
+                kvm_setup_pv_ipi();
+#endif
 }
 
 static void __init kvm_init_platform(void)
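
The hunk above wires the PV entry points only when the host advertises KVM_FEATURE_PV_SEND_IPI; kvm_para_has_feature() amounts to testing a bit in the KVM paravirtual feature CPUID leaf. The user-space probe below sketches the same check under stated assumptions: the KVM signature leaf is typically 0x40000000 with features in EAX of the next leaf, and the bit position used here (11, per the uapi header this series extends) should be treated as an assumption rather than taken from this diff:

#include <stdio.h>
#include <string.h>
#include <cpuid.h>

#define KVM_CPUID_SIGNATURE     0x40000000u
#define KVM_CPUID_FEATURES      0x40000001u
#define KVM_FEATURE_PV_SEND_IPI 11      /* assumed bit position */

int main(void)
{
        unsigned int eax, ebx, ecx, edx;
        char sig[13] = { 0 };

        /* Hypervisor leaves are above the basic range, so use the raw
         * __cpuid() macro rather than the range-checked __get_cpuid(). */
        __cpuid(KVM_CPUID_SIGNATURE, eax, ebx, ecx, edx);
        memcpy(sig + 0, &ebx, 4);
        memcpy(sig + 4, &ecx, 4);
        memcpy(sig + 8, &edx, 4);
        if (strcmp(sig, "KVMKVMKVM")) {
                puts("not running on KVM");
                return 0;
        }

        __cpuid(KVM_CPUID_FEATURES, eax, ebx, ecx, edx);
        printf("PV send-IPI %s\n",
               (eax & (1u << KVM_FEATURE_PV_SEND_IPI)) ? "available" : "absent");
        return 0;
}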