 #include <linux/bitmap.h>
 #include <linux/sched/signal.h>
 
+#include <linux/string.h>
 #include <asm/asm-offsets.h>
 #include <asm/lowcore.h>
 #include <asm/stp.h>
@@ -750,6 +751,129 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
 	return 0;
 }
 
+static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
+{
+	int cx;
+	struct kvm_vcpu *vcpu;
+
+	kvm_for_each_vcpu(cx, vcpu, kvm)
+		kvm_s390_sync_request(req, vcpu);
+}
+
+/*
+ * Must be called with kvm->srcu held to avoid races on memslots, and with
+ * kvm->lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
+ */
+static int kvm_s390_vm_start_migration(struct kvm *kvm)
+{
+	struct kvm_s390_migration_state *mgs;
+	struct kvm_memory_slot *ms;
+	/* should be the only one */
+	struct kvm_memslots *slots;
+	unsigned long ram_pages;
+	int slotnr;
+
+	/* migration mode already enabled */
+	if (kvm->arch.migration_state)
+		return 0;
+
+	slots = kvm_memslots(kvm);
+	if (!slots || !slots->used_slots)
+		return -EINVAL;
+
+	mgs = kzalloc(sizeof(*mgs), GFP_KERNEL);
+	if (!mgs)
+		return -ENOMEM;
+	kvm->arch.migration_state = mgs;
+
+	if (kvm->arch.use_cmma) {
+		/*
+		 * Get the last slot. They should be sorted by base_gfn, so the
+		 * last slot is also the one at the end of the address space.
+		 * We have verified above that at least one slot is present.
+		 */
+		ms = slots->memslots + slots->used_slots - 1;
+		/* round up so we only use full longs */
+		ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
+		/* allocate enough bytes to store all the bits */
+		mgs->pgste_bitmap = vmalloc(ram_pages / 8);
+		if (!mgs->pgste_bitmap) {
+			kfree(mgs);
+			kvm->arch.migration_state = NULL;
+			return -ENOMEM;
+		}
+
+		mgs->bitmap_size = ram_pages;
+		atomic64_set(&mgs->dirty_pages, ram_pages);
+		/* mark all the pages in active slots as dirty */
+		for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
+			ms = slots->memslots + slotnr;
+			bitmap_set(mgs->pgste_bitmap, ms->base_gfn, ms->npages);
+		}
+
+		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
+	}
+	return 0;
+}
+
+/*
+ * Must be called with kvm->lock to avoid races with ourselves and
+ * kvm_s390_vm_start_migration.
+ */
+static int kvm_s390_vm_stop_migration(struct kvm *kvm)
+{
+	struct kvm_s390_migration_state *mgs;
+
+	/* migration mode already disabled */
+	if (!kvm->arch.migration_state)
+		return 0;
+	mgs = kvm->arch.migration_state;
+	kvm->arch.migration_state = NULL;
+
+	if (kvm->arch.use_cmma) {
+		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
+		vfree(mgs->pgste_bitmap);
+	}
+	kfree(mgs);
+	return 0;
+}
+
+static int kvm_s390_vm_set_migration(struct kvm *kvm,
+				     struct kvm_device_attr *attr)
+{
+	int idx, res = -ENXIO;
+
+	mutex_lock(&kvm->lock);
+	switch (attr->attr) {
+	case KVM_S390_VM_MIGRATION_START:
+		idx = srcu_read_lock(&kvm->srcu);
+		res = kvm_s390_vm_start_migration(kvm);
+		srcu_read_unlock(&kvm->srcu, idx);
+		break;
+	case KVM_S390_VM_MIGRATION_STOP:
+		res = kvm_s390_vm_stop_migration(kvm);
+		break;
+	default:
+		break;
+	}
+	mutex_unlock(&kvm->lock);
+
+	return res;
+}
+
+static int kvm_s390_vm_get_migration(struct kvm *kvm,
+				     struct kvm_device_attr *attr)
+{
+	u64 mig = (kvm->arch.migration_state != NULL);
+
+	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
+		return -ENXIO;
+
+	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
+		return -EFAULT;
+	return 0;
+}
+
 static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 {
 	u8 gtod_high;
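For context on how the new handlers above are reached: on s390, VM attributes are driven from userspace through the KVM_SET_DEVICE_ATTR/KVM_GET_DEVICE_ATTR ioctls on the VM file descriptor, which dispatch into kvm_s390_vm_set_attr()/kvm_s390_vm_get_attr() below. The following is only a minimal userspace sketch and not part of the patch; it assumes an already opened VM file descriptor (vm_fd) and the KVM_S390_VM_MIGRATION* constants added to the uapi headers elsewhere in this series.

	/* Hedged userspace sketch (not part of this patch): toggle and query
	 * the s390 migration mode via the VM device-attribute interface.
	 * vm_fd is assumed to be an open KVM VM file descriptor.
	 */
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int set_migration_mode(int vm_fd, int enable)
	{
		struct kvm_device_attr attr = {
			.group = KVM_S390_VM_MIGRATION,
			.attr  = enable ? KVM_S390_VM_MIGRATION_START
					: KVM_S390_VM_MIGRATION_STOP,
		};

		/* 0 on success, -1 with errno set (e.g. ENOMEM, EINVAL) on failure */
		return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
	}

	static int get_migration_mode(int vm_fd, uint64_t *enabled)
	{
		struct kvm_device_attr attr = {
			.group = KVM_S390_VM_MIGRATION,
			.attr  = KVM_S390_VM_MIGRATION_STATUS,
			.addr  = (uint64_t)(unsigned long)enabled,
		};

		/* the kernel copies a u64 (0 or 1) to attr.addr,
		 * see kvm_s390_vm_get_migration() above */
		return ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
	}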
@@ -1090,6 +1214,9 @@ static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
 	case KVM_S390_VM_CRYPTO:
 		ret = kvm_s390_vm_set_crypto(kvm, attr);
 		break;
+	case KVM_S390_VM_MIGRATION:
+		ret = kvm_s390_vm_set_migration(kvm, attr);
+		break;
 	default:
 		ret = -ENXIO;
 		break;
@@ -1112,6 +1239,9 @@ static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
 	case KVM_S390_VM_CPU_MODEL:
 		ret = kvm_s390_get_cpu_model(kvm, attr);
 		break;
+	case KVM_S390_VM_MIGRATION:
+		ret = kvm_s390_vm_get_migration(kvm, attr);
+		break;
 	default:
 		ret = -ENXIO;
 		break;
@@ -1179,6 +1309,9 @@ static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
 			break;
 		}
 		break;
+	case KVM_S390_VM_MIGRATION:
+		ret = 0;
+		break;
 	default:
 		ret = -ENXIO;
 		break;
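A note on the kvm_s390_vm_has_attr() change above: reporting 0 for the whole KVM_S390_VM_MIGRATION group lets userspace probe for the feature before using it. Continuing the hypothetical sketch from earlier (same headers and vm_fd assumption):

	/* Sketch (not part of this patch): KVM_HAS_DEVICE_ATTR returns 0 when
	 * the kernel recognizes the attribute group, -1 with errno ENXIO otherwise.
	 */
	static int has_migration_attr(int vm_fd)
	{
		struct kvm_device_attr attr = {
			.group = KVM_S390_VM_MIGRATION,
			.attr  = KVM_S390_VM_MIGRATION_START,
		};

		return ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0;
	}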
@@ -1633,6 +1766,10 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	kvm_s390_destroy_adapters(kvm);
 	kvm_s390_clear_float_irqs(kvm);
 	kvm_s390_vsie_destroy(kvm);
+	if (kvm->arch.migration_state) {
+		vfree(kvm->arch.migration_state->pgste_bitmap);
+		kfree(kvm->arch.migration_state);
+	}
 	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
 }
 
@@ -1977,7 +2114,6 @@ int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
 	if (!vcpu->arch.sie_block->cbrlo)
 		return -ENOMEM;
 
-	vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
 	vcpu->arch.sie_block->ecb2 &= ~ECB2_PFMFI;
 	return 0;
 }
@@ -2489,6 +2625,27 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 		goto retry;
 	}
 
+	if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
+		/*
+		 * Disable CMMA virtualization; we will emulate the ESSA
+		 * instruction manually, in order to provide additional
+		 * functionalities needed for live migration.
+		 */
+		vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
+		goto retry;
+	}
+
+	if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
+		/*
+		 * Re-enable CMMA virtualization if CMMA is available and
+		 * was used.
+		 */
+		if ((vcpu->kvm->arch.use_cmma) &&
+		    (vcpu->kvm->mm->context.use_cmma))
+			vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
+		goto retry;
+	}
+
 	/* nothing to do, just clear the request */
 	kvm_clear_request(KVM_REQ_UNHALT, vcpu);
 