@@ -179,6 +179,28 @@ int kvm_arch_hardware_enable(void)
 static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);
 
+static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
+{
+	u8 delta_idx = 0;
+
+	/*
+	 * The TOD jumps by delta, we have to compensate this by adding
+	 * -delta to the epoch.
+	 */
+	delta = -delta;
+
+	/* sign-extension - we're adding to signed values below */
+	if ((s64)delta < 0)
+		delta_idx = -1;
+
+	scb->epoch += delta;
+	if (scb->ecd & ECD_MEF) {
+		scb->epdx += delta_idx;
+		if (scb->epoch < delta)
+			scb->epdx += 1;
+	}
+}
+
 /*
  * This callback is executed during stop_machine(). All CPUs are therefore
  * temporarily stopped. In order not to change guest behavior, we have to
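
Note on the helper added above: with the multiple-epoch facility enabled (ECD_MEF set in the SIE control block), the guest epoch is the pair (epdx:epoch), an epoch index byte on top of the 64-bit epoch. kvm_clock_sync_scb adds -delta to that pair by sign-extending the adjustment into the index byte and propagating the carry out of the 64-bit addition by hand; the scb->epoch < delta check detects the unsigned wrap. A minimal, self-contained sketch of the same arithmetic on plain host integers follows - the function and variable names are illustrative only, not kernel identifiers:

#include <stdint.h>
#include <stdio.h>

/* Sketch: add a signed 64-bit adjustment to an (epdx:epoch) pair the way
 * kvm_clock_sync_scb does - sign-extend the adjustment into the high byte
 * and propagate the carry out of the low 64-bit addition manually. */
static void adjust_epoch(uint64_t *epoch, uint8_t *epdx, uint64_t delta)
{
	uint8_t delta_idx = 0;

	delta = -delta;			/* compensate a forward TOD jump */
	if ((int64_t)delta < 0)		/* sign-extension of the adjustment */
		delta_idx = -1;

	*epoch += delta;
	*epdx += delta_idx;
	if (*epoch < delta)		/* unsigned wrap -> carry into epdx */
		*epdx += 1;
}

int main(void)
{
	uint64_t epoch = 5;
	uint8_t epdx = 0;

	adjust_epoch(&epoch, &epdx, 10);	/* TOD jumped forward by 10 */
	/* Prints epoch=fffffffffffffffb epdx=ff, i.e. the pair now encodes -5. */
	printf("epoch=%llx epdx=%x\n", (unsigned long long)epoch, epdx);
	return 0;
}
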
@@ -194,13 +216,17 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
 	unsigned long long *delta = v;
 
 	list_for_each_entry(kvm, &vm_list, vm_list) {
-		kvm->arch.epoch -= *delta;
 		kvm_for_each_vcpu(i, vcpu, kvm) {
-			vcpu->arch.sie_block->epoch -= *delta;
+			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
+			if (i == 0) {
+				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
+				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
+			}
 			if (vcpu->arch.cputm_enabled)
 				vcpu->arch.cputm_start += *delta;
 			if (vcpu->arch.vsie_block)
-				vcpu->arch.vsie_block->epoch -= *delta;
+				kvm_clock_sync_scb(vcpu->arch.vsie_block,
+						   *delta);
 		}
 	}
 	return NOTIFY_OK;
@@ -902,12 +928,9 @@ static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
 	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
 		return -EFAULT;
 
-	if (test_kvm_facility(kvm, 139))
-		kvm_s390_set_tod_clock_ext(kvm, &gtod);
-	else if (gtod.epoch_idx == 0)
-		kvm_s390_set_tod_clock(kvm, gtod.tod);
-	else
+	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
 		return -EINVAL;
+	kvm_s390_set_tod_clock(kvm, &gtod);
 
 	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
 		 gtod.epoch_idx, gtod.tod);
@@ -932,13 +955,14 @@ static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
 
 static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
 {
-	u64 gtod;
+	struct kvm_s390_vm_tod_clock gtod = { 0 };
 
-	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
+	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
+			   sizeof(gtod.tod)))
 		return -EFAULT;
 
-	kvm_s390_set_tod_clock(kvm, gtod);
-	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
+	kvm_s390_set_tod_clock(kvm, &gtod);
+	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
 	return 0;
 }
 
@@ -2389,6 +2413,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 	mutex_lock(&vcpu->kvm->lock);
 	preempt_disable();
 	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
+	vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
 	preempt_enable();
 	mutex_unlock(&vcpu->kvm->lock);
 	if (!kvm_is_ucontrol(vcpu->kvm)) {
@@ -3021,8 +3046,8 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
-				const struct kvm_s390_vm_tod_clock *gtod)
+void kvm_s390_set_tod_clock(struct kvm *kvm,
+			    const struct kvm_s390_vm_tod_clock *gtod)
 {
 	struct kvm_vcpu *vcpu;
 	struct kvm_s390_tod_clock_ext htod;
@@ -3034,10 +3059,12 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
 	get_tod_clock_ext((char *)&htod);
 
 	kvm->arch.epoch = gtod->tod - htod.tod;
-	kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
-
-	if (kvm->arch.epoch > gtod->tod)
-		kvm->arch.epdx -= 1;
+	kvm->arch.epdx = 0;
+	if (test_kvm_facility(kvm, 139)) {
+		kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
+		if (kvm->arch.epoch > gtod->tod)
+			kvm->arch.epdx -= 1;
+	}
 
 	kvm_s390_vcpu_block_all(kvm);
 	kvm_for_each_vcpu(i, vcpu, kvm) {
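
The epoch pair computed in this hunk is the guest TOD minus the current host TOD: the low 64 bits come directly from gtod->tod - htod.tod, and when the multiple-epoch facility (STFLE facility 139) is available the epoch index is the difference of the two index bytes, minus a borrow whenever the low 64-bit subtraction wrapped (detected by kvm->arch.epoch > gtod->tod). A small stand-alone sketch of that borrow rule, with illustrative names only, not kernel identifiers:

#include <stdint.h>
#include <stdio.h>

/* Sketch: subtract the host (idx:tod) pair from the guest (idx:tod) pair,
 * borrowing from the index byte when the low 64-bit subtraction wraps,
 * mirroring the epoch/epdx computation in kvm_s390_set_tod_clock(). */
static void compute_epoch(uint8_t guest_idx, uint64_t guest_tod,
			  uint8_t host_idx, uint64_t host_tod,
			  uint8_t *epdx, uint64_t *epoch)
{
	*epoch = guest_tod - host_tod;
	*epdx = guest_idx - host_idx;
	if (*epoch > guest_tod)		/* low word wrapped -> borrow */
		*epdx -= 1;
}

int main(void)
{
	uint8_t epdx;
	uint64_t epoch;

	/* Guest TOD slightly behind the host TOD: expect a negative epoch,
	 * i.e. epdx = 0xff and epoch just below 2^64. */
	compute_epoch(0, 100, 0, 200, &epdx, &epoch);
	printf("epdx=%x epoch=%llx\n", epdx, (unsigned long long)epoch);
	return 0;
}
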
@@ -3050,22 +3077,6 @@ void kvm_s390_set_tod_clock_ext(struct kvm *kvm,
 	mutex_unlock(&kvm->lock);
 }
 
-void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
-{
-	struct kvm_vcpu *vcpu;
-	int i;
-
-	mutex_lock(&kvm->lock);
-	preempt_disable();
-	kvm->arch.epoch = tod - get_tod_clock();
-	kvm_s390_vcpu_block_all(kvm);
-	kvm_for_each_vcpu(i, vcpu, kvm)
-		vcpu->arch.sie_block->epoch = kvm->arch.epoch;
-	kvm_s390_vcpu_unblock_all(kvm);
-	preempt_enable();
-	mutex_unlock(&kvm->lock);
-}
-
 /**
  * kvm_arch_fault_in_page - fault-in guest page if necessary
  * @vcpu: The corresponding virtual cpu