@@ -7106,6 +7106,19 @@ static void mmu_destroy_caches(void)
 	kmem_cache_destroy(mmu_page_header_cache);
 }
 
+static void kvm_wake_nx_recovery_thread(struct kvm *kvm)
+{
+	/*
+	 * The NX recovery thread is spawned on-demand at the first KVM_RUN and
+	 * may not be valid even though the VM is globally visible.  Do nothing,
+	 * as such a VM can't have any possible NX huge pages.
+	 */
+	struct vhost_task *nx_thread = READ_ONCE(kvm->arch.nx_huge_page_recovery_thread);
+
+	if (nx_thread)
+		vhost_task_wake(nx_thread);
+}
+
 static int get_nx_huge_pages(char *buffer, const struct kernel_param *kp)
 {
 	if (nx_hugepage_mitigation_hard_disabled)
@@ -7166,7 +7179,7 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
 			kvm_mmu_zap_all_fast(kvm);
 			mutex_unlock(&kvm->slots_lock);
 
-			vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);
+			kvm_wake_nx_recovery_thread(kvm);
 		}
 		mutex_unlock(&kvm_lock);
 	}
@@ -7312,7 +7325,7 @@ static int set_nx_huge_pages_recovery_param(const char *val, const struct kernel_param *kp)
 		mutex_lock(&kvm_lock);
 
 		list_for_each_entry(kvm, &vm_list, vm_list)
-			vhost_task_wake(kvm->arch.nx_huge_page_recovery_thread);
+			kvm_wake_nx_recovery_thread(kvm);
 
 		mutex_unlock(&kvm_lock);
 	}
@@ -7448,14 +7461,20 @@ static void kvm_mmu_start_lpage_recovery(struct once *once)
 {
 	struct kvm_arch *ka = container_of(once, struct kvm_arch, nx_once);
 	struct kvm *kvm = container_of(ka, struct kvm, arch);
+	struct vhost_task *nx_thread;
 
 	kvm->arch.nx_huge_page_last = get_jiffies_64();
-	kvm->arch.nx_huge_page_recovery_thread = vhost_task_create(
-		kvm_nx_huge_page_recovery_worker, kvm_nx_huge_page_recovery_worker_kill,
-		kvm, "kvm-nx-lpage-recovery");
+	nx_thread = vhost_task_create(kvm_nx_huge_page_recovery_worker,
+				      kvm_nx_huge_page_recovery_worker_kill,
+				      kvm, "kvm-nx-lpage-recovery");
 
-	if (kvm->arch.nx_huge_page_recovery_thread)
-		vhost_task_start(kvm->arch.nx_huge_page_recovery_thread);
+	if (!nx_thread)
+		return;
+
+	vhost_task_start(nx_thread);
+
+	/* Make the task visible only once it is fully started. */
+	WRITE_ONCE(kvm->arch.nx_huge_page_recovery_thread, nx_thread);
 }
 
 int kvm_mmu_post_init_vm(struct kvm *kvm)