
Commit c560bc9

Maxim Levitsky authored and bonzini committed
x86: KVM: SVM: use kvm_lock_all_vcpus instead of a custom implementation
Use kvm_lock_all_vcpus instead of sev's own implementation.

Because kvm_lock_all_vcpus uses the _nest_lock feature of lockdep,
which ignores subclasses, there is no longer a need to use separate
subclasses for source and target VMs.

No functional change intended.

Suggested-by: Paolo Bonzini <[email protected]>
Signed-off-by: Maxim Levitsky <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Message-ID: <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
1 parent e4a454c commit c560bc9
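
For readers without the generic helper at hand, here is a minimal sketch of what kvm_lock_all_vcpus does, assuming the implementation this series introduces in virt/kvm/kvm_main.c. The body below is an approximation, not a quote from this diff; in particular, the use of mutex_lock_killable_nest_lock() with kvm->lock as the outer "nest lock" is inferred from the commit message and may differ in detail from the real helper.

/*
 * Sketch: lock every vCPU of @kvm, assuming kvm->lock is already
 * held.  Passing kvm->lock as the lockdep "nest lock" tells lockdep
 * that all vcpu->mutex instances are serialized by it, so the
 * per-subclass annotations removed in the diff below are unneeded.
 */
int kvm_lock_all_vcpus(struct kvm *kvm)
{
        struct kvm_vcpu *vcpu;
        unsigned long i, j;
        int r;

        lockdep_assert_held(&kvm->lock);

        kvm_for_each_vcpu(i, vcpu, kvm) {
                r = mutex_lock_killable_nest_lock(&vcpu->mutex, &kvm->lock);
                if (r)
                        goto out_unlock;
        }
        return 0;

out_unlock:
        /* Unwind the vCPU mutexes taken before the failure. */
        kvm_for_each_vcpu(j, vcpu, kvm) {
                if (i == j)
                        break;
                mutex_unlock(&vcpu->mutex);
        }
        return r;
}

Under this scheme, locking two VMs' vCPUs back to back, as the migration path below does, needs no source/target distinction: each VM's vCPU mutexes nest under that VM's own kvm->lock, which sev_lock_two_vms() has already taken.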

1 file changed

arch/x86/kvm/svm/sev.c

Lines changed: 4 additions & 68 deletions
@@ -1882,70 +1882,6 @@ static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
         atomic_set_release(&src_sev->migration_in_progress, 0);
 }
 
-/* vCPU mutex subclasses. */
-enum sev_migration_role {
-        SEV_MIGRATION_SOURCE = 0,
-        SEV_MIGRATION_TARGET,
-        SEV_NR_MIGRATION_ROLES,
-};
-
-static int sev_lock_vcpus_for_migration(struct kvm *kvm,
-                                        enum sev_migration_role role)
-{
-        struct kvm_vcpu *vcpu;
-        unsigned long i, j;
-
-        kvm_for_each_vcpu(i, vcpu, kvm) {
-                if (mutex_lock_killable_nested(&vcpu->mutex, role))
-                        goto out_unlock;
-
-#ifdef CONFIG_PROVE_LOCKING
-                if (!i)
-                        /*
-                         * Reset the role to one that avoids colliding with
-                         * the role used for the first vcpu mutex.
-                         */
-                        role = SEV_NR_MIGRATION_ROLES;
-                else
-                        mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
-#endif
-        }
-
-        return 0;
-
-out_unlock:
-
-        kvm_for_each_vcpu(j, vcpu, kvm) {
-                if (i == j)
-                        break;
-
-#ifdef CONFIG_PROVE_LOCKING
-                if (j)
-                        mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
-#endif
-
-                mutex_unlock(&vcpu->mutex);
-        }
-        return -EINTR;
-}
-
-static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
-{
-        struct kvm_vcpu *vcpu;
-        unsigned long i;
-        bool first = true;
-
-        kvm_for_each_vcpu(i, vcpu, kvm) {
-                if (first)
-                        first = false;
-                else
-                        mutex_acquire(&vcpu->mutex.dep_map,
-                                      SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);
-
-                mutex_unlock(&vcpu->mutex);
-        }
-}
-
 static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
 {
         struct kvm_sev_info *dst = to_kvm_sev_info(dst_kvm);
@@ -2083,10 +2019,10 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
                 charged = true;
         }
 
-        ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
+        ret = kvm_lock_all_vcpus(kvm);
         if (ret)
                 goto out_dst_cgroup;
-        ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
+        ret = kvm_lock_all_vcpus(source_kvm);
         if (ret)
                 goto out_dst_vcpu;
 
@@ -2100,9 +2036,9 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
         ret = 0;
 
 out_source_vcpu:
-        sev_unlock_vcpus_for_migration(source_kvm);
+        kvm_unlock_all_vcpus(source_kvm);
 out_dst_vcpu:
-        sev_unlock_vcpus_for_migration(kvm);
+        kvm_unlock_all_vcpus(kvm);
 out_dst_cgroup:
         /* Operates on the source on success, on the destination on failure. */
         if (charged)
