Skip to content

Commit d90b308

Browse files
Zuul and openstack-gerrit
authored and committed
Merge "Clean up when queued live migration aborted"
2 parents f41be79 + 219520d commit d90b308

File tree

3 files changed

+69
-39
lines changed

3 files changed

+69
-39
lines changed

nova/compute/manager.py

Lines changed: 35 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -8699,15 +8699,41 @@ def live_migration_abort(self, context, instance, migration_id):
86998699
migration, future = (
87008700
self._waiting_live_migrations.pop(instance.uuid))
87018701
if future and future.cancel():
8702-
# If we got here, we've successfully aborted the queued
8703-
# migration and _do_live_migration won't run so we need
8704-
# to set the migration status to cancelled and send the
8705-
# notification. If Future.cancel() fails, it means
8706-
# _do_live_migration is running and the migration status
8707-
# is preparing, and _do_live_migration() itself will attempt
8708-
# to pop the queued migration, hit a KeyError, and rollback,
8709-
# set the migration to cancelled and send the
8710-
# live.migration.abort.end notification.
8702+
# If we got here, we've successfully dropped a queued
8703+
# migration from the queue, so _do_live_migration won't run
8704+
# and we only need to revert minor changes introduced by Nova
8705+
# control plane (port bindings, resource allocations and
8706+
# instance's PCI devices), restore VM's state, set the
8707+
# migration's status to cancelled and send the notification.
8708+
# If Future.cancel() fails, it means _do_live_migration is
8709+
# running and the migration status is preparing, and
8710+
# _do_live_migration() itself will attempt to pop the queued
8711+
# migration, hit a KeyError, and rollback, set the migration
8712+
# to cancelled and send the live.migration.abort.end
8713+
# notification.
8714+
self._revert_allocation(context, instance, migration)
8715+
try:
8716+
# This call will delete any inactive destination host
8717+
# port bindings.
8718+
self.network_api.setup_networks_on_host(
8719+
context, instance, host=migration.dest_compute,
8720+
teardown=True)
8721+
except exception.PortBindingDeletionFailed as e:
8722+
# Removing the inactive port bindings from the destination
8723+
# host is not critical so just log an error but don't fail.
8724+
LOG.error(
8725+
'Network cleanup failed for destination host %s '
8726+
'during live migration rollback. You may need to '
8727+
'manually clean up resources in the network service. '
8728+
'Error: %s', migration.dest_compute, str(e))
8729+
except Exception:
8730+
with excutils.save_and_reraise_exception():
8731+
LOG.exception(
8732+
'An error occurred while cleaning up networking '
8733+
'during live migration rollback.',
8734+
instance=instance)
8735+
instance.task_state = None
8736+
instance.save(expected_task_state=[task_states.MIGRATING])
87118737
self._set_migration_status(migration, 'cancelled')
87128738
except KeyError:
87138739
migration = objects.Migration.get_by_id(context, migration_id)

nova/tests/functional/libvirt/test_live_migration.py

Lines changed: 17 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -117,16 +117,12 @@ def test_queued_live_migration_abort_vm_status(self):
117117
'/servers/%s/migrations/%s' % (self.server_b['id'],
118118
serverb_migration['id']))
119119
self._wait_for_migration_status(self.server_b, ['cancelled'])
120-
# Unlock live migrations and confirm that server_a becomes
121-
# active again after successful live migration
120+
# Unlock live migrations and confirm that both servers become
121+
# active again after successful (server_a) and aborted
122+
# (server_b) live migrations
122123
self.lock_live_migration.release()
123124
self._wait_for_state_change(self.server_a, 'ACTIVE')
124-
125-
# FIXME(artom) Assert the server_b never comes out of 'MIGRATING'
126-
self.assertRaises(
127-
AssertionError,
128-
self._wait_for_state_change, self.server_b, 'ACTIVE')
129-
self._wait_for_state_change(self.server_b, 'MIGRATING')
125+
self._wait_for_state_change(self.server_b, 'ACTIVE')
130126

131127

132128
class LiveMigrationQueuedAbortTestLeftoversRemoved(LiveMigrationWithLockBase):
@@ -182,36 +178,28 @@ def test_queued_live_migration_abort_leftovers_removed(self):
182178
'/servers/%s/migrations/%s' % (self.server_b['id'],
183179
migration_server_b['id']))
184180
self._wait_for_migration_status(self.server_b, ['cancelled'])
185-
# Unlock live migrations and confirm that server_a becomes
186-
# active again after successful live migration
181+
# Unlock live migrations and confirm that both servers become
182+
# active again after successful (server_a) and aborted
183+
# (server_b) live migrations
187184
self.lock_live_migration.release()
188185
self._wait_for_state_change(self.server_a, 'ACTIVE')
189186
self._wait_for_migration_status(self.server_a, ['completed'])
190-
# FIXME(astupnikov) Assert the server_b never comes out of 'MIGRATING'
191-
# This should be fixed after bug #1949808 is addressed
192-
self._wait_for_state_change(self.server_b, 'MIGRATING')
187+
self._wait_for_state_change(self.server_b, 'ACTIVE')
193188

194-
# FIXME(astupnikov) Because of bug #1960412 allocations for aborted
195-
# queued live migration (server_b) would not be removed. Allocations
196-
# for completed live migration (server_a) should be empty.
189+
# Allocations for both successful (server_a) and aborted queued live
190+
# migration (server_b) should be removed.
197191
allocations_server_a_migration = self.placement.get(
198192
'/allocations/%s' % migration_server_a['uuid']
199193
).body['allocations']
200194
self.assertEqual({}, allocations_server_a_migration)
201195
allocations_server_b_migration = self.placement.get(
202196
'/allocations/%s' % migration_server_b['uuid']
203197
).body['allocations']
204-
src_uuid = self.api.api_get(
205-
'os-hypervisors?hypervisor_hostname_pattern=%s' %
206-
self.src_hostname).body['hypervisors'][0]['id']
207-
self.assertIn(src_uuid, allocations_server_b_migration)
208-
209-
# FIXME(astupnikov) Because of bug #1960412 INACTIVE port binding
210-
# on destination host would not be removed when queued live migration
211-
# is aborted, so 2 port bindings would exist for server_b port from
212-
# Neutron's perspective.
213-
# server_a should be migrated to dest compute, server_b should still
214-
# be hosted by src compute.
198+
self.assertEqual({}, allocations_server_b_migration)
199+
200+
# INACTIVE port binding on destination host should be removed when
201+
# queued live migration is aborted, so only 1 port binding would
202+
# exist for ports attached to both servers.
215203
port_binding_server_a = copy.deepcopy(
216204
self.neutron._port_bindings[self.neutron.port_1['id']]
217205
)
@@ -220,4 +208,5 @@ def test_queued_live_migration_abort_leftovers_removed(self):
220208
port_binding_server_b = copy.deepcopy(
221209
self.neutron._port_bindings[self.neutron.port_2['id']]
222210
)
223-
self.assertEqual(2, len(port_binding_server_b))
211+
self.assertEqual(1, len(port_binding_server_b))
212+
self.assertNotIn('dest', port_binding_server_b)

nova/tests/unit/compute/test_compute_mgr.py

Lines changed: 17 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10420,19 +10420,34 @@ def test_live_migration_abort(self, mock_notify_action, mock_driver,
1042010420
action='live_migration_abort', phase='end')]
1042110421
)
1042210422

10423+
@mock.patch.object(objects.Instance, 'save')
10424+
@mock.patch.object(manager.ComputeManager, '_revert_allocation')
1042310425
@mock.patch.object(manager.ComputeManager, '_notify_about_instance_usage')
1042410426
@mock.patch.object(objects.Migration, 'get_by_id')
1042510427
@mock.patch('nova.compute.utils.notify_about_instance_action')
1042610428
def test_live_migration_abort_queued(self, mock_notify_action,
10427-
mock_get_migration, mock_notify):
10429+
mock_get_migration, mock_notify,
10430+
mock_revert_allocations,
10431+
mock_instance_save):
1042810432
instance = objects.Instance(id=123, uuid=uuids.instance)
1042910433
migration = self._get_migration(10, 'queued', 'live-migration')
10434+
migration.dest_compute = uuids.dest
10435+
migration.dest_node = uuids.dest
1043010436
migration.save = mock.MagicMock()
1043110437
mock_get_migration.return_value = migration
1043210438
fake_future = mock.MagicMock()
1043310439
self.compute._waiting_live_migrations[instance.uuid] = (
1043410440
migration, fake_future)
10435-
self.compute.live_migration_abort(self.context, instance, migration.id)
10441+
with mock.patch.object(
10442+
self.compute.network_api,
10443+
'setup_networks_on_host') as mock_setup_net:
10444+
self.compute.live_migration_abort(
10445+
self.context, instance, migration.id)
10446+
mock_setup_net.assert_called_once_with(
10447+
self.context, instance, host=migration.dest_compute,
10448+
teardown=True)
10449+
mock_revert_allocations.assert_called_once_with(
10450+
self.context, instance, migration)
1043610451
mock_notify.assert_has_calls(
1043710452
[mock.call(self.context, instance,
1043810453
'live.migration.abort.start'),

0 commit comments

Comments (0)