@@ -6211,6 +6211,7 @@ def stupid(*args, **kwargs):
             # cleanup
             db.instance_destroy(c, instance['uuid'])
 
+    @mock.patch.object(cinder.API, 'attachment_delete')
     @mock.patch.object(fake.FakeDriver, 'get_instance_disk_info')
     @mock.patch.object(compute_rpcapi.ComputeAPI, 'pre_live_migration')
     @mock.patch.object(objects.ComputeNode,
@@ -6223,7 +6224,8 @@ def stupid(*args, **kwargs):
     def test_live_migration_exception_rolls_back(self, mock_save,
                                                  mock_rollback, mock_remove,
                                                  mock_get_bdms,
-                                                 mock_get_node, mock_pre, mock_get_disk):
+                                                 mock_get_node, mock_pre, mock_get_disk,
+                                                 mock_attachment_delete):
         # Confirm exception when pre_live_migration fails.
         c = context.get_admin_context()
 
@@ -6241,27 +6243,34 @@ def test_live_migration_exception_rolls_back(self, mock_save,
         # All the fake BDMs we've generated, in order
         fake_bdms = []
 
+        # A list of the attachment_ids returned by gen_fake_bdms
+        fake_attachment_ids = []
+
         def gen_fake_bdms(obj, instance):
-            # generate a unique fake connection_info every time we're called,
-            # simulating connection_info being mutated elsewhere.
+            # generate a unique fake connection_info and attachment_id every
+            # time we're called, simulating attachment_id and connection_info
+            # being mutated elsewhere.
             bdms = objects.BlockDeviceMappingList(objects=[
                 objects.BlockDeviceMapping(
-                    **fake_block_device.FakeDbBlockDeviceDict(
+                    **fake_block_device.AnonFakeDbBlockDeviceDict(
                         {'volume_id': uuids.volume_id_1,
+                         'attachment_id': uuidutils.generate_uuid(),
                          'source_type': 'volume',
                          'connection_info':
                              jsonutils.dumps(uuidutils.generate_uuid()),
                          'destination_type': 'volume'})),
                 objects.BlockDeviceMapping(
-                    **fake_block_device.FakeDbBlockDeviceDict(
+                    **fake_block_device.AnonFakeDbBlockDeviceDict(
                         {'volume_id': uuids.volume_id_2,
+                         'attachment_id': uuidutils.generate_uuid(),
                          'source_type': 'volume',
                          'connection_info':
                              jsonutils.dumps(uuidutils.generate_uuid()),
                          'destination_type': 'volume'}))
             ])
             for bdm in bdms:
                 bdm.save = mock.Mock()
+                fake_attachment_ids.append(bdm.attachment_id)
             fake_bdms.append(bdms)
             return bdms
 
@@ -6312,10 +6321,13 @@ def do_it(mock_client, mock_setup):
         # BDMs with unique connection_info every time it's called. These are
         # stored in fake_bdms in the order they were generated. We assert here
         # that the last BDMs generated (in _rollback_live_migration) now have
-        # the same connection_info as the first BDMs generated (before calling
-        # pre_live_migration), and that we saved them.
+        # the same connection_info and attachment_id as the first BDMs
+        # generated (before calling pre_live_migration), and that we saved
+        # them.
         self.assertGreater(len(fake_bdms), 1)
         for source_bdm, final_bdm in zip(fake_bdms[0], fake_bdms[-1]):
+            self.assertEqual(source_bdm.attachment_id,
+                             final_bdm.attachment_id)
             self.assertEqual(source_bdm.connection_info,
                              final_bdm.connection_info)
             final_bdm.save.assert_called()
@@ -6328,6 +6340,11 @@ def do_it(mock_client, mock_setup):
             destroy_disks=True,
             migrate_data=test.MatchType(
                 migrate_data_obj.XenapiLiveMigrateData))
+        # Assert that the final attachment_ids returned by
+        # BlockDeviceMappingList.get_by_instance_uuid are then deleted.
+        mock_attachment_delete.assert_has_calls([
+            mock.call(c, fake_attachment_ids.pop()),
+            mock.call(c, fake_attachment_ids.pop())], any_order=True)
 
     @mock.patch.object(compute_rpcapi.ComputeAPI, 'pre_live_migration')
     @mock.patch.object(compute_rpcapi.ComputeAPI,
0 commit comments