@@ -12,6 +12,9 @@
 
 import mock
 
+from oslo_utils.fixture import uuidsentinel as uuids
+
+from nova.compute import instance_actions
 from nova import context as nova_context
 from nova import exception
 from nova import objects
@@ -378,13 +381,182 @@ def _resize_and_validate(self, volume_backed=False, stopped=False,
 
         return server, source_rp_uuid, target_rp_uuid, old_flavor, new_flavor
 
+    def _attach_volume_to_server(self, server_id, volume_id):
+        """Attaches the volume to the server and waits for the
+        "instance.volume_attach.end" versioned notification.
+        """
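+        # Note: this assumes the base test class already stubbed out the
+        # notification driver with fake_notifier, otherwise waiting on the
+        # versioned notification below would block until the test times out.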
+        body = {'volumeAttachment': {'volumeId': volume_id}}
+        self.api.api_post(
+            '/servers/%s/os-volume_attachments' % server_id, body)
+        fake_notifier.wait_for_versioned_notifications(
+            'instance.volume_attach.end')
+
+    def assert_volume_is_attached(self, server_id, volume_id):
+        """Asserts the volume is attached to the server."""
+        server = self.api.get_server(server_id)
+        attachments = server['os-extended-volumes:volumes_attached']
+        attached_vol_ids = [attachment['id'] for attachment in attachments]
+        self.assertIn(volume_id, attached_vol_ids,
+                      'Attached volumes: %s' % attachments)
+
+    def assert_resize_confirm_notifications(self):
+        # We should have gotten only two notifications:
+        # 1. instance.resize_confirm.start
+        # 2. instance.resize_confirm.end
+        self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS),
+                         'Unexpected number of versioned notifications for '
+                         'cross-cell resize confirm: %s' %
+                         fake_notifier.VERSIONED_NOTIFICATIONS)
+        start = fake_notifier.VERSIONED_NOTIFICATIONS[0]['event_type']
+        self.assertEqual('instance.resize_confirm.start', start)
+        end = fake_notifier.VERSIONED_NOTIFICATIONS[1]['event_type']
+        self.assertEqual('instance.resize_confirm.end', end)
+
+    def delete_server_and_assert_cleanup(self, server):
+        """Deletes the server and makes various cleanup checks.
+
+        - makes sure allocations from placement are gone
+        - makes sure the instance record is gone from both cells
+        - makes sure there are no leaked volume attachments
+
+        :param server: dict of the server resource to delete
+        """
+        # Determine which cell the instance was in when the server was
+        # deleted in the API so we can check hard vs soft delete in the DB.
+        current_cell = self.host_to_cell_mappings[
+            server['OS-EXT-SRV-ATTR:host']]
+        # Delete the server and check that the allocations are gone from
+        # the placement service.
+        self._delete_and_check_allocations(server)
+        # Make sure the instance record is gone from both cell databases.
+        ctxt = nova_context.get_admin_context()
+        for cell_name in self.host_to_cell_mappings.values():
+            cell = self.cell_mappings[cell_name]
+            with nova_context.target_cell(ctxt, cell) as cctxt:
+                # If this is the cell the instance was in when it was
+                # deleted, it should be soft-deleted (instance.deleted != 0)
+                # and hidden by read_deleted='no'; otherwise it should be
+                # hard-deleted and even read_deleted='yes' should raise.
+                read_deleted = 'no' if current_cell == cell_name else 'yes'
+                with utils.temporary_mutation(
+                        cctxt, read_deleted=read_deleted):
+                    self.assertRaises(exception.InstanceNotFound,
+                                      objects.Instance.get_by_uuid,
+                                      cctxt, server['id'])
+        # Make sure there are no leaked volume attachments.
+        attachment_count = self._count_volume_attachments(server['id'])
+        self.assertEqual(0, attachment_count,
+                         'Leaked volume attachments: %s' %
+                         self.cinder.volume_to_attachment)
+
+    def assert_resize_confirm_actions(self, server):
+        actions = self.api.api_get(
+            '/servers/%s/os-instance-actions' % server['id']
+        ).body['instanceActions']
+        actions_by_action = {action['action']: action for action in actions}
+        self.assertIn(instance_actions.CONFIRM_RESIZE, actions_by_action)
+        confirm_action = actions_by_action[instance_actions.CONFIRM_RESIZE]
+        detail = self.api.api_get(
+            '/servers/%s/os-instance-actions/%s' % (
+                server['id'], confirm_action['request_id'])
+        ).body['instanceAction']
+        events_by_name = {event['event']: event for event in detail['events']}
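+        # The confirm flow should record exactly two events: one for the
+        # conductor task and one for the compute method run on the source.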
+        self.assertEqual(2, len(detail['events']), detail)
+        for event_name in ('conductor_confirm_snapshot_based_resize',
+                           'compute_confirm_snapshot_based_resize_at_source'):
+            self.assertIn(event_name, events_by_name)
+            self.assertEqual('Success', events_by_name[event_name]['result'])
+
     def test_resize_confirm_image_backed(self):
         """Creates an image-backed server in one cell and resizes it to the
         host in the other cell. The resize is confirmed.
         """
-        self._resize_and_validate()
+        server, source_rp_uuid, target_rp_uuid, _, new_flavor = (
+            self._resize_and_validate())
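+        # _resize_and_validate returns (server, source_rp_uuid,
+        # target_rp_uuid, old_flavor, new_flavor); the old flavor is not
+        # needed for the confirm assertions below.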
+
+        # Attach a fake volume to the server to make sure it survives confirm.
+        self._attach_volume_to_server(server['id'], uuids.fake_volume_id)
 
-        # TODO(mriedem): Confirm the resize and make assertions.
+        # Reset the fake notifier so we only check confirmation notifications.
+        fake_notifier.reset()
+
+        # Confirm the resize and check all the things. The instance and its
+        # related records should be gone from the source cell database; the
+        # migration should be confirmed; the allocations, held by the
+        # migration record on the source compute node resource provider,
+        # should now be gone; and there should be a confirmResize instance
+        # action record with a successful event.
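+        # This test class puts host1 and host2 in different cells, so the
+        # host the server is not on now must be the source host.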
+        target_host = server['OS-EXT-SRV-ATTR:host']
+        source_host = 'host1' if target_host == 'host2' else 'host2'
+        self.api.post_server_action(server['id'], {'confirmResize': None})
+        self._wait_for_state_change(server, 'ACTIVE')
+
+        # The migration should be confirmed.
+        migrations = self.api.api_get(
+            '/os-migrations?instance_uuid=%s' % server['id']
+        ).body['migrations']
+        self.assertEqual(1, len(migrations), migrations)
+        migration = migrations[0]
+        self.assertEqual('confirmed', migration['status'], migration)
+
+        # The resource allocations held against the source node by the
+        # migration record should be gone and the target node provider
+        # should have allocations held by the instance.
+        source_allocations = self._get_allocations_by_provider_uuid(
+            source_rp_uuid)
+        self.assertEqual({}, source_allocations)
+        target_allocations = self._get_allocations_by_provider_uuid(
+            target_rp_uuid)
+        self.assertIn(server['id'], target_allocations)
+        self.assertFlavorMatchesAllocation(
+            new_flavor, target_allocations[server['id']]['resources'])
+
+        self.assert_resize_confirm_actions(server)
+
+        # Make sure the guest is on the target node hypervisor and not on
+        # the source node hypervisor.
+        source_guest_uuids = (
+            self.computes[source_host].manager.driver.list_instance_uuids())
+        self.assertNotIn(server['id'], source_guest_uuids,
+                         'Guest is still running on the source hypervisor.')
+        target_guest_uuids = (
+            self.computes[target_host].manager.driver.list_instance_uuids())
+        self.assertIn(server['id'], target_guest_uuids,
+                      'Guest is not running on the target hypervisor.')
+
+        # Assert the source host hypervisor usage is back to 0 and the
+        # target is using the new flavor.
+        self.assert_hypervisor_usage(
+            target_rp_uuid, new_flavor, volume_backed=False)
+        no_usage = {'vcpus': 0, 'disk': 0, 'ram': 0}
+        self.assert_hypervisor_usage(
+            source_rp_uuid, no_usage, volume_backed=False)
+
+        # Run periodics and make sure the usage is still as expected.
+        self._run_periodics()
+        self.assert_hypervisor_usage(
+            target_rp_uuid, new_flavor, volume_backed=False)
+        self.assert_hypervisor_usage(
+            source_rp_uuid, no_usage, volume_backed=False)
+
+        # Make sure the fake volume is still attached.
+        self.assert_volume_is_attached(server['id'], uuids.fake_volume_id)
+
+        # Make sure we got the expected notifications for the confirm action.
+        self.assert_resize_confirm_notifications()
+
+        # Explicitly delete the server and make sure it's gone from all cells.
+        self.delete_server_and_assert_cleanup(server)
 
     def test_resize_revert_volume_backed(self):
         """Tests a volume-backed resize to another cell where the resize
@@ -457,11 +629,28 @@ def test_cold_migrate_target_host_in_other_cell(self):
         # onto the source host in the source cell.
 
     def test_resize_confirm_from_stopped(self):
-        """Tests resizing and confirming a server that was initially stopped
-        so it should remain stopped through the resize.
+        """Tests resizing and confirming a volume-backed server that was
+        initially stopped, so it should remain stopped through the resize.
         """
-        self._resize_and_validate(volume_backed=True, stopped=True)
-        # TODO(mriedem): Confirm the resize and assert the guest remains off
+        server = self._resize_and_validate(volume_backed=True,
+                                           stopped=True)[0]
+        # Confirm the resize and assert the guest remains off.
+        self.api.post_server_action(server['id'], {'confirmResize': None})
+        server = self._wait_for_state_change(server, 'SHUTOFF')
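+        # power_state 4 is SHUTDOWN in nova.compute.power_state, i.e. the
+        # guest is still powered off after the resize is confirmed.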
+        self.assertEqual(4, server['OS-EXT-STS:power_state'],
+                         'Unexpected power state after confirmResize.')
+        self._wait_for_migration_status(server, ['confirmed'])
+
+        # Now try cold-migrating back to cell1 to make sure there is no
+        # duplicate entry error in the DB.
+        self.api.post_server_action(server['id'], {'migrate': None})
+        server = self._wait_for_state_change(server, 'VERIFY_RESIZE')
+        # Should be back on host1 in cell1.
+        self.assertEqual('host1', server['OS-EXT-SRV-ATTR:host'])
+
+        # TODO(mriedem): test_resize_revert_from_stopped with image-backed.
 
     def test_finish_snapshot_based_resize_at_dest_spawn_fails(self):
         """Negative test where the driver spawn fails on the dest host during