@@ -14,7 +14,6 @@
 
 from nova import context
 from nova.db import api as db_api
-from nova import exception
 from nova import objects
 from nova import test
 from nova.tests import fixtures as nova_fixtures
@@ -93,30 +92,25 @@ def test_update_available_resource_node_recreate(self):
         # Now stub the driver again to report node2 as being back and run
         # the periodic task.
         compute.manager.driver._set_nodes(['node1', 'node2'])
+        LOG.info('Running update_available_resource which should bring back '
+                 'node2.')
         compute.manager.update_available_resource(ctxt)
-        # FIXME(mriedem): This is bug 1839560 where the ResourceTracker fails
-        # to create a ComputeNode for node2 because of conflicting UUIDs.
+        # The DBDuplicateEntry error should have been handled and resulted in
+        # updating the (soft) deleted record to no longer be deleted.
         log = self.stdlog.logger.output
-        self.assertIn('Error updating resources for node node2', log)
-        self.assertIn('DBDuplicateEntry', log)
-        # Should still only have one reported hypervisor (node1).
+        self.assertNotIn('DBDuplicateEntry', log)
+        # Should have two reported hypervisors again.
         hypervisors = self.api.api_get('/os-hypervisors').body['hypervisors']
-        self.assertEqual(1, len(hypervisors), hypervisors)
-        # Test the workaround for bug 1839560 by archiving the deleted node2
-        # compute_nodes table record which will allow the periodic to create a
-        # new entry for node2. We can remove this when the bug is fixed.
+        self.assertEqual(2, len(hypervisors), hypervisors)
+        # Now that the node2 record was un-soft-deleted, archiving should not
+        # remove any compute_nodes.
         LOG.info('Archiving the database.')
         archived = db_api.archive_deleted_rows(1000)[0]
-        self.assertIn('compute_nodes', archived)
-        self.assertEqual(1, archived['compute_nodes'])
-        with utils.temporary_mutation(ctxt, read_deleted='yes'):
-            self.assertRaises(exception.ComputeHostNotFound,
-                              objects.ComputeNode.get_by_host_and_nodename,
-                              ctxt, 'node1', 'node2')
-        # Now run the periodic again and we should have a new ComputeNode for
-        # node2.
-        LOG.info('Running update_available_resource which should create a new '
-                 'ComputeNode record for node2.')
-        compute.manager.update_available_resource(ctxt)
-        hypervisors = self.api.api_get('/os-hypervisors').body['hypervisors']
-        self.assertEqual(2, len(hypervisors), hypervisors)
+        self.assertNotIn('compute_nodes', archived)
+        cn2 = objects.ComputeNode.get_by_host_and_nodename(
+            ctxt, 'node1', 'node2')
+        self.assertFalse(cn2.deleted)
+        self.assertIsNone(cn2.deleted_at)
+        # The node2 id and uuid should not have changed in the DB.
+        self.assertEqual(cn.id, cn2.id)
+        self.assertEqual(cn.uuid, cn2.uuid)
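
The updated assertions assume that the compute node create path now handles the DBDuplicateEntry raised for the conflicting UUID by un-soft-deleting the existing compute_nodes row instead of logging an error. That handling lives outside this test diff, so the snippet below is only a minimal, self-contained sketch of the same revive-on-duplicate pattern using plain SQLAlchemy: the model, columns, and the compute_node_create_or_revive helper are hypothetical, and in Nova the IntegrityError would surface as oslo.db's DBDuplicateEntry.

# Hypothetical sketch only -- not Nova's actual ResourceTracker/DB API code.
# It shows the revive-on-duplicate idea the test relies on: when an insert
# collides with a soft-deleted row carrying the same unique uuid, clear the
# soft-delete markers on the existing row instead of failing.
import datetime

from sqlalchemy import Column, DateTime, Integer, String, create_engine
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class ComputeNode(Base):
    __tablename__ = 'compute_nodes'
    id = Column(Integer, primary_key=True)
    uuid = Column(String(36), unique=True, nullable=False)
    host = Column(String(255))
    hypervisor_hostname = Column(String(255))
    # Simplified soft-delete markers: deleted == 0 means the row is "live".
    deleted = Column(Integer, nullable=False, default=0)
    deleted_at = Column(DateTime, nullable=True)


def compute_node_create_or_revive(session, values):
    """Insert a compute node, or revive a soft-deleted row with the same uuid."""
    node = ComputeNode(**values)
    session.add(node)
    try:
        session.commit()
        return node
    except IntegrityError:
        # Duplicate uuid: drop the failed insert and reuse the existing
        # (soft-deleted) row, keeping its id and uuid stable.
        session.rollback()
        node = session.query(ComputeNode).filter_by(uuid=values['uuid']).one()
        node.deleted = 0
        node.deleted_at = None
        node.host = values.get('host', node.host)
        node.hypervisor_hostname = values.get(
            'hypervisor_hostname', node.hypervisor_hostname)
        session.commit()
        return node


if __name__ == '__main__':
    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        cn = compute_node_create_or_revive(
            session, {'uuid': 'fake-uuid', 'host': 'node1',
                      'hypervisor_hostname': 'node2'})
        # Simulate the soft delete done when the driver stops reporting node2.
        cn.deleted = cn.id
        cn.deleted_at = datetime.datetime.utcnow()
        session.commit()
        # The next "create" hits the duplicate uuid and revives the old row.
        cn2 = compute_node_create_or_revive(
            session, {'uuid': 'fake-uuid', 'host': 'node1',
                      'hypervisor_hostname': 'node2'})
        assert cn2.id == cn.id and cn2.deleted == 0

This mirrors what the test asserts at the API and object layers: node2 shows up again as a hypervisor, archiving finds nothing to move, and the revived record keeps its original id and uuid.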