
Commit 8b00726

Restore soft-deleted compute node with same uuid
There is a unique index on the compute_nodes.uuid column, which means we can't have more than one compute_nodes record with the same UUID in the same database, even if one is soft deleted, because the deleted column is not part of that unique index constraint.

This is a problem with ironic nodes, where the node is 1:1 with the compute node record. When a node is undergoing maintenance, the driver doesn't return it from get_available_nodes(), so the ComputeManager.update_available_resource periodic task (soft) deletes the compute node record. But when the node comes out of maintenance in ironic and the driver reports it again, the ResourceTracker._init_compute_node code fails to re-create the ComputeNode record because of the duplicate uuid.

This change handles the DBDuplicateEntry error in compute_node_create by finding the soft-deleted compute node with the same uuid and simply updating it to no longer be (soft) deleted.

Closes-Bug: #1839560
Change-Id: Iafba419fe86446ffe636721f523fb619f8f787b3
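To see the failure mode in isolation, here is a minimal sketch using plain SQLAlchemy against in-memory SQLite (a simplified stand-in, not Nova's actual schema): because uniqueness is enforced on the uuid column alone, a soft-deleted row still blocks a new insert with the same uuid.

    import sqlalchemy as sa
    from sqlalchemy.exc import IntegrityError

    metadata = sa.MetaData()
    # Simplified stand-in for the compute_nodes table: note that the
    # unique constraint is on uuid alone, not on (uuid, deleted).
    compute_nodes = sa.Table(
        'compute_nodes', metadata,
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('uuid', sa.String(36), unique=True),
        sa.Column('deleted', sa.Integer, nullable=False, default=0),
    )

    engine = sa.create_engine('sqlite://')
    metadata.create_all(engine)

    with engine.begin() as conn:
        conn.execute(compute_nodes.insert().values(uuid='node2-uuid'))
        # Soft delete: the row is only flagged, it stays in the table.
        conn.execute(compute_nodes.update().values(deleted=1))

    with engine.begin() as conn:
        try:
            # Re-creating the node hits the unique index on uuid.
            conn.execute(compute_nodes.insert().values(uuid='node2-uuid'))
        except IntegrityError:
            print('duplicate uuid rejected despite the soft delete')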

3 files changed: +74, -24 lines

nova/db/sqlalchemy/api.py

Lines changed: 45 additions & 1 deletion
@@ -30,6 +30,7 @@
 from oslo_db.sqlalchemy import update_match
 from oslo_db.sqlalchemy import utils as sqlalchemyutils
 from oslo_log import log as logging
+from oslo_utils import excutils
 from oslo_utils import importutils
 from oslo_utils import timeutils
 from oslo_utils import uuidutils
@@ -696,11 +697,54 @@ def compute_node_create(context, values):
 
     compute_node_ref = models.ComputeNode()
     compute_node_ref.update(values)
-    compute_node_ref.save(context.session)
+    try:
+        compute_node_ref.save(context.session)
+    except db_exc.DBDuplicateEntry:
+        with excutils.save_and_reraise_exception(logger=LOG) as err_ctx:
+            # Check to see if we have a (soft) deleted ComputeNode with the
+            # same UUID and if so just update it and mark as no longer (soft)
+            # deleted. See bug 1839560 for details.
+            if 'uuid' in values:
+                # Get a fresh context for a new DB session and allow it to
+                # get a deleted record.
+                ctxt = nova.context.get_admin_context(read_deleted='yes')
+                compute_node_ref = _compute_node_get_and_update_deleted(
+                    ctxt, values)
+                # If we didn't get anything back we failed to find the node
+                # by uuid and update it so re-raise the DBDuplicateEntry.
+                if compute_node_ref:
+                    err_ctx.reraise = False
 
     return compute_node_ref
 
 
+@pick_context_manager_writer
+def _compute_node_get_and_update_deleted(context, values):
+    """Find a ComputeNode by uuid, update and un-delete it.
+
+    This is a special case from the ``compute_node_create`` method which
+    needs to be separate to get a new Session.
+
+    This method will update the ComputeNode, if found, to have deleted=0 and
+    deleted_at=None values.
+
+    :param context: request auth context which should be able to read deleted
+        records
+    :param values: values used to update the ComputeNode record - must include
+        uuid
+    :return: updated ComputeNode sqlalchemy model object if successfully found
+        and updated, None otherwise
+    """
+    cn = model_query(
+        context, models.ComputeNode).filter_by(uuid=values['uuid']).first()
+    if cn:
+        # Update with the provided values but un-soft-delete.
+        update_values = copy.deepcopy(values)
+        update_values['deleted'] = 0
+        update_values['deleted_at'] = None
+        return compute_node_update(context, cn.id, update_values)
+
+
 @oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
 @pick_context_manager_writer
 def compute_node_update(context, compute_id, values):
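The recovery above hinges on oslo.utils' save_and_reraise_exception context manager: the caught exception is re-raised when the block exits unless the handler sets reraise = False, preserving the original traceback on the failure path. Below is a toy sketch of the same pattern; the dict-backed store and the DuplicateError class are illustrative stand-ins, not Nova code.

    from oslo_utils import excutils


    class DuplicateError(Exception):
        """Stand-in for oslo.db's DBDuplicateEntry in this sketch."""


    def create_or_restore(store, values):
        """Insert a record; on a duplicate uuid, un-delete the existing row."""
        try:
            if values['uuid'] in store:
                raise DuplicateError(values['uuid'])
            store[values['uuid']] = dict(values, deleted=0)
            return store[values['uuid']]
        except DuplicateError:
            with excutils.save_and_reraise_exception() as err_ctx:
                existing = store[values['uuid']]
                if existing['deleted']:
                    existing.update(values, deleted=0)
                    # Recovery succeeded, so suppress the re-raise on exit.
                    err_ctx.reraise = False
                    return existing
            # If the existing row was not soft-deleted, the context manager
            # re-raises DuplicateError here with its original traceback.


    # Usage: a soft-deleted row with the same uuid is restored in place
    # instead of failing with DuplicateError.
    store = {'abc': {'uuid': 'abc', 'deleted': 1}}
    print(create_or_restore(store, {'uuid': 'abc'}))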

nova/tests/functional/regressions/test_bug_1839560.py

Lines changed: 17 additions & 23 deletions
@@ -14,7 +14,6 @@
 
 from nova import context
 from nova.db import api as db_api
-from nova import exception
 from nova import objects
 from nova import test
 from nova.tests import fixtures as nova_fixtures
@@ -93,30 +92,25 @@ def test_update_available_resource_node_recreate(self):
         # Now stub the driver again to report node2 as being back and run
         # the periodic task.
         compute.manager.driver._set_nodes(['node1', 'node2'])
+        LOG.info('Running update_available_resource which should bring back '
+                 'node2.')
         compute.manager.update_available_resource(ctxt)
-        # FIXME(mriedem): This is bug 1839560 where the ResourceTracker fails
-        # to create a ComputeNode for node2 because of conflicting UUIDs.
+        # The DBDuplicateEntry error should have been handled and resulted in
+        # updating the (soft) deleted record to no longer be deleted.
         log = self.stdlog.logger.output
-        self.assertIn('Error updating resources for node node2', log)
-        self.assertIn('DBDuplicateEntry', log)
-        # Should still only have one reported hypervisor (node1).
+        self.assertNotIn('DBDuplicateEntry', log)
+        # Should have two reported hypervisors again.
         hypervisors = self.api.api_get('/os-hypervisors').body['hypervisors']
-        self.assertEqual(1, len(hypervisors), hypervisors)
-        # Test the workaround for bug 1839560 by archiving the deleted node2
-        # compute_nodes table record which will allow the periodic to create a
-        # new entry for node2. We can remove this when the bug is fixed.
+        self.assertEqual(2, len(hypervisors), hypervisors)
+        # Now that the node2 record was un-soft-deleted, archiving should not
+        # remove any compute_nodes.
         LOG.info('Archiving the database.')
         archived = db_api.archive_deleted_rows(1000)[0]
-        self.assertIn('compute_nodes', archived)
-        self.assertEqual(1, archived['compute_nodes'])
-        with utils.temporary_mutation(ctxt, read_deleted='yes'):
-            self.assertRaises(exception.ComputeHostNotFound,
-                              objects.ComputeNode.get_by_host_and_nodename,
-                              ctxt, 'node1', 'node2')
-        # Now run the periodic again and we should have a new ComputeNode for
-        # node2.
-        LOG.info('Running update_available_resource which should create a new '
-                 'ComputeNode record for node2.')
-        compute.manager.update_available_resource(ctxt)
-        hypervisors = self.api.api_get('/os-hypervisors').body['hypervisors']
-        self.assertEqual(2, len(hypervisors), hypervisors)
+        self.assertNotIn('compute_nodes', archived)
+        cn2 = objects.ComputeNode.get_by_host_and_nodename(
+            ctxt, 'node1', 'node2')
+        self.assertFalse(cn2.deleted)
+        self.assertIsNone(cn2.deleted_at)
+        # The node2 id and uuid should not have changed in the DB.
+        self.assertEqual(cn.id, cn2.id)
+        self.assertEqual(cn.uuid, cn2.uuid)
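The archive assertions depend on archive_deleted_rows only moving rows whose deleted column is non-zero into the shadow tables, so the restored node2 record is no longer a candidate. A toy sketch of that semantic with list-backed tables (not Nova's implementation):

    def archive_deleted(table, shadow_table):
        """Move soft-deleted rows (deleted != 0) into the shadow table."""
        to_archive = [row for row in table if row['deleted']]
        for row in to_archive:
            table.remove(row)
            shadow_table.append(row)
        return len(to_archive)


    compute_nodes = [
        {'uuid': 'node1-uuid', 'deleted': 0},
        {'uuid': 'node2-uuid', 'deleted': 0},  # restored, no longer soft-deleted
    ]
    shadow_compute_nodes = []
    # Nothing is archived because neither row is marked deleted.
    assert archive_deleted(compute_nodes, shadow_compute_nodes) == 0
    assert len(compute_nodes) == 2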

nova/tests/unit/db/test_db_api.py

Lines changed: 12 additions & 0 deletions
@@ -7215,6 +7215,18 @@ def test_compute_node_create(self):
         new_stats = jsonutils.loads(self.item['stats'])
         self.assertEqual(self.stats, new_stats)
 
+    def test_compute_node_create_duplicate_host_hypervisor_hostname(self):
+        """Tests to make sure that DBDuplicateEntry is raised when trying to
+        create a duplicate ComputeNode with the same host and
+        hypervisor_hostname values but different uuid values. This makes
+        sure that when _compute_node_get_and_update_deleted returns None
+        the DBDuplicateEntry is re-raised.
+        """
+        other_node = dict(self.compute_node_dict)
+        other_node['uuid'] = uuidutils.generate_uuid()
+        self.assertRaises(db_exc.DBDuplicateEntry,
+                          db.compute_node_create, self.ctxt, other_node)
+
     def test_compute_node_get_all(self):
         nodes = db.compute_node_get_all(self.ctxt)
         self.assertEqual(1, len(nodes))
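The negative path this unit test exercises corresponds to the create_or_restore sketch earlier: when recovery finds nothing to un-delete, the original duplicate error propagates.

    # Continuing the create_or_restore sketch above: the conflicting row is
    # live (not soft-deleted), so recovery fails and DuplicateError is
    # re-raised with its original traceback.
    store = {'abc': {'uuid': 'abc', 'deleted': 0}}
    try:
        create_or_restore(store, {'uuid': 'abc'})
    except DuplicateError:
        print('re-raised: nothing to un-delete')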
