Skip to content

Commit 5e74915

Browse files
committed
Follow up for counting quota usage from placement
This addresses comments from the series:

* Remove usage-specific info from docstring
* Add note to nova-next job description "changelog"
* Add info about data migration to config option help
* Consolidate code under count_usage_from_placement conditional
* Consolidate variables for checking data migration doneness
* Remove hard-coded user_id and project_id from func test
* Re-word code comment about checking data migration doneness

Related to blueprint count-quota-usage-from-placement

Change-Id: Ida2de9256fcc9e092fb9977b8ac067fc1472c316
1 parent e3aadaf commit 5e74915

File tree

5 files changed

+24
-17
lines changed

5 files changed

+24
-17
lines changed

.zuul.yaml

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -165,6 +165,7 @@
165165
TLS console proxy code in the libvirt driver.
166166
Starting in Stein, the job was changed to run with python 3 and enabled
167167
volume multi-attach testing.
168+
Starting in Train, the job enabled counting quota usage from placement.
168169
Runs all tempest compute API and most scenario tests concurrently.
169170
run: playbooks/legacy/nova-next/run.yaml
170171
post-run: playbooks/legacy/nova-next/post.yaml

nova/conf/quota.py

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -332,6 +332,15 @@
332332
possible for a request to unshelve a server to be rejected if the user does not
333333
have enough quota available to support the cores and ram needed by the server
334334
to be unshelved.
335+
336+
The ``populate_queued_for_delete`` and ``populate_user_id`` online data
337+
migrations must be completed before usage can be counted from placement. Until
338+
the data migration is complete, the system will fall back to legacy quota usage
339+
counting from cell databases depending on the result of an EXISTS database
340+
query during each quota check, if this configuration option is set to True.
341+
Operators who want to avoid the performance hit from the EXISTS queries should
342+
wait to set this configuration option to True until after they have completed
343+
their online data migrations via ``nova-manage db online_data_migrations``.
335344
"""),
336345
]
337346

nova/objects/instance_mapping.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -417,10 +417,6 @@ def get_counts(cls, context, project_id, user_id=None):
417417
not included in the count (deleted and SOFT_DELETED instances).
418418
Instances that are queued_for_delete=None are not included in the
419419
count because we are not certain about whether or not they are deleted.
420-
When counting quota usage, we will fall back on the legacy counting
421-
method and count instances, cores, and ram from cell databases if any
422-
unmigrated instance mappings (user_id=None or queued_for_delete=None)
423-
are detected, to avoid using a potentially inaccurate count.
424420
425421
:param context: The request context for database access
426422
:param project_id: The project_id to count across

nova/quota.py

Lines changed: 11 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -1176,18 +1176,18 @@ def _server_group_count_members_by_user(context, group, user_id):
11761176
{'user': {'server_group_members': <count across user>}}
11771177
"""
11781178
# Because server group members quota counting is not scoped to a project,
1179-
# but scoped to a particular InstanceGroup and user, we cannot filter our
1180-
# user_id/queued_for_delete populated check on project_id or user_id.
1179+
# but scoped to a particular InstanceGroup and user, we have no reasonable
1180+
# way of pruning down our migration check to only a subset of all instance
1181+
# mapping records.
11811182
# So, we check whether user_id/queued_for_delete is populated for all
11821183
# records and cache the result to prevent unnecessary checking once the
11831184
# data migration has been completed.
11841185
global UID_QFD_POPULATED_CACHE_ALL
11851186
if not UID_QFD_POPULATED_CACHE_ALL:
11861187
LOG.debug('Checking whether user_id and queued_for_delete are '
11871188
'populated for all projects')
1188-
uid_qfd_populated = _user_id_queued_for_delete_populated(context)
1189-
if uid_qfd_populated:
1190-
UID_QFD_POPULATED_CACHE_ALL = True
1189+
UID_QFD_POPULATED_CACHE_ALL = _user_id_queued_for_delete_populated(
1190+
context)
11911191

11921192
if UID_QFD_POPULATED_CACHE_ALL:
11931193
count = objects.InstanceMappingList.get_count_by_uuids_and_user(
@@ -1308,13 +1308,12 @@ def _instances_cores_ram_count(context, project_id, user_id=None):
13081308
UID_QFD_POPULATED_CACHE_BY_PROJECT.add(project_id)
13091309
else:
13101310
uid_qfd_populated = True
1311-
if not uid_qfd_populated:
1312-
LOG.warning('Falling back to legacy quota counting method for '
1313-
'instances, cores, and ram')
1314-
1315-
if CONF.quota.count_usage_from_placement and uid_qfd_populated:
1316-
return _instances_cores_ram_count_api_db_placement(context, project_id,
1317-
user_id=user_id)
1311+
if uid_qfd_populated:
1312+
return _instances_cores_ram_count_api_db_placement(context,
1313+
project_id,
1314+
user_id=user_id)
1315+
LOG.warning('Falling back to legacy quota counting method for instances, '
1316+
'cores, and ram')
13181317
return _instances_cores_ram_count_legacy(context, project_id,
13191318
user_id=user_id)
13201319

nova/tests/functional/db/test_quota.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -196,7 +196,9 @@ def test_instances_cores_ram_count(self):
196196
self.assertEqual(1536, count['user']['ram'])
197197

198198
def test_user_id_queued_for_delete_populated(self):
199-
ctxt = context.RequestContext('fake-user', 'fake-project')
199+
ctxt = context.RequestContext(
200+
test_instance_mapping.sample_mapping['user_id'],
201+
test_instance_mapping.sample_mapping['project_id'])
200202

201203
# One deleted or SOFT_DELETED instance with user_id=None should not be
202204
# considered by the check.

0 commit comments

Comments
 (0)