@@ -21,6 +21,7 @@
 from nova.compute import manager as compute_manager
 from nova import context
 from nova import objects
+from nova import test
 from nova.tests.functional import integrated_helpers
 from nova.tests.functional.libvirt import base
 from nova.tests.unit.virt.libvirt import fake_os_brick_connector
@@ -66,37 +67,35 @@ def setUp(self):
             'nova.virt.libvirt.driver.connector',
             fake_os_brick_connector))

+    def _migrate_stub(self, domain, destination, params, flags):
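+        # fakelibvirt's Domain.migrateToURI3() is stubbed out with this
+        # method; tests that expect the live migration to actually start
+        # must override it.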
+        raise test.TestingException('_migrate_stub() must be implemented in '
+                                    'tests that expect the live migration '
+                                    'to start.')
+
     def get_host(self, server_id):
         server = self.api.get_server(server_id)
         return server['OS-EXT-SRV-ATTR:host']

-    def _get_host_numa_topology(self, host):
-        ctxt = context.get_admin_context()
-        return objects.NUMATopology.obj_from_db_obj(
-            objects.ComputeNode.get_by_nodename(ctxt, host).numa_topology)
-
-    def _assert_no_migration_context(self, instance_uuid):
-        ctxt = context.get_admin_context()
-        self.assertFalse(
-            objects.MigrationContext.get_by_instance_uuid(ctxt, instance_uuid))
-
-    def _assert_has_migration_context(self, instance_uuid):
+    def _get_migration_context(self, instance_uuid):
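+        # Returns the instance's MigrationContext, or None if there isn't
+        # one, letting callers assert presence with assertIsInstance() and
+        # absence with assertIsNone().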
         ctxt = context.get_admin_context()
-        self.assertTrue(
-            objects.MigrationContext.get_by_instance_uuid(ctxt, instance_uuid))
+        return objects.MigrationContext.get_by_instance_uuid(ctxt,
+                                                             instance_uuid)

     def _assert_instance_pinned_cpus(self, uuid, instance_cpus, host_cpus):
         ctxt = context.get_admin_context()
         topology = objects.InstanceNUMATopology.get_by_instance_uuid(
             ctxt, uuid)
         self.assertEqual(1, len(topology.cells))
-        self.assertItemsEqual(instance_cpus,
+        # NOTE(artom) DictOfIntegersField has strings as keys, need to convert
+        self.assertItemsEqual([str(cpu) for cpu in instance_cpus],
                               topology.cells[0].cpu_pinning_raw.keys())
         self.assertItemsEqual(host_cpus,
                               topology.cells[0].cpu_pinning_raw.values())

     def _assert_host_consumed_cpus(self, host, cpus):
-        topology = self._get_host_numa_topology(host)
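+        # Load the host's NUMATopology from its ComputeNode DB record.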
+        ctxt = context.get_admin_context()
+        topology = objects.NUMATopology.obj_from_db_obj(
+            objects.ComputeNode.get_by_nodename(ctxt, host).numa_topology)
         self.assertItemsEqual(cpus, topology.cells[0].pinned_cpus)


@@ -132,17 +131,12 @@ def start_computes_and_servers(self):
         # CPUs 0,1.
         for server_name, host in [('server_a', 'host_a'),
                                   ('server_b', 'host_b')]:
-            server = self._build_server(
-                flavor_id=flavor,
-                image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6')
-            server.update({'networks': 'none',
-                           'host': host})
-            post = {'server': server}
-            server = self.api.post_server(post)
+            server = self._create_server(flavor_id=flavor, host=host,
+                                         networks='none')
             setattr(self, server_name,
                     self._wait_for_state_change(server, 'ACTIVE'))
             self.assertEqual(host, self.get_host(server['id']))
-            self._assert_instance_pinned_cpus(server['id'], ['0', '1'], [0, 1])
+            self._assert_instance_pinned_cpus(server['id'], [0, 1], [0, 1])

     def _rpc_pin_host(self, hostname):
         ctxt = context.get_admin_context()
@@ -153,14 +147,6 @@ def _rpc_pin_host(self, hostname):
             dest_mgr.compute_rpcapi.router.client(
                 ctxt).can_send_version('5.3'))

-    def _live_migrate(self, server, migration_final_status):
-        self.api.post_server_action(
-            server['id'],
-            {'os-migrateLive': {'host': None,
-                                'block_migration': 'auto'}})
-        self._wait_for_state_change(server, 'ACTIVE')
-        self._wait_for_migration_status(server, [migration_final_status])
-


 class NUMALiveMigrationPositiveTests(NUMALiveMigrationPositiveBase):
     """Tests that expect the live migration to succeed. Stubs out fakelibvirt's
@@ -177,14 +163,23 @@ def _migrate_stub(self, domain, destination, params, flags):
         until this method is done, the last thing we do is make fakelibvirt's
         Domain.jobStats() return VIR_DOMAIN_JOB_COMPLETED.
         """
-        self._assert_has_migration_context(self.server_a['id'])
+        self.assertIsInstance(
+            self._get_migration_context(self.server_a['id']),
+            objects.MigrationContext)

         # During the migration, server_a is consuming CPUs 0,1 on host_a, while
         # all 4 of host_b's CPU are consumed by server_b and the incoming
         # migration.
         self._assert_host_consumed_cpus('host_a', [0, 1])
         self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])

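+        # Placement should agree: 2 PCPUs consumed on the source, all 4 on
+        # the destination.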
+        host_a_rp = self._get_provider_uuid_by_name('host_a')
+        host_b_rp = self._get_provider_uuid_by_name('host_b')
+        usages_a = self._get_provider_usages(host_a_rp)
+        usages_b = self._get_provider_usages(host_b_rp)
+        self.assertEqual(2, usages_a['PCPU'])
+        self.assertEqual(4, usages_b['PCPU'])
+
         # In a real live migration, libvirt and QEMU on the source and
         # destination talk it out, resulting in the instance starting to exist
         # on the destination. Fakelibvirt cannot do that, so we have to
@@ -230,7 +225,7 @@ def _test(self, pin_dest=False):
             self._rpc_pin_host('host_b')
         self._live_migrate(self.server_a, 'completed')
         self.assertEqual('host_b', self.get_host(self.server_a['id']))
-        self._assert_no_migration_context(self.server_a['id'])
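+        # A completed live migration should have deleted the instance's
+        # migration context.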
+        self.assertIsNone(self._get_migration_context(self.server_a['id']))

         # At this point host_a should have no CPUs consumed (server_a has moved
         # to host_b), and host_b should have all of its CPUs consumed. In
@@ -241,14 +236,14 @@ def _test(self, pin_dest=False):
         self._assert_host_consumed_cpus('host_a', [])
         self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])
         self._assert_instance_pinned_cpus(self.server_a['id'],
-                                          ['0', '1'], [2, 3])
+                                          [0, 1], [2, 3])

         self._run_periodics()

         self._assert_host_consumed_cpus('host_a', [])
         self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])
         self._assert_instance_pinned_cpus(self.server_a['id'],
-                                          ['0', '1'], [2, 3])
+                                          [0, 1], [2, 3])

         self.assertTrue(self.migrate_stub_ran)
@@ -262,8 +257,22 @@ def test_numa_live_migration_dest_pinned(self):
         self._test(pin_dest=True)

     def test_bug_1843639(self):
-        orig_live_migration = \
-            compute_manager.ComputeManager.live_migration
+        """Live migrations in 'accepted' status were not considered in progress
+        before the fix for bug 1845146 merged, and were ignored by the update
+        available resources periodic task. From the task's POV, live-migrating
+        instances with migration status 'accepted' were considered to be on the
+        source, and any resource claims on the destination would get
+        erroneously removed. For that to happen, the task had to run at just
+        the "right" time, when the migration was in 'accepted' and had not yet
+        been moved to 'queued' by live_migration() in the compute manager.
+
+        This test triggers this race by wrapping around live_migration() and
+        running the update available resources periodic task while the
+        migration is still in 'accepted'.
+        """
+
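+        # Sanity flag: _test() must actually run our wrapped
+        # live_migration() below, otherwise its assertions never fire.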
+        self.live_migration_ran = False
+        orig_live_migration = compute_manager.ComputeManager.live_migration

         def live_migration(*args, **kwargs):
             self._run_periodics()
@@ -272,12 +281,22 @@ def live_migration(*args, **kwargs):
             # incoming migration.
             self._assert_host_consumed_cpus('host_a', [0, 1])
             self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])
+
+            # The migration should also be in 'accepted' at this point in time.
+            ctxt = context.get_admin_context()
+            self.assertIsInstance(
+                objects.Migration.get_by_instance_and_status(
+                    ctxt, self.server_a['id'], 'accepted'),
+                objects.Migration)
+
+            self.live_migration_ran = True
             return orig_live_migration(*args, **kwargs)

         self.useFixture(fixtures.MonkeyPatch(
             'nova.compute.manager.ComputeManager.live_migration',
             live_migration))
         self._test()
+        self.assertTrue(self.live_migration_ran)


 class NUMALiveMigrationRollbackTests(NUMALiveMigrationPositiveBase):
class NUMALiveMigrationRollbackTests (NUMALiveMigrationPositiveBase ):
@@ -290,7 +309,9 @@ def _migrate_stub(self, domain, destination, params, flags):
         """Designed to stub fakelibvirt's migrateToURI3 and "fail" the
         live migration by monkeypatching jobStats() to return an error.
         """
-        self._assert_has_migration_context(self.server_a['id'])
+        self.assertIsInstance(
+            self._get_migration_context(self.server_a['id']),
+            objects.MigrationContext)

         # During the migration, server_a is consuming CPUs 0,1 on host_a, while
         # all 4 of host_b's CPU are consumed by server_b and the incoming
@@ -332,15 +353,15 @@ def _test(self, pin_dest=False):
             self._rpc_pin_host('host_b')
         self._live_migrate(self.server_a, 'error')
         self.assertEqual('host_a', self.get_host(self.server_a['id']))
-        self._assert_no_migration_context(self.server_a['id'])
+        self.assertIsNone(self._get_migration_context(self.server_a['id']))

         # Check consumed and pinned CPUs. Things should be as they were before
         # the live migration, with CPUs 0,1 consumed on both hosts by the 2
         # servers.
         self._assert_host_consumed_cpus('host_a', [0, 1])
         self._assert_host_consumed_cpus('host_b', [0, 1])
         self._assert_instance_pinned_cpus(self.server_a['id'],
-                                          ['0', '1'], [0, 1])
+                                          [0, 1], [0, 1])

     def test_rollback(self):
         self._test()
@@ -403,15 +424,8 @@ def _test(self, pin_source, pin_cond, expect_success=True):
         extra_spec = {'hw:numa_nodes': 1,
                       'hw:cpu_policy': 'dedicated'}
         flavor = self._create_flavor(vcpu=2, extra_spec=extra_spec)
-        server = self._build_server(
-            flavor_id=flavor,
-            image_uuid='155d900f-4e14-4e4c-a73d-069cbf4541e6')
-        server['networks'] = 'none'
-        post = {'server': server}
-        server1 = self.api.post_server(post)
-        server2 = self.api.post_server(post)
-        self._wait_for_state_change(server1, 'ACTIVE')
-        self._wait_for_state_change(server2, 'ACTIVE')
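+        # NOTE: _create_server() waits for the servers to go ACTIVE itself,
+        # so no explicit _wait_for_state_change() calls are needed.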
+        server1 = self._create_server(flavor_id=flavor, networks='none')
+        server2 = self._create_server(flavor_id=flavor, networks='none')
         if self.get_host(server1['id']) == 'source':
             self.migrating_server = server1
         else:
@@ -445,7 +459,8 @@ def _migrate_stub(self, domain, destination, params, flags):
         # NOTE(artom) This is the crucial bit: by asserting that the migrating
         # instance has no migration context, we're making sure that we're
         # hitting the old, pre-claims code paths.
-        self._assert_no_migration_context(self.migrating_server['id'])
+        self.assertIsNone(
+            self._get_migration_context(self.migrating_server['id']))
         dest = self.computes['dest']
         dest.driver._host.get_connection().createXML(
             params['destination_xml'],
@@ -475,7 +490,8 @@ def _migrate_stub(self, domain, destination, params, flags):
         # NOTE(artom) This is the crucial bit: by asserting that the migrating
         # instance has no migration context, we're making sure that we're
         # hitting the old, pre-claims code paths.
-        self._assert_no_migration_context(self.migrating_server['id'])
+        self.assertIsNone(
+            self._get_migration_context(self.migrating_server['id']))
         source = self.computes['source']
         conn = source.driver._host.get_connection()
         dom = conn.lookupByUUIDString(self.migrating_server['id'])
@@ -531,7 +547,7 @@ def test_insufficient_resources(self):
             check_response_status=[500])
         self._wait_for_state_change(server, 'ACTIVE')
         self._wait_for_migration_status(server, ['error'])
-        self._assert_no_migration_context(server['id'])
+        self.assertIsNone(self._get_migration_context(server['id']))
         self.assertEqual('host_a', self.get_host(server['id']))
         log_out = self.stdlog.logger.output
         self.assertIn('Migration pre-check error: '
@@ -577,7 +593,7 @@ def test_different_page_sizes(self):
         self._wait_for_state_change(server, 'ACTIVE')
         self._wait_for_migration_status(server, ['error'])
         self.assertEqual(initial_host, self.get_host(server['id']))
-        self._assert_no_migration_context(server['id'])
+        self.assertIsNone(self._get_migration_context(server['id']))
         log_out = self.stdlog.logger.output
         self.assertIn('Migration pre-check error: '
                       'Insufficient compute resources: '