@@ -237,59 +237,12 @@ def _execute(self):
237
237
# pop the first alternate from the list to use for the destination, and
238
238
# pass the remaining alternates to the compute.
239
239
if self .host_list is None :
240
- selection_lists = self .query_client .select_destinations (
241
- self .context , self .request_spec , [self .instance .uuid ],
242
- return_objects = True , return_alternates = True )
243
- # Since there is only ever one instance to migrate per call, we
244
- # just need the first returned element.
245
- selection_list = selection_lists [0 ]
246
- # The selected host is the first item in the list, with the
247
- # alternates being the remainder of the list.
248
- selection , self .host_list = selection_list [0 ], selection_list [1 :]
249
-
250
- scheduler_utils .fill_provider_mapping (
251
- self .context , self .reportclient , self .request_spec , selection )
240
+ selection = self ._schedule ()
252
241
253
242
else :
254
243
# This is a reschedule that will use the supplied alternate hosts
255
- # in the host_list as destinations. Since the resources on these
256
- # alternates may have been consumed and might not be able to
257
- # support the migrated instance, we need to first claim the
258
- # resources to verify the host still has sufficient availabile
259
- # resources.
260
- elevated = self .context .elevated ()
261
- host_available = False
262
- while self .host_list and not host_available :
263
- selection = self .host_list .pop (0 )
264
- if selection .allocation_request :
265
- alloc_req = jsonutils .loads (selection .allocation_request )
266
- else :
267
- alloc_req = None
268
- if alloc_req :
269
- # If this call succeeds, the resources on the destination
270
- # host will be claimed by the instance.
271
- host_available = scheduler_utils .claim_resources (
272
- elevated , self .reportclient , self .request_spec ,
273
- self .instance .uuid , alloc_req ,
274
- selection .allocation_request_version )
275
- if host_available :
276
- scheduler_utils .fill_provider_mapping (
277
- self .context , self .reportclient , self .request_spec ,
278
- selection )
279
- else :
280
- # Some deployments use different schedulers that do not
281
- # use Placement, so they will not have an
282
- # allocation_request to claim with. For those cases,
283
- # there is no concept of claiming, so just assume that
284
- # the host is valid.
285
- host_available = True
286
- # There are no more available hosts. Raise a MaxRetriesExceeded
287
- # exception in that case.
288
- if not host_available :
289
- reason = ("Exhausted all hosts available for retrying build "
290
- "failures for instance %(instance_uuid)s." %
291
- {"instance_uuid" : self .instance .uuid })
292
- raise exception .MaxRetriesExceeded (reason = reason )
244
+ # in the host_list as destinations.
245
+ selection = self ._reschedule ()
293
246
294
247
scheduler_utils .populate_filter_properties (legacy_props , selection )
295
248
# context is not serializable
@@ -317,6 +270,62 @@ def _execute(self):
317
270
node = node , clean_shutdown = self .clean_shutdown ,
318
271
host_list = self .host_list )
319
272
273
def _schedule(self):
    """Ask the scheduler for a destination host plus alternates.

    Queries the scheduler for destinations for this task's instance,
    stashes the alternate hosts on ``self.host_list`` for use by a later
    reschedule, fills the request spec's provider mapping for the chosen
    host, and returns that selected host.
    """
    all_selections = self.query_client.select_destinations(
        self.context, self.request_spec, [self.instance.uuid],
        return_objects=True, return_alternates=True)
    # Only a single instance is migrated per call, so only the first
    # per-instance selection list is relevant.
    hosts = all_selections[0]
    # The head of the list is the chosen destination; the tail holds
    # the alternates kept around for possible reschedules.
    selection = hosts[0]
    self.host_list = hosts[1:]

    scheduler_utils.fill_provider_mapping(
        self.context, self.reportclient, self.request_spec, selection)
    return selection
288
+ def _reschedule (self ):
289
+ # Since the resources on these alternates may have been consumed and
290
+ # might not be able to support the migrated instance, we need to first
291
+ # claim the resources to verify the host still has sufficient
292
+ # available resources.
293
+ elevated = self .context .elevated ()
294
+ host_available = False
295
+ selection = None
296
+ while self .host_list and not host_available :
297
+ selection = self .host_list .pop (0 )
298
+ if selection .allocation_request :
299
+ alloc_req = jsonutils .loads (selection .allocation_request )
300
+ else :
301
+ alloc_req = None
302
+ if alloc_req :
303
+ # If this call succeeds, the resources on the destination
304
+ # host will be claimed by the instance.
305
+ host_available = scheduler_utils .claim_resources (
306
+ elevated , self .reportclient , self .request_spec ,
307
+ self .instance .uuid , alloc_req ,
308
+ selection .allocation_request_version )
309
+ if host_available :
310
+ scheduler_utils .fill_provider_mapping (
311
+ self .context , self .reportclient , self .request_spec ,
312
+ selection )
313
+ else :
314
+ # Some deployments use different schedulers that do not
315
+ # use Placement, so they will not have an
316
+ # allocation_request to claim with. For those cases,
317
+ # there is no concept of claiming, so just assume that
318
+ # the host is valid.
319
+ host_available = True
320
+ # There are no more available hosts. Raise a MaxRetriesExceeded
321
+ # exception in that case.
322
+ if not host_available :
323
+ reason = ("Exhausted all hosts available for retrying build "
324
+ "failures for instance %(instance_uuid)s." %
325
+ {"instance_uuid" : self .instance .uuid })
326
+ raise exception .MaxRetriesExceeded (reason = reason )
327
+ return selection
328
+
320
329
def rollback (self ):
321
330
if self ._migration :
322
331
self ._migration .status = 'error'
0 commit comments