@@ -86,7 +86,7 @@ should go to.
 
 */
 
-use build::{BlockAnd, BlockAndExtension, Builder, CFG};
+use build::{BlockAnd, BlockAndExtension, Builder};
 use rustc::middle::region::CodeExtent;
 use rustc::middle::lang_items;
 use rustc::middle::subst::{Substs, Subst, VecPerParamSpace};
@@ -236,7 +236,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
         self.diverge_cleanup();
         let scope = self.scopes.pop().unwrap();
         assert_eq!(scope.extent, extent);
-        build_scope_drops(&mut self.cfg, &scope, &self.scopes[..], block)
+        self.with_scopes(|this, scopes| this.build_scope_drops(&scope, &scopes[..], block))
     }
 
 
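Note on the new call shape above: `build_scope_drops` becomes a method in this commit, so `pop_scope` can no longer hand it `&self.scopes[..]` while also lending out `&mut self`. The `with_scopes` helper (added further down) sidesteps that by temporarily moving the scope stack out of the builder. A minimal, self-contained sketch of that steal-and-restore pattern, using hypothetical `Builder`/`Scope` stand-ins rather than the real MIR types:

```rust
use std::mem;

struct Scope {
    name: &'static str,
}

struct Builder {
    scopes: Vec<Scope>,
    blocks_created: usize,
}

impl Builder {
    /// Temporarily move `self.scopes` out of the builder so the closure can
    /// borrow the scopes while still calling `&mut self` methods.
    fn with_scopes<R, F: FnOnce(&mut Self, &mut [Scope]) -> R>(&mut self, f: F) -> R {
        let mut scopes = Vec::new();
        mem::swap(&mut scopes, &mut self.scopes);
        let r = f(self, &mut scopes);
        mem::swap(&mut scopes, &mut self.scopes);
        // Nothing may have repopulated the field while it was "checked out".
        assert!(scopes.is_empty());
        r
    }

    fn start_new_block(&mut self) -> usize {
        self.blocks_created += 1;
        self.blocks_created
    }
}

fn main() {
    let mut builder = Builder {
        scopes: vec![Scope { name: "outer" }, Scope { name: "inner" }],
        blocks_created: 0,
    };
    builder.with_scopes(|this, scopes| {
        // Read the scope stack *and* mutate the builder at the same time.
        for scope in scopes.iter().rev() {
            let block = this.start_new_block();
            println!("block {} cleans up scope {:?}", block, scope.name);
        }
    });
    println!("scopes restored: {}", builder.scopes.len());
}
```

The closing `assert!` mirrors the real helper: nothing is allowed to push onto `self.scopes` while the stack is checked out.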
@@ -249,27 +249,47 @@ impl<'a,'tcx> Builder<'a,'tcx> {
                       extent: CodeExtent,
                       mut block: BasicBlock,
                       target: BasicBlock) {
-        let scope_count = 1 + self.scopes.iter().rev().position(|scope| scope.extent == extent)
-                                                       .unwrap_or_else(||{
-            self.hir.span_bug(span, &format!("extent {:?} does not enclose", extent))
+        self.with_scopes(|this, scopes| {
+            let scope_count = 1 + scopes.iter().rev().position(|scope| scope.extent == extent)
+                                                     .unwrap_or_else(||{
+                this.hir.span_bug(span, &format!("extent {:?} does not enclose", extent))
+            });
+            for (idx, ref scope) in scopes.iter().enumerate().rev().take(scope_count) {
+                unpack!(block = this.build_scope_drops(scope, &scopes[..idx], block));
+                if let Some(ref free_data) = scope.free {
+                    let next = this.cfg.start_new_block();
+                    let free = this.build_free(free_data, next);
+                    this.cfg.terminate(block, free);
+                    block = next;
+                }
+            }
+            this.cfg.terminate(block, Terminator::Goto { target: target });
         });
+    }
 
-        let tmp = self.get_unit_temp();
-        for (idx, ref scope) in self.scopes.iter().enumerate().rev().take(scope_count) {
-            unpack!(block = build_scope_drops(&mut self.cfg,
-                                              scope,
-                                              &self.scopes[..idx],
-                                              block));
-            if let Some(ref free_data) = scope.free {
-                let next = self.cfg.start_new_block();
-                let free = build_free(self.hir.tcx(), tmp.clone(), free_data, next);
-                self.cfg.terminate(block, free);
-                block = next;
-            }
+    /// Creates a path that performs all required cleanup for unwinding.
+    ///
+    /// This path terminates in Resume. Returns the start of the path.
+    /// See module comment for more details. None indicates there’s no
+    /// cleanup to do at this point.
+    pub fn diverge_cleanup(&mut self) -> Option<BasicBlock> {
+        if self.scopes.is_empty() {
+            return None;
         }
-        self.cfg.terminate(block, Terminator::Goto { target: target });
+        let mut next_block = None;
+        self.with_scopes(|this, scopes| {
+            // Given an array of scopes, we generate these from the outermost scope to the
+            // innermost one. Thus for array [S0, S1, S2] with corresponding cleanup blocks [B0,
+            // B1, B2], we will generate B0 <- B1 <- B2 in left-to-right order. Control flow of the
+            // generated blocks always ends up at a block with the Resume terminator.
+            for scope in scopes.iter_mut().filter(|s| !s.drops.is_empty() || s.free.is_some()) {
+                next_block = Some(this.build_diverge_scope(scope, next_block));
+            }
+            scopes.iter().rev().flat_map(|x| x.cached_block()).next()
+        })
     }
 
+
     // Finding scopes
     // ==============
     /// Finds the loop scope for a given label. This is used for
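The `B0 <- B1 <- B2` comment inside the new `diverge_cleanup` is the heart of the unwind path: cleanup blocks are built from the outermost scope inwards, each block chaining to the next-outer one (or to a `Resume` block), and each block is cached so later calls reuse what was already built. A rough standalone toy model of that chaining and memoization; the types and the string "blocks" are illustrative stand-ins, not rustc's CFG or MIR:

```rust
struct Cfg {
    blocks: Vec<String>,
}

impl Cfg {
    fn start_cleanup_block(&mut self, body: String) -> usize {
        self.blocks.push(body);
        self.blocks.len() - 1
    }
}

struct ToyScope {
    drops: Vec<&'static str>,
    cached_block: Option<usize>,
}

/// Returns the block unwinding should enter: the innermost scope's cleanup
/// block, which drops that scope's values and then continues to the enclosing
/// scope's cleanup block, eventually reaching a "resume" block.
fn diverge_cleanup(cfg: &mut Cfg, scopes: &mut [ToyScope]) -> Option<usize> {
    let mut next_block: Option<usize> = None;
    for scope in scopes.iter_mut().filter(|s| !s.drops.is_empty()) {
        // Reuse the cached block if this scope's cleanup was already built.
        let block = match scope.cached_block {
            Some(b) => b,
            None => {
                let goes_to = match next_block {
                    Some(b) => format!("goto B{}", b),
                    None => "resume".to_string(),
                };
                let b = cfg.start_cleanup_block(format!("drop {:?}, then {}", scope.drops, goes_to));
                scope.cached_block = Some(b);
                b
            }
        };
        next_block = Some(block);
    }
    next_block
}

fn main() {
    let mut cfg = Cfg { blocks: Vec::new() };
    let mut scopes = vec![
        ToyScope { drops: vec!["a"], cached_block: None },      // S0, outermost
        ToyScope { drops: vec![], cached_block: None },         // S1, nothing to drop
        ToyScope { drops: vec!["b", "c"], cached_block: None }, // S2, innermost
    ];
    let entry = diverge_cleanup(&mut cfg, &mut scopes);
    println!("unwind entry: {:?}", entry);
    for (i, b) in cfg.blocks.iter().enumerate() {
        println!("B{}: {}", i, b);
    }
    // Calling again reuses the cached blocks instead of rebuilding the chain.
    assert_eq!(diverge_cleanup(&mut cfg, &mut scopes), entry);
    assert_eq!(cfg.blocks.len(), 2);
}
```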
@@ -369,33 +389,6 @@ impl<'a,'tcx> Builder<'a,'tcx> {
 
     // Other
     // =====
-    /// Creates a path that performs all required cleanup for unwinding.
-    ///
-    /// This path terminates in Resume. Returns the start of the path.
-    /// See module comment for more details. None indicates there’s no
-    /// cleanup to do at this point.
-    pub fn diverge_cleanup(&mut self) -> Option<BasicBlock> {
-        if self.scopes.is_empty() {
-            return None;
-        }
-        let unit_temp = self.get_unit_temp();
-        let Builder { ref mut hir, ref mut cfg, ref mut scopes, .. } = *self;
-        let mut next_block = None;
-
-        // Given an array of scopes, we generate these from the outermost scope to the innermost
-        // one. Thus for array [S0, S1, S2] with corresponding cleanup blocks [B0, B1, B2], we will
-        // generate B0 <- B1 <- B2 in left-to-right order. Control flow of the generated blocks
-        // always ends up at a block with the Resume terminator.
-        for scope in scopes.iter_mut().filter(|s| !s.drops.is_empty() || s.free.is_some()) {
-            next_block = Some(build_diverge_scope(hir.tcx(),
-                                                  cfg,
-                                                  unit_temp.clone(),
-                                                  scope,
-                                                  next_block));
-        }
-        scopes.iter().rev().flat_map(|x| x.cached_block()).next()
-    }
-
     /// Utility function for *non*-scope code to build their own drops
     pub fn build_drop(&mut self, block: BasicBlock, value: Lvalue<'tcx>) -> BlockAnd<()> {
         let next_target = self.cfg.start_new_block();
@@ -408,6 +401,142 @@ impl<'a,'tcx> Builder<'a,'tcx> {
         next_target.unit()
     }
 
+    /// Build the call to the language item which frees the unfilled drops.
+    fn build_free(&mut self, data: &FreeData<'tcx>, target: BasicBlock) -> Terminator<'tcx> {
+        let tcx = self.hir.tcx();
+        let free_func = tcx.lang_items.require(lang_items::BoxFreeFnLangItem)
+                           .unwrap_or_else(|e| tcx.sess.fatal(&e));
+        let substs = tcx.mk_substs(Substs::new(
+            VecPerParamSpace::new(vec![], vec![], vec![data.item_ty]),
+            VecPerParamSpace::new(vec![], vec![], vec![])
+        ));
+        Terminator::Call {
+            func: Operand::Constant(Constant {
+                span: data.span,
+                ty: tcx.lookup_item_type(free_func).ty.subst(tcx, substs),
+                literal: Literal::Item {
+                    def_id: free_func,
+                    kind: ItemKind::Function,
+                    substs: substs
+                }
+            }),
+            args: vec![Operand::Consume(data.value.clone())],
+            destination: Some((self.get_unit_temp(), target)),
+            cleanup: None
+        }
+    }
+
+    /// Builds drops for pop_scope and exit_scope.
+    fn build_scope_drops(&mut self,
+                         scope: &Scope<'tcx>,
+                         earlier_scopes: &[Scope<'tcx>],
+                         mut block: BasicBlock)
+                         -> BlockAnd<()> {
+        let mut iter = scope.drops.iter().rev().peekable();
+        while let Some(drop_data) = iter.next() {
+            // Try to find the next block with its cached block for us to diverge into in case the
+            // drop panics.
+            let on_diverge = iter.peek().iter().flat_map(|dd| dd.cached_block.into_iter()).next();
+            // If there’s no `cached_block` within the current scope, we must look for one in the
+            // enclosing scope.
+            let on_diverge = on_diverge.or_else(||{
+                earlier_scopes.iter().rev().flat_map(|s| s.cached_block()).next()
+            });
+            let next = self.cfg.start_new_block();
+            self.cfg.terminate(block, Terminator::Drop {
+                value: drop_data.value.clone(),
+                target: next,
+                unwind: on_diverge
+            });
+            block = next;
+        }
+        block.unit()
+    }
+
+    /// Build the diverging blocks for this scope.
+    fn build_diverge_scope(&mut self,
+                           scope: &mut Scope<'tcx>,
+                           target: Option<BasicBlock>)
+                           -> BasicBlock {
+        debug_assert!(!scope.drops.is_empty() || scope.free.is_some());
+        // First, we build the drops, iterating the drops array in reverse. We do that because as
+        // soon as we find a cached block, we know that we’re done.
+        let mut previous = None;
+        let mut last_drop_block = None;
+        for drop_data in scope.drops.iter_mut().rev() {
+            if let Some(cached_block) = drop_data.cached_block {
+                if let Some((previous_block, previous_value)) = previous {
+                    self.cfg.terminate(previous_block, Terminator::Drop {
+                        value: previous_value,
+                        target: cached_block,
+                        unwind: None
+                    });
+                    return last_drop_block.unwrap();
+                } else {
+                    return cached_block;
+                }
+            } else {
+                let block = self.cfg.start_new_cleanup_block();
+                drop_data.cached_block = Some(block);
+                if let Some((previous_block, previous_value)) = previous {
+                    self.cfg.terminate(previous_block, Terminator::Drop {
+                        value: previous_value,
+                        target: block,
+                        unwind: None
+                    });
+                } else {
+                    last_drop_block = Some(block);
+                }
+                previous = Some((block, drop_data.value.clone()));
+            }
+        }
+
+        // Prepare the end target for this chain.
+        let mut target = target.unwrap_or_else(||{
+            let b = self.cfg.start_new_cleanup_block();
+            self.cfg.terminate(b, Terminator::Resume);
+            b
+        });
+
+        // Then, build the free branching into the prepared target.
+        if let Some(ref mut free_data) = scope.free {
+            target = if let Some(cached_block) = free_data.cached_block {
+                cached_block
+            } else {
+                let into = self.cfg.start_new_cleanup_block();
+                let free = self.build_free(free_data, target);
+                self.cfg.terminate(into, free);
+                free_data.cached_block = Some(into);
+                into
+            }
+        };
+
+        if let Some((previous_block, previous_value)) = previous {
+            // Finally, branch into that just-built `target` from the `previous_block`.
+            self.cfg.terminate(previous_block, Terminator::Drop {
+                value: previous_value,
+                target: target,
+                unwind: None
+            });
+            last_drop_block.unwrap()
+        } else {
+            // If `previous.is_none()`, there were no drops in this scope – we return the
+            // target, which is possibly the free drop we just built.
+            target
+        }
+    }
+
+    /// Utility function for temporarily stealing ownership of the scopes array from the Builder.
+    fn with_scopes<R, F: FnOnce(&mut Self, &mut [Scope<'tcx>]) -> R>(&mut self, f: F) -> R {
+        let mut scopes = Vec::new();
+        // Take ownership of the scopes for the duration of the closure.
+        ::std::mem::swap(&mut scopes, &mut self.scopes);
+        let r = f(self, &mut scopes);
+        // Return the ownership and ensure the replacement vector hasn’t been changed.
+        ::std::mem::swap(&mut scopes, &mut self.scopes);
+        assert!(scopes.is_empty());
+        r
+    }
 
     // Panicking
     // =========
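One detail of `build_scope_drops` worth spelling out: each emitted `Drop` gets its `unwind` edge from the cleanup block cached for the *next* drop that would still have to run on panic, falling back to the nearest cached block of an enclosing scope. A small hypothetical walk-through of just that selection logic, using stand-in types rather than the MIR structures:

```rust
struct DropData {
    value: &'static str,
    cached_block: Option<usize>,
}

struct ToyScope {
    drops: Vec<DropData>,
}

impl ToyScope {
    /// Innermost drop's cached cleanup block, if one was built for unwinding.
    fn cached_block(&self) -> Option<usize> {
        self.drops.iter().rev().filter_map(|d| d.cached_block).next()
    }
}

fn scope_drops(scope: &ToyScope, earlier_scopes: &[ToyScope]) {
    // Drops are emitted in reverse declaration order.
    let mut iter = scope.drops.iter().rev().peekable();
    while let Some(drop_data) = iter.next() {
        // Unwind into the cleanup block of the drop that would run next…
        let on_diverge = iter.peek().and_then(|dd| dd.cached_block)
            // …or, failing that, the nearest cached block of an enclosing scope.
            .or_else(|| earlier_scopes.iter().rev().filter_map(|s| s.cached_block()).next());
        println!("Drop({}) -> next block, unwind: {:?}", drop_data.value, on_diverge);
    }
}

fn main() {
    let outer = ToyScope {
        drops: vec![DropData { value: "a", cached_block: Some(0) }],
    };
    let inner = ToyScope {
        drops: vec![
            DropData { value: "b", cached_block: Some(1) },
            DropData { value: "c", cached_block: None },
        ],
    };
    // Emits Drop(c) unwinding to block 1, then Drop(b) unwinding to block 0.
    scope_drops(&inner, std::slice::from_ref(&outer));
}
```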
@@ -514,132 +643,4 @@ impl<'a,'tcx> Builder<'a,'tcx> {
             literal: self.hir.usize_literal(span_lines.line)
         })
     }
-
-}
-
-/// Builds drops for pop_scope and exit_scope.
-fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
-                           scope: &Scope<'tcx>,
-                           earlier_scopes: &[Scope<'tcx>],
-                           mut block: BasicBlock)
-                           -> BlockAnd<()> {
-    let mut iter = scope.drops.iter().rev().peekable();
-    while let Some(drop_data) = iter.next() {
-        // Try to find the next block with its cached block for us to diverge into in case the
-        // drop panics.
-        let on_diverge = iter.peek().iter().flat_map(|dd| dd.cached_block.into_iter()).next();
-        // If there’s no `cached_block`s within current scope, we must look for one in the
-        // enclosing scope.
-        let on_diverge = on_diverge.or_else(||{
-            earlier_scopes.iter().rev().flat_map(|s| s.cached_block()).next()
-        });
-        let next = cfg.start_new_block();
-        cfg.terminate(block, Terminator::Drop {
-            value: drop_data.value.clone(),
-            target: next,
-            unwind: on_diverge
-        });
-        block = next;
-    }
-    block.unit()
-}
-
-fn build_diverge_scope<'tcx>(tcx: &ty::ctxt<'tcx>,
-                             cfg: &mut CFG<'tcx>,
-                             unit_temp: Lvalue<'tcx>,
-                             scope: &mut Scope<'tcx>,
-                             target: Option<BasicBlock>)
-                             -> BasicBlock {
-    debug_assert!(!scope.drops.is_empty() || scope.free.is_some());
-
-    // First, we build the drops, iterating the drops array in reverse. We do that so that as soon
-    // as we find a `cached_block`, we know that we’re finished and don’t need to do anything else.
-    let mut previous = None;
-    let mut last_drop_block = None;
-    for drop_data in scope.drops.iter_mut().rev() {
-        if let Some(cached_block) = drop_data.cached_block {
-            if let Some((previous_block, previous_value)) = previous {
-                cfg.terminate(previous_block, Terminator::Drop {
-                    value: previous_value,
-                    target: cached_block,
-                    unwind: None
-                });
-                return last_drop_block.unwrap();
-            } else {
-                return cached_block;
-            }
-        } else {
-            let block = cfg.start_new_cleanup_block();
-            drop_data.cached_block = Some(block);
-            if let Some((previous_block, previous_value)) = previous {
-                cfg.terminate(previous_block, Terminator::Drop {
-                    value: previous_value,
-                    target: block,
-                    unwind: None
-                });
-            } else {
-                last_drop_block = Some(block);
-            }
-            previous = Some((block, drop_data.value.clone()));
-        }
-    }
-
-    // Prepare the end target for this chain.
-    let mut target = target.unwrap_or_else(||{
-        let b = cfg.start_new_cleanup_block();
-        cfg.terminate(b, Terminator::Resume);
-        b
-    });
-
-    // Then, build the free branching into the prepared target.
-    if let Some(ref mut free_data) = scope.free {
-        target = if let Some(cached_block) = free_data.cached_block {
-            cached_block
-        } else {
-            let into = cfg.start_new_cleanup_block();
-            cfg.terminate(into, build_free(tcx, unit_temp, free_data, target));
-            free_data.cached_block = Some(into);
-            into
-        }
-    };
-
-    if let Some((previous_block, previous_value)) = previous {
-        // Finally, branch into that just-built `target` from the `previous_block`.
-        cfg.terminate(previous_block, Terminator::Drop {
-            value: previous_value,
-            target: target,
-            unwind: None
-        });
-        last_drop_block.unwrap()
-    } else {
-        // If `previous.is_none()`, there were no drops in this scope – we return the
-        // target.
-        target
-    }
-}
-
-fn build_free<'tcx>(tcx: &ty::ctxt<'tcx>,
-                    unit_temp: Lvalue<'tcx>,
-                    data: &FreeData<'tcx>,
-                    target: BasicBlock) -> Terminator<'tcx> {
-    let free_func = tcx.lang_items.require(lang_items::BoxFreeFnLangItem)
-                       .unwrap_or_else(|e| tcx.sess.fatal(&e));
-    let substs = tcx.mk_substs(Substs::new(
-        VecPerParamSpace::new(vec![], vec![], vec![data.item_ty]),
-        VecPerParamSpace::new(vec![], vec![], vec![])
-    ));
-    Terminator::Call {
-        func: Operand::Constant(Constant {
-            span: data.span,
-            ty: tcx.lookup_item_type(free_func).ty.subst(tcx, substs),
-            literal: Literal::Item {
-                def_id: free_func,
-                kind: ItemKind::Function,
-                substs: substs
-            }
-        }),
-        args: vec![Operand::Consume(data.value.clone())],
-        destination: Some((unit_temp, target)),
-        cleanup: None
-    }
-}
 }