@@ -77,12 +77,11 @@ pub struct OnDiskCache<'sess> {
    // `serialized_data`.
    prev_diagnostics_index: FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>,

-    // A cache to ensure we don't read allocations twice
-    interpret_alloc_cache: RefCell<FxHashMap<usize, interpret::AllocId>>,
+    // Alloc indices to memory location map
+    prev_interpret_alloc_index: Vec<AbsoluteBytePos>,

-    // A map from positions to size of the serialized allocation
-    // so we can skip over already processed allocations
-    interpret_alloc_size: RefCell<FxHashMap<usize, usize>>,
+    /// Deserialization: A cache to ensure we don't read allocations twice
+    interpret_alloc_cache: RefCell<FxHashMap<usize, interpret::AllocId>>,
}

// This type is used only for (de-)serialization.
@@ -92,6 +91,8 @@ struct Footer {
    prev_cnums: Vec<(u32, String, CrateDisambiguator)>,
    query_result_index: EncodedQueryResultIndex,
    diagnostics_index: EncodedQueryResultIndex,
+    // the location of all allocations
+    interpret_alloc_index: Vec<AbsoluteBytePos>,
}

type EncodedQueryResultIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>;
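
The `Footer` gains one `AbsoluteBytePos` per interned allocation, stored in the order in which the encoder assigned indices. On the decoding side, an allocation's small integer index then resolves to a byte offset with a plain slice lookup; a minimal sketch (the helper name is hypothetical, not part of this diff):

// Hypothetical helper illustrating what the new footer table is for: the dense
// index written into the query result stream resolves to a byte offset.
fn alloc_position(prev_interpret_alloc_index: &[AbsoluteBytePos], idx: usize) -> usize {
    prev_interpret_alloc_index[idx].to_usize()
}
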
@@ -148,8 +149,8 @@ impl<'sess> OnDiskCache<'sess> {
            query_result_index: footer.query_result_index.into_iter().collect(),
            prev_diagnostics_index: footer.diagnostics_index.into_iter().collect(),
            synthetic_expansion_infos: Lock::new(FxHashMap()),
+            prev_interpret_alloc_index: footer.interpret_alloc_index,
            interpret_alloc_cache: RefCell::new(FxHashMap::default()),
-            interpret_alloc_size: RefCell::new(FxHashMap::default()),
        }
    }

@@ -165,8 +166,8 @@ impl<'sess> OnDiskCache<'sess> {
            query_result_index: FxHashMap(),
            prev_diagnostics_index: FxHashMap(),
            synthetic_expansion_infos: Lock::new(FxHashMap()),
+            prev_interpret_alloc_index: Vec::new(),
            interpret_alloc_cache: RefCell::new(FxHashMap::default()),
-            interpret_alloc_size: RefCell::new(FxHashMap::default()),
        }
    }

@@ -199,7 +200,9 @@ impl<'sess> OnDiskCache<'sess> {
            type_shorthands: FxHashMap(),
            predicate_shorthands: FxHashMap(),
            expn_info_shorthands: FxHashMap(),
-            interpret_alloc_shorthands: FxHashMap(),
+            interpret_allocs: FxHashMap(),
+            interpret_alloc_ids: FxHashSet(),
+            interpret_allocs_inverse: Vec::new(),
            codemap: CachingCodemapView::new(tcx.sess.codemap()),
            file_to_file_index,
        };
@@ -277,6 +280,31 @@ impl<'sess> OnDiskCache<'sess> {
            diagnostics_index
        };

+        let interpret_alloc_index = {
+            let mut interpret_alloc_index = Vec::new();
+            let mut n = 0;
+            loop {
+                let new_n = encoder.interpret_alloc_ids.len();
+                for idx in n..new_n {
+                    let id = encoder.interpret_allocs_inverse[idx];
+                    let pos = AbsoluteBytePos::new(encoder.position());
+                    interpret_alloc_index.push(pos);
+                    interpret::specialized_encode_alloc_id(
+                        &mut encoder,
+                        tcx,
+                        id,
+                    )?;
+                }
+                // if we have found new ids, serialize those, too
+                if n == new_n {
+                    // otherwise, abort
+                    break;
+                }
+                n = new_n;
+            }
+            interpret_alloc_index
+        };
+
        let sorted_cnums = sorted_cnums_including_local_crate(tcx);
        let prev_cnums: Vec<_> = sorted_cnums.iter().map(|&cnum| {
            let crate_name = tcx.original_crate_name(cnum).as_str().to_string();
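
Encoding one allocation can itself intern further `AllocId`s (an allocation's relocations point at other allocations), so the loop above keeps draining the newly appended tail of `interpret_allocs_inverse` until the length stops growing. A minimal, self-contained sketch of the same fixpoint pattern over a generic worklist (names and signature are illustrative, not rustc API):

// Fixpoint sketch: processing one item may append newly discovered items to
// the worklist, so keep handling the unprocessed tail until nothing new appears.
fn encode_all<T: Clone>(worklist: &mut Vec<T>, mut encode_one: impl FnMut(T, &mut Vec<T>)) {
    let mut n = 0;
    loop {
        let new_n = worklist.len();
        for idx in n..new_n {
            let item = worklist[idx].clone();
            // encoding `item` may push more items onto `worklist`
            encode_one(item, worklist);
        }
        if n == new_n {
            break; // fixpoint: this pass discovered nothing new
        }
        n = new_n;
    }
}
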
@@ -291,6 +319,7 @@ impl<'sess> OnDiskCache<'sess> {
            prev_cnums,
            query_result_index,
            diagnostics_index,
+            interpret_alloc_index,
        })?;

        // Encode the position of the footer as the last 8 bytes of the
@@ -396,8 +425,8 @@ impl<'sess> OnDiskCache<'sess> {
            file_index_to_file: &self.file_index_to_file,
            file_index_to_stable_id: &self.file_index_to_stable_id,
            synthetic_expansion_infos: &self.synthetic_expansion_infos,
+            prev_interpret_alloc_index: &self.prev_interpret_alloc_index,
            interpret_alloc_cache: &self.interpret_alloc_cache,
-            interpret_alloc_size: &self.interpret_alloc_size,
        };

        match decode_tagged(&mut decoder, dep_node_index) {
@@ -460,7 +489,8 @@ struct CacheDecoder<'a, 'tcx: 'a, 'x> {
    file_index_to_file: &'x Lock<FxHashMap<FileMapIndex, Lrc<FileMap>>>,
    file_index_to_stable_id: &'x FxHashMap<FileMapIndex, StableFilemapId>,
    interpret_alloc_cache: &'x RefCell<FxHashMap<usize, interpret::AllocId>>,
-    interpret_alloc_size: &'x RefCell<FxHashMap<usize, usize>>,
+    /// maps from index in the cache file to location in the cache file
+    prev_interpret_alloc_index: &'x [AbsoluteBytePos],
}

impl<'a, 'tcx, 'x> CacheDecoder<'a, 'tcx, 'x> {
@@ -584,36 +614,29 @@ implement_ty_decoder!( CacheDecoder<'a, 'tcx, 'x> );
impl<'a, 'tcx, 'x> SpecializedDecoder<interpret::AllocId> for CacheDecoder<'a, 'tcx, 'x> {
    fn specialized_decode(&mut self) -> Result<interpret::AllocId, Self::Error> {
        let tcx = self.tcx;
-        let pos = TyDecoder::position(self);
-        trace!("specialized_decode_alloc_id: {:?}", pos);
-        if let Some(cached) = self.interpret_alloc_cache.borrow().get(&pos).cloned() {
-            // if there's no end position we are currently deserializing a recursive
-            // allocation
-            if let Some(end) = self.interpret_alloc_size.borrow().get(&pos).cloned() {
-                trace!("{} already cached as {:?}", pos, cached);
-                // skip ahead
-                self.opaque.set_position(end);
-                return Ok(cached)
-            }
+        let idx = usize::decode(self)?;
+        trace!("loading index {}", idx);
+
+        if let Some(cached) = self.interpret_alloc_cache.borrow().get(&idx).cloned() {
+            trace!("loading alloc id {:?} from alloc_cache", cached);
+            return Ok(cached);
        }
-        let id = interpret::specialized_decode_alloc_id(
-            self,
-            tcx,
-            pos,
-            |this, pos, alloc_id| {
-                assert!(this.interpret_alloc_cache.borrow_mut().insert(pos, alloc_id).is_none());
-            },
-            |this, shorthand| {
-                // need to load allocation
-                this.with_position(shorthand, |this| interpret::AllocId::decode(this))
-            }
-        )?;
-        assert!(self
-            .interpret_alloc_size
-            .borrow_mut()
-            .insert(pos, TyDecoder::position(self))
-            .is_none());
-        Ok(id)
+        let pos = self.prev_interpret_alloc_index[idx].to_usize();
+        trace!("loading position {}", pos);
+        self.with_position(pos, |this| {
+            interpret::specialized_decode_alloc_id(
+                this,
+                tcx,
+                |this, alloc_id| {
+                    trace!("caching idx {} for alloc id {} at position {}", idx, alloc_id, pos);
+                    assert!(this
+                        .interpret_alloc_cache
+                        .borrow_mut()
+                        .insert(idx, alloc_id)
+                        .is_none());
+                },
+            )
+        })
    }
}
impl<'a, 'tcx, 'x> SpecializedDecoder<Span> for CacheDecoder<'a, 'tcx, 'x> {
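
On the decode side an `AllocId` now arrives as a plain `usize` index: the per-index cache is consulted first, otherwise the decoder jumps to the byte position recorded in the footer and decodes there, filling the cache as soon as the id is known so that recursive references resolve from the cache. A minimal sketch of that memoization pattern, with illustrative names and `u64` standing in for the decoded id:

use std::cell::RefCell;
use std::collections::HashMap;

// Sketch: resolve a dense index to a byte position, memoize the decoded value
// keyed by the index, and fill the cache from inside the decode callback so a
// recursive reference hits the cache instead of re-decoding forever.
fn decode_indexed(
    idx: usize,
    positions: &[usize],
    cache: &RefCell<HashMap<usize, u64>>,
    decode_at: impl FnOnce(usize, &mut dyn FnMut(u64)) -> u64,
) -> u64 {
    if let Some(&cached) = cache.borrow().get(&idx) {
        return cached;
    }
    let pos = positions[idx];
    decode_at(pos, &mut |id| {
        // invoked as soon as the id is known, before the payload is decoded
        cache.borrow_mut().insert(idx, id);
    })
}
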
@@ -777,7 +800,9 @@ struct CacheEncoder<'enc, 'a, 'tcx, E>
    type_shorthands: FxHashMap<ty::Ty<'tcx>, usize>,
    predicate_shorthands: FxHashMap<ty::Predicate<'tcx>, usize>,
    expn_info_shorthands: FxHashMap<Mark, AbsoluteBytePos>,
-    interpret_alloc_shorthands: FxHashMap<interpret::AllocId, usize>,
+    interpret_allocs: FxHashMap<interpret::AllocId, usize>,
+    interpret_allocs_inverse: Vec<interpret::AllocId>,
+    interpret_alloc_ids: FxHashSet<interpret::AllocId>,
    codemap: CachingCodemapView<'tcx>,
    file_to_file_index: FxHashMap<*const FileMap, FileMapIndex>,
}
@@ -814,27 +839,17 @@ impl<'enc, 'a, 'tcx, E> SpecializedEncoder<interpret::AllocId> for CacheEncoder<
    where E: 'enc + ty_codec::TyEncoder
{
    fn specialized_encode(&mut self, alloc_id: &interpret::AllocId) -> Result<(), Self::Error> {
-        use std::collections::hash_map::Entry;
-        let tcx = self.tcx;
-        let pos = self.position();
-        let shorthand = match self.interpret_alloc_shorthands.entry(*alloc_id) {
-            Entry::Occupied(entry) => Some(entry.get().clone()),
-            Entry::Vacant(entry) => {
-                // ensure that we don't place any AllocIds at the very beginning
-                // of the metadata file, because that would end up making our indices
-                // not special. It is essentially impossible for that to happen,
-                // but let's make sure
-                assert!(pos >= interpret::SHORTHAND_START);
-                entry.insert(pos);
-                None
-            },
+        let index = if self.interpret_alloc_ids.insert(*alloc_id) {
+            let idx = self.interpret_alloc_ids.len() - 1;
+            assert_eq!(idx, self.interpret_allocs_inverse.len());
+            self.interpret_allocs_inverse.push(*alloc_id);
+            assert!(self.interpret_allocs.insert(*alloc_id, idx).is_none());
+            idx
+        } else {
+            self.interpret_allocs[alloc_id]
        };
-        interpret::specialized_encode_alloc_id(
-            self,
-            tcx,
-            *alloc_id,
-            shorthand,
-        )
+
+        index.encode(self)
    }
}

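
The encoder now interns each `AllocId` the first time it is seen, handing out dense indices in encounter order; `interpret_allocs_inverse` keeps the ids in that order for the serialization loop, and only the small index is written into the query result stream. A minimal sketch of this interning scheme with a generic value type (illustrative, not the rustc types):

use std::collections::HashMap;
use std::hash::Hash;

// Sketch of a dense interner: the first sighting of a value assigns the next
// index, and the inverse table records values in assignment order for a later pass.
struct Interner<T: Hash + Eq + Copy> {
    indices: HashMap<T, usize>, // value -> dense index
    inverse: Vec<T>,            // dense index -> value
}

impl<T: Hash + Eq + Copy> Interner<T> {
    fn new() -> Self {
        Interner { indices: HashMap::new(), inverse: Vec::new() }
    }

    fn intern(&mut self, value: T) -> usize {
        if let Some(&idx) = self.indices.get(&value) {
            return idx;
        }
        let idx = self.inverse.len();
        self.inverse.push(value);
        self.indices.insert(value, idx);
        idx
    }
}

The diff additionally keeps a separate `FxHashSet` for the membership test; the sketch folds that role into the map itself.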