@@ -536,7 +536,7 @@ namespace {
   };

   struct ConformanceCacheEntry {
-  private:
+  public:
     /// Storage used when we have global actor isolation on the conformance.
     struct ExtendedStorage {
       /// The protocol to which the type conforms.
@@ -549,18 +549,22 @@ namespace {
       /// When the conformance is global-actor-isolated, this is the conformance
       /// of globalActorIsolationType to GlobalActor.
       const WitnessTable *globalActorIsolationWitnessTable = nullptr;
+
+      /// The next pointer in the list of extended storage allocations.
+      ExtendedStorage *next = nullptr;
     };

     const Metadata *Type;
-    llvm::PointerUnion<const ProtocolDescriptor *, const ExtendedStorage *>
+    llvm::PointerUnion<const ProtocolDescriptor *, ExtendedStorage *>
       ProtoOrStorage;

     /// The witness table.
     const WitnessTable *Witness;

   public:
     ConformanceCacheEntry(ConformanceCacheKey key,
-                          ConformanceLookupResult result)
+                          ConformanceLookupResult result,
+                          std::atomic<ExtendedStorage *> &storageHead)
       : Type(key.Type), Witness(result.witnessTable)
     {
       if (!result.globalActorIsolationType) {
@@ -576,6 +580,17 @@ namespace {
       };

       ProtoOrStorage = storage;
+
+      // Add the storage pointer to the list of extended storage allocations
+      // so that we can free them later.
+      auto head = storageHead.load(std::memory_order_relaxed);
+      while (true) {
+        storage->next = head;
+        if (storageHead.compare_exchange_weak(
+                head, storage, std::memory_order_release,
+                std::memory_order_relaxed))
+          break;
+      };
     }

     bool matchesKey(const ConformanceCacheKey &key) const {
@@ -591,7 +606,7 @@ namespace {
       if (auto proto = ProtoOrStorage.dyn_cast<const ProtocolDescriptor *>())
         return proto;

-      if (auto storage = ProtoOrStorage.dyn_cast<const ExtendedStorage *>())
+      if (auto storage = ProtoOrStorage.dyn_cast<ExtendedStorage *>())
         return storage->Proto;

       return nullptr;
@@ -611,7 +626,7 @@ namespace {
       if (ProtoOrStorage.is<const ProtocolDescriptor *>())
         return ConformanceLookupResult { Witness, nullptr, nullptr };

-      if (auto storage = ProtoOrStorage.dyn_cast<const ExtendedStorage *>()) {
+      if (auto storage = ProtoOrStorage.dyn_cast<ExtendedStorage *>()) {
         return ConformanceLookupResult(
             Witness, storage->globalActorIsolationType,
             storage->globalActorIsolationWitnessTable);
@@ -626,6 +641,11 @@ namespace {
 struct ConformanceState {
   ConcurrentReadableHashMap<ConformanceCacheEntry> Cache;
   ConcurrentReadableArray<ConformanceSection> SectionsToScan;
+
+  /// The head of an intrusive linked list that keeps track of all of the
+  /// conformance cache entries that require extended storage.
+  std::atomic<ConformanceCacheEntry::ExtendedStorage *> ExtendedStorageHead{nullptr};
+
   bool scanSectionsBackwards;

 #if USE_DYLD_SHARED_CACHE_CONFORMANCE_TABLES
@@ -714,7 +734,8 @@ struct ConformanceState {
         return false; // abandon the new entry

       ::new (entry) ConformanceCacheEntry(
-          ConformanceCacheKey(type, proto), result);
+          ConformanceCacheKey(type, proto), result,
+          ExtendedStorageHead);
       return true; // keep the new entry
     });
   }
@@ -748,7 +769,20 @@ static void _registerProtocolConformances(ConformanceState &C,

   // Blow away the conformances cache to get rid of any negative entries that
   // may now be obsolete.
-  C.Cache.clear();
+  C.Cache.clear([&](ConcurrentFreeListNode *&freeListHead) {
+    // The extended storage for conformance entries will need to be freed
+    // eventually. Put it on the concurrent free list so the cache will do so.
+    auto storageHead = C.ExtendedStorageHead.load(std::memory_order_relaxed);
+    while (storageHead) {
+      auto current = storageHead;
+      auto newHead = current->next;
+      if (C.ExtendedStorageHead.compare_exchange_weak(
+              storageHead, newHead, std::memory_order_release,
+              std::memory_order_relaxed)) {
+        ConcurrentFreeListNode::add(&freeListHead, current);
+      }
+    }
+  });
 }

 void swift::addImageProtocolConformanceBlockCallbackUnsafe(
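
The change above threads every ExtendedStorage allocation onto an atomic intrusive singly linked list (a compare-and-swap push in the ConformanceCacheEntry constructor) and later drains that list into the cache's concurrent free list when the cache is cleared, so the allocations can eventually be released. Below is a minimal standalone sketch of that push/drain pattern. The names Node, push, drain, and consume are illustrative only and are not Swift runtime APIs; the memory orderings simply mirror the ones used in the diff.

// Standalone sketch of the lock-free intrusive list used above; not Swift
// runtime code. Node/push/drain/consume are made-up names for illustration.
#include <atomic>
#include <cstdio>

struct Node {
  int payload = 0;
  Node *next = nullptr;  // intrusive link, analogous to ExtendedStorage::next
};

// Push, mirroring the loop added to the ConformanceCacheEntry constructor:
// link the node to the current head, then try to install it as the new head.
void push(std::atomic<Node *> &head, Node *node) {
  Node *expected = head.load(std::memory_order_relaxed);
  while (true) {
    node->next = expected;
    if (head.compare_exchange_weak(expected, node, std::memory_order_release,
                                   std::memory_order_relaxed))
      break;  // on failure, `expected` was reloaded; retry against the new head
  }
}

// Drain, mirroring the Cache.clear() callback: pop one node per successful
// compare-exchange and hand it to `consume` (the diff hands each allocation
// to ConcurrentFreeListNode::add so the cache frees it later).
template <typename Fn>
void drain(std::atomic<Node *> &head, Fn &&consume) {
  Node *expected = head.load(std::memory_order_relaxed);
  while (expected) {
    Node *popped = expected;
    Node *newHead = popped->next;
    if (head.compare_exchange_weak(expected, newHead,
                                   std::memory_order_release,
                                   std::memory_order_relaxed)) {
      consume(popped);
      expected = newHead;  // continue from the head we just installed
    }
    // on failure, `expected` was reloaded with the current head
  }
}

int main() {
  std::atomic<Node *> head{nullptr};
  Node a{1}, b{2};
  push(head, &a);
  push(head, &b);
  drain(head, [](Node *n) { std::printf("releasing node %d\n", n->payload); });
  return 0;
}

Note that draining pops one entry per compare-exchange rather than exchanging the whole head to nullptr; this matches the structure of the diff and tolerates concurrent pushes while the list is being emptied.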