/// ensure that readers which started before the clear see valid (pre-clear)
/// data. Readers which see any array as empty will produce no results, thus
/// providing valid post-clear data.
///
/// This is intended to be used for tables that exist for the life of the
/// process. It has no destructor, to avoid generating useless global destructor
/// calls. The memory it allocates can be freed by calling clear() with no
/// outstanding readers, but this won't destroy the static mutex it uses.
template <class ElemTy > struct ConcurrentReadableHashMap {
583
588
// We use memcpy and don't call destructors. Make sure the elements will put
584
589
// up with this.
@@ -724,7 +729,7 @@ template <class ElemTy> struct ConcurrentReadableHashMap {
724
729
std::atomic<IndexStorage *> Indices{nullptr };
725
730
726
731
// / The writer lock, which must be taken before any mutation of the table.
727
- Mutex WriterLock;
732
+ StaticMutex WriterLock;
728
733
729
734
// / The maximum number of elements that the current elements array can hold.
730
735
uint32_t ElementCapacity{0 };
@@ -841,20 +846,19 @@ template <class ElemTy> struct ConcurrentReadableHashMap {
841
846
}
842
847
843
848
public:
849
+ // Implicitly trivial constructor/destructor.
850
+ ConcurrentReadableHashMap () = default ;
851
+ ~ConcurrentReadableHashMap () = default ;
852
+
844
853
// This type cannot be safely copied or moved.
845
854
ConcurrentReadableHashMap (const ConcurrentReadableHashMap &) = delete ;
846
855
ConcurrentReadableHashMap (ConcurrentReadableHashMap &&) = delete ;
847
856
ConcurrentReadableHashMap &
848
857
operator =(const ConcurrentReadableHashMap &) = delete ;
849
858
850
- ConcurrentReadableHashMap ()
851
- : ReaderCount(0 ), ElementCount(0 ), Elements(nullptr ), Indices(nullptr ),
852
- ElementCapacity (0 ) {}
853
-
854
- ~ConcurrentReadableHashMap () {
855
- assert (ReaderCount.load (std::memory_order_acquire) == 0 &&
856
- " deallocating ConcurrentReadableHashMap with outstanding snapshots" );
857
- FreeListNode::freeAll (&FreeList);
859
+ // / Returns whether there are outstanding readers. For testing purposes only.
860
+ bool hasActiveReaders () {
861
+ return ReaderCount.load (std::memory_order_relaxed) > 0 ;
858
862
}
859
863
860
864
// / Readers take a snapshot of the hash map, then work with the snapshot.
@@ -945,7 +949,7 @@ template <class ElemTy> struct ConcurrentReadableHashMap {
945
949
// / The return value is ignored when `created` is `false`.
946
950
template <class KeyTy , typename Call>
947
951
void getOrInsert (KeyTy key, const Call &call) {
948
- ScopedLock guard (WriterLock);
952
+ StaticScopedLock guard (WriterLock);
949
953
950
954
auto *indices = Indices.load (std::memory_order_relaxed);
951
955
if (!indices)
@@ -955,7 +959,7 @@ template <class ElemTy> struct ConcurrentReadableHashMap {
955
959
auto elementCount = ElementCount.load (std::memory_order_relaxed);
956
960
auto *elements = Elements.load (std::memory_order_relaxed);
957
961
958
- auto found = find (key, indices, elementCount, elements);
962
+ auto found = this -> find (key, indices, elementCount, elements);
959
963
if (found.first ) {
960
964
call (found.first , false );
961
965
deallocateFreeListIfSafe ();
@@ -996,7 +1000,7 @@ template <class ElemTy> struct ConcurrentReadableHashMap {
996
1000
// / Clear the hash table, freeing (when safe) all memory currently used for
997
1001
// / indices and elements.
998
1002
void clear () {
999
- ScopedLock guard (WriterLock);
1003
+ StaticScopedLock guard (WriterLock);
1000
1004
1001
1005
auto *indices = Indices.load (std::memory_order_relaxed);
1002
1006
auto *elements = Elements.load (std::memory_order_relaxed);
@@ -1015,6 +1019,66 @@ template <class ElemTy> struct ConcurrentReadableHashMap {
1015
1019
}
1016
1020
};
1017
1021
1022
+ // / A wrapper type for indirect hash map elements. Stores a pointer to the real
1023
+ // / element and forwards key matching and hashing.
1024
+ template <class ElemTy > struct HashMapElementWrapper {
1025
+ ElemTy *Ptr;
1026
+
1027
+ template <class KeyTy > bool matchesKey (const KeyTy &key) {
1028
+ return Ptr->matchesKey (key);
1029
+ }
1030
+
1031
+ friend llvm::hash_code hash_value (const HashMapElementWrapper &wrapper) {
1032
+ return hash_value (*wrapper.Ptr );
1033
+ }
1034
+ };
1035
+
1036
+ // / A ConcurrentReadableHashMap that provides stable addresses for the elements
1037
+ // / by allocating them separately and storing pointers to them. The elements of
1038
+ // / the hash table are instances of HashMapElementWrapper. A new getOrInsert
1039
+ // / method is provided that directly returns the stable element pointer.
1040
+ template <class ElemTy , class Allocator >
1041
+ struct StableAddressConcurrentReadableHashMap
1042
+ : public ConcurrentReadableHashMap<HashMapElementWrapper<ElemTy>> {
1043
+ // Implicitly trivial destructor.
1044
+ ~StableAddressConcurrentReadableHashMap () = default ;
1045
+
1046
+ // / Get or insert an element for the given key and arguments. Returns the
1047
+ // / pointer to the existing or new element, and a bool indicating whether the
1048
+ // / element was created. When false, the element already existed before the
1049
+ // / call.
1050
+ template <class KeyTy , class ... ArgTys>
1051
+ std::pair<ElemTy *, bool > getOrInsert (KeyTy key, ArgTys &&...args) {
1052
+ // Optimize for the case where the value already exists.
1053
+ if (auto wrapper = this ->snapshot ().find (key))
1054
+ return {wrapper->Ptr , false };
1055
+
1056
+ // No such element. Insert if needed. Note: another thread may have inserted
1057
+ // it in the meantime, so we still have to handle both cases!
1058
+ ElemTy *ptr = nullptr ;
1059
+ bool outerCreated = false ;
1060
+ ConcurrentReadableHashMap<HashMapElementWrapper<ElemTy>>::getOrInsert (
1061
+ key, [&](HashMapElementWrapper<ElemTy> *wrapper, bool created) {
1062
+ if (created) {
1063
+ // Created the indirect entry. Allocate the actual storage.
1064
+ size_t allocSize =
1065
+ sizeof (ElemTy) + ElemTy::getExtraAllocationSize (key, args...);
1066
+ void *memory = Allocator ().Allocate (allocSize, alignof (ElemTy));
1067
+ new (memory) ElemTy (key, std::forward<ArgTys>(args)...);
1068
+ wrapper->Ptr = reinterpret_cast <ElemTy *>(memory);
1069
+ }
1070
+ ptr = wrapper->Ptr ;
1071
+ outerCreated = created;
1072
+ return true ; // Keep the new entry.
1073
+ });
1074
+ return {ptr, outerCreated};
1075
+ }
1076
+
1077
+ private:
1078
+ // Clearing would require deallocating elements, which we don't support.
1079
+ void clear () = delete;
1080
+ };
1081
+
1018
1082
} // end namespace swift
1019
1083
1020
1084
#endif // SWIFT_RUNTIME_CONCURRENTUTILS_H
0 commit comments