 using namespace swift;
 using namespace metadataimpl;
 
-template <class T>
-static int compareIntegers(T left, T right) {
-  return (left == right ? 0 : left < right ? -1 : 1);
-}
-
 static const size_t ValueTypeMetadataAddressPoint = sizeof(TypeMetadataHeader);
 
 static ClassMetadataBounds
@@ -161,40 +156,41 @@ swift::getResilientImmediateMembersOffset(const ClassDescriptor *description) {
 }
 
 namespace {
-  struct GenericCacheEntry;
-
-  // The cache entries in a generic cache are laid out like this:
-  struct GenericCacheEntryHeader {
-    const Metadata *Value;
-    size_t NumArguments;
-  };
-
-  struct GenericCacheEntry
-      : CacheEntry<GenericCacheEntry, GenericCacheEntryHeader> {
+  struct GenericCacheEntry final
+      : MetadataCacheEntryBase<GenericCacheEntry, const Metadata *> {
 
     static const char *getName() { return "GenericCache"; }
 
-    GenericCacheEntry(unsigned numArguments) {
-      NumArguments = numArguments;
-    }
+    template <class... Args>
+    GenericCacheEntry(MetadataCacheKey key, Args &&...args)
+      : MetadataCacheEntryBase(key) {}
 
-    size_t getNumArguments() const { return NumArguments; }
+    // Note that we have to pass 'arguments' separately here instead of
+    // using the key data because there might be non-key arguments,
+    // like protocol conformances.
+    const Metadata *initialize(const TypeContextDescriptor *description,
+                               const void * const *arguments) {
+      // Find a pattern. Currently we always use the default pattern.
+      auto &generics = description->getFullGenericContextHeader();
+      auto pattern = generics.DefaultInstantiationPattern.get();
 
-    static GenericCacheEntry *getFromMetadata(Metadata *metadata) {
-      char *bytes = (char*) metadata;
-      if (auto classType = dyn_cast<ClassMetadata>(metadata)) {
-        assert(classType->isTypeMetadata());
-        bytes -= classType->getClassAddressPoint();
-      } else {
-        bytes -= ValueTypeMetadataAddressPoint;
+      // Call the pattern's instantiation function.
+      auto metadata =
+        pattern->InstantiationFunction(description, arguments, pattern);
+
+      // Complete the metadata's instantiation.
+      if (!pattern->CompletionFunction.isNull()) {
+        MetadataCompletionContext context = {};
+        auto dep = pattern->CompletionFunction(metadata, &context, pattern);
+        assert(!dep && "completion dependencies not yet supported"); (void) dep;
       }
-      bytes -= sizeof(GenericCacheEntry);
-      return reinterpret_cast<GenericCacheEntry*>(bytes);
+
+      return metadata;
     }
   };
 
 } // end anonymous namespace
 
-using GenericMetadataCache = MetadataCache<GenericCacheEntry>;
+using GenericMetadataCache = LockingConcurrentMap<GenericCacheEntry>;
 using LazyGenericMetadataCache = Lazy<GenericMetadataCache>;
 
 /// Fetch the metadata cache for a generic metadata structure.
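With this change the map, not the entry, owns the "run initialization exactly once per key" logic. As a rough mental model, here is a deliberately simplified, hypothetical sketch of the getOrInsert() contract the new code relies on; SimpleLockingMap, setValue, and getValue are invented names, and the real LockingConcurrentMap in MetadataCache.h additionally does fine-grained locking and tail-allocates its entries:

    #include <map>
    #include <mutex>
    #include <utility>

    // Hypothetical model: one global lock, std::map storage. The real map
    // is concurrent; only the getOrInsert() shape matters here.
    template <class EntryTy, class KeyTy, class ValueTy>
    class SimpleLockingMap {
      std::mutex Lock;
      std::map<KeyTy, EntryTy *> Entries;

    public:
      template <class... ArgTys>
      std::pair<EntryTy *, ValueTy> getOrInsert(KeyTy key, ArgTys &&...args) {
        std::lock_guard<std::mutex> guard(Lock);
        auto it = Entries.find(key);
        if (it == Entries.end()) {
          // Miss: construct the entry, then run its one-time initialize(),
          // just as GenericCacheEntry::initialize above runs the pattern's
          // instantiation and completion functions.
          auto *entry = new EntryTy(key, args...);
          entry->setValue(entry->initialize(args...));
          it = Entries.emplace(key, entry).first;
        }
        // pair.second is what swift_getGenericMetadata returns below.
        return {it->second, it->second->getValue()};
      }
    };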
@@ -266,6 +262,8 @@ initializeClassMetadataFromPattern(ClassMetadata *metadata,
   // TODO: only memset the parts that aren't covered by the pattern.
   memset(immediateMembers, 0, description->getImmediateMembersSize());
 
+  // Copy in the immediate arguments.
+
   // Copy the immediate-members pattern.
   if (pattern->hasImmediateMembersPattern()) {
     auto immediateMembersPattern = pattern->getImmediateMembersPattern();
@@ -354,12 +352,9 @@ ClassMetadata *
 swift::swift_allocateGenericClassMetadata(const ClassDescriptor *description,
                                           const void *arguments,
                                           const GenericClassMetadataPattern *pattern) {
-  void * const *argumentsAsArray = reinterpret_cast<void * const *>(arguments);
   auto &generics = description->getFullGenericContextHeader();
   auto &cache = unsafeGetInitializedCache(generics);
 
-  size_t numGenericArguments = generics.Base.NumKeyArguments;
-
   // Compute the formal bounds of the metadata.
   auto bounds = description->getMetadataBounds();
@@ -371,18 +366,15 @@ swift::swift_allocateGenericClassMetadata(const ClassDescriptor *description,
         extraDataPattern->OffsetInWords + extraDataPattern->SizeInWords;
   }
 
-  auto entry = GenericCacheEntry::allocate(cache.getAllocator(),
-                                           argumentsAsArray,
-                                           numGenericArguments,
-                                           allocationBounds.getTotalSizeInBytes());
+  auto bytes = (char*)
+    cache.getAllocator().Allocate(allocationBounds.getTotalSizeInBytes(),
+                                  alignof(void*));
 
-  auto bytes = entry->getData<char>();
   auto addressPoint = bytes + allocationBounds.getAddressPointInBytes();
   auto metadata = reinterpret_cast<ClassMetadata *>(addressPoint);
 
   initializeClassMetadataFromPattern(metadata, bounds, description, pattern);
 
-  assert(GenericCacheEntry::getFromMetadata(metadata) == entry);
   assert(metadata->isTypeMetadata());
 
   return metadata;
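The address-point arithmetic here is worth spelling out: the allocation is bigger than the ClassMetadata clients see, and the pointer the runtime returns is offset into the buffer so that the header (destructor, value witnesses, and so on) sits at negative offsets. A minimal, self-contained illustration with made-up sizes:

    #include <cassert>
    #include <cstddef>

    int main() {
      // Invented numbers: a 16-byte prefix before the address point
      // inside a 64-byte allocation.
      const std::size_t addressPointInBytes = 16;
      char buffer[64] = {};

      char *bytes = buffer;                              // Allocate() result
      char *addressPoint = bytes + addressPointInBytes;  // returned pointer

      // Header fields live below the address point and are reached at
      // negative offsets from the metadata pointer.
      assert(addressPoint - bytes == (std::ptrdiff_t)addressPointInBytes);
      return 0;
    }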
@@ -424,12 +416,9 @@ swift::swift_allocateGenericValueMetadata(const ValueTypeDescriptor *description
                                           const void *arguments,
                                           const GenericValueMetadataPattern *pattern,
                                           size_t extraDataSize) {
-  void * const *argumentsAsArray = reinterpret_cast<void * const *>(arguments);
   auto &generics = description->getFullGenericContextHeader();
   auto &cache = unsafeGetInitializedCache(generics);
 
-  size_t numGenericArguments = generics.Base.NumKeyArguments;
-
   static_assert(sizeof(StructMetadata::HeaderType)
                   == sizeof(ValueMetadata::HeaderType),
                 "struct metadata header unexpectedly has extra members");
@@ -443,19 +432,13 @@ swift::swift_allocateGenericValueMetadata(const ValueTypeDescriptor *description
   size_t totalSize = sizeof(FullMetadata<ValueMetadata>) + extraDataSize;
 
-  auto entry =
-    GenericCacheEntry::allocate(cache.getAllocator(),
-                                argumentsAsArray, numGenericArguments,
-                                totalSize);
+  auto bytes = (char*) cache.getAllocator().Allocate(totalSize, alignof(void*));
 
-  auto bytes = entry->getData<char>();
   auto addressPoint = bytes + sizeof(ValueMetadata::HeaderType);
   auto metadata = reinterpret_cast<ValueMetadata *>(addressPoint);
 
   initializeValueMetadataFromPattern(metadata, description, pattern);
 
-  assert(GenericCacheEntry::getFromMetadata(metadata) == entry);
-
   return metadata;
 }
@@ -467,30 +450,10 @@ swift::swift_getGenericMetadata(const TypeContextDescriptor *description,
   auto &generics = description->getFullGenericContextHeader();
   size_t numGenericArgs = generics.Base.NumKeyArguments;
 
-  auto entry = getCache(generics).findOrAdd(genericArgs, numGenericArgs,
-    [&]() -> GenericCacheEntry* {
-      // Create new metadata to cache.
-
-      // Find a pattern. Currently we always use the default pattern.
-      auto pattern = generics.DefaultInstantiationPattern.get();
-
-      // Call the pattern's instantiation function.
-      auto metadata =
-        pattern->InstantiationFunction(description, arguments, pattern);
-
-      // Complete the metadata's instantiation.
-      if (!pattern->CompletionFunction.isNull()) {
-        MetadataCompletionContext context = {};
-        auto dep = pattern->CompletionFunction(metadata, &context, pattern);
-        assert(!dep && "completion dependencies not yet supported"); (void) dep;
-      }
-
-      auto entry = GenericCacheEntry::getFromMetadata(metadata);
-      entry->Value = metadata;
-      return entry;
-    });
+  auto key = MetadataCacheKey(genericArgs, numGenericArgs);
+  auto result = getCache(generics).getOrInsert(key, description, genericArgs);
 
-  return entry->Value;
+  return result.second;
 }
 
 /***************************************************************************/
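The MetadataCacheKey used here is assumed to wrap the key-argument array and its count. A hypothetical sketch of the three-way, pointer-wise comparison such a key has to provide (ArgArrayKey is an invented stand-in; the real type lives in MetadataCache.h):

    #include <cstddef>
    #include <cstdint>

    struct ArgArrayKey {
      const void * const *Args;
      std::size_t NumArgs;

      // Three-way comparison in the style of the runtime's compareIntegers
      // and comparePointers helpers: negative, zero, or positive.
      int compare(ArgArrayKey other) const {
        if (NumArgs != other.NumArgs)
          return NumArgs < other.NumArgs ? -1 : 1;
        for (std::size_t i = 0; i != NumArgs; ++i) {
          auto l = (std::uintptr_t)Args[i], r = (std::uintptr_t)other.Args[i];
          if (l != r) return l < r ? -1 : 1;
        }
        return 0;
      }
    };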
@@ -3000,28 +2963,78 @@ template <> void Metadata::dump() const {
 /***************************************************************************/
 
 namespace {
-  class WitnessTableCacheEntry : public CacheEntry<WitnessTableCacheEntry> {
-  public:
-    static const char *getName() { return "WitnessTableCache"; }
 
-    WitnessTableCacheEntry(size_t numArguments) {
-      assert(numArguments == getNumArguments());
-    }
+  /// A cache-entry type suitable for use with LockingConcurrentMap.
+  class WitnessTableCacheEntry {
+    /// The type for which this table was instantiated.
+    const Metadata * const Type;
 
-    static constexpr size_t getNumArguments() {
-      return 1;
-    }
+    /// The generic table. This is only kept around so that we can
+    /// compute the size of an entry correctly in case of a race to
+    /// allocate the entry.
+    GenericWitnessTable * const GenericTable;
+
+    /// The table. When this is fully-initialized, we set it to the table.
+    std::atomic<WitnessTable*> Table;
+
+  public:
+    /// We use a pointer to the allocated witness table directly as a
+    /// status value. The witness table itself is actually allocated in the
+    /// tail storage of the entry.
+    using StatusType = WitnessTable*;
+
+    /// Do the structural initialization necessary for this entry to appear
+    /// in a concurrent map.
+    WitnessTableCacheEntry(const Metadata *type,
+                           GenericWitnessTable *genericTable,
+                           void ** const *instantiationArgs)
+      : Type(type), GenericTable(genericTable), Table(nullptr) {}
+
+    intptr_t getKeyIntValueForDump() const {
+      return reinterpret_cast<intptr_t>(Type);
+    }
+
+    /// The key value of the entry is just its type pointer.
+    int compareWithKey(const Metadata *type) const {
+      return comparePointers(Type, type);
+    }
+
+    static size_t getExtraAllocationSize(const Metadata *type,
+                                         GenericWitnessTable *genericTable,
+                                         void ** const *instantiationArgs) {
+      return getWitnessTableSize(genericTable);
+    }
+
+    size_t getExtraAllocationSize() const {
+      return getWitnessTableSize(GenericTable);
+    }
+
+    static size_t getWitnessTableSize(GenericWitnessTable *genericTable) {
+      auto protocol = genericTable->Protocol.get();
+      size_t numPrivateWords = genericTable->WitnessTablePrivateSizeInWords;
+      size_t numRequirementWords =
+        WitnessTableFirstRequirementOffset + protocol->NumRequirements;
+      return (numPrivateWords + numRequirementWords) * sizeof(void*);
+    }
+
+    WitnessTable *checkStatus(bool isLocked,
+                              GenericWitnessTable *genericTable,
+                              void ** const *instantiationArgs) {
+      return Table.load(std::memory_order_acquire);
+    }
+
+    WitnessTable *initialize(GenericWitnessTable *genericTable,
+                             void ** const *instantiationArgs);
+
+    void flagInitializedStatus(WitnessTable *table) {
+      Table.store(table, std::memory_order_release);
+    }
+  };
 
-    /// Advance the address point to the end of the private storage area.
-    WitnessTable *get(GenericWitnessTable *genericTable) const {
-      return reinterpret_cast<WitnessTable *>(
-          const_cast<void **>(getData<void *>()) +
-          genericTable->WitnessTablePrivateSizeInWords);
-    }
-  };
 } // end anonymous namespace
 
-using GenericWitnessTableCache = MetadataCache<WitnessTableCacheEntry>;
+using GenericWitnessTableCache =
+  LockingConcurrentMap<WitnessTableCacheEntry, /*destructor*/ false>;
 using LazyGenericWitnessTableCache = Lazy<GenericWitnessTableCache>;
 
 /// Fetch the cache for a generic witness-table structure.
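The entry above expects its witness table to live in tail storage, sized by getExtraAllocationSize(). A sketch of that idiom under stated assumptions: allocateWithTail is an invented helper, and the key point is that the static overload takes the same arguments as the constructor, so the size can be computed before the entry exists:

    #include <cstddef>
    #include <cstdlib>
    #include <new>
    #include <utility>

    template <class EntryTy, class... ArgTys>
    EntryTy *allocateWithTail(ArgTys &&...args) {
      // Ask the entry type how much tail storage these arguments imply.
      std::size_t size =
          sizeof(EntryTy) + EntryTy::getExtraAllocationSize(args...);
      void *memory = std::malloc(size);  // the runtime uses its own allocator
      auto *entry = new (memory) EntryTy(std::forward<ArgTys>(args)...);
      // Tail storage begins immediately after the object, which is why
      // WitnessTableCacheEntry::initialize can locate it as (this + 1).
      return entry;
    }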
@@ -3058,11 +3071,9 @@ static bool doesNotRequireInstantiation(GenericWitnessTable *genericTable) {
 /// Instantiate a brand new witness table for a resilient or generic
 /// protocol conformance.
-static WitnessTableCacheEntry *
-allocateWitnessTable(GenericWitnessTable *genericTable,
-                     MetadataAllocator &allocator,
-                     const void *args[],
-                     size_t numGenericArgs) {
+WitnessTable *
+WitnessTableCacheEntry::initialize(GenericWitnessTable *genericTable,
+                                   void ** const *instantiationArgs) {
   // The number of witnesses provided by the table pattern.
   size_t numPatternWitnesses = genericTable->WitnessTableSizeInWords;
@@ -3079,26 +3090,18 @@ allocateWitnessTable(GenericWitnessTable *genericTable,
   assert(numPatternWitnesses <= numRequirements);
 
   // Number of bytes for any private storage used by the conformance itself.
-  size_t privateSize =
-    genericTable->WitnessTablePrivateSizeInWords * sizeof(void*);
+  size_t privateSizeInWords = genericTable->WitnessTablePrivateSizeInWords;
 
-  // Number of bytes for the full witness table.
-  size_t expectedWitnessTableSize = numRequirements * sizeof(void*);
-
-  // Create a new entry for the cache.
-  auto entry = WitnessTableCacheEntry::allocate(
-      allocator, args, numGenericArgs,
-      (privateSize + expectedWitnessTableSize) * sizeof(void*));
-
-  char *fullTable = entry->getData<char>();
+  // Find the allocation.
+  void **fullTable = reinterpret_cast<void**>(this + 1);
 
   // Zero out the private storage area.
-  memset(fullTable, 0, privateSize * sizeof(void*));
+  memset(fullTable, 0, privateSizeInWords * sizeof(void*));
 
   // Advance the address point; the private storage area is accessed via
   // negative offsets.
-  auto table = (void**) entry->get(genericTable);
-  auto pattern = (void* const *) &*genericTable->Pattern;
+  auto table = fullTable + privateSizeInWords;
+  auto pattern = reinterpret_cast<void * const *>(&*genericTable->Pattern);
   auto requirements = protocol->Requirements.get();
 
   // Fill in the provided part of the requirements from the pattern.
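To visualize the layout being built here: the private words sit before the table's address point, so clients index forward into the requirements while the runtime reaches conformance-private storage at negative offsets. A small self-contained check with illustrative word counts:

    #include <cassert>

    int main() {
      const int privateWords = 2, requirementWords = 5;
      void *storage[privateWords + requirementWords] = {};

      void **fullTable = storage;
      void **table = fullTable + privateWords;  // the address point

      // Requirement j is table[j]; private word i is table[-1 - i].
      assert(&table[0] == &fullTable[privateWords]);
      assert(&table[-1] == &fullTable[privateWords - 1]);
      return 0;
    }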
@@ -3116,7 +3119,14 @@ allocateWitnessTable(GenericWitnessTable *genericTable,
     table[i] = defaultImpl;
   }
 
-  return entry;
+  auto castTable = reinterpret_cast<WitnessTable*>(table);
+
+  // Call the instantiation function if present.
+  if (!genericTable->Instantiator.isNull()) {
+    genericTable->Instantiator(castTable, Type, instantiationArgs);
+  }
+
+  return castTable;
 }
 
 const WitnessTable *
@@ -3127,30 +3137,11 @@ swift::swift_getGenericWitnessTable(GenericWitnessTable *genericTable,
     return genericTable->Pattern;
   }
 
-  // If type is not nullptr, the witness table depends on the substituted
-  // conforming type, so use that as the key.
-  constexpr const size_t numGenericArgs = 1;
-  const void *args[] = { type };
-
   auto &cache = getCache(genericTable);
-  auto entry = cache.findOrAdd(args, numGenericArgs,
-    [&]() -> WitnessTableCacheEntry* {
-      // Allocate the witness table and fill it in.
-      auto entry = allocateWitnessTable(genericTable,
-                                        cache.getAllocator(),
-                                        args, numGenericArgs);
-
-      // Call the instantiation function to initialize
-      // dependent associated type metadata.
-      if (!genericTable->Instantiator.isNull()) {
-        genericTable->Instantiator(entry->get(genericTable),
-                                   type, instantiationArgs);
-      }
-
-      return entry;
-    });
+  auto result = cache.getOrInsert(type, genericTable, instantiationArgs);
 
-  return entry->get(genericTable);
+  // Our returned 'status' is the witness table itself.
+  return result.second;
 }
 
 /***************************************************************************/