[Runtime] Change MetadataLookup section vectors to use ConcurrentReadableArray. #16753

Merged 5 commits on May 23, 2018
72 changes: 51 additions & 21 deletions include/swift/Runtime/Concurrent.h
@@ -451,14 +451,57 @@ template <class ElemTy> struct ConcurrentReadableArray {
Mutex WriterLock;
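// Storage buffers retired by push_back that may still be referenced by
// in-flight readers; they are reclaimed once ReaderCount is observed to be zero.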
std::vector<Storage *> FreeList;

// Register a reader. Snapshots hold a reader reference for their entire
// lifetime; retired storage is only freed once this count drops to zero.
void incrementReaders() {
ReaderCount.fetch_add(1, std::memory_order_acquire);
}

void decrementReaders() {
ReaderCount.fetch_sub(1, std::memory_order_release);
}

// Called only with WriterLock held, or from the destructor, so it cannot
// race with another thread mutating FreeList.
void deallocateFreeList() {
for (Storage *storage : FreeList)
storage->deallocate();
FreeList.clear();
FreeList.shrink_to_fit();
}

public:
struct Snapshot {
ConcurrentReadableArray *Array;
const ElemTy *Start;
size_t Count;

Snapshot(ConcurrentReadableArray *array, const ElemTy *start, size_t count)
: Array(array), Start(start), Count(count) {}

// Each copy registers itself as an additional reader, keeping the backing
// storage alive until every outstanding snapshot is destroyed.
Snapshot(const Snapshot &other)
: Array(other.Array), Start(other.Start), Count(other.Count) {
Array->incrementReaders();
}

~Snapshot() {
Array->decrementReaders();
}

const ElemTy *begin() { return Start; }
const ElemTy *end() { return Start + Count; }
size_t count() { return Count; }
};

// This type cannot be safely copied, moved, or deleted.
ConcurrentReadableArray(const ConcurrentReadableArray &) = delete;
ConcurrentReadableArray(ConcurrentReadableArray &&) = delete;
ConcurrentReadableArray &operator=(const ConcurrentReadableArray &) = delete;

ConcurrentReadableArray() : Capacity(0), ReaderCount(0), Elements(nullptr) {}

~ConcurrentReadableArray() {
assert(ReaderCount.load(std::memory_order_acquire) == 0 &&
"deallocating ConcurrentReadableArray with outstanding snapshots");
deallocateFreeList();
}

void push_back(const ElemTy &elem) {
ScopedLock guard(WriterLock);

@@ -482,32 +525,19 @@ template <class ElemTy> struct ConcurrentReadableArray {
storage->Count.store(count + 1, std::memory_order_release);

if (ReaderCount.load(std::memory_order_acquire) == 0)
- for (Storage *storage : FreeList)
- storage->deallocate();
+ deallocateFreeList();
}

- /// Read the contents of the array. The parameter `f` is called with
- /// two parameters: a pointer to the elements in the array, and the
- /// count. This represents a snapshot of the contents at the time
- /// `read` was called. The pointer becomes invalid after `f` returns.
- template <class F> auto read(F f) -> decltype(f(nullptr, 0)) {
- ReaderCount.fetch_add(1, std::memory_order_acquire);
+ Snapshot snapshot() {
+ incrementReaders();
auto *storage = Elements.load(SWIFT_MEMORY_ORDER_CONSUME);
+ if (storage == nullptr) {
+ return Snapshot(this, nullptr, 0);
+ }

auto count = storage->Count.load(std::memory_order_acquire);
const auto *ptr = storage->data();

- decltype(f(nullptr, 0)) result = f(ptr, count);
-
- ReaderCount.fetch_sub(1, std::memory_order_release);
-
- return result;
- }
-
- /// Get the current count. It's just a snapshot and may be obsolete immediately.
- size_t count() {
- return read([](const ElemTy *ptr, size_t count) -> size_t {
- return count;
- });
- }
+ return Snapshot(this, ptr, count);
+ }
};
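
For orientation, a minimal sketch of how the new API is used; the element type
and values here are hypothetical, not taken from the diff:

    ConcurrentReadableArray<int> numbers;
    numbers.push_back(42);          // writers serialize on WriterLock

    {
      auto snapshot = numbers.snapshot();  // registers a reader, pins storage
      for (int value : snapshot) {
        // The storage backing this iteration cannot be freed while the
        // snapshot is alive, even if a concurrent push_back reallocates.
        (void)value;
      }
    }  // reader count drops; retired storage becomes reclaimable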

49 changes: 13 additions & 36 deletions stdlib/public/runtime/MetadataLookup.cpp
@@ -23,7 +23,6 @@
#include "swift/Runtime/Concurrent.h"
#include "swift/Runtime/HeapObject.h"
#include "swift/Runtime/Metadata.h"
- #include "swift/Runtime/Mutex.h"
#include "swift/Strings.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
@@ -106,11 +105,9 @@ namespace {

struct TypeMetadataPrivateState {
ConcurrentMap<NominalTypeDescriptorCacheEntry> NominalCache;
- std::vector<TypeMetadataSection> SectionsToScan;
- Mutex SectionsToScanLock;
+ ConcurrentReadableArray<TypeMetadataSection> SectionsToScan;

TypeMetadataPrivateState() {
- SectionsToScan.reserve(16);
initializeTypeMetadataRecordLookup();
}

@@ -122,7 +119,6 @@ static void
_registerTypeMetadataRecords(TypeMetadataPrivateState &T,
const TypeMetadataRecord *begin,
const TypeMetadataRecord *end) {
- ScopedLock guard(T.SectionsToScanLock);
T.SectionsToScan.push_back(TypeMetadataSection{begin, end});
}

@@ -296,12 +292,9 @@ swift::_contextDescriptorMatchesMangling(const ContextDescriptor *context,

// returns the nominal type descriptor for the type named by typeName
static const TypeContextDescriptor *
- _searchTypeMetadataRecords(const TypeMetadataPrivateState &T,
+ _searchTypeMetadataRecords(TypeMetadataPrivateState &T,
Demangle::NodePointer node) {
- unsigned sectionIdx = 0;
- unsigned endSectionIdx = T.SectionsToScan.size();
- for (; sectionIdx < endSectionIdx; ++sectionIdx) {
- auto &section = T.SectionsToScan[sectionIdx];
+ for (auto &section : T.SectionsToScan.snapshot()) {
for (const auto &record : section) {
if (auto ntd = record.getTypeContextDescriptor()) {
if (_contextDescriptorMatchesMangling(ntd, node)) {
@@ -342,9 +335,7 @@ _findNominalTypeDescriptor(Demangle::NodePointer node,
return Value->getDescription();

// Check type metadata records
- T.SectionsToScanLock.withLock([&] {
- foundNominal = _searchTypeMetadataRecords(T, node);
- });
+ foundNominal = _searchTypeMetadataRecords(T, node);

// Check protocol conformances table. Note that this has no support for
// resolving generic types yet.
@@ -395,11 +386,9 @@ namespace {

struct ProtocolMetadataPrivateState {
ConcurrentMap<ProtocolDescriptorCacheEntry> ProtocolCache;
- std::vector<ProtocolSection> SectionsToScan;
- Mutex SectionsToScanLock;
+ ConcurrentReadableArray<ProtocolSection> SectionsToScan;

ProtocolMetadataPrivateState() {
- SectionsToScan.reserve(16);
initializeProtocolLookup();
}
};
@@ -411,7 +400,6 @@ static void
_registerProtocols(ProtocolMetadataPrivateState &C,
const ProtocolRecord *begin,
const ProtocolRecord *end) {
- ScopedLock guard(C.SectionsToScanLock);
C.SectionsToScan.push_back(ProtocolSection{begin, end});
}

@@ -439,12 +427,9 @@ void swift::swift_registerProtocols(const ProtocolRecord *begin,
}

static const ProtocolDescriptor *
- _searchProtocolRecords(const ProtocolMetadataPrivateState &C,
+ _searchProtocolRecords(ProtocolMetadataPrivateState &C,
const llvm::StringRef protocolName){
- unsigned sectionIdx = 0;
- unsigned endSectionIdx = C.SectionsToScan.size();
- for (; sectionIdx < endSectionIdx; ++sectionIdx) {
- auto &section = C.SectionsToScan[sectionIdx];
+ for (auto &section : C.SectionsToScan.snapshot()) {
for (const auto &record : section) {
if (auto protocol = record.Protocol.getPointer()) {
// Drop the "S$" prefix from the protocol record. It's not used in
@@ -472,9 +457,7 @@ _findProtocolDescriptor(llvm::StringRef mangledName) {
return Value->getDescription();

// Check type metadata records
- T.SectionsToScanLock.withLock([&] {
- foundProtocol = _searchProtocolRecords(T, mangledName);
- });
+ foundProtocol = _searchProtocolRecords(T, mangledName);

if (foundProtocol) {
T.ProtocolCache.getOrInsert(mangledName, foundProtocol);
@@ -534,21 +517,18 @@ class DynamicFieldSection {
DynamicFieldSection(const FieldDescriptor **fields, size_t size)
: Begin(fields), End(fields + size) {}

- const FieldDescriptor **begin() { return Begin; }
+ const FieldDescriptor **begin() const { return Begin; }

const FieldDescriptor **end() const { return End; }
};

struct FieldCacheState {
ConcurrentMap<FieldDescriptorCacheEntry> FieldCache;

- Mutex SectionsLock;
- std::vector<StaticFieldSection> StaticSections;
- std::vector<DynamicFieldSection> DynamicSections;
+ ConcurrentReadableArray<StaticFieldSection> StaticSections;
+ ConcurrentReadableArray<DynamicFieldSection> DynamicSections;

FieldCacheState() {
- StaticSections.reserve(16);
- DynamicSections.reserve(8);
initializeTypeFieldLookup();
}
};
@@ -559,7 +539,6 @@ static Lazy<FieldCacheState> FieldCache;
void swift::swift_registerFieldDescriptors(const FieldDescriptor **records,
size_t size) {
auto &cache = FieldCache.get();
- ScopedLock guard(cache.SectionsLock);
cache.DynamicSections.push_back({records, size});
}

@@ -570,7 +549,6 @@ void swift::addImageTypeFieldDescriptorBlockCallback(const void *recordsBegin,

// Field cache should always be sufficiently initialized by this point.
auto &cache = FieldCache.unsafeGetAlreadyInitialized();
- ScopedLock guard(cache.SectionsLock);
cache.StaticSections.push_back({recordsBegin, recordsEnd});
}

@@ -1216,16 +1194,15 @@ void swift::swift_getFieldAt(
return;
}

- ScopedLock guard(cache.SectionsLock);
// Otherwise let's try to find it in one of the sections.
- for (auto &section : cache.DynamicSections) {
+ for (auto &section : cache.DynamicSections.snapshot()) {
for (const auto *descriptor : section) {
if (isRequestedDescriptor(*descriptor))
return;
}
}

- for (const auto &section : cache.StaticSections) {
+ for (const auto &section : cache.StaticSections.snapshot()) {
for (auto &descriptor : section) {
if (isRequestedDescriptor(descriptor))
return;
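
Taken together, the MetadataLookup.cpp changes replace every mutex-guarded scan
with snapshot iteration. Schematically, condensed from the hunks above:

    // Before: readers and writers serialized on a Mutex.
    T.SectionsToScanLock.withLock([&] {
      foundNominal = _searchTypeMetadataRecords(T, node);
    });

    // After: lookups iterate a pinned snapshot without blocking, while
    // registration paths still serialize among themselves inside push_back().
    foundNominal = _searchTypeMetadataRecords(T, node);  // uses snapshot()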