Commit 1344da9

Merge pull request #40071 from mikeash/remote-mirror-async-slab-inspection
[Reflection] Add API for inspecting async task allocation slabs.
2 parents 1faf7ed + 7c7dc5d commit 1344da9

13 files changed: +201 −92 lines

include/swift/Reflection/ReflectionContext.h

Lines changed: 36 additions & 29 deletions
@@ -131,6 +131,12 @@ class ReflectionContext
     ChunkKind Kind;
   };
 
+  struct AsyncTaskSlabInfo {
+    StoredPointer NextSlab;
+    StoredSize SlabSize;
+    std::vector<AsyncTaskAllocationChunk> Chunks;
+  };
+
   explicit ReflectionContext(std::shared_ptr<MemoryReader> reader)
     : super(std::move(reader), *this)
   {}
@@ -1346,44 +1352,45 @@ class ReflectionContext
     return llvm::None;
   }
 
-  llvm::Optional<std::string> iterateAsyncTaskAllocations(
-      StoredPointer AsyncTaskPtr,
-      std::function<void(StoredPointer, unsigned, AsyncTaskAllocationChunk[])>
-          Call) {
-    using AsyncTask = AsyncTask<Runtime>;
+  std::pair<llvm::Optional<std::string>, AsyncTaskSlabInfo>
+  asyncTaskSlabAllocations(StoredPointer SlabPtr) {
     using StackAllocator = StackAllocator<Runtime>;
+    auto SlabBytes = getReader().readBytes(
+        RemoteAddress(SlabPtr), sizeof(typename StackAllocator::Slab));
+    auto Slab = reinterpret_cast<const typename StackAllocator::Slab *>(
+        SlabBytes.get());
+    if (!Slab)
+      return {std::string("failure reading slab"), {}};
+
+    // For now, we won't try to walk the allocations in the slab, we'll just
+    // provide the whole thing as one big chunk.
+    size_t HeaderSize =
+        llvm::alignTo(sizeof(*Slab), llvm::Align(alignof(std::max_align_t)));
+    AsyncTaskAllocationChunk Chunk;
+
+    Chunk.Start = SlabPtr + HeaderSize;
+    Chunk.Length = Slab->CurrentOffset;
+    Chunk.Kind = AsyncTaskAllocationChunk::ChunkKind::Unknown;
+
+    // Total slab size is the slab's capacity plus the slab struct itself.
+    StoredPointer SlabSize = Slab->Capacity + sizeof(*Slab);
+
+    return {llvm::None, {Slab->Next, SlabSize, {Chunk}}};
+  }
+
+  std::pair<llvm::Optional<std::string>, StoredPointer>
+  asyncTaskSlabPtr(StoredPointer AsyncTaskPtr) {
+    using AsyncTask = AsyncTask<Runtime>;
 
     auto AsyncTaskBytes =
         getReader().readBytes(RemoteAddress(AsyncTaskPtr), sizeof(AsyncTask));
     auto *AsyncTaskObj =
         reinterpret_cast<const AsyncTask *>(AsyncTaskBytes.get());
     if (!AsyncTaskObj)
-      return std::string("failure reading async task");
+      return {std::string("failure reading async task"), 0};
 
     StoredPointer SlabPtr = AsyncTaskObj->PrivateStorage.Allocator.FirstSlab;
-    while (SlabPtr) {
-      auto SlabBytes = getReader().readBytes(
-          RemoteAddress(SlabPtr), sizeof(typename StackAllocator::Slab));
-      auto Slab = reinterpret_cast<const typename StackAllocator::Slab *>(
-          SlabBytes.get());
-      if (!Slab)
-        return std::string("failure reading slab");
-
-      // For now, we won't try to walk the allocations in the slab, we'll just
-      // provide the whole thing as one big chunk.
-      size_t HeaderSize =
-          llvm::alignTo(sizeof(*Slab), llvm::Align(alignof(std::max_align_t)));
-      AsyncTaskAllocationChunk Chunk;
-
-      Chunk.Start = SlabPtr + HeaderSize;
-      Chunk.Length = Slab->CurrentOffset;
-      Chunk.Kind = AsyncTaskAllocationChunk::ChunkKind::Unknown;
-      Call(SlabPtr, 1, &Chunk);
-
-      SlabPtr = Slab->Next;
-    }
-
-    return llvm::None;
+    return {llvm::None, SlabPtr};
   }
 
 private:
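
For orientation, here is a minimal sketch (not part of the commit) of how a caller inside the remote-mirror layer could recover the behavior of the removed iterateAsyncTaskAllocations by composing the two new primitives. The Visit callback, the concrete Runtime instantiation, and the helper's name are assumptions supplied for illustration.

```cpp
#include "swift/Reflection/ReflectionContext.h"
#include <tuple>

// Visit every chunk in a task's slab chain, roughly what the old
// iterateAsyncTaskAllocations loop did internally.
template <typename Runtime, typename Visitor>
llvm::Optional<std::string>
visitAsyncTaskAllocations(swift::reflection::ReflectionContext<Runtime> &Context,
                          typename Runtime::StoredPointer AsyncTaskPtr,
                          Visitor Visit) {
  llvm::Optional<std::string> Error;
  typename Runtime::StoredPointer SlabPtr;
  // Step 1: read the task object and pull out its first slab pointer.
  std::tie(Error, SlabPtr) = Context.asyncTaskSlabPtr(AsyncTaskPtr);
  if (Error)
    return Error;

  // Step 2: walk the slab chain, reporting each slab's chunks.
  while (SlabPtr) {
    typename swift::reflection::ReflectionContext<Runtime>::AsyncTaskSlabInfo
        Info;
    std::tie(Error, Info) = Context.asyncTaskSlabAllocations(SlabPtr);
    if (Error)
      return Error;
    for (auto &Chunk : Info.Chunks)
      Visit(SlabPtr, Chunk);      // one callback per chunk, like the old API
    SlabPtr = Info.NextSlab;      // follow the linked list of slabs
  }
  return llvm::None;
}
```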

include/swift/Reflection/RuntimeInternals.h

Lines changed: 1 addition & 0 deletions
@@ -86,6 +86,7 @@ struct StackAllocator {
   bool FirstSlabIsPreallocated;
 
   struct Slab {
+    typename Runtime::StoredPointer Metadata;
     typename Runtime::StoredPointer Next;
     uint32_t Capacity;
     uint32_t CurrentOffset;

include/swift/Remote/MetadataReader.h

Lines changed: 0 additions & 4 deletions
@@ -2242,25 +2242,21 @@ class MetadataReader {
       return false;
     };
 
-    bool isTypeContext = false;
     switch (auto contextKind = descriptor->getKind()) {
     case ContextDescriptorKind::Class:
       if (!getContextName())
         return nullptr;
       nodeKind = Demangle::Node::Kind::Class;
-      isTypeContext = true;
       break;
     case ContextDescriptorKind::Struct:
       if (!getContextName())
         return nullptr;
       nodeKind = Demangle::Node::Kind::Structure;
-      isTypeContext = true;
       break;
     case ContextDescriptorKind::Enum:
       if (!getContextName())
         return nullptr;
       nodeKind = Demangle::Node::Kind::Enum;
-      isTypeContext = true;
       break;
     case ContextDescriptorKind::Protocol: {
       if (!getContextName())

include/swift/SwiftRemoteMirror/SwiftRemoteMirror.h

Lines changed: 28 additions & 18 deletions
@@ -44,6 +44,9 @@ extern unsigned long long swift_reflection_classIsSwiftMask;
 ///     first attempted fix to use the right AsyncTask layout.
 /// 1 - Indicates that swift_reflection_iterateAsyncTaskAllocations has been
 ///     actually fixed to use the right AsyncTask layout.
+/// 2 - swift_reflection_iterateAsyncTaskAllocations has been replaced by
+///     swift_reflection_asyncTaskSlabPointer and
+///     swift_reflection_asyncTaskSlabAllocations.
 SWIFT_REMOTE_MIRROR_LINKAGE extern uint32_t swift_reflection_libraryVersion;
 
 /// Get the metadata version supported by the Remote Mirror library.
@@ -395,33 +398,40 @@ const char *swift_reflection_iterateMetadataAllocationBacktraces(
     SwiftReflectionContextRef ContextRef,
     swift_metadataAllocationBacktraceIterator Call, void *ContextPtr);
 
-/// Allocation iterator passed to swift_reflection_iterateAsyncTaskAllocations
-typedef void (*swift_asyncTaskAllocationIterator)(
-    swift_reflection_ptr_t AllocationPtr, unsigned Count,
-    swift_async_task_allocation_chunk_t Chunks[], void *ContextPtr);
-
-/// Iterate over the allocations associated with the given async task object.
+/// Get the first allocation slab for a given async task object.
 /// This object must have an isa value equal to
 /// _swift_concurrency_debug_asyncTaskMetadata.
 ///
-/// Calls the passed in Call function for each allocation associated with the
-/// async task object. The function is passed the allocation pointer and an
-/// array of chunks. Each chunk consists of a start, length, and kind for that
-/// chunk of the allocated memory. Any regions of the allocation that are not
-/// covered by a chunk are unallocated or garbage. The chunk array is valid only
-/// for the duration of the call.
+/// It is possible that the async task object hasn't allocated a slab yet, in
+/// which case the slab pointer will be NULL. If non-NULL, the returned slab
+/// pointer may be a separate heap allocation, or it may be interior to some
+/// allocation used by the task.
+SWIFT_REMOTE_MIRROR_LINKAGE
+swift_async_task_slab_return_t
+swift_reflection_asyncTaskSlabPointer(SwiftReflectionContextRef ContextRef,
+                                      swift_reflection_ptr_t AsyncTaskPtr);
+
+/// Iterate over the allocations in the given async task allocator slab.
+/// This allocation must have an "isa" value (scare quotes because it's not a
+/// real object) equal to _swift_concurrency_debug_asyncTaskSlabMetadata.
+///
+/// Calls the passed in Call function for each allocation in the slab. The
+/// function is passed the allocation pointer and an array of chunks. Each chunk
+/// consists of a start, length, and kind for that chunk of the allocated
+/// memory. Any regions of the allocation that are not covered by a chunk are
+/// unallocated or garbage. The chunk array is valid only for the duration of
+/// the call.
 ///
-/// An async task may have more than one allocation associated with it, so the
-/// function may be called more than once. It may also have no allocations, in
-/// which case the function is not called.
+/// A slab may be part of a chain of slabs, so the
+/// function may be called more than once.
 ///
 /// Returns NULL on success. On error, returns a pointer to a C string
 /// describing the error. This pointer remains valid until the next
 /// swift_reflection call on the given context.
 SWIFT_REMOTE_MIRROR_LINKAGE
-const char *swift_reflection_iterateAsyncTaskAllocations(
-    SwiftReflectionContextRef ContextRef, swift_reflection_ptr_t AsyncTaskPtr,
-    swift_asyncTaskAllocationIterator Call, void *ContextPtr);
+swift_async_task_slab_allocations_return_t
+swift_reflection_asyncTaskSlabAllocations(SwiftReflectionContextRef ContextRef,
+                                          swift_reflection_ptr_t SlabPtr);
 
 #ifdef __cplusplus
 } // extern "C"
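
To show how the two new entry points fit together, here is a minimal client-side sketch (not part of the commit). It assumes an already-initialized SwiftReflectionContextRef and a remote async task address obtained elsewhere (for example, from a heap walk).

```cpp
#include "swift/SwiftRemoteMirror/SwiftRemoteMirror.h"
#include <cstdio>

void dumpTaskSlabs(SwiftReflectionContextRef Context,
                   swift_reflection_ptr_t TaskAddress) {
  // Get the task's first slab. A NULL SlabPtr with no error just means the
  // task hasn't allocated a slab yet.
  swift_async_task_slab_return_t First =
      swift_reflection_asyncTaskSlabPointer(Context, TaskAddress);
  if (First.Error) {
    fprintf(stderr, "error: %s\n", First.Error);
    return;
  }

  // Follow the NextSlab pointers to walk the whole chain.
  swift_reflection_ptr_t Slab = First.SlabPtr;
  while (Slab) {
    swift_async_task_slab_allocations_return_t Info =
        swift_reflection_asyncTaskSlabAllocations(Context, Slab);
    if (Info.Error) {
      fprintf(stderr, "error: %s\n", Info.Error);
      return;
    }
    printf("slab at 0x%llx, %u bytes total\n", (unsigned long long)Slab,
           Info.SlabSize);
    for (unsigned i = 0; i < Info.ChunkCount; i++)
      printf("  chunk at 0x%llx, length %u\n",
             (unsigned long long)Info.Chunks[i].Start, Info.Chunks[i].Length);
    Slab = Info.NextSlab;
  }
}
```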

include/swift/SwiftRemoteMirror/SwiftRemoteMirrorTypes.h

Lines changed: 30 additions & 0 deletions
@@ -189,12 +189,42 @@ typedef struct swift_metadata_cache_node {
   swift_reflection_ptr_t Right;
 } swift_metadata_cache_node_t;
 
+/// The return value when getting an async task's slab pointer.
+typedef struct swift_async_task_slab_return {
+  /// On failure, a pointer to a string describing the error. On success, NULL.
+  /// This pointer remains valid until the next
+  /// swift_reflection call on the given context.
+  const char *Error;
+
+  /// The task's slab pointer, if no error occurred.
+  swift_reflection_ptr_t SlabPtr;
+} swift_async_task_slab_return_t;
+
 typedef struct swift_async_task_allocation_chunk {
   swift_reflection_ptr_t Start;
   unsigned Length;
   swift_layout_kind_t Kind;
 } swift_async_task_allocation_chunk_t;
 
+typedef struct swift_async_task_slab_allocations_return {
+  /// On failure, a pointer to a string describing the error. On success, NULL.
+  /// This pointer remains valid until the next
+  /// swift_reflection call on the given context.
+  const char *Error;
+
+  /// The remote pointer to the next slab, or NULL/0 if none.
+  swift_reflection_ptr_t NextSlab;
+
+  /// The size of the entire slab, in bytes.
+  unsigned SlabSize;
+
+  /// The number of chunks pointed to by Chunks.
+  unsigned ChunkCount;
+
+  /// A pointer to the chunks, if no error occurred.
+  swift_async_task_allocation_chunk_t *Chunks;
+} swift_async_task_slab_allocations_return_t;
+
 /// An opaque pointer to a context which maintains state and
 /// caching of reflection structure for heap instances.
 typedef struct SwiftReflectionContext *SwiftReflectionContextRef;

stdlib/public/Concurrency/Debug.h

Lines changed: 4 additions & 0 deletions
@@ -32,6 +32,10 @@ const void *const _swift_concurrency_debug_jobMetadata;
 SWIFT_EXPORT_FROM(swift_Concurrency)
 const void *const _swift_concurrency_debug_asyncTaskMetadata;
 
+/// A fake metadata pointer placed at the start of async task slab allocations.
+SWIFT_EXPORT_FROM(swift_Concurrency)
+const void *const _swift_concurrency_debug_asyncTaskSlabMetadata;
+
 } // namespace swift
 
 #endif
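
As an illustrative sketch (not part of the commit) of how an inspection tool might use this symbol: the exported global holds the address of the fake metadata object, and every slab begins with that same address. The resolveRemoteSymbol and readRemotePointer helpers below are hypothetical stand-ins for whatever symbol lookup and memory reading the tool already has.

```cpp
#include <cstdint>

// Hypothetical helpers assumed to exist in the inspection tool:
// resolveRemoteSymbol() looks up a symbol's address in the target process,
// and readRemotePointer() reads one pointer-sized value from that address.
uint64_t resolveRemoteSymbol(const char *name);
uint64_t readRemotePointer(uint64_t remoteAddress);

// Returns true if the allocation at `allocation` starts with the fake slab
// metadata, i.e. it is an async task allocator slab.
bool isAsyncTaskSlab(uint64_t allocation) {
  // The symbol is a `const void *const` whose value is the address of the
  // fake metadata object, so dereference it once in the target process.
  uint64_t symbol =
      resolveRemoteSymbol("_swift_concurrency_debug_asyncTaskSlabMetadata");
  uint64_t slabMetadata = readRemotePointer(symbol);
  return readRemotePointer(allocation) == slabMetadata;
}
```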

stdlib/public/Concurrency/Task.cpp

Lines changed: 4 additions & 0 deletions
@@ -68,6 +68,10 @@ using namespace swift;
 using FutureFragment = AsyncTask::FutureFragment;
 using TaskGroup = swift::TaskGroup;
 
+Metadata swift::TaskAllocatorSlabMetadata;
+const void *const swift::_swift_concurrency_debug_asyncTaskSlabMetadata =
+    &TaskAllocatorSlabMetadata;
+
 void FutureFragment::destroy() {
   auto queueHead = waitQueue.load(std::memory_order_acquire);
   switch (queueHead.getStatus()) {

stdlib/public/Concurrency/TaskPrivate.h

Lines changed: 2 additions & 1 deletion
@@ -270,8 +270,9 @@ class alignas(sizeof(void*) * 2) ActiveTaskStatus {
 
 /// The size of an allocator slab.
 static constexpr size_t SlabCapacity = 1000;
+extern Metadata TaskAllocatorSlabMetadata;
 
-using TaskAllocator = StackAllocator<SlabCapacity>;
+using TaskAllocator = StackAllocator<SlabCapacity, &TaskAllocatorSlabMetadata>;
 
 /// Private storage in an AsyncTask object.
 struct AsyncTask::PrivateStorage {

stdlib/public/SwiftRemoteMirror/SwiftRemoteMirror.cpp

Lines changed: 44 additions & 19 deletions
@@ -18,7 +18,7 @@ extern "C" {
 SWIFT_REMOTE_MIRROR_LINKAGE
 unsigned long long swift_reflection_classIsSwiftMask = 2;
 
-SWIFT_REMOTE_MIRROR_LINKAGE uint32_t swift_reflection_libraryVersion = 1;
+SWIFT_REMOTE_MIRROR_LINKAGE uint32_t swift_reflection_libraryVersion = 2;
 }
 
 #include "swift/Demangling/Demangler.h"
@@ -43,6 +43,7 @@ struct SwiftReflectionContext {
   std::vector<std::function<void()>> freeFuncs;
   std::vector<std::tuple<swift_addr_t, swift_addr_t>> dataSegments;
   std::string lastString;
+  std::vector<swift_async_task_allocation_chunk_t> lastChunks;
 
   SwiftReflectionContext(MemoryReaderImpl impl) {
     auto Reader = std::make_shared<CMemoryReader>(impl);
@@ -774,22 +775,46 @@ const char *swift_reflection_iterateMetadataAllocationBacktraces(
   return returnableCString(ContextRef, Error);
 }
 
-const char *swift_reflection_iterateAsyncTaskAllocations(
-    SwiftReflectionContextRef ContextRef, swift_reflection_ptr_t AsyncTaskPtr,
-    swift_asyncTaskAllocationIterator Call, void *ContextPtr) {
-  auto Context = ContextRef->nativeContext;
-  auto Error = Context->iterateAsyncTaskAllocations(
-      AsyncTaskPtr, [&](auto AllocationPtr, auto Count, auto Chunks) {
-        std::vector<swift_async_task_allocation_chunk_t> ConvertedChunks;
-        ConvertedChunks.reserve(Count);
-        for (unsigned i = 0; i < Count; i++) {
-          swift_async_task_allocation_chunk_t Chunk;
-          Chunk.Start = Chunks[i].Start;
-          Chunk.Length = Chunks[i].Length;
-          Chunk.Kind = convertAllocationChunkKind(Chunks[i].Kind);
-          ConvertedChunks.push_back(Chunk);
-        }
-        Call(AllocationPtr, Count, ConvertedChunks.data(), ContextPtr);
-      });
-  return returnableCString(ContextRef, Error);
+swift_async_task_slab_return_t
+swift_reflection_asyncTaskSlabPointer(SwiftReflectionContextRef ContextRef,
+                                      swift_reflection_ptr_t AsyncTaskPtr) {
+  auto Context = ContextRef->nativeContext;
+  llvm::Optional<std::string> Error;
+  NativeReflectionContext::StoredPointer SlabPtr;
+  std::tie(Error, SlabPtr) = Context->asyncTaskSlabPtr(AsyncTaskPtr);
+
+  swift_async_task_slab_return_t Result = {};
+  Result.Error = returnableCString(ContextRef, Error);
+  Result.SlabPtr = SlabPtr;
+  return Result;
+}
+
+swift_async_task_slab_allocations_return_t
+swift_reflection_asyncTaskSlabAllocations(SwiftReflectionContextRef ContextRef,
+                                          swift_reflection_ptr_t SlabPtr) {
+  auto Context = ContextRef->nativeContext;
+  llvm::Optional<std::string> Error;
+  NativeReflectionContext::AsyncTaskSlabInfo Info;
+  std::tie(Error, Info) = Context->asyncTaskSlabAllocations(SlabPtr);
+
+  swift_async_task_slab_allocations_return_t Result = {};
+  Result.Error = returnableCString(ContextRef, Error);
+
+  Result.NextSlab = Info.NextSlab;
+  Result.SlabSize = Info.SlabSize;
+
+  ContextRef->lastChunks.clear();
+  ContextRef->lastChunks.reserve(Info.Chunks.size());
+  for (auto &Chunk : Info.Chunks) {
+    swift_async_task_allocation_chunk_t ConvertedChunk;
+    ConvertedChunk.Start = Chunk.Start;
+    ConvertedChunk.Length = Chunk.Length;
+    ConvertedChunk.Kind = convertAllocationChunkKind(Chunk.Kind);
+    ContextRef->lastChunks.push_back(ConvertedChunk);
+  }
+
+  Result.ChunkCount = ContextRef->lastChunks.size();
+  Result.Chunks = ContextRef->lastChunks.data();
+
+  return Result;
 }
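
Because the returned Chunks pointer aliases the context's internal lastChunks buffer, it is only valid until the next swift_reflection_* call on the same context. A minimal sketch (illustrative, not from the patch) of the copy-before-reuse pattern a client might follow:

```cpp
#include "swift/SwiftRemoteMirror/SwiftRemoteMirror.h"
#include <vector>

std::vector<swift_async_task_allocation_chunk_t>
copySlabChunks(SwiftReflectionContextRef Context, swift_reflection_ptr_t Slab) {
  std::vector<swift_async_task_allocation_chunk_t> Saved;
  swift_async_task_slab_allocations_return_t Info =
      swift_reflection_asyncTaskSlabAllocations(Context, Slab);
  if (!Info.Error)
    Saved.assign(Info.Chunks, Info.Chunks + Info.ChunkCount);
  // Further swift_reflection_* calls may overwrite Info.Chunks; Saved keeps a
  // stable copy.
  return Saved;
}
```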

stdlib/public/runtime/StackAllocator.h

Lines changed: 11 additions & 3 deletions
@@ -30,7 +30,7 @@ namespace swift {
 ///
 /// StackAllocator performs fast allocation and deallocation of memory by
 /// implementing a bump-pointer allocation strategy.
-/// 
+///
 /// This isn't strictly a bump-pointer allocator as it uses backing slabs of
 /// memory rather than relying on a boundless contiguous heap. However, it has
 /// bump-pointer semantics in that it is a monotonically growing pool of memory
@@ -45,7 +45,10 @@ namespace swift {
 /// It's possible to place the first slab into pre-allocated memory.
 ///
 /// The SlabCapacity specifies the capacity for newly allocated slabs.
-template <size_t SlabCapacity>
+///
+/// SlabMetadataPtr specifies a fake metadata pointer to place at the beginning
+/// of slab allocations, so analysis tools can identify them.
+template <size_t SlabCapacity, Metadata *SlabMetadataPtr>
 class StackAllocator {
 private:
 
@@ -86,6 +89,10 @@ class StackAllocator {
   /// This struct is actually just the slab header. The slab buffer is tail
   /// allocated after Slab.
   struct Slab {
+    /// A fake metadata pointer that analysis tools can use to identify slab
+    /// allocations.
+    const void *metadata;
+
     /// A single linked list of all allocated slabs.
     Slab *next = nullptr;
 
@@ -95,7 +102,8 @@ class StackAllocator {
 
     // Here starts the tail allocated memory buffer of the slab.
 
-    Slab(size_t newCapacity) : capacity(newCapacity) {
+    Slab(size_t newCapacity)
+        : metadata(SlabMetadataPtr), capacity(newCapacity) {
       assert((size_t)capacity == newCapacity && "capacity overflow");
    }

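A quick sketch of the new template signature in use, modeled on the TaskAllocator alias in TaskPrivate.h above. The allocator name, capacity, and include paths are illustrative assumptions, not part of the commit.

```cpp
#include "swift/Runtime/Metadata.h"
#include "StackAllocator.h"

// A fake metadata object whose address tags every slab this allocator creates,
// mirroring how Task.cpp defines TaskAllocatorSlabMetadata.
swift::Metadata ExampleSlabMetadata;

// Slabs of 1024 bytes; each slab header's first word will be
// &ExampleSlabMetadata, so inspection tools can recognize the allocation.
using ExampleAllocator = swift::StackAllocator<1024, &ExampleSlabMetadata>;
```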