Commit 4bcbcb6

Merge pull request #37689 from mikeash/concurrentreadablearray-use-after-free-fix-5.5

[5.5][Runtime] Fix use-after-free bug in Concurrent.h.

2 parents: 10b00f2 + adb7aa2


include/swift/Runtime/Concurrent.h

Lines changed: 22 additions & 10 deletions
@@ -556,17 +556,16 @@ template <class ElemTy> struct ConcurrentReadableArray {
       storage = newStorage;
       Capacity = newCapacity;

-      // Use seq_cst here to ensure that the subsequent load of ReaderCount is
-      // ordered after this store. If ReaderCount is loaded first, then a new
-      // reader could come in between that load and this store, and then we
-      // could end up freeing the old storage pointer while it's still in use.
-      Elements.store(storage, std::memory_order_seq_cst);
+      Elements.store(storage, std::memory_order_release);
     }

     new(&storage->data()[count]) ElemTy(elem);
     storage->Count.store(count + 1, std::memory_order_release);

-    if (ReaderCount.load(std::memory_order_seq_cst) == 0)
+    // The standard says that std::memory_order_seq_cst only applies to
+    // read-modify-write operations, so we need an explicit fence:
+    std::atomic_thread_fence(std::memory_order_seq_cst);
+    if (ReaderCount.load(std::memory_order_relaxed) == 0)
       deallocateFreeList();
   }

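The hunk above trades a seq_cst store for a release store plus an explicit seq_cst fence before a now-relaxed load of ReaderCount. Below is a minimal, self-contained sketch of that writer-side pattern; the globals and the function name are assumptions for illustration, not the runtime's actual declarations:

    #include <atomic>
    #include <cstdint>

    std::atomic<int *> Elements{nullptr};   // stand-in for the array's storage pointer
    std::atomic<uint32_t> ReaderCount{0};   // stand-in for the active-reader count

    void writerPublish(int *newStorage) {
      // Publish the new storage; release pairs with the readers'
      // acquire load of Elements.
      Elements.store(newStorage, std::memory_order_release);

      // Per the commit's comment, a seq_cst load alone does not keep the
      // store above ordered before the load below; the explicit fence does.
      // Without it, a reader could register between the reordered load and
      // store and be left holding freed storage.
      std::atomic_thread_fence(std::memory_order_seq_cst);
      if (ReaderCount.load(std::memory_order_relaxed) == 0) {
        // No readers are registered, so retired storage may be freed
        // (deallocateFreeList() in the real code).
      }
    }
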
@@ -666,6 +665,13 @@ struct ConcurrentReadableHashMap {
     return x <= 1 ? 0 : log2(x >> 1) + 1;
   }

+  // A crude way to detect trivial use-after-free bugs given that a lot of
+  // data structures have a strong bias toward bits that are zero.
+#ifndef NDEBUG
+  static constexpr uint8_t InlineCapacityDebugBits = 0xC0;
+#else
+  static constexpr uint8_t InlineCapacityDebugBits = 0;
+#endif
   static constexpr uintptr_t InlineIndexBits = 4;
   static constexpr uintptr_t InlineIndexMask = 0xF;
   static constexpr uintptr_t InlineCapacity =
@@ -708,7 +714,7 @@ struct ConcurrentReadableHashMap {
         swift_unreachable("unknown index size");
       }
       Value = reinterpret_cast<uintptr_t>(ptr) | static_cast<uintptr_t>(mode);
-      *reinterpret_cast<uint8_t *>(ptr) = capacityLog2;
+      *reinterpret_cast<uint8_t *>(ptr) = capacityLog2 | InlineCapacityDebugBits;
     }

     bool valueIsPointer() { return Value & 3; }
@@ -739,8 +745,11 @@ struct ConcurrentReadableHashMap {
     }

     uint8_t getCapacityLog2() {
-      if (auto *ptr = pointer())
-        return *reinterpret_cast<uint8_t *>(ptr);
+      if (auto *ptr = pointer()) {
+        auto result = *reinterpret_cast<uint8_t *>(ptr);
+        assert((result & InlineCapacityDebugBits) == InlineCapacityDebugBits);
+        return result & ~InlineCapacityDebugBits;
+      }
       return InlineCapacityLog2;
     }

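Taken together, the three hash-map hunks implement one debug tripwire: InlineCapacityDebugBits reserves the two high bits of the stored capacity byte in debug builds, the writer ORs them in, and getCapacityLog2 asserts they are still set before masking them off. If the byte's memory was freed and reused, those bits tend to read back as zero and the assert fires. A compact standalone sketch of the same idea, with assumed names:

    #include <cassert>
    #include <cstdint>

    #ifndef NDEBUG
    static constexpr uint8_t DebugBits = 0xC0; // two tag bits in debug builds
    #else
    static constexpr uint8_t DebugBits = 0;    // compiles away in release
    #endif

    // capacityLog2 is small, so the top bits of the byte are free to
    // carry the tag.
    void storeCapacityByte(uint8_t *slot, uint8_t capacityLog2) {
      *slot = capacityLog2 | DebugBits;
    }

    uint8_t loadCapacityByte(const uint8_t *slot) {
      uint8_t raw = *slot;
      // Freed or zero-initialized memory is likely to have these bits clear.
      assert((raw & DebugBits) == DebugBits && "possible use-after-free");
      return raw & ~DebugBits;
    }
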
@@ -859,7 +868,10 @@ struct ConcurrentReadableHashMap {
   /// Free all the arrays in the free lists if there are no active readers. If
   /// there are active readers, do nothing.
   void deallocateFreeListIfSafe() {
-    if (ReaderCount.load(std::memory_order_seq_cst) == 0)
+    // The standard says that std::memory_order_seq_cst only applies to
+    // read-modify-write operations, so we need an explicit fence:
+    std::atomic_thread_fence(std::memory_order_seq_cst);
+    if (ReaderCount.load(std::memory_order_relaxed) == 0)
       ConcurrentFreeListNode::freeAll(&FreeList, free);
   }

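deallocateFreeListIfSafe gets the same fence-then-relaxed-load treatment as the array's writer path. For completeness, here is a sketch of the reader side such a writer pairs with; the function and orderings are illustrative assumptions rather than the runtime's exact code:

    #include <atomic>
    #include <cstdint>

    std::atomic<uint32_t> ReaderCount{0};   // stand-in for the active-reader count

    void readShared() {
      // The increment is a read-modify-write, so it participates in the
      // single seq_cst order that the writer's fence synchronizes with.
      ReaderCount.fetch_add(1, std::memory_order_seq_cst);

      // ... read the shared structures here; a concurrent writer sees a
      // nonzero ReaderCount and defers freeing ...

      ReaderCount.fetch_sub(1, std::memory_order_release);
    }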