@@ -12,10 +12,14 @@
 #ifndef SWIFT_RUNTIME_CONCURRENTUTILS_H
 #define SWIFT_RUNTIME_CONCURRENTUTILS_H
 #include <iterator>
+#include <algorithm>
 #include <atomic>
 #include <functional>
 #include <stdint.h>
 #include "llvm/Support/Allocator.h"
+#include "Atomic.h"
+#include "Debug.h"
+#include "Mutex.h"

 #if defined(__FreeBSD__) || defined(__CYGWIN__) || defined(__HAIKU__)
 #include <stdio.h>
@@ -406,6 +410,136 @@ class ConcurrentMap
   }
 };

+
+/// An append-only array that can be read without taking locks. Writes
+/// are still locked and serialized, but only with respect to other
+/// writes.
+template <class ElemTy> struct ConcurrentReadableArray {
+private:
+  /// The struct used for the array's storage. The `Elem` member is
+  /// considered to be the first element of a variable-length array,
+  /// whose size is determined by the allocation. The `Capacity` member
+  /// of the enclosing `ConcurrentReadableArray` records how many
+  /// elements that allocation can hold.
+  struct Storage {
+    std::atomic<size_t> Count;
+    typename std::aligned_storage<sizeof(ElemTy), alignof(ElemTy)>::type Elem;
+
+    static Storage *allocate(size_t capacity) {
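+      // `Storage` already embeds space for one element, so only
+      // `capacity - 1` additional slots are needed. `Storage()` is an
+      // unevaluated operand of sizeof; no temporary is constructed.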
+      auto size = sizeof(Storage) + (capacity - 1) * sizeof(Storage().Elem);
+      auto *ptr = reinterpret_cast<Storage *>(malloc(size));
+      if (!ptr) swift::crash("Could not allocate memory.");
+      ptr->Count.store(0, std::memory_order_relaxed);
+      return ptr;
+    }
+
+    void deallocate() {
+      for (size_t i = 0; i < Count; i++) {
+        data()[i].~ElemTy();
+      }
+      free(this);
+    }
+
+    ElemTy *data() {
+      return reinterpret_cast<ElemTy *>(&Elem);
+    }
+  };
+
+  size_t Capacity;
+  std::atomic<size_t> ReaderCount;
+  std::atomic<Storage *> Elements;
+  Mutex WriterLock;
+  std::vector<Storage *> FreeList;
+
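+  // ReaderCount tracks outstanding Snapshots. The writer frees retired
+  // storage only when this count is zero, so any live snapshot keeps
+  // every pointer in FreeList from being deallocated.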
+  void incrementReaders() {
+    ReaderCount.fetch_add(1, std::memory_order_acquire);
+  }
+
+  void decrementReaders() {
+    ReaderCount.fetch_sub(1, std::memory_order_release);
+  }
+
+  void deallocateFreeList() {
+    for (Storage *storage : FreeList)
+      storage->deallocate();
+    FreeList.clear();
+    FreeList.shrink_to_fit();
+  }
+
+public:
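+  /// A consistent view of the array's contents at the time snapshot()
+  /// was called. Each live Snapshot counts as a reader, which keeps the
+  /// storage it points at from being freed.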
+  struct Snapshot {
+    ConcurrentReadableArray *Array;
+    const ElemTy *Start;
+    size_t Count;
+
+    Snapshot(ConcurrentReadableArray *array, const ElemTy *start, size_t count)
+      : Array(array), Start(start), Count(count) {}
+
+    Snapshot(const Snapshot &other)
+      : Array(other.Array), Start(other.Start), Count(other.Count) {
+      Array->incrementReaders();
+    }
+
+    ~Snapshot() {
+      Array->decrementReaders();
+    }
+
+    const ElemTy *begin() { return Start; }
+    const ElemTy *end() { return Start + Count; }
+    size_t count() { return Count; }
+  };
+
+  // This type cannot be safely copied, moved, or deleted.
+  ConcurrentReadableArray(const ConcurrentReadableArray &) = delete;
+  ConcurrentReadableArray(ConcurrentReadableArray &&) = delete;
+  ConcurrentReadableArray &operator=(const ConcurrentReadableArray &) = delete;
+
+  ConcurrentReadableArray() : Capacity(0), ReaderCount(0), Elements(nullptr) {}
+
+  ~ConcurrentReadableArray() {
+    assert(ReaderCount.load(std::memory_order_acquire) == 0 &&
+           "deallocating ConcurrentReadableArray with outstanding snapshots");
+    deallocateFreeList();
+  }
+
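+  /// Append an element. Writers are serialized by WriterLock and never
+  /// block readers. When the array grows, the old storage is retired to
+  /// FreeList and freed only once no snapshots remain outstanding.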
+  void push_back(const ElemTy &elem) {
+    ScopedLock guard(WriterLock);
+
+    auto *storage = Elements.load(std::memory_order_relaxed);
+    auto count = storage ? storage->Count.load(std::memory_order_relaxed) : 0;
+    if (count >= Capacity) {
+      auto newCapacity = std::max((size_t)16, count * 2);
+      auto *newStorage = Storage::allocate(newCapacity);
+      if (storage) {
+        std::copy(storage->data(), storage->data() + count, newStorage->data());
+        newStorage->Count.store(count, std::memory_order_relaxed);
+        FreeList.push_back(storage);
+      }
+
+      storage = newStorage;
+      Capacity = newCapacity;
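+      // Publish the new storage with a release store, so a reader that
+      // observes the new pointer also observes the copied elements.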
+      Elements.store(storage, std::memory_order_release);
+    }
+
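+    // Construct the new element in place, then publish the updated count
+    // with a release store so readers never observe an uninitialized slot.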
+    new(&storage->data()[count]) ElemTy(elem);
+    storage->Count.store(count + 1, std::memory_order_release);
+
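+    // If no snapshots are outstanding, retired storage can be freed now.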
+    if (ReaderCount.load(std::memory_order_acquire) == 0)
+      deallocateFreeList();
+  }
+
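+  /// Take a snapshot of the array's current contents. Elements appended
+  /// after the snapshot is taken are not visible through it.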
+  Snapshot snapshot() {
+    incrementReaders();
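+    // The reader count must be incremented before Elements is loaded;
+    // otherwise the writer could reclaim the storage between our load
+    // and our reads of it.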
+    auto *storage = Elements.load(SWIFT_MEMORY_ORDER_CONSUME);
+    if (storage == nullptr) {
+      return Snapshot(this, nullptr, 0);
+    }
+
+    auto count = storage->Count.load(std::memory_order_acquire);
+    const auto *ptr = storage->data();
+    return Snapshot(this, ptr, count);
+  }
+};
+
 } // end namespace swift

 #endif // SWIFT_RUNTIME_CONCURRENTUTILS_H
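For context, here is a minimal usage sketch of the new type. This is hypothetical driver code, not part of the commit; the include path is a guess based on the header guard, and the element counts are arbitrary.

```cpp
#include <cstdio>
#include <thread>

#include "swift/Runtime/Concurrent.h"  // assumed location of this header

int main() {
  swift::ConcurrentReadableArray<int> array;

  // Writer: push_back() calls are serialized by the internal WriterLock.
  std::thread writer([&] {
    for (int i = 0; i < 1000; i++)
      array.push_back(i);
  });

  // Reader: snapshot() never blocks on the writer. The Snapshot is a
  // stable view; it pins the storage it points at until it is destroyed.
  std::thread reader([&] {
    auto snapshot = array.snapshot();
    size_t sum = 0;
    for (int value : snapshot)
      sum += (size_t)value;
    std::printf("saw %zu elements, sum %zu\n", snapshot.count(), sum);
  });

  writer.join();
  reader.join();
  return 0;
}
```

The reader sees however many elements had been published when it called snapshot(); appends that race with or follow the snapshot are simply not visible through it.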