|
#include <stdint.h>
#include <stdlib.h>

#include <algorithm>
#include <atomic>
#include <functional>
#include <memory>
#include <new>
#include <type_traits>
#include <vector>

#include "llvm/Support/Allocator.h"

#include "Mutex.h"
19 | 20 |
|
20 | 21 | #if defined(__FreeBSD__) || defined(__CYGWIN__) || defined(__HAIKU__)
|
21 | 22 | #include <stdio.h>
|
@@ -406,6 +407,91 @@ class ConcurrentMap
|
406 | 407 | }
|
407 | 408 | };
|
408 | 409 |
|
| 410 | + |
| 411 | +template <class ElemTy> struct ConcurrentReadableArray { |
| 412 | +private: |
| 413 | + struct Storage { |
| 414 | + std::atomic<size_t> Count; |
| 415 | + typename std::aligned_storage<sizeof(ElemTy), alignof(ElemTy)>::type Elem; |
| 416 | + |
| 417 | + ElemTy *data() { |
| 418 | + return reinterpret_cast<ElemTy *>(&Elem); |
| 419 | + } |
| 420 | + }; |
| 421 | + |
| 422 | + std::atomic<size_t> Capacity; |
| 423 | + std::atomic<size_t> ReaderCount; |
| 424 | + std::atomic<Storage *> Elements; |
| 425 | + Mutex WriterLock; |
| 426 | + std::vector<Storage *> FreeList; |
| 427 | + |
| 428 | + Storage *allocate(size_t capacity) { |
| 429 | + auto size = sizeof(Storage) + (capacity - 1) * sizeof(Storage().Elem); |
| 430 | + auto *ptr = reinterpret_cast<Storage *>(malloc(size)); |
| 431 | + ptr->Count.store(0, std::memory_order_relaxed); |
| 432 | + return ptr; |
| 433 | + } |
| 434 | + |
| 435 | + void deallocate(Storage *storage) { |
| 436 | + if (storage == nullptr) return; |
| 437 | + |
| 438 | + auto *data = storage->data(); |
| 439 | + for (size_t i = 0; i < storage->Count; i++) { |
| 440 | + data[i].~ElemTy(); |
| 441 | + } |
| 442 | + free(storage); |
| 443 | + } |
| 444 | + |
| 445 | + |
| 446 | +public: |
| 447 | + void push_back(const ElemTy &elem) { |
| 448 | + ScopedLock guard(WriterLock); |
| 449 | + |
| 450 | + auto *storage = Elements.load(std::memory_order_relaxed); |
| 451 | + if (storage == nullptr) { |
| 452 | + storage = allocate(16); |
| 453 | + Capacity = 16; |
| 454 | + Elements.store(storage, std::memory_order_release); |
| 455 | + } else if (storage->Count >= Capacity) { |
| 456 | + auto *newStorage = allocate(Capacity * 2); |
| 457 | + std::copy(storage->data(), storage->data() + storage->Count, newStorage->data()); |
| 458 | + FreeList.push_back(storage); |
| 459 | + |
| 460 | + storage = newStorage; |
| 461 | + Capacity = Capacity * 2; |
| 462 | + Elements.store(storage, std::memory_order_release); |
| 463 | + } |
| 464 | + |
| 465 | + auto Count = storage->Count.load(std::memory_order_relaxed); |
| 466 | + storage->data()[Count] = elem; |
| 467 | + storage->Count.store(Count + 1, std::memory_order_release); |
| 468 | + |
| 469 | + if (ReaderCount.load(std::memory_order_relaxed) == 0) |
| 470 | + for (Storage *storage : FreeList) |
| 471 | + deallocate(storage); |
| 472 | + } |
| 473 | + |
| 474 | + template <class F> auto read(F f) -> decltype(f(nullptr, 0)) { |
| 475 | + ReaderCount.fetch_add(1, std::memory_order_relaxed); |
| 476 | + auto *storage = Elements.load(std::memory_order_consume); |
| 477 | + auto count = storage->Count.load(std::memory_order_acquire); |
| 478 | + auto *ptr = storage->data(); |
| 479 | + |
| 480 | + decltype(f(nullptr, 0)) result = f(ptr, count); |
| 481 | + |
| 482 | + ReaderCount.fetch_sub(1, std::memory_order_relaxed); |
| 483 | + |
| 484 | + return result; |
| 485 | + } |
| 486 | + |
| 487 | + /// Get the current count. It's just a snapshot and may be obsolete immediately. |
| 488 | + size_t count() { |
| 489 | + return read([](ElemTy *ptr, size_t count) -> size_t { |
| 490 | + return count; |
| 491 | + }); |
| 492 | + } |
| 493 | +}; |
| 494 | + |
409 | 495 | } // end namespace swift
|
410 | 496 |
|
411 | 497 | #endif // SWIFT_RUNTIME_CONCURRENTUTILS_H
|
0 commit comments