 #ifndef SWIFT_RUNTIME_CONCURRENTUTILS_H
 #define SWIFT_RUNTIME_CONCURRENTUTILS_H
 #include <iterator>
+#include <algorithm>
 #include <atomic>
 #include <functional>
 #include <stdint.h>
 #include "llvm/Support/Allocator.h"
+#include "Debug.h"
 #include "Mutex.h"
 
 #if defined(__FreeBSD__) || defined(__CYGWIN__) || defined(__HAIKU__)
@@ -408,8 +410,15 @@ class ConcurrentMap
 };
 
 
+/// An append-only array that can be read without taking locks. Writes
+/// are still locked and serialized, but only with respect to other
+/// writes.
 template <class ElemTy> struct ConcurrentReadableArray {
 private:
+  /// The struct used for the array's storage. The `Elem` member is
+  /// considered to be the first element of a variable-length array,
+  /// whose size is determined by the allocation. The `Capacity` member
+  /// from `ConcurrentReadableArray` indicates how large it can be.
   struct Storage {
     std::atomic<size_t> Count;
     typename std::aligned_storage<sizeof(ElemTy), alignof(ElemTy)>::type Elem;
@@ -419,7 +428,7 @@ template <class ElemTy> struct ConcurrentReadableArray {
     }
   };
 
-  std::atomic<size_t> Capacity;
+  size_t Capacity;
   std::atomic<size_t> ReaderCount;
  std::atomic<Storage *> Elements;
  Mutex WriterLock;
@@ -428,6 +437,7 @@ template <class ElemTy> struct ConcurrentReadableArray {
   Storage *allocate(size_t capacity) {
     auto size = sizeof(Storage) + (capacity - 1) * sizeof(Storage().Elem);
     auto *ptr = reinterpret_cast<Storage *>(malloc(size));
+    if (!ptr) swift::crash("Could not allocate memory.");
     ptr->Count.store(0, std::memory_order_relaxed);
     return ptr;
   }
@@ -442,35 +452,38 @@ template <class ElemTy> struct ConcurrentReadableArray {
     free(storage);
   }
 
-
 public:
   void push_back(const ElemTy &elem) {
     ScopedLock guard(WriterLock);
 
     auto *storage = Elements.load(std::memory_order_relaxed);
-    if (storage == nullptr) {
-      storage = allocate(16);
-      Capacity = 16;
-      Elements.store(storage, std::memory_order_release);
-    } else if (storage->Count >= Capacity) {
-      auto *newStorage = allocate(Capacity * 2);
-      std::copy(storage->data(), storage->data() + storage->Count, newStorage->data());
-      FreeList.push_back(storage);
+    auto count = storage ? storage->Count.load(std::memory_order_relaxed) : 0;
+    if (count >= Capacity) {
+      auto newCapacity = std::max((size_t)16, count * 2);
+      auto *newStorage = allocate(newCapacity);
+      if (storage) {
+        std::copy(storage->data(), storage->data() + count, newStorage->data());
+        FreeList.push_back(storage);
+      }
 
       storage = newStorage;
-      Capacity = Capacity * 2;
+      Capacity = newCapacity;
       Elements.store(storage, std::memory_order_release);
     }
 
     auto Count = storage->Count.load(std::memory_order_relaxed);
-    storage->data()[Count] = elem;
+    new(&storage->data()[Count]) ElemTy(elem);
     storage->Count.store(Count + 1, std::memory_order_release);
 
     if (ReaderCount.load(std::memory_order_relaxed) == 0)
       for (Storage *storage : FreeList)
        deallocate(storage);
  }

+  /// Read the contents of the array. The parameter `f` is called with
+  /// two parameters: a pointer to the elements in the array, and the
+  /// count. This represents a snapshot of the contents at the time
+  /// `read` was called. The pointer becomes invalid after `f` returns.
   template <class F> auto read(F f) -> decltype(f(nullptr, 0)) {
     ReaderCount.fetch_add(1, std::memory_order_relaxed);
     auto *storage = Elements.load(std::memory_order_consume);
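For context, here is a minimal usage sketch of the container this commit adds. It is not part of the patch: it only assumes the push_back/read semantics documented above, and the names EventLog, logEvent, and sumEvents (and the int element type) are made up for illustration. The include path and swift:: qualification are also assumptions about how the header is consumed.

// Illustrative only -- not part of this patch. Assumes the documented
// push_back/read behavior; all names below are hypothetical.
#include "swift/Runtime/Concurrent.h"

static swift::ConcurrentReadableArray<int> EventLog;

void logEvent(int id) {
  // Writers serialize against each other on the internal WriterLock.
  EventLog.push_back(id);
}

long sumEvents() {
  // Readers take no lock; the callback receives a snapshot pointer and
  // count that are only valid until the callback returns.
  return EventLog.read([](const int *elements, size_t count) {
    long total = 0;
    for (size_t i = 0; i < count; ++i)
      total += elements[i];
    return total;
  });
}

One design point visible in push_back above: storage retired by a grow goes onto FreeList and is only deallocated when ReaderCount is zero, so any reader still holding the old snapshot pointer keeps valid memory until it finishes.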