@@ -56,7 +56,7 @@ StackTrace StackStore::Load(Id id) {
   uptr idx = IdToOffset(id);
   uptr block_idx = GetBlockIdx(idx);
   CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
-  const uptr *stack_trace = blocks_[block_idx].GetOrUnpack();
+  const uptr *stack_trace = blocks_[block_idx].GetOrUnpack(this);
   if (!stack_trace)
     return {};
   stack_trace += GetInBlockIdx(idx);
@@ -65,11 +65,7 @@ StackTrace StackStore::Load(Id id) {
 }
 
 uptr StackStore::Allocated() const {
-  uptr next_block = GetBlockIdx(
-      RoundUpTo(atomic_load_relaxed(&total_frames_), kBlockSizeFrames));
-  uptr res = 0;
-  for (uptr i = 0; i < next_block; ++i) res += blocks_[i].Allocated();
-  return res + sizeof(*this);
+  return atomic_load_relaxed(&allocated_) + sizeof(*this);
 }
 
 uptr *StackStore::Alloc(uptr count, uptr *idx, uptr *pack) {
@@ -83,7 +79,7 @@ uptr *StackStore::Alloc(uptr count, uptr *idx, uptr *pack) {
       // Fits into the a single block.
       CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
       *idx = start;
-      return blocks_[block_idx].GetOrCreate() + GetInBlockIdx(start);
+      return blocks_[block_idx].GetOrCreate(this) + GetInBlockIdx(start);
     }
 
     // Retry. We can't use range allocated in two different blocks.
@@ -96,14 +92,24 @@ uptr *StackStore::Alloc(uptr count, uptr *idx, uptr *pack) {
   }
 }
 
+void *StackStore::Map(uptr size, const char *mem_type) {
+  atomic_fetch_add(&allocated_, size, memory_order_relaxed);
+  return MmapNoReserveOrDie(size, mem_type);
+}
+
+void StackStore::Unmap(void *addr, uptr size) {
+  atomic_fetch_sub(&allocated_, size, memory_order_relaxed);
+  UnmapOrDie(addr, size);
+}
+
 uptr StackStore::Pack(Compression type) {
   uptr res = 0;
-  for (BlockInfo &b : blocks_) res += b.Pack(type);
+  for (BlockInfo &b : blocks_) res += b.Pack(type, this);
   return res;
 }
 
 void StackStore::TestOnlyUnmap() {
-  for (BlockInfo &b : blocks_) b.TestOnlyUnmap();
+  for (BlockInfo &b : blocks_) b.TestOnlyUnmap(this);
   internal_memset(this, 0, sizeof(*this));
 }
 
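(Aside, not part of the patch.) The Map/Unmap helpers added above put every block mapping behind a single atomic byte counter, which is what lets Allocated() earlier in the patch collapse to one relaxed load plus sizeof(*this). A minimal standalone sketch of the same accounting pattern follows; it uses plain mmap/munmap and std::atomic instead of the sanitizer-internal MmapNoReserveOrDie/UnmapOrDie and atomic_uintptr_t, and the CountingMapper name is illustrative only.

#include <atomic>
#include <cstddef>
#include <sys/mman.h>

// Illustrative stand-in for the StackStore accounting; not the sanitizer code.
class CountingMapper {
 public:
  void *Map(size_t size) {
    void *p = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
      return nullptr;  // the real code dies on mapping failure instead
    allocated_.fetch_add(size, std::memory_order_relaxed);
    return p;
  }

  void Unmap(void *addr, size_t size) {
    munmap(addr, size);
    allocated_.fetch_sub(size, std::memory_order_relaxed);
  }

  // O(1) report: no per-block walk, no locks.
  size_t Allocated() const {
    return allocated_.load(std::memory_order_relaxed);
  }

 private:
  std::atomic<size_t> allocated_{0};
};

Relaxed ordering is enough here because the counter is only a statistic; nothing synchronizes through it.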
@@ -114,22 +120,21 @@ uptr *StackStore::BlockInfo::Get() const {
   return reinterpret_cast<uptr *>(atomic_load_relaxed(&data_));
 }
 
-uptr *StackStore::BlockInfo::Create() {
+uptr *StackStore::BlockInfo::Create(StackStore *store) {
   SpinMutexLock l(&mtx_);
   uptr *ptr = Get();
   if (!ptr) {
-    ptr = reinterpret_cast<uptr *>(
-        MmapNoReserveOrDie(kBlockSizeBytes, "StackStore"));
+    ptr = reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStore"));
    atomic_store(&data_, reinterpret_cast<uptr>(ptr), memory_order_release);
   }
   return ptr;
 }
 
-uptr *StackStore::BlockInfo::GetOrCreate() {
+uptr *StackStore::BlockInfo::GetOrCreate(StackStore *store) {
   uptr *ptr = Get();
   if (LIKELY(ptr))
     return ptr;
-  return Create();
+  return Create(store);
 }
 
 class SLeb128Encoder {
@@ -229,7 +234,7 @@ struct PackedHeader {
229
234
};
230
235
} // namespace
231
236
232
- uptr *StackStore::BlockInfo::GetOrUnpack () {
237
+ uptr *StackStore::BlockInfo::GetOrUnpack (StackStore *store ) {
233
238
SpinMutexLock l (&mtx_);
234
239
switch (state) {
235
240
case State::Storing:
@@ -249,8 +254,8 @@ uptr *StackStore::BlockInfo::GetOrUnpack() {
 
   uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached());
 
-  uptr *unpacked = reinterpret_cast<uptr *>(
-      MmapNoReserveOrDie(kBlockSizeBytes, "StackStoreUnpack"));
+  uptr *unpacked =
+      reinterpret_cast<uptr *>(store->Map(kBlockSizeBytes, "StackStoreUnpack"));
 
   uptr *unpacked_end;
   switch (header->type) {
@@ -271,13 +276,13 @@ uptr *StackStore::BlockInfo::GetOrUnpack() {
 
   MprotectReadOnly(reinterpret_cast<uptr>(unpacked), kBlockSizeBytes);
   atomic_store(&data_, reinterpret_cast<uptr>(unpacked), memory_order_release);
-  UnmapOrDie(ptr, packed_size_aligned);
+  store->Unmap(ptr, packed_size_aligned);
 
   state = State::Unpacked;
   return Get();
 }
 
-uptr StackStore::BlockInfo::Pack(Compression type) {
+uptr StackStore::BlockInfo::Pack(Compression type, StackStore *store) {
   if (type == Compression::None)
     return 0;
 
@@ -294,8 +299,8 @@ uptr StackStore::BlockInfo::Pack(Compression type) {
   if (!ptr || !Stored(0))
     return 0;
 
-  u8 *packed = reinterpret_cast<u8 *>(
-      MmapNoReserveOrDie(kBlockSizeBytes, "StackStorePack"));
+  u8 *packed =
+      reinterpret_cast<u8 *>(store->Map(kBlockSizeBytes, "StackStorePack"));
   PackedHeader *header = reinterpret_cast<PackedHeader *>(packed);
   u8 *alloc_end = packed + kBlockSizeBytes;
 
@@ -323,40 +328,26 @@ uptr StackStore::BlockInfo::Pack(Compression type) {
   if (kBlockSizeBytes - header->size < kBlockSizeBytes / 8) {
     VPrintf(1, "Undo and keep block unpacked\n");
     MprotectReadOnly(reinterpret_cast<uptr>(ptr), kBlockSizeBytes);
-    UnmapOrDie(packed, kBlockSizeBytes);
+    store->Unmap(packed, kBlockSizeBytes);
     state = State::Unpacked;
     return 0;
   }
 
   uptr packed_size_aligned = RoundUpTo(header->size, GetPageSizeCached());
-  UnmapOrDie(packed + packed_size_aligned,
-             kBlockSizeBytes - packed_size_aligned);
+  store->Unmap(packed + packed_size_aligned,
+               kBlockSizeBytes - packed_size_aligned);
   MprotectReadOnly(reinterpret_cast<uptr>(packed), packed_size_aligned);
 
   atomic_store(&data_, reinterpret_cast<uptr>(packed), memory_order_release);
-  UnmapOrDie(ptr, kBlockSizeBytes);
+  store->Unmap(ptr, kBlockSizeBytes);
 
   state = State::Packed;
   return kBlockSizeBytes - packed_size_aligned;
 }
 
-uptr StackStore::BlockInfo::Allocated() const {
-  SpinMutexLock l(&mtx_);
-  switch (state) {
-    case State::Packed: {
-      const PackedHeader *ptr = reinterpret_cast<const PackedHeader *>(Get());
-      CHECK_NE(nullptr, ptr);
-      return RoundUpTo(ptr->size, GetPageSizeCached());
-    }
-    case State::Unpacked:
-    case State::Storing:
-      return kBlockSizeBytes;
-  }
-}
-
-void StackStore::BlockInfo::TestOnlyUnmap() {
+void StackStore::BlockInfo::TestOnlyUnmap(StackStore *store) {
   if (uptr *ptr = Get())
-    UnmapOrDie(ptr, kBlockSizeBytes);
+    store->Unmap(ptr, kBlockSizeBytes);
 }
 
 bool StackStore::BlockInfo::Stored(uptr n) {
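
As a side note on BlockInfo::Create/GetOrCreate in the hunks above: they implement lazy, double-checked creation of a block. The common path is a lock-free load of data_ (the real Get() uses atomic_load_relaxed), and only the first caller takes the spin lock, maps the block, and publishes the pointer with a release store. A minimal sketch of that pattern follows; it substitutes std::atomic, std::mutex, and new[] for the sanitizer's atomic_uintptr_t, SpinMutexLock, and store->Map(), and the LazyBlock name is illustrative only.

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <mutex>

// Illustrative double-checked lazy initialization, modeled on
// BlockInfo::GetOrCreate/Create; not the sanitizer code itself.
class LazyBlock {
 public:
  uint64_t *GetOrCreate(size_t frames) {
    // Fast path: the block was already published by some thread.
    if (uint64_t *ptr = data_.load(std::memory_order_acquire))
      return ptr;
    return Create(frames);
  }

 private:
  uint64_t *Create(size_t frames) {
    std::lock_guard<std::mutex> lock(mtx_);
    // Re-check under the lock; another thread may have won the race.
    uint64_t *ptr = data_.load(std::memory_order_relaxed);
    if (!ptr) {
      ptr = new uint64_t[frames]();                 // stand-in for store->Map()
      data_.store(ptr, std::memory_order_release);  // publish
    }
    return ptr;
  }

  std::atomic<uint64_t *> data_{nullptr};
  std::mutex mtx_;
};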