 *
 *   (nactive >> opt_lg_dirty_mult) >= ndirty
 *
- * So, supposing that opt_lg_dirty_mult is 5, there can be no less than 32
- * times as many active pages as dirty pages.
+ * So, supposing that opt_lg_dirty_mult is 3, there can be no less than 8 times
+ * as many active pages as dirty pages.
 */

-#define LG_DIRTY_MULT_DEFAULT	5
+#define LG_DIRTY_MULT_DEFAULT	3

 typedef struct arena_chunk_map_s arena_chunk_map_t;
 typedef struct arena_chunk_s arena_chunk_t;

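To make the new threshold concrete, here is a minimal sketch of the purge check implied by the invariant above; purge_needed() is an illustrative helper, not jemalloc's internal API.

#include <stdbool.h>
#include <stddef.h>

#define LG_DIRTY_MULT_DEFAULT	3

/*
 * The invariant is (nactive >> lg_dirty_mult) >= ndirty, so purging
 * becomes necessary as soon as dirty pages exceed that bound.
 * Illustrative helper only.
 */
static bool
purge_needed(size_t nactive, size_t ndirty, size_t lg_dirty_mult)
{

	return ((nactive >> lg_dirty_mult) < ndirty);
}

With the new default of 3, an arena with 8192 active pages may keep up to 8192 >> 3 = 1024 dirty pages (an 8:1 active:dirty floor); the old default of 5 allowed only 256 (32:1), i.e. this change purges less aggressively.
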
@@ -69,7 +69,7 @@ struct arena_chunk_map_s {
 	/*
 	 * Linkage for run trees.  There are two disjoint uses:
 	 *
-	 * 1) arena_t's runs_avail_{clean,dirty} trees.
+	 * 1) arena_t's runs_avail tree.
 	 * 2) arena_run_t conceptually uses this linkage for in-use
 	 *    non-full runs, rather than directly embedding linkage.
 	 */

@@ -162,28 +162,32 @@ typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
 /* Arena chunk header. */
 struct arena_chunk_s {
 	/* Arena that owns the chunk. */
-	arena_t		*arena;
+	arena_t			*arena;

-	/* Linkage for the arena's chunks_dirty list. */
-	ql_elm(arena_chunk_t) link_dirty;
-
-	/*
-	 * True if the chunk is currently in the chunks_dirty list, due to
-	 * having at some point contained one or more dirty pages.  Removal
-	 * from chunks_dirty is lazy, so (dirtied && ndirty == 0) is possible.
-	 */
-	bool		dirtied;
+	/* Linkage for tree of arena chunks that contain dirty runs. */
+	rb_node(arena_chunk_t)	dirty_link;

 	/* Number of dirty pages. */
-	size_t		ndirty;
+	size_t			ndirty;
+
+	/* Number of available runs. */
+	size_t			nruns_avail;
+
+	/*
+	 * Number of available run adjacencies.  Clean and dirty available runs
+	 * are not coalesced, which causes virtual memory fragmentation.  The
+	 * ratio of (nruns_avail-nruns_adjac):nruns_adjac is used for tracking
+	 * this fragmentation.
+	 */
+	size_t			nruns_adjac;

 	/*
 	 * Map of pages within chunk that keeps track of free/large/small.  The
 	 * first map_bias entries are omitted, since the chunk header does not
 	 * need to be tracked in the map.  This omission saves a header page
 	 * for common chunk sizes (e.g. 4 MiB).
 	 */
-	arena_chunk_map_t map[1]; /* Dynamically sized. */
+	arena_chunk_map_t	map[1]; /* Dynamically sized. */
 };
 typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;

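As a reading aid for the new counters, the hypothetical helper below restates the comment's (nruns_avail-nruns_adjac):nruns_adjac ratio as a single fraction; it is not part of this change, only an illustration of what the fields measure.

#include <stddef.h>

/*
 * Hypothetical helper: the share of available runs that sit next to an
 * uncoalesced neighbor of the other cleanliness class.  0.0 means no
 * clean/dirty adjacencies; higher values mean more VM fragmentation.
 */
static double
chunk_frag_fraction(const arena_chunk_t *chunk)
{

	if (chunk->nruns_avail == 0)
		return (0.0);
	return ((double)chunk->nruns_adjac / (double)chunk->nruns_avail);
}
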
@@ -333,8 +337,8 @@ struct arena_s {

 	dss_prec_t		dss_prec;

-	/* List of dirty-page-containing chunks this arena manages. */
-	ql_head(arena_chunk_t)	chunks_dirty;
+	/* Tree of dirty-page-containing chunks this arena manages. */
+	arena_chunk_tree_t	chunks_dirty;

 	/*
 	 * In order to avoid rapid chunk allocation/deallocation when an arena

@@ -369,18 +373,9 @@ struct arena_s {

 	/*
 	 * Size/address-ordered trees of this arena's available runs.  The trees
-	 * are used for first-best-fit run allocation.  The dirty tree contains
-	 * runs with dirty pages (i.e. very likely to have been touched and
-	 * therefore have associated physical pages), whereas the clean tree
-	 * contains runs with pages that either have no associated physical
-	 * pages, or have pages that the kernel may recycle at any time due to
-	 * previous madvise(2) calls.  The dirty tree is used in preference to
-	 * the clean tree for allocations, because using dirty pages reduces
-	 * the amount of dirty purging necessary to keep the active:dirty page
-	 * ratio below the purge threshold.
+	 * are used for first-best-fit run allocation.
 	 */
-	arena_avail_tree_t	runs_avail_clean;
-	arena_avail_tree_t	runs_avail_dirty;
+	arena_avail_tree_t	runs_avail;

 	/* bins is used to store trees of free regions. */
 	arena_bin_t		bins[NBINS];