
Commit e3d1306

Author: Jason Evans

Purge unused dirty pages in a fragmentation-reducing order.
Purge unused dirty pages in an order that first performs clean/dirty run defragmentation, in order to mitigate available run fragmentation.

Remove the limitation that prevented purging unless at least one chunk worth of dirty pages had accumulated in an arena. This limitation was intended to avoid excessive purging for small applications, but the threshold was arbitrary, and the effect of questionable utility.

Relax opt_lg_dirty_mult from 5 to 3. This compensates for increased likelihood of allocating clean runs, given the same ratio of clean:dirty runs, and reduces the potential for repeated purging in pathological large malloc/free loops that push the active:dirty page ratio just over the purge threshold.
1 parent 34457f5 commit e3d1306
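
To make the ratio change concrete, the purge threshold mentioned above is the shift-based check documented in include/jemalloc/internal/arena.h, (nactive >> opt_lg_dirty_mult) >= ndirty. The sketch below is a minimal, hypothetical illustration of that check (the name should_purge and the sample page counts are invented for this example, not jemalloc code); it shows how lowering the shift from 5 to 3 raises the tolerated dirty-page budget from 1/32 to 1/8 of active pages.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the per-arena option and counters. */
    static size_t lg_dirty_mult = 3;    /* new default; the old default was 5 */

    /* Purging is warranted once dirty pages exceed nactive >> lg_dirty_mult. */
    static bool
    should_purge(size_t nactive, size_t ndirty)
    {
        return (ndirty > (nactive >> lg_dirty_mult));
    }

    int
    main(void)
    {
        /* With 4096 active pages: 2^5 gives a 128-page budget, 2^3 gives 512. */
        printf("lg=5 budget: %zu dirty pages\n", (size_t)4096 >> 5);
        printf("lg=3 budget: %zu dirty pages\n", (size_t)4096 >> 3);
        printf("purge at 600 dirty pages? %d\n", should_purge(4096, 600));
        return 0;
    }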

File tree

4 files changed: +337 -221 lines

ChangeLog
Lines changed: 5 additions & 0 deletions

@@ -8,9 +8,14 @@ found in the git revision history:
 
 * 3.x.x (XXX Not released)
 
+  Incompatible changes:
+  - Change the "opt.lg_dirty_mult" default from 5 to 3 (32:1 to 8:1).
+
   Bug fixes:
   - Fix dss/mmap allocation precedence code to use recyclable mmap memory only
     after primary dss allocation fails.
+  - Fix deadlock in the "arenas.purge" mallctl. This regression was introduced
+    in 3.1.0 by the addition of the "arena.<i>.purge" mallctl.
 
 * 3.1.0 (October 16, 2012)

doc/jemalloc.xml.in
Lines changed: 1 addition & 1 deletion

@@ -833,7 +833,7 @@ for (i = 0; i < nbins; i++) {
     <manvolnum>2</manvolnum></citerefentry> or a similar system call. This
     provides the kernel with sufficient information to recycle dirty pages
     if physical memory becomes scarce and the pages remain unused. The
-    default minimum ratio is 32:1 (2^5:1); an option value of -1 will
+    default minimum ratio is 8:1 (2^3:1); an option value of -1 will
     disable dirty page purging.</para></listitem>
   </varlistentry>
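
As a side note, the value in effect can be read at run time through jemalloc's mallctl interface. The sketch below assumes a jemalloc 3.x build with the unprefixed public API and its standard <jemalloc/jemalloc.h> header; it only reads the read-only "opt.lg_dirty_mult" setting and reports whether purging is disabled (value -1), per the paragraph above.

    #include <stdio.h>
    #include <sys/types.h>
    #include <jemalloc/jemalloc.h>

    int
    main(void)
    {
        ssize_t lg_dirty_mult;
        size_t sz = sizeof(lg_dirty_mult);

        /* "opt.*" mallctls are read-only snapshots of the options in effect. */
        if (mallctl("opt.lg_dirty_mult", &lg_dirty_mult, &sz, NULL, 0) != 0) {
            fprintf(stderr, "mallctl(\"opt.lg_dirty_mult\") failed\n");
            return 1;
        }
        if (lg_dirty_mult < 0)
            printf("dirty page purging is disabled\n");
        else
            printf("minimum active:dirty ratio is %zd:1\n",
                (ssize_t)1 << lg_dirty_mult);
        return 0;
    }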

include/jemalloc/internal/arena.h
Lines changed: 24 additions & 29 deletions
@@ -38,10 +38,10 @@
  *
  * (nactive >> opt_lg_dirty_mult) >= ndirty
  *
- * So, supposing that opt_lg_dirty_mult is 5, there can be no less than 32
- * times as many active pages as dirty pages.
+ * So, supposing that opt_lg_dirty_mult is 3, there can be no less than 8 times
+ * as many active pages as dirty pages.
  */
-#define LG_DIRTY_MULT_DEFAULT 5
+#define LG_DIRTY_MULT_DEFAULT 3
 
 typedef struct arena_chunk_map_s arena_chunk_map_t;
 typedef struct arena_chunk_s arena_chunk_t;
@@ -69,7 +69,7 @@ struct arena_chunk_map_s {
    /*
     * Linkage for run trees. There are two disjoint uses:
     *
-    * 1) arena_t's runs_avail_{clean,dirty} trees.
+    * 1) arena_t's runs_avail tree.
     * 2) arena_run_t conceptually uses this linkage for in-use
     *    non-full runs, rather than directly embedding linkage.
     */
@@ -162,28 +162,32 @@ typedef rb_tree(arena_chunk_map_t) arena_run_tree_t;
 /* Arena chunk header. */
 struct arena_chunk_s {
    /* Arena that owns the chunk. */
-   arena_t            *arena;
+   arena_t                *arena;
 
-   /* Linkage for the arena's chunks_dirty list. */
-   ql_elm(arena_chunk_t) link_dirty;
-
-   /*
-    * True if the chunk is currently in the chunks_dirty list, due to
-    * having at some point contained one or more dirty pages. Removal
-    * from chunks_dirty is lazy, so (dirtied && ndirty == 0) is possible.
-    */
-   bool               dirtied;
+   /* Linkage for tree of arena chunks that contain dirty runs. */
+   rb_node(arena_chunk_t) dirty_link;
 
    /* Number of dirty pages. */
-   size_t             ndirty;
+   size_t                 ndirty;
+
+   /* Number of available runs. */
+   size_t                 nruns_avail;
+
+   /*
+    * Number of available run adjacencies. Clean and dirty available runs
+    * are not coalesced, which causes virtual memory fragmentation. The
+    * ratio of (nruns_avail-nruns_adjac):nruns_adjac is used for tracking
+    * this fragmentation.
+    * */
+   size_t                 nruns_adjac;
 
    /*
     * Map of pages within chunk that keeps track of free/large/small. The
     * first map_bias entries are omitted, since the chunk header does not
     * need to be tracked in the map. This omission saves a header page
     * for common chunk sizes (e.g. 4 MiB).
     */
-   arena_chunk_map_t map[1]; /* Dynamically sized. */
+   arena_chunk_map_t      map[1]; /* Dynamically sized. */
 };
 typedef rb_tree(arena_chunk_t) arena_chunk_tree_t;
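A rough way to read the new counters: nruns_adjac counts adjacent pairs of available runs that stay separate because clean and dirty runs are not coalesced, so (nruns_avail-nruns_adjac):nruns_adjac grows as the chunk becomes less fragmented. The helper below is a hypothetical illustration of that ratio only (the function name and scoring are invented here); the commit's actual use of these counters to order dirty chunks lives in src/arena.c, which is not shown on this page.

    #include <stddef.h>

    /*
     * Hypothetical helper (not from the commit): express the
     * (nruns_avail - nruns_adjac) : nruns_adjac ratio described in the
     * comment above as a single number. Larger values mean fewer
     * uncoalesced clean/dirty neighbors, i.e. less fragmentation.
     */
    static double
    chunk_frag_ratio(size_t nruns_avail, size_t nruns_adjac)
    {
        if (nruns_adjac == 0)
            return (double)nruns_avail;    /* No adjacencies at all. */
        return (double)(nruns_avail - nruns_adjac) / (double)nruns_adjac;
    }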

@@ -333,8 +337,8 @@ struct arena_s {
 
    dss_prec_t             dss_prec;
 
-   /* List of dirty-page-containing chunks this arena manages. */
-   ql_head(arena_chunk_t) chunks_dirty;
+   /* Tree of dirty-page-containing chunks this arena manages. */
+   arena_chunk_tree_t     chunks_dirty;
 
    /*
     * In order to avoid rapid chunk allocation/deallocation when an arena
@@ -369,18 +373,9 @@ struct arena_s {
 
    /*
     * Size/address-ordered trees of this arena's available runs. The trees
-    * are used for first-best-fit run allocation. The dirty tree contains
-    * runs with dirty pages (i.e. very likely to have been touched and
-    * therefore have associated physical pages), whereas the clean tree
-    * contains runs with pages that either have no associated physical
-    * pages, or have pages that the kernel may recycle at any time due to
-    * previous madvise(2) calls. The dirty tree is used in preference to
-    * the clean tree for allocations, because using dirty pages reduces
-    * the amount of dirty purging necessary to keep the active:dirty page
-    * ratio below the purge threshold.
+    * are used for first-best-fit run allocation.
     */
-   arena_avail_tree_t     runs_avail_clean;
-   arena_avail_tree_t     runs_avail_dirty;
+   arena_avail_tree_t     runs_avail;
 
    /* bins is used to store trees of free regions. */
    arena_bin_t            bins[NBINS];
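
For readers unfamiliar with the term, "first-best-fit" over a size/address-ordered structure means taking the lowest-addressed run among the smallest runs that can satisfy a request. The stand-alone sketch below uses a plain sorted array instead of jemalloc's red-black tree, purely to illustrate that search order; the type and function names are invented for this example.

    #include <stddef.h>

    /* Hypothetical stand-in for an available run keyed by (size, address). */
    typedef struct {
        size_t size;
        void *addr;
    } avail_run_t;

    /*
     * runs[] is assumed sorted by size, then by address, mirroring the
     * ordering of the runs_avail tree. The first entry that is large enough
     * is therefore the smallest sufficient run, at the lowest address among
     * runs of that size.
     */
    static avail_run_t *
    first_best_fit(avail_run_t *runs, size_t nruns, size_t want)
    {
        for (size_t i = 0; i < nruns; i++) {
            if (runs[i].size >= want)
                return &runs[i];
        }
        return NULL;    /* No available run is large enough. */
    }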
