
Commit c5f8fe2

---
yaml --- r: 27945 b: refs/heads/try c: f24e0e7 h: refs/heads/master i: 27943: 255da57 v: v3

1 parent d9004e7

File tree: 2 files changed (+78, -23 lines)


[refs]

Lines changed: 1 addition & 1 deletion

@@ -2,7 +2,7 @@
 refs/heads/master: cd6f24f9d14ac90d167386a56e7a6ac1f0318195
 refs/heads/snap-stage1: e33de59e47c5076a89eadeb38f4934f58a3618a6
 refs/heads/snap-stage3: cd6f24f9d14ac90d167386a56e7a6ac1f0318195
-refs/heads/try: 5f32e180118b8163821e3276d4234f4d7e2f1eeb
+refs/heads/try: f24e0e7b13be0bbead7d7f27b0af5943235c04da
 refs/tags/release-0.1: 1f5c5126e96c79d22cb7862f75304136e204f105
 refs/heads/ndm: f3868061cd7988080c30d6d5bf352a5a5fe2460b
 refs/heads/try2: d0c6ce338884ee21843f4b40bf6bf18d222ce5df

branches/try/src/libstd/arena.rs

Lines changed: 77 additions & 22 deletions
@@ -17,9 +17,10 @@
 // order bit of the tydesc pointer to encode whether the object it
 // describes has been fully initialized.

-// A good extension of this scheme would be to segregate data with and
-// without destructors in order to avoid the overhead in the
-// plain-old-data case.
+// As an optimization, objects with destructors are stored in
+// different chunks than objects without destructors. This reduces
+// overhead when initializing plain-old-data and means we don't need
+// to waste time running the destructors of POD.

 export arena, arena_with_size;

@@ -32,6 +33,7 @@ import libc::size_t;
 #[abi = "rust-intrinsic"]
 extern mod rusti {
     fn move_val_init<T>(&dst: T, -src: T);
+    fn needs_drop<T>() -> bool;
 }
 extern mod rustrt {
     #[rust_stack]
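Note: the new `needs_drop` intrinsic is the hook that lets `alloc` (in the last hunk below) pick a per-type path at compile time. Its stable descendant in today's Rust is `std::mem::needs_drop`; a minimal modern-Rust check of the predicate it exposes (illustrative, not the 2012 dialect):

use std::mem::needs_drop;

fn main() {
    assert!(!needs_drop::<u32>());       // plain old data
    assert!(!needs_drop::<[u8; 16]>());  // POD-ness is recursive
    assert!(needs_drop::<String>());     // owns a heap buffer, so has drop glue
}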
@@ -44,30 +46,34 @@ const tydesc_drop_glue_index: size_t = 3 as size_t;
 // The way arena uses arrays is really deeply awful. The arrays are
 // allocated, and have capacities reserved, but the fill for the array
 // will always stay at 0.
-type chunk = {data: ~[u8], mut fill: uint};
+type chunk = {data: ~[u8], mut fill: uint, is_pod: bool};

 struct arena {
     // The head is seperated out from the list as a unbenchmarked
     // microoptimization, to avoid needing to case on the list to
     // access the head.
     priv mut head: @chunk;
+    priv mut pod_head: @chunk;
     priv mut chunks: @list<@chunk>;
     drop {
         unsafe {
             destroy_chunk(self.head);
-            for list::each(self.chunks) |chunk| { destroy_chunk(chunk); }
+            for list::each(self.chunks) |chunk| {
+                if !chunk.is_pod { destroy_chunk(chunk); }
+            }
         }
     }
 }

-fn chunk(size: uint) -> @chunk {
+fn chunk(size: uint, is_pod: bool) -> @chunk {
     let mut v = ~[];
     vec::reserve(v, size);
-    @{ data: v, mut fill: 0u }
+    @{ data: v, mut fill: 0u, is_pod: is_pod }
 }

 fn arena_with_size(initial_size: uint) -> arena {
-    return arena {mut head: chunk(initial_size),
+    return arena {mut head: chunk(initial_size, false),
+                  mut pod_head: chunk(initial_size, true),
                   mut chunks: @nil};
 }
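Note: for orientation, here is the data layout after this hunk, sketched in modern Rust (names mirror the patch, but this is illustrative rather than the historical API). Each kind of allocation gets its own head chunk, retired chunks of both kinds share one list, and teardown runs the destructor walk only over non-POD chunks:

struct Chunk {
    data: Vec<u8>,  // capacity reserved up front; like the patch's `data`,
    fill: usize,    // the length stays 0 and `fill` tracks usage
    is_pod: bool,   // POD chunks hold no destructors to run
}

struct Arena {
    head: Chunk,        // current non-POD chunk
    pod_head: Chunk,    // current POD chunk
    chunks: Vec<Chunk>, // retired chunks of both kinds
}

fn destroy_chunk(_c: &Chunk) { /* walk stored tydescs, run drop glue */ }

impl Drop for Arena {
    fn drop(&mut self) {
        destroy_chunk(&self.head); // head is always non-POD
        for c in self.chunks.iter().filter(|c| !c.is_pod) {
            destroy_chunk(c);      // POD chunks are simply freed
        }
    }
}

fn main() {
    let a = Arena {
        head: Chunk { data: Vec::with_capacity(64), fill: 0, is_pod: false },
        pod_head: Chunk { data: Vec::with_capacity(64), fill: 0, is_pod: true },
        chunks: Vec::new(),
    };
    drop(a); // destructor walk visits only non-POD chunks
}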

@@ -122,49 +128,90 @@ unsafe fn un_bitpack_tydesc_ptr(p: uint) -> (*TypeDesc, bool) {
     (reinterpret_cast(p & !1), p & 1 == 1)
 }

-
+// The duplication between the POD and non-POD functions is annoying.
 impl &arena {
-    fn alloc_grow(n_bytes: uint, align: uint) -> (*u8, *u8) {
+    // Functions for the POD part of the arena
+    fn alloc_pod_grow(n_bytes: uint, align: uint) -> *u8 {
+        // Allocate a new chunk.
+        let chunk_size = vec::capacity(self.pod_head.data);
+        let new_min_chunk_size = uint::max(n_bytes, chunk_size);
+        self.chunks = @cons(self.pod_head, self.chunks);
+        self.pod_head =
+            chunk(uint::next_power_of_two(new_min_chunk_size + 1u), true);
+
+        return self.alloc_pod_inner(n_bytes, align);
+    }
+
+    #[inline(always)]
+    fn alloc_pod_inner(n_bytes: uint, align: uint) -> *u8 {
+        let head = self.pod_head;
+
+        let start = round_up_to(head.fill, align);
+        let end = start + n_bytes;
+        if end > vec::capacity(head.data) {
+            return self.alloc_pod_grow(n_bytes, align);
+        }
+        head.fill = end;
+
+        //debug!("idx = %u, size = %u, align = %u, fill = %u",
+        //       start, n_bytes, align, head.fill);
+
+        unsafe {
+            ptr::offset(vec::unsafe::to_ptr(head.data), start)
+        }
+    }
+
+    #[inline(always)]
+    fn alloc_pod<T>(op: fn() -> T) -> &self/T {
+        unsafe {
+            let tydesc = sys::get_type_desc::<T>();
+            let ptr = self.alloc_pod_inner((*tydesc).size, (*tydesc).align);
+            let ptr: *mut T = reinterpret_cast(ptr);
+            rusti::move_val_init(*ptr, op());
+            return reinterpret_cast(ptr);
+        }
+    }
+
+    // Functions for the non-POD part of the arena
+    fn alloc_nonpod_grow(n_bytes: uint, align: uint) -> (*u8, *u8) {
         // Allocate a new chunk.
         let chunk_size = vec::capacity(self.head.data);
         let new_min_chunk_size = uint::max(n_bytes, chunk_size);
         self.chunks = @cons(self.head, self.chunks);
-        self.head = chunk(uint::next_power_of_two(new_min_chunk_size + 1u));
+        self.head =
+            chunk(uint::next_power_of_two(new_min_chunk_size + 1u), false);

-        return self.alloc_inner(n_bytes, align);
+        return self.alloc_nonpod_inner(n_bytes, align);
     }

     #[inline(always)]
-    fn alloc_inner(n_bytes: uint, align: uint) -> (*u8, *u8) {
+    fn alloc_nonpod_inner(n_bytes: uint, align: uint) -> (*u8, *u8) {
         let head = self.head;

+        let tydesc_start = head.fill;
         let after_tydesc = head.fill + sys::size_of::<*TypeDesc>();
-
         let start = round_up_to(after_tydesc, align);
         let end = start + n_bytes;
         if end > vec::capacity(head.data) {
-            return self.alloc_grow(n_bytes, align);
+            return self.alloc_nonpod_grow(n_bytes, align);
         }
+        head.fill = round_up_to(end, sys::pref_align_of::<*TypeDesc>());

         //debug!("idx = %u, size = %u, align = %u, fill = %u",
         //       start, n_bytes, align, head.fill);

         unsafe {
             let buf = vec::unsafe::to_ptr(head.data);
-            let tydesc_p = ptr::offset(buf, head.fill);
-            let p = ptr::offset(buf, start);
-            head.fill = round_up_to(end, sys::pref_align_of::<*TypeDesc>());
-
-            return (tydesc_p, p);
+            return (ptr::offset(buf, tydesc_start), ptr::offset(buf, start));
         }
     }

     #[inline(always)]
-    fn alloc<T>(op: fn() -> T) -> &self/T {
+    fn alloc_nonpod<T>(op: fn() -> T) -> &self/T {
         unsafe {
             let tydesc = sys::get_type_desc::<T>();
             let (ty_ptr, ptr) =
-                self.alloc_inner((*tydesc).size, (*tydesc).align);
+                self.alloc_nonpod_inner((*tydesc).size, (*tydesc).align);
             let ty_ptr: *mut uint = reinterpret_cast(ty_ptr);
             let ptr: *mut T = reinterpret_cast(ptr);
             // Write in our tydesc along with a bit indicating that it
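Note: both inner paths lean on two small bit tricks the diff only shows in passing: `round_up_to` for alignment (defined elsewhere in the file) and the low-bit tag that `un_bitpack_tydesc_ptr` strips in this hunk's context line. A hedged modern-Rust sketch of both, assuming `align` is a power of two:

// Round `base` up to the next multiple of `align` (a power of two);
// this is what places `start` after the tydesc header above.
fn round_up_to(base: usize, align: usize) -> usize {
    (base + align - 1) & !(align - 1)
}

// The tydesc pointer is word-aligned, so its low bit is free to record
// whether the object it describes has been fully initialized.
fn bitpack(tydesc: usize, is_done: bool) -> usize {
    tydesc | (is_done as usize)
}

fn un_bitpack(p: usize) -> (usize, bool) {
    (p & !1, p & 1 == 1)
}

fn main() {
    assert_eq!(round_up_to(13, 8), 16);
    assert_eq!(un_bitpack(bitpack(0x1000, true)), (0x1000, true));
}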
@@ -179,6 +226,14 @@ impl &arena {
             return reinterpret_cast(ptr);
         }
     }
+
+    // The external interface
+    #[inline(always)]
+    fn alloc<T>(op: fn() -> T) -> &self/T {
+        if !rusti::needs_drop::<T>() {
+            self.alloc_pod(op)
+        } else { self.alloc_nonpod(op) }
+    }
 }

 #[test]
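Note: the external `alloc` is a compile-time dispatch: after monomorphization the `needs_drop` branch should fold to a constant, so POD callers never touch the tydesc machinery. The same shape in modern Rust (the two path functions here are stand-in stubs of my own, not the historical implementations):

use std::mem::needs_drop;

fn alloc<T>(make: impl FnOnce() -> T) -> T {
    if !needs_drop::<T>() {
        alloc_pod(make)    // fast path: no tydesc header, no drop bookkeeping
    } else {
        alloc_nonpod(make) // records how to drop the value at arena teardown
    }
}

// Stand-in stubs for the two arena paths in the patch.
fn alloc_pod<T>(make: impl FnOnce() -> T) -> T { make() }
fn alloc_nonpod<T>(make: impl FnOnce() -> T) -> T { make() }

fn main() {
    let s = alloc(|| String::from("non-POD"));
    let n = alloc(|| 42u32); // POD path
    println!("{s} {n}");
}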
