Skip to content
This repository was archived by the owner on Aug 19, 2021. It is now read-only.

Commit d8bdcdf

Browse files
committed
Added specialized autochunking allocator to support dynamic allocations
The autochunking allocator has some interesting properties: - O(1) allocation and deallocation - zero fragmentation for same-sized allocations - no external fragmentation over time The biggest downside of the allocator is that the allocator performs no coalescing. This can create excessive fragmentation when used with a large number of different-sized allocations.
1 parent f83507a commit d8bdcdf

File tree

2 files changed

+104
-91
lines changed

2 files changed

+104
-91
lines changed

events.c

Lines changed: 91 additions & 85 deletions
Original file line numberDiff line numberDiff line change
@@ -2,62 +2,33 @@
22

33
#include <stdlib.h>
44
#include <stddef.h>
5+
#include <string.h>
56

67

7-
// internal callback callback
8-
struct ecallback {
9-
void (*cb)(void*);
10-
void *data;
11-
};
12-
13-
static void ecallback_dispatch(void *p) {
14-
struct ecallback *e = (struct ecallback*)p;
15-
e->cb(e->data);
16-
}
17-
18-
// equeue functions
19-
static inline struct event *equeue_event(struct equeue *q, unsigned i) {
20-
return (struct event*)((char*)q->buffer + i*q->size);
21-
}
22-
23-
static inline unsigned equeue_size(unsigned size) {
24-
if (size < sizeof(struct ecallback)) {
25-
size = sizeof(struct ecallback);
26-
}
27-
28-
unsigned alignment = offsetof(struct { char c; struct event e; }, e);
29-
size += sizeof(struct event);
30-
return (size + alignment-1) & ~(alignment-1);
31-
}
32-
33-
int equeue_create(struct equeue *q, unsigned count, unsigned size) {
34-
void *buffer = malloc(count * equeue_size(size));
8+
int equeue_create(struct equeue *q, unsigned size) {
9+
void *buffer = malloc(size);
3510
if (!buffer) {
3611
return -1;
3712
}
3813

39-
return equeue_create_inplace(q, count, size, buffer);
14+
int err = equeue_create_inplace(q, size, buffer);
15+
q->buffer = buffer;
16+
return err;
4017
}
4118

42-
int equeue_create_inplace(struct equeue *q,
43-
unsigned count, unsigned size, void *buffer) {
44-
q->size = equeue_size(size);
45-
q->buffer = buffer;
46-
q->free = (struct event*)buffer;
19+
int equeue_create_inplace(struct equeue *q, unsigned size, void *buffer) {
20+
q->slab.size = size;
21+
q->slab.data = buffer;
22+
memset(q->chunks, 0, EVENT_CHUNK_LISTS*sizeof(struct event*));
23+
q->buffer = 0;
24+
4725
q->queue = 0;
4826
q->next_id = 42;
4927
q->break_ = (struct event){
5028
.id = 0,
5129
.period = -1,
5230
};
5331

54-
if (q->free) {
55-
for (unsigned i = 0; i < count-1; i++) {
56-
equeue_event(q, i)->next = equeue_event(q, i+1);
57-
}
58-
equeue_event(q, count-1)->next = 0;
59-
}
60-
6132
int err;
6233
err = events_sema_create(&q->eventsema);
6334
if (err < 0) {
@@ -84,56 +55,100 @@ void equeue_destroy(struct equeue *q) {
8455
free(q->buffer);
8556
}
8657

87-
// equeue mem functions
88-
static int equeue_next_id(struct equeue *q) {
58+
// equeue allocation functions
59+
static inline unsigned equeue_size(unsigned size) {
60+
size += sizeof(struct event);
61+
unsigned alignment = offsetof(struct { char c; struct event e; }, e);
62+
return (size + alignment-1) & ~(alignment-1);
63+
}
64+
65+
static struct event *equeue_alloc(struct equeue *q, unsigned size) {
66+
size = equeue_size(size);
67+
68+
events_mutex_lock(&q->freelock);
69+
70+
for (int i = 0; i < EVENT_CHUNK_LISTS; i++) {
71+
if (q->chunks[i] && q->chunks[i]->size >= size) {
72+
struct event *e = q->chunks[i];
73+
q->chunks[i] = e->next;
74+
events_mutex_unlock(&q->freelock);
75+
return e;
76+
}
77+
}
78+
79+
if (q->slab.size >= size) {
80+
struct event *e = (struct event *)q->slab.data;
81+
q->slab.data += size;
82+
q->slab.size -= size;
83+
e->size = size;
84+
events_mutex_unlock(&q->freelock);
85+
return e;
86+
}
87+
88+
events_mutex_unlock(&q->freelock);
89+
return 0;
90+
}
91+
92+
static void equeue_dealloc(struct equeue *q, struct event *e) {
93+
int i = 0;
94+
95+
events_mutex_lock(&q->freelock);
96+
97+
for (; i < EVENT_CHUNK_LISTS-1; i++) {
98+
if (q->chunks[i+1] && q->chunks[i+1]->size >= e->size) {
99+
break;
100+
}
101+
}
102+
103+
e->next = q->chunks[i];
104+
q->chunks[i] = e;
105+
106+
events_mutex_unlock(&q->freelock);
107+
}
108+
109+
// event allocation functions
110+
static inline int event_next_id(struct equeue *q) {
89111
int id = q->next_id++;
90112
if (q->next_id < 0) {
91113
q->next_id = 42;
92114
}
93115
return id;
94116
}
95117

96-
static struct event *equeue_alloc(struct equeue *q) {
97-
struct event *e = 0;
98-
99-
events_mutex_lock(&q->freelock);
100-
if (!q->free) {
101-
events_mutex_unlock(&q->freelock);
118+
void *event_alloc(struct equeue *q, unsigned size) {
119+
struct event *e = equeue_alloc(q, size);
120+
if (!e) {
102121
return 0;
103122
}
104123

105-
e = q->free;
106-
q->free = e->next;
107-
events_mutex_unlock(&q->freelock);
108-
109-
e->id = equeue_next_id(q);
124+
e->id = event_next_id(q);
110125
e->target = 0;
111126
e->period = -1;
112127
e->dtor = 0;
113-
return e;
128+
129+
return e + 1;
114130
}
115131

116-
static void equeue_dealloc(struct equeue *q, struct event *e) {
132+
void event_dealloc(struct equeue *q, void *p) {
133+
struct event *e = (struct event*)p - 1;
134+
117135
if (e->dtor) {
118136
e->dtor(e+1);
119137
}
120138

121-
events_mutex_lock(&q->freelock);
122-
e->next = q->free;
123-
q->free = e;
124-
events_mutex_unlock(&q->freelock);
139+
equeue_dealloc(q, e);
125140
}
126141

127142
// equeue scheduling functions
128-
static inline int tickdiff(unsigned a, unsigned b) {
143+
static inline int equeue_tickdiff(unsigned a, unsigned b) {
129144
return (int)(a - b);
130145
}
131146

132147
static int equeue_enqueue(struct equeue *q, struct event *e, int ms) {
133148
e->target = events_tick() + (unsigned)ms;
134149

135150
struct event **p = &q->queue;
136-
while (*p && tickdiff((*p)->target, e->target) <= 0) {
151+
while (*p && equeue_tickdiff((*p)->target, e->target) <= 0) {
137152
p = &(*p)->next;
138153
}
139154

@@ -169,7 +184,7 @@ static void equeue_cancel(struct equeue *q, int id) {
169184
events_mutex_unlock(&q->queuelock);
170185

171186
if (e) {
172-
equeue_dealloc(q, e);
187+
event_dealloc(q, e+1);
173188
}
174189
}
175190

@@ -194,7 +209,7 @@ void equeue_dispatch(struct equeue *q, int ms) {
194209
break;
195210
}
196211

197-
deadline = tickdiff(q->queue->target, events_tick());
212+
deadline = equeue_tickdiff(q->queue->target, events_tick());
198213
if (deadline > 0) {
199214
events_mutex_unlock(&q->queuelock);
200215
break;
@@ -218,7 +233,7 @@ void equeue_dispatch(struct equeue *q, int ms) {
218233
e->cb(e + 1);
219234

220235
if (e->period < 0) {
221-
equeue_dealloc(q, e);
236+
event_dealloc(q, e+1);
222237
}
223238
}
224239

@@ -227,25 +242,6 @@ void equeue_dispatch(struct equeue *q, int ms) {
227242
}
228243

229244
// event functions
230-
void *event_alloc(struct equeue *q, unsigned size) {
231-
if (size > q->size - sizeof(struct event)) {
232-
return 0;
233-
}
234-
235-
struct event *e = equeue_alloc(q);
236-
if (!e) {
237-
return 0;
238-
}
239-
240-
return e + 1;
241-
}
242-
243-
void event_dealloc(struct equeue *q, void *p) {
244-
struct event *e = (struct event*)p - 1;
245-
equeue_dealloc(q, e);
246-
}
247-
248-
// configuring events
249245
void event_delay(void *p, int ms) {
250246
struct event *e = (struct event*)p - 1;
251247
e->target = ms;
@@ -273,7 +269,17 @@ void event_cancel(struct equeue *q, int id) {
273269
return equeue_cancel(q, id);
274270
}
275271

276-
// event helper functions
272+
// simple callbacks
273+
struct ecallback {
274+
void (*cb)(void*);
275+
void *data;
276+
};
277+
278+
static void ecallback_dispatch(void *p) {
279+
struct ecallback *e = (struct ecallback*)p;
280+
e->cb(e->data);
281+
}
282+
277283
int event_call(struct equeue *q, void (*cb)(void*), void *data) {
278284
struct ecallback *e = event_alloc(q, sizeof(struct ecallback));
279285
if (!e) {

events.h

Lines changed: 13 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -14,8 +14,12 @@ extern "C" {
1414
#include "sys/events_sema.h"
1515

1616

17+
// Number of free-chunk lists per equeue
18+
#define EVENT_CHUNK_LISTS 4
19+
1720
// Event/queue structures
1821
struct event {
22+
unsigned size;
1923
struct event *next;
2024
int id;
2125
unsigned target;
@@ -27,12 +31,16 @@ struct event {
2731
};
2832

2933
struct equeue {
30-
unsigned size;
3134
struct event *queue;
32-
struct event *free;
33-
void *buffer;
3435
int next_id;
3536

37+
void *buffer;
38+
struct event *chunks[EVENT_CHUNK_LISTS];
39+
struct {
40+
unsigned size;
41+
unsigned char *data;
42+
} slab;
43+
3644
struct event break_;
3745

3846
events_sema_t eventsema;
@@ -43,9 +51,8 @@ struct equeue {
4351
// Queue operations
4452
//
4553
// Creation results in negative value on failure.
46-
int equeue_create(struct equeue*, unsigned count, unsigned size);
47-
int equeue_create_inplace(struct equeue*,
48-
unsigned count, unsigned size, void *buffer);
54+
int equeue_create(struct equeue*, unsigned size);
55+
int equeue_create_inplace(struct equeue*, unsigned size, void *buffer);
4956
void equeue_destroy(struct equeue*);
5057

5158
// Dispatch events

0 commit comments

Comments
 (0)