
#include <stdlib.h>
#include <stddef.h>
+ #include <string.h>


- // internal callback callback
- struct ecallback {
-     void (*cb)(void *);
-     void *data;
- };
-
- static void ecallback_dispatch(void *p) {
-     struct ecallback *e = (struct ecallback *)p;
-     e->cb(e->data);
- }
-
- // equeue functions
- static inline struct event *equeue_event(struct equeue *q, unsigned i) {
-     return (struct event *)((char *)q->buffer + i*q->size);
- }
-
- static inline unsigned equeue_size(unsigned size) {
-     if (size < sizeof(struct ecallback)) {
-         size = sizeof(struct ecallback);
-     }
-
-     unsigned alignment = offsetof(struct { char c; struct event e; }, e);
-     size += sizeof(struct event);
-     return (size + alignment-1) & ~(alignment-1);
- }
-
- int equeue_create(struct equeue *q, unsigned count, unsigned size) {
-     void *buffer = malloc(count * equeue_size(size));
+ int equeue_create(struct equeue *q, unsigned size) {
+     void *buffer = malloc(size);
    if (!buffer) {
        return -1;
    }

-     return equeue_create_inplace(q, count, size, buffer);
+     int err = equeue_create_inplace(q, size, buffer);
+     q->buffer = buffer;
+     return err;
}

- int equeue_create_inplace(struct equeue *q,
-         unsigned count, unsigned size, void *buffer) {
-     q->size = equeue_size(size);
-     q->buffer = buffer;
-     q->free = (struct event *)buffer;
+ int equeue_create_inplace(struct equeue *q, unsigned size, void *buffer) {
+     q->slab.size = size;
+     q->slab.data = buffer;
+     memset(q->chunks, 0, EVENT_CHUNK_LISTS*sizeof(struct event *));
+     q->buffer = 0;
+
    q->queue = 0;
    q->next_id = 42;
    q->break_ = (struct event){
        .id = 0,
        .period = -1,
    };

-     if (q->free) {
-         for (unsigned i = 0; i < count-1; i++) {
-             equeue_event(q, i)->next = equeue_event(q, i+1);
-         }
-         equeue_event(q, count-1)->next = 0;
-     }
-
    int err;
    err = events_sema_create(&q->eventsema);
    if (err < 0) {
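
For reference, a minimal usage sketch of the reworked constructor above: the queue is now sized by a single byte budget that backs the internal slab rather than by an event count and slot size. The header name events.h and the 1024-byte figure are illustrative assumptions, not part of this patch.

    #include "events.h"   /* assumed header name for this library */

    /* Sketch only: create a queue backed by an arbitrary 1 KB buffer. */
    static int example_setup(struct equeue *q) {
        int err = equeue_create(q, 1024);
        if (err < 0) {
            return err;  /* malloc or platform primitive setup failed */
        }
        return 0;
    }
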
@@ -84,56 +55,100 @@ void equeue_destroy(struct equeue *q) {
    free(q->buffer);
}

- // equeue mem functions
- static int equeue_next_id(struct equeue *q) {
+ // equeue allocation functions
+ static inline unsigned equeue_size(unsigned size) {
+     size += sizeof(struct event);
+     unsigned alignment = offsetof(struct { char c; struct event e; }, e);
+     return (size + alignment-1) & ~(alignment-1);
+ }
+
+ static struct event *equeue_alloc(struct equeue *q, unsigned size) {
+     size = equeue_size(size);
+
+     events_mutex_lock(&q->freelock);
+
+     for (int i = 0; i < EVENT_CHUNK_LISTS; i++) {
+         if (q->chunks[i] && q->chunks[i]->size >= size) {
+             struct event *e = q->chunks[i];
+             q->chunks[i] = e->next;
+             events_mutex_unlock(&q->freelock);
+             return e;
+         }
+     }
+
+     if (q->slab.size >= size) {
+         struct event *e = (struct event *)q->slab.data;
+         q->slab.data += size;
+         q->slab.size -= size;
+         e->size = size;
+         events_mutex_unlock(&q->freelock);
+         return e;
+     }
+
+     events_mutex_unlock(&q->freelock);
+     return 0;
+ }
+
+ static void equeue_dealloc(struct equeue *q, struct event *e) {
+     int i = 0;
+
+     events_mutex_lock(&q->freelock);
+
+     for (; i < EVENT_CHUNK_LISTS-1; i++) {
+         if (q->chunks[i+1] && q->chunks[i+1]->size >= e->size) {
+             break;
+         }
+     }
+
+     e->next = q->chunks[i];
+     q->chunks[i] = e;
+
+     events_mutex_unlock(&q->freelock);
+ }
+
+ // event allocation functions
+ static inline int event_next_id(struct equeue *q) {
    int id = q->next_id++;
    if (q->next_id < 0) {
        q->next_id = 42;
    }
    return id;
}

- static struct event *equeue_alloc(struct equeue *q) {
-     struct event *e = 0;
-
-     events_mutex_lock(&q->freelock);
-     if (!q->free) {
-         events_mutex_unlock(&q->freelock);
+ void *event_alloc(struct equeue *q, unsigned size) {
+     struct event *e = equeue_alloc(q, size);
+     if (!e) {
        return 0;
    }

-     e = q->free;
-     q->free = e->next;
-     events_mutex_unlock(&q->freelock);
-
-     e->id = equeue_next_id(q);
+     e->id = event_next_id(q);
    e->target = 0;
    e->period = -1;
    e->dtor = 0;
-     return e;
+
+     return e + 1;
}

- static void equeue_dealloc(struct equeue *q, struct event *e) {
+ void event_dealloc(struct equeue *q, void *p) {
+     struct event *e = (struct event *)p - 1;
+
    if (e->dtor) {
        e->dtor(e + 1);
    }

-     events_mutex_lock(&q->freelock);
-     e->next = q->free;
-     q->free = e;
-     events_mutex_unlock(&q->freelock);
+     equeue_dealloc(q, e);
}

// equeue scheduling functions
- static inline int tickdiff(unsigned a, unsigned b) {
+ static inline int equeue_tickdiff(unsigned a, unsigned b) {
    return (int)(a - b);
}

static int equeue_enqueue(struct equeue *q, struct event *e, int ms) {
    e->target = events_tick() + (unsigned)ms;

    struct event **p = &q->queue;
-     while (*p && tickdiff((*p)->target, e->target) <= 0) {
+     while (*p && equeue_tickdiff((*p)->target, e->target) <= 0) {
        p = &(*p)->next;
    }

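
As a sanity check on the size rounding used by the new allocator, here is a standalone sketch of the same padding math with a stub header struct. The stub fields are placeholders; only the offsetof-based alignment trick mirrors the patch.

    #include <stdio.h>
    #include <stddef.h>

    /* Stand-in for struct event; fields are illustrative only. */
    struct event_stub {
        struct event_stub *next;
        unsigned size;
        int id;
    };

    /* Same pattern as equeue_size above: reserve room for the header,
     * then round up to the struct's natural alignment so that carving
     * the slab never produces a misaligned event. */
    static unsigned stub_size(unsigned size) {
        size += sizeof(struct event_stub);
        unsigned alignment = offsetof(struct { char c; struct event_stub e; }, e);
        return (size + alignment - 1) & ~(alignment - 1);
    }

    int main(void) {
        printf("10-byte payload -> %u-byte chunk\n", stub_size(10));
        return 0;
    }

Because both allocation and deallocation work with the rounded size, a freed chunk filed into q->chunks can later satisfy any request whose rounded size is no larger than its own.
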
@@ -169,7 +184,7 @@ static void equeue_cancel(struct equeue *q, int id) {
    events_mutex_unlock(&q->queuelock);

    if (e) {
-         equeue_dealloc(q, e);
+         event_dealloc(q, e + 1);
    }
}

@@ -194,7 +209,7 @@ void equeue_dispatch(struct equeue *q, int ms) {
            break;
        }

-         deadline = tickdiff(q->queue->target, events_tick());
+         deadline = equeue_tickdiff(q->queue->target, events_tick());
        if (deadline > 0) {
            events_mutex_unlock(&q->queuelock);
            break;
@@ -218,7 +233,7 @@ void equeue_dispatch(struct equeue *q, int ms) {
        e->cb(e + 1);

        if (e->period < 0) {
-             equeue_dealloc(q, e);
+             event_dealloc(q, e + 1);
        }
    }

@@ -227,25 +242,6 @@ void equeue_dispatch(struct equeue *q, int ms) {
}

// event functions
- void *event_alloc(struct equeue *q, unsigned size) {
-     if (size > q->size - sizeof(struct event)) {
-         return 0;
-     }
-
-     struct event *e = equeue_alloc(q);
-     if (!e) {
-         return 0;
-     }
-
-     return e + 1;
- }
-
- void event_dealloc(struct equeue *q, void *p) {
-     struct event *e = (struct event *)p - 1;
-     equeue_dealloc(q, e);
- }
-
- // configuring events
void event_delay(void *p, int ms) {
    struct event *e = (struct event *)p - 1;
    e->target = ms;
@@ -273,7 +269,17 @@ void event_cancel(struct equeue *q, int id) {
    return equeue_cancel(q, id);
}

- // event helper functions
+ // simple callbacks
+ struct ecallback {
+     void (*cb)(void *);
+     void *data;
+ };
+
+ static void ecallback_dispatch(void *p) {
+     struct ecallback *e = (struct ecallback *)p;
+     e->cb(e->data);
+ }
+
int event_call(struct equeue *q, void (*cb)(void *), void *data) {
    struct ecallback *e = event_alloc(q, sizeof(struct ecallback));
    if (!e) {
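
Finally, a small usage sketch tying the pieces together by posting a plain callback through the relocated ecallback helper path. The hello function, its null argument, and the events.h header name are illustrative assumptions; event_call and struct equeue come from this file.

    #include "events.h"   /* assumed header name for this library */

    static void hello(void *data) {
        (void)data;
        /* invoked from equeue_dispatch when the event comes due */
    }

    /* Sketch only: enqueue hello with no payload via the ecallback wrapper. */
    static int post_hello(struct equeue *q) {
        return event_call(q, hello, 0);
    }
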