@@ -46,6 +46,7 @@ int equeue_create_inplace(struct equeue *q,
     q->free = (struct event *)buffer;
     q->queue = 0;
     q->next_id = 42;
+    q->break_ = (struct event){0};
 
     if (q->free) {
         for (unsigned i = 0; i < count-1; i++) {
@@ -125,7 +126,7 @@ static inline int tickdiff(unsigned a, unsigned b) {
     return (int)(a - b);
 }
 
-static int equeue_requeue(struct equeue *q, struct event *e, int ms) {
+static int equeue_enqueue(struct equeue *q, struct event *e, int ms) {
     e->target = events_tick() + (unsigned)ms;
 
     struct event **p = &q->queue;
@@ -139,37 +140,48 @@ static int equeue_requeue(struct equeue *q, struct event *e, int ms) {
     return e->id;
 }
 
-static int equeue_enqueue(struct equeue *q, struct event *e, int ms) {
+static struct event *equeue_dequeue(struct equeue *q, int id) {
+    for (struct event **p = &q->queue; *p; p = &(*p)->next) {
+        if ((*p)->id == id) {
+            struct event *e = *p;
+            *p = (*p)->next;
+            return e;
+        }
+    }
+
+    return 0;
+}
+
+static int equeue_post(struct equeue *q, struct event *e, int ms) {
     events_mutex_lock(&q->queuelock);
-    int id = equeue_requeue(q, e, ms);
+    int id = equeue_enqueue(q, e, ms);
     events_mutex_unlock(&q->queuelock);
     events_sema_release(&q->eventsema);
     return id;
 }
 
 static void equeue_cancel(struct equeue *q, int id) {
-    struct event *e = 0;
-
     events_mutex_lock(&q->queuelock);
-    for (struct event **p = &q->queue; *p; p = &(*p)->next) {
-        if ((*p)->id == id) {
-            e = *p;
-            *p = (*p)->next;
-            break;
-        }
-    }
+    struct event *e = equeue_dequeue(q, id);
     events_mutex_unlock(&q->queuelock);
 
     if (e) {
         equeue_dealloc(q, e);
     }
 }
 
+void equeue_break(struct equeue *q) {
+    equeue_post(q, &q->break_, 0);
+}
+
 void equeue_dispatch(struct equeue *q, int ms) {
-    unsigned timeout = events_tick() + (unsigned)ms;
-    int deadline = -1;
+    if (ms >= 0) {
+        equeue_post(q, &q->break_, ms);
+    }
 
     while (1) {
+        int deadline = -1;
+
         while (q->queue) {
             deadline = -1;
 
@@ -191,10 +203,14 @@ void equeue_dispatch(struct equeue *q, int ms) {
             if (e->period >= 0) {
                 // requeue periodic tasks to avoid race conditions
                 // in event_cancel
-                equeue_requeue(q, e, e->period);
+                equeue_enqueue(q, e, e->period);
             }
             events_mutex_unlock(&q->queuelock);
 
+            if (e == &q->break_) {
+                return;
+            }
+
             // actually dispatch the callback
             e->cb(e + 1);
 
@@ -203,18 +219,7 @@ void equeue_dispatch(struct equeue *q, int ms) {
             }
         }
 
-        if (ms >= 0) {
-            int nms = tickdiff(timeout, events_tick());
-            if ((unsigned)nms < (unsigned)deadline) {
-                deadline = nms;
-            }
-        }
-
         events_sema_wait(&q->eventsema, deadline);
-
-        if (ms >= 0 && tickdiff(timeout, events_tick()) <= 0) {
-            return;
-        }
     }
 }
 
@@ -257,7 +262,8 @@ void event_dtor(void *p, void (*dtor)(void *)) {
 int event_post(struct equeue *q, void (*cb)(void *), void *p) {
     struct event *e = (struct event *)p - 1;
     e->cb = cb;
-    return equeue_enqueue(q, e, e->target);
+    int id = equeue_post(q, e, e->target);
+    return id;
 }
 
 void event_cancel(struct equeue *q, int id) {
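Taken together, the patch replaces the old tickdiff-based timeout bookkeeping in equeue_dispatch with a dedicated break_ sentinel event: a bounded dispatch posts break_ to its own queue ms in the future, equeue_break lets another thread post it on demand, and the dispatch loop returns as soon as it dequeues that sentinel instead of invoking a callback. A minimal usage sketch follows; it is not part of the patch, and the forward declarations, function names, and POSIX-thread scaffolding are assumptions for illustration only.

#include <pthread.h>
#include <unistd.h>

/* Assumed declarations; the real ones live in the library's header. */
struct equeue;
void equeue_dispatch(struct equeue *q, int ms);
void equeue_break(struct equeue *q);

static void *dispatch_thread(void *p) {
    /* ms < 0: dispatch forever; with this patch, only the break_
     * sentinel posted by equeue_break() can make this call return. */
    equeue_dispatch((struct equeue *)p, -1);
    return 0;
}

/* Hypothetical driver: run the queue on a worker thread, then stop it. */
int run_for_a_second(struct equeue *q) {
    pthread_t t;
    pthread_create(&t, 0, dispatch_thread, q);
    sleep(1);           /* events are posted and dispatched meanwhile */
    equeue_break(q);    /* posts &q->break_ with a 0 ms delay */
    pthread_join(t, 0); /* dispatch returns once it dequeues break_ */
    return 0;
}

The same mechanism gives bounded dispatch its timeout: equeue_dispatch(q, ms) with ms >= 0 schedules break_ through the ordinary queue, so events_sema_wait no longer needs the per-iteration tickdiff clamping and post-wait timeout check that this commit deletes.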