Skip to content
This repository was archived by the owner on Aug 19, 2021. It is now read-only.

Commit 1d0f10e

Browse files
authored
Merge pull request #1 from geky/break
Added support for breaking out of an unbounded event loop
2 parents a7359f0 + 34a4491 commit 1d0f10e

File tree

2 files changed

+41
-27
lines changed

2 files changed

+41
-27
lines changed

events.c

Lines changed: 33 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,7 @@ int equeue_create_inplace(struct equeue *q,
4646
q->free = (struct event*)buffer;
4747
q->queue = 0;
4848
q->next_id = 42;
49+
q->break_ = (struct event){0};
4950

5051
if (q->free) {
5152
for (unsigned i = 0; i < count-1; i++) {
// Signed difference between two tick counters.
//
// Subtraction is performed in unsigned arithmetic (well-defined wraparound)
// and the result is converted to int, so a counter that has recently wrapped
// still compares correctly against an older value.
static inline int tickdiff(unsigned a, unsigned b) {
    unsigned delta = a - b;
    return (int)delta;
}
127128

128-
static int equeue_requeue(struct equeue *q, struct event *e, int ms) {
129+
static int equeue_enqueue(struct equeue *q, struct event *e, int ms) {
129130
e->target = events_tick() + (unsigned)ms;
130131

131132
struct event **p = &q->queue;
@@ -139,37 +140,48 @@ static int equeue_requeue(struct equeue *q, struct event *e, int ms) {
139140
return e->id;
140141
}
141142

142-
static int equeue_enqueue(struct equeue *q, struct event *e, int ms) {
143+
static struct event *equeue_dequeue(struct equeue *q, int id) {
144+
for (struct event **p = &q->queue; *p; p = &(*p)->next) {
145+
if ((*p)->id == id) {
146+
struct event *e = *p;
147+
*p = (*p)->next;
148+
return e;
149+
}
150+
}
151+
152+
return 0;
153+
}
154+
155+
static int equeue_post(struct equeue *q, struct event *e, int ms) {
143156
events_mutex_lock(&q->queuelock);
144-
int id = equeue_requeue(q, e, ms);
157+
int id = equeue_enqueue(q, e, ms);
145158
events_mutex_unlock(&q->queuelock);
146159
events_sema_release(&q->eventsema);
147160
return id;
148161
}
149162

150163
static void equeue_cancel(struct equeue *q, int id) {
151-
struct event *e = 0;
152-
153164
events_mutex_lock(&q->queuelock);
154-
for (struct event **p = &q->queue; *p; p = &(*p)->next) {
155-
if ((*p)->id == id) {
156-
e = *p;
157-
*p = (*p)->next;
158-
break;
159-
}
160-
}
165+
struct event *e = equeue_dequeue(q, id);
161166
events_mutex_unlock(&q->queuelock);
162167

163168
if (e) {
164169
equeue_dealloc(q, e);
165170
}
166171
}
167172

173+
// Break out of a running event loop.
//
// Posts the queue's dedicated break event with zero delay; when
// equeue_dispatch pops it, the dispatch loop returns. Safe to call
// from another thread since equeue_post takes the queue lock.
void equeue_break(struct equeue *q) {
    equeue_post(q, &q->break_, 0);
}
176+
168177
void equeue_dispatch(struct equeue *q, int ms) {
169-
unsigned timeout = events_tick() + (unsigned)ms;
170-
int deadline = -1;
178+
if (ms >= 0) {
179+
equeue_post(q, &q->break_, ms);
180+
}
171181

172182
while (1) {
183+
int deadline = -1;
184+
173185
while (q->queue) {
174186
deadline = -1;
175187

@@ -191,10 +203,14 @@ void equeue_dispatch(struct equeue *q, int ms) {
191203
if (e->period >= 0) {
192204
// requeue periodic tasks to avoid race conditions
193205
// in event_cancel
194-
equeue_requeue(q, e, e->period);
206+
equeue_enqueue(q, e, e->period);
195207
}
196208
events_mutex_unlock(&q->queuelock);
197209

210+
if (e == &q->break_) {
211+
return;
212+
}
213+
198214
// actually dispatch the callback
199215
e->cb(e + 1);
200216

@@ -203,18 +219,7 @@ void equeue_dispatch(struct equeue *q, int ms) {
203219
}
204220
}
205221

206-
if (ms >= 0) {
207-
int nms = tickdiff(timeout, events_tick());
208-
if ((unsigned)nms < (unsigned)deadline) {
209-
deadline = nms;
210-
}
211-
}
212-
213222
events_sema_wait(&q->eventsema, deadline);
214-
215-
if (ms >= 0 && tickdiff(timeout, events_tick()) <= 0) {
216-
return;
217-
}
218223
}
219224
}
220225

@@ -257,7 +262,8 @@ void event_dtor(void *p, void (*dtor)(void *)) {
257262
int event_post(struct equeue *q, void (*cb)(void*), void *p) {
258263
struct event *e = (struct event*)p - 1;
259264
e->cb = cb;
260-
return equeue_enqueue(q, e, e->target);
265+
int id = equeue_post(q, e, e->target);
266+
return id;
261267
}
262268

263269
void event_cancel(struct equeue *q, int id) {

events.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,8 @@ struct equeue {
3333
void *buffer;
3434
int next_id;
3535

36+
struct event break_;
37+
3638
events_sema_t eventsema;
3739
events_mutex_t queuelock;
3840
events_mutex_t freelock;
@@ -52,6 +54,12 @@ void equeue_destroy(struct equeue*);
5254
// or forever if ms is negative
5355
void equeue_dispatch(struct equeue*, int ms);
5456

57+
// Break a running event loop
58+
//
59+
// Shuts down an unbounded event loop. Events that are already pending may
60+
// still execute, but the queue stops looping once the break is processed.
61+
void equeue_break(struct equeue*);
62+
5563
// Simple event calls
5664
//
5765
// Passed callback will be executed in the associated equeue's

0 commit comments

Comments
 (0)