|
21 | 21 | #include <stdint.h>
|
22 | 22 | #include <string.h>
|
23 | 23 |
|
// An event is user-allocated when its address lies outside the queue's
// internal buffer region (below q->buffer or above q->slab.data).
// NOTE(review): this macro implicitly references a local `equeue_t *q`
// that must be in scope at every call site — it is not a parameter.
#define IS_USER_ALLOCATED_EVENT(e) (((intptr_t)(e) < (intptr_t)q->buffer) || ((intptr_t)(e) > ((intptr_t)q->slab.data)))
// For user-allocated events the id is the event's address with the low bit
// set; internal ids presumably always have bit 0 clear (they are built from
// aligned buffer offsets), so the tag bit distinguishes the two — TODO confirm
// alignment guarantees ensure internal ids are always even.
#define MAKE_USER_ALLOCATED_EVENT_ID(e) (((intptr_t)(e)) | 1)
#define IS_USER_ALLOCATED_EVENT_ID(id) (((id) & 1) == 1)
24 | 30 | // calculate the relative-difference between absolute times while
|
25 | 31 | // correctly handling overflow conditions
|
26 | 32 | static inline int equeue_tickdiff(unsigned a, unsigned b)
|
@@ -220,15 +226,16 @@ void equeue_dealloc(equeue_t *q, void *p)
|
220 | 226 | e->dtor(e + 1);
|
221 | 227 | }
|
222 | 228 |
|
223 |
| - equeue_mem_dealloc(q, e); |
| 229 | + if (!IS_USER_ALLOCATED_EVENT(e)) { |
| 230 | + equeue_mem_dealloc(q, e); |
| 231 | + } |
224 | 232 | }
|
225 | 233 |
|
226 |
| - |
227 | 234 | // equeue scheduling functions
|
228 | 235 | static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick)
|
229 | 236 | {
|
230 | 237 | // setup event and hash local id with buffer offset for unique id
|
231 |
| - int id = (e->id << q->npw2) | ((unsigned char *)e - q->buffer); |
| 238 | + int id = IS_USER_ALLOCATED_EVENT(e) ? MAKE_USER_ALLOCATED_EVENT_ID(e) : ((e->id << q->npw2) | ((unsigned char *)e - q->buffer)); |
232 | 239 | e->target = tick + equeue_clampdiff(e->target, tick);
|
233 | 240 | e->generation = q->generation;
|
234 | 241 |
|
@@ -275,14 +282,29 @@ static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick)
|
275 | 282 |
|
276 | 283 | static struct equeue_event *equeue_unqueue(equeue_t *q, int id)
|
277 | 284 | {
|
278 |
| - // decode event from unique id and check that the local id matches |
279 |
| - struct equeue_event *e = (struct equeue_event *) |
280 |
| - &q->buffer[id & ((1 << q->npw2) - 1)]; |
281 |
| - |
282 |
| - equeue_mutex_lock(&q->queuelock); |
283 |
| - if (e->id != id >> q->npw2) { |
284 |
| - equeue_mutex_unlock(&q->queuelock); |
285 |
| - return 0; |
| 285 | + struct equeue_event *e = 0; |
| 286 | + if (IS_USER_ALLOCATED_EVENT_ID(id)) { |
| 287 | + equeue_mutex_lock(&q->queuelock); |
| 288 | + struct equeue_event *cur = q->queue; |
| 289 | + while (cur) { |
| 290 | + if (MAKE_USER_ALLOCATED_EVENT_ID(cur) == id) { |
| 291 | + e = cur; |
| 292 | + break; |
| 293 | + } |
| 294 | + cur = cur->next; |
| 295 | + } |
| 296 | + if (!e) { |
| 297 | + equeue_mutex_unlock(&q->queuelock); |
| 298 | + return 0; |
| 299 | + } |
| 300 | + } else { |
| 301 | + // decode event from unique id and check that the local id matches |
| 302 | + e = (struct equeue_event *)&q->buffer[id & ((1 << q->npw2) - 1)]; |
| 303 | + equeue_mutex_lock(&q->queuelock); |
| 304 | + if (e->id != id >> q->npw2) { |
| 305 | + equeue_mutex_unlock(&q->queuelock); |
| 306 | + return 0; |
| 307 | + } |
286 | 308 | }
|
287 | 309 |
|
288 | 310 | // clear the event and check if already in-flight
|
@@ -311,7 +333,9 @@ static struct equeue_event *equeue_unqueue(equeue_t *q, int id)
|
311 | 333 | }
|
312 | 334 | }
|
313 | 335 |
|
314 |
| - equeue_incid(q, e); |
| 336 | + if (!IS_USER_ALLOCATED_EVENT_ID(id)) { |
| 337 | + equeue_incid(q, e); |
| 338 | + } |
315 | 339 | equeue_mutex_unlock(&q->queuelock);
|
316 | 340 |
|
317 | 341 | return e;
|
@@ -394,11 +418,22 @@ int equeue_timeleft(equeue_t *q, int id)
|
394 | 418 | return -1;
|
395 | 419 | }
|
396 | 420 |
|
397 |
| - // decode event from unique id and check that the local id matches |
398 |
| - struct equeue_event *e = (struct equeue_event *) |
399 |
| - &q->buffer[id & ((1 << q->npw2) - 1)]; |
400 |
| - |
| 421 | + struct equeue_event *e = 0; |
401 | 422 | equeue_mutex_lock(&q->queuelock);
|
| 423 | + if (IS_USER_ALLOCATED_EVENT_ID(id)) { |
| 424 | + struct equeue_event *cur = q->queue; |
| 425 | + while (cur) { |
| 426 | + if (MAKE_USER_ALLOCATED_EVENT_ID(cur) == id) { |
| 427 | + e = cur; |
| 428 | + break; |
| 429 | + } |
| 430 | + cur = cur->next; |
| 431 | + } |
| 432 | + } else { |
| 433 | + // decode event from unique id and check that the local id matches |
| 434 | + e = (struct equeue_event *)&q->buffer[id & ((1 << q->npw2) - 1)]; |
| 435 | + } |
| 436 | + |
402 | 437 | if (e->id == id >> q->npw2) {
|
403 | 438 | ret = equeue_clampdiff(e->target, equeue_tick());
|
404 | 439 | }
|
|
0 commit comments