@@ -23,19 +23,22 @@
 // calculate the relative-difference between absolute times while
 // correctly handling overflow conditions
-static inline int equeue_tickdiff(unsigned a, unsigned b) {
+static inline int equeue_tickdiff(unsigned a, unsigned b)
+{
     return (int)(unsigned)(a - b);
 }
 
 // calculate the relative-difference between absolute times, but
 // also clamp to zero, resulting in only non-negative values
-static inline int equeue_clampdiff(unsigned a, unsigned b) {
+static inline int equeue_clampdiff(unsigned a, unsigned b)
+{
     int diff = equeue_tickdiff(a, b);
-    return ~(diff >> (8*sizeof(int)-1)) & diff;
+    return ~(diff >> (8 * sizeof(int) - 1)) & diff;
 }
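Reviewer note: the clamp above is branch-free. The arithmetic right shift smears the sign bit of diff into a full mask (all ones when diff is negative, all zeros otherwise), so negative differences collapse to zero while non-negative ones pass through. A minimal standalone check, where clamp_to_zero is a hypothetical mirror of the expression; it assumes the usual arithmetic-shift behavior for negative ints, which C leaves implementation-defined but which holds on the targets equeue supports:

    #include <assert.h>

    // mirrors the expression in equeue_clampdiff
    static int clamp_to_zero(int diff)
    {
        return ~(diff >> (8 * sizeof(int) - 1)) & diff;
    }

    int main(void)
    {
        assert(clamp_to_zero(5) == 5);    // non-negative passes through
        assert(clamp_to_zero(0) == 0);
        assert(clamp_to_zero(-7) == 0);   // negative clamps to zero
        return 0;
    }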
 
 // Increment the unique id in an event, hiding the event from cancel
-static inline void equeue_incid(equeue_t *q, struct equeue_event *e) {
+static inline void equeue_incid(equeue_t *q, struct equeue_event *e)
+{
     e->id += 1;
     if ((e->id << q->npw2) == 0) {
         e->id = 1;
@@ -44,7 +47,8 @@ static inline void equeue_incid(equeue_t *q, struct equeue_event *e) {
 
 
 // equeue lifetime management
-int equeue_create(equeue_t *q, size_t size) {
+int equeue_create(equeue_t *q, size_t size)
+{
     // dynamically allocate the specified buffer
     void *buffer = malloc(size);
     if (!buffer) {
@@ -56,7 +60,8 @@ int equeue_create(equeue_t *q, size_t size) {
     return err;
 }
 
-int equeue_create_inplace(equeue_t *q, size_t size, void *buffer) {
+int equeue_create_inplace(equeue_t *q, size_t size, void *buffer)
+{
     // setup queue around provided buffer
     q->buffer = buffer;
     q->allocated = 0;
@@ -99,7 +104,8 @@ int equeue_create_inplace(equeue_t *q, size_t size, void *buffer) {
     return 0;
 }
 
-void equeue_destroy(equeue_t *q) {
+void equeue_destroy(equeue_t *q)
+{
     // call destructors on pending events
     for (struct equeue_event *es = q->queue; es; es = es->next) {
         for (struct equeue_event *e = q->queue; e; e = e->sibling) {
@@ -123,10 +129,11 @@ void equeue_destroy(equeue_t *q) {
 
 
 // equeue chunk allocation functions
-static struct equeue_event *equeue_mem_alloc(equeue_t *q, size_t size) {
+static struct equeue_event *equeue_mem_alloc(equeue_t *q, size_t size)
+{
     // add event overhead
     size += sizeof(struct equeue_event);
-    size = (size + sizeof(void*)-1) & ~(sizeof(void*)-1);
+    size = (size + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
 
     equeue_mutex_lock(&q->memlock);
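The rounding on the changed line above is the standard power-of-two round-up: adding align - 1 and then masking off the low bits rounds size up to the next multiple of align. A small illustration, where align_up is a hypothetical helper and not part of equeue:

    #include <stdio.h>
    #include <stddef.h>

    // rounds size up to a multiple of align; align must be a power of two
    static size_t align_up(size_t size, size_t align)
    {
        return (size + align - 1) & ~(align - 1);
    }

    int main(void)
    {
        printf("%zu\n", align_up(13, sizeof(void *)));  // 16 on 64-bit targets
        printf("%zu\n", align_up(16, sizeof(void *)));  // 16, already aligned
        return 0;
    }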
@@ -162,7 +169,8 @@ static struct equeue_event *equeue_mem_alloc(equeue_t *q, size_t size) {
     return 0;
 }
 
-static void equeue_mem_dealloc(equeue_t *q, struct equeue_event *e) {
+static void equeue_mem_dealloc(equeue_t *q, struct equeue_event *e)
+{
     equeue_mutex_lock(&q->memlock);
 
     // stick chunk into list of chunks
@@ -183,7 +191,8 @@ static void equeue_mem_dealloc(equeue_t *q, struct equeue_event *e) {
     equeue_mutex_unlock(&q->memlock);
 }
 
-void *equeue_alloc(equeue_t *q, size_t size) {
+void *equeue_alloc(equeue_t *q, size_t size)
+{
     struct equeue_event *e = equeue_mem_alloc(q, size);
     if (!e) {
         return 0;
@@ -196,19 +205,21 @@ void *equeue_alloc(equeue_t *q, size_t size) {
     return e + 1;
 }
 
-void equeue_dealloc(equeue_t *q, void *p) {
-    struct equeue_event *e = (struct equeue_event*)p - 1;
+void equeue_dealloc(equeue_t *q, void *p)
+{
+    struct equeue_event *e = (struct equeue_event *)p - 1;
 
     if (e->dtor) {
-        e->dtor(e+1);
+        e->dtor(e + 1);
     }
 
     equeue_mem_dealloc(q, e);
 }
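Note the pairing here: equeue_alloc returns e + 1, the memory just past the event header, and equeue_dealloc steps back with (struct equeue_event *)p - 1 to recover the header. A minimal sketch of this header-prefix idiom, using a hypothetical header type rather than the real equeue_event layout:

    #include <stdlib.h>
    #include <assert.h>

    struct header {
        int id;
    };

    static void *alloc_with_header(size_t size)
    {
        struct header *h = malloc(sizeof(struct header) + size);
        return h ? h + 1 : NULL;  // caller sees only the payload
    }

    static struct header *header_of(void *p)
    {
        return (struct header *)p - 1;  // step back over the header
    }

    int main(void)
    {
        void *p = alloc_with_header(32);
        assert(p);
        header_of(p)->id = 42;
        assert(header_of(p)->id == 42);
        free(header_of(p));
        return 0;
    }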
 
 
 // equeue scheduling functions
-static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick) {
+static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick)
+{
     // setup event and hash local id with buffer offset for unique id
     int id = (e->id << q->npw2) | ((unsigned char *)e - q->buffer);
     e->target = tick + equeue_clampdiff(e->target, tick);
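The id computed above packs two fields into one int: the low npw2 bits hold the event's byte offset in the queue buffer, and the remaining bits hold a per-event generation count that equeue_incid bumps so stale ids stop matching. equeue_unqueue and equeue_timeleft below unpack it with the matching mask and shift. A standalone sketch with hypothetical pack/unpack helpers; npw2 is assumed to be log2 of the buffer size:

    #include <assert.h>

    static int pack_id(int generation, unsigned offset, unsigned npw2)
    {
        return (generation << npw2) | offset;
    }

    static unsigned unpack_offset(int id, unsigned npw2)
    {
        return id & ((1u << npw2) - 1);
    }

    static int unpack_generation(int id, unsigned npw2)
    {
        return id >> npw2;
    }

    int main(void)
    {
        unsigned npw2 = 10;  // a 1024-byte buffer
        int id = pack_id(3, 512, npw2);
        assert(unpack_offset(id, npw2) == 512);
        assert(unpack_generation(id, npw2) == 3);
        return 0;
    }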
@@ -245,20 +256,21 @@ static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick) {
 
     // notify background timer
     if ((q->background.update && q->background.active) &&
-            (q->queue == e && !e->sibling)) {
+        (q->queue == e && !e->sibling)) {
         q->background.update(q->background.timer,
-                equeue_clampdiff(e->target, tick));
+                             equeue_clampdiff(e->target, tick));
     }
 
     equeue_mutex_unlock(&q->queuelock);
 
     return id;
 }
 
-static struct equeue_event *equeue_unqueue(equeue_t *q, int id) {
+static struct equeue_event *equeue_unqueue(equeue_t *q, int id)
+{
     // decode event from unique id and check that the local id matches
     struct equeue_event *e = (struct equeue_event *)
-            &q->buffer[id & ((1 << q->npw2)-1)];
+                             &q->buffer[id & ((1 << q->npw2) - 1)];
 
     equeue_mutex_lock(&q->queuelock);
     if (e->id != id >> q->npw2) {
@@ -298,7 +310,8 @@ static struct equeue_event *equeue_unqueue(equeue_t *q, int id) {
     return e;
 }
 
-static struct equeue_event *equeue_dequeue(equeue_t *q, unsigned target) {
+static struct equeue_event *equeue_dequeue(equeue_t *q, unsigned target)
+{
     equeue_mutex_lock(&q->queuelock);
 
     // find all expired events and mark a new generation
@@ -342,8 +355,9 @@ static struct equeue_event *equeue_dequeue(equeue_t *q, unsigned target) {
     return head;
 }
 
-int equeue_post(equeue_t *q, void (*cb)(void*), void *p) {
-    struct equeue_event *e = (struct equeue_event*)p - 1;
+int equeue_post(equeue_t *q, void (*cb)(void *), void *p)
+{
+    struct equeue_event *e = (struct equeue_event *)p - 1;
     unsigned tick = equeue_tick();
     e->cb = cb;
     e->target = tick + e->target;
@@ -353,7 +367,8 @@ int equeue_post(equeue_t *q, void (*cb)(void*), void *p) {
     return id;
 }
 
-void equeue_cancel(equeue_t *q, int id) {
+void equeue_cancel(equeue_t *q, int id)
+{
     if (!id) {
         return;
     }
@@ -364,7 +379,8 @@ void equeue_cancel(equeue_t *q, int id) {
     }
 }
 
-int equeue_timeleft(equeue_t *q, int id) {
+int equeue_timeleft(equeue_t *q, int id)
+{
     int ret = -1;
 
     if (!id) {
@@ -373,7 +389,7 @@ int equeue_timeleft(equeue_t *q, int id) {
 
     // decode event from unique id and check that the local id matches
     struct equeue_event *e = (struct equeue_event *)
-            &q->buffer[id & ((1 << q->npw2)-1)];
+                             &q->buffer[id & ((1 << q->npw2) - 1)];
 
     equeue_mutex_lock(&q->queuelock);
     if (e->id == id >> q->npw2) {
@@ -383,14 +399,16 @@ int equeue_timeleft(equeue_t *q, int id) {
     return ret;
 }
 
-void equeue_break(equeue_t *q) {
+void equeue_break(equeue_t *q)
+{
     equeue_mutex_lock(&q->queuelock);
     q->break_requested = true;
     equeue_mutex_unlock(&q->queuelock);
     equeue_sema_signal(&q->eventsema);
 }
 
-void equeue_dispatch(equeue_t *q, int ms) {
+void equeue_dispatch(equeue_t *q, int ms)
+{
     unsigned tick = equeue_tick();
     unsigned timeout = tick + ms;
     q->background.active = false;
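For context on the signature change above, a short usage sketch, assuming a queue created with equeue_create elsewhere: a non-negative ms dispatches events for roughly that many milliseconds, while a negative ms dispatches indefinitely until equeue_break is called from another context.

    #include "equeue.h"

    void run_queue(equeue_t *q)
    {
        equeue_dispatch(q, 100);  // handle events for ~100 ms, then return
        equeue_dispatch(q, -1);   // or: dispatch until equeue_break(q)
    }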
@@ -416,7 +434,7 @@ void equeue_dispatch(equeue_t *q, int ms) {
                 equeue_enqueue(q, e, equeue_tick());
             } else {
                 equeue_incid(q, e);
-                equeue_dealloc(q, e+1);
+                equeue_dealloc(q, e + 1);
             }
         }
 
@@ -432,7 +450,7 @@ void equeue_dispatch(equeue_t *q, int ms) {
         equeue_mutex_lock(&q->queuelock);
         if (q->background.update && q->queue) {
             q->background.update(q->background.timer,
-                    equeue_clampdiff(q->queue->target, tick));
+                                 equeue_clampdiff(q->queue->target, tick));
         }
         q->background.active = true;
         equeue_mutex_unlock(&q->queuelock);
@@ -473,34 +491,39 @@ void equeue_dispatch(equeue_t *q, int ms) {
 
 
 // event functions
-void equeue_event_delay(void *p, int ms) {
-    struct equeue_event *e = (struct equeue_event*)p - 1;
+void equeue_event_delay(void *p, int ms)
+{
+    struct equeue_event *e = (struct equeue_event *)p - 1;
     e->target = ms;
 }
 
-void equeue_event_period(void *p, int ms) {
-    struct equeue_event *e = (struct equeue_event*)p - 1;
+void equeue_event_period(void *p, int ms)
+{
+    struct equeue_event *e = (struct equeue_event *)p - 1;
     e->period = ms;
 }
 
-void equeue_event_dtor(void *p, void (*dtor)(void *)) {
-    struct equeue_event *e = (struct equeue_event*)p - 1;
+void equeue_event_dtor(void *p, void (*dtor)(void *))
+{
+    struct equeue_event *e = (struct equeue_event *)p - 1;
     e->dtor = dtor;
 }
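The three setters above tune an event between equeue_alloc and equeue_post. A usage sketch, where blink and cleanup are hypothetical user callbacks: an event that first fires after about 10 ms and then repeats every 100 ms.

    #include "equeue.h"

    void blink(void *data);    // hypothetical user callback
    void cleanup(void *data);  // hypothetical user destructor

    int post_blinker(equeue_t *q)
    {
        void *p = equeue_alloc(q, 0);   // no extra payload in this sketch
        if (!p) {
            return 0;
        }
        equeue_event_delay(p, 10);      // first dispatch in ~10 ms
        equeue_event_period(p, 100);    // then re-enqueue every 100 ms
        equeue_event_dtor(p, cleanup);  // called when the event is freed
        return equeue_post(q, blink, p);
    }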
 
 
 // simple callbacks
 struct ecallback {
-    void (*cb)(void*);
+    void (*cb)(void *);
     void *data;
 };
 
-static void ecallback_dispatch(void *p) {
-    struct ecallback *e = (struct ecallback*)p;
+static void ecallback_dispatch(void *p)
+{
+    struct ecallback *e = (struct ecallback *)p;
     e->cb(e->data);
 }
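The ecallback wrapper lets a plain cb(data) pair ride on the queue; the equeue_call variants below all funnel through it. A usage sketch, where hello is a hypothetical callback and the queue functions are the real API:

    #include <stdio.h>
    #include "equeue.h"

    static void hello(void *data)
    {
        printf("hello %s\n", (const char *)data);
    }

    void demo(equeue_t *q)
    {
        equeue_call(q, hello, "now");                        // next dispatch
        equeue_call_in(q, 50, hello, "later");               // once, ~50 ms out
        int id = equeue_call_every(q, 1000, hello, "tick");  // repeating
        equeue_cancel(q, id);                                // ids are cancelable
    }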
 
-int equeue_call(equeue_t *q, void (*cb)(void*), void *data) {
+int equeue_call(equeue_t *q, void (*cb)(void *), void *data)
+{
     struct ecallback *e = equeue_alloc(q, sizeof(struct ecallback));
     if (!e) {
         return 0;
@@ -511,7 +534,8 @@ int equeue_call(equeue_t *q, void (*cb)(void*), void *data) {
     return equeue_post(q, ecallback_dispatch, e);
 }
 
-int equeue_call_in(equeue_t *q, int ms, void (*cb)(void*), void *data) {
+int equeue_call_in(equeue_t *q, int ms, void (*cb)(void *), void *data)
+{
     struct ecallback *e = equeue_alloc(q, sizeof(struct ecallback));
     if (!e) {
         return 0;
@@ -523,7 +547,8 @@ int equeue_call_in(equeue_t *q, int ms, void (*cb)(void*), void *data) {
     return equeue_post(q, ecallback_dispatch, e);
 }
 
-int equeue_call_every(equeue_t *q, int ms, void (*cb)(void*), void *data) {
+int equeue_call_every(equeue_t *q, int ms, void (*cb)(void *), void *data)
+{
     struct ecallback *e = equeue_alloc(q, sizeof(struct ecallback));
     if (!e) {
         return 0;
@@ -539,7 +564,8 @@ int equeue_call_every(equeue_t *q, int ms, void (*cb)(void*), void *data) {
 
 // backgrounding
 void equeue_background(equeue_t *q,
-        void (*update)(void *timer, int ms), void *timer) {
+                       void (*update)(void *timer, int ms), void *timer)
+{
     equeue_mutex_lock(&q->queuelock);
     if (q->background.update) {
         q->background.update(q->background.timer, -1);
@@ -550,7 +576,7 @@ void equeue_background(equeue_t *q,
 
     if (q->background.update && q->queue) {
         q->background.update(q->background.timer,
-                equeue_clampdiff(q->queue->target, equeue_tick()));
+                             equeue_clampdiff(q->queue->target, equeue_tick()));
     }
     q->background.active = true;
     equeue_mutex_unlock(&q->queuelock);
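equeue_background registers an update hook that receives the delay until the next event whenever the head of the queue changes; judging from the code above, -1 is passed when the hook is replaced, so a hook should treat a negative delay as "disarm". A sketch with a hypothetical platform timer (my_timer and its arm/disarm functions are assumptions, not part of equeue):

    #include "equeue.h"

    struct my_timer;                                // hypothetical platform timer
    void my_timer_arm(struct my_timer *t, int ms);  // hypothetical
    void my_timer_disarm(struct my_timer *t);       // hypothetical

    static void update(void *timer, int ms)
    {
        if (ms < 0) {
            my_timer_disarm((struct my_timer *)timer);   // queue going quiet
        } else {
            my_timer_arm((struct my_timer *)timer, ms);  // dispatch due in ms
        }
    }

    void setup_background(equeue_t *q, struct my_timer *t)
    {
        equeue_background(q, update, t);  // t comes back as the timer argument
    }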
@@ -562,11 +588,13 @@ struct equeue_chain_context {
     int id;
 };
 
-static void equeue_chain_dispatch(void *p) {
+static void equeue_chain_dispatch(void *p)
+{
     equeue_dispatch((equeue_t *)p, 0);
 }
 
-static void equeue_chain_update(void *p, int ms) {
+static void equeue_chain_update(void *p, int ms)
+{
     struct equeue_chain_context *c = (struct equeue_chain_context *)p;
     equeue_cancel(c->target, c->id);
 
@@ -577,14 +605,15 @@ static void equeue_chain_update(void *p, int ms) {
     }
 }
 
-void equeue_chain(equeue_t *q, equeue_t *target) {
+void equeue_chain(equeue_t *q, equeue_t *target)
+{
     if (!target) {
         equeue_background(q, 0, 0);
         return;
     }
 
     struct equeue_chain_context *c = equeue_alloc(q,
-            sizeof(struct equeue_chain_context));
+                                     sizeof(struct equeue_chain_context));
 
     c->q = q;
     c->target = target;
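equeue_chain marks a queue to be dispatched inside another queue's dispatch loop, using the chain context above to schedule itself on the target. A usage sketch, where work is a hypothetical callback and error handling is omitted:

    #include <stdio.h>
    #include "equeue.h"

    static void work(void *data)
    {
        (void)data;
        printf("ran via the target queue\n");
    }

    void chained_demo(void)
    {
        equeue_t target, chained;
        equeue_create(&target, 1024);
        equeue_create(&chained, 1024);

        equeue_chain(&chained, &target);  // chained dispatches through target
        equeue_call(&chained, work, 0);
        equeue_dispatch(&target, 100);    // also runs events from chained

        equeue_destroy(&chained);
        equeue_destroy(&target);
    }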