Event loop with mbed-events #2860

Merged: 6 commits, Sep 30, 2016
258 changes: 258 additions & 0 deletions TESTS/events/queue/main.cpp
@@ -0,0 +1,258 @@
#include "mbed_events.h"
#include "mbed.h"
#include "rtos.h"
#include "greentea-client/test_env.h"
#include "unity.h"
#include "utest.h"

using namespace utest::v1;


// flag set by the test functions when they are called
volatile bool touched = false;

// static functions
void func5(int a0, int a1, int a2, int a3, int a4) {
    touched = true;
    TEST_ASSERT_EQUAL(a0 | a1 | a2 | a3 | a4, 0x1f);
}

void func4(int a0, int a1, int a2, int a3) {
    touched = true;
    TEST_ASSERT_EQUAL(a0 | a1 | a2 | a3, 0xf);
}

void func3(int a0, int a1, int a2) {
    touched = true;
    TEST_ASSERT_EQUAL(a0 | a1 | a2, 0x7);
}

void func2(int a0, int a1) {
    touched = true;
    TEST_ASSERT_EQUAL(a0 | a1, 0x3);
}

void func1(int a0) {
    touched = true;
    TEST_ASSERT_EQUAL(a0, 0x1);
}

void func0() {
    touched = true;
}

#define SIMPLE_POSTS_TEST(i, ...)               \
void simple_posts_test##i() {                   \
    EventQueue queue;                           \
                                                \
    touched = false;                            \
    queue.call(func##i,##__VA_ARGS__);          \
    queue.dispatch(0);                          \
    TEST_ASSERT(touched);                       \
                                                \
    touched = false;                            \
    queue.call_in(1, func##i,##__VA_ARGS__);    \
    queue.dispatch(2);                          \
    TEST_ASSERT(touched);                       \
                                                \
    touched = false;                            \
    queue.call_every(1, func##i,##__VA_ARGS__); \
    queue.dispatch(2);                          \
    TEST_ASSERT(touched);                       \
}

SIMPLE_POSTS_TEST(5, 0x01, 0x02, 0x04, 0x08, 0x010)
SIMPLE_POSTS_TEST(4, 0x01, 0x02, 0x04, 0x08)
SIMPLE_POSTS_TEST(3, 0x01, 0x02, 0x04)
SIMPLE_POSTS_TEST(2, 0x01, 0x02)
SIMPLE_POSTS_TEST(1, 0x01)
SIMPLE_POSTS_TEST(0)


void time_func(Timer *t, int ms) {
    TEST_ASSERT_INT_WITHIN(2, ms, t->read_ms());
    t->reset();
}

template <int N>
void call_in_test() {
    Timer tickers[N];

    EventQueue queue;

    for (int i = 0; i < N; i++) {
        tickers[i].start();
        queue.call_in((i+1)*100, time_func, &tickers[i], (i+1)*100);
    }

    queue.dispatch(N*100);
}

template <int N>
void call_every_test() {
    Timer tickers[N];

    EventQueue queue;

    for (int i = 0; i < N; i++) {
        tickers[i].start();
        queue.call_every((i+1)*100, time_func, &tickers[i], (i+1)*100);
    }

    queue.dispatch(N*100);
}

void allocate_failure_test() {
    EventQueue queue;
    int id;

    for (int i = 0; i < 100; i++) {
        id = queue.call((void (*)())0);
    }

    TEST_ASSERT(!id);
}

void no() {
    TEST_ASSERT(false);
}

template <int N>
void cancel_test1() {
    EventQueue queue;

    int ids[N];

    for (int i = 0; i < N; i++) {
        ids[i] = queue.call_in(1000, no);
    }

    for (int i = N-1; i >= 0; i--) {
        queue.cancel(ids[i]);
    }

    queue.dispatch(0);
}


// Testing the dynamic arguments to the event class
unsigned counter = 0;

void count5(unsigned a0, unsigned a1, unsigned a2, unsigned a3, unsigned a5) {
    counter += a0 + a1 + a2 + a3 + a5;
}

void count4(unsigned a0, unsigned a1, unsigned a2, unsigned a3) {
    counter += a0 + a1 + a2 + a3;
}

void count3(unsigned a0, unsigned a1, unsigned a2) {
    counter += a0 + a1 + a2;
}

void count2(unsigned a0, unsigned a1) {
    counter += a0 + a1;
}

void count1(unsigned a0) {
    counter += a0;
}

void count0() {
    counter += 0;
}

void event_class_test() {
    counter = 0;
    EventQueue queue(2048);

    Event<void(int, int, int, int, int)> e5(&queue, count5);
    Event<void(int, int, int, int)> e4(&queue, count5, 1);
    Event<void(int, int, int)> e3(&queue, count5, 1, 1);
    Event<void(int, int)> e2(&queue, count5, 1, 1, 1);
    Event<void(int)> e1(&queue, count5, 1, 1, 1, 1);
    Event<void()> e0(&queue, count5, 1, 1, 1, 1, 1);

    e5.post(1, 1, 1, 1, 1);
    e4.post(1, 1, 1, 1);
    e3.post(1, 1, 1);
    e2.post(1, 1);
    e1.post(1);
    e0.post();

    queue.dispatch(0);

    TEST_ASSERT_EQUAL(counter, 30);
}

void event_class_helper_test() {
    counter = 0;
    EventQueue queue(2048);

    Event<void()> e5 = queue.event(count5, 1, 1, 1, 1, 1);
    Event<void()> e4 = queue.event(count4, 1, 1, 1, 1);
    Event<void()> e3 = queue.event(count3, 1, 1, 1);
    Event<void()> e2 = queue.event(count2, 1, 1);
    Event<void()> e1 = queue.event(count1, 1);
    Event<void()> e0 = queue.event(count0);

    e5.post();
    e4.post();
    e3.post();
    e2.post();
    e1.post();
    e0.post();

    queue.dispatch(0);

    TEST_ASSERT_EQUAL(counter, 15);
}

void event_inference_test() {
    counter = 0;
    EventQueue queue(2048);

    queue.event(count5, 1, 1, 1, 1, 1).post();
    queue.event(count5, 1, 1, 1, 1).post(1);
    queue.event(count5, 1, 1, 1).post(1, 1);
    queue.event(count5, 1, 1).post(1, 1, 1);
    queue.event(count5, 1).post(1, 1, 1, 1);
    queue.event(count5).post(1, 1, 1, 1, 1);

    queue.dispatch(0);

    TEST_ASSERT_EQUAL(counter, 30);
}


// Test setup
utest::v1::status_t test_setup(const size_t number_of_cases) {
    GREENTEA_SETUP(20, "default_auto");
    return verbose_test_setup_handler(number_of_cases);
}

const Case cases[] = {
    Case("Testing calls with 5 args", simple_posts_test5),
    Case("Testing calls with 4 args", simple_posts_test4),
    Case("Testing calls with 3 args", simple_posts_test3),
    Case("Testing calls with 2 args", simple_posts_test2),
    Case("Testing calls with 1 args", simple_posts_test1),
    Case("Testing calls with 0 args", simple_posts_test0),

    Case("Testing call_in", call_in_test<20>),
    Case("Testing call_every", call_every_test<20>),

    Case("Testing allocate failure", allocate_failure_test),

    Case("Testing event cancel 1", cancel_test1<20>),
    Case("Testing the event class", event_class_test),
    Case("Testing the event class helpers", event_class_helper_test),
    Case("Testing the event inference", event_inference_test),
};

Specification specification(test_setup, cases);

int main() {
    return !Harness::run(specification);
}

139 changes: 139 additions & 0 deletions docs/events.md
@@ -0,0 +1,139 @@
# About the mbed OS event loop

One of the optional mbed OS features is an event loop mechanism that can be used to defer the execution of code to a different context. In particular, a common use of an event loop is to postpone the execution of a code sequence from an interrupt handler to a user context. This is useful because of the specific constraints of code that runs in an interrupt handler:

- the execution of certain functions (notably some functions in the C library) is not safe.
- various RTOS objects and functions can't be used from an interrupt context.
- as a general rule, the code needs to finish as fast as possible, to allow other interrupts to be handled.

The event loop offers a solution to these issues in the form of an API that can be used to defer execution of code from the interrupt context to the user context. More generally, the event loop can be used anywhere in a program (not necessarily in an interrupt handler) to defer code execution to a different context.

# Overview of the mbed OS event loop

An event loop has two main components:

1. an **event queue**, used to store events. In mbed OS, *events* are pointers to functions (and optionally function arguments).
2. an **event loop** that extracts events from the queue and executes them.

The mbed OS event queue is implemented by the [mbed-events library](http://github.com/ARMmbed/mbed-os/tree/master/events). It's a good idea to go through the [README of mbed-events](https://github.com/ARMmbed/mbed-os/blob/master/events/README.md), as it shows how to use the event queue.
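
The queue API itself is small: you post a function with `call` and run posted events with `dispatch`. Below is a minimal sketch; the calls mirror the ones exercised by the test file in this pull request, and `work` is an illustrative function name:

```
#include "mbed_events.h"

void work() {
    // Deferred work runs here, in whatever context calls dispatch()
}

int main() {
    EventQueue queue;

    // Post an event; nothing runs until the queue is dispatched
    queue.call(work);

    // Run posted events for up to 200 ms in the current thread, then return
    queue.dispatch(200);
}
```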

The event loop must be created and started manually. The simplest way to achieve that is to create a `Thread` and run the event queue's `dispatch` method in the thread:

```
#include "mbed.h"
#include "mbed_events.h"

// Create a queue that can hold a maximum of 32 events
EventQueue queue(32 * EVENTS_EVENT_SIZE);
// Create a thread that'll run the event queue's dispatch function
Thread t;

int main() {
    // Start the event queue's dispatch thread
    t.start(callback(&queue, &EventQueue::dispatch_forever));
    ...
}
```

Note that although this document assumes the presence of a single event loop in the system, there's nothing preventing the programmer from running more than one event loop, simply by following the create/start pattern above for each of them.
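
For instance, here is a sketch of two independent event loops, each dispatched by its own thread (the queue sizes and names below are purely illustrative):

```
#include "mbed.h"
#include "mbed_events.h"

// Two independent queues, for example one for time-sensitive work and one for logging
EventQueue fast_queue(16 * EVENTS_EVENT_SIZE);
EventQueue slow_queue(16 * EVENTS_EVENT_SIZE);

Thread fast_thread;
Thread slow_thread;

int main() {
    // Each queue gets its own dispatch thread
    fast_thread.start(callback(&fast_queue, &EventQueue::dispatch_forever));
    slow_thread.start(callback(&slow_queue, &EventQueue::dispatch_forever));
    ...
}
```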

## Using the event loop

Once the event loop is created, it can be used for posting events. Let's consider a very simple example of a program that attaches two interrupt handlers to an `InterruptIn` object, using the InterruptIn `rise` and `fall` functions. The `rise` handler will run in interrupt context, and the `fall` handler will run in user context (more specifically, in the context of the event loop's thread). The full code for the example can be found below:

```
#include "mbed.h"
#include "mbed_events.h"

DigitalOut led1(LED1);
InterruptIn sw(SW2);
EventQueue queue(32 * EVENTS_EVENT_SIZE);
Thread t;

void rise_handler(void) {
    printf("rise_handler in context %p\r\n", Thread::gettid());
    // Toggle LED
    led1 = !led1;
}

void fall_handler(void) {
    printf("fall_handler in context %p\r\n", Thread::gettid());
    // Toggle LED
    led1 = !led1;
}

int main() {
    // Start the event queue
    t.start(callback(&queue, &EventQueue::dispatch_forever));
    printf("Starting in context %p\r\n", Thread::gettid());
    // The 'rise' handler will execute in IRQ context
    sw.rise(rise_handler);
    // The 'fall' handler will execute in the context of thread 't'
    sw.fall(queue.event(fall_handler));
}

```

The above code executes two handler functions (`rise_handler` and `fall_handler`) in two different contexts:

1. in interrupt context when a rising edge is detected on `SW2` (`rise_handler`).
2. in the context of the event loop's thread function when a falling edge is detected on `SW2` (`fall_handler`). `queue.event()` is called with `fall_handler` as an argument to specify that `fall_handler` will run in user context instead of interrupt context.

This is the output of the above program on a FRDM-K64F board after resetting the board and pressing the SW2 button twice:

```
Starting in context 0x20002c50
fall_handler in context 0x20002c90
rise_handler in context 0x0
fall_handler in context 0x20002c90
rise_handler in context 0x0
```

The program starts in the context of the thread that runs the `main` function (`0x20002c50`). When the user presses SW2, `fall_handler` is automatically queued in the event queue, and it runs later in the context of thread `t` (`0x20002c90`). When the user releases the button, `rise_handler` is executed immediately, and it displays `0x0`, indicating that the code runs in interrupt context.

The code for `rise_handler` is problematic, since it calls `printf` in interrupt context, which is a potentially unsafe operation. Fortunately, this is exactly the kind of problem that event queues can solve. We can make the code safe by running `rise_handler` in user context (like we already do with `fall_handler`) by replacing this line:

```
sw.rise(rise_handler);
```

with this line:

```
sw.rise(queue.event(rise_handler));
```

The code is safe now, but we might've introduced another problem: latency. After the change above, the call to `rise_handler` will be queued, which means that it no longer runs immediately after the interrupt is raised. For this example code, this isn't a problem, but some applications might require the code to respond as fast as possible to an interrupt. Let's assume that `rise_handler` must toggle the LED as quickly as possible in response to the user's action on SW2. To do that, it must run in interrupt context. However, `rise_handler` still needs to print a message indicating that the handler was called, and that's problematic, since it's not safe to call `printf` from an interrupt context. The solution is to split `rise_handler` in two parts: the time-critical part will run in interrupt context, while the non-critical part (displaying the message) will run in user context. This is easily done using `queue.call`:

```
void rise_handler_user_context(void) {
    printf("rise_handler_user_context in context %p\r\n", Thread::gettid());
}

void rise_handler(void) {
    // Execute the time critical part first
    led1 = !led1;
    // The rest can execute later in user context (and can contain code that's not interrupt safe).
    // We use the 'queue.call' function to add an event (the call to 'rise_handler_user_context') to the queue.
    queue.call(rise_handler_user_context);
}

```

After replacing the code for `rise_handler` as above, the output of our example becomes:

```
Starting in context 0x20002c50
fall_handler in context 0x20002c90
rise_handler_user_context in context 0x20002c90
fall_handler in context 0x20002c90
rise_handler_user_context in context 0x20002c90
```

The scenario above (splitting an interrupt handler's code into a time-critical part and a non-time-critical part) is another common pattern that's easily implemented with event queues. This example also shows that deferring code that's not interrupt safe is not the only use for event queues: any kind of code can be queued and deferred for later execution.

We used `InterruptIn` for the example above, but the same kind of code can be used with any of the `attach()`-like functions in the SDK. Examples include `Serial::attach()`, `Ticker::attach()`, `Ticker::attach_us()` and `Timeout::attach()`.
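
For example, here is a sketch of the same pattern with a `Ticker` (the interval and handler name are illustrative; as with `InterruptIn` above, the event returned by `queue.event()` is passed where a callback is expected, so the handler runs in the event loop's thread rather than in the ticker's interrupt context):

```
#include "mbed.h"
#include "mbed_events.h"

EventQueue queue(32 * EVENTS_EVENT_SIZE);
Thread t;
Ticker ticker;

void tick_handler(void) {
    // Runs in the event loop's thread, so calling printf here is safe
    printf("tick in context %p\r\n", Thread::gettid());
}

int main() {
    // Start the event queue's dispatch thread
    t.start(callback(&queue, &EventQueue::dispatch_forever));
    // The ticker fires in interrupt context; the wrapped event defers
    // tick_handler to the event loop's thread once per second
    ticker.attach(queue.event(tick_handler), 1.0f);
}
```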

## Where to go from here

We've just scratched the surface of how event queues work in mbed OS. The `EventQueue` and `Event` classes in the `mbed-events` library offer a lot of features that are not covered in this document, including calling functions with arguments, queueing functions to be called after a delay, and queueing functions to be called periodically. The [README of the mbed-events library](https://github.com/ARMmbed/mbed-os/blob/master/events/README.md) shows more ways to use events and event queues. For more details about how the events library is implemented, check [this file](https://github.com/ARMmbed/mbed-os/blob/master/events/equeue/README.md).
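
As a brief sketch of those features (the calls below mirror the ones exercised by the test file in this pull request; `handler` is an illustrative function name):

```
#include "mbed_events.h"

void handler(int data) {
    // Illustrative handler taking one argument
}

int main() {
    EventQueue queue;

    queue.call(handler, 1);             // call with an argument
    queue.call_in(2000, handler, 2);    // call after a 2000 ms delay
    queue.call_every(1000, handler, 3); // call every 1000 ms

    // Dispatch events for 5 seconds, then return
    queue.dispatch(5000);
}
```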