/* mbed Microcontroller Library
 * Copyright (c) 2006-2012 ARM Limited
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "hal/us_ticker_api.h"
#include "hal/lp_ticker_api.h"
#include "mbed_critical.h"
#include "mbed_assert.h"
#include "platform/mbed_power_mgmt.h"
#include "platform/CriticalSectionLock.h"
#include "platform/internal/SysTimer.h"
extern "C" {
#if MBED_CONF_RTOS_PRESENT
#include "rtx_lib.h"
#else
#define OS_TICK_FREQ 1000
#endif
#if defined(TARGET_CORTEX_A)
#include "irq_ctrl.h"
#endif
}

#define US_IN_TICK (1000000 / OS_TICK_FREQ)
MBED_STATIC_ASSERT(1000000 % OS_TICK_FREQ == 0, "OS_TICK_FREQ must be a divisor of 1000000 for correct tick calculations");
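// Example: with OS_TICK_FREQ = 1000 (the non-RTOS fallback defined above),
// US_IN_TICK is 1000000 / 1000 = 1000 us per tick. A frequency that does not
// divide 1000000 evenly, such as 1024 Hz, would trip the static assert above,
// since 1000000 % 1024 != 0.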

#if (defined(NO_SYSTICK))
/**
 * Return an IRQ number that can be used in the absence of SysTick
 *
 * @return Free IRQ number that can be used
 */
extern "C" IRQn_Type mbed_get_m0_tick_irqn(void);
#endif

#if defined(TARGET_CORTEX_A)
extern "C" IRQn_ID_t mbed_get_a9_tick_irqn(void);
#endif

namespace mbed {
namespace internal {

SysTimer::SysTimer() :
#if DEVICE_LPTICKER
    TimerEvent(get_lp_ticker_data()),
#else
    TimerEvent(get_us_ticker_data()),
#endif
    _time_us(ticker_read_us(_ticker_data)),
    _tick(0),
    _unacknowledged_ticks(0),
    _wake_time_set(false),
    _wake_time_passed(false),
    _ticking(false),
    _deep_sleep_locked(false)
{
    if (!_ticker_data->interface->runs_in_deep_sleep) {
        sleep_manager_lock_deep_sleep();
    }
}

SysTimer::SysTimer(const ticker_data_t *data) :
    TimerEvent(data),
    _time_us(ticker_read_us(_ticker_data)),
    _tick(0),
    _unacknowledged_ticks(0),
    _wake_time_set(false),
    _wake_time_passed(false),
    _ticking(false),
    _deep_sleep_locked(false)
{
    if (!_ticker_data->interface->runs_in_deep_sleep) {
        sleep_manager_lock_deep_sleep();
    }
}

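/** Schedule a wake-up interrupt at absolute tick @a at.
 *
 * Must only be called while the timer is suspended (not ticking). If @a at
 * has already passed, only _wake_time_passed is recorded. Otherwise, when
 * enough time remains and deep sleep is permitted, the interrupt is
 * programmed early to absorb the deep-sleep wake-up latency; if not, a
 * deep-sleep lock is taken (on targets with wake-up latency) and the
 * interrupt fires at the exact time.
 */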
void SysTimer::set_wake_time(uint64_t at)
{
    // SysTimer must not be active - we must be in suspend state
    MBED_ASSERT(!_ticking);

    // There is a potential race here, when called from outside
    // a critical section. See function documentation for notes on
    // handling it.
    if (core_util_atomic_load_bool(&_wake_time_set)) {
        return;
    }

    // Analyse the timers
    if (update_and_get_tick() >= at) {
        _wake_time_passed = true;
        return;
    }

    uint64_t ticks_to_sleep = at - _tick;
    uint64_t wake_time = at * US_IN_TICK;

    /* Set this first, before attaching the interrupt that can unset it */
    _wake_time_set = true;
    _wake_time_passed = false;

    /* If deep sleep is unlocked, and we have enough time, let's go for it */
    if (MBED_CONF_TARGET_DEEP_SLEEP_LATENCY > 0 &&
            ticks_to_sleep > MBED_CONF_TARGET_DEEP_SLEEP_LATENCY &&
            sleep_manager_can_deep_sleep()) {
        /* Schedule the wake up interrupt early, allowing for the deep sleep latency */
        _wake_early = true;
        insert_absolute(wake_time - MBED_CONF_TARGET_DEEP_SLEEP_LATENCY * US_IN_TICK);
    } else {
        /* Otherwise, we'll set up for shallow sleep at the precise time.
         * To make absolutely sure it's shallow so we don't incur the latency,
         * take our own lock, to avoid a race on a thread unlocking it.
         */
        _wake_early = false;
        if (MBED_CONF_TARGET_DEEP_SLEEP_LATENCY > 0 && !_deep_sleep_locked) {
            _deep_sleep_locked = true;
            sleep_manager_lock_deep_sleep();
        }
        insert_absolute(wake_time);
    }
}

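/** Cancel a pending wake-up interrupt.
 *
 * Clears the wake state, and releases the deep-sleep lock if
 * set_wake_time() took it.
 */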
void SysTimer::cancel_wake()
{
    MBED_ASSERT(!_ticking);
    // Remove ensures serialized access to SysTimer by stopping timer interrupt
    remove();

    _wake_time_set = false;
    _wake_time_passed = false;

    if (_deep_sleep_locked) {
        _deep_sleep_locked = false;
        sleep_manager_unlock_deep_sleep();
    }
}

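/** Number of whole ticks elapsed since the last recorded tick time.
 *
 * Read-only: does not advance _tick or _time_us.
 */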
uint64_t SysTimer::_elapsed_ticks() const
{
    uint64_t elapsed_us = ticker_read_us(_ticker_data) - _time_us;
    if (elapsed_us < US_IN_TICK) {
        return 0;
    } else if (elapsed_us < 2 * US_IN_TICK) {
        return 1;
    } else if (elapsed_us <= 0xFFFFFFFF) {
        // Fast common case avoiding 64-bit division
        return (uint32_t) elapsed_us / US_IN_TICK;
    } else {
        return elapsed_us / US_IN_TICK;
    }
}

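/** Resume the periodic OS tick.
 *
 * If ticks went unacknowledged while suspended, the tick IRQ is re-pended
 * so the OS can catch up.
 */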
void SysTimer::start_tick()
{
    _ticking = true;
    if (_unacknowledged_ticks > 0) {
        _set_irq_pending();
    }
    _schedule_tick();
}

void SysTimer::_schedule_tick()
{
    insert_absolute((get_tick() + 1) * US_IN_TICK);
}

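/** Called from the OS tick handler to confirm one tick was processed;
 * re-pends the tick IRQ if further ticks are still outstanding.
 */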
void SysTimer::acknowledge_tick()
{
    // Try to avoid missed ticks if the OS's IRQ level is not keeping
    // up with our handler.
    // 8-bit counter to save space, and also to make sure we don't
    // try TOO hard to resync if something goes really awry -
    // resync will reset if the count hits 256.
    if (core_util_atomic_decr_u8(&_unacknowledged_ticks, 1) > 0) {
        _set_irq_pending();
    }
}

void SysTimer::cancel_tick()
{
    // Underlying call is interrupt safe

    remove();
    _ticking = false;

    _clear_irq_pending();
}

uint64_t SysTimer::get_tick() const
{
    // Atomic is necessary as this can be called from any foreground context,
    // while IRQ can update it.
    return core_util_atomic_load_u64(&_tick);
}

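/** Bring _tick and _time_us up to date with the underlying ticker and
 * return the current tick count.
 *
 * Only valid while suspended, with no tick or wake interrupt scheduled.
 */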
uint64_t SysTimer::update_and_get_tick()
{
    // Can only be used when no interrupts are scheduled
    MBED_ASSERT(!_ticking && !_wake_time_set);

    // Update counters to reflect elapsed time
    uint64_t elapsed_ticks = _elapsed_ticks();
    _unacknowledged_ticks = 0;
    _time_us += elapsed_ticks * US_IN_TICK;
    _tick += elapsed_ticks;

    return _tick;
}

us_timestamp_t SysTimer::get_time() const
{
    // Underlying call is interrupt safe

    return ticker_read_us(_ticker_data);
}

us_timestamp_t SysTimer::get_time_since_tick() const
{
    // Underlying call is interrupt safe, and _time_us is not updated by IRQ

    return get_time() - _time_us;
}

#if (defined(NO_SYSTICK))
IRQn_Type SysTimer::get_irq_number()
{
    return mbed_get_m0_tick_irqn();
}
#elif (TARGET_CORTEX_M)
IRQn_Type SysTimer::get_irq_number()
{
    return SysTick_IRQn;
}
#elif (TARGET_CORTEX_A)
IRQn_ID_t SysTimer::get_irq_number()
{
    return mbed_get_a9_tick_irqn();
}
#endif

#if MBED_CONF_RTOS_PRESENT
void SysTimer::_set_irq_pending()
{
    // Protected function synchronized externally

#if (defined(NO_SYSTICK))
    NVIC_SetPendingIRQ(mbed_get_m0_tick_irqn());
#elif (TARGET_CORTEX_M)
    SCB->ICSR = SCB_ICSR_PENDSTSET_Msk;
#else
    IRQ_SetPending(mbed_get_a9_tick_irqn());
#endif
}

void SysTimer::_clear_irq_pending()
{
    // Protected function synchronized externally

#if (defined(NO_SYSTICK))
    NVIC_ClearPendingIRQ(mbed_get_m0_tick_irqn());
#elif (TARGET_CORTEX_M)
    SCB->ICSR = SCB_ICSR_PENDSTCLR_Msk;
#else
    IRQ_ClearPending(mbed_get_a9_tick_irqn());
#endif
}

void SysTimer::_increment_tick()
{
    // Protected function synchronized externally

    _tick++;
    _time_us += US_IN_TICK;
}
#endif

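/** Ticker event handler, shared by the wake-up and periodic-tick paths.
 *
 * For wake-ups it only records state (clears _wake_time_set, and sets
 * _wake_time_passed unless this was an early deep-sleep wake); for ticks
 * it counts the tick, pends the OS tick IRQ and re-arms itself for the
 * next tick.
 */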
void SysTimer::handler()
{
    /* To reduce IRQ latency problems, we do not re-arm in the interrupt handler */
    if (_wake_time_set) {
        _wake_time_set = false;
        if (!_wake_early) {
            _wake_time_passed = true;
        }
        /* If this was an early interrupt, the user has the responsibility to
         * check for and note the combination of (!set, !passed), and re-arm the
         * wake timer if necessary.
         */
    }
#if MBED_CONF_RTOS_PRESENT
    else if (_ticking) {
        _unacknowledged_ticks++;
        _set_irq_pending();
        _increment_tick();
        // We do this now, rather than in acknowledgement, as we get it "for free"
        // here - because we're in the ticker handler, the programming gets deferred
        // until end of dispatch, and the ticker would likely be rescheduling
        // anyway after dispatch.
        _schedule_tick();
    }
#endif
}

} // namespace internal
} // namespace mbed