@@ -190,17 +190,19 @@ enum CalibrateRTCSource {
// static ClockControl to allow DFS, etc.
static mut CLOCK_CONTROL: Option<ClockControl> = None;
+// mutex to allow safe multi-threaded access
+static CLOCK_CONTROL_MUTEX: spin::Mutex<()> = spin::Mutex::new(());

/// Clock configuration & locking for Dynamic Frequency Switching.
/// It allows a thread- and interrupt-safe way to switch between the default,
/// high CPU and APB frequency configurations.
-
-// All the single word reads of frequencies and sources are thread and interrupt safe
-// as these are atomic.
#[derive(Copy, Clone)]
pub struct ClockControlConfig {}

impl<'a> ClockControlConfig {
+    // All the single word reads of frequencies and sources are thread and interrupt safe
+    // as these are atomic.
+
    /// The current CPU frequency
    pub fn cpu_frequency(&self) -> Hertz {
        unsafe { CLOCK_CONTROL.as_ref().unwrap().cpu_frequency }
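A minimal sketch of how these lock-free getters are meant to be called (the calling function is an assumption, not part of this commit): because each getter copies a single word out of the static `CLOCK_CONTROL`, it can be used from threads or interrupt handlers without taking `CLOCK_CONTROL_MUTEX`.

```rust
// Hypothetical caller; runs on the ESP32 target, not on a host.
fn current_cpu_frequency(config: &ClockControlConfig) -> Hertz {
    // Single word-sized read: atomic on this architecture, so no
    // interrupt masking or mutex is needed here.
    config.cpu_frequency()
}
```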
@@ -306,6 +308,9 @@ impl<'a> ClockControlConfig {
        unsafe { CLOCK_CONTROL.as_ref().unwrap().fast_rtc_source }
    }

+    // The lock and unlock calls are thread and interrupt safe because this is handled inside
+    // the DFS routines
+
    /// Obtain a RAII lock to use the high CPU frequency
    pub fn lock_cpu_frequency(&self) -> dfs::LockCPU {
        unsafe { CLOCK_CONTROL.as_mut().unwrap().lock_cpu_frequency() }
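A usage sketch of the RAII lock (the calling function is hypothetical): the returned `dfs::LockCPU` guard keeps the high CPU frequency requested while it is alive, and the request is released when the guard is dropped, so no explicit unlock call is needed.

```rust
fn run_compute_burst(config: &ClockControlConfig) {
    // Request the high CPU frequency for the duration of this scope.
    let _lock = config.lock_cpu_frequency();

    // ... perform the time-critical work here ...

    // `_lock` is dropped at the end of the scope; once the last lock is
    // released, DFS is free to switch back to the default frequency.
}
```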
@@ -343,16 +348,52 @@ impl<'a> ClockControlConfig {
        unsafe { CLOCK_CONTROL.as_mut().unwrap().get_lock_count() }
    }

+    // The following routines are made thread and interrupt safe here
+
+    /// Halt the designated core
    pub unsafe fn park_core(&mut self, core: crate::Core) {
-        CLOCK_CONTROL.as_mut().unwrap().park_core(core)
+        interrupt::free(|_| {
+            CLOCK_CONTROL_MUTEX.lock();
+            CLOCK_CONTROL.as_mut().unwrap().park_core(core);
+        })
    }

+    /// Start (unpark) the designated core
    pub fn unpark_core(&mut self, core: crate::Core) {
-        unsafe { CLOCK_CONTROL.as_mut().unwrap().unpark_core(core) }
+        interrupt::free(|_| {
+            CLOCK_CONTROL_MUTEX.lock();
+            unsafe { CLOCK_CONTROL.as_mut().unwrap().unpark_core(core) }
+        })
    }

-    pub fn start_core(&mut self, core: crate::Core, f: fn() -> !) -> Result<(), Error> {
-        unsafe { CLOCK_CONTROL.as_mut().unwrap().start_core(core, f) }
+    /// Start the APP (second) core
+    ///
+    /// The second core will start running with the function `entry`.
+    pub fn start_app_core(&mut self, entry: fn() -> !) -> Result<(), Error> {
+        interrupt::free(|_| {
+            CLOCK_CONTROL_MUTEX.lock();
+            unsafe { CLOCK_CONTROL.as_mut().unwrap().start_app_core(entry) }
+        })
+    }
+
+    // The following routines handle thread and interrupt safety themselves
+
+    /// Get RTC tick count since boot
+    ///
+    /// *Note: this function takes up to one slow RTC clock cycle (can be up to 300us) and
+    /// interrupts are blocked during this time.*
+    pub fn rtc_tick_count(&self) -> TicksU64 {
+        unsafe { CLOCK_CONTROL.as_mut().unwrap().rtc_tick_count() }
+    }
+
+    /// Get nanoseconds since boot based on RTC tick count
+    ///
+    /// *Note: this function takes up to one slow RTC clock cycle (can be up to 300us) and
+    /// interrupts are blocked during this time.*
+    pub fn rtc_nanoseconds(&self) -> NanoSecondsU64 {
+        unsafe { CLOCK_CONTROL.as_mut().unwrap().rtc_nanoseconds() }
    }
}
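A sketch of how the renamed `start_app_core` might be used from application code (the entry function and error handling below are illustrative assumptions): the entry function has the required `fn() -> !` signature and never returns, and the wrapper shown above already serializes the call with `interrupt::free` and `CLOCK_CONTROL_MUTEX`.

```rust
// Hypothetical entry point for the APP (second) core.
fn app_core_entry() -> ! {
    loop {
        // work dedicated to the second core
    }
}

fn bring_up_app_core(config: &mut ClockControlConfig) {
    // Thread and interrupt safety is handled inside `start_app_core`,
    // so the caller does not need its own critical section.
    if let Err(_e) = config.start_app_core(app_core_entry) {
        // handle the error, e.g. fall back to single-core operation
    }
}
```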
@@ -1462,4 +1503,65 @@ impl ClockControl {
            CPUSource::APLL => unimplemented!(),
        }
    }
+
+    /// Get RTC tick count since boot
+    ///
+    /// This function can usually take up to one RTC clock cycle (~300us).
+    ///
+    /// In exceptional circumstances it could take up to two RTC clock cycles. This can happen
+    /// when an interrupt routine or the other core calls this function exactly in between
+    /// the loop checking for the valid bit and entering the critical section.
+    ///
+    /// Interrupts are only blocked during the actual reading of the clock register,
+    /// not during the wait for valid data.
+    pub fn rtc_tick_count(&self) -> TicksU64 {
+        self.rtc_control
+            .time_update
+            .modify(|_, w| w.time_update().set_bit());
+
+        loop {
+            // do this check outside the critical section, to prevent blocking interrupts and
+            // the other core for a long time
+            while self
+                .rtc_control
+                .time_update
+                .read()
+                .time_valid()
+                .bit_is_clear()
+            {}
+
+            if let Some(ticks) = interrupt::free(|_| {
+                CLOCK_CONTROL_MUTEX.lock();
+
+                // there is a small chance that this function is interrupted or called from
+                // the other core between detecting the valid bit and entering the interrupt-free,
+                // mutex-protected section, so check the valid bit again inside the critical section
+
+                if self
+                    .rtc_control
+                    .time_update
+                    .read()
+                    .time_valid()
+                    .bit_is_set()
+                {
+                    // this needs to be interrupt and thread safe, because if the time value
+                    // changes in between reading the upper and lower part this results in an
+                    // invalid value.
+                    let hi = self.rtc_control.time1.read().time_hi().bits() as u64;
+                    let lo = self.rtc_control.time0.read().bits() as u64;
+                    let ticks: TicksU64 = TicksU64::from((hi << 32) | lo);
+                    Some(ticks)
+                } else {
+                    None
+                }
+            }) {
+                return ticks;
+            }
+        }
+    }
+
+    /// Get nanoseconds since boot based on RTC tick count
+    pub fn rtc_nanoseconds(&self) -> NanoSecondsU64 {
+        self.rtc_tick_count() / self.slow_rtc_frequency
+    }
}
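The retry loop in `rtc_tick_count` is a double-checked critical section: spin on the hardware valid flag outside the lock so interrupts stay enabled, then re-verify the flag inside `interrupt::free` plus the mutex before reading the two 32-bit halves of the counter, and retry if the flag was lost in between. Below is a stripped-down sketch of the same pattern, with `flag_is_set`, `read_counter_words` and `enter_critical` as stand-ins for the RTC register reads and the interrupt-free, mutex-protected section; none of these names exist in the HAL.

```rust
fn read_64bit_counter() -> u64 {
    loop {
        // 1. Wait outside the critical section so interrupts and the other
        //    core are not blocked during the (up to ~300us) wait.
        while !flag_is_set() {}

        // 2. Re-check inside the critical section: an interrupt or the other
        //    core may have consumed the valid flag between step 1 and here.
        if let Some(value) = enter_critical(|| {
            if flag_is_set() {
                // The two halves must be read without interruption, otherwise
                // an update between the reads could produce a torn value.
                let (hi, lo) = read_counter_words();
                Some(((hi as u64) << 32) | lo as u64)
            } else {
                None
            }
        }) {
            return value;
        }
        // 3. Flag was lost: retry, costing at most one extra RTC cycle.
    }
}
```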