This repository was archived by the owner on Aug 9, 2022. It is now read-only.

Commit cd43221

Added retrieval of rtc_tick_count
Changed interrupt-free/multithreaded handling to be done in ClockControlConfig
1 parent 52d8af6 commit cd43221

File tree

3 files changed: 193 additions, 97 deletions

    examples/timer.rs
    src/clock_control/cpu.rs
    src/clock_control/mod.rs


examples/timer.rs

Lines changed: 2 additions & 1 deletion
@@ -142,8 +142,9 @@ fn main() -> ! {
         if let Some(ref mut tx) = TX.lock().deref_mut() {
             writeln!(
                 tx,
-                "Loop: {} {} {} {} {}",
+                "Loop: {} {:.3} {} {} {} {}",
                 x,
+                u64::from(clkcntrl_config.rtc_nanoseconds()) as f64 / 1000000000.0,
                 timer0.get_value(),
                 timer1.get_value(),
                 timer2.get_value(),

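The new format argument prints the RTC-derived uptime in seconds with millisecond resolution. A minimal sketch of the same conversion, assuming only the `u64::from(NanoSecondsU64)` conversion used above (the helper name is hypothetical):

    // Hypothetical helper mirroring the conversion in the example above:
    // nanoseconds since boot -> floating-point seconds.
    fn rtc_seconds(nanoseconds: u64) -> f64 {
        nanoseconds as f64 / 1_000_000_000.0
    }

    // e.g. rtc_seconds(u64::from(clkcntrl_config.rtc_nanoseconds()))
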
src/clock_control/cpu.rs

Lines changed: 82 additions & 89 deletions
@@ -4,61 +4,51 @@
 use super::Error;
 use crate::target;
 use crate::Core::{self, APP, PRO};
-use xtensa_lx6::interrupt;
 use xtensa_lx6::set_stack_pointer;
 
 static mut START_CORE1_FUNCTION: Option<fn() -> !> = None;
 
-static CPU_MUTEX: spin::Mutex<()> = spin::Mutex::new(());
-
 impl super::ClockControl {
     pub unsafe fn park_core(&mut self, core: Core) {
-        interrupt::free(|_| {
-            let _lock = CPU_MUTEX.lock();
-
-            match core {
-                PRO => {
-                    self.rtc_control
-                        .sw_cpu_stall
-                        .modify(|_, w| w.sw_stall_procpu_c1().bits(0x21));
-                    self.rtc_control
-                        .options0
-                        .modify(|_, w| w.sw_stall_procpu_c0().bits(0x02));
-                }
-                APP => {
-                    self.rtc_control
-                        .sw_cpu_stall
-                        .modify(|_, w| w.sw_stall_appcpu_c1().bits(0x21));
-                    self.rtc_control
-                        .options0
-                        .modify(|_, w| w.sw_stall_appcpu_c0().bits(0x02));
-                }
-            };
-        });
+        match core {
+            PRO => {
+                self.rtc_control
+                    .sw_cpu_stall
+                    .modify(|_, w| w.sw_stall_procpu_c1().bits(0x21));
+                self.rtc_control
+                    .options0
+                    .modify(|_, w| w.sw_stall_procpu_c0().bits(0x02));
+            }
+            APP => {
+                self.rtc_control
+                    .sw_cpu_stall
+                    .modify(|_, w| w.sw_stall_appcpu_c1().bits(0x21));
+                self.rtc_control
+                    .options0
+                    .modify(|_, w| w.sw_stall_appcpu_c0().bits(0x02));
+            }
+        }
     }
 
     pub fn unpark_core(&mut self, core: Core) {
-        interrupt::free(|_| {
-            let _lock = CPU_MUTEX.lock();
-            match core {
-                PRO => {
-                    self.rtc_control
-                        .sw_cpu_stall
-                        .modify(|_, w| unsafe { w.sw_stall_procpu_c1().bits(0) });
-                    self.rtc_control
-                        .options0
-                        .modify(|_, w| unsafe { w.sw_stall_procpu_c0().bits(0) });
-                }
-                APP => {
-                    self.rtc_control
-                        .sw_cpu_stall
-                        .modify(|_, w| unsafe { w.sw_stall_appcpu_c1().bits(0) });
-                    self.rtc_control
-                        .options0
-                        .modify(|_, w| unsafe { w.sw_stall_appcpu_c0().bits(0) });
-                }
-            };
-        });
+        match core {
+            PRO => {
+                self.rtc_control
+                    .sw_cpu_stall
+                    .modify(|_, w| unsafe { w.sw_stall_procpu_c1().bits(0) });
+                self.rtc_control
+                    .options0
+                    .modify(|_, w| unsafe { w.sw_stall_procpu_c0().bits(0) });
+            }
+            APP => {
+                self.rtc_control
+                    .sw_cpu_stall
+                    .modify(|_, w| unsafe { w.sw_stall_appcpu_c1().bits(0) });
+                self.rtc_control
+                    .options0
+                    .modify(|_, w| unsafe { w.sw_stall_appcpu_c0().bits(0) });
+            }
+        }
     }
 
     fn flush_cache(&mut self, core: Core) {
@@ -128,58 +118,61 @@ impl super::ClockControl {
             static mut _stack_end_cpu1: u32;
         }
 
+        // disables interrupts
+        xtensa_lx6::interrupt::set_mask(0);
+
+        // reset cycle compare registers
+        xtensa_lx6::timer::set_ccompare0(0);
+        xtensa_lx6::timer::set_ccompare1(0);
+        xtensa_lx6::timer::set_ccompare2(0);
+
         // set stack pointer to end of memory: no need to retain stack up to this point
         set_stack_pointer(&mut _stack_end_cpu1);
 
         START_CORE1_FUNCTION.unwrap()();
     }
 
-    pub fn start_core(&mut self, core: Core, f: fn() -> !) -> Result<(), Error> {
-        match core {
-            PRO => return Err(Error::CoreAlreadyRunning),
-            APP => interrupt::free(|_| {
-                // no mutex lock needed here:
-                // only starts if other core not running yet
-
-                if self
-                    .dport_control
-                    .appcpu_ctrl_b()
-                    .read()
-                    .appcpu_clkgate_en()
-                    .bit_is_set()
-                {
-                    return Err(Error::CoreAlreadyRunning);
-                }
-
-                self.flush_cache(core);
-                self.enable_cache(core);
-
-                unsafe {
-                    START_CORE1_FUNCTION = Some(f);
-                }
+    /// Start the APP (second) core
+    ///
+    /// The second core will start running with the function `entry`.
+    pub fn start_app_core(&mut self, entry: fn() -> !) -> Result<(), Error> {
+        if self
+            .dport_control
+            .appcpu_ctrl_b()
+            .read()
+            .appcpu_clkgate_en()
+            .bit_is_set()
+        {
+            return Err(Error::CoreAlreadyRunning);
+        }
 
-                self.dport_control.appcpu_ctrl_d().write(|w| unsafe {
-                    w.appcpu_boot_addr()
-                        .bits(Self::start_core1_init as *const u32 as u32)
-                });
+        self.flush_cache(Core::APP);
+        self.enable_cache(Core::APP);
 
-                self.dport_control
-                    .appcpu_ctrl_b()
-                    .modify(|_, w| w.appcpu_clkgate_en().set_bit());
-                self.dport_control
-                    .appcpu_ctrl_c()
-                    .modify(|_, w| w.appcpu_runstall().clear_bit());
-                self.dport_control
-                    .appcpu_ctrl_a()
-                    .modify(|_, w| w.appcpu_resetting().set_bit());
-                self.dport_control
-                    .appcpu_ctrl_a()
-                    .modify(|_, w| w.appcpu_resetting().clear_bit());
+        unsafe {
+            START_CORE1_FUNCTION = Some(entry);
+        }
 
-                self.unpark_core(core);
+        self.dport_control.appcpu_ctrl_d().write(|w| unsafe {
+            w.appcpu_boot_addr()
+                .bits(Self::start_core1_init as *const u32 as u32)
+        });
 
-                Ok(())
-            }),
-        }
+        self.dport_control
+            .appcpu_ctrl_b()
+            .modify(|_, w| w.appcpu_clkgate_en().set_bit());
+        self.dport_control
+            .appcpu_ctrl_c()
+            .modify(|_, w| w.appcpu_runstall().clear_bit());
+        self.dport_control
+            .appcpu_ctrl_a()
+            .modify(|_, w| w.appcpu_resetting().set_bit());
+        self.dport_control
+            .appcpu_ctrl_a()
+            .modify(|_, w| w.appcpu_resetting().clear_bit());
+
+        self.unpark_core(Core::APP);
+
+        Ok(())
     }
 }

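`start_app_core` replaces the core-parameterised `start_core`: the PRO core is already running, so only the APP core can be started, and the entry function must never return (`fn() -> !`). A minimal usage sketch, assuming a `clock_control: ClockControl` value is in scope (the variable and entry-function names are hypothetical):

    // Entry point for the APP core; it must never return.
    fn cpu1_start() -> ! {
        loop {
            // work for the second core goes here
        }
    }

    // Returns Err(Error::CoreAlreadyRunning) if the APP core clock gate is already enabled.
    if clock_control.start_app_core(cpu1_start).is_err() {
        // the APP core was already running
    }
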
src/clock_control/mod.rs

Lines changed: 109 additions & 7 deletions
@@ -190,17 +190,19 @@ enum CalibrateRTCSource {
 
 // static ClockControl to allow DFS, etc.
 static mut CLOCK_CONTROL: Option<ClockControl> = None;
+// mutex to allow safe multi-threaded access
+static CLOCK_CONTROL_MUTEX: spin::Mutex<()> = spin::Mutex::new(());
 
 /// Clock configuration & locking for Dynamic Frequency Switching.
 /// It allows thread and interrupt safe way to switch between default,
 /// high CPU and APB frequency configuration.
-
-// All the single word reads of frequencies and sources are thread and interrupt safe
-// as these are atomic.
 #[derive(Copy, Clone)]
 pub struct ClockControlConfig {}
 
 impl<'a> ClockControlConfig {
+    // All the single word reads of frequencies and sources are thread and interrupt safe
+    // as these are atomic.
+
     /// The current CPU frequency
     pub fn cpu_frequency(&self) -> Hertz {
         unsafe { CLOCK_CONTROL.as_ref().unwrap().cpu_frequency }
@@ -306,6 +308,9 @@ impl<'a> ClockControlConfig {
         unsafe { CLOCK_CONTROL.as_ref().unwrap().fast_rtc_source }
     }
 
+    // The lock and unlock calls are thread and interrupt safe because this is handled inside
+    // the DFS routines
+
     /// Obtain a RAII lock to use the high CPU frequency
    pub fn lock_cpu_frequency(&self) -> dfs::LockCPU {
         unsafe { CLOCK_CONTROL.as_mut().unwrap().lock_cpu_frequency() }
@@ -343,16 +348,52 @@ impl<'a> ClockControlConfig {
         unsafe { CLOCK_CONTROL.as_mut().unwrap().get_lock_count() }
     }
 
+    // The following routines are made thread and interrupt safe here
+
+    /// Halt the designated core
     pub unsafe fn park_core(&mut self, core: crate::Core) {
-        CLOCK_CONTROL.as_mut().unwrap().park_core(core)
+        interrupt::free(|_| {
+            CLOCK_CONTROL_MUTEX.lock();
+            CLOCK_CONTROL.as_mut().unwrap().park_core(core);
+        })
    }
 
+    /// Unpark (resume) the designated core
+    ///
+    /// The designated core resumes execution where it was parked.
     pub fn unpark_core(&mut self, core: crate::Core) {
-        unsafe { CLOCK_CONTROL.as_mut().unwrap().unpark_core(core) }
+        interrupt::free(|_| {
+            CLOCK_CONTROL_MUTEX.lock();
+            unsafe { CLOCK_CONTROL.as_mut().unwrap().unpark_core(core) }
+        })
     }
 
-    pub fn start_core(&mut self, core: crate::Core, f: fn() -> !) -> Result<(), Error> {
-        unsafe { CLOCK_CONTROL.as_mut().unwrap().start_core(core, f) }
+    /// Start the APP (second) core
+    ///
+    /// The second core will start running with the function `entry`.
+    pub fn start_app_core(&mut self, entry: fn() -> !) -> Result<(), Error> {
+        interrupt::free(|_| {
+            CLOCK_CONTROL_MUTEX.lock();
+            unsafe { CLOCK_CONTROL.as_mut().unwrap().start_app_core(entry) }
+        })
+    }
+
+    // The following routines handle thread and interrupt safety themselves
+
+    /// Get RTC tick count since boot
+    ///
+    /// *Note: this function takes up to one slow RTC clock cycle (can be up to 300us) and
+    /// interrupts are blocked during this time.*
+    pub fn rtc_tick_count(&self) -> TicksU64 {
+        unsafe { CLOCK_CONTROL.as_mut().unwrap().rtc_tick_count() }
+    }
+
+    /// Get nanoseconds since boot based on RTC tick count
+    ///
+    /// *Note: this function takes up to one slow RTC clock cycle (can be up to 300us) and
+    /// interrupts are blocked during this time.*
+    pub fn rtc_nanoseconds(&self) -> NanoSecondsU64 {
+        unsafe { CLOCK_CONTROL.as_mut().unwrap().rtc_nanoseconds() }
     }
 }
 
@@ -1462,4 +1503,65 @@ impl ClockControl {
             CPUSource::APLL => unimplemented!(),
         }
     }
+
+    /// Get RTC tick count since boot
+    ///
+    /// This function can usually take up to one RTC clock cycle (~300us).
+    ///
+    /// In exceptional circumstances it could take up to two RTC clock cycles. This can happen
+    /// when an interrupt routine or the other core calls this function exactly in between
+    /// the loop checking for the valid bit and entering the critical section.
+    ///
+    /// Interrupts are only blocked during the actual reading of the clock register,
+    /// not during the wait for valid data.
+    pub fn rtc_tick_count(&self) -> TicksU64 {
+        self.rtc_control
+            .time_update
+            .modify(|_, w| w.time_update().set_bit());
+
+        loop {
+            // do this check outside the critical section, to prevent blocking interrupts and
+            // the other core for a long time
+            while self
+                .rtc_control
+                .time_update
+                .read()
+                .time_valid()
+                .bit_is_clear()
+            {}
+
+            if let Some(ticks) = interrupt::free(|_| {
+                CLOCK_CONTROL_MUTEX.lock();
+
+                // there is a small chance that this function is interrupted or called from
+                // the other core between detecting the valid bit and entering the interrupt-free,
+                // mutex-protected section, so check the valid bit again inside the critical section
+
+                if self
+                    .rtc_control
+                    .time_update
+                    .read()
+                    .time_valid()
+                    .bit_is_set()
+                {
+                    // this needs to be interrupt and thread safe, because if the time value
+                    // changes in between reading the upper and lower part this results in an
+                    // invalid value.
+                    let hi = self.rtc_control.time1.read().time_hi().bits() as u64;
+                    let lo = self.rtc_control.time0.read().bits() as u64;
+                    let ticks: TicksU64 = TicksU64::from((hi << 32) | lo);
+                    Some(ticks)
+                } else {
+                    None
+                }
+            }) {
+                return ticks;
+            }
+        }
+    }
+
+    /// Get nanoseconds since boot based on RTC tick count
+    pub fn rtc_nanoseconds(&self) -> NanoSecondsU64 {
+        self.rtc_tick_count() / self.slow_rtc_frequency
+    }
 }

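With these changes, `ClockControlConfig` wraps the core-control and RTC-time calls and takes the interrupt-free section and `CLOCK_CONTROL_MUTEX` lock itself, so callers no longer deal with locking. A minimal usage sketch, assuming a `clkcntrl_config: ClockControlConfig` as in the timer example:

    // Both calls handle interrupt and thread safety internally, as documented above.
    let ticks: TicksU64 = clkcntrl_config.rtc_tick_count();
    let uptime: NanoSecondsU64 = clkcntrl_config.rtc_nanoseconds();

    // rtc_nanoseconds() is rtc_tick_count() divided by the slow RTC clock frequency,
    // so both values describe the same instant at different resolutions.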