diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 0e65bd7..26316fe 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -45,7 +45,8 @@ jobs: working-directory: zephyr-lang-rust run: | # Note that the above build doesn't set Zephyrbase, so we'll need to do that here. - west build -t rustdoc -b qemu_cortex_m3 docgen + west build -b qemu_cortex_m3 docgen + west build -t rustdoc mkdir rustdocs mv build/rust/target/thumbv7m-none-eabi/doc rustdocs/nostd diff --git a/docgen/prj.conf b/docgen/prj.conf index 4c55eb9..b23ff32 100644 --- a/docgen/prj.conf +++ b/docgen/prj.conf @@ -7,3 +7,6 @@ CONFIG_GPIO=y CONFIG_GPIO_ENABLE_DISABLE_INTERRUPT=y CONFIG_PRINTK=y CONFIG_POLL=y +CONFIG_I2C=y +CONFIG_I2C_RTIO=y +CONFIG_RTIO=y diff --git a/dt-rust.yaml b/dt-rust.yaml index c848277..4209e7a 100644 --- a/dt-rust.yaml +++ b/dt-rust.yaml @@ -7,13 +7,10 @@ # few instances were we can actually just match on a property. - name: gpio-controller rules: - - type: has_prop - value: gpio-controller + - !HasProp gpio-controller actions: - - type: instance - value: - raw: - type: myself + - !Instance + raw: !Myself device: crate::device::gpio::Gpio static_type: crate::device::gpio::GpioStatic @@ -22,36 +19,42 @@ # with each gpio. - name: gpio-leds rules: - - type: compatible - value: + - !Compatible + names: [gpio-leds] + level: 1 + actions: + - !Instance + raw: !Phandle gpios + device: crate::device::gpio::GpioPin + +# Hook up the gpio-keys as gpio pins as well +- name: gpio-keys + rules: + - !Compatible names: - - gpio-leds + - gpio-keys level: 1 actions: - - type: instance - value: - raw: - type: phandle - value: gpios + - !Instance + raw: !Phandle gpios device: crate::device::gpio::GpioPin # Flash controllers don't have any particular property to identify them, so we need a list of # compatible values that should match. - name: flash-controller rules: - - type: compatible - value: + - !Compatible names: - - "nordic,nrf52-flash-controller" - - "nordic,nrf51-flash-controller" - - "raspberrypi,pico-flash-controller" - - "zephyr,sim-flash" + - "nordic,nrf52-flash-controller" + - "nordic,nrf51-flash-controller" + - "raspberrypi,pico-flash-controller" + - "st,stm32g4-flash-controller" + - "st,stm32l5-flash-controller" + - "zephyr,sim-flash" level: 0 actions: - - type: instance - value: - raw: - type: myself + - !Instance + raw: !Myself device: crate::device::flash::FlashController # Flash partitions exist as children of a node compatible with "soc-nv-flash" that itself is a child @@ -59,31 +62,36 @@ # TODO: Get the write and erase property from the DT if present. - name: flash-partition rules: - - type: compatible - value: + - !Compatible names: - - "fixed-partitions" + - "fixed-partitions" level: 1 - - type: compatible - value: + - !Compatible names: - - "soc-nv-flash" + - "soc-nv-flash" level: 2 actions: - - type: instance - value: - raw: - type: parent - value: - level: 3 - args: - - type: reg + - !Instance + raw: !Parent + level: 3 + args: + - !Reg device: "crate::device::flash::FlashPartition" -# Generate a pseudo node that matches all of the labels across the tree with their nodes. -- name: labels +# I2C. +- name: i2c rules: - - type: root + - !Compatible + names: + - "snps,designware-i2c" + level: 0 actions: - - type: labels + - !Instance + raw: !Myself + device: crate::device::i2c::I2C +# Generate a pseudo node that matches all of the labels across the tree with their nodes. 
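The rules above now use YAML tags (`!HasProp`, `!Compatible`, `!Instance`, `!Myself`, `!Phandle`) in place of the old `type:`/`value:` maps; this pairs with the removal of the `#[serde(tag = "type", ...)]` attributes in `zephyr-build/src/devicetree/augment.rs` later in this diff. A minimal sketch of how that maps onto an enum, assuming the file is parsed with `serde_yaml` and its default externally tagged representation (the enum below is an illustrative stand-in, not the exact `Rule` type in `augment.rs`; the labels node continues just below):

```rust
use serde::Deserialize;

/// Illustrative stand-in for the `Rule` enum in augment.rs.
#[derive(Debug, Deserialize)]
enum Rule {
    HasProp(String),
    Compatible { names: Vec<String>, level: usize },
}

fn parse_rules() -> Result<Vec<Rule>, serde_yaml::Error> {
    let text = r#"
- !HasProp gpio-controller
- !Compatible
  names: [gpio-leds]
  level: 1
"#;
    // `!Variant` selects the enum variant directly, replacing the old
    // `type:` / `value:` wrapper maps.
    serde_yaml::from_str(text)
}
```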
+- name: labels + rules: !Root + actions: + - !Labels diff --git a/samples/bench/src/lib.rs b/samples/bench/src/lib.rs index 432384f..5a8c7f4 100644 --- a/samples/bench/src/lib.rs +++ b/samples/bench/src/lib.rs @@ -16,11 +16,11 @@ use alloc::collections::vec_deque::VecDeque; use alloc::vec; use executor::AsyncTests; use static_cell::StaticCell; -use zephyr::kobj_define; +use zephyr::define_work_queue; use zephyr::raw::k_yield; use zephyr::sync::{PinWeak, SpinMutex}; use zephyr::time::NoWait; -use zephyr::work::{SimpleAction, Work, WorkQueueBuilder}; +use zephyr::work::{SimpleAction, Work}; use zephyr::{ kconfig::CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC, printkln, @@ -80,7 +80,7 @@ extern "C" fn rust_main() { spin_bench(); sem_bench(); - let simple = Simple::new(tester.workq.clone()); + let simple = Simple::new(tester.workq); let mut num = 6; while num < 250 { simple.run(num, TOTAL_ITERS / num); @@ -147,7 +147,7 @@ struct ThreadTests { high_command: Sender, /// A work queue for the main runners. - workq: Arc, + workq: &'static WorkQueue, /// The test also all return their result to the main. The threads Send, the main running /// receives. @@ -163,15 +163,7 @@ impl ThreadTests { let (low_send, low_recv) = bounded(1); let (high_send, high_recv) = bounded(1); - let workq = Arc::new( - WorkQueueBuilder::new() - .set_priority(5) - .set_no_yield(true) - .start(WORK_STACK.init_once(()).unwrap()), - ); - - // Leak the workqueue so it doesn't get dropped. - let _ = Arc::into_raw(workq.clone()); + let workq = WORKQ.start(); let mut result = Self { sems: &SEMS, @@ -581,20 +573,20 @@ enum TestResult { /// The Simple test just does a ping pong test using manually submitted work. struct Simple { - workq: Arc, + workq: &'static WorkQueue, } impl Simple { - fn new(workq: Arc) -> Self { + fn new(workq: &'static WorkQueue) -> Self { Self { workq } } fn run(&self, workers: usize, iterations: usize) { // printkln!("Running Simple"); - let main = Work::new(SimpleMain::new(workers * iterations, self.workq.clone())); + let main = Work::new(SimpleMain::new(workers * iterations, self.workq)); let children: VecDeque<_> = (0..workers) - .map(|n| Work::new(SimpleWorker::new(main.clone(), self.workq.clone(), n))) + .map(|n| Work::new(SimpleWorker::new(main.clone(), self.workq, n))) .collect(); let mut locked = main.action().locked.lock().unwrap(); @@ -603,7 +595,7 @@ impl Simple { let start = now(); // Fire off main, which will run everything. - Work::submit_to_queue(main.clone(), &self.workq).unwrap(); + Work::submit_to_queue(main.clone(), self.workq).unwrap(); // And wait for the completion semaphore. main.action().done.take(Forever).unwrap(); @@ -642,12 +634,12 @@ impl Simple { /// A simple worker. When run, it submits the main worker to do the next work. struct SimpleWorker { main: PinWeak>, - workq: Arc, + workq: &'static WorkQueue, _id: usize, } impl SimpleWorker { - fn new(main: Pin>>, workq: Arc, id: usize) -> Self { + fn new(main: Pin>>, workq: &'static WorkQueue, id: usize) -> Self { Self { main: PinWeak::downgrade(main), workq, @@ -660,7 +652,7 @@ impl SimpleAction for SimpleWorker { fn act(self: Pin<&Self>) { // Each time we are run, fire the main worker back up. let main = self.main.upgrade().unwrap(); - Work::submit_to_queue(main.clone(), &self.workq).unwrap(); + Work::submit_to_queue(main.clone(), self.workq).unwrap(); } } @@ -670,7 +662,7 @@ impl SimpleAction for SimpleWorker { struct SimpleMain { /// All of the work items. 
locked: SpinMutex, - workq: Arc, + workq: &'static WorkQueue, done: Semaphore, } @@ -690,12 +682,12 @@ impl SimpleAction for SimpleMain { lock.count -= 1; drop(lock); - Work::submit_to_queue(worker.clone(), &self.workq).unwrap(); + Work::submit_to_queue(worker.clone(), self.workq).unwrap(); } } impl SimpleMain { - fn new(count: usize, workq: Arc) -> Self { + fn new(count: usize, workq: &'static WorkQueue) -> Self { Self { locked: SpinMutex::new(Locked::new(count)), done: Semaphore::new(0, 1), @@ -812,9 +804,7 @@ impl<'a> BenchTimer<'a> { } } -kobj_define! { - static WORK_STACK: ThreadStack; -} +define_work_queue!(WORKQ, WORK_STACK_SIZE, priority = 5, no_yield = true); static SEMS: [Semaphore; NUM_THREADS] = [const { Semaphore::new(0, u32::MAX) }; NUM_THREADS]; static BACK_SEMS: [Semaphore; NUM_THREADS] = [const { Semaphore::new(0, u32::MAX) }; NUM_THREADS]; diff --git a/samples/embassy/Cargo.toml b/samples/embassy/Cargo.toml index 6301463..beeee52 100644 --- a/samples/embassy/Cargo.toml +++ b/samples/embassy/Cargo.toml @@ -28,17 +28,16 @@ features = [ [dependencies.embassy-futures] version = "0.1.1" -# path = "../../embassy/embassy-futures" [dependencies.embassy-sync] version = "0.6.2" -# path = "../../embassy/embassy-sync" [dependencies.embassy-time] version = "0.4.0" -# path = "../../embassy/embassy-time" -# This is board specific. -features = ["tick-hz-10_000"] +# For real builds, you should figure out your target's tick rate and set the appropriate feature, +# like in these examples. Without this, embassy-time will assume a 1Mhz tick rate, and every time +# operation will involve a conversion. +#features = ["tick-hz-10_000"] [dependencies.critical-section] version = "1.2" diff --git a/tests/drivers/gpio-async/README.md b/tests/drivers/gpio-async/README.md new file mode 100644 index 0000000..de7a383 --- /dev/null +++ b/tests/drivers/gpio-async/README.md @@ -0,0 +1,6 @@ +# Async gpio + +This demo makes use of the GPIO `wait_for_high()` and `wait_for_low()` async operations. + +Unfortunately, not all GPIO controllers support level triggered interrupts. Notably, most of the +stm32 line does not support level triggered interrupts. 
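To make the README concrete, here is a hedged sketch of the wait loop it describes, modeled on the `sw0` alias and `GpioToken` handling used in this test's `src/lib.rs` (shown below). The exact signatures of `wait_for_high()`/`wait_for_low()` — in particular whether they take the GPIO token — are assumptions, not taken from the driver source:

```rust
use zephyr::printkln;
use zephyr::raw::{GPIO_INPUT, GPIO_PULL_DOWN};

async fn watch_sw0() {
    // Same devicetree alias and token handling as in src/lib.rs below.
    let mut sw0 = zephyr::devicetree::aliases::sw0::get_instance().unwrap();
    let mut token = unsafe { zephyr::device::gpio::GpioToken::get_instance().unwrap() };

    unsafe {
        sw0.configure(&mut token, GPIO_INPUT | GPIO_PULL_DOWN);
    }

    loop {
        // Assumed API shape: async methods that resolve when the level condition
        // is met, using level-triggered interrupts under the hood.
        sw0.wait_for_high(&mut token).await;
        printkln!("sw0 is high");
        sw0.wait_for_low(&mut token).await;
        printkln!("sw0 is low");
    }
}
```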
diff --git a/tests/drivers/gpio-async/prj.conf b/tests/drivers/gpio-async/prj.conf index f9a269b..4ac5fcd 100644 --- a/tests/drivers/gpio-async/prj.conf +++ b/tests/drivers/gpio-async/prj.conf @@ -13,4 +13,8 @@ CONFIG_RUST_ALLOC=y CONFIG_GPIO=y CONFIG_GPIO_ENABLE_DISABLE_INTERRUPT=y -CONFIG_LOG_BACKEND_RTT=n +# CONFIG_LOG_BACKEND_RTT=n + +CONFIG_UART_CONSOLE=n +CONFIG_RTT_CONSOLE=y +CONFIG_USE_SEGGER_RTT=y diff --git a/tests/drivers/gpio-async/src/lib.rs b/tests/drivers/gpio-async/src/lib.rs index ebccc5f..076d8c3 100644 --- a/tests/drivers/gpio-async/src/lib.rs +++ b/tests/drivers/gpio-async/src/lib.rs @@ -35,13 +35,18 @@ async fn main(spawner: Spawner) { info!("Hello world"); let _ = spawner; + /* let mut col0 = zephyr::devicetree::labels::col0::get_instance().unwrap(); let mut row0 = zephyr::devicetree::labels::row0::get_instance().unwrap(); + */ + let mut row0 = zephyr::devicetree::aliases::sw0::get_instance().unwrap(); let mut gpio_token = unsafe { zephyr::device::gpio::GpioToken::get_instance().unwrap() }; unsafe { + /* col0.configure(&mut gpio_token, GPIO_OUTPUT_ACTIVE); col0.set(&mut gpio_token, true); + */ row0.configure(&mut gpio_token, GPIO_INPUT | GPIO_PULL_DOWN); } diff --git a/zephyr-build/src/devicetree/augment.rs b/zephyr-build/src/devicetree/augment.rs index 3b781da..7b333a4 100644 --- a/zephyr-build/src/devicetree/augment.rs +++ b/zephyr-build/src/devicetree/augment.rs @@ -28,6 +28,12 @@ pub trait Augment { /// The default implementation checks if this node matches and calls a generator if it does, or /// does nothing if not. fn augment(&self, node: &Node, tree: &DeviceTree) -> TokenStream { + // If there is a status field present, and it is not set to "okay", don't augment this node. + if let Some(status) = node.get_single_string("status") { + if status != "okay" { + return TokenStream::new(); + } + } if self.is_compatible(node) { self.generate(node, tree) } else { @@ -75,7 +81,6 @@ impl Augment for Augmentation { /// A matching rule. #[derive(Debug, Serialize, Deserialize)] -#[serde(tag = "type", rename_all = "snake_case", content = "value")] pub enum Rule { /// A set of "or" matches. Or(Vec), @@ -121,7 +126,6 @@ fn parent_compatible(node: &Node, names: &[String], level: usize) -> bool { /// An action to perform #[derive(Debug, Serialize, Deserialize)] -#[serde(tag = "type", rename_all = "snake_case", content = "value")] pub enum Action { /// Generate an "instance" with a specific device name. Instance { @@ -171,7 +175,6 @@ impl Action { } #[derive(Debug, Serialize, Deserialize)] -#[serde(tag = "type", rename_all = "snake_case", content = "value")] pub enum RawInfo { /// Get the raw device directly from this node. Myself, @@ -270,7 +273,6 @@ impl RawInfo { /// /// At this point, we assume these all come from the current node. #[derive(Debug, Serialize, Deserialize)] -#[serde(tag = "type", rename_all = "snake_case", content = "value")] pub enum ArgInfo { /// The arguments come from a 'reg' property. 
Reg, diff --git a/zephyr-macros/Cargo.toml b/zephyr-macros/Cargo.toml index a9c386b..72c2a1b 100644 --- a/zephyr-macros/Cargo.toml +++ b/zephyr-macros/Cargo.toml @@ -9,7 +9,7 @@ descriptions = "Macros for managing tasks and work queues in Zephyr" proc-macro = true [dependencies] -syn = { version = "2.0.85", features = ["full", "visit"] } +syn = { version = "2.0.79", features = ["full", "visit"] } quote = "1.0.37" proc-macro2 = "1.0.86" darling = "0.20.1" diff --git a/zephyr-sys/build.rs b/zephyr-sys/build.rs index 05c6e94..2f50cb9 100644 --- a/zephyr-sys/build.rs +++ b/zephyr-sys/build.rs @@ -76,8 +76,13 @@ fn main() -> Result<()> { .derive_copy(false) .allowlist_function("k_.*") .allowlist_function("gpio_.*") + .allowlist_function("i2c_.*") .allowlist_function("flash_.*") + .allowlist_function("zr_.*") + .allowlist_function("mpsc_.*") + .allowlist_function("rtio.*") .allowlist_item("GPIO_.*") + .allowlist_item("I2C_.*") .allowlist_item("FLASH_.*") .allowlist_item("Z_.*") .allowlist_item("ZR_.*") diff --git a/zephyr-sys/wrapper.h b/zephyr-sys/wrapper.h index 98bb957..95f0fd2 100644 --- a/zephyr-sys/wrapper.h +++ b/zephyr-sys/wrapper.h @@ -42,6 +42,10 @@ extern int errno; #include #include #include +#include +#include +#include +#include /* * bindgen will only output #defined constants that resolve to simple numbers. These are some @@ -61,3 +65,21 @@ const uint32_t ZR_POLL_TYPE_DATA_AVAILABLE = K_POLL_TYPE_DATA_AVAILABLE; const uint32_t ZR_GPIO_INT_MODE_DISABLE_ONLY = GPIO_INT_MODE_DISABLE_ONLY; const uint32_t ZR_GPIO_INT_MODE_ENABLE_ONLY = GPIO_INT_MODE_ENABLE_ONLY; #endif + +const uint8_t ZR_I2C_MSG_WRITE = I2C_MSG_WRITE; +const uint8_t ZR_I2C_MSG_READ = I2C_MSG_READ; +const uint8_t ZR_I2C_MSG_STOP = I2C_MSG_STOP; + +const uint16_t ZR_RTIO_SQE_NO_RESPONSE = RTIO_SQE_NO_RESPONSE; + +/* + * Zephyr's irq_lock() and irq_unlock() are macros not inline functions, so we need some inlines to + * access them. + */ +static inline int zr_irq_lock(void) { + return irq_lock(); +} + +static inline void zr_irq_unlock(int key) { + irq_unlock(key); +} diff --git a/zephyr/src/device.rs b/zephyr/src/device.rs index 0bd80ac..5ad6076 100644 --- a/zephyr/src/device.rs +++ b/zephyr/src/device.rs @@ -12,6 +12,7 @@ use crate::sync::atomic::{AtomicBool, Ordering}; pub mod flash; pub mod gpio; +pub mod i2c; // Allow dead code, because it isn't required for a given build to have any devices. /// Device uniqueness. diff --git a/zephyr/src/device/gpio.rs b/zephyr/src/device/gpio.rs index f0e52c8..dfbea07 100644 --- a/zephyr/src/device/gpio.rs +++ b/zephyr/src/device/gpio.rs @@ -35,7 +35,10 @@ mod async_io { ZR_GPIO_INT_MODE_DISABLE_ONLY, }; - use crate::sync::atomic::{AtomicBool, AtomicU32}; + use crate::{ + printkln, + sync::atomic::{AtomicBool, AtomicU32}, + }; use super::{GpioPin, GpioToken}; @@ -112,6 +115,7 @@ mod async_io { cb: *mut gpio_callback, mut pins: gpio_port_pins_t, ) { + printkln!("GPIO callback: {}", pins); let data = unsafe { cb.cast::() .sub(mem::offset_of!(Self, callback)) diff --git a/zephyr/src/device/i2c.rs b/zephyr/src/device/i2c.rs new file mode 100644 index 0000000..654bf04 --- /dev/null +++ b/zephyr/src/device/i2c.rs @@ -0,0 +1,100 @@ +//! Zpehyr I2C interface + +use core::{ffi::c_int, marker::PhantomData}; + +use crate::{error::to_result, printkln, raw}; + +use super::{NoStatic, Unique}; + +/// A single I2C controller. +pub struct I2C { + /// The underlying device itself. 
+ #[allow(dead_code)] + pub(crate) device: *const raw::device, +} + +unsafe impl Send for I2C {} + +impl I2C { + /// Constructor, used by the devicetree generated code. + #[allow(dead_code)] + pub(crate) unsafe fn new( + unique: &Unique, + _data: &'static NoStatic, + device: *const raw::device, + ) -> Option { + if !unique.once() { + return None; + } + Some(I2C { device }) + } + + /// Do a write/read. + pub fn write_read(&mut self, write: &[u8], read: &mut [u8]) -> crate::Result { + let mut msg = [ + raw::i2c_msg { + buf: write.as_ptr() as *mut _, + len: write.len() as u32, + flags: raw::ZR_I2C_MSG_WRITE, + }, + raw::i2c_msg { + buf: read.as_mut_ptr(), + len: read.len() as u32, + flags: raw::ZR_I2C_MSG_READ | raw::ZR_I2C_MSG_STOP, + }, + ]; + let res = unsafe { to_result(raw::i2c_transfer(self.device, msg.as_mut_ptr(), 2, 0x42)) }; + + printkln!("res: {} {}", msg[1].len, msg[1].flags); + + res + } + + /// Add an i2c operation to the RTIO. + /// + /// TODO: Unclear how to indicate that the buffers must live long enough for the submittion. + /// As it is, this is actually completely unsound. + pub fn rtio_write_read(&mut self, write: &[u8], read: &mut [u8]) -> crate::Result<()> { + let _msg = [ + raw::i2c_msg { + buf: write.as_ptr() as *mut _, + len: write.len() as u32, + flags: raw::ZR_I2C_MSG_WRITE, + }, + raw::i2c_msg { + buf: read.as_mut_ptr(), + len: read.len() as u32, + flags: raw::ZR_I2C_MSG_READ | raw::ZR_I2C_MSG_STOP, + }, + ]; + + todo!() + } +} + +/// An i2c transaction. +pub struct ReadWrite<'a> { + _phantom: PhantomData<&'a ()>, + msgs: [raw::i2c_msg; 2], +} + +impl<'a> ReadWrite<'a> { + /// Construct a new read/write transaction. + pub fn new(write: &'a [u8], read: &'a mut [u8]) -> Self { + Self { + _phantom: PhantomData, + msgs: [ + raw::i2c_msg { + buf: write.as_ptr() as *mut _, + len: write.len() as u32, + flags: raw::ZR_I2C_MSG_WRITE, + }, + raw::i2c_msg { + buf: read.as_mut_ptr(), + len: read.len() as u32, + flags: raw::ZR_I2C_MSG_READ | raw::ZR_I2C_MSG_STOP, + }, + ], + } + } +} diff --git a/zephyr/src/embassy.rs b/zephyr/src/embassy.rs index 8bbc254..d924ae9 100644 --- a/zephyr/src/embassy.rs +++ b/zephyr/src/embassy.rs @@ -71,7 +71,7 @@ //! //! ## Caveats //! -//! [`Semaphore::take_async`]: crate::sys::sync::Semaphore::take_async +//! The executor currently doesn't support async waits on Zephyr primitives, such as Semaphore. #[cfg(feature = "time-driver")] mod time_driver; diff --git a/zephyr/src/embassy/time_driver.rs b/zephyr/src/embassy/time_driver.rs index c9e7324..f561613 100644 --- a/zephyr/src/embassy/time_driver.rs +++ b/zephyr/src/embassy/time_driver.rs @@ -14,6 +14,18 @@ use embassy_time_queue_utils::Queue; use crate::raw::{k_timeout_t, k_timer, k_timer_init, k_timer_start}; use crate::sys::K_FOREVER; +/// The time base configured into Zephyr. +pub const ZEPHYR_TICK_HZ: u64 = crate::time::SYS_FREQUENCY as u64; + +/// The configured Embassy time tick rate. +pub const EMBASSY_TICK_HZ: u64 = embassy_time_driver::TICK_HZ; + +/// When the zephyr and embassy rates differ, use this intermediate type. This can be selected by +/// feature. At the worst case, with Embassy's tick at 1Mhz, and Zephyr's at 50k, it is a little +/// over 11 years. Higher of either will reduce that further. But, 128-bit arithmetic is fairly +/// inefficient. 
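Stepping back to the I2C wrapper added above in `zephyr/src/device/i2c.rs`: a hedged sketch of how `write_read` might be called from application code. The `i2c0` devicetree label and the `0x0f` register are illustrative, and this draft of the driver hard-codes the target address inside `write_read` (the `0x42` passed to `i2c_transfer`), so no address appears at the call site. Note that the dt-rust.yaml rule above currently only instantiates `snps,designware-i2c` controllers.

```rust
/// Read a single "ID" register from a device behind the controller labeled `i2c0`.
/// (Label and register offset are placeholders for this sketch.)
fn read_sensor_id() -> zephyr::Result<u8> {
    let mut i2c = zephyr::devicetree::labels::i2c0::get_instance().unwrap();

    let mut id = [0u8; 1];
    // One transfer: write the register offset, then read one byte back.
    i2c.write_read(&[0x0f], &mut id)?;
    Ok(id[0])
}
```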
+type InterTime = u128; + embassy_time_driver::time_driver_impl!(static DRIVER: ZephyrTimeDriver = ZephyrTimeDriver { queue: Mutex::new(RefCell::new(Queue::new())), timer: Mutex::new(RefCell::new(unsafe { mem::zeroed() })), @@ -63,9 +75,40 @@ impl ZTimer { } } +/// Convert from a zephyr tick count, to an embassy tick count. +/// +/// This is done using an intermediate type defined above. +/// This conversion truncates. +fn zephyr_to_embassy(ticks: u64) -> u64 { + if ZEPHYR_TICK_HZ == EMBASSY_TICK_HZ { + // This should happen at compile time. + return ticks; + } + + // Otherwise do the intermediate conversion. + let prod = (ticks as InterTime) * (EMBASSY_TICK_HZ as InterTime); + (prod / (ZEPHYR_TICK_HZ as InterTime)) as u64 +} + +/// Convert from an embassy tick count to a zephyr. +/// +/// This conversion use ceil so that values are always large enough. +fn embassy_to_zephyr(ticks: u64) -> u64 { + if ZEPHYR_TICK_HZ == EMBASSY_TICK_HZ { + return ticks; + } + + let prod = (ticks as InterTime) * (ZEPHYR_TICK_HZ as InterTime); + prod.div_ceil(EMBASSY_TICK_HZ as InterTime) as u64 +} + +fn zephyr_now() -> u64 { + crate::time::now().ticks() +} + impl Driver for ZephyrTimeDriver { fn now(&self) -> u64 { - crate::time::now().ticks() + zephyr_to_embassy(zephyr_now()) } fn schedule_wake(&self, at: u64, waker: &core::task::Waker) { @@ -73,10 +116,13 @@ impl Driver for ZephyrTimeDriver { let mut queue = self.queue.borrow(cs).borrow_mut(); let mut timer = self.timer.borrow(cs).borrow_mut(); + // All times below are in Zephyr units. + let at = embassy_to_zephyr(at); + if queue.schedule_wake(at, waker) { - let mut next = queue.next_expiration(self.now()); - while !timer.set_alarm(next, self.now()) { - next = queue.next_expiration(self.now()); + let mut next = queue.next_expiration(zephyr_now()); + while !timer.set_alarm(next, zephyr_now()) { + next = queue.next_expiration(zephyr_now()); } } }) @@ -89,9 +135,9 @@ impl ZephyrTimeDriver { let mut queue = self.queue.borrow(cs).borrow_mut(); let mut timer = self.timer.borrow(cs).borrow_mut(); - let mut next = queue.next_expiration(self.now()); - while !timer.set_alarm(next, self.now()) { - next = queue.next_expiration(self.now()); + let mut next = queue.next_expiration(zephyr_now()); + while !timer.set_alarm(next, zephyr_now()) { + next = queue.next_expiration(zephyr_now()); } }) } diff --git a/zephyr/src/lib.rs b/zephyr/src/lib.rs index cccdfaf..cef1757 100644 --- a/zephyr/src/lib.rs +++ b/zephyr/src/lib.rs @@ -39,7 +39,7 @@ //! level operation that is still quite useful in regular code. //! - [`timer`]: Rust interfaces to Zephyr timers. These timers can be used either by registering a //! callback, or polled or waited for for an elapsed time. -//! - [`work`]: Zephyr work queues for Rust. The [`work::WorkQueueBuilder`] and resulting +//! - [`work`]: Zephyr work queues for Rust. The [`define_work_queue`] macro and resulting //! [`work::WorkQueue`] allow creation of Zephyr work queues to be used from Rust. The //! [`work::Work`] item had an action that will be invoked by the work queue, and can be manually //! submitted when needed. @@ -74,6 +74,8 @@ pub mod embassy; pub mod error; pub mod logging; pub mod object; +#[cfg(CONFIG_RTIO)] +pub mod rtio; #[cfg(CONFIG_RUST_ALLOC)] pub mod simpletls; pub mod sync; diff --git a/zephyr/src/rtio.rs b/zephyr/src/rtio.rs new file mode 100644 index 0000000..c400025 --- /dev/null +++ b/zephyr/src/rtio.rs @@ -0,0 +1,195 @@ +//! Interface to Zephyr 'rtio' infrastructure. 
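The new `rtio` module that follows wraps Zephyr's RTIO submission/completion machinery behind `RtioObject`, `Sqe`, and `Cqe`. A hedged usage sketch, assuming the two const generics seen on `RtioData` (SQE and CQE pool sizes) are also the parameters of `RtioObject`; the pool depths and the no-op request are purely illustrative. This requires `CONFIG_RTIO=y`, as added to the docgen `prj.conf` above:

```rust
use core::ptr::null_mut;

use zephyr::rtio::RtioObject;

// Statically allocated RTIO context with 4 SQEs and 4 CQEs.
static RTIO: RtioObject<4, 4> = RtioObject::new();

fn run_one_nop() -> i32 {
    // Grab a submission entry and turn it into a no-op request.
    let mut sqe = RTIO.sqe_acquire().expect("SQE pool exhausted");
    sqe.prep_nop(null_mut(), null_mut());

    // Submit, asking rtio_submit() to wait for one completion.
    RTIO.submit(1).expect("rtio submit failed");

    // Drain the completion; the `Cqe` is released back to the pool on drop.
    loop {
        if let Some(cqe) = RTIO.cqe_consume() {
            return cqe.result();
        }
    }
}
```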
+ +use core::ffi::c_void; + +use crate::error::to_result_void; +use crate::object::{ObjectInit, ZephyrObject}; +use crate::raw; + +/// The underlying structure, holding the rtio, it's semaphores, and pools. +/// +/// Note that putting these together in a single struct makes this "pleasant" to use from Rust, but +/// does make the end result incompatible with userspace. +#[repr(C)] +pub struct RtioData { + /// The overall rtio struct. + rtio: raw::rtio, + /// Sempahore used for the submission queue. + #[cfg(CONFIG_RTIO_SUBMIT_SEM)] + submit_sem: raw::k_sem, + /// Semaphore used for the consumption queue. + #[cfg(CONFIG_RTIO_CONSUME_SEM)] + consume_sem: raw::k_sem, + /// The SQE items. + sqe_pool_items: [raw::rtio_iodev_sqe; SQE_SZ], + /// The SQE pool itself. + sqe_pool: raw::rtio_sqe_pool, + /// The CQE items. + cqe_pool_items: [raw::rtio_cqe; CQE_SZ], + /// The pool of CQEs. + cqe_pool: raw::rtio_cqe_pool, +} + +/// Init based reference to the the underlying rtio object. +/// +/// Note that this declaration will _not_ support userspace currently, as the object will not be +/// placed in an iterable linker section. Also, the linker sevction will not work as the +/// ZephyrObject will have an attached atomic used to ensure proper initialization. +pub struct RtioObject( + pub(crate) ZephyrObject>, +); + +unsafe impl Sync for RtioObject {} + +impl RtioObject { + /// Construct a new RTIO pool. + /// + /// Create a new RTIO object. These objects generally need to be statically allocated. + pub const fn new() -> Self { + let this = >>::new_raw(); + RtioObject(this) + } + + /// Acquire a submission object. + pub fn sqe_acquire(&'static self) -> Option { + let this = unsafe { self.0.get() }; + + let ptr = unsafe { raw::rtio_sqe_acquire(&raw mut (*this).rtio) }; + + if ptr.is_null() { + None + } else { + Some(Sqe { item: ptr }) + } + } + + /// Submit the work. + pub fn submit(&'static self, wait: usize) -> crate::Result<()> { + let this = unsafe { self.0.get() }; + + unsafe { to_result_void(raw::rtio_submit(&raw mut (*this).rtio, wait as u32)) } + } + + /// Consume a single completion. + /// + /// Will return the completion if available. If returned, it will be released upon drop. + pub fn cqe_consume(&'static self) -> Option { + let this = unsafe { self.0.get() }; + + let ptr = unsafe { raw::rtio_cqe_consume(&raw mut (*this).rtio) }; + + if ptr.is_null() { + None + } else { + Some(Cqe { + item: ptr, + rtio: unsafe { &raw mut (*this).rtio }, + }) + } + } +} + +impl ObjectInit> + for ZephyrObject> +{ + fn init(item: *mut RtioData) { + #[cfg(CONFIG_RTIO_SUBMIT_SEM)] + unsafe { + raw::k_sem_init(&raw mut (*item).submit_sem, 0, raw::K_SEM_MAX_LIMIT); + (*item).rtio.submit_sem = &raw mut (*item).submit_sem; + (*item).rtio.submit_count = 0; + } + #[cfg(CONFIG_RTIO_CONSUME_SEM)] + unsafe { + raw::k_sem_init(&raw mut (*item).consume_sem, 0, raw::K_SEM_MAX_LIMIT); + (*item).rtio.consume_sem = &raw mut (*item).consume_sem; + } + unsafe { + // TODO: Zephyr atomic init? + (*item).rtio.cq_count = 0; + (*item).rtio.xcqcnt = 0; + + // Set up the sqe pool. 
+ raw::mpsc_init(&raw mut (*item).sqe_pool.free_q); + (*item).sqe_pool.pool_size = SQE_SZ as u16; + (*item).sqe_pool.pool_free = SQE_SZ as u16; + (*item).sqe_pool.pool = (*item).sqe_pool_items.as_mut_ptr(); + + for p in &mut (*item).sqe_pool_items { + raw::mpsc_push(&raw mut (*item).sqe_pool.free_q, &raw mut p.q); + } + + // Set up the cqe pool + raw::mpsc_init(&raw mut (*item).cqe_pool.free_q); + (*item).cqe_pool.pool_size = CQE_SZ as u16; + (*item).cqe_pool.pool_free = CQE_SZ as u16; + (*item).cqe_pool.pool = (*item).cqe_pool_items.as_mut_ptr(); + + for p in &mut (*item).cqe_pool_items { + raw::mpsc_push(&raw mut (*item).cqe_pool.free_q, &raw mut p.q); + } + + (*item).rtio.sqe_pool = &raw mut (*item).sqe_pool; + (*item).rtio.cqe_pool = &raw mut (*item).cqe_pool; + + raw::mpsc_init(&raw mut (*item).rtio.sq); + raw::mpsc_init(&raw mut (*item).rtio.cq); + } + } +} + +/// A single Sqe. +/// +/// TODO: How to bind the lifetime to the Rtio meaningfully, even though it is all static. +pub struct Sqe { + item: *mut raw::rtio_sqe, +} + +impl Sqe { + /// Configure this SQE as a callback. + pub fn prep_callback( + &mut self, + callback: raw::rtio_callback_t, + arg0: *mut c_void, + userdata: *mut c_void, + ) { + unsafe { + raw::rtio_sqe_prep_callback(self.item, callback, arg0, userdata); + } + } + + /// Configure this SQE as a nop. + pub fn prep_nop(&mut self, dev: *mut raw::rtio_iodev, userdata: *mut c_void) { + unsafe { + raw::rtio_sqe_prep_nop(self.item, dev, userdata); + } + } + + /// Add flags. + pub fn or_flags(&mut self, flags: u16) { + unsafe { + (*self.item).flags |= flags; + } + } +} + +/// A single Cqe. +pub struct Cqe { + item: *mut raw::rtio_cqe, + rtio: *mut raw::rtio, +} + +impl Cqe { + /// Retrieve the result of this operation. + pub fn result(&self) -> i32 { + unsafe { (*self.item).result } + } +} + +impl Drop for Cqe { + fn drop(&mut self) { + unsafe { + raw::rtio_cqe_release(self.rtio, self.item); + } + } +} diff --git a/zephyr/src/sys.rs b/zephyr/src/sys.rs index 7f127d8..e16c255 100644 --- a/zephyr/src/sys.rs +++ b/zephyr/src/sys.rs @@ -39,42 +39,42 @@ pub fn uptime_get() -> i64 { unsafe { crate::raw::k_uptime_get() } } +// The below implementation, based on interrupt locking has only been tested on single CPU. The +// implementation suggests it should work on SMP, and can be tested. The docs for irq_lock() +// explicitly state that it cannot be used from userspace. Unfortunately, spinlocks have +// incompatible semantics with critical sections, so to work with userspace we'd need probably a +// syscall. +#[cfg(CONFIG_USERSPACE)] +compile_error!("Critical-section implementation does not work with CONFIG_USERSPACE"); + pub mod critical { //! Zephyr implementation of critical sections. //! - //! Critical sections from Rust are handled with a single Zephyr spinlock. This doesn't allow - //! any nesting, but neither does the `critical-section` crate. - //! - //! This provides the underlying critical section crate, which is useful for external crates - //! that want this interface. However, it isn't a particularly hygienic interface to use. For - //! something a bit nicer, please see [`sync::SpinMutex`]. - //! - //! [`sync::SpinMutex`]: crate::sync::SpinMutex + //! The critical-section crate explicitly states that critical sections can be nested. + //! Unfortunately, Zephyr spinlocks cannot be nested. It is possible to nest different ones, + //! but the critical-section implementation API doesn't give access to the stack. 
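As the rewritten module comment explains, the point of switching from a single shared spinlock to `irq_lock()`/`irq_unlock()` is that the `critical-section` crate allows nesting, while one `k_spinlock` cannot be taken recursively. Zephyr's `irq_lock()` does nest: it returns the previous interrupt state, and `irq_unlock()` restores it. A small sketch of the now-legal pattern (the nesting here only demonstrates re-entrancy, it is not a recommendation):

```rust
use critical_section as cs;

fn nested_sections() {
    cs::with(|_outer| {
        // Interrupts are masked here via zr_irq_lock().
        cs::with(|_inner| {
            // Nested acquire: the inner release restores the outer state
            // rather than unconditionally re-enabling interrupts.
        });
        // Still inside the outer critical section.
    });
}
```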
- use core::{ffi::c_int, ptr::addr_of_mut}; + use core::{ + ffi::c_int, + sync::atomic::{fence, Ordering}, + }; use critical_section::RawRestoreState; - use zephyr_sys::{k_spin_lock, k_spin_unlock, k_spinlock, k_spinlock_key_t}; + use zephyr_sys::{zr_irq_lock, zr_irq_unlock}; struct ZephyrCriticalSection; critical_section::set_impl!(ZephyrCriticalSection); - // The critical section shares a single spinlock. - static mut LOCK: k_spinlock = unsafe { core::mem::zeroed() }; - unsafe impl critical_section::Impl for ZephyrCriticalSection { unsafe fn acquire() -> RawRestoreState { - let res = k_spin_lock(addr_of_mut!(LOCK)); - res.key as RawRestoreState + let res = zr_irq_lock(); + fence(Ordering::Acquire); + res as RawRestoreState } unsafe fn release(token: RawRestoreState) { - k_spin_unlock( - addr_of_mut!(LOCK), - k_spinlock_key_t { - key: token as c_int, - }, - ); + fence(Ordering::Release); + zr_irq_unlock(token as c_int); } } } diff --git a/zephyr/src/work.rs b/zephyr/src/work.rs index c3ed8d2..21ca411 100644 --- a/zephyr/src/work.rs +++ b/zephyr/src/work.rs @@ -17,227 +17,149 @@ //! having the `k_work` embedded in their structure, and Zephyr schedules the work when the given //! reason happens. //! -//! At this time, only the basic work queue type is supported. +//! At this point, this code supports the simple work queues, with [`Work`] items. //! -//! Zephyr's work queues can be used in different ways: +//! Work Queues should be declared with the `define_work_queue!` macro, this macro requires the name +//! of the symbol for the work queue, the stack size, and then zero or more optional arguments, +//! defined by the fields in the [`WorkQueueDeclArgs`] struct. For example: //! -//! - Work can be scheduled as needed. For example, an IRQ handler can queue a work item to process -//! data it has received from a device. -//! - Work can be scheduled periodically. -//! -//! As most C use of Zephyr statically allocates things like work, these are typically rescheduled -//! when the work is complete. The work queue scheduling functions are designed, and intended, for -//! a given work item to be able to reschedule itself, and such usage is common. -//! -//! ## Ownership -//! -//! The remaining challenge with implementing `k_work` for Rust is that of ownership. The model -//! taken here is that the work items are held in a `Box` that is effectively owned by the work -//! itself. When the work item is scheduled to Zephyr, ownership of that box is effectively handed -//! off to C, and then when the work item is called, the Box re-constructed. This repeats until the -//! work is no longer needed, at which point the work will be dropped. -//! -//! There are two common ways the lifecycle of work can be managed in an embedded system: -//! -//! - A set of `Future`'s are allocated once at the start, and these never return a value. Work -//! Futures inside of this (which correspond to `.await` in async code) can have lives and return -//! values, but the main loops will not return values, or be dropped. Embedded Futures will -//! typically not be boxed. -//! -//! One consequence of the ownership being passed through to C code is that if the work cancellation -//! mechanism is used on a work queue, the work items themselves will be leaked. -//! -//! These work items are also `Pin`, to ensure that the work actions are not moved. -//! -//! ## The work queues themselves -//! -//! Workqueues themselves are built using [`WorkQueueBuilder`]. This needs a statically defined -//! stack. 
Typical usage will be along the lines of: //! ```rust -//! kobj_define! { -//! WORKER_STACK: ThreadStack<2048>; -//! } -//! // ... -//! let main_worker = Box::new( -//! WorkQueueBuilder::new() -//! .set_priority(2). -//! .set_name(c"mainloop") -//! .set_no_yield(true) -//! .start(MAIN_LOOP_STACK.init_once(()).unwrap()) -//! ); -//! -//! let _ = zephyr::kio::spawn( -//! mainloop(), // Async or function returning Future. -//! &main_worker, -//! c"w:mainloop", -//! ); -//! -//! ... -//! -//! // Leak the Box so that the worker is never freed. -//! let _ = Box::leak(main_worker); +//! define_work_queue!(MY_WORKQ, 2048, no_yield = true, priority = 2); //! ``` //! -//! It is important that WorkQueues never be dropped. It has a Drop implementation that invokes -//! panic. Zephyr provides no mechanism to stop work queue threads, so dropping would result in -//! undefined behavior. -//! -//! # Current Status -//! -//! Although Zephyr has 3 types of work queues, the `k_work_poll` is sufficient to implement all of -//! the behavior, and this implementation only implements this type. Non Future work could be built -//! around the other work types. -//! -//! As such, this means that manually constructed work is still built using `Future`. The `_async` -//! primitives throughout this crate can be used just as readily by hand-written Futures as by async -//! code. Notable, the use of [`Signal`] will likely be common, along with possible timeouts. -//! -//! [`sys::sync::Semaphore`]: crate::sys::sync::Semaphore -//! [`sync::channel`]: crate::sync::channel -//! [`sync::Mutex`]: crate::sync::Mutex -//! [`join`]: futures::JoinHandle::join -//! [`join_async`]: futures::JoinHandle::join_async +//! Then, in code, the work queue can be started, and used to issue work. +//! ```rust +//! let my_workq = MY_WORKQ.start(); +//! let action = Work::new(action_item); +//! action.submit(my_workq); +//! ``` extern crate alloc; use core::{ - cell::UnsafeCell, - ffi::{c_int, c_uint, CStr}, + cell::{RefCell, UnsafeCell}, + ffi::{c_char, c_int, c_uint}, mem, pin::Pin, - ptr, + sync::atomic::Ordering, }; +use critical_section::Mutex; +use portable_atomic::AtomicBool; +use portable_atomic_util::Arc; use zephyr_sys::{ k_poll_signal, k_poll_signal_check, k_poll_signal_init, k_poll_signal_raise, k_poll_signal_reset, k_work, k_work_init, k_work_q, k_work_queue_config, k_work_queue_init, - k_work_queue_start, k_work_submit, k_work_submit_to_queue, + k_work_queue_start, k_work_submit, k_work_submit_to_queue, z_thread_stack_element, }; -use crate::{ - error::to_result_void, - object::Fixed, - simpletls::SimpleTls, - sync::{Arc, Mutex}, - sys::thread::ThreadStack, -}; +use crate::{error::to_result_void, object::Fixed, simpletls::SimpleTls}; -/// A builder for work queues themselves. -/// -/// A work queue is a Zephyr thread that instead of directly running a piece of code, manages a work -/// queue. Various types of `Work` can be submitted to these queues, along with various types of -/// triggering conditions. -pub struct WorkQueueBuilder { - /// The "config" value passed in. - config: k_work_queue_config, - /// Priority for the work queue thread. - priority: c_int, +/// The WorkQueue decl args as a struct, so we can have a default, and the macro can fill in those +/// specified by the user. +pub struct WorkQueueDeclArgs { + /// Should this work queue call yield after each queued item. + pub no_yield: bool, + /// Is this work queue thread "essential". + /// + /// Threads marked essential will panic if they stop running. 
+ pub essential: bool, + /// Zephyr thread priority for the work queue thread. + pub priority: c_int, } -impl WorkQueueBuilder { - /// Construct a new WorkQueueBuilder with default values. - pub fn new() -> Self { +impl WorkQueueDeclArgs { + /// Like `Default::default`, but const. + pub const fn default_values() -> Self { Self { - config: k_work_queue_config { - name: ptr::null(), - no_yield: false, - essential: false, - }, + no_yield: false, + essential: false, priority: 0, } } +} - /// Set the name for the WorkQueue thread. - /// - /// This name shows up in debuggers and some analysis tools. - pub fn set_name(&mut self, name: &'static CStr) -> &mut Self { - self.config.name = name.as_ptr(); - self - } - - /// Set the "no yield" flag for the created worker. - /// - /// If this is not set, the work queue will call `k_yield` between each enqueued work item. For - /// non-preemptible threads, this will allow other threads to run. For preemptible threads, - /// this will allow other threads at the same priority to run. - /// - /// This method has a negative in the name, which goes against typical conventions. This is - /// done to match the field in the Zephyr config. - pub fn set_no_yield(&mut self, value: bool) -> &mut Self { - self.config.no_yield = value; - self - } - - /// Set the "essential" flag for the created worker. - /// - /// This sets the essential flag on the running thread. The system considers the termination of - /// an essential thread to be a fatal error. - pub fn set_essential(&mut self, value: bool) -> &mut Self { - self.config.essential = value; - self - } - - /// Set the priority for the worker thread. - /// - /// See the Zephyr docs for the meaning of priority. - pub fn set_priority(&mut self, value: c_int) -> &mut Self { - self.priority = value; - self - } +/// A static declaration of a work-queue. This associates a work queue, with a stack, and an atomic +/// to determine if it has been initialized. +// TODO: Remove the pub on the fields, and make a constructor. +pub struct WorkQueueDecl { + queue: WorkQueue, + stack: &'static crate::thread::ThreadStack, + config: k_work_queue_config, + priority: c_int, + started: AtomicBool, +} - /// Start the given work queue thread. - /// - /// TODO: Implement a 'start' that works from a static work queue. - pub fn start(&self, stack: ThreadStack) -> WorkQueue { - let item: Fixed = Fixed::new(unsafe { mem::zeroed() }); - unsafe { - // SAFETY: Initialize zeroed memory. - k_work_queue_init(item.get()); - - // SAFETY: This associates the workqueue with the thread ID that runs it. The thread is - // a pointer into this work item, which will not move, because of the Fixed. - let this = &mut *item.get(); - WORK_QUEUES - .lock() - .unwrap() - .insert(&this.thread, WorkQueueRef(item.get())); - - // SAFETY: Start work queue thread. The main issue here is that the work queue cannot - // be deallocated once the thread has started. We enforce this by making Drop panic. - k_work_queue_start( - item.get(), - stack.base, - stack.size, - self.priority, - &self.config, - ); +// SAFETY: Sync is needed here to make a static declaration, despite the `*const i8` that is burried +// in the config. +unsafe impl Sync for WorkQueueDecl {} + +impl WorkQueueDecl { + /// Static constructor. Mostly for use by the macro. 
+ pub const fn new( + stack: &'static crate::thread::ThreadStack, + name: *const c_char, + args: WorkQueueDeclArgs, + ) -> Self { + Self { + queue: unsafe { mem::zeroed() }, + stack, + config: k_work_queue_config { + name, + no_yield: args.no_yield, + essential: args.essential, + }, + priority: args.priority, + started: AtomicBool::new(false), } + } - WorkQueue { item } + /// Start the work queue thread, if needed, and return a reference to it. + pub fn start(&'static self) -> &'static WorkQueue { + critical_section::with(|cs| { + if self.started.load(Ordering::Relaxed) { + // Already started, just return it. + return &self.queue; + } + + // SAFETY: Starting is coordinated by the atomic, as well as being protected in a + // critical section. + unsafe { + let this = &mut *self.queue.item.get(); + + k_work_queue_init(self.queue.item.get()); + + // Add to the WORK_QUEUES data. That needs to be changed to a critical + // section Mutex from a Zephyr Mutex, as that would deadlock if called while in a + // critrical section. + let mut tls = WORK_QUEUES.borrow_ref_mut(cs); + tls.insert(&this.thread, WorkQueueRef(self.queue.item.get())); + + // Start the work queue thread. + k_work_queue_start( + self.queue.item.get(), + self.stack.data.get() as *mut z_thread_stack_element, + self.stack.size(), + self.priority, + &self.config, + ); + } + + &self.queue + }) } } /// A running work queue thread. /// -/// # Panic -/// -/// Allowing a work queue to drop will result in a panic. There are two ways to handle this, -/// depending on whether the WorkQueue is in a Box, or an Arc: -/// ``` -/// // Leak a work queue in an Arc. -/// let wq = Arc::new(WorkQueueBuilder::new().start(...)); -/// // If the Arc is used after this: -/// let _ = Arc::into_raw(wq.clone()); -/// // If the Arc is no longer needed: -/// let _ = Arc::into_raw(wq); +/// This must be declared statically, and initialized once. Please see the macro +/// [`define_work_queue`] which declares this with a [`WorkQueue`] to help with the +/// association with a stack, and making sure the queue is only started once. /// -/// // Leak a work queue in a Box. -/// let wq = Box::new(WorkQueueBuilder::new().start(...)); -/// let _ = Box::leak(wq); -/// ``` +/// [`define_work_queue`]: crate::define_work_queue pub struct WorkQueue { #[allow(dead_code)] - item: Fixed, + item: UnsafeCell, } /// Work queues can be referenced from multiple threads, and thus are Send and Sync. @@ -265,7 +187,8 @@ impl Drop for WorkQueue { /// /// This is a little bit messy as we don't have a lazy mechanism, so we have to handle this a bit /// manually right now. -static WORK_QUEUES: Mutex> = Mutex::new(SimpleTls::new()); +static WORK_QUEUES: Mutex>> = + Mutex::new(RefCell::new(SimpleTls::new())); /// For the queue mapping, we need a simple wrapper around the underlying pointer, one that doesn't /// implement stop. @@ -278,7 +201,7 @@ unsafe impl Sync for WorkQueueRef {} /// Retrieve the current work queue, if we are running within one. pub fn get_current_workq() -> Option<*mut k_work_q> { - WORK_QUEUES.lock().unwrap().get().map(|wq| wq.0) + critical_section::with(|cs| WORK_QUEUES.borrow_ref(cs).get().map(|wq| wq.0)) } /// A Rust wrapper for `k_poll_signal`. 
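One side effect of moving the `WORK_QUEUES` registry from a Zephyr mutex to a `critical-section` mutex is that `get_current_workq()` no longer takes a kernel lock to do its lookup. A small, purely illustrative sketch of querying it:

```rust
use zephyr::printkln;
use zephyr::work::get_current_workq;

fn report_context() {
    match get_current_workq() {
        // The raw `*mut k_work_q` of the queue this thread is servicing.
        Some(q) => printkln!("running on work queue {:?}", q),
        None => printkln!("not running on a work queue"),
    }
}
```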
@@ -408,6 +331,24 @@ impl SubmitResult { } } +/* +pub trait Queueable: Send + Sync { + fn as_ptr(&self) -> *const (); +} + +impl Queueable for Arc { + fn as_ptr(&self) -> *const () { + todo!() + } +} + +impl Queueable for &'static T { + fn as_ptr(&self) -> *const () { + todo!() + } +} +*/ + /// A simple action that just does something with its data. /// /// This is similar to a Future, except there is no concept of it completing. It manages its @@ -480,17 +421,24 @@ impl Work { // SAFETY: C the code does not perform moves on the data, and the `from_raw` below puts it // back into a Pin when it reconstructs the Arc. let this = unsafe { Pin::into_inner_unchecked(this) }; - let _ = Arc::into_raw(this); + let _ = Arc::into_raw(this.clone()); // SAFETY: The Pin ensures this will not move. Our implementation of drop ensures that the // work item is no longer queued when the data is dropped. - SubmitResult::to_result(unsafe { k_work_submit(work) }) + let result = SubmitResult::to_result(unsafe { k_work_submit(work) }); + + Self::check_drop(work, &result); + + result } /// Submit this work to a specified work queue. /// /// TODO: Change when we have better wrappers for work queues. - pub fn submit_to_queue(this: Pin>, queue: &WorkQueue) -> crate::Result { + pub fn submit_to_queue( + this: Pin>, + queue: &'static WorkQueue, + ) -> crate::Result { let work = this.work.get(); // "leak" the arc to give to C. We'll reconstruct it in the handler. @@ -501,7 +449,12 @@ impl Work { // SAFETY: The Pin ensures this will not move. Our implementation of drop ensures that the // work item is no longer queued when the data is dropped. - SubmitResult::to_result(unsafe { k_work_submit_to_queue(queue.item.get(), work) }) + let result = + SubmitResult::to_result(unsafe { k_work_submit_to_queue(queue.item.get(), work) }); + + Self::check_drop(work, &result); + + result } /// Callback, through C, but bound by a specific type. @@ -541,8 +494,50 @@ impl Work { Pin::new_unchecked(this) } + /// Determine if this work was submitted, and cause a drop of the Arc to happen if it was not. + pub fn check_drop(work: *const k_work, result: &crate::Result) { + if matches!(result, Ok(SubmitResult::AlreadySubmitted) | Err(_)) { + // SAFETY: If the above submit indicates that it was already running, the work will not + // be submitted (no additional handle will be called). "un leak" the work so that it + // will be dropped. Also, any error indicates that the work did not enqueue. + unsafe { + let this = Self::from_raw(work); + drop(this); + } + } + } + /// Access the inner action. pub fn action(&self) -> &T { &self.action } } + +/// Declare a static work queue. +/// +/// This declares a static work queue (of type [`WorkQueueDecl`]). This will have a single method +/// `.start()` which can be used to start the work queue, as well as return the persistent handle +/// that can be used to enqueue to it. +#[macro_export] +macro_rules! define_work_queue { + ($name:ident, $stack_size:expr) => { + $crate::define_work_queue!($name, $stack_size,); + }; + ($name:ident, $stack_size:expr, $($key:ident = $value:expr),* $(,)?) 
=> { + static $name: $crate::work::WorkQueueDecl<$stack_size> = { + #[link_section = concat!(".noinit.workq.", stringify!($name))] + static _ZEPHYR_STACK: $crate::thread::ThreadStack<$stack_size> = + $crate::thread::ThreadStack::new(); + const _ZEPHYR_C_NAME: &[u8] = concat!(stringify!($name), "\0").as_bytes(); + const _ZEPHYR_ARGS: $crate::work::WorkQueueDeclArgs = $crate::work::WorkQueueDeclArgs { + $($key: $value,)* + ..$crate::work::WorkQueueDeclArgs::default_values() + }; + $crate::work::WorkQueueDecl::new( + &_ZEPHYR_STACK, + _ZEPHYR_C_NAME.as_ptr() as *const ::core::ffi::c_char, + _ZEPHYR_ARGS, + ) + }; + }; +}
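Pulling the new work-queue API together, here is a hedged end-to-end sketch modeled on the `samples/bench` usage earlier in this diff: declare the queue statically with `define_work_queue!`, start it, and submit a `Work` item whose `SimpleAction::act` does the processing. The `Hello` action, stack size, and priority are illustrative:

```rust
use core::pin::Pin;

use zephyr::define_work_queue;
use zephyr::printkln;
use zephyr::work::{SimpleAction, Work};

// Work-queue thread with a 2048-byte stack, priority 5, no yield between items.
define_work_queue!(MY_WORKQ, 2048, priority = 5, no_yield = true);

/// A trivial action that just reports when it runs.
struct Hello;

impl SimpleAction for Hello {
    fn act(self: Pin<&Self>) {
        printkln!("hello from the work queue");
    }
}

fn run() {
    // Start the queue (if not already running) and get the &'static handle.
    let workq = MY_WORKQ.start();

    // `Work::new` pins the action; submission hands a reference to Zephyr until
    // `act` has run.
    let item = Work::new(Hello);
    Work::submit_to_queue(item.clone(), workq).unwrap();
}
```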