From f02e274cfa0f080ab34f2d8af8752ae5292523bf Mon Sep 17 00:00:00 2001 From: David Brown Date: Thu, 13 Mar 2025 15:24:49 -0600 Subject: [PATCH 01/17] zephyr-sys: Export irq_lock/unlock These are macros in Zephyr, so write explicit wrappers for them, that bindgen will be able to directly use. Signed-off-by: David Brown --- zephyr-sys/build.rs | 1 + zephyr-sys/wrapper.h | 13 +++++++++++++ 2 files changed, 14 insertions(+) diff --git a/zephyr-sys/build.rs b/zephyr-sys/build.rs index 05c6e94..fd1e711 100644 --- a/zephyr-sys/build.rs +++ b/zephyr-sys/build.rs @@ -77,6 +77,7 @@ fn main() -> Result<()> { .allowlist_function("k_.*") .allowlist_function("gpio_.*") .allowlist_function("flash_.*") + .allowlist_function("zr_.*") .allowlist_item("GPIO_.*") .allowlist_item("FLASH_.*") .allowlist_item("Z_.*") diff --git a/zephyr-sys/wrapper.h b/zephyr-sys/wrapper.h index 98bb957..69bab65 100644 --- a/zephyr-sys/wrapper.h +++ b/zephyr-sys/wrapper.h @@ -42,6 +42,7 @@ extern int errno; #include #include #include +#include /* * bindgen will only output #defined constants that resolve to simple numbers. These are some @@ -61,3 +62,15 @@ const uint32_t ZR_POLL_TYPE_DATA_AVAILABLE = K_POLL_TYPE_DATA_AVAILABLE; const uint32_t ZR_GPIO_INT_MODE_DISABLE_ONLY = GPIO_INT_MODE_DISABLE_ONLY; const uint32_t ZR_GPIO_INT_MODE_ENABLE_ONLY = GPIO_INT_MODE_ENABLE_ONLY; #endif + +/* + * Zephyr's irq_lock() and irq_unlock() are macros not inline functions, so we need some inlines to + * access them. + */ +static inline int zr_irq_lock(void) { + return irq_lock(); +} + +static inline void zr_irq_unlock(int key) { + irq_unlock(key); +} From a4df84155c96c849bda4a788f748afd6ed4de99e Mon Sep 17 00:00:00 2001 From: David Brown Date: Wed, 12 Mar 2025 11:02:42 -0600 Subject: [PATCH 02/17] zephyr: Replace critical-section implementation There is a fairly fundamental incompatibility between Zephyr spin locks and the Critical Section specification. Zephyr spin locks do not allow nesting from within a single spin lock. The critical section API only has an `acquire` and `release` entry, and provides no way (such as a stack frame) to have a unique context for different invocation places. Unfortunately, this means we cannot use spin locks for critical sections. Instead, this change implements critical sections using irq locking. The implementation of these macros on Zephyr does try to make them SMP safe, with a simple atomic lock, but there is still something preventing the riscv SMP from working. Also, these entries cannot be called from user mode. There are various other reasons we don't support usermode, so at this time, just have a compile time assertion that usermode is not enabled in the build. If it is needed, we will have to come up with another way to implement this. Signed-off-by: David Brown --- zephyr/src/sys.rs | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/zephyr/src/sys.rs b/zephyr/src/sys.rs index 7f127d8..e16c255 100644 --- a/zephyr/src/sys.rs +++ b/zephyr/src/sys.rs @@ -39,42 +39,42 @@ pub fn uptime_get() -> i64 { unsafe { crate::raw::k_uptime_get() } } +// The below implementation, based on interrupt locking has only been tested on single CPU. The +// implementation suggests it should work on SMP, and can be tested. The docs for irq_lock() +// explicitly state that it cannot be used from userspace. Unfortunately, spinlocks have +// incompatible semantics with critical sections, so to work with userspace we'd need probably a +// syscall. 
+#[cfg(CONFIG_USERSPACE)] +compile_error!("Critical-section implementation does not work with CONFIG_USERSPACE"); + pub mod critical { //! Zephyr implementation of critical sections. //! - //! Critical sections from Rust are handled with a single Zephyr spinlock. This doesn't allow - //! any nesting, but neither does the `critical-section` crate. - //! - //! This provides the underlying critical section crate, which is useful for external crates - //! that want this interface. However, it isn't a particularly hygienic interface to use. For - //! something a bit nicer, please see [`sync::SpinMutex`]. - //! - //! [`sync::SpinMutex`]: crate::sync::SpinMutex + //! The critical-section crate explicitly states that critical sections can be nested. + //! Unfortunately, Zephyr spinlocks cannot be nested. It is possible to nest different ones, + //! but the critical-section implementation API doesn't give access to the stack. - use core::{ffi::c_int, ptr::addr_of_mut}; + use core::{ + ffi::c_int, + sync::atomic::{fence, Ordering}, + }; use critical_section::RawRestoreState; - use zephyr_sys::{k_spin_lock, k_spin_unlock, k_spinlock, k_spinlock_key_t}; + use zephyr_sys::{zr_irq_lock, zr_irq_unlock}; struct ZephyrCriticalSection; critical_section::set_impl!(ZephyrCriticalSection); - // The critical section shares a single spinlock. - static mut LOCK: k_spinlock = unsafe { core::mem::zeroed() }; - unsafe impl critical_section::Impl for ZephyrCriticalSection { unsafe fn acquire() -> RawRestoreState { - let res = k_spin_lock(addr_of_mut!(LOCK)); - res.key as RawRestoreState + let res = zr_irq_lock(); + fence(Ordering::Acquire); + res as RawRestoreState } unsafe fn release(token: RawRestoreState) { - k_spin_unlock( - addr_of_mut!(LOCK), - k_spinlock_key_t { - key: token as c_int, - }, - ); + fence(Ordering::Release); + zr_irq_unlock(token as c_int); } } } From cd91abe0eb6690b166fd5eb19b4a7e77db8d8027 Mon Sep 17 00:00:00 2001 From: David Brown Date: Fri, 7 Mar 2025 11:28:54 -0700 Subject: [PATCH 03/17] zephyr: Add proc macro for thread declaration Implement a proc macro that allows a declaration like: ```rust fn mythread(arg: usize, arg2: &'static Thing) { .. } ``` With this change, creation of threads, with arbitrary "Send" arguments can be done without needing allocation. The macros reserves a static area alongside the `k_thread` structure to use for handing the threads over. This creates a function `mythread` with the given args that returns a ReadyThread. This has a `start()` method which will begin execution of the thread. This results in a RunningThread, which has a join method that can be used to wait for termination. 
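
As a rough end-to-end sketch of the intended usage (the stack size, the argument value, and the
wrapping function are illustrative only and not part of this change; `set_priority`, `start`, and
`join` are the methods described above):

```rust
#[zephyr::thread(stack_size = 1024)]
fn mythread(arg: usize) {
    // Thread body; `arg` is handed over through the reserved static area.
    let _ = arg;
}

fn example() {
    // Calling the generated function reserves a thread from the pool and returns a ReadyThread.
    let ready = mythread(42);
    // The priority can optionally be adjusted before the thread starts.
    ready.set_priority(5);
    // start() begins execution and yields a RunningThread; join() blocks until it exits.
    let running = ready.start();
    running.join();
}
```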
Signed-off-by: David Brown --- zephyr-macros/Cargo.toml | 15 ++ zephyr-macros/src/lib.rs | 36 +++++ zephyr-macros/src/task.rs | 271 +++++++++++++++++++++++++++++++ zephyr/Cargo.toml | 1 + zephyr/src/lib.rs | 4 + zephyr/src/thread.rs | 327 ++++++++++++++++++++++++++++++++++++++ 6 files changed, 654 insertions(+) create mode 100644 zephyr-macros/Cargo.toml create mode 100644 zephyr-macros/src/lib.rs create mode 100644 zephyr-macros/src/task.rs create mode 100644 zephyr/src/thread.rs diff --git a/zephyr-macros/Cargo.toml b/zephyr-macros/Cargo.toml new file mode 100644 index 0000000..a9c386b --- /dev/null +++ b/zephyr-macros/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "zephyr-macros" +version = "0.1.0" +edition = "2024" +license = "MIT OR Apache-2.0" +descriptions = "Macros for managing tasks and work queues in Zephyr" + +[lib] +proc-macro = true + +[dependencies] +syn = { version = "2.0.85", features = ["full", "visit"] } +quote = "1.0.37" +proc-macro2 = "1.0.86" +darling = "0.20.1" diff --git a/zephyr-macros/src/lib.rs b/zephyr-macros/src/lib.rs new file mode 100644 index 0000000..1806821 --- /dev/null +++ b/zephyr-macros/src/lib.rs @@ -0,0 +1,36 @@ +//! Zephyr macros + +use proc_macro::TokenStream; + +mod task; + +/// Declares a Zephyr thread (or pool of threads) that can be spawned. +/// +/// There are some restrictions on this: +/// - All arguments to the function must be Send. +/// - The function must not use generics. +/// - The optional `pool_size` attribute must be 1 or greater. +/// - The `stack_size` must be specified, and will set the size of the pre-defined stack for _each_ +/// task in the pool. +/// +/// ## Examples +/// +/// Declaring a task with a simple argument: +/// +/// ```rust +/// #[zephyr::thread(stack_size = 1024)] +/// fn mytask(arg: u32) { +/// // Function body. +/// } +/// ``` +/// +/// The result will be a function `mytask` that takes this argument, and returns a `ReadyThread`. A +/// simple use case is to call `.start()` on this, to start the Zephyr thread. +/// +/// Threads can be reused after they have exited. Calling the `mytask` function before the thread +/// has exited will result in a panic. The `RunningThread`'s `join` method can be used to wait for +/// thread termination. +#[proc_macro_attribute] +pub fn thread(args: TokenStream, item: TokenStream) -> TokenStream { + task::run(args.into(), item.into()).into() +} diff --git a/zephyr-macros/src/task.rs b/zephyr-macros/src/task.rs new file mode 100644 index 0000000..0331718 --- /dev/null +++ b/zephyr-macros/src/task.rs @@ -0,0 +1,271 @@ +//! Expansion of `#[zephyr::task(...)]`. + +use std::fmt::Display; + +use darling::FromMeta; +use darling::export::NestedMeta; +use proc_macro2::{Span, TokenStream}; +use quote::{ToTokens, format_ident, quote}; +use syn::{ + Expr, ExprLit, ItemFn, Lit, LitInt, ReturnType, Type, + visit::{self, Visit}, +}; + +#[derive(Debug, FromMeta, Default)] +struct Args { + #[darling(default)] + pool_size: Option, + #[darling(default)] + stack_size: Option, +} + +pub fn run(args: TokenStream, item: TokenStream) -> TokenStream { + let mut errors = TokenStream::new(); + + // If any of the steps for this macro fail, we still want to expand to an item that is as close + // to the expected output as possible. This helps out IDEs such that completions and other + // related features keep working. 
+ let f: ItemFn = match syn::parse2(item.clone()) { + Ok(x) => x, + Err(e) => return token_stream_with_error(item, e), + }; + + let args = match NestedMeta::parse_meta_list(args) { + Ok(x) => x, + Err(e) => return token_stream_with_error(item, e), + }; + + let args = match Args::from_list(&args) { + Ok(x) => x, + Err(e) => { + errors.extend(e.write_errors()); + Args::default() + } + }; + + let pool_size = args.pool_size.unwrap_or(Expr::Lit(ExprLit { + attrs: vec![], + lit: Lit::Int(LitInt::new("1", Span::call_site())), + })); + + let stack_size = args.stack_size.unwrap_or(Expr::Lit(ExprLit { + attrs: vec![], + // TODO: Instead of a default, require this. + lit: Lit::Int(LitInt::new("2048", Span::call_site())), + })); + + if !f.sig.asyncness.is_none() { + error(&mut errors, &f.sig, "thread function must not be async"); + } + + if !f.sig.generics.params.is_empty() { + error(&mut errors, &f.sig, "thread function must not be generic"); + } + + if !f.sig.generics.where_clause.is_none() { + error( + &mut errors, + &f.sig, + "thread function must not have `where` clauses", + ); + } + + if !f.sig.abi.is_none() { + error( + &mut errors, + &f.sig, + "thread function must not have an ABI qualifier", + ); + } + + if !f.sig.variadic.is_none() { + error(&mut errors, &f.sig, "thread function must not be variadic"); + } + + match &f.sig.output { + ReturnType::Default => {} + ReturnType::Type(_, ty) => match &**ty { + Type::Tuple(tuple) if tuple.elems.is_empty() => {} + Type::Never(_) => {} + _ => error( + &mut errors, + &f.sig, + "thread functions must either not return a value, return (), or return `!`", + ), + }, + } + + let mut args = Vec::new(); + let mut fargs = f.sig.inputs.clone(); + let mut inner_calling = Vec::new(); + let mut inner_args = Vec::new(); + + for arg in fargs.iter_mut() { + match arg { + syn::FnArg::Receiver(_) => { + error( + &mut errors, + arg, + "thread functions must not have `self` arguments", + ); + } + syn::FnArg::Typed(t) => { + check_arg_ty(&mut errors, &t.ty); + match t.pat.as_mut() { + syn::Pat::Ident(id) => { + id.mutability = None; + args.push((id.clone(), t.attrs.clone())); + inner_calling.push(quote! { + data.#id, + }); + inner_args.push(quote! {#id,}); + } + _ => { + error( + &mut errors, + arg, + "pattern matching in task arguments is not yet supported", + ); + } + } + } + } + } + + let thread_ident = f.sig.ident.clone(); + let thread_inner_ident = format_ident!("__{}_thread", thread_ident); + + let mut thread_inner = f.clone(); + let visibility = thread_inner.vis.clone(); + thread_inner.vis = syn::Visibility::Inherited; + thread_inner.sig.ident = thread_inner_ident.clone(); + + // Assemble the original input arguments. + let mut full_args = Vec::new(); + for (arg, cfgs) in &args { + full_args.push(quote! { + #(#cfgs)* + #arg + }); + } + + let mut thread_outer_body = quote! { + const _ZEPHYR_INTERNAL_STACK_SIZE: usize = zephyr::thread::stack_len(#stack_size); + const _ZEPHYR_INTERNAL_POOL_SIZE: usize = #pool_size; + struct _ZephyrInternalArgs { + // This depends on the argument syntax being valid as a struct definition, which should + // be the case with the above constraints. 
+ #fargs + } + + static THREAD: [zephyr::thread::ThreadData<_ZephyrInternalArgs>; _ZEPHYR_INTERNAL_POOL_SIZE] + = [const { zephyr::thread::ThreadData::new() }; _ZEPHYR_INTERNAL_POOL_SIZE]; + #[unsafe(link_section = ".noinit.TODO_STACK")] + static STACK: [zephyr::thread::ThreadStack<_ZEPHYR_INTERNAL_STACK_SIZE>; _ZEPHYR_INTERNAL_POOL_SIZE] + = [const { zephyr::thread::ThreadStack::new() }; _ZEPHYR_INTERNAL_POOL_SIZE]; + + extern "C" fn startup( + arg0: *mut ::core::ffi::c_void, + _: *mut ::core::ffi::c_void, + _: *mut ::core::ffi::c_void, + ) { + let init = unsafe { &mut *(arg0 as *mut ::zephyr::thread::InitData<_ZephyrInternalArgs>) }; + let init = init.0.get(); + match unsafe { init.replace(None) } { + None => { + ::core::panic!("Incorrect thread initialization"); + } + Some(data) => { + #thread_inner_ident(#(#inner_calling)*); + } + } + } + + zephyr::thread::ThreadData::acquire( + &THREAD, + &STACK, + _ZephyrInternalArgs { #(#inner_args)* }, + Some(startup), + 0, + ) + }; + + let thread_outer_attrs = thread_inner.attrs.clone(); + + if !errors.is_empty() { + thread_outer_body = quote! { + #[allow(unused_variables, unreachable_code)] + let _x: ::zephyr::thread::ReadyThread = ::core::todo!(); + _x + }; + } + + // Copy the generics + where clause to avoid more spurious errors. + let generics = &f.sig.generics; + let where_clause = &f.sig.generics.where_clause; + + quote! { + // This is the user's thread function, renamed. + #[doc(hidden)] + #thread_inner + + #(#thread_outer_attrs)* + #visibility fn #thread_ident #generics (#fargs) -> ::zephyr::thread::ReadyThread #where_clause { + #thread_outer_body + } + + #errors + } +} + +// Taken from embassy-executor-macros. +fn check_arg_ty(errors: &mut TokenStream, ty: &Type) { + struct Visitor<'a> { + errors: &'a mut TokenStream, + } + + impl<'a, 'ast> Visit<'ast> for Visitor<'a> { + fn visit_type_reference(&mut self, i: &'ast syn::TypeReference) { + // only check for elided lifetime here. If not elided, it is checked by + // `visit_lifetime`. + if i.lifetime.is_none() { + error( + self.errors, + i.and_token, + "Arguments for threads must live forever. Try using the `'static` lifetime.", + ); + } + visit::visit_type_reference(self, i); + } + + fn visit_lifetime(&mut self, i: &'ast syn::Lifetime) { + if i.ident.to_string() != "static" { + error( + self.errors, + i, + "Arguments for threads must live forever. Try using the `'static` lifetime.", + ); + } + } + + fn visit_type_impl_trait(&mut self, i: &'ast syn::TypeImplTrait) { + error( + self.errors, + i, + "`impl Trait` is not allowed in thread arguments. It is syntax sugar for generics, and threads cannot be generic.", + ); + } + } + + Visit::visit_type(&mut Visitor { errors }, ty); +} + +// Utility borrowed from embassy-executor-macros. +pub fn token_stream_with_error(mut tokens: TokenStream, error: syn::Error) -> TokenStream { + tokens.extend(error.into_compile_error()); + tokens +} + +pub fn error(s: &mut TokenStream, obj: A, msg: T) { + s.extend(syn::Error::new_spanned(obj.into_token_stream(), msg).into_compile_error()) +} diff --git a/zephyr/Cargo.toml b/zephyr/Cargo.toml index bb70549..b5551db 100644 --- a/zephyr/Cargo.toml +++ b/zephyr/Cargo.toml @@ -11,6 +11,7 @@ Functionality for Rust-based applications that run on Zephyr. [dependencies] zephyr-sys = { version = "0.1.0", path = "../zephyr-sys" } +zephyr-macros = { version = "0.1.0", path = "../zephyr-macros" } # Although paste is brought in, it is a compile-time macro, and is not linked into the application. 
paste = "1.0" diff --git a/zephyr/src/lib.rs b/zephyr/src/lib.rs index 7ccb767..dd53166 100644 --- a/zephyr/src/lib.rs +++ b/zephyr/src/lib.rs @@ -88,6 +88,7 @@ pub mod object; pub mod simpletls; pub mod sync; pub mod sys; +pub mod thread; pub mod time; #[cfg(CONFIG_RUST_ALLOC)] pub mod timer; @@ -101,6 +102,9 @@ pub use logging::set_logger; /// Re-exported for local macro use. pub use paste::paste; +/// Re-export the proc macros. +pub use zephyr_macros::thread; + // Bring in the generated kconfig module pub mod kconfig { //! Zephyr Kconfig values. diff --git a/zephyr/src/thread.rs b/zephyr/src/thread.rs new file mode 100644 index 0000000..7b97ad3 --- /dev/null +++ b/zephyr/src/thread.rs @@ -0,0 +1,327 @@ +//! Thread support. +//! +//! Implement the friendly Thread types used by the `zephyr::thread` proc macro to declare new +//! threads. +//! +//! This is intended to be completely usable without alloc, while still allow threads to be +//! started with any arbitrary Send arguments. Threads can be joined, and reused after they have +//! exited. The model intentionally tries to be similar to how async tasks work in something like +//! Embassy, but some changes due to the different semantics of Zephyr threads. + +use core::{ + cell::UnsafeCell, + ffi::{c_int, c_void}, + mem, + ptr::null_mut, + sync::atomic::Ordering, +}; + +use portable_atomic::AtomicU8; +use zephyr_sys::{ + k_thread, k_thread_create, k_thread_entry_t, k_thread_join, k_thread_priority_set, k_wakeup, + z_thread_stack_element, ZR_STACK_ALIGN, ZR_STACK_RESERVED, +}; + +use crate::{ + align::AlignAs, + sys::{K_FOREVER, K_NO_WAIT}, +}; + +/// Adjust a given requested stack size up for the alignment. This is just the stack, and the +/// reservation is explicitly included in the stack declaration below. +pub const fn stack_len(size: usize) -> usize { + size.next_multiple_of(ZR_STACK_ALIGN) +} + +/// States a Zephyr thread can be in. +#[repr(u8)] +pub enum ThreadState { + /// A non running thread, that is free. + Init, + /// An allocated thread. There is a ThreadHandle for this thread, but it has not been started. + Allocated, + /// A thread that is running, as far as we know. Termination is not checked unless demanded. + Running, +} + +/// The holder of data that is to be shared with the target thread. +/// +/// # Safety +/// +/// The Option is kept in an UnsafeCell, and it's use governed by an atomic in the `TaskData` +/// below. When the task is not initialized/not running, this should be set to None. It will be +/// set to Some in a critical section during startup, where the critical section provides the +/// barrier. Once the atomic is set to true, the thread owns this data. +/// +/// The Send constraint force arguments passed to threads to be Send. +pub struct InitData(pub UnsafeCell>); + +impl InitData { + /// Construct new Shared init state. + pub const fn new() -> Self { + Self(UnsafeCell::new(None)) + } +} + +unsafe impl Sync for InitData {} + +/// The static data associated with each thread. The stack is kept separate, as it is intended to +/// go into an uninitialized linker section. +pub struct ThreadData { + init: InitData, + state: AtomicU8, + thread: Thread, +} + +impl ThreadData { + /// Construct new ThreadData. + pub const fn new() -> Self { + Self { + init: InitData::new(), + state: AtomicU8::new(ThreadState::Init as u8), + thread: unsafe { Thread::new() }, + } + } + + /// Acquire the thread, in preparation to run it. 
+ pub fn acquire_old( + &'static self, + args: T, + stack: &'static ThreadStack, + entry: k_thread_entry_t, + ) { + critical_section::with(|_| { + // Relaxed is sufficient, as the critical section provides both synchronization and + // a memory barrier. + let old = self.state.load(Ordering::Relaxed); + if old != ThreadState::Init as u8 { + // TODO: This is where we should check for termination. + panic!("Attempt to use a thread that is already in use"); + } + self.state + .store(ThreadState::Allocated as u8, Ordering::Relaxed); + + let init = self.init.0.get(); + unsafe { + *init = Some(args); + } + }); + + // For now, just directly start the thread. We'll want to delay this so that parameters + // (priority and/or flags) can be passed, as well as to have a handle to be able to join and + // restart threads. + let _tid = unsafe { + k_thread_create( + self.thread.0.get(), + stack.data.get() as *mut z_thread_stack_element, + stack.size(), + entry, + self.init.0.get() as *mut c_void, + null_mut(), + null_mut(), + 0, + 0, + K_NO_WAIT, + ) + }; + } + + /// Acquire a thread from the pool of threads, panicing if the pool is exhausted. + pub fn acquire( + pool: &'static [Self], + stacks: &'static [ThreadStack], + args: T, + entry: k_thread_entry_t, + priority: c_int, + ) -> ReadyThread { + let id = Self::find_thread(pool); + + let init = pool[id].init.0.get(); + unsafe { + *init = Some(args); + } + + // Create the thread in Zephyr, in a non-running state. + let tid = unsafe { + k_thread_create( + pool[id].thread.0.get(), + stacks[id].data.get() as *mut z_thread_stack_element, + SIZE, + entry, + pool[id].init.0.get() as *mut c_void, + null_mut(), + null_mut(), + priority, + 0, + K_FOREVER, + ) + }; + + ReadyThread { id: tid } + } + + /// Scan the pool of threads, looking for an available thread. + /// + /// Returns the index of a newly allocated thread. The thread will be marked 'Allocated' after + /// this. + fn find_thread(pool: &'static [Self]) -> usize { + let id = critical_section::with(|_| { + for (id, thread) in pool.iter().enumerate() { + // Relaxed is sufficient, due to the critical section. + let old = thread.state.load(Ordering::Relaxed); + + match old { + v if v == ThreadState::Init as u8 => { + // This is available. Mark as allocated and return from the closure. + thread + .state + .store(ThreadState::Allocated as u8, Ordering::Relaxed); + return Some(id); + } + v if v == ThreadState::Allocated as u8 => { + // Allocate threads haven't started, so aren't available. + } + v if v == ThreadState::Running as u8 => { + // A running thread might be available if it has terminated. We could + // improve performance here by not checking these until after the pool has + // been checked for Init threads. + if unsafe { k_thread_join(thread.thread.0.get(), K_NO_WAIT) } == 0 { + thread + .state + .store(ThreadState::Allocated as u8, Ordering::Relaxed); + return Some(id); + } + } + _ => unreachable!(), + } + } + + None + }); + + if let Some(id) = id { + id + } else { + panic!("Attempt to use more threads than declared pool size"); + } + } +} + +/// A thread that has been set up and is ready to start. +/// +/// Represents a thread that has been created, but not yet started. +pub struct ReadyThread { + id: *mut k_thread, +} + +impl ReadyThread { + /// Change the priority of this thread before starting it. The initial default priority was + /// determined by the declaration of the thread. + pub fn set_priority(&self, priority: c_int) { + // SAFETY: ReadyThread should only exist for valid created threads. 
+ unsafe { + k_thread_priority_set(self.id, priority); + } + } + + /// Start this thread. + pub fn start(self) -> RunningThread { + // SAFETY: ReadyThread should only exist for valid created threads. + unsafe { + // As per the docs, this should no longer be `k_thread_start`, but `k_wakeup` is fine + // these days. + k_wakeup(self.id); + } + + RunningThread { id: self.id } + } +} + +/// A thread that has been started. +pub struct RunningThread { + id: *mut k_thread, +} + +impl RunningThread { + /// Wait for this thread to finish executing. + /// + /// Will block until the thread has terminated. + /// + /// TODO: Allow a timeout? + /// TODO: Should we try to return a value? + pub fn join(&self) { + unsafe { + // TODO: Can we do something meaningful with the result? + k_thread_join(self.id, K_FOREVER); + + // TODO: Ideally, we could put the thread state back to avoid the need for another join + // check when re-allocating the thread. + } + } +} + +/// A Zephyr stack declaration. +/// +/// This isn't intended to be used directly, as it needs additional decoration about linker sections +/// and such. Unlike the C declaration, the reservation is a separate field. As long as the SIZE +/// is properly aligned, this should work without padding between the fields. +/// +/// Generally, this should be placed in a noinit linker section to avoid having to initialize the +/// memory. +#[repr(C)] +pub struct ThreadStack { + /// Align the stack itself according to the Kconfig determined alignment. + #[allow(dead_code)] + align: AlignAs, + /// The data of the stack itself. + #[allow(dead_code)] + pub data: UnsafeCell<[z_thread_stack_element; SIZE]>, + /// Extra data, used by Zephyr. + #[allow(dead_code)] + extra: [z_thread_stack_element; ZR_STACK_RESERVED], +} + +unsafe impl Sync for ThreadStack {} + +impl ThreadStack { + /// Construct a new ThreadStack + /// + /// # Safety + /// + /// This is unsafe as the memory remains uninitialized, and it is the responsibility of the + /// caller to use the stack correctly. The stack should be associated with a single thread. + pub const fn new() -> Self { + // SAFETY: Although this is declared as zeroed, the linker section actually used on the + // stack can be used to place it in no-init memory. + unsafe { mem::zeroed() } + } + + /// Retrieve the size of this stack. + pub const fn size(&self) -> usize { + SIZE + } +} + +/// A zephyr thread. +/// +/// This declares a single k_thread in Zephyr. +pub struct Thread(pub UnsafeCell); + +// Threads are "sort of" thread safe. But, this declaration is needed to be able to declare these +// statically, and all generated versions will protect the thread with a critical section. +unsafe impl Sync for Thread {} + +impl Thread { + /// Static allocation of a thread + /// + /// This makes the zero-initialized memory that can later be used as a thread. + /// + /// # Safety + /// + /// The caller is responsible for using operations such as `create` to construct the thread, + /// according to the underlying semantics of the Zephyr operations. + pub const unsafe fn new() -> Self { + // SAFETY: Zero initialized to match thread declarations in the C macros. + unsafe { mem::zeroed() } + } +} From a32a8e76ec8fb3c2c420298e54ecef9ed9a8c8a6 Mon Sep 17 00:00:00 2001 From: David Brown Date: Fri, 7 Mar 2025 11:35:51 -0700 Subject: [PATCH 04/17] samples: philosophers: Migrate to new task declaration Move away from the `kobj_define` task declaration to use the new `#[zephyr::thread]` to define these. 
This allows for a more natural declaration where the thread just looks like an attribute added to a regular function declaration. This also eliminates the static Mutex, as the Mutex now has a constructor that avoids allocation (it is still put in an Arc, though). Signed-off-by: David Brown --- samples/philosophers/src/lib.rs | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/samples/philosophers/src/lib.rs b/samples/philosophers/src/lib.rs index f602ec4..473e84b 100644 --- a/samples/philosophers/src/lib.rs +++ b/samples/philosophers/src/lib.rs @@ -14,7 +14,7 @@ use alloc::boxed::Box; use alloc::vec::Vec; use zephyr::time::{sleep, Duration, Tick}; use zephyr::{ - kobj_define, printkln, + printkln, sync::{Arc, Mutex}, sys::uptime_get, }; @@ -75,12 +75,7 @@ extern "C" fn rust_main() { printkln!("Pre fork"); for (i, syncer) in (0..NUM_PHIL).zip(syncers.into_iter()) { - let thread = PHIL_THREADS[i] - .init_once(PHIL_STACKS[i].init_once(()).unwrap()) - .unwrap(); - thread.spawn(move || { - phil_thread(i, syncer, stats); - }); + phil_thread(i, syncer, stats).start(); } let delay = Duration::secs_at_least(10); @@ -129,6 +124,7 @@ fn get_syncer() -> Vec> { get_channel_syncer() } +#[zephyr::thread(stack_size = PHIL_STACK_SIZE, pool_size = NUM_PHIL)] fn phil_thread(n: usize, syncer: Arc, stats: &'static Mutex) { printkln!("Child {} started: {:?}", n, syncer); @@ -219,8 +215,3 @@ impl Stats { } static STAT_MUTEX: Mutex = Mutex::new(Stats::new()); - -kobj_define! { - static PHIL_THREADS: [StaticThread; NUM_PHIL]; - static PHIL_STACKS: [ThreadStack; NUM_PHIL]; -} From 0a78a5e6197b67f9ad39c9c1d88c662138256ad5 Mon Sep 17 00:00:00 2001 From: David Brown Date: Thu, 13 Mar 2025 15:23:43 -0600 Subject: [PATCH 05/17] zephyr-macros: Incorporate fn name into thread name To help with debugging, try to give created Zephyr threads a readable name. Currently, this is based off of the name of the function used in the declaration of the thread. Signed-off-by: David Brown --- zephyr-macros/src/task.rs | 7 +++++-- zephyr/src/thread.rs | 12 +++++++++--- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/zephyr-macros/src/task.rs b/zephyr-macros/src/task.rs index 0331718..32f029a 100644 --- a/zephyr-macros/src/task.rs +++ b/zephyr-macros/src/task.rs @@ -1,10 +1,10 @@ //! Expansion of `#[zephyr::task(...)]`. -use std::fmt::Display; +use std::{ffi::CString, fmt::Display}; use darling::FromMeta; use darling::export::NestedMeta; -use proc_macro2::{Span, TokenStream}; +use proc_macro2::{Literal, Span, TokenStream}; use quote::{ToTokens, format_ident, quote}; use syn::{ Expr, ExprLit, ItemFn, Lit, LitInt, ReturnType, Type, @@ -149,6 +149,8 @@ pub fn run(args: TokenStream, item: TokenStream) -> TokenStream { }); } + let thread_name = Literal::c_string(&CString::new(thread_ident.to_string()).unwrap()); + let mut thread_outer_body = quote! 
{ const _ZEPHYR_INTERNAL_STACK_SIZE: usize = zephyr::thread::stack_len(#stack_size); const _ZEPHYR_INTERNAL_POOL_SIZE: usize = #pool_size; @@ -187,6 +189,7 @@ pub fn run(args: TokenStream, item: TokenStream) -> TokenStream { _ZephyrInternalArgs { #(#inner_args)* }, Some(startup), 0, + #thread_name, ) }; diff --git a/zephyr/src/thread.rs b/zephyr/src/thread.rs index 7b97ad3..8af186d 100644 --- a/zephyr/src/thread.rs +++ b/zephyr/src/thread.rs @@ -10,7 +10,7 @@ use core::{ cell::UnsafeCell, - ffi::{c_int, c_void}, + ffi::{c_int, c_void, CStr}, mem, ptr::null_mut, sync::atomic::Ordering, @@ -18,8 +18,8 @@ use core::{ use portable_atomic::AtomicU8; use zephyr_sys::{ - k_thread, k_thread_create, k_thread_entry_t, k_thread_join, k_thread_priority_set, k_wakeup, - z_thread_stack_element, ZR_STACK_ALIGN, ZR_STACK_RESERVED, + k_thread, k_thread_create, k_thread_entry_t, k_thread_join, k_thread_name_set, + k_thread_priority_set, k_wakeup, z_thread_stack_element, ZR_STACK_ALIGN, ZR_STACK_RESERVED, }; use crate::{ @@ -133,6 +133,7 @@ impl ThreadData { args: T, entry: k_thread_entry_t, priority: c_int, + name: &'static CStr, ) -> ReadyThread { let id = Self::find_thread(pool); @@ -157,6 +158,11 @@ impl ThreadData { ) }; + // Set the name. + unsafe { + k_thread_name_set(tid, name.as_ptr()); + } + ReadyThread { id: tid } } From acb2eaf1c35cee8a88f4e323c2700964712c47c5 Mon Sep 17 00:00:00 2001 From: David Brown Date: Thu, 10 Apr 2025 15:44:23 -0600 Subject: [PATCH 06/17] wip: rtio/i2c --- dt-rust.yaml | 33 +++++++ zephyr-sys/build.rs | 4 + zephyr-sys/wrapper.h | 9 ++ zephyr/src/device.rs | 1 + zephyr/src/device/gpio.rs | 6 +- zephyr/src/device/i2c.rs | 100 +++++++++++++++++++ zephyr/src/lib.rs | 2 + zephyr/src/rtio.rs | 195 ++++++++++++++++++++++++++++++++++++++ 8 files changed, 349 insertions(+), 1 deletion(-) create mode 100644 zephyr/src/device/i2c.rs create mode 100644 zephyr/src/rtio.rs diff --git a/dt-rust.yaml b/dt-rust.yaml index c848277..1932750 100644 --- a/dt-rust.yaml +++ b/dt-rust.yaml @@ -35,6 +35,22 @@ value: gpios device: crate::device::gpio::GpioPin +# Hook up the gpio-keys as gpio pins as well +- name: gpio-keys + rules: + - type: compatible + value: + names: + - gpio-keys + level: 1 + actions: + - type: instance + value: + raw: + type: phandle + value: gpios + device: crate::device::gpio::GpioPin + # Flash controllers don't have any particular property to identify them, so we need a list of # compatible values that should match. - name: flash-controller @@ -45,6 +61,8 @@ - "nordic,nrf52-flash-controller" - "nordic,nrf51-flash-controller" - "raspberrypi,pico-flash-controller" + - "st,stm32g4-flash-controller" + - "st,stm32l5-flash-controller" - "zephyr,sim-flash" level: 0 actions: @@ -80,6 +98,21 @@ - type: reg device: "crate::device::flash::FlashPartition" +# I2C. +- name: i2c + rules: + - type: compatible + value: + names: + - "snps,designware-i2c" + level: 0 + actions: + - type: instance + value: + raw: + type: myself + device: crate::device::i2c::I2C + # Generate a pseudo node that matches all of the labels across the tree with their nodes. 
- name: labels rules: diff --git a/zephyr-sys/build.rs b/zephyr-sys/build.rs index fd1e711..2f50cb9 100644 --- a/zephyr-sys/build.rs +++ b/zephyr-sys/build.rs @@ -76,9 +76,13 @@ fn main() -> Result<()> { .derive_copy(false) .allowlist_function("k_.*") .allowlist_function("gpio_.*") + .allowlist_function("i2c_.*") .allowlist_function("flash_.*") .allowlist_function("zr_.*") + .allowlist_function("mpsc_.*") + .allowlist_function("rtio.*") .allowlist_item("GPIO_.*") + .allowlist_item("I2C_.*") .allowlist_item("FLASH_.*") .allowlist_item("Z_.*") .allowlist_item("ZR_.*") diff --git a/zephyr-sys/wrapper.h b/zephyr-sys/wrapper.h index 69bab65..95f0fd2 100644 --- a/zephyr-sys/wrapper.h +++ b/zephyr-sys/wrapper.h @@ -43,6 +43,9 @@ extern int errno; #include #include #include +#include +#include +#include /* * bindgen will only output #defined constants that resolve to simple numbers. These are some @@ -63,6 +66,12 @@ const uint32_t ZR_GPIO_INT_MODE_DISABLE_ONLY = GPIO_INT_MODE_DISABLE_ONLY; const uint32_t ZR_GPIO_INT_MODE_ENABLE_ONLY = GPIO_INT_MODE_ENABLE_ONLY; #endif +const uint8_t ZR_I2C_MSG_WRITE = I2C_MSG_WRITE; +const uint8_t ZR_I2C_MSG_READ = I2C_MSG_READ; +const uint8_t ZR_I2C_MSG_STOP = I2C_MSG_STOP; + +const uint16_t ZR_RTIO_SQE_NO_RESPONSE = RTIO_SQE_NO_RESPONSE; + /* * Zephyr's irq_lock() and irq_unlock() are macros not inline functions, so we need some inlines to * access them. diff --git a/zephyr/src/device.rs b/zephyr/src/device.rs index 0bd80ac..5ad6076 100644 --- a/zephyr/src/device.rs +++ b/zephyr/src/device.rs @@ -12,6 +12,7 @@ use crate::sync::atomic::{AtomicBool, Ordering}; pub mod flash; pub mod gpio; +pub mod i2c; // Allow dead code, because it isn't required for a given build to have any devices. /// Device uniqueness. diff --git a/zephyr/src/device/gpio.rs b/zephyr/src/device/gpio.rs index f0e52c8..dfbea07 100644 --- a/zephyr/src/device/gpio.rs +++ b/zephyr/src/device/gpio.rs @@ -35,7 +35,10 @@ mod async_io { ZR_GPIO_INT_MODE_DISABLE_ONLY, }; - use crate::sync::atomic::{AtomicBool, AtomicU32}; + use crate::{ + printkln, + sync::atomic::{AtomicBool, AtomicU32}, + }; use super::{GpioPin, GpioToken}; @@ -112,6 +115,7 @@ mod async_io { cb: *mut gpio_callback, mut pins: gpio_port_pins_t, ) { + printkln!("GPIO callback: {}", pins); let data = unsafe { cb.cast::() .sub(mem::offset_of!(Self, callback)) diff --git a/zephyr/src/device/i2c.rs b/zephyr/src/device/i2c.rs new file mode 100644 index 0000000..654bf04 --- /dev/null +++ b/zephyr/src/device/i2c.rs @@ -0,0 +1,100 @@ +//! Zpehyr I2C interface + +use core::{ffi::c_int, marker::PhantomData}; + +use crate::{error::to_result, printkln, raw}; + +use super::{NoStatic, Unique}; + +/// A single I2C controller. +pub struct I2C { + /// The underlying device itself. + #[allow(dead_code)] + pub(crate) device: *const raw::device, +} + +unsafe impl Send for I2C {} + +impl I2C { + /// Constructor, used by the devicetree generated code. + #[allow(dead_code)] + pub(crate) unsafe fn new( + unique: &Unique, + _data: &'static NoStatic, + device: *const raw::device, + ) -> Option { + if !unique.once() { + return None; + } + Some(I2C { device }) + } + + /// Do a write/read. 
+ pub fn write_read(&mut self, write: &[u8], read: &mut [u8]) -> crate::Result { + let mut msg = [ + raw::i2c_msg { + buf: write.as_ptr() as *mut _, + len: write.len() as u32, + flags: raw::ZR_I2C_MSG_WRITE, + }, + raw::i2c_msg { + buf: read.as_mut_ptr(), + len: read.len() as u32, + flags: raw::ZR_I2C_MSG_READ | raw::ZR_I2C_MSG_STOP, + }, + ]; + let res = unsafe { to_result(raw::i2c_transfer(self.device, msg.as_mut_ptr(), 2, 0x42)) }; + + printkln!("res: {} {}", msg[1].len, msg[1].flags); + + res + } + + /// Add an i2c operation to the RTIO. + /// + /// TODO: Unclear how to indicate that the buffers must live long enough for the submittion. + /// As it is, this is actually completely unsound. + pub fn rtio_write_read(&mut self, write: &[u8], read: &mut [u8]) -> crate::Result<()> { + let _msg = [ + raw::i2c_msg { + buf: write.as_ptr() as *mut _, + len: write.len() as u32, + flags: raw::ZR_I2C_MSG_WRITE, + }, + raw::i2c_msg { + buf: read.as_mut_ptr(), + len: read.len() as u32, + flags: raw::ZR_I2C_MSG_READ | raw::ZR_I2C_MSG_STOP, + }, + ]; + + todo!() + } +} + +/// An i2c transaction. +pub struct ReadWrite<'a> { + _phantom: PhantomData<&'a ()>, + msgs: [raw::i2c_msg; 2], +} + +impl<'a> ReadWrite<'a> { + /// Construct a new read/write transaction. + pub fn new(write: &'a [u8], read: &'a mut [u8]) -> Self { + Self { + _phantom: PhantomData, + msgs: [ + raw::i2c_msg { + buf: write.as_ptr() as *mut _, + len: write.len() as u32, + flags: raw::ZR_I2C_MSG_WRITE, + }, + raw::i2c_msg { + buf: read.as_mut_ptr(), + len: read.len() as u32, + flags: raw::ZR_I2C_MSG_READ | raw::ZR_I2C_MSG_STOP, + }, + ], + } + } +} diff --git a/zephyr/src/lib.rs b/zephyr/src/lib.rs index cccdfaf..fcc3965 100644 --- a/zephyr/src/lib.rs +++ b/zephyr/src/lib.rs @@ -74,6 +74,8 @@ pub mod embassy; pub mod error; pub mod logging; pub mod object; +#[cfg(CONFIG_RTIO)] +pub mod rtio; #[cfg(CONFIG_RUST_ALLOC)] pub mod simpletls; pub mod sync; diff --git a/zephyr/src/rtio.rs b/zephyr/src/rtio.rs new file mode 100644 index 0000000..c400025 --- /dev/null +++ b/zephyr/src/rtio.rs @@ -0,0 +1,195 @@ +//! Interface to Zephyr 'rtio' infrastructure. + +use core::ffi::c_void; + +use crate::error::to_result_void; +use crate::object::{ObjectInit, ZephyrObject}; +use crate::raw; + +/// The underlying structure, holding the rtio, it's semaphores, and pools. +/// +/// Note that putting these together in a single struct makes this "pleasant" to use from Rust, but +/// does make the end result incompatible with userspace. +#[repr(C)] +pub struct RtioData { + /// The overall rtio struct. + rtio: raw::rtio, + /// Sempahore used for the submission queue. + #[cfg(CONFIG_RTIO_SUBMIT_SEM)] + submit_sem: raw::k_sem, + /// Semaphore used for the consumption queue. + #[cfg(CONFIG_RTIO_CONSUME_SEM)] + consume_sem: raw::k_sem, + /// The SQE items. + sqe_pool_items: [raw::rtio_iodev_sqe; SQE_SZ], + /// The SQE pool itself. + sqe_pool: raw::rtio_sqe_pool, + /// The CQE items. + cqe_pool_items: [raw::rtio_cqe; CQE_SZ], + /// The pool of CQEs. + cqe_pool: raw::rtio_cqe_pool, +} + +/// Init based reference to the the underlying rtio object. +/// +/// Note that this declaration will _not_ support userspace currently, as the object will not be +/// placed in an iterable linker section. Also, the linker sevction will not work as the +/// ZephyrObject will have an attached atomic used to ensure proper initialization. +pub struct RtioObject( + pub(crate) ZephyrObject>, +); + +unsafe impl Sync for RtioObject {} + +impl RtioObject { + /// Construct a new RTIO pool. 
+ /// + /// Create a new RTIO object. These objects generally need to be statically allocated. + pub const fn new() -> Self { + let this = >>::new_raw(); + RtioObject(this) + } + + /// Acquire a submission object. + pub fn sqe_acquire(&'static self) -> Option { + let this = unsafe { self.0.get() }; + + let ptr = unsafe { raw::rtio_sqe_acquire(&raw mut (*this).rtio) }; + + if ptr.is_null() { + None + } else { + Some(Sqe { item: ptr }) + } + } + + /// Submit the work. + pub fn submit(&'static self, wait: usize) -> crate::Result<()> { + let this = unsafe { self.0.get() }; + + unsafe { to_result_void(raw::rtio_submit(&raw mut (*this).rtio, wait as u32)) } + } + + /// Consume a single completion. + /// + /// Will return the completion if available. If returned, it will be released upon drop. + pub fn cqe_consume(&'static self) -> Option { + let this = unsafe { self.0.get() }; + + let ptr = unsafe { raw::rtio_cqe_consume(&raw mut (*this).rtio) }; + + if ptr.is_null() { + None + } else { + Some(Cqe { + item: ptr, + rtio: unsafe { &raw mut (*this).rtio }, + }) + } + } +} + +impl ObjectInit> + for ZephyrObject> +{ + fn init(item: *mut RtioData) { + #[cfg(CONFIG_RTIO_SUBMIT_SEM)] + unsafe { + raw::k_sem_init(&raw mut (*item).submit_sem, 0, raw::K_SEM_MAX_LIMIT); + (*item).rtio.submit_sem = &raw mut (*item).submit_sem; + (*item).rtio.submit_count = 0; + } + #[cfg(CONFIG_RTIO_CONSUME_SEM)] + unsafe { + raw::k_sem_init(&raw mut (*item).consume_sem, 0, raw::K_SEM_MAX_LIMIT); + (*item).rtio.consume_sem = &raw mut (*item).consume_sem; + } + unsafe { + // TODO: Zephyr atomic init? + (*item).rtio.cq_count = 0; + (*item).rtio.xcqcnt = 0; + + // Set up the sqe pool. + raw::mpsc_init(&raw mut (*item).sqe_pool.free_q); + (*item).sqe_pool.pool_size = SQE_SZ as u16; + (*item).sqe_pool.pool_free = SQE_SZ as u16; + (*item).sqe_pool.pool = (*item).sqe_pool_items.as_mut_ptr(); + + for p in &mut (*item).sqe_pool_items { + raw::mpsc_push(&raw mut (*item).sqe_pool.free_q, &raw mut p.q); + } + + // Set up the cqe pool + raw::mpsc_init(&raw mut (*item).cqe_pool.free_q); + (*item).cqe_pool.pool_size = CQE_SZ as u16; + (*item).cqe_pool.pool_free = CQE_SZ as u16; + (*item).cqe_pool.pool = (*item).cqe_pool_items.as_mut_ptr(); + + for p in &mut (*item).cqe_pool_items { + raw::mpsc_push(&raw mut (*item).cqe_pool.free_q, &raw mut p.q); + } + + (*item).rtio.sqe_pool = &raw mut (*item).sqe_pool; + (*item).rtio.cqe_pool = &raw mut (*item).cqe_pool; + + raw::mpsc_init(&raw mut (*item).rtio.sq); + raw::mpsc_init(&raw mut (*item).rtio.cq); + } + } +} + +/// A single Sqe. +/// +/// TODO: How to bind the lifetime to the Rtio meaningfully, even though it is all static. +pub struct Sqe { + item: *mut raw::rtio_sqe, +} + +impl Sqe { + /// Configure this SQE as a callback. + pub fn prep_callback( + &mut self, + callback: raw::rtio_callback_t, + arg0: *mut c_void, + userdata: *mut c_void, + ) { + unsafe { + raw::rtio_sqe_prep_callback(self.item, callback, arg0, userdata); + } + } + + /// Configure this SQE as a nop. + pub fn prep_nop(&mut self, dev: *mut raw::rtio_iodev, userdata: *mut c_void) { + unsafe { + raw::rtio_sqe_prep_nop(self.item, dev, userdata); + } + } + + /// Add flags. + pub fn or_flags(&mut self, flags: u16) { + unsafe { + (*self.item).flags |= flags; + } + } +} + +/// A single Cqe. +pub struct Cqe { + item: *mut raw::rtio_cqe, + rtio: *mut raw::rtio, +} + +impl Cqe { + /// Retrieve the result of this operation. 
+ pub fn result(&self) -> i32 { + unsafe { (*self.item).result } + } +} + +impl Drop for Cqe { + fn drop(&mut self) { + unsafe { + raw::rtio_cqe_release(self.rtio, self.item); + } + } +} From 506fdf4fc5c176937e87c734fa05bbabbbaf7c22 Mon Sep 17 00:00:00 2001 From: David Brown Date: Thu, 10 Apr 2025 15:46:57 -0600 Subject: [PATCH 07/17] zephyr: embassy: Decouple embassy-time and Zephyr-time Don't require that the two time bases be the same. This allows applications to work using the default embassy-time base of 1MHz. There is a performance cost to the conversion (which depends on the exact ratios). If the time bases are the same (which would be common for an application build for a single target), then no conversion is needed. Signed-off-by: David Brown --- samples/embassy/Cargo.toml | 9 +++-- zephyr/src/embassy/time_driver.rs | 60 +++++++++++++++++++++++++++---- 2 files changed, 57 insertions(+), 12 deletions(-) diff --git a/samples/embassy/Cargo.toml b/samples/embassy/Cargo.toml index 6301463..beeee52 100644 --- a/samples/embassy/Cargo.toml +++ b/samples/embassy/Cargo.toml @@ -28,17 +28,16 @@ features = [ [dependencies.embassy-futures] version = "0.1.1" -# path = "../../embassy/embassy-futures" [dependencies.embassy-sync] version = "0.6.2" -# path = "../../embassy/embassy-sync" [dependencies.embassy-time] version = "0.4.0" -# path = "../../embassy/embassy-time" -# This is board specific. -features = ["tick-hz-10_000"] +# For real builds, you should figure out your target's tick rate and set the appropriate feature, +# like in these examples. Without this, embassy-time will assume a 1Mhz tick rate, and every time +# operation will involve a conversion. +#features = ["tick-hz-10_000"] [dependencies.critical-section] version = "1.2" diff --git a/zephyr/src/embassy/time_driver.rs b/zephyr/src/embassy/time_driver.rs index c9e7324..f561613 100644 --- a/zephyr/src/embassy/time_driver.rs +++ b/zephyr/src/embassy/time_driver.rs @@ -14,6 +14,18 @@ use embassy_time_queue_utils::Queue; use crate::raw::{k_timeout_t, k_timer, k_timer_init, k_timer_start}; use crate::sys::K_FOREVER; +/// The time base configured into Zephyr. +pub const ZEPHYR_TICK_HZ: u64 = crate::time::SYS_FREQUENCY as u64; + +/// The configured Embassy time tick rate. +pub const EMBASSY_TICK_HZ: u64 = embassy_time_driver::TICK_HZ; + +/// When the zephyr and embassy rates differ, use this intermediate type. This can be selected by +/// feature. At the worst case, with Embassy's tick at 1Mhz, and Zephyr's at 50k, it is a little +/// over 11 years. Higher of either will reduce that further. But, 128-bit arithmetic is fairly +/// inefficient. +type InterTime = u128; + embassy_time_driver::time_driver_impl!(static DRIVER: ZephyrTimeDriver = ZephyrTimeDriver { queue: Mutex::new(RefCell::new(Queue::new())), timer: Mutex::new(RefCell::new(unsafe { mem::zeroed() })), @@ -63,9 +75,40 @@ impl ZTimer { } } +/// Convert from a zephyr tick count, to an embassy tick count. +/// +/// This is done using an intermediate type defined above. +/// This conversion truncates. +fn zephyr_to_embassy(ticks: u64) -> u64 { + if ZEPHYR_TICK_HZ == EMBASSY_TICK_HZ { + // This should happen at compile time. + return ticks; + } + + // Otherwise do the intermediate conversion. + let prod = (ticks as InterTime) * (EMBASSY_TICK_HZ as InterTime); + (prod / (ZEPHYR_TICK_HZ as InterTime)) as u64 +} + +/// Convert from an embassy tick count to a zephyr. +/// +/// This conversion use ceil so that values are always large enough. 
+fn embassy_to_zephyr(ticks: u64) -> u64 { + if ZEPHYR_TICK_HZ == EMBASSY_TICK_HZ { + return ticks; + } + + let prod = (ticks as InterTime) * (ZEPHYR_TICK_HZ as InterTime); + prod.div_ceil(EMBASSY_TICK_HZ as InterTime) as u64 +} + +fn zephyr_now() -> u64 { + crate::time::now().ticks() +} + impl Driver for ZephyrTimeDriver { fn now(&self) -> u64 { - crate::time::now().ticks() + zephyr_to_embassy(zephyr_now()) } fn schedule_wake(&self, at: u64, waker: &core::task::Waker) { @@ -73,10 +116,13 @@ impl Driver for ZephyrTimeDriver { let mut queue = self.queue.borrow(cs).borrow_mut(); let mut timer = self.timer.borrow(cs).borrow_mut(); + // All times below are in Zephyr units. + let at = embassy_to_zephyr(at); + if queue.schedule_wake(at, waker) { - let mut next = queue.next_expiration(self.now()); - while !timer.set_alarm(next, self.now()) { - next = queue.next_expiration(self.now()); + let mut next = queue.next_expiration(zephyr_now()); + while !timer.set_alarm(next, zephyr_now()) { + next = queue.next_expiration(zephyr_now()); } } }) @@ -89,9 +135,9 @@ impl ZephyrTimeDriver { let mut queue = self.queue.borrow(cs).borrow_mut(); let mut timer = self.timer.borrow(cs).borrow_mut(); - let mut next = queue.next_expiration(self.now()); - while !timer.set_alarm(next, self.now()) { - next = queue.next_expiration(self.now()); + let mut next = queue.next_expiration(zephyr_now()); + while !timer.set_alarm(next, zephyr_now()) { + next = queue.next_expiration(zephyr_now()); } }) } From 054e09f6c2197ef4db33159979734ae1fc682826 Mon Sep 17 00:00:00 2001 From: David Brown Date: Mon, 24 Mar 2025 15:53:22 -0600 Subject: [PATCH 08/17] zephyr-build: Only generate nodes that are enabled Don't generate instance access code for DT nodes that aren't actually enabled. Signed-off-by: David Brown --- zephyr-build/src/devicetree/augment.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/zephyr-build/src/devicetree/augment.rs b/zephyr-build/src/devicetree/augment.rs index 3b781da..c5cfd77 100644 --- a/zephyr-build/src/devicetree/augment.rs +++ b/zephyr-build/src/devicetree/augment.rs @@ -28,6 +28,12 @@ pub trait Augment { /// The default implementation checks if this node matches and calls a generator if it does, or /// does nothing if not. fn augment(&self, node: &Node, tree: &DeviceTree) -> TokenStream { + // If there is a status field present, and it is not set to "okay", don't augment this node. + if let Some(status) = node.get_single_string("status") { + if status != "okay" { + return TokenStream::new(); + } + } if self.is_compatible(node) { self.generate(node, tree) } else { From 0e4fb7c0c6f92fd98110aeb00117f6718ed40ac1 Mon Sep 17 00:00:00 2001 From: David Brown Date: Thu, 10 Apr 2025 15:55:25 -0600 Subject: [PATCH 09/17] wip: rtio/i2c --- docgen/prj.conf | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docgen/prj.conf b/docgen/prj.conf index 4c55eb9..b23ff32 100644 --- a/docgen/prj.conf +++ b/docgen/prj.conf @@ -7,3 +7,6 @@ CONFIG_GPIO=y CONFIG_GPIO_ENABLE_DISABLE_INTERRUPT=y CONFIG_PRINTK=y CONFIG_POLL=y +CONFIG_I2C=y +CONFIG_I2C_RTIO=y +CONFIG_RTIO=y From 6b20ed47fe5c594bfdcc6f1370b544001df69ba9 Mon Sep 17 00:00:00 2001 From: David Brown Date: Thu, 10 Apr 2025 15:56:03 -0600 Subject: [PATCH 10/17] tests: drivers: gpio-async: Add small readme Add a short readme to explain some of the difficulties with level triggered interrupts (notably, most STM32 devices do not support level triggered interrupts). 
Signed-off-by: David Brown --- tests/drivers/gpio-async/README.md | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 tests/drivers/gpio-async/README.md diff --git a/tests/drivers/gpio-async/README.md b/tests/drivers/gpio-async/README.md new file mode 100644 index 0000000..de7a383 --- /dev/null +++ b/tests/drivers/gpio-async/README.md @@ -0,0 +1,6 @@ +# Async gpio + +This demo makes use of the GPIO `wait_for_high()` and `wait_for_low()` async operations. + +Unfortunately, not all GPIO controllers support level triggered interrupts. Notably, most of the +stm32 line does not support level triggered interrupts. From ac52031951573e9443b615cb8df780804c830421 Mon Sep 17 00:00:00 2001 From: David Brown Date: Thu, 10 Apr 2025 16:00:42 -0600 Subject: [PATCH 11/17] wip: Drivers hacks for debugging Make some small changes to try testing this on stm32. This change can be discarded. Signed-off-by: David Brown --- tests/drivers/gpio-async/prj.conf | 6 +++++- tests/drivers/gpio-async/src/lib.rs | 5 +++++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/tests/drivers/gpio-async/prj.conf b/tests/drivers/gpio-async/prj.conf index f9a269b..4ac5fcd 100644 --- a/tests/drivers/gpio-async/prj.conf +++ b/tests/drivers/gpio-async/prj.conf @@ -13,4 +13,8 @@ CONFIG_RUST_ALLOC=y CONFIG_GPIO=y CONFIG_GPIO_ENABLE_DISABLE_INTERRUPT=y -CONFIG_LOG_BACKEND_RTT=n +# CONFIG_LOG_BACKEND_RTT=n + +CONFIG_UART_CONSOLE=n +CONFIG_RTT_CONSOLE=y +CONFIG_USE_SEGGER_RTT=y diff --git a/tests/drivers/gpio-async/src/lib.rs b/tests/drivers/gpio-async/src/lib.rs index ebccc5f..076d8c3 100644 --- a/tests/drivers/gpio-async/src/lib.rs +++ b/tests/drivers/gpio-async/src/lib.rs @@ -35,13 +35,18 @@ async fn main(spawner: Spawner) { info!("Hello world"); let _ = spawner; + /* let mut col0 = zephyr::devicetree::labels::col0::get_instance().unwrap(); let mut row0 = zephyr::devicetree::labels::row0::get_instance().unwrap(); + */ + let mut row0 = zephyr::devicetree::aliases::sw0::get_instance().unwrap(); let mut gpio_token = unsafe { zephyr::device::gpio::GpioToken::get_instance().unwrap() }; unsafe { + /* col0.configure(&mut gpio_token, GPIO_OUTPUT_ACTIVE); col0.set(&mut gpio_token, true); + */ row0.configure(&mut gpio_token, GPIO_INPUT | GPIO_PULL_DOWN); } From cd5fdefd38e8a1ee15f95679d2dbb1bf77fed305 Mon Sep 17 00:00:00 2001 From: David Brown Date: Thu, 10 Apr 2025 16:01:22 -0600 Subject: [PATCH 12/17] zephyr-macros: Fix dependency version A version mismatch here causes compilation errors due to other crates depending on this specific version. Signed-off-by: David Brown --- zephyr-macros/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zephyr-macros/Cargo.toml b/zephyr-macros/Cargo.toml index a9c386b..72c2a1b 100644 --- a/zephyr-macros/Cargo.toml +++ b/zephyr-macros/Cargo.toml @@ -9,7 +9,7 @@ descriptions = "Macros for managing tasks and work queues in Zephyr" proc-macro = true [dependencies] -syn = { version = "2.0.85", features = ["full", "visit"] } +syn = { version = "2.0.79", features = ["full", "visit"] } quote = "1.0.37" proc-macro2 = "1.0.86" darling = "0.20.1" From 91741b9723b516493c44b7e2a5138abd85cc8aaa Mon Sep 17 00:00:00 2001 From: David Brown Date: Thu, 10 Apr 2025 16:02:10 -0600 Subject: [PATCH 13/17] zephyr: Change from Arc to declaration on Work Queues Instead of allocating work queues, and hoping they don't get freed, instead move to a static declaration. This is facilitated by a macro `define_work_queue` that makes it easy to declare these at the top level. 
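
As a rough sketch of the new pattern, based on the doc comments and the bench sample in this
change (the `Ping` action, the stack-size constant, and the exact call shapes are illustrative
only):

```rust
use core::pin::Pin;
use zephyr::define_work_queue;
use zephyr::work::{SimpleAction, Work};

// Illustrative stack size for the work queue thread.
const WORK_STACK_SIZE: usize = 2048;

// A trivial action; anything implementing SimpleAction can be submitted as work.
struct Ping;

impl SimpleAction for Ping {
    fn act(self: Pin<&Self>) {
        zephyr::printkln!("ping");
    }
}

// Statically declare the work queue alongside its stack.
define_work_queue!(MY_WORKQ, WORK_STACK_SIZE, priority = 5, no_yield = true);

fn run_ping() {
    // start() brings the queue thread up on first use and returns a &'static WorkQueue.
    let workq = MY_WORKQ.start();
    let work = Work::new(Ping);
    Work::submit_to_queue(work, workq).unwrap();
}
```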
Signed-off-by: David Brown --- samples/bench/src/lib.rs | 44 ++--- zephyr/src/lib.rs | 2 +- zephyr/src/work.rs | 343 +++++++++++++++++++++------------------ 3 files changed, 202 insertions(+), 187 deletions(-) diff --git a/samples/bench/src/lib.rs b/samples/bench/src/lib.rs index 432384f..5a8c7f4 100644 --- a/samples/bench/src/lib.rs +++ b/samples/bench/src/lib.rs @@ -16,11 +16,11 @@ use alloc::collections::vec_deque::VecDeque; use alloc::vec; use executor::AsyncTests; use static_cell::StaticCell; -use zephyr::kobj_define; +use zephyr::define_work_queue; use zephyr::raw::k_yield; use zephyr::sync::{PinWeak, SpinMutex}; use zephyr::time::NoWait; -use zephyr::work::{SimpleAction, Work, WorkQueueBuilder}; +use zephyr::work::{SimpleAction, Work}; use zephyr::{ kconfig::CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC, printkln, @@ -80,7 +80,7 @@ extern "C" fn rust_main() { spin_bench(); sem_bench(); - let simple = Simple::new(tester.workq.clone()); + let simple = Simple::new(tester.workq); let mut num = 6; while num < 250 { simple.run(num, TOTAL_ITERS / num); @@ -147,7 +147,7 @@ struct ThreadTests { high_command: Sender, /// A work queue for the main runners. - workq: Arc, + workq: &'static WorkQueue, /// The test also all return their result to the main. The threads Send, the main running /// receives. @@ -163,15 +163,7 @@ impl ThreadTests { let (low_send, low_recv) = bounded(1); let (high_send, high_recv) = bounded(1); - let workq = Arc::new( - WorkQueueBuilder::new() - .set_priority(5) - .set_no_yield(true) - .start(WORK_STACK.init_once(()).unwrap()), - ); - - // Leak the workqueue so it doesn't get dropped. - let _ = Arc::into_raw(workq.clone()); + let workq = WORKQ.start(); let mut result = Self { sems: &SEMS, @@ -581,20 +573,20 @@ enum TestResult { /// The Simple test just does a ping pong test using manually submitted work. struct Simple { - workq: Arc, + workq: &'static WorkQueue, } impl Simple { - fn new(workq: Arc) -> Self { + fn new(workq: &'static WorkQueue) -> Self { Self { workq } } fn run(&self, workers: usize, iterations: usize) { // printkln!("Running Simple"); - let main = Work::new(SimpleMain::new(workers * iterations, self.workq.clone())); + let main = Work::new(SimpleMain::new(workers * iterations, self.workq)); let children: VecDeque<_> = (0..workers) - .map(|n| Work::new(SimpleWorker::new(main.clone(), self.workq.clone(), n))) + .map(|n| Work::new(SimpleWorker::new(main.clone(), self.workq, n))) .collect(); let mut locked = main.action().locked.lock().unwrap(); @@ -603,7 +595,7 @@ impl Simple { let start = now(); // Fire off main, which will run everything. - Work::submit_to_queue(main.clone(), &self.workq).unwrap(); + Work::submit_to_queue(main.clone(), self.workq).unwrap(); // And wait for the completion semaphore. main.action().done.take(Forever).unwrap(); @@ -642,12 +634,12 @@ impl Simple { /// A simple worker. When run, it submits the main worker to do the next work. struct SimpleWorker { main: PinWeak>, - workq: Arc, + workq: &'static WorkQueue, _id: usize, } impl SimpleWorker { - fn new(main: Pin>>, workq: Arc, id: usize) -> Self { + fn new(main: Pin>>, workq: &'static WorkQueue, id: usize) -> Self { Self { main: PinWeak::downgrade(main), workq, @@ -660,7 +652,7 @@ impl SimpleAction for SimpleWorker { fn act(self: Pin<&Self>) { // Each time we are run, fire the main worker back up. 
let main = self.main.upgrade().unwrap(); - Work::submit_to_queue(main.clone(), &self.workq).unwrap(); + Work::submit_to_queue(main.clone(), self.workq).unwrap(); } } @@ -670,7 +662,7 @@ impl SimpleAction for SimpleWorker { struct SimpleMain { /// All of the work items. locked: SpinMutex, - workq: Arc, + workq: &'static WorkQueue, done: Semaphore, } @@ -690,12 +682,12 @@ impl SimpleAction for SimpleMain { lock.count -= 1; drop(lock); - Work::submit_to_queue(worker.clone(), &self.workq).unwrap(); + Work::submit_to_queue(worker.clone(), self.workq).unwrap(); } } impl SimpleMain { - fn new(count: usize, workq: Arc) -> Self { + fn new(count: usize, workq: &'static WorkQueue) -> Self { Self { locked: SpinMutex::new(Locked::new(count)), done: Semaphore::new(0, 1), @@ -812,9 +804,7 @@ impl<'a> BenchTimer<'a> { } } -kobj_define! { - static WORK_STACK: ThreadStack; -} +define_work_queue!(WORKQ, WORK_STACK_SIZE, priority = 5, no_yield = true); static SEMS: [Semaphore; NUM_THREADS] = [const { Semaphore::new(0, u32::MAX) }; NUM_THREADS]; static BACK_SEMS: [Semaphore; NUM_THREADS] = [const { Semaphore::new(0, u32::MAX) }; NUM_THREADS]; diff --git a/zephyr/src/lib.rs b/zephyr/src/lib.rs index fcc3965..cef1757 100644 --- a/zephyr/src/lib.rs +++ b/zephyr/src/lib.rs @@ -39,7 +39,7 @@ //! level operation that is still quite useful in regular code. //! - [`timer`]: Rust interfaces to Zephyr timers. These timers can be used either by registering a //! callback, or polled or waited for for an elapsed time. -//! - [`work`]: Zephyr work queues for Rust. The [`work::WorkQueueBuilder`] and resulting +//! - [`work`]: Zephyr work queues for Rust. The [`define_work_queue`] macro and resulting //! [`work::WorkQueue`] allow creation of Zephyr work queues to be used from Rust. The //! [`work::Work`] item had an action that will be invoked by the work queue, and can be manually //! submitted when needed. diff --git a/zephyr/src/work.rs b/zephyr/src/work.rs index c3ed8d2..af0dd8b 100644 --- a/zephyr/src/work.rs +++ b/zephyr/src/work.rs @@ -51,193 +51,145 @@ //! //! ## The work queues themselves //! -//! Workqueues themselves are built using [`WorkQueueBuilder`]. This needs a statically defined -//! stack. Typical usage will be along the lines of: -//! ```rust -//! kobj_define! { -//! WORKER_STACK: ThreadStack<2048>; -//! } -//! // ... -//! let main_worker = Box::new( -//! WorkQueueBuilder::new() -//! .set_priority(2). -//! .set_name(c"mainloop") -//! .set_no_yield(true) -//! .start(MAIN_LOOP_STACK.init_once(()).unwrap()) -//! ); -//! -//! let _ = zephyr::kio::spawn( -//! mainloop(), // Async or function returning Future. -//! &main_worker, -//! c"w:mainloop", -//! ); +//! Work Queues should be declared with the `define_work_queue!` macro, this macro requires the name +//! of the symbol for the work queue, the stack size, and then zero or more optional arguments, +//! defined by the fields in the [`WorkQueueDeclArgs`] struct. For example: //! -//! ... -//! -//! // Leak the Box so that the worker is never freed. -//! let _ = Box::leak(main_worker); +//! ```rust +//! define_work_queue!(MY_WORKQ, 2048, no_yield = true, priority = 2); //! ``` //! -//! It is important that WorkQueues never be dropped. It has a Drop implementation that invokes -//! panic. Zephyr provides no mechanism to stop work queue threads, so dropping would result in -//! undefined behavior. -//! -//! # Current Status -//! -//! Although Zephyr has 3 types of work queues, the `k_work_poll` is sufficient to implement all of -//! 
the behavior, and this implementation only implements this type. Non Future work could be built -//! around the other work types. -//! -//! As such, this means that manually constructed work is still built using `Future`. The `_async` -//! primitives throughout this crate can be used just as readily by hand-written Futures as by async -//! code. Notable, the use of [`Signal`] will likely be common, along with possible timeouts. -//! -//! [`sys::sync::Semaphore`]: crate::sys::sync::Semaphore -//! [`sync::channel`]: crate::sync::channel -//! [`sync::Mutex`]: crate::sync::Mutex -//! [`join`]: futures::JoinHandle::join -//! [`join_async`]: futures::JoinHandle::join_async +//! Then, in code, the work queue can be started, and used to issue work. +//! ```rust +//! let my_workq = MY_WORKQ.start(); +//! let action = Work::new(action_item); +//! action.submit(my_workq); +//! ``` extern crate alloc; use core::{ - cell::UnsafeCell, - ffi::{c_int, c_uint, CStr}, + cell::{RefCell, UnsafeCell}, + ffi::{c_char, c_int, c_uint}, mem, pin::Pin, - ptr, + sync::atomic::Ordering, }; +use critical_section::Mutex; +use portable_atomic::AtomicBool; +use portable_atomic_util::Arc; use zephyr_sys::{ k_poll_signal, k_poll_signal_check, k_poll_signal_init, k_poll_signal_raise, k_poll_signal_reset, k_work, k_work_init, k_work_q, k_work_queue_config, k_work_queue_init, - k_work_queue_start, k_work_submit, k_work_submit_to_queue, + k_work_queue_start, k_work_submit, k_work_submit_to_queue, z_thread_stack_element, }; -use crate::{ - error::to_result_void, - object::Fixed, - simpletls::SimpleTls, - sync::{Arc, Mutex}, - sys::thread::ThreadStack, -}; +use crate::{error::to_result_void, object::Fixed, simpletls::SimpleTls}; -/// A builder for work queues themselves. -/// -/// A work queue is a Zephyr thread that instead of directly running a piece of code, manages a work -/// queue. Various types of `Work` can be submitted to these queues, along with various types of -/// triggering conditions. -pub struct WorkQueueBuilder { - /// The "config" value passed in. - config: k_work_queue_config, - /// Priority for the work queue thread. - priority: c_int, +/// The WorkQueue decl args as a struct, so we can have a default, and the macro can fill in those +/// specified by the user. +pub struct WorkQueueDeclArgs { + /// Should this work queue call yield after each queued item. + pub no_yield: bool, + /// Is this work queue thread "essential". + /// + /// Threads marked essential will panic if they stop running. + pub essential: bool, + /// Zephyr thread priority for the work queue thread. + pub priority: c_int, } -impl WorkQueueBuilder { - /// Construct a new WorkQueueBuilder with default values. - pub fn new() -> Self { +impl WorkQueueDeclArgs { + /// Like `Default::default`, but const. + pub const fn default_values() -> Self { Self { - config: k_work_queue_config { - name: ptr::null(), - no_yield: false, - essential: false, - }, + no_yield: false, + essential: false, priority: 0, } } +} - /// Set the name for the WorkQueue thread. - /// - /// This name shows up in debuggers and some analysis tools. - pub fn set_name(&mut self, name: &'static CStr) -> &mut Self { - self.config.name = name.as_ptr(); - self - } - - /// Set the "no yield" flag for the created worker. - /// - /// If this is not set, the work queue will call `k_yield` between each enqueued work item. For - /// non-preemptible threads, this will allow other threads to run. For preemptible threads, - /// this will allow other threads at the same priority to run. 
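    // [Editor's note, illustration only — not part of the patch] The builder setters removed in
    // this hunk (`set_no_yield`, `set_essential`, `set_priority`) correspond to the `no_yield`,
    // `essential`, and `priority` fields of `WorkQueueDeclArgs` above, and are now supplied as
    // `key = value` arguments to `define_work_queue!` (defined later in this file).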
- /// - /// This method has a negative in the name, which goes against typical conventions. This is - /// done to match the field in the Zephyr config. - pub fn set_no_yield(&mut self, value: bool) -> &mut Self { - self.config.no_yield = value; - self - } - - /// Set the "essential" flag for the created worker. - /// - /// This sets the essential flag on the running thread. The system considers the termination of - /// an essential thread to be a fatal error. - pub fn set_essential(&mut self, value: bool) -> &mut Self { - self.config.essential = value; - self - } - - /// Set the priority for the worker thread. - /// - /// See the Zephyr docs for the meaning of priority. - pub fn set_priority(&mut self, value: c_int) -> &mut Self { - self.priority = value; - self - } +/// A static declaration of a work-queue. This associates a work queue, with a stack, and an atomic +/// to determine if it has been initialized. +// TODO: Remove the pub on the fields, and make a constructor. +pub struct WorkQueueDecl { + queue: WorkQueue, + stack: &'static crate::thread::ThreadStack, + config: k_work_queue_config, + priority: c_int, + started: AtomicBool, +} - /// Start the given work queue thread. - /// - /// TODO: Implement a 'start' that works from a static work queue. - pub fn start(&self, stack: ThreadStack) -> WorkQueue { - let item: Fixed = Fixed::new(unsafe { mem::zeroed() }); - unsafe { - // SAFETY: Initialize zeroed memory. - k_work_queue_init(item.get()); - - // SAFETY: This associates the workqueue with the thread ID that runs it. The thread is - // a pointer into this work item, which will not move, because of the Fixed. - let this = &mut *item.get(); - WORK_QUEUES - .lock() - .unwrap() - .insert(&this.thread, WorkQueueRef(item.get())); - - // SAFETY: Start work queue thread. The main issue here is that the work queue cannot - // be deallocated once the thread has started. We enforce this by making Drop panic. - k_work_queue_start( - item.get(), - stack.base, - stack.size, - self.priority, - &self.config, - ); +// SAFETY: Sync is needed here to make a static declaration, despite the `*const i8` that is burried +// in the config. +unsafe impl Sync for WorkQueueDecl {} + +impl WorkQueueDecl { + /// Static constructor. Mostly for use by the macro. + pub const fn new( + stack: &'static crate::thread::ThreadStack, + name: *const c_char, + args: WorkQueueDeclArgs, + ) -> Self { + Self { + queue: unsafe { mem::zeroed() }, + stack, + config: k_work_queue_config { + name, + no_yield: args.no_yield, + essential: args.essential, + }, + priority: args.priority, + started: AtomicBool::new(false), } + } - WorkQueue { item } + /// Start the work queue thread, if needed, and return a reference to it. + pub fn start(&'static self) -> &'static WorkQueue { + critical_section::with(|cs| { + if self.started.load(Ordering::Relaxed) { + // Already started, just return it. + return &self.queue; + } + + // SAFETY: Starting is coordinated by the atomic, as well as being protected in a + // critical section. + unsafe { + let this = &mut *self.queue.item.get(); + + k_work_queue_init(self.queue.item.get()); + + // Add to the WORK_QUEUES data. That needs to be changed to a critical + // section Mutex from a Zephyr Mutex, as that would deadlock if called while in a + // critrical section. + let mut tls = WORK_QUEUES.borrow_ref_mut(cs); + tls.insert(&this.thread, WorkQueueRef(self.queue.item.get())); + + // Start the work queue thread. 
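                // [Editor's note] The call below hands Zephyr the embedded `k_work_q`, the base
                // pointer and size of the statically reserved stack, and the priority and
                // `k_work_queue_config` captured when the `WorkQueueDecl` was constructed.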
+ k_work_queue_start( + self.queue.item.get(), + self.stack.data.get() as *mut z_thread_stack_element, + self.stack.size(), + self.priority, + &self.config, + ); + } + + &self.queue + }) } } /// A running work queue thread. /// -/// # Panic -/// -/// Allowing a work queue to drop will result in a panic. There are two ways to handle this, -/// depending on whether the WorkQueue is in a Box, or an Arc: -/// ``` -/// // Leak a work queue in an Arc. -/// let wq = Arc::new(WorkQueueBuilder::new().start(...)); -/// // If the Arc is used after this: -/// let _ = Arc::into_raw(wq.clone()); -/// // If the Arc is no longer needed: -/// let _ = Arc::into_raw(wq); -/// -/// // Leak a work queue in a Box. -/// let wq = Box::new(WorkQueueBuilder::new().start(...)); -/// let _ = Box::leak(wq); -/// ``` +/// This must be declared statically, and initialized once. Please see the macro +/// [`define_work_queue`] which declares this with a [`StaticWorkQueue`] to help with the +/// association with a stack, and making sure the queue is only started once. pub struct WorkQueue { #[allow(dead_code)] - item: Fixed, + item: UnsafeCell, } /// Work queues can be referenced from multiple threads, and thus are Send and Sync. @@ -265,7 +217,8 @@ impl Drop for WorkQueue { /// /// This is a little bit messy as we don't have a lazy mechanism, so we have to handle this a bit /// manually right now. -static WORK_QUEUES: Mutex> = Mutex::new(SimpleTls::new()); +static WORK_QUEUES: Mutex>> = + Mutex::new(RefCell::new(SimpleTls::new())); /// For the queue mapping, we need a simple wrapper around the underlying pointer, one that doesn't /// implement stop. @@ -278,7 +231,7 @@ unsafe impl Sync for WorkQueueRef {} /// Retrieve the current work queue, if we are running within one. pub fn get_current_workq() -> Option<*mut k_work_q> { - WORK_QUEUES.lock().unwrap().get().map(|wq| wq.0) + critical_section::with(|cs| WORK_QUEUES.borrow_ref(cs).get().map(|wq| wq.0)) } /// A Rust wrapper for `k_poll_signal`. @@ -408,6 +361,24 @@ impl SubmitResult { } } +/* +pub trait Queueable: Send + Sync { + fn as_ptr(&self) -> *const (); +} + +impl Queueable for Arc { + fn as_ptr(&self) -> *const () { + todo!() + } +} + +impl Queueable for &'static T { + fn as_ptr(&self) -> *const () { + todo!() + } +} +*/ + /// A simple action that just does something with its data. /// /// This is similar to a Future, except there is no concept of it completing. It manages its @@ -480,17 +451,24 @@ impl Work { // SAFETY: C the code does not perform moves on the data, and the `from_raw` below puts it // back into a Pin when it reconstructs the Arc. let this = unsafe { Pin::into_inner_unchecked(this) }; - let _ = Arc::into_raw(this); + let _ = Arc::into_raw(this.clone()); // SAFETY: The Pin ensures this will not move. Our implementation of drop ensures that the // work item is no longer queued when the data is dropped. - SubmitResult::to_result(unsafe { k_work_submit(work) }) + let result = SubmitResult::to_result(unsafe { k_work_submit(work) }); + + Self::check_drop(work, &result); + + result } /// Submit this work to a specified work queue. /// /// TODO: Change when we have better wrappers for work queues. - pub fn submit_to_queue(this: Pin>, queue: &WorkQueue) -> crate::Result { + pub fn submit_to_queue( + this: Pin>, + queue: &'static WorkQueue, + ) -> crate::Result { let work = this.work.get(); // "leak" the arc to give to C. We'll reconstruct it in the handler. @@ -501,7 +479,12 @@ impl Work { // SAFETY: The Pin ensures this will not move. 
Our implementation of drop ensures that the // work item is no longer queued when the data is dropped. - SubmitResult::to_result(unsafe { k_work_submit_to_queue(queue.item.get(), work) }) + let result = + SubmitResult::to_result(unsafe { k_work_submit_to_queue(queue.item.get(), work) }); + + Self::check_drop(work, &result); + + result } /// Callback, through C, but bound by a specific type. @@ -541,8 +524,50 @@ impl Work { Pin::new_unchecked(this) } + /// Determine if this work was submitted, and cause a drop of the Arc to happen if it was not. + pub fn check_drop(work: *const k_work, result: &crate::Result) { + if matches!(result, Ok(SubmitResult::AlreadySubmitted) | Err(_)) { + // SAFETY: If the above submit indicates that it was already running, the work will not + // be submitted (no additional handle will be called). "un leak" the work so that it + // will be dropped. Also, any error indicates that the work did not enqueue. + unsafe { + let this = Self::from_raw(work); + drop(this); + } + } + } + /// Access the inner action. pub fn action(&self) -> &T { &self.action } } + +/// Declare a static work queue. +/// +/// This declares a static work queue (of type [`WorkQueueDecl`]). This will have a single method +/// `.start()` which can be used to start the work queue, as well as return the persistent handle +/// that can be used to enqueue to it. +#[macro_export] +macro_rules! define_work_queue { + ($name:ident, $stack_size:expr) => { + $crate::define_work_queue!($name, $stack_size,); + }; + ($name:ident, $stack_size:expr, $($key:ident = $value:expr),* $(,)?) => { + static $name: $crate::work::WorkQueueDecl<$stack_size> = { + #[link_section = concat!(".noinit.workq.", stringify!($name))] + static _ZEPHYR_STACK: $crate::thread::ThreadStack<$stack_size> = + $crate::thread::ThreadStack::new(); + const _ZEPHYR_C_NAME: &[u8] = concat!(stringify!($name), "\0").as_bytes(); + const _ZEPHYR_ARGS: $crate::work::WorkQueueDeclArgs = $crate::work::WorkQueueDeclArgs { + $($key: $value,)* + ..$crate::work::WorkQueueDeclArgs::default_values() + }; + $crate::work::WorkQueueDecl::new( + &_ZEPHYR_STACK, + _ZEPHYR_C_NAME.as_ptr() as *const ::core::ffi::c_char, + _ZEPHYR_ARGS, + ) + }; + }; +} From 8a9dc75b958f26289f6fc17b42c597673af4d920 Mon Sep 17 00:00:00 2001 From: David Brown Date: Fri, 11 Apr 2025 11:10:41 -0600 Subject: [PATCH 14/17] ci: Workaround for docgen dependency problem The docgen target seems to try building the docs before the generated headers are present. Work around this by making a full build first, and then generating the docs. Signed-off-by: David Brown --- .github/workflows/docs.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 0e65bd7..26316fe 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -45,7 +45,8 @@ jobs: working-directory: zephyr-lang-rust run: | # Note that the above build doesn't set Zephyrbase, so we'll need to do that here. - west build -t rustdoc -b qemu_cortex_m3 docgen + west build -b qemu_cortex_m3 docgen + west build -t rustdoc mkdir rustdocs mv build/rust/target/thumbv7m-none-eabi/doc rustdocs/nostd From 4bc73bb96055527248085f9b642af34d09de409d Mon Sep 17 00:00:00 2001 From: David Brown Date: Fri, 11 Apr 2025 11:38:35 -0600 Subject: [PATCH 15/17] zephyr: work: Remove documentation for removed code The work-queue-based Future has been removed, so also removed the documentation that was describing it. 
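
[Editor's aside, not part of this patch series] For readers following the work-queue rework, here is a minimal sketch of how `define_work_queue!`, `Work`, and `SimpleAction` from the preceding patches fit together. The `Ping` type, `PING_WORKQ` name, stack size, and `run_ping` function are hypothetical; the macro and the `Work`/`SimpleAction` APIs are taken from the diffs above.

```rust
use core::pin::Pin;
use zephyr::define_work_queue;
use zephyr::printkln;
use zephyr::work::{SimpleAction, Work};

/// Hypothetical work item: prints a line each time the queue runs it.
struct Ping;

impl SimpleAction for Ping {
    fn act(self: Pin<&Self>) {
        printkln!("ping");
    }
}

// Statically reserve the work queue and its stack; the optional `key = value`
// arguments fill in the corresponding `WorkQueueDeclArgs` fields.
define_work_queue!(PING_WORKQ, 2048, priority = 5, no_yield = true);

fn run_ping() {
    // `start()` starts the queue thread on first use and returns the persistent
    // `&'static WorkQueue` handle.
    let workq = PING_WORKQ.start();
    let ping = Work::new(Ping);
    Work::submit_to_queue(ping, workq).unwrap();
}
```

If submission fails, or reports that the item was already submitted, the `check_drop` path shown above reclaims the `Arc` that was handed to C, so the work item is not leaked.
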
Signed-off-by: David Brown --- zephyr/src/work.rs | 38 ++++---------------------------------- 1 file changed, 4 insertions(+), 34 deletions(-) diff --git a/zephyr/src/work.rs b/zephyr/src/work.rs index af0dd8b..21ca411 100644 --- a/zephyr/src/work.rs +++ b/zephyr/src/work.rs @@ -17,39 +17,7 @@ //! having the `k_work` embedded in their structure, and Zephyr schedules the work when the given //! reason happens. //! -//! At this time, only the basic work queue type is supported. -//! -//! Zephyr's work queues can be used in different ways: -//! -//! - Work can be scheduled as needed. For example, an IRQ handler can queue a work item to process -//! data it has received from a device. -//! - Work can be scheduled periodically. -//! -//! As most C use of Zephyr statically allocates things like work, these are typically rescheduled -//! when the work is complete. The work queue scheduling functions are designed, and intended, for -//! a given work item to be able to reschedule itself, and such usage is common. -//! -//! ## Ownership -//! -//! The remaining challenge with implementing `k_work` for Rust is that of ownership. The model -//! taken here is that the work items are held in a `Box` that is effectively owned by the work -//! itself. When the work item is scheduled to Zephyr, ownership of that box is effectively handed -//! off to C, and then when the work item is called, the Box re-constructed. This repeats until the -//! work is no longer needed, at which point the work will be dropped. -//! -//! There are two common ways the lifecycle of work can be managed in an embedded system: -//! -//! - A set of `Future`'s are allocated once at the start, and these never return a value. Work -//! Futures inside of this (which correspond to `.await` in async code) can have lives and return -//! values, but the main loops will not return values, or be dropped. Embedded Futures will -//! typically not be boxed. -//! -//! One consequence of the ownership being passed through to C code is that if the work cancellation -//! mechanism is used on a work queue, the work items themselves will be leaked. -//! -//! These work items are also `Pin`, to ensure that the work actions are not moved. -//! -//! ## The work queues themselves +//! At this point, this code supports the simple work queues, with [`Work`] items. //! //! Work Queues should be declared with the `define_work_queue!` macro, this macro requires the name //! of the symbol for the work queue, the stack size, and then zero or more optional arguments, @@ -185,8 +153,10 @@ impl WorkQueueDecl { /// A running work queue thread. /// /// This must be declared statically, and initialized once. Please see the macro -/// [`define_work_queue`] which declares this with a [`StaticWorkQueue`] to help with the +/// [`define_work_queue`] which declares this with a [`WorkQueue`] to help with the /// association with a stack, and making sure the queue is only started once. +/// +/// [`define_work_queue`]: crate::define_work_queue pub struct WorkQueue { #[allow(dead_code)] item: UnsafeCell, From f26a507d4fd1206659177de3eb68cab5568b067b Mon Sep 17 00:00:00 2001 From: David Brown Date: Fri, 11 Apr 2025 11:39:13 -0600 Subject: [PATCH 16/17] zephyr: Documentation fixes Fix various broken links in documentation comments. 
Signed-off-by: David Brown --- zephyr/src/embassy.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zephyr/src/embassy.rs b/zephyr/src/embassy.rs index 8bbc254..d924ae9 100644 --- a/zephyr/src/embassy.rs +++ b/zephyr/src/embassy.rs @@ -71,7 +71,7 @@ //! //! ## Caveats //! -//! [`Semaphore::take_async`]: crate::sys::sync::Semaphore::take_async +//! The executor currently doesn't support async waits on Zephyr primitives, such as Semaphore. #[cfg(feature = "time-driver")] mod time_driver; From 0f75e9eee8aa803809726627ee4f6057bdd67fc3 Mon Sep 17 00:00:00 2001 From: David Brown Date: Thu, 17 Apr 2025 12:02:58 -0600 Subject: [PATCH 17/17] zephyr-build: Simplify dt-yaml syntax The Serde "tag" rules was being used for enums, which results in enums being generated in a somewhat awkward format. Remove this, and change the syntax of the dt-rust.yaml file to match. This typically results in changes like: - type: instance value: raw: type: myself to be simplified to just: - !Instance raw: !Myself Signed-off-by: David Brown --- dt-rust.yaml | 101 ++++++++++--------------- zephyr-build/src/devicetree/augment.rs | 4 - 2 files changed, 38 insertions(+), 67 deletions(-) diff --git a/dt-rust.yaml b/dt-rust.yaml index 1932750..4209e7a 100644 --- a/dt-rust.yaml +++ b/dt-rust.yaml @@ -7,13 +7,10 @@ # few instances were we can actually just match on a property. - name: gpio-controller rules: - - type: has_prop - value: gpio-controller + - !HasProp gpio-controller actions: - - type: instance - value: - raw: - type: myself + - !Instance + raw: !Myself device: crate::device::gpio::Gpio static_type: crate::device::gpio::GpioStatic @@ -22,54 +19,42 @@ # with each gpio. - name: gpio-leds rules: - - type: compatible - value: - names: - - gpio-leds + - !Compatible + names: [gpio-leds] level: 1 actions: - - type: instance - value: - raw: - type: phandle - value: gpios + - !Instance + raw: !Phandle gpios device: crate::device::gpio::GpioPin # Hook up the gpio-keys as gpio pins as well - name: gpio-keys rules: - - type: compatible - value: + - !Compatible names: - - gpio-keys + - gpio-keys level: 1 actions: - - type: instance - value: - raw: - type: phandle - value: gpios + - !Instance + raw: !Phandle gpios device: crate::device::gpio::GpioPin # Flash controllers don't have any particular property to identify them, so we need a list of # compatible values that should match. - name: flash-controller rules: - - type: compatible - value: + - !Compatible names: - - "nordic,nrf52-flash-controller" - - "nordic,nrf51-flash-controller" - - "raspberrypi,pico-flash-controller" - - "st,stm32g4-flash-controller" - - "st,stm32l5-flash-controller" - - "zephyr,sim-flash" + - "nordic,nrf52-flash-controller" + - "nordic,nrf51-flash-controller" + - "raspberrypi,pico-flash-controller" + - "st,stm32g4-flash-controller" + - "st,stm32l5-flash-controller" + - "zephyr,sim-flash" level: 0 actions: - - type: instance - value: - raw: - type: myself + - !Instance + raw: !Myself device: crate::device::flash::FlashController # Flash partitions exist as children of a node compatible with "soc-nv-flash" that itself is a child @@ -77,46 +62,36 @@ # TODO: Get the write and erase property from the DT if present. 
- name: flash-partition rules: - - type: compatible - value: + - !Compatible names: - - "fixed-partitions" + - "fixed-partitions" level: 1 - - type: compatible - value: + - !Compatible names: - - "soc-nv-flash" + - "soc-nv-flash" level: 2 actions: - - type: instance - value: - raw: - type: parent - value: - level: 3 - args: - - type: reg + - !Instance + raw: !Parent + level: 3 + args: + - !Reg device: "crate::device::flash::FlashPartition" # I2C. - name: i2c rules: - - type: compatible - value: - names: - - "snps,designware-i2c" - level: 0 + - !Compatible + names: + - "snps,designware-i2c" + level: 0 actions: - - type: instance - value: - raw: - type: myself - device: crate::device::i2c::I2C + - !Instance + raw: !Myself + device: crate::device::i2c::I2C # Generate a pseudo node that matches all of the labels across the tree with their nodes. - name: labels - rules: - - type: root + rules: !Root actions: - - type: labels - + - !Labels diff --git a/zephyr-build/src/devicetree/augment.rs b/zephyr-build/src/devicetree/augment.rs index c5cfd77..7b333a4 100644 --- a/zephyr-build/src/devicetree/augment.rs +++ b/zephyr-build/src/devicetree/augment.rs @@ -81,7 +81,6 @@ impl Augment for Augmentation { /// A matching rule. #[derive(Debug, Serialize, Deserialize)] -#[serde(tag = "type", rename_all = "snake_case", content = "value")] pub enum Rule { /// A set of "or" matches. Or(Vec), @@ -127,7 +126,6 @@ fn parent_compatible(node: &Node, names: &[String], level: usize) -> bool { /// An action to perform #[derive(Debug, Serialize, Deserialize)] -#[serde(tag = "type", rename_all = "snake_case", content = "value")] pub enum Action { /// Generate an "instance" with a specific device name. Instance { @@ -177,7 +175,6 @@ impl Action { } #[derive(Debug, Serialize, Deserialize)] -#[serde(tag = "type", rename_all = "snake_case", content = "value")] pub enum RawInfo { /// Get the raw device directly from this node. Myself, @@ -276,7 +273,6 @@ impl RawInfo { /// /// At this point, we assume these all come from the current node. #[derive(Debug, Serialize, Deserialize)] -#[serde(tag = "type", rename_all = "snake_case", content = "value")] pub enum ArgInfo { /// The arguments come from a 'reg' property. Reg,
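
[Editor's aside, not part of the patch series] A minimal sketch of why the YAML syntax changes once the `#[serde(tag = ..., content = ...)]` attributes are dropped: serde's default externally tagged enum representation is what serde_yaml reads and writes as YAML `!Tag` values, matching the `!Instance`, `!Myself`, and `!Phandle gpios` forms in the commit message above. The abbreviated `Rule` enum below is an illustration only; the real definitions live in `zephyr-build/src/devicetree/augment.rs`, and the assumption is that `serde_yaml` (or a compatible parser) loads `dt-rust.yaml`.

```rust
use serde::Deserialize;

// Abbreviated stand-in for the Rule enum; externally tagged (serde's default).
#[derive(Debug, Deserialize)]
enum Rule {
    HasProp(String),
    Compatible { names: Vec<String>, level: usize },
}

fn parse_rules() -> Result<Vec<Rule>, serde_yaml::Error> {
    // The `!Variant` YAML tags map directly onto the enum variants, replacing the
    // old `type:`/`value:` nesting produced by the adjacently tagged representation.
    serde_yaml::from_str(
        r#"
- !HasProp gpio-controller
- !Compatible
  names: ["gpio-leds"]
  level: 1
"#,
    )
}
```
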