Skip to content

Commit 492fba6

Browse files
committed
zephyr: device: uart: irq: Implement async read
Change the read interface to also be async. The user must enqueue one or more buffers for the driver to place data into. Signed-off-by: David Brown <[email protected]>
1 parent 2bfd951 commit 492fba6

File tree

1 file changed

+145
-78
lines changed

1 file changed

+145
-78
lines changed

zephyr/src/device/uart/irq.rs

+145-78
Original file line numberDiff line numberDiff line change
@@ -16,19 +16,14 @@ use crate::raw;
1616
use crate::error::{Result, to_result_void};
1717
use crate::sys::sync::Semaphore;
1818
use crate::sync::{Arc, SpinMutex};
19-
use crate::time::{NoWait, Timeout};
19+
use crate::time::Timeout;
2020

2121
use core::ffi::c_void;
2222
use core::ops::Range;
2323
use core::{fmt, result};
2424

2525
use super::Uart;
2626

27-
/// Size of the irq buffer used for UartIrq.
28-
///
29-
/// TODO: Make this a parameter of the type.
30-
const BUFFER_SIZE: usize = 256;
31-
3227
/// The "outer" struct holds the semaphore, and the mutex. The semaphore has to live outside of the
3328
/// mutex because it can only be waited on when the Mutex is not locked.
3429
struct IrqOuterData<const WS: usize, const RS: usize> {
@@ -40,13 +35,15 @@ struct IrqOuterData<const WS: usize, const RS: usize> {
4035

4136
/// Data for communication with the UART IRQ.
4237
struct IrqInnerData<const WS: usize, const RS: usize> {
43-
/// The Ring buffer holding incoming and read data.
44-
buffer: ArrayDeque<u8, BUFFER_SIZE>,
4538
/// Write request. The 'head' is the one being worked on. Once completed, they will move into
4639
/// the completion queue.
4740
write_requests: ArrayDeque<WriteRequest, WS>,
4841
/// Completed writes.
4942
write_dones: ArrayDeque<WriteDone, WS>,
43+
/// Read requests. The 'head' is the one data will come into.
44+
read_requests: ArrayDeque<ReadRequest, RS>,
45+
/// Completed reads. Generally, these will be full, but a read might move an early one here.
46+
read_dones: ArrayDeque<ReadDone, RS>,
5047
}
5148

5249
/// A single requested write. This is a managed buffer, and a range of the buffer to actually
@@ -65,23 +62,41 @@ struct WriteDone {
6562
data: Vec<u8>,
6663
}
6764

68-
/// Represents a slice of data that the irq is going to write.
69-
struct WriteSlice {
70-
data: *const u8,
65+
/// A single read request. This is a buffer to hold data being read, along with the part still
66+
/// valid to hold data.
67+
struct ReadRequest {
68+
/// The buffer that data will be read into.
69+
data: Vec<u8>,
70+
/// How much of the data has been read so far.
7171
len: usize,
7272
}
7373

74-
impl WriteSlice {
75-
/// Add an offset to the beginning of this slice, returning a new slice. This is equivalent to
76-
/// &item[count..] with a slice.
77-
pub unsafe fn add(&self, count: usize) -> WriteSlice {
78-
WriteSlice {
79-
data: unsafe { self.data.add(count) },
80-
len: self.len - count,
81-
}
74+
impl ReadRequest {
75+
fn into_done(self) -> ReadDone {
76+
ReadDone { data: self.data, len: self.len }
77+
}
78+
}
79+
80+
/// A completed read.
81+
struct ReadDone {
82+
/// The buffer holding the data.
83+
data: Vec<u8>,
84+
/// How much of `data` contains read data. Should always be > 0.
85+
len: usize,
86+
}
87+
88+
impl ReadDone {
89+
fn into_result(self) -> ReadResult {
90+
ReadResult { data: self.data, len: self.len }
8291
}
8392
}
8493

94+
/// The result of a read.
95+
pub struct ReadResult {
96+
data: Vec<u8>,
97+
len: usize,
98+
}
99+
85100
/// The error type from write requests. Used to return the buffer.
86101
pub struct WriteError(pub Vec<u8>);
87102

@@ -92,9 +107,22 @@ impl fmt::Debug for WriteError {
92107
}
93108
}
94109

110+
/// The error type from read requests. Used to return the buffer.
111+
pub struct ReadError(pub Vec<u8>);
112+
113+
// The default Debug for ReadError would print the whole buffer, which isn't particularly useful.
114+
impl fmt::Debug for ReadError {
115+
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
116+
write!(f, "ReadError(...)")
117+
}
118+
}
119+
95120
/// The wait for write completion timed out.
96121
pub struct WriteWaitTimedOut;
97122

123+
/// The wait for read completion timed out.
124+
pub struct ReadWaitTimedOut;
125+
98126
/// An interface to the UART, that uses the "legacy" IRQ API.
99127
///
100128
/// The interface is parameterized by two values: `WS` is the number of elements in the write ring,
@@ -119,9 +147,10 @@ impl<const WS: usize, const RS: usize> UartIrq<WS, RS> {
119147
read_sem: Semaphore::new(0, RS as u32)?,
120148
write_sem: Semaphore::new(0, WS as u32)?,
121149
inner: SpinMutex::new(IrqInnerData {
122-
buffer: ArrayDeque::new(),
123150
write_requests: ArrayDeque::new(),
124151
write_dones: ArrayDeque::new(),
152+
read_requests: ArrayDeque::new(),
153+
read_dones: ArrayDeque::new(),
125154
}),
126155
});
127156

@@ -153,26 +182,6 @@ impl<const WS: usize, const RS: usize> UartIrq<WS, RS> {
153182
&self.uart
154183
}
155184

156-
/// Attempt to read data from the UART into the buffer. If no data is available, it will
157-
/// attempt, once, to wait using the given timeout.
158-
///
159-
/// Returns the number of bytes that were read, with zero indicating that a timeout occurred.
160-
pub unsafe fn try_read<T>(&mut self, buf: &mut [u8], timeout: T) -> usize
161-
where T: Into<Timeout>,
162-
{
163-
// Start with a read, before any blocking.
164-
let count = self.data.try_read(buf);
165-
if count > 0 {
166-
return count;
167-
}
168-
169-
// Otherwise, wait for the semaphore. Ignore the result, as we will try to read again, in
170-
// case there was a race.
171-
let _ = self.data.read_sem.take(timeout);
172-
173-
self.data.try_read(buf)
174-
}
175-
176185
/// Enqueue a single write request.
177186
///
178187
/// If the queue is full, the `WriteError` returned will return the buffer.
@@ -218,6 +227,63 @@ impl<const WS: usize, const RS: usize> UartIrq<WS, RS> {
218227
let mut inner = self.data.inner.lock().unwrap();
219228
Ok(inner.write_dones.pop_front().expect("Write done empty, despite semaphore").data)
220229
}
230+
231+
/// Enqueue a buffer for reading data.
232+
///
233+
/// Enqueues a buffer to hold read data. Can enqueue up to RS of these.
234+
pub fn read_enqueue(&mut self, data: Vec<u8>) -> result::Result<(), ReadError> {
235+
let mut inner = self.data.inner.lock().unwrap();
236+
237+
let req = ReadRequest { data, len: 0 };
238+
match inner.read_requests.push_back(req) {
239+
Ok(()) => {
240+
// Enable the rx fifo so incoming data will be placed.
241+
if inner.read_requests.len() == 1 {
242+
unsafe { raw::uart_irq_rx_enable(self.uart.device); }
243+
}
244+
Ok(())
245+
}
246+
Err(e) => Err(ReadError(e.element.data))
247+
}
248+
}
249+
250+
/// Wait up to 'timeout' for a read to complete, and returns the data.
251+
///
252+
/// Note that if there is a buffer that has been partially filled, this will return that buffer,
253+
/// so that there isn't a delay with read data.
254+
pub fn read_wait<T>(&mut self, timeout: T) -> result::Result<ReadResult, ReadWaitTimedOut>
255+
where T: Into<Timeout>,
256+
{
257+
// If there is no read data available, see if we have a partial block we can consider a
258+
// completion.
259+
let mut inner = self.data.inner.lock().unwrap();
260+
if inner.read_dones.is_empty() {
261+
if let Some(req) = inner.read_requests.pop_front() {
262+
// TODO: User defined threshold?
263+
if req.len > 0 {
264+
// Queue this up as a completion.
265+
inner.read_dones.push_back(req.into_done()).unwrap();
266+
267+
// Signal the sem, as we've pushed.
268+
self.data.read_sem.give();
269+
} else {
270+
// Stick it back on the queue.
271+
inner.read_requests.push_front(req).unwrap();
272+
}
273+
}
274+
}
275+
drop(inner);
276+
277+
match self.data.read_sem.take(timeout) {
278+
Ok(()) => (),
279+
// TODO: Handle other errors?
280+
Err(_) => return Err(ReadWaitTimedOut),
281+
}
282+
283+
let mut inner = self.data.inner.lock().unwrap();
284+
let done = inner.read_dones.pop_front().expect("Semaphore mismatched with read done queue");
285+
Ok(done.into_result())
286+
}
221287
}
222288

223289
// TODO: It could actually be possible to implement drop, but we would need to make sure the irq
@@ -229,29 +295,6 @@ impl<const WS: usize, const RS: usize> Drop for UartIrq<WS, RS> {
229295
}
230296
}
231297

232-
impl<const WS: usize, const RS: usize> IrqOuterData<WS, RS> {
233-
/// Try reading from the inner data, filling the buffer with as much data as makes sense.
234-
/// Returns the number of bytes actually read, or Zero if none.
235-
fn try_read(&self, buf: &mut [u8]) -> usize {
236-
let mut inner = self.inner.lock().unwrap();
237-
let mut pos = 0;
238-
while pos < buf.len() {
239-
if let Some(elt) = inner.buffer.pop_front() {
240-
buf[pos] = elt;
241-
pos += 1;
242-
} else {
243-
break;
244-
}
245-
}
246-
247-
if pos > 0 {
248-
// Any time we do a read, clear the semaphore.
249-
let _ = self.read_sem.take(NoWait);
250-
}
251-
pos
252-
}
253-
}
254-
255298
extern "C" fn irq_callback<const WS: usize, const RS: usize>(
256299
dev: *const raw::device,
257300
user_data: *mut c_void,
@@ -260,26 +303,45 @@ extern "C" fn irq_callback<const WS: usize, const RS: usize>(
260303
let outer = unsafe { &*(user_data as *const IrqOuterData<WS, RS>) };
261304
let mut inner = outer.inner.lock().unwrap();
262305

263-
// TODO: Make this more efficient.
264-
let mut byte = 0u8;
265-
let mut did_read = false;
306+
// Handle any read requests.
266307
loop {
267-
match unsafe { raw::uart_fifo_read(dev, &mut byte, 1) } {
268-
0 => break,
269-
1 => {
270-
// TODO: should we warn about overflow here?
271-
let _ = inner.buffer.push_back(byte);
272-
did_read = true;
308+
if let Some(mut req) = inner.read_requests.pop_front() {
309+
if req.len == req.data.len() {
310+
// This buffer is full, make it a completion.
311+
inner.read_dones.push_back(req.into_done())
312+
.expect("Completion queue not large enough");
313+
outer.read_sem.give();
314+
} else {
315+
// Read as much as we can.
316+
let piece = &mut req.data[req.len..];
317+
let count = unsafe {
318+
raw::uart_fifo_read(dev, piece.as_mut_ptr(), piece.len() as i32)
319+
};
320+
if count < 0 {
321+
panic!("Incorrect use of read");
322+
}
323+
let count = count as usize;
324+
325+
// Adjust the piece. The next time through the loop will notice if the read is
326+
// full.
327+
req.len += count;
328+
inner.read_requests.push_front(req)
329+
.expect("Unexpected read request overflow");
330+
331+
if count == 0 {
332+
// There is no more data in the fifo.
333+
break;
334+
}
273335
}
274-
e => panic!("Uart fifo read not implemented: {}", e),
336+
} else {
337+
// No place to store results. Turn off the irq and stop.
338+
// The docs don't describe this as being possible, but hopefully the implementations
339+
// are sane.
340+
unsafe { raw::uart_irq_rx_disable(dev); }
341+
break;
275342
}
276343
}
277344

278-
// This is safe (and important) to do while the mutex is held.
279-
if did_read {
280-
outer.read_sem.give();
281-
}
282-
283345
// Handle any write requests.
284346
loop {
285347
if let Some(mut req) = inner.write_requests.pop_front() {
@@ -303,6 +365,11 @@ extern "C" fn irq_callback<const WS: usize, const RS: usize>(
303365
req.part.start += count;
304366
inner.write_requests.push_front(req)
305367
.expect("Unexpected write_dones overflow");
368+
369+
// If the count reaches 0, the fifo is full.
370+
if count == 0 {
371+
break;
372+
}
306373
}
307374
} else {
308375
// No work. Turn off the irq, and stop.

0 commit comments

Comments
 (0)