@@ -16,19 +16,14 @@ use crate::raw;
use crate::error::{Result, to_result_void};
use crate::sys::sync::Semaphore;
use crate::sync::{Arc, SpinMutex};
-use crate::time::{NoWait, Timeout};
+use crate::time::Timeout;

use core::ffi::c_void;
use core::ops::Range;
use core::{fmt, result};

use super::Uart;

-/// Size of the irq buffer used for UartIrq.
-///
-/// TODO: Make this a parameter of the type.
-const BUFFER_SIZE: usize = 256;
-
/// The "outer" struct holds the semaphore, and the mutex. The semaphore has to live outside of the
/// mutex because it can only be waited on when the Mutex is not locked.
struct IrqOuterData<const WS: usize, const RS: usize> {
@@ -40,13 +35,15 @@ struct IrqInnerData<const WS: usize, const RS: usize> {

/// Data for communication with the UART IRQ.
struct IrqInnerData<const WS: usize, const RS: usize> {
-    /// The Ring buffer holding incoming and read data.
-    buffer: ArrayDeque<u8, BUFFER_SIZE>,
    /// Write request. The 'head' is the one being worked on. Once completed, they will move into
    /// the completion queue.
    write_requests: ArrayDeque<WriteRequest, WS>,
    /// Completed writes.
    write_dones: ArrayDeque<WriteDone, WS>,
+    /// Read requests. The 'head' is the one data will come into.
+    read_requests: ArrayDeque<ReadRequest, RS>,
+    /// Completed reads. Generally, these will be full, but a read might move an early one here.
+    read_dones: ArrayDeque<ReadDone, RS>,
}

/// A single requested write. This is a managed buffer, and a range of the buffer to actually
@@ -65,23 +62,41 @@ struct WriteDone {
    data: Vec<u8>,
}

-/// Represents a slice of data that the irq is going to write.
-struct WriteSlice {
-    data: *const u8,
+/// A single read request. This is a buffer to hold data being read, along with the part still
+/// valid to hold data.
+struct ReadRequest {
+    /// The data to read.
+    data: Vec<u8>,
+    /// How much of the data has been read so far.
    len: usize,
}

-impl WriteSlice {
-    /// Add an offset to the beginning of this slice, returning a new slice. This is equivalent to
-    /// &item[count..] with a slice.
-    pub unsafe fn add(&self, count: usize) -> WriteSlice {
-        WriteSlice {
-            data: unsafe { self.data.add(count) },
-            len: self.len - count,
-        }
+impl ReadRequest {
+    fn into_done(self) -> ReadDone {
+        ReadDone { data: self.data, len: self.len }
+    }
+}
+
+/// A completed read.
+struct ReadDone {
+    /// The buffer holding the data.
+    data: Vec<u8>,
+    /// How much of `data` contains read data. Should always be > 0.
+    len: usize,
+}
+
+impl ReadDone {
+    fn into_result(self) -> ReadResult {
+        ReadResult { data: self.data, len: self.len }
    }
}

+/// The result of a read.
+pub struct ReadResult {
+    data: Vec<u8>,
+    len: usize,
+}
+
/// The error type from write requests. Used to return the buffer.
pub struct WriteError(pub Vec<u8>);

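As a reading aid (not part of the diff): the three structs added above describe one buffer at three stages of its life. A minimal sketch of that flow, using only items introduced in this change, with the buffer allocation as a placeholder:

```rust
// Sketch: lifecycle of a single read buffer.
let req = ReadRequest { data: vec![0u8; 64], len: 0 }; // created by read_enqueue()
// ... the IRQ callback fills req.data[..req.len] from the fifo ...
let done: ReadDone = req.into_done();         // moved onto the completion queue
let result: ReadResult = done.into_result();  // handed to the caller by read_wait()
```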
@@ -92,9 +107,22 @@ impl fmt::Debug for WriteError {
    }
}

+/// The error type from read requests. Used to return the buffer.
+pub struct ReadError(pub Vec<u8>);
+
+// The default Debug for ReadError would print the whole buffer, which isn't particularly useful.
+impl fmt::Debug for ReadError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "ReadError(...)")
+    }
+}
+
/// The wait for write completion timed out.
pub struct WriteWaitTimedOut;

+/// The wait for read completion timed out.
+pub struct ReadWaitTimedOut;
+
/// An interface to the UART, that uses the "legacy" IRQ API.
///
/// The interface is parameterized by two values: `WS` is the number of elements in the write ring,
@@ -119,9 +147,10 @@ impl<const WS: usize, const RS: usize> UartIrq<WS, RS> {
            read_sem: Semaphore::new(0, RS as u32)?,
            write_sem: Semaphore::new(0, WS as u32)?,
            inner: SpinMutex::new(IrqInnerData {
-                buffer: ArrayDeque::new(),
                write_requests: ArrayDeque::new(),
                write_dones: ArrayDeque::new(),
+                read_requests: ArrayDeque::new(),
+                read_dones: ArrayDeque::new(),
            }),
        });

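As a side note for readers of this hunk, the two const parameters now size both rings at construction time. The snippet below is only a sketch; the constructor call and the `uart` value are assumptions, since the hunk shows just the initializer body, while the meaning of `WS` and `RS` comes from the change itself.

```rust
// Hypothetical instantiation (constructor signature assumed, not shown in this hunk):
// up to 4 pending write requests and 2 queued read buffers.
let mut uart_irq: UartIrq<4, 2> = UartIrq::new(uart)?;
```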
@@ -153,26 +182,6 @@ impl<const WS: usize, const RS: usize> UartIrq<WS, RS> {
        &self.uart
    }

-    /// Attempt to read data from the UART into the buffer. If no data is available, it will
-    /// attempt, once, to wait using the given timeout.
-    ///
-    /// Returns the number of bytes that were read, with zero indicating that a timeout occurred.
-    pub unsafe fn try_read<T>(&mut self, buf: &mut [u8], timeout: T) -> usize
-        where T: Into<Timeout>,
-    {
-        // Start with a read, before any blocking.
-        let count = self.data.try_read(buf);
-        if count > 0 {
-            return count;
-        }
-
-        // Otherwise, wait for the semaphore. Ignore the result, as we will try to read again, in
-        // case there was a race.
-        let _ = self.data.read_sem.take(timeout);
-
-        self.data.try_read(buf)
-    }
-
    /// Enqueue a single write request.
    ///
    /// If the queue is full, the `WriteError` returned will return the buffer.
@@ -218,6 +227,63 @@ impl<const WS: usize, const RS: usize> UartIrq<WS, RS> {
        let mut inner = self.data.inner.lock().unwrap();
        Ok(inner.write_dones.pop_front().expect("Write done empty, despite semaphore").data)
    }
+
+    /// Enqueue a buffer for reading data.
+    ///
+    /// Enqueues a buffer to hold read data. Can enqueue up to RS of these.
+    pub fn read_enqueue(&mut self, data: Vec<u8>) -> result::Result<(), ReadError> {
+        let mut inner = self.data.inner.lock().unwrap();
+
+        let req = ReadRequest { data, len: 0 };
+        match inner.read_requests.push_back(req) {
+            Ok(()) => {
+                // Enable the rx fifo so incoming data will be placed.
+                if inner.read_requests.len() == 1 {
+                    unsafe { raw::uart_irq_rx_enable(self.uart.device); }
+                }
+                Ok(())
+            }
+            Err(e) => Err(ReadError(e.element.data))
+        }
+    }
+
+    /// Wait up to 'timeout' for a read to complete, and return the data.
+    ///
+    /// Note that if there is a buffer that has been partially filled, this will return that buffer,
+    /// so that there isn't a delay with read data.
+    pub fn read_wait<T>(&mut self, timeout: T) -> result::Result<ReadResult, ReadWaitTimedOut>
+        where T: Into<Timeout>,
+    {
+        // If there is no read data available, see if we have a partial block we can consider a
+        // completion.
+        let mut inner = self.data.inner.lock().unwrap();
+        if inner.read_dones.is_empty() {
+            if let Some(req) = inner.read_requests.pop_front() {
+                // TODO: User defined threshold?
+                if req.len > 0 {
+                    // Queue this up as a completion.
+                    inner.read_dones.push_back(req.into_done()).unwrap();
+
+                    // Signal the sem, as we've pushed.
+                    self.data.read_sem.give();
+                } else {
+                    // Stick it back on the queue.
+                    inner.read_requests.push_front(req).unwrap();
+                }
+            }
+        }
+        drop(inner);
+
+        match self.data.read_sem.take(timeout) {
+            Ok(()) => (),
+            // TODO: Handle other errors?
+            Err(_) => return Err(ReadWaitTimedOut),
+        }
+
+        let mut inner = self.data.inner.lock().unwrap();
+        let done = inner.read_dones.pop_front().expect("Semaphore mismatched with read done queue");
+        Ok(done.into_result())
+    }
}

// TODO: It could actually be possible to implement drop, but we would need to make sure the irq
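A rough caller-side sketch of the new read path (not part of the change): the buffer allocation, the `timeout` value, and the `handle_data` helper are placeholders; `read_enqueue`, `read_wait`, `ReadWaitTimedOut`, and the partial-buffer behavior are the items added above.

```rust
// Keep the driver stocked with empty buffers; up to RS requests may be queued at once.
// ReadError returns the buffer on a full queue, and its Debug impl lets expect() work here.
uart_irq.read_enqueue(vec![0u8; 64]).expect("read ring full");

// Block up to `timeout` (any T: Into<Timeout>) for a completed, possibly partial, buffer.
match uart_irq.read_wait(timeout) {
    Ok(result) => handle_data(result), // placeholder; ReadResult accessors are outside this diff
    Err(ReadWaitTimedOut) => { /* nothing arrived before the timeout */ }
}
```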
@@ -229,29 +295,6 @@ impl<const WS: usize, const RS: usize> Drop for UartIrq<WS, RS> {
    }
}

-impl<const WS: usize, const RS: usize> IrqOuterData<WS, RS> {
-    /// Try reading from the inner data, filling the buffer with as much data as makes sense.
-    /// Returns the number of bytes actually read, or Zero if none.
-    fn try_read(&self, buf: &mut [u8]) -> usize {
-        let mut inner = self.inner.lock().unwrap();
-        let mut pos = 0;
-        while pos < buf.len() {
-            if let Some(elt) = inner.buffer.pop_front() {
-                buf[pos] = elt;
-                pos += 1;
-            } else {
-                break;
-            }
-        }
-
-        if pos > 0 {
-            // Any time we do a read, clear the semaphore.
-            let _ = self.read_sem.take(NoWait);
-        }
-        pos
-    }
-}
-
extern "C" fn irq_callback<const WS: usize, const RS: usize>(
    dev: *const raw::device,
    user_data: *mut c_void,
@@ -260,26 +303,45 @@ extern "C" fn irq_callback<const WS: usize, const RS: usize>(
    let outer = unsafe { &*(user_data as *const IrqOuterData<WS, RS>) };
    let mut inner = outer.inner.lock().unwrap();

-    // TODO: Make this more efficient.
-    let mut byte = 0u8;
-    let mut did_read = false;
+    // Handle any read requests.
    loop {
-        match unsafe { raw::uart_fifo_read(dev, &mut byte, 1) } {
-            0 => break,
-            1 => {
-                // TODO: should we warn about overflow here?
-                let _ = inner.buffer.push_back(byte);
-                did_read = true;
+        if let Some(mut req) = inner.read_requests.pop_front() {
+            if req.len == req.data.len() {
+                // This buffer is full, make it a completion.
+                inner.read_dones.push_back(req.into_done())
+                    .expect("Completion queue not large enough");
+                outer.read_sem.give();
+            } else {
+                // Read as much as we can.
+                let piece = &mut req.data[req.len..];
+                let count = unsafe {
+                    raw::uart_fifo_read(dev, piece.as_mut_ptr(), piece.len() as i32)
+                };
+                if count < 0 {
+                    panic!("Incorrect use of read");
+                }
+                let count = count as usize;
+
+                // Adjust the piece. The next time through the loop will notice if the buffer is
+                // full.
+                req.len += count;
+                inner.read_requests.push_front(req)
+                    .expect("Unexpected read request overflow");
+
+                if count == 0 {
+                    // There is no more data in the fifo.
+                    break;
+                }
            }
-            e => panic!("Uart fifo read not implemented: {}", e),
+        } else {
+            // No place to store results. Turn off the irq and stop.
+            // The docs don't describe this as being possible, but hopefully the implementations
+            // are sane.
+            unsafe { raw::uart_irq_rx_disable(dev); }
+            break;
        }
    }

-    // This is safe (and important) to do while the mutex is held.
-    if did_read {
-        outer.read_sem.give();
-    }
-
    // Handle any write requests.
    loop {
        if let Some(mut req) = inner.write_requests.pop_front() {
@@ -303,6 +365,11 @@ extern "C" fn irq_callback<const WS: usize, const RS: usize>(
                req.part.start += count;
                inner.write_requests.push_front(req)
                    .expect("Unexpected write_dones overflow");
+
+                // If the count reaches 0, the fifo is full.
+                if count == 0 {
+                    break;
+                }
            }
        } else {
            // No work. Turn off the irq, and stop.