embassy_stm32/usart/ringbuffered.rs

use core::future::poll_fn;
use core::mem;
use core::sync::atomic::{compiler_fence, Ordering};
use core::task::Poll;

use embassy_embedded_hal::SetConfig;
use embassy_hal_internal::PeripheralRef;
use embedded_io_async::ReadReady;
use futures_util::future::{select, Either};

use super::{
    clear_interrupt_flags, rdr, reconfigure, set_baudrate, sr, Config, ConfigError, Error, Info, State, UartRx,
};
use crate::dma::ReadableRingBuffer;
use crate::gpio::{AnyPin, SealedPin as _};
use crate::mode::Async;
use crate::time::Hertz;
use crate::usart::{Regs, Sr};

/// Rx-only Ring-buffered UART Driver
///
/// Created with [UartRx::into_ring_buffered]
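///
/// The driver also implements [`embedded_io_async::Read`], so it can be used behind that
/// trait. A minimal sketch of driver-agnostic use (the `read_chunk` helper is illustrative,
/// not part of this crate):
///
/// ```ignore
/// use embedded_io_async::Read;
///
/// // Hypothetical helper: read one chunk of bytes from any async reader.
/// async fn read_chunk<R: Read>(rx: &mut R, buf: &mut [u8]) -> Result<usize, R::Error> {
///     // Waits until at least one byte is available and returns how many were read.
///     rx.read(buf).await
/// }
/// ```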
pub struct RingBufferedUartRx<'d> {
    info: &'static Info,
    state: &'static State,
    kernel_clock: Hertz,
    rx: Option<PeripheralRef<'d, AnyPin>>,
    rts: Option<PeripheralRef<'d, AnyPin>>,
    ring_buf: ReadableRingBuffer<'d, u8>,
}

impl<'d> SetConfig for RingBufferedUartRx<'d> {
    type Config = Config;
    type ConfigError = ConfigError;

    fn set_config(&mut self, config: &Self::Config) -> Result<(), Self::ConfigError> {
        self.set_config(config)
    }
}

impl<'d> UartRx<'d, Async> {
    /// Turn the `UartRx` into a buffered UART which can continuously receive in the background
    /// without the possibility of losing bytes. The `dma_buf` is a buffer registered to the
    /// DMA controller, and must be large enough to prevent overflows.
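    ///
    /// A minimal sketch, assuming `rx` is an already constructed async `UartRx` (the buffer
    /// size is illustrative; pick one large enough for your baud rate and read interval):
    ///
    /// ```ignore
    /// // Buffer handed over to the DMA controller; it must outlive the driver.
    /// let mut dma_buf = [0u8; 256];
    /// let mut rx = rx.into_ring_buffered(&mut dma_buf);
    /// ```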
    pub fn into_ring_buffered(mut self, dma_buf: &'d mut [u8]) -> RingBufferedUartRx<'d> {
        assert!(!dma_buf.is_empty() && dma_buf.len() <= 0xFFFF);

        let opts = Default::default();

        // Safety: we forget the struct before this function returns.
        let rx_dma = self.rx_dma.as_mut().unwrap();
        let request = rx_dma.request;
        let rx_dma = unsafe { rx_dma.channel.clone_unchecked() };

        let info = self.info;
        let state = self.state;
        let kernel_clock = self.kernel_clock;
        let ring_buf = unsafe { ReadableRingBuffer::new(rx_dma, request, rdr(info.regs), dma_buf, opts) };
        let rx = unsafe { self.rx.as_ref().map(|x| x.clone_unchecked()) };
        let rts = unsafe { self.rts.as_ref().map(|x| x.clone_unchecked()) };

        // Don't disable the clock
        mem::forget(self);

        RingBufferedUartRx {
            info,
            state,
            kernel_clock,
            rx,
            rts,
            ring_buf,
        }
    }
}

impl<'d> RingBufferedUartRx<'d> {
    /// Reconfigure the driver
    pub fn set_config(&mut self, config: &Config) -> Result<(), ConfigError> {
        reconfigure(self.info, self.kernel_clock, config)
    }

    /// Configure and start the DMA backed UART receiver
    ///
    /// Note: This is also done automatically by [`read()`] if required.
    pub fn start_uart(&mut self) {
        // Clear the buffer so that it is ready to receive data
        compiler_fence(Ordering::SeqCst);
        self.ring_buf.start();

        let r = self.info.regs;
        // configure the Rx interrupts and enable the DMA Rx request
        r.cr1().modify(|w| {
            // disable RXNE interrupt
            w.set_rxneie(false);
            // enable parity interrupt if not ParityNone
            w.set_peie(w.pce());
            // enable idle line interrupt
            w.set_idleie(true);
        });
        r.cr3().modify(|w| {
            // enable Error Interrupt: (Frame error, Noise error, Overrun error)
            w.set_eie(true);
            // enable DMA Rx Request
            w.set_dmar(true);
        });
    }

    /// Stop DMA backed UART receiver
    fn stop_uart(&mut self) {
        self.ring_buf.request_pause();

        let r = self.info.regs;
        // disable interrupts and the DMA Rx request
        r.cr1().modify(|w| {
            // disable RXNE interrupt
            w.set_rxneie(false);
            // disable parity interrupt
            w.set_peie(false);
            // disable idle line interrupt
            w.set_idleie(false);
        });
        r.cr3().modify(|w| {
            // disable Error Interrupt: (Frame error, Noise error, Overrun error)
            w.set_eie(false);
            // disable DMA Rx Request
            w.set_dmar(false);
        });

        compiler_fence(Ordering::SeqCst);
    }

    /// Read bytes that are readily available in the ring buffer.
    /// If no bytes are currently available in the buffer, the call waits until some
    /// bytes are available (at least one byte and at most half the buffer size).
    ///
    /// Background receive is started if `start_uart()` has not been previously called.
    ///
    /// Receive in the background is terminated if an error is returned.
    /// It must then manually be started again by calling `start_uart()` or by re-calling `read()`.
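    ///
    /// A minimal sketch of a receive loop (`handle` is a hypothetical function; error
    /// handling is illustrative):
    ///
    /// ```ignore
    /// let mut buf = [0u8; 64];
    /// loop {
    ///     match rx.read(&mut buf).await {
    ///         // `n` bytes were copied from the ring buffer into `buf`.
    ///         Ok(n) => handle(&buf[..n]),
    ///         // Reception is stopped on error; the next `read()` call restarts it.
    ///         Err(_e) => {}
    ///     }
    /// }
    /// ```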
    pub async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Error> {
        let r = self.info.regs;

        // Start DMA and Uart if it was not already started,
        // otherwise check for errors in status register.
        let sr = clear_idle_flag(r);
        if !r.cr3().read().dmar() {
            self.start_uart();
        } else {
            check_for_errors(sr)?;
        }

        loop {
            match self.ring_buf.read(buf) {
                Ok((0, _)) => {}
                Ok((len, _)) => {
                    return Ok(len);
                }
                Err(_) => {
                    self.stop_uart();
                    return Err(Error::Overrun);
                }
            }

            match self.wait_for_data_or_idle().await {
                Ok(_) => {}
                Err(err) => {
                    self.stop_uart();
                    return Err(err);
                }
            }
        }
    }

    /// Wait for UART idle line or the DMA buffer becoming half-full or full
    async fn wait_for_data_or_idle(&mut self) -> Result<(), Error> {
        compiler_fence(Ordering::SeqCst);

        // Future which completes when idle line is detected
        let s = self.state;
        let uart = poll_fn(|cx| {
            s.rx_waker.register(cx.waker());

            compiler_fence(Ordering::SeqCst);

            // Critical section is needed so that IDLE isn't set after
            // our read but before we clear it.
            let sr = critical_section::with(|_| clear_idle_flag(self.info.regs));

            check_for_errors(sr)?;

            if sr.idle() {
                // Idle line is detected
                Poll::Ready(Ok(()))
            } else {
                Poll::Pending
            }
        });

        let mut dma_init = false;
        // Future which completes when the DMA buffer is half-full or full
        let dma = poll_fn(|cx| {
            self.ring_buf.set_waker(cx.waker());

            let status = match dma_init {
                false => Poll::Pending,
                true => Poll::Ready(()),
            };

            dma_init = true;
            status
        });

        match select(uart, dma).await {
            Either::Left((result, _)) => result,
            Either::Right(((), _)) => Ok(()),
        }
    }

    /// Set baudrate
    pub fn set_baudrate(&self, baudrate: u32) -> Result<(), ConfigError> {
        set_baudrate(self.info, self.kernel_clock, baudrate)
    }
}

impl Drop for RingBufferedUartRx<'_> {
    fn drop(&mut self) {
        self.stop_uart();
        self.rx.as_ref().map(|x| x.set_as_disconnected());
        self.rts.as_ref().map(|x| x.set_as_disconnected());
        super::drop_tx_rx(self.info, self.state);
    }
}

/// Return an error result if the Sr register has errors
fn check_for_errors(s: Sr) -> Result<(), Error> {
    if s.pe() {
        Err(Error::Parity)
    } else if s.fe() {
        Err(Error::Framing)
    } else if s.ne() {
        Err(Error::Noise)
    } else if s.ore() {
        Err(Error::Overrun)
    } else {
        Ok(())
    }
}

/// Clear IDLE and return the Sr register
fn clear_idle_flag(r: Regs) -> Sr {
    // SAFETY: read only and we only use Rx related flags

    let sr = sr(r).read();

    // This read also clears the error and idle interrupt flags on v1.
    unsafe { rdr(r).read_volatile() };
    clear_interrupt_flags(r, sr);

    r.cr1().modify(|w| w.set_idleie(true));

    sr
}

impl embedded_io_async::ErrorType for RingBufferedUartRx<'_> {
    type Error = Error;
}

impl embedded_io_async::Read for RingBufferedUartRx<'_> {
    async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
        self.read(buf).await
    }
}

impl ReadReady for RingBufferedUartRx<'_> {
    fn read_ready(&mut self) -> Result<bool, Self::Error> {
        let len = self.ring_buf.len().map_err(|e| match e {
            crate::dma::ringbuffer::Error::Overrun => Self::Error::Overrun,
            crate::dma::ringbuffer::Error::DmaUnsynced => {
                error!(
                    "Ringbuffer error: DmaUnsynced, driver implementation is probably bugged, please open an issue"
                );
                // we report this as overrun since it's recoverable in the same way
                Self::Error::Overrun
            }
        })?;
        Ok(len > 0)
    }
}