fuels_accounts/provider/retry_util.rs

use std::{fmt::Debug, future::Future, num::NonZeroU32, time::Duration};

use fuels_core::types::errors::{error, Result};

/// A set of strategies to control retry intervals between attempts.
///
/// The `Backoff` enum defines different strategies for managing intervals between retry attempts.
/// Each strategy allows you to customize the waiting time before a new attempt based on the
/// number of attempts made.
///
/// # Variants
///
/// - `Linear(Duration)`: Increases the waiting time linearly with each attempt.
/// - `Exponential(Duration)`: Doubles the waiting time with each attempt.
/// - `Fixed(Duration)`: Uses a constant waiting time between attempts.
///
/// # Examples
///
/// ```rust
/// use std::time::Duration;
/// use fuels_accounts::provider::Backoff;
///
/// let linear_backoff = Backoff::Linear(Duration::from_secs(2));
/// let exponential_backoff = Backoff::Exponential(Duration::from_secs(1));
/// let fixed_backoff = Backoff::Fixed(Duration::from_secs(5));
/// ```
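///
/// The waiting time each strategy yields for a given (zero-based) attempt follows
/// `wait_duration` below; a small sketch:
///
/// ```rust
/// use std::time::Duration;
/// use fuels_accounts::provider::Backoff;
///
/// let linear = Backoff::Linear(Duration::from_secs(2));
/// assert_eq!(linear.wait_duration(0), Duration::from_secs(2));
/// assert_eq!(linear.wait_duration(1), Duration::from_secs(4));
///
/// let exponential = Backoff::Exponential(Duration::from_secs(1));
/// assert_eq!(exponential.wait_duration(0), Duration::from_secs(1));
/// assert_eq!(exponential.wait_duration(2), Duration::from_secs(4));
///
/// let fixed = Backoff::Fixed(Duration::from_secs(5));
/// assert_eq!(fixed.wait_duration(3), Duration::from_secs(5));
/// ```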
//ANCHOR: backoff
#[derive(Debug, Clone)]
pub enum Backoff {
    Linear(Duration),
    Exponential(Duration),
    Fixed(Duration),
}
//ANCHOR_END: backoff

impl Default for Backoff {
    fn default() -> Self {
        Backoff::Linear(Duration::from_millis(10))
    }
}

impl Backoff {
    pub fn wait_duration(&self, attempt: u32) -> Duration {
        match self {
            Backoff::Linear(base_duration) => *base_duration * (attempt + 1),
            Backoff::Exponential(base_duration) => *base_duration * 2u32.pow(attempt),
            Backoff::Fixed(interval) => *interval,
        }
    }
}

/// Configuration for controlling retry behavior.
///
/// The `RetryConfig` struct encapsulates the configuration parameters for controlling the retry behavior
/// of asynchronous actions. It includes the maximum number of attempts and the interval strategy from
/// the `Backoff` enum that determines how much time to wait between retry attempts.
///
/// # Fields
///
/// - `max_attempts`: The maximum number of attempts before giving up.
/// - `interval`: The chosen interval strategy from the `Backoff` enum.
///
/// # Examples
///
/// ```rust
/// use std::time::Duration;
/// use fuels_accounts::provider::{Backoff, RetryConfig};
///
/// let max_attempts = 5;
/// let interval_strategy = Backoff::Exponential(Duration::from_secs(1));
///
/// let retry_config = RetryConfig::new(max_attempts, interval_strategy).unwrap();
/// ```
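///
/// `new` rejects a zero `max_attempts`; a quick sketch of that validation:
///
/// ```rust
/// use std::time::Duration;
/// use fuels_accounts::provider::{Backoff, RetryConfig};
///
/// let rejected = RetryConfig::new(0, Backoff::Fixed(Duration::from_secs(1)));
/// assert!(rejected.is_err());
/// ```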
// ANCHOR: retry_config
#[derive(Clone, Debug)]
pub struct RetryConfig {
    max_attempts: NonZeroU32,
    interval: Backoff,
}
// ANCHOR_END: retry_config

impl RetryConfig {
    pub fn new(max_attempts: u32, interval: Backoff) -> Result<Self> {
        let max_attempts = NonZeroU32::new(max_attempts)
            .ok_or_else(|| error!(Other, "`max_attempts` must be greater than `0`"))?;

        Ok(RetryConfig {
            max_attempts,
            interval,
        })
    }
}

impl Default for RetryConfig {
    fn default() -> Self {
        Self {
            max_attempts: NonZeroU32::new(1).expect("should not fail"),
            interval: Default::default(),
        }
    }
}

/// Retries an asynchronous action with customizable retry behavior.
///
/// This function takes an asynchronous action represented by the closure `action`.
/// The action is executed repeatedly, with the waiting time between attempts
/// governed by `retry_config` and the decision to retry governed by `should_retry`.
///
/// The `action` closure returns a `Future` that resolves to some value `T`. In
/// practice `T` is typically a `Result`, with `should_retry` inspecting the error
/// to decide whether another attempt is worthwhile.
///
/// # Parameters
///
/// - `action`: The asynchronous action to be retried.
/// - `retry_config`: A reference to the retry configuration.
/// - `should_retry`: A closure that decides, based on the result, whether to retry.
///
/// # Return
///
/// Returns the first result for which `should_retry` evaluates to `false`, without
/// performing further attempts. If every attempt up to `max_attempts` is deemed
/// retryable, the result of the last attempt is returned.
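///
/// # Example
///
/// A crate-internal sketch (not compiled as a doctest); `fetch_latest_block` is a
/// hypothetical stand-in for any async call whose result drives the retry decision:
///
/// ```rust,ignore
/// let config = RetryConfig::new(3, Backoff::Fixed(Duration::from_millis(50)))?;
///
/// // Keep calling the action while `should_retry` returns `true` for its result.
/// let response = retry(
///     || async { fetch_latest_block().await },
///     &config,
///     |result| result.is_err(),
/// )
/// .await;
/// ```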
pub(crate) async fn retry<Fut, T, ShouldRetry>(
    mut action: impl FnMut() -> Fut,
    retry_config: &RetryConfig,
    should_retry: ShouldRetry,
) -> T
where
    Fut: Future<Output = T>,
    ShouldRetry: Fn(&T) -> bool,
{
    let mut last_result = None;

    for attempt in 0..retry_config.max_attempts.into() {
        let result = action().await;

        // A non-retryable result is returned immediately; otherwise remember it
        // in case this was the final attempt.
        if should_retry(&result) {
            last_result = Some(result);
        } else {
            return result;
        }

        tokio::time::sleep(retry_config.interval.wait_duration(attempt)).await;
    }

    // All attempts were retryable: hand back the last result observed.
    last_result.expect("should not happen")
}

#[cfg(test)]
mod tests {
    mod retry_until {
        use std::time::{Duration, Instant};

        use fuels_core::types::errors::{error, Result};
        use tokio::sync::Mutex;

        use crate::provider::{retry_util, Backoff, RetryConfig};

        #[tokio::test]
        async fn returns_last_received_response() -> Result<()> {
            // given
            let err_msgs = ["err1", "err2", "err3"];
            let number_of_attempts = Mutex::new(0usize);

            let will_always_fail = || async {
                let msg = err_msgs[*number_of_attempts.lock().await];
                *number_of_attempts.lock().await += 1;

                msg
            };

            let should_retry_fn = |_res: &_| -> bool { true };

            let retry_options = RetryConfig::new(3, Backoff::Linear(Duration::from_millis(10)))?;

            // when
            let response =
                retry_util::retry(will_always_fail, &retry_options, should_retry_fn).await;

            // then
            assert_eq!(response, "err3");

            Ok(())
        }

        #[tokio::test]
        async fn stops_retrying_when_predicate_is_satisfied() -> Result<()> {
            // given
            let values = Mutex::new(vec![1, 2, 3]);

            let pops_next_value = || async { values.lock().await.pop().unwrap() };

            let should_retry_fn = |res: &i32| *res != 2;

            let retry_options = RetryConfig::new(3, Backoff::Linear(Duration::from_millis(10)))?;

            // when
            let response =
                retry_util::retry(pops_next_value, &retry_options, should_retry_fn).await;

            // then
            assert_eq!(response, 2);

            Ok(())
        }

        #[tokio::test]
        async fn retry_respects_delay_between_attempts_fixed() -> Result<()> {
            // given
            let timestamps: Mutex<Vec<Instant>> = Mutex::new(vec![]);

            let will_fail_and_record_timestamp = || async {
                timestamps.lock().await.push(Instant::now());
                Result::<()>::Err(error!(Other, "error"))
            };

            let should_retry_fn = |_res: &_| -> bool { true };

            let retry_options = RetryConfig::new(3, Backoff::Fixed(Duration::from_millis(100)))?;

            // when
            let _ = retry_util::retry(
                will_fail_and_record_timestamp,
                &retry_options,
                should_retry_fn,
            )
            .await;

            // then
            let timestamps_vec = timestamps.lock().await.clone();

            let timestamps_spaced_out_at_least_100_mills = timestamps_vec
                .iter()
                .zip(timestamps_vec.iter().skip(1))
                .all(|(current_timestamp, the_next_timestamp)| {
                    the_next_timestamp.duration_since(*current_timestamp)
                        >= Duration::from_millis(100)
                });

            assert!(
                timestamps_spaced_out_at_least_100_mills,
                "retry did not wait for the specified time between attempts"
            );

            Ok(())
        }

        #[tokio::test]
        async fn retry_respects_delay_between_attempts_linear() -> Result<()> {
            // given
            let timestamps: Mutex<Vec<Instant>> = Mutex::new(vec![]);

            let will_fail_and_record_timestamp = || async {
                timestamps.lock().await.push(Instant::now());
                Result::<()>::Err(error!(Other, "error"))
            };

            let should_retry_fn = |_res: &_| -> bool { true };

            let retry_options = RetryConfig::new(3, Backoff::Linear(Duration::from_millis(100)))?;

            // when
            let _ = retry_util::retry(
                will_fail_and_record_timestamp,
                &retry_options,
                should_retry_fn,
            )
            .await;

            // then
            let timestamps_vec = timestamps.lock().await.clone();

            let timestamps_spaced_out_at_least_100_mills = timestamps_vec
                .iter()
                .zip(timestamps_vec.iter().skip(1))
                .enumerate()
                .all(|(attempt, (current_timestamp, the_next_timestamp))| {
                    the_next_timestamp.duration_since(*current_timestamp)
                        >= (Duration::from_millis(100) * (attempt + 1) as u32)
                });

            assert!(
                timestamps_spaced_out_at_least_100_mills,
                "retry did not wait for the specified time between attempts"
            );

            Ok(())
        }

        #[tokio::test]
        async fn retry_respects_delay_between_attempts_exponential() -> Result<()> {
            // given
            let timestamps: Mutex<Vec<Instant>> = Mutex::new(vec![]);

            let will_fail_and_record_timestamp = || async {
                timestamps.lock().await.push(Instant::now());
                Result::<()>::Err(error!(Other, "error"))
            };

            let should_retry_fn = |_res: &_| -> bool { true };

            let retry_options =
                RetryConfig::new(3, Backoff::Exponential(Duration::from_millis(100)))?;

            // when
            let _ = retry_util::retry(
                will_fail_and_record_timestamp,
                &retry_options,
                should_retry_fn,
            )
            .await;

            // then
            let timestamps_vec = timestamps.lock().await.clone();

            let timestamps_spaced_out_at_least_100_mills = timestamps_vec
                .iter()
                .zip(timestamps_vec.iter().skip(1))
                .enumerate()
                .all(|(attempt, (current_timestamp, the_next_timestamp))| {
                    the_next_timestamp.duration_since(*current_timestamp)
                        >= (Duration::from_millis(100) * 2u32.pow(attempt as u32))
                });

            assert!(
                timestamps_spaced_out_at_least_100_mills,
                "retry did not wait for the specified time between attempts"
            );

            Ok(())
        }
    }
}