use crate::{
    deadline_clock::{
        DeadlineClock,
        OnConflict,
    },
    ports::{
        BlockDb,
        BlockProducer,
        DBTransaction,
    },
    Config,
    Trigger,
};
use anyhow::{
    anyhow,
    Context,
};
use fuel_core_interfaces::{
    block_importer::ImportBlockBroadcast,
    common::{
        fuel_tx::UniqueIdentifier,
        prelude::{
            Signature,
            Word,
        },
        secrecy::{
            ExposeSecret,
            Secret,
        },
    },
    executor::{
        ExecutionResult,
        UncommittedResult,
    },
    model::{
        BlockHeight,
        FuelBlock,
        FuelBlockConsensus,
        FuelBlockPoAConsensus,
        SecretKeyWrapper,
    },
    poa_coordinator::TransactionPool,
    txpool::TxStatus,
};
use parking_lot::Mutex;
use std::{
    ops::Deref,
    sync::Arc,
};
use tokio::{
    sync::{
        broadcast,
        mpsc,
    },
    task::JoinHandle,
    time::Instant,
};
use tracing::{
    error,
    warn,
};

pub struct RunningService {
    join: JoinHandle<()>,
    stop: mpsc::Sender<()>,
}

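/// Handle to the PoA block production service.
///
/// A minimal usage sketch (illustrative only; `config`, `txpool`, `producer`,
/// `db`, and the channels are assumed to be constructed elsewhere):
///
/// ```ignore
/// let service = Service::new(&config);
/// service
///     .start(txpool_status_rx, txpool, import_block_events_tx, producer, db)
///     .await;
/// // ... later, during shutdown:
/// if let Some(handle) = service.stop().await {
///     handle.await.expect("PoA task panicked");
/// }
/// ```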
pub struct Service {
    running: Mutex<Option<RunningService>>,
    config: Config,
}

impl Service {
    pub fn new(config: &Config) -> Self {
        Self {
            running: Mutex::new(None),
            config: config.clone(),
        }
    }

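    /// Spawn the block production task with the given dependencies.
    /// Logs a warning and does nothing if the service is already running.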
    pub async fn start<D, T, B>(
        &self,
        txpool_broadcast: broadcast::Receiver<TxStatus>,
        txpool: T,
        import_block_events_tx: broadcast::Sender<ImportBlockBroadcast>,
        block_producer: B,
        db: D,
    ) where
        D: BlockDb + Send + Clone + 'static,
        T: TransactionPool + Send + Sync + 'static,
        B: BlockProducer<D> + 'static,
    {
        let mut running = self.running.lock();

        if running.is_some() {
            warn!("Trying to start a service that is already running");
            return
        }

        let (stop_tx, stop_rx) = mpsc::channel(1);

        let task = Task {
            stop: stop_rx,
            block_gas_limit: self.config.block_gas_limit,
            signing_key: self.config.signing_key.clone(),
            db,
            block_producer,
            txpool_broadcast,
            txpool,
            last_block_created: Instant::now(),
            import_block_events_tx,
            trigger: self.config.trigger,
            timer: DeadlineClock::new(),
        };

        *running = Some(RunningService {
            join: tokio::spawn(task.run()),
            stop: stop_tx,
        });
    }

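    /// Signal the running task to stop, returning its `JoinHandle` so the
    /// caller can await shutdown, or `None` if the service was not running.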
    pub async fn stop(&self) -> Option<JoinHandle<()>> {
        let maybe_running = self.running.lock().take();
        if let Some(running) = maybe_running {
            // Ignore possible send error, as the JoinHandle will report errors anyway
            let _ = running.stop.send(()).await;
            Some(running.join)
        } else {
            warn!("Trying to stop a service that is not running");
            None
        }
    }
}

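/// The PoA coordinator task: waits for txpool events and timer deadlines,
/// and produces, seals, and imports new blocks according to the configured
/// [`Trigger`] mode.
///
/// A `Trigger::Hybrid` configuration, for example, could look like this
/// (sketch only; the durations are arbitrary):
///
/// ```ignore
/// Trigger::Hybrid {
///     min_block_time: Duration::from_secs(2),
///     max_tx_idle_time: Duration::from_secs(5),
///     max_block_time: Duration::from_secs(10),
/// }
/// ```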
pub struct Task<D, T, B>
where
    D: BlockDb + Send + Sync,
    T: TransactionPool,
    B: BlockProducer<D>,
{
    stop: mpsc::Receiver<()>,
    block_gas_limit: Word,
    signing_key: Option<Secret<SecretKeyWrapper>>,
    db: D,
    block_producer: B,
    txpool: T,
    txpool_broadcast: broadcast::Receiver<TxStatus>,
    import_block_events_tx: broadcast::Sender<ImportBlockBroadcast>,
    /// Last block creation time. When starting up, this is initialized
    /// to `Instant::now()`, which delays the first block on startup for
    /// a bit, but doesn't cause any other issues.
    last_block_created: Instant,
    trigger: Trigger,
    /// Deadline clock, used by the triggers
    timer: DeadlineClock,
}

impl<D, T, B> Task<D, T, B>
where
    D: BlockDb + Send,
    T: TransactionPool,
    B: BlockProducer<D>,
{
    /// Request the block producer to make a new block, and return it when ready.
    async fn signal_produce_block(
        &mut self,
    ) -> anyhow::Result<UncommittedResult<DBTransaction<D>>> {
        let current_height = self
            .db
            .block_height()
            .map_err(|err| anyhow::format_err!("db error {err:?}"))?;
        let height = BlockHeight::from(current_height.as_usize() + 1);

        self.block_producer
            .produce_and_execute_block(height, self.block_gas_limit)
            .await
    }

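    /// Produce and execute a new block, seal and commit it, remove any skipped
    /// transactions from the txpool, broadcast the import event, and re-arm the
    /// timer for the next block.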
    async fn produce_block(&mut self) -> anyhow::Result<()> {
        // verify signing key is set
        if self.signing_key.is_none() {
            return Err(anyhow!("unable to produce blocks without a consensus key"))
        }

        // Ask the block producer to create the block
        let (
            ExecutionResult {
                block,
                skipped_transactions,
                ..
            },
            mut db_transaction,
        ) = self.signal_produce_block().await?.into();

        // sign the block and seal it
        seal_block(&self.signing_key, &block, db_transaction.database_mut())?;
        db_transaction.commit_box()?;

        let mut tx_ids_to_remove = Vec::with_capacity(skipped_transactions.len());
        for (tx, err) in skipped_transactions {
            error!(
                "Block production skipped invalid transaction {:?} with error {:?}",
                tx, err
            );
            tx_ids_to_remove.push(tx.id());
        }

        if let Err(err) = self.txpool.remove_txs(tx_ids_to_remove).await {
            error!(
                "Unable to clean up skipped transactions from `TxPool` with error {:?}",
                err
            );
        }

        // Broadcast the new block so the txpool (and other subscribers) can import it
        // TODO: this likely needs to be done differently in a multi-node configuration
        self.import_block_events_tx
            .send(ImportBlockBroadcast::PendingFuelBlockImported {
                block: Arc::new(block),
            })
            .expect("Failed to import the generated block");

        // Update last block time
        self.last_block_created = Instant::now();

        // Set timer for the next block
        match self.trigger {
            Trigger::Never => {
                unreachable!("This mode will never produce blocks");
            }
            Trigger::Instant => {}
            Trigger::Interval { block_time } => {
                // TODO: instead of sleeping for `block_time`, subtract the time we used for processing
                self.timer.set_timeout(block_time, OnConflict::Min).await;
            }
            Trigger::Hybrid {
                max_block_time,
                min_block_time,
                max_tx_idle_time,
            } => {
                self.timer
                    .set_timeout(max_block_time, OnConflict::Min)
                    .await;

                let consumable_gas = self.txpool.total_consumable_gas().await?;

                // If the txpool still has more than a full block of transactions
                // available, produce the next block after `min_block_time`.
                if consumable_gas > self.block_gas_limit {
                    self.timer
                        .set_timeout(min_block_time, OnConflict::Min)
                        .await;
                } else if self.txpool.pending_number().await? > 0 {
                    // If we still have available txs, reduce the timeout to max idle time
                    self.timer
                        .set_timeout(max_tx_idle_time, OnConflict::Min)
                        .await;
                }
            }
        }

        Ok(())
    }

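    /// React to a transaction status change from the txpool. Only newly
    /// submitted transactions can trigger (or reschedule) block production;
    /// completed and squeezed-out transactions are ignored here.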
    async fn on_txpool_event(&mut self, txpool_event: &TxStatus) -> anyhow::Result<()> {
        match txpool_event {
            TxStatus::Submitted => match self.trigger {
                Trigger::Instant => {
                    let pending_number = self.txpool.pending_number().await?;
                    // skip production if there are no pending transactions
                    if pending_number > 0 {
                        self.produce_block().await?;
                    }
                    Ok(())
                }
                Trigger::Never | Trigger::Interval { .. } => Ok(()),
                Trigger::Hybrid {
                    max_tx_idle_time,
                    min_block_time,
                    ..
                } => {
                    let consumable_gas = self.txpool.total_consumable_gas().await?;

                    // If we have over one full block of transactions and min_block_time
                    // has expired, start block production immediately
                    if consumable_gas > self.block_gas_limit
                        && self.last_block_created + min_block_time < Instant::now()
                    {
                        self.produce_block().await?;
                    } else if self.txpool.pending_number().await? > 0 {
                        // We have at least one pending transaction, so `max_tx_idle_time` is the limit
                        self.timer
                            .set_timeout(max_tx_idle_time, OnConflict::Min)
                            .await;
                    }

                    Ok(())
                }
            },
            TxStatus::Completed => Ok(()), // This has been processed already
            TxStatus::SqueezedOut { .. } => {
                // TODO: If this is the only tx, set timer deadline to last_block_time + max_block_time
                Ok(())
            }
        }
    }

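    /// Handle an expired deadline from the [`DeadlineClock`]. In the
    /// `Interval` and `Hybrid` modes this always produces a new block.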
    async fn on_timer(&mut self, _at: Instant) -> anyhow::Result<()> {
        match self.trigger {
            Trigger::Instant | Trigger::Never => {
                unreachable!("Timer is never set in this mode");
            }
            // In the Interval mode the timer expires only when a new block should be created.
            // In the Hybrid mode the timer can be either:
            // 1. min_block_time expired after it was set when a block
            //    would have been produced too soon
            // 2. max_tx_idle_time expired after a tx has arrived
            // 3. max_block_time expired
            // => we produce a new block in any case
            Trigger::Interval { .. } | Trigger::Hybrid { .. } => {
                self.produce_block().await?;
                Ok(())
            }
        }
    }

    /// Processes the next incoming event. Called by the main event loop.
    /// Returns `Ok(false)` if the event loop should stop.
    async fn process_next_event(&mut self) -> anyhow::Result<bool> {
        tokio::select! {
            _ = self.stop.recv() => {
                Ok(false)
            }
            // TODO: This should likely be refactored to use something like tokio::sync::Notify.
            //       Otherwise, if a bunch of txs are submitted at once and all the txs are included
            //       into the first block production trigger, we'll still call the event handler
            //       for each tx after they've already been included into a block.
            //       The poa service also doesn't care about events unrelated to new tx submissions,
            //       and shouldn't be awoken when txs are completed or squeezed out of the pool.
            txpool_event = self.txpool_broadcast.recv() => {
                self.on_txpool_event(&txpool_event.context("Broadcast receive error")?).await.context("While processing txpool event")?;
                Ok(true)
            }
            at = self.timer.wait() => {
                self.on_timer(at).await.context("While processing timer event")?;
                Ok(true)
            }
        }
    }

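    /// Arm the deadline clock for the first block, based on the trigger mode.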
    async fn init_timers(&mut self) {
        match self.trigger {
            Trigger::Never | Trigger::Instant => {}
            Trigger::Interval { block_time } => {
                self.timer
                    .set_timeout(block_time, OnConflict::Overwrite)
                    .await;
            }
            Trigger::Hybrid { max_block_time, .. } => {
                self.timer
                    .set_timeout(max_block_time, OnConflict::Overwrite)
                    .await;
            }
        }
    }

    /// Start the event loop and run it until a stop signal is received.
    async fn run(mut self) {
        self.init_timers().await;
        loop {
            match self.process_next_event().await {
                Ok(should_continue) => {
                    if !should_continue {
                        break
                    }
                }
                Err(err) => {
                    error!("PoA module encountered an error: {err:?}");
                }
            }
        }
    }
}

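/// Sign the block id with the configured PoA key and store the resulting
/// consensus seal in the database. Returns an error if no key is configured.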
pub fn seal_block(
    signing_key: &Option<Secret<SecretKeyWrapper>>,
    block: &FuelBlock,
    database: &mut dyn BlockDb,
) -> anyhow::Result<()> {
    if let Some(key) = signing_key {
        let block_hash = block.id();
        let message = block_hash.into_message();

        // The length of the secret is checked
        let signing_key = key.expose_secret().deref();

        let poa_signature = Signature::sign(signing_key, &message);
        let seal = FuelBlockConsensus::PoA(FuelBlockPoAConsensus::new(poa_signature));
        database.seal_block(block_hash, seal)
    } else {
        Err(anyhow!("no PoA signing key configured"))
    }
}

#[cfg(test)]
mod test {
    use super::*;
    use fuel_core_interfaces::{
        common::{
            fuel_crypto::SecretKey,
            fuel_tx::{
                Receipt,
                Transaction,
                TransactionBuilder,
                TxId,
            },
        },
        db::{
            Error as DBError,
            Transactional,
        },
        executor::Error,
        model::{
            ArcPoolTx,
            BlockId,
        },
        txpool::Error::NoMetadata,
    };
    use rand::{
        prelude::StdRng,
        Rng,
        SeedableRng,
    };
    use std::{
        collections::HashSet,
        time::Duration,
    };
    use tokio::time;

    mockall::mock! {
        TxPool {}

        #[async_trait::async_trait]
        impl TransactionPool for TxPool {
            async fn pending_number(&self) -> anyhow::Result<usize>;

            async fn total_consumable_gas(&self) -> anyhow::Result<u64>;

            async fn remove_txs(&mut self, tx_ids: Vec<TxId>) -> anyhow::Result<Vec<ArcPoolTx>>;
        }
    }

    mockall::mock! {
        Database {}

        unsafe impl Sync for Database {}
        unsafe impl Send for Database {}

        impl BlockDb for Database {
            fn block_height(&self) -> anyhow::Result<BlockHeight>;

            fn seal_block(
                &mut self,
                block_id: BlockId,
                consensus: FuelBlockConsensus,
            ) -> anyhow::Result<()>;
        }
    }

    mockall::mock! {
        #[derive(Debug)]
        DatabaseTransaction{}

        impl Transactional for DatabaseTransaction {
            fn commit(self) -> Result<(), DBError>;

            fn commit_box(self: Box<Self>) -> Result<(), DBError>;
        }

        impl fuel_core_interfaces::db::DatabaseTransaction<MockDatabase> for DatabaseTransaction {
            fn database(&self) -> &MockDatabase;

            fn database_mut(&mut self) -> &mut MockDatabase;
        }
    }

    mockall::mock! {
        BlockProducer {}

        #[async_trait::async_trait]
        impl BlockProducer<MockDatabase> for BlockProducer {
            async fn produce_and_execute_block(
                &self,
                _height: BlockHeight,
                _max_gas: Word,
            ) -> anyhow::Result<UncommittedResult<DBTransaction<MockDatabase>>>;

            async fn dry_run(
                &self,
                _transaction: Transaction,
                _height: Option<BlockHeight>,
                _utxo_validation: Option<bool>,
            ) -> anyhow::Result<Vec<Receipt>>;
        }
    }

    fn make_tx(rng: &mut StdRng) -> Transaction {
        TransactionBuilder::create(rng.gen(), rng.gen(), vec![])
            .gas_price(rng.gen())
            .gas_limit(rng.gen())
            .finalize_without_signature_as_transaction()
    }

    #[tokio::test]
    async fn remove_skipped_transactions() {
        // The test verifies that when `BlockProducer` returns skipped transactions, they
        // are propagated to `TxPool` for removal.
        let mut rng = StdRng::seed_from_u64(2322);
        let secret_key = SecretKey::random(&mut rng);

        let (_, stop) = mpsc::channel(1);
        let (_, txpool_broadcast) = broadcast::channel(1);
        let (import_block_events_tx, mut import_block_receiver_tx) =
            broadcast::channel(1);
        tokio::spawn(async move {
            import_block_receiver_tx.recv().await.unwrap();
        });

        const TX_NUM: usize = 100;
        let skipped_transactions: Vec<_> =
            (0..TX_NUM).map(|_| make_tx(&mut rng)).collect();

        let mock_skipped_txs = skipped_transactions.clone();

        let mut seq = mockall::Sequence::new();

        let mut block_producer = MockBlockProducer::default();
        block_producer
            .expect_produce_and_execute_block()
            .times(1)
            .in_sequence(&mut seq)
            .returning(move |_, _| {
                let mut db = MockDatabase::default();
                // We expect `seal_block` to be called exactly once after `produce_and_execute_block`.
                db.expect_seal_block()
                    .times(1)
                    .in_sequence(&mut seq)
                    .returning(|_, _| Ok(()));

                let mut db_transaction = MockDatabaseTransaction::default();
                db_transaction.expect_database_mut().times(1).return_var(db);

                // Check that the database transaction is committed after `seal_block`.
                db_transaction
                    .expect_commit_box()
                    // Verifies that `commit_box` has been called.
                    .times(1)
                    .in_sequence(&mut seq)
                    .returning(|| Ok(()));
                db_transaction
                    .expect_commit()
                    // TODO: After removing `commit_box` set `times(1)`
                    .times(0)
                    .in_sequence(&mut seq)
                    .returning(|| Ok(()));
                Ok(UncommittedResult::new(
                    ExecutionResult {
                        block: Default::default(),
                        skipped_transactions: mock_skipped_txs
                            .clone()
                            .into_iter()
                            .map(|tx| (tx, Error::OutputAlreadyExists))
                            .collect(),
                        tx_status: Default::default(),
                    },
                    Box::new(db_transaction),
                ))
            });

        let mut db = MockDatabase::default();
        db.expect_block_height()
            .returning(|| Ok(BlockHeight::from(1u32)));

        let mut txpool = MockTxPool::default();
        // This test was created only for this check.
        txpool.expect_remove_txs().returning(move |skipped_ids| {
            // Transform transactions into ids.
            let skipped_transactions: Vec<_> =
                skipped_transactions.iter().map(|tx| tx.id()).collect();

            // Check that all transactions are unique.
            let expected_skipped_ids_set: HashSet<_> =
                skipped_transactions.clone().into_iter().collect();
            assert_eq!(expected_skipped_ids_set.len(), TX_NUM);

            // Check that `TxPool::remove_txs` was called with the same ids in the same order.
            assert_eq!(skipped_ids.len(), TX_NUM);
            assert_eq!(skipped_transactions.len(), TX_NUM);
            assert_eq!(skipped_transactions, skipped_ids);
            Ok(vec![])
        });

        let mut task = Task {
            stop,
            block_gas_limit: 1000000,
            signing_key: Some(Secret::new(secret_key.into())),
            db,
            block_producer,
            txpool,
            txpool_broadcast,
            import_block_events_tx,
            last_block_created: Instant::now(),
            trigger: Trigger::Instant,
            timer: DeadlineClock::new(),
        };

        assert!(task.produce_block().await.is_ok());
    }

    #[tokio::test]
    async fn does_not_produce_when_txpool_empty_in_instant_mode() {
        // Verify that the PoA service doesn't produce empty blocks when it receives
        // irrelevant updates from the txpool.
        let mut rng = StdRng::seed_from_u64(2322);
        let secret_key = SecretKey::random(&mut rng);

        let (_stop_tx, stop) = mpsc::channel(1);
        let (_txpool_tx, txpool_broadcast) = broadcast::channel(1);
        let (import_block_events_tx, mut import_block_receiver_tx) =
            broadcast::channel(1);
        tokio::spawn(async move {
            import_block_receiver_tx.recv().await.unwrap();
        });

        let mut block_producer = MockBlockProducer::default();

        block_producer
            .expect_produce_and_execute_block()
            .returning(|_, _| panic!("Block production should not be called"));

        let mut db = MockDatabase::default();
        db.expect_block_height()
            .returning(|| Ok(BlockHeight::from(1u32)));

        let mut txpool = MockTxPool::default();
        txpool.expect_total_consumable_gas().returning(|| Ok(0));
        txpool.expect_pending_number().returning(|| Ok(0));

        let mut task = Task {
            stop,
            block_gas_limit: 1000000,
            signing_key: Some(Secret::new(secret_key.into())),
            db,
            block_producer,
            txpool,
            txpool_broadcast,
            import_block_events_tx,
            last_block_created: Instant::now(),
            trigger: Trigger::Instant,
            timer: DeadlineClock::new(),
        };

        // simulate some txpool events to see if any block production is erroneously triggered
        task.on_txpool_event(&TxStatus::Submitted).await.unwrap();
        task.on_txpool_event(&TxStatus::Completed).await.unwrap();
        task.on_txpool_event(&TxStatus::SqueezedOut { reason: NoMetadata })
            .await
            .unwrap();
    }

    #[tokio::test(start_paused = true)]
    async fn hybrid_production_doesnt_produce_empty_blocks_when_txpool_is_empty() {
        // Verify that the PoA service doesn't alter the hybrid block timing when it
        // receives txpool events while the txpool is actually empty.
        let mut rng = StdRng::seed_from_u64(2322);
        let secret_key = SecretKey::random(&mut rng);

        const TX_IDLE_TIME_MS: u64 = 50u64;

        let (stop_tx, stop) = mpsc::channel(1);
        let (txpool_tx, txpool_broadcast) = broadcast::channel(10);
        let (import_block_events_tx, mut import_block_receiver_tx) =
            broadcast::channel(1);
        tokio::spawn(async move {
            let _ = import_block_receiver_tx.recv().await;
        });

        let mut block_producer = MockBlockProducer::default();

        block_producer
            .expect_produce_and_execute_block()
            .returning(|_, _| panic!("Block production should not be called"));

        let mut db = MockDatabase::default();
        db.expect_block_height()
            .returning(|| Ok(BlockHeight::from(1u32)));

        let mut txpool = MockTxPool::default();
        txpool.expect_total_consumable_gas().returning(|| Ok(0));
        txpool.expect_pending_number().returning(|| Ok(0));

        let task = Task {
            stop,
            block_gas_limit: 1000000,
            signing_key: Some(Secret::new(secret_key.into())),
            db,
            block_producer,
            txpool,
            txpool_broadcast,
            import_block_events_tx,
            last_block_created: Instant::now(),
            trigger: Trigger::Hybrid {
                min_block_time: Duration::from_millis(100),
                max_tx_idle_time: Duration::from_millis(TX_IDLE_TIME_MS),
                max_block_time: Duration::from_millis(1000),
            },
            timer: DeadlineClock::new(),
        };

        let jh = tokio::spawn(task.run());

        // simulate some txpool events to see if any block production is erroneously triggered
        txpool_tx.send(TxStatus::Submitted).unwrap();
        txpool_tx.send(TxStatus::Completed).unwrap();
        txpool_tx
            .send(TxStatus::SqueezedOut { reason: NoMetadata })
            .unwrap();

        // Wait for `max_tx_idle_time`; block production would occur here if the
        // `pending_number() > 0` check were missing.
        time::sleep(Duration::from_millis(TX_IDLE_TIME_MS)).await;

        // send stop
        stop_tx.send(()).await.unwrap();

        // await shutdown and capture any errors
        jh.await.unwrap();
    }
}