// fuel_core_importer/importer.rs

use crate::{
    ports::{
        BlockVerifier,
        DatabaseTransaction,
        ImporterDatabase,
        Transactional,
        Validator,
    },
    Config,
    ImporterResult,
};
use fuel_core_metrics::importer::importer_metrics;
use fuel_core_storage::{
    not_found,
    transactional::Changes,
    Error as StorageError,
    MerkleRoot,
};
use fuel_core_types::{
    blockchain::{
        consensus::{
            Consensus,
            Sealed,
        },
        primitives::BlockId,
        SealedBlock,
    },
    fuel_tx::{
        field::MintGasPrice,
        Transaction,
    },
    fuel_types::{
        BlockHeight,
        ChainId,
    },
    services::{
        block_importer::{
            ImportResult,
            UncommittedResult,
        },
        executor::{
            self,
            ValidationResult,
        },
        Uncommitted,
    },
};
use parking_lot::Mutex;
use std::{
    ops::{
        Deref,
        DerefMut,
    },
    sync::Arc,
    time::{
        Instant,
        SystemTime,
        UNIX_EPOCH,
    },
};
use tokio::sync::{
    broadcast,
    OwnedSemaphorePermit,
    Semaphore,
    TryAcquireError,
};
use tracing::warn;

#[cfg(test)]
pub mod test;

#[derive(Debug, derive_more::Display, derive_more::From)]
pub enum Error {
    #[display(fmt = "The commit is already in the progress: {_0}.")]
    SemaphoreError(TryAcquireError),
    #[display(
        fmt = "The wrong state of database during insertion of the genesis block."
    )]
    InvalidUnderlyingDatabaseGenesisState,
    #[display(fmt = "The wrong state of storage after execution of the block.\
        The actual root is {_1:?}, when the expected root is {_0:?}.")]
    InvalidDatabaseStateAfterExecution(Option<MerkleRoot>, Option<MerkleRoot>),
    #[display(fmt = "Got overflow during increasing the height.")]
    Overflow,
    #[display(fmt = "The non-generic block can't have zero height.")]
    ZeroNonGenericHeight,
    #[display(fmt = "The actual height is {_1}, when the next expected height is {_0}.")]
    IncorrectBlockHeight(BlockHeight, BlockHeight),
    #[display(
        fmt = "Got another block id after validation of the block. Expected {_0} != Actual {_1}"
    )]
    BlockIdMismatch(BlockId, BlockId),
    #[display(fmt = "Some of the block fields are not valid: {_0}.")]
    FailedVerification(anyhow::Error),
    #[display(fmt = "The execution of the block failed: {_0}.")]
    FailedExecution(executor::Error),
    #[display(fmt = "It is not possible to execute the genesis block.")]
    ExecuteGenesis,
    #[display(fmt = "The database already contains the data at the height {_0}.")]
    NotUnique(BlockHeight),
    #[display(fmt = "The previous block processing is not finished yet.")]
    PreviousBlockProcessingNotFinished,
    #[from]
    StorageError(StorageError),
    UnsupportedConsensusVariant(String),
    ActiveBlockResultsSemaphoreClosed(tokio::sync::AcquireError),
    RayonTaskWasCanceled,
}

impl From<Error> for anyhow::Error {
    fn from(error: Error) -> Self {
        anyhow::Error::msg(error)
    }
}

#[cfg(test)]
impl PartialEq for Error {
    fn eq(&self, other: &Self) -> bool {
        format!("{self}") == format!("{other}")
    }
}

pub struct Importer<D, E, V> {
    database: Mutex<D>,
    executor: Arc<E>,
    verifier: Arc<V>,
    chain_id: ChainId,
    broadcast: broadcast::Sender<ImporterResult>,
    guard: Semaphore,
    /// The semaphore tracks the number of unprocessed `SharedImportResult`s.
    /// If the number of unprocessed results exceeds the threshold,
    /// the block importer stops committing new blocks and waits for
    /// the previous ones to be resolved.
    active_import_results: Arc<Semaphore>,
    process_thread: rayon::ThreadPool,
    /// Enables Prometheus metrics for this fuel service.
    metrics: bool,
}

impl<D, E, V> Importer<D, E, V> {
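    /// Creates a new `Importer`.
    ///
    /// A minimal construction sketch, assuming `db`, `executor`, and `verifier` are
    /// placeholder values whose types implement the importer ports
    /// (`ImporterDatabase + Transactional`, `Validator`, and `BlockVerifier`
    /// respectively); illustrative only, not a doctest:
    ///
    /// ```ignore
    /// let importer = Importer::new(
    ///     ChainId::default(),
    ///     Config::default(),
    ///     db,        // placeholder: impl ImporterDatabase + Transactional
    ///     executor,  // placeholder: impl Validator
    ///     verifier,  // placeholder: impl BlockVerifier
    /// );
    /// ```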
    pub fn new(
        chain_id: ChainId,
        config: Config,
        database: D,
        executor: E,
        verifier: V,
    ) -> Self {
        // We use a semaphore as a back-pressure mechanism instead of a `broadcast`
        // channel because we want to avoid committing results to the database
        // that will not be processed.
        let max_block_notify_buffer = config.max_block_notify_buffer;
        let (broadcast, _) = broadcast::channel(max_block_notify_buffer);
        let process_thread = rayon::ThreadPoolBuilder::new()
            .num_threads(1)
            .build()
            .expect("Failed to create a thread pool for the block processing");

        Self {
            database: Mutex::new(database),
            executor: Arc::new(executor),
            verifier: Arc::new(verifier),
            chain_id,
            broadcast,
            active_import_results: Arc::new(Semaphore::new(max_block_notify_buffer)),
            guard: Semaphore::new(1),
            process_thread,
            metrics: config.metrics,
        }
    }

    #[cfg(test)]
    pub fn default_config(database: D, executor: E, verifier: V) -> Self {
        Self::new(
            Default::default(),
            Default::default(),
            database,
            executor,
            verifier,
        )
    }

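    /// Subscribes to notifications about newly imported blocks.
    ///
    /// A hedged consumption sketch, assuming an already constructed `importer`
    /// (illustrative only, not a doctest):
    ///
    /// ```ignore
    /// let mut blocks = importer.subscribe();
    /// tokio::spawn(async move {
    ///     while let Ok(imported) = blocks.recv().await {
    ///         // Once all receivers drop their copies of the result,
    ///         // the importer's back-pressure permit is released.
    ///         tracing::info!("imported a new block");
    ///     }
    /// });
    /// ```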
    pub fn subscribe(&self) -> broadcast::Receiver<ImporterResult> {
        self.broadcast.subscribe()
    }

    pub(crate) fn lock(&self) -> Result<tokio::sync::SemaphorePermit, Error> {
        let guard = self.guard.try_acquire();
        match guard {
            Ok(permit) => Ok(permit),
            Err(err) => {
                tracing::error!(
                    "The semaphore was acquired before. It is a problem \
                    because the current architecture doesn't expect that."
                );
                Err(Error::SemaphoreError(err))
            }
        }
    }

    async fn async_run<OP, Output>(&self, op: OP) -> Result<Output, Error>
    where
        OP: FnOnce() -> Output,
        OP: Send,
        Output: Send,
    {
        let (sender, receiver) = tokio::sync::oneshot::channel();
        self.process_thread.scope_fifo(|_| {
            let result = op();
            let _ = sender.send(result);
        });
        let result = receiver.await.map_err(|_| Error::RayonTaskWasCanceled)?;
        Ok(result)
    }
}

impl<D, E, V> Importer<D, E, V>
where
    D: ImporterDatabase + Transactional,
    E: Send + Sync,
    V: Send + Sync,
{
    /// The method commits the result of the block execution, attaching the consensus data.
    /// It expects the `UncommittedResult` to contain the result of the block
    /// execution (including the block itself), but nothing more.
    ///
    /// It doesn't perform any checks regarding block validity (execution, fields, signing, etc.).
    /// It only checks the validity of the database state.
    ///
    /// After the commit into the database, it notifies subscribers about the newly imported block.
    ///
    /// # Concurrency
    ///
    /// Only one commit may be in progress at a time; all other calls will fail.
    /// Returns an error if called while another commit is in progress.
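    ///
    /// A hedged call sketch, assuming `importer` and an `uncommitted` result previously
    /// produced by `verify_and_execute_block` (illustrative only):
    ///
    /// ```ignore
    /// let uncommitted = importer.verify_and_execute_block(sealed_block)?;
    /// importer.commit_result(uncommitted).await?;
    /// ```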
    pub async fn commit_result(
        &self,
        result: UncommittedResult<Changes>,
    ) -> Result<(), Error> {
        let _guard = self.lock()?;

        // Wait until the receivers of earlier notifications have processed their results.
        const TIMEOUT: u64 = 20;
        let await_result = tokio::time::timeout(
            tokio::time::Duration::from_secs(TIMEOUT),
            self.active_import_results.clone().acquire_owned(),
        )
        .await;

        let Ok(permit) = await_result else {
            tracing::error!(
                "The previous block processing \
                    was not finished for {TIMEOUT} seconds."
            );
            return Err(Error::PreviousBlockProcessingNotFinished)
        };
        let permit = permit.map_err(Error::ActiveBlockResultsSemaphoreClosed)?;

        self.async_run(move || {
            let mut guard = self
                .database
                .try_lock()
                .expect("Semaphore prevents concurrent access to the database");
            let database = guard.deref_mut();
            self._commit_result(result, permit, database)
        })
        .await?
    }

    /// The method commits the result of the block execution and notifies about a new imported block.
    #[tracing::instrument(
        skip_all,
        fields(
            block_id = %result.result().sealed_block.entity.id(),
            height = **result.result().sealed_block.entity.header().height(),
            tx_status = ?result.result().tx_status,
        ),
        err
    )]
    fn _commit_result(
        &self,
        result: UncommittedResult<Changes>,
        permit: OwnedSemaphorePermit,
        database: &mut D,
    ) -> Result<(), Error> {
        let (result, changes) = result.into();
        let block = &result.sealed_block.entity;
        let consensus = &result.sealed_block.consensus;
        let actual_next_height = *block.header().height();

        // When importing the genesis block, the database should not be initialized yet,
        // and the genesis block itself defines the next height.
        // When producing a non-genesis block, the next height should be the underlying
        // database height + 1.
        let expected_next_height = match consensus {
            Consensus::Genesis(_) => {
                let result = database.latest_block_height()?;
                let found = result.is_some();
                // Because the genesis block is not committed, it should return `None`.
                // If we find the latest height, something is wrong with the state of the database.
                if found {
                    return Err(Error::InvalidUnderlyingDatabaseGenesisState)
                }
                actual_next_height
            }
            Consensus::PoA(_) => {
                if actual_next_height == BlockHeight::from(0u32) {
                    return Err(Error::ZeroNonGenericHeight)
                }

                let last_db_height = database
                    .latest_block_height()?
                    .ok_or(not_found!("Latest block height"))?;
                last_db_height
                    .checked_add(1u32)
                    .ok_or(Error::Overflow)?
                    .into()
            }
            _ => {
                return Err(Error::UnsupportedConsensusVariant(format!(
                    "{:?}",
                    consensus
                )))
            }
        };

        if expected_next_height != actual_next_height {
            return Err(Error::IncorrectBlockHeight(
                expected_next_height,
                actual_next_height,
            ))
        }

        // The importer expects that `UncommittedResult` contains the result of the block
        // execution without the block itself.
        let expected_block_root = database.latest_block_root()?;

        #[cfg(feature = "test-helpers")]
        let changes_clone = changes.clone();
        let mut db_after_execution = database.storage_transaction(changes);
        let actual_block_root = db_after_execution.latest_block_root()?;
        if actual_block_root != expected_block_root {
            return Err(Error::InvalidDatabaseStateAfterExecution(
                expected_block_root,
                actual_block_root,
            ))
        }

        if !db_after_execution.store_new_block(&self.chain_id, &result.sealed_block)? {
            return Err(Error::NotUnique(expected_next_height))
        }

        db_after_execution.commit()?;

        if self.metrics {
            Self::update_metrics(&result, &actual_next_height);
        }
        tracing::info!("Committed block {:#x}", result.sealed_block.entity.id());

        let result = ImporterResult {
            shared_result: Arc::new(Awaiter::new(result, permit)),
            #[cfg(feature = "test-helpers")]
            changes: Arc::new(changes_clone),
        };
        let _ = self.broadcast.send(result);

        Ok(())
    }

    /// Should only be called once after startup to set importer metrics to their initial values
    pub fn init_metrics(&self) {
        // Load starting values from the database.

        // Errors are optimistically handled via fallback to default values since the metrics
        // should get updated regularly anyway, and these errors will be discovered and handled
        // correctly in more mission-critical areas (such as `_commit_result`).
        let current_block_height = self
            .database
            .try_lock()
            .expect("Init function is the first to access the database")
            .latest_block_height()
            .unwrap_or_default()
            .unwrap_or_default();
        importer_metrics()
            .block_height
            .set(*current_block_height.deref() as i64);
        // On init, just set to the current time since it's not worth tracking in the db.
        let current_time = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs_f64();
        importer_metrics()
            .latest_block_import_timestamp
            .set(current_time);
    }

    fn update_metrics(result: &ImportResult, actual_next_height: &BlockHeight) {
        let (total_gas_used, total_fee): (u64, u64) = result
            .tx_status
            .iter()
            .map(|tx_result| {
                (*tx_result.result.total_gas(), *tx_result.result.total_fee())
            })
            .fold((0_u64, 0_u64), |(acc_gas, acc_fee), (used_gas, fee)| {
                (
                    acc_gas.saturating_add(used_gas),
                    acc_fee.saturating_add(fee),
                )
            });
        let maybe_last_tx = result.sealed_block.entity.transactions().last();
        if let Some(last_tx) = maybe_last_tx {
            if let Transaction::Mint(mint) = last_tx {
                importer_metrics()
                    .gas_price
                    .set((*mint.gas_price()).try_into().unwrap_or(i64::MAX));
            } else {
                warn!("Last transaction is not a mint transaction");
            }
        }

        let total_transactions = result.tx_status.len();
        importer_metrics()
            .block_height
            .set(*actual_next_height.deref() as i64);
        let current_time = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap()
            .as_secs_f64();
        importer_metrics()
            .latest_block_import_timestamp
            .set(current_time);
        importer_metrics()
            .gas_per_block
            .set(total_gas_used.try_into().unwrap_or(i64::MAX));
        importer_metrics()
            .fee_per_block
            .set(total_fee.try_into().unwrap_or(i64::MAX));
        importer_metrics()
            .transactions_per_block
            .set(total_transactions.try_into().unwrap_or(i64::MAX));
    }
}

impl<IDatabase, E, V> Importer<IDatabase, E, V>
where
    E: Validator,
    V: BlockVerifier,
{
    /// Performs all checks required to commit the block, including the execution of
    /// the block (as a result, it returns the uncommitted state).
    ///
    /// It validates only the `Block` execution rules and the validity of the `Block` fields.
    /// The validity of the `SealedBlock` and its seal information is not the concern of this function.
    ///
    /// The method doesn't require synchronized access, so it can be called in a
    /// concurrent environment.
    ///
    /// Returns `Err` if the block is invalid for committing. Otherwise, it returns
    /// `Ok` with the uncommitted state.
    pub fn verify_and_execute_block(
        &self,
        sealed_block: SealedBlock,
    ) -> Result<UncommittedResult<Changes>, Error> {
        Self::verify_and_execute_block_inner(
            self.executor.clone(),
            self.verifier.clone(),
            sealed_block,
        )
    }

    fn verify_and_execute_block_inner(
        executor: Arc<E>,
        verifier: Arc<V>,
        sealed_block: SealedBlock,
    ) -> Result<UncommittedResult<Changes>, Error> {
        let consensus = sealed_block.consensus;
        let block = sealed_block.entity;
        let sealed_block_id = block.id();

        let result_of_verification = verifier.verify_block_fields(&consensus, &block);
        if let Err(err) = result_of_verification {
            return Err(Error::FailedVerification(err))
        }

        // The current code has a separate function X to process `StateConfig`.
        // It is not possible to execute it via the `Executor`.
        // Maybe we should consider moving function X here, if that is possible.
        if let Consensus::Genesis(_) = consensus {
            return Err(Error::ExecuteGenesis)
        }

        let (ValidationResult { tx_status, events }, changes) = executor
            .validate(&block)
            .map_err(Error::FailedExecution)?
            .into();

        let actual_block_id = block.id();
        if actual_block_id != sealed_block_id {
            // This should not be possible because validation doesn't touch the block.
            // But since we pass it by value, let's check it anyway.
            return Err(Error::BlockIdMismatch(sealed_block_id, actual_block_id))
        }

        let sealed_block = Sealed {
            entity: block,
            consensus,
        };
        let import_result =
            ImportResult::new_from_network(sealed_block, tx_status, events);

        Ok(Uncommitted::new(import_result, changes))
    }
}

impl<IDatabase, E, V> Importer<IDatabase, E, V>
where
    IDatabase: ImporterDatabase + Transactional + 'static,
    E: Validator + 'static,
    V: BlockVerifier + 'static,
{
    /// The method validates the `Block` fields and commits the `SealedBlock`.
    /// It is a combination of the [`Importer::verify_and_execute_block`] and [`Importer::commit_result`].
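    ///
    /// A hedged one-shot sketch, assuming a `sealed_block` to import
    /// (illustrative only, not a doctest):
    ///
    /// ```ignore
    /// importer.execute_and_commit(sealed_block).await?;
    /// ```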
    pub async fn execute_and_commit(
        &self,
        sealed_block: SealedBlock,
    ) -> Result<(), Error> {
        let _guard = self.lock()?;

        let executor = self.executor.clone();
        let verifier = self.verifier.clone();
        let (result, execute_time) = self
            .async_run(|| {
                let start = Instant::now();
                let result = Self::verify_and_execute_block_inner(
                    executor,
                    verifier,
                    sealed_block,
                );
                let execute_time = start.elapsed().as_secs_f64();
                (result, execute_time)
            })
            .await?;

        let result = result?;

        // Wait until the receivers of earlier notifications have processed their results.
        const TIMEOUT: u64 = 20;
        let await_result = tokio::time::timeout(
            tokio::time::Duration::from_secs(TIMEOUT),
            self.active_import_results.clone().acquire_owned(),
        )
        .await;

        let Ok(permit) = await_result else {
            tracing::error!(
                "The previous block processing \
                     was not finished for {TIMEOUT} seconds."
            );
            return Err(Error::PreviousBlockProcessingNotFinished)
        };
        let permit = permit.map_err(Error::ActiveBlockResultsSemaphoreClosed)?;

        let commit_result = self
            .async_run(move || {
                let mut guard = self
                    .database
                    .try_lock()
                    .expect("Semaphore prevents concurrent access to the database");
                let database = guard.deref_mut();

                let start = Instant::now();
                self._commit_result(result, permit, database).map(|_| start)
            })
            .await?;

        let time = if let Ok(start_instant) = commit_result {
            let commit_time = start_instant.elapsed().as_secs_f64();
            execute_time + commit_time
        } else {
            execute_time
        };

        importer_metrics().execute_and_commit_duration.observe(time);
        // return execution result
        commit_result.map(|_| ())
    }
}

/// A wrapper around `ImportResult` that notifies about the end of processing of a new block:
/// dropping it releases the importer's back-pressure permit.
struct Awaiter {
    result: ImportResult,
    _permit: OwnedSemaphorePermit,
}

impl Deref for Awaiter {
    type Target = ImportResult;

    fn deref(&self) -> &Self::Target {
        &self.result
    }
}

impl Awaiter {
    fn new(result: ImportResult, permit: OwnedSemaphorePermit) -> Self {
        Self {
            result,
            _permit: permit,
        }
    }
}