use crate::{Error, NodeCodec};
use hash_db::Hasher;
use nohash_hasher::BuildNoHashHasher;
use parking_lot::{Mutex, MutexGuard};
use schnellru::LruMap;
use shared_cache::{ValueCacheKey, ValueCacheRef};
use std::{
	collections::HashMap,
	sync::{
		atomic::{AtomicU64, Ordering},
		Arc,
	},
	time::Duration,
};
use trie_db::{node::NodeOwned, CachedValue};

mod shared_cache;

pub use shared_cache::SharedTrieCache;

use self::shared_cache::ValueCacheKeyHash;

const LOG_TARGET: &str = "trie-cache";

/// The maximum amount of time we'll wait for the shared cache's write lock when a
/// local cache is dropped; on timeout the local cache's contents are discarded.
const SHARED_CACHE_WRITE_LOCK_TIMEOUT: Duration = Duration::from_millis(100);

/// The maximum number of existing keys in the shared node cache that a single local
/// cache may promote to the most-recently-used position when it is merged back.
const SHARED_NODE_CACHE_MAX_PROMOTED_KEYS: u32 = 1792;
/// Same as [`SHARED_NODE_CACHE_MAX_PROMOTED_KEYS`], but for the shared value cache.
const SHARED_VALUE_CACHE_MAX_PROMOTED_KEYS: u32 = 1792;

/// The maximum percentage of the shared node cache that a single local cache may
/// replace when it is merged back.
const SHARED_NODE_CACHE_MAX_REPLACE_PERCENT: usize = 33;
/// Same as [`SHARED_NODE_CACHE_MAX_REPLACE_PERCENT`], but for the shared value cache.
const SHARED_VALUE_CACHE_MAX_REPLACE_PERCENT: usize = 33;

/// The maximum inline size (in bytes) the local node cache's map itself may grow to.
const LOCAL_NODE_CACHE_MAX_INLINE_SIZE: usize = 512 * 1024;
/// Same as [`LOCAL_NODE_CACHE_MAX_INLINE_SIZE`], but for the local value cache.
const LOCAL_VALUE_CACHE_MAX_INLINE_SIZE: usize = 512 * 1024;

/// The maximum size (in bytes) of data the local node cache may allocate on the heap.
const LOCAL_NODE_CACHE_MAX_HEAP_SIZE: usize = 8 * 1024 * 1024;
/// Same as [`LOCAL_NODE_CACHE_MAX_HEAP_SIZE`], but for the local value cache.
const LOCAL_VALUE_CACHE_MAX_HEAP_SIZE: usize = 2 * 1024 * 1024;

/// The size of the shared cache, in bytes.
#[derive(Debug, Clone, Copy)]
pub struct CacheSize(usize);

impl CacheSize {
	/// An unlimited cache size.
	pub const fn unlimited() -> Self {
		CacheSize(usize::MAX)
	}

	/// A cache size of exactly `bytes` bytes.
	pub const fn new(bytes: usize) -> Self {
		CacheSize(bytes)
	}
}
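
// A minimal usage sketch (illustrative only; the `Blake2Hasher` choice simply
// mirrors the tests at the bottom of this file):
//
//     let shared = SharedTrieCache::<sp_core::Blake2Hasher>::new(CacheSize::new(64 * 1024 * 1024));
//     let unbounded = SharedTrieCache::<sp_core::Blake2Hasher>::new(CacheSize::unlimited());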

/// A limiter for the local node cache which tracks how much memory the cached
/// nodes allocate on the heap, so the cache doesn't grow unbounded.
#[derive(Default)]
pub struct LocalNodeCacheLimiter {
	/// The heap size (in bytes) of the values currently stored in the map.
	///
	/// This does not include the inline memory used by the map itself.
	current_heap_size: usize,
}

impl<H> schnellru::Limiter<H, NodeCached<H>> for LocalNodeCacheLimiter
where
	H: AsRef<[u8]> + std::fmt::Debug,
{
	type KeyToInsert<'a> = H;
	type LinkType = u32;

	#[inline]
	fn is_over_the_limit(&self, length: usize) -> bool {
		// Only enforce the limit if there is more than one element, so that a new
		// element can always be inserted into the cache.
		if length <= 1 {
			return false
		}

		self.current_heap_size > LOCAL_NODE_CACHE_MAX_HEAP_SIZE
	}

	#[inline]
	fn on_insert<'a>(
		&mut self,
		_length: usize,
		key: H,
		cached_node: NodeCached<H>,
	) -> Option<(H, NodeCached<H>)> {
		self.current_heap_size += cached_node.heap_size();
		Some((key, cached_node))
	}

	#[inline]
	fn on_replace(
		&mut self,
		_length: usize,
		_old_key: &mut H,
		_new_key: H,
		old_node: &mut NodeCached<H>,
		new_node: &mut NodeCached<H>,
	) -> bool {
		// The keys are hashes of equal length, so only the nodes' heap sizes change.
		debug_assert_eq!(_old_key.as_ref().len(), _new_key.as_ref().len());
		self.current_heap_size =
			self.current_heap_size + new_node.heap_size() - old_node.heap_size();
		true
	}

	#[inline]
	fn on_removed(&mut self, _key: &mut H, cached_node: &mut NodeCached<H>) {
		self.current_heap_size -= cached_node.heap_size();
	}

	#[inline]
	fn on_cleared(&mut self) {
		self.current_heap_size = 0;
	}

	#[inline]
	fn on_grow(&mut self, new_memory_usage: usize) -> bool {
		new_memory_usage <= LOCAL_NODE_CACHE_MAX_INLINE_SIZE
	}
}
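
// How `schnellru` drives this limiter (a contract sketch under the assumption
// that the map is constructed as below; no extra API is implied):
//
//     let mut map: NodeCacheMap<[u8; 32]> = LruMap::new(LocalNodeCacheLimiter::default());
//     // Each `insert` calls `on_insert` (and `on_removed` for any evicted entry);
//     // afterwards `is_over_the_limit` is consulted and least recently used
//     // entries are popped until the tracked heap usage is back under
//     // `LOCAL_NODE_CACHE_MAX_HEAP_SIZE`. `on_grow` separately caps the inline
//     // size of the map itself at `LOCAL_NODE_CACHE_MAX_INLINE_SIZE`.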

/// A limiter for the local value cache which tracks how much memory the cached
/// storage keys allocate on the heap, so the cache doesn't grow unbounded.
#[derive(Default)]
pub struct LocalValueCacheLimiter {
	/// The heap size (in bytes) of the storage keys currently stored in the map.
	///
	/// This does not include the inline memory used by the map itself.
	current_heap_size: usize,
}

impl<H> schnellru::Limiter<ValueCacheKey<H>, CachedValue<H>> for LocalValueCacheLimiter
where
	H: AsRef<[u8]>,
{
	type KeyToInsert<'a> = ValueCacheRef<'a, H>;
	type LinkType = u32;

	#[inline]
	fn is_over_the_limit(&self, length: usize) -> bool {
		// Only enforce the limit if there is more than one element, so that a new
		// element can always be inserted into the cache.
		if length <= 1 {
			return false
		}

		self.current_heap_size > LOCAL_VALUE_CACHE_MAX_HEAP_SIZE
	}

	#[inline]
	fn on_insert(
		&mut self,
		_length: usize,
		key: Self::KeyToInsert<'_>,
		value: CachedValue<H>,
	) -> Option<(ValueCacheKey<H>, CachedValue<H>)> {
		self.current_heap_size += key.storage_key.len();
		Some((key.into(), value))
	}

	#[inline]
	fn on_replace(
		&mut self,
		_length: usize,
		_old_key: &mut ValueCacheKey<H>,
		_new_key: ValueCacheRef<H>,
		_old_value: &mut CachedValue<H>,
		_new_value: &mut CachedValue<H>,
	) -> bool {
		// A key is only ever replaced by an equal key, so the tracked heap size of
		// the storage key stays the same.
		debug_assert_eq!(_old_key.storage_key.len(), _new_key.storage_key.len());
		true
	}

	#[inline]
	fn on_removed(&mut self, key: &mut ValueCacheKey<H>, _: &mut CachedValue<H>) {
		self.current_heap_size -= key.storage_key.len();
	}

	#[inline]
	fn on_cleared(&mut self) {
		self.current_heap_size = 0;
	}

	#[inline]
	fn on_grow(&mut self, new_memory_usage: usize) -> bool {
		new_memory_usage <= LOCAL_VALUE_CACHE_MAX_INLINE_SIZE
	}
}

/// Cache hit statistics, tracked separately for the local and the shared layer.
#[derive(Default)]
struct HitStats {
	shared_hits: AtomicU64,
	shared_fetch_attempts: AtomicU64,
	local_hits: AtomicU64,
	local_fetch_attempts: AtomicU64,
}

impl std::fmt::Display for HitStats {
	fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
		let shared_hits = self.shared_hits.load(Ordering::Relaxed);
		let shared_fetch_attempts = self.shared_fetch_attempts.load(Ordering::Relaxed);
		let local_hits = self.local_hits.load(Ordering::Relaxed);
		let local_fetch_attempts = self.local_fetch_attempts.load(Ordering::Relaxed);
		if local_fetch_attempts == 0 && shared_fetch_attempts == 0 {
			write!(fmt, "empty")
		} else {
			let percent_local = (local_hits as f32 / local_fetch_attempts as f32) * 100.0;
			let percent_shared = (shared_hits as f32 / shared_fetch_attempts as f32) * 100.0;
			write!(
				fmt,
				"local hit rate = {}% [{}/{}], shared hit rate = {}% [{}/{}]",
				percent_local as u32,
				local_hits,
				local_fetch_attempts,
				percent_shared as u32,
				shared_hits,
				shared_fetch_attempts
			)
		}
	}
}

/// Hit statistics for the node cache and the value cache.
#[derive(Default)]
struct TrieHitStats {
	node_cache: HitStats,
	value_cache: HitStats,
}

/// A trie node cached in the [`LocalTrieCache`].
pub(crate) struct NodeCached<H> {
	/// The cached node.
	pub node: NodeOwned<H>,
	/// Whether this node was fetched from the shared cache.
	pub is_from_shared_cache: bool,
}

impl<H> NodeCached<H> {
	/// Returns the number of bytes the node allocates on the heap.
	fn heap_size(&self) -> usize {
		// `size_in_bytes` also counts the `NodeOwned` struct itself, which lives
		// inline in the map, so subtract it here.
		self.node.size_in_bytes() - std::mem::size_of::<NodeOwned<H>>()
	}
}

type NodeCacheMap<H> = LruMap<H, NodeCached<H>, LocalNodeCacheLimiter, schnellru::RandomState>;

type ValueCacheMap<H> = LruMap<
	ValueCacheKey<H>,
	CachedValue<H>,
	LocalValueCacheLimiter,
	BuildNoHashHasher<ValueCacheKey<H>>,
>;

type ValueAccessSet =
	LruMap<ValueCacheKeyHash, (), schnellru::ByLength, BuildNoHashHasher<ValueCacheKeyHash>>;

/// The local trie cache.
///
/// It caches trie nodes and values accessed by one user of the state and pushes
/// them into the [`SharedTrieCache`] when it is dropped.
pub struct LocalTrieCache<H: Hasher> {
	/// The shared trie cache that created this instance.
	shared: SharedTrieCache<H>,
	/// The local cache for trie nodes.
	node_cache: Mutex<NodeCacheMap<H::Out>>,
	/// The local cache for trie values.
	value_cache: Mutex<ValueCacheMap<H::Out>>,
	/// Keeps track of all the values accessed in the shared cache.
	///
	/// This is used to promote these values in the shared cache when this local
	/// instance is merged back on drop.
	shared_value_cache_access: Mutex<ValueAccessSet>,
	stats: TrieHitStats,
}

impl<H: Hasher> LocalTrieCache<H> {
	/// Returns a [`trie_db::TrieCache`] compatible instance for reading the trie
	/// with the given `storage_root`.
	pub fn as_trie_db_cache(&self, storage_root: H::Out) -> TrieCache<'_, H> {
		let value_cache = ValueCache::ForStorageRoot {
			storage_root,
			local_value_cache: self.value_cache.lock(),
			shared_value_cache_access: self.shared_value_cache_access.lock(),
			buffered_value: None,
		};

		TrieCache {
			shared_cache: self.shared.clone(),
			local_cache: self.node_cache.lock(),
			value_cache,
			stats: &self.stats,
		}
	}

	/// Returns a [`trie_db::TrieCache`] compatible instance for mutating a trie.
	///
	/// After the mutations are finished, [`TrieCache::merge_into`] should be
	/// called with the new storage root, so the cached values become available to
	/// readers of the new state.
	pub fn as_trie_db_mut_cache(&self) -> TrieCache<'_, H> {
		TrieCache {
			shared_cache: self.shared.clone(),
			local_cache: self.node_cache.lock(),
			value_cache: ValueCache::Fresh(Default::default()),
			stats: &self.stats,
		}
	}
}
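
// Read-path sketch (illustrative; it mirrors `basic_cache_works` in the tests
// below, and `db`, `storage_root` and `Layout` are assumed to exist):
//
//     let shared = SharedTrieCache::<sp_core::Blake2Hasher>::new(CacheSize::new(1024 * 1024));
//     let local = shared.local_cache();
//     {
//         let mut cache = local.as_trie_db_cache(storage_root);
//         let trie = TrieDBBuilder::<Layout>::new(&db, &storage_root)
//             .with_cache(&mut cache)
//             .build();
//         let _value = trie.get(b"key1");
//     }
//     drop(local); // merges the accessed nodes and values into the shared cache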

impl<H: Hasher> Drop for LocalTrieCache<H> {
	fn drop(&mut self) {
		tracing::debug!(
			target: LOG_TARGET,
			"Local node trie cache dropped: {}",
			self.stats.node_cache
		);

		tracing::debug!(
			target: LOG_TARGET,
			"Local value trie cache dropped: {}",
			self.stats.value_cache
		);

		let mut shared_inner = match self.shared.write_lock_inner() {
			Some(inner) => inner,
			None => {
				tracing::warn!(
					target: LOG_TARGET,
					"Timeout while trying to acquire a write lock for the shared trie cache"
				);
				return
			},
		};

		shared_inner.node_cache_mut().update(self.node_cache.get_mut().drain());

		shared_inner.value_cache_mut().update(
			self.value_cache.get_mut().drain(),
			self.shared_value_cache_access.get_mut().drain().map(|(key, ())| key),
		);
	}
}
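
// Lifecycle sketch (illustrative): a local cache batches its updates and only
// publishes them in the `Drop` impl above, so flushing is simply:
//
//     let local = shared.local_cache();
//     // ... read or mutate tries through `local` ...
//     drop(local); // takes the shared write lock (or times out) and merges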

/// The value cache used by the [`TrieCache`].
enum ValueCache<'a, H: Hasher> {
	/// A fresh cache that is not yet bound to any storage root, used while
	/// building or mutating a trie.
	Fresh(HashMap<Arc<[u8]>, CachedValue<H::Out>>),
	/// A cache bound to a specific storage root.
	ForStorageRoot {
		shared_value_cache_access: MutexGuard<'a, ValueAccessSet>,
		local_value_cache: MutexGuard<'a, ValueCacheMap<H::Out>>,
		storage_root: H::Out,
		/// A value fetched from the shared cache in the last [`Self::get`] call.
		///
		/// The shared cache only hands out clones, so the clone is buffered here
		/// to be able to return a reference to it.
		buffered_value: Option<CachedValue<H::Out>>,
	},
}

impl<H: Hasher> ValueCache<'_, H> {
	/// Get the value for the given `key`.
	fn get(
		&mut self,
		key: &[u8],
		shared_cache: &SharedTrieCache<H>,
		stats: &HitStats,
	) -> Option<&CachedValue<H::Out>> {
		stats.local_fetch_attempts.fetch_add(1, Ordering::Relaxed);

		match self {
			Self::Fresh(map) =>
				if let Some(value) = map.get(key) {
					stats.local_hits.fetch_add(1, Ordering::Relaxed);
					Some(value)
				} else {
					None
				},
			Self::ForStorageRoot {
				local_value_cache,
				shared_value_cache_access,
				storage_root,
				buffered_value,
			} => {
				let hash = ValueCacheKey::hash_data(key, storage_root);

				// Always consult the local cache first: it may hold values that
				// were overwritten locally and thus differ from the shared cache.
				if let Some(value) = local_value_cache
					.peek_by_hash(hash.raw(), |existing_key, _| {
						existing_key.is_eq(storage_root, key)
					}) {
					stats.local_hits.fetch_add(1, Ordering::Relaxed);

					return Some(value)
				}

				stats.shared_fetch_attempts.fetch_add(1, Ordering::Relaxed);
				if let Some(value) = shared_cache.peek_value_by_hash(hash, storage_root, key) {
					stats.shared_hits.fetch_add(1, Ordering::Relaxed);
					shared_value_cache_access.insert(hash, ());
					*buffered_value = Some(value.clone());
					return buffered_value.as_ref()
				}

				None
			},
		}
	}

	/// Insert some new `value` under the given `key`.
	fn insert(&mut self, key: &[u8], value: CachedValue<H::Out>) {
		match self {
			Self::Fresh(map) => {
				map.insert(key.into(), value);
			},
			Self::ForStorageRoot { local_value_cache, storage_root, .. } => {
				local_value_cache.insert(ValueCacheRef::new(key, *storage_root), value);
			},
		}
	}
}

/// The actual [`trie_db::TrieCache`] implementation.
///
/// If this instance was created through [`LocalTrieCache::as_trie_db_mut_cache`],
/// it needs to be merged back with [`Self::merge_into`] after all operations are
/// done.
pub struct TrieCache<'a, H: Hasher> {
	shared_cache: SharedTrieCache<H>,
	local_cache: MutexGuard<'a, NodeCacheMap<H::Out>>,
	value_cache: ValueCache<'a, H>,
	stats: &'a TrieHitStats,
}

impl<'a, H: Hasher> TrieCache<'a, H> {
	/// Merge this cache into the given [`LocalTrieCache`].
	///
	/// `storage_root` is the new storage root obtained after finishing all trie
	/// mutations. This is a no-op unless the instance was created through
	/// [`LocalTrieCache::as_trie_db_mut_cache`].
	pub fn merge_into(self, local: &LocalTrieCache<H>, storage_root: H::Out) {
		let ValueCache::Fresh(cache) = self.value_cache else { return };

		if !cache.is_empty() {
			let mut value_cache = local.value_cache.lock();
			let partial_hash = ValueCacheKey::hash_partial_data(&storage_root);

			cache.into_iter().for_each(|(k, v)| {
				let hash = ValueCacheKeyHash::from_hasher_and_storage_key(partial_hash.clone(), &k);
				let k = ValueCacheRef { storage_root, storage_key: &k, hash };
				value_cache.insert(k, v);
			});
		}
	}
}
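
// Write-path sketch (illustrative; it mirrors `trie_db_mut_cache_works` in the
// tests below, and `db`, `new_root` and `Layout` are assumed to exist):
//
//     let local = shared.local_cache();
//     let mut cache = local.as_trie_db_mut_cache();
//     {
//         let mut trie = TrieDBMutBuilder::<Layout>::from_existing(&mut db, &mut new_root)
//             .with_cache(&mut cache)
//             .build();
//         trie.insert(b"key", b"value").unwrap();
//     }
//     // Make the cached values available under the *new* storage root.
//     cache.merge_into(&local, new_root);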

impl<'a, H: Hasher> trie_db::TrieCache<NodeCodec<H>> for TrieCache<'a, H> {
	fn get_or_insert_node(
		&mut self,
		hash: H::Out,
		fetch_node: &mut dyn FnMut() -> trie_db::Result<NodeOwned<H::Out>, H::Out, Error<H::Out>>,
	) -> trie_db::Result<&NodeOwned<H::Out>, H::Out, Error<H::Out>> {
		let mut is_local_cache_hit = true;
		self.stats.node_cache.local_fetch_attempts.fetch_add(1, Ordering::Relaxed);

		// First, try to grab the node from the local cache.
		let node = self.local_cache.get_or_insert_fallible(hash, || {
			is_local_cache_hit = false;

			// It was not in the local cache; try the shared cache.
			self.stats.node_cache.shared_fetch_attempts.fetch_add(1, Ordering::Relaxed);
			if let Some(node) = self.shared_cache.peek_node(&hash) {
				self.stats.node_cache.shared_hits.fetch_add(1, Ordering::Relaxed);
				tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from shared cache");

				return Ok(NodeCached::<H::Out> { node: node.clone(), is_from_shared_cache: true })
			}

			// It was not in the shared cache either; fetch it from the database.
			match fetch_node() {
				Ok(node) => {
					tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from database");
					Ok(NodeCached::<H::Out> { node, is_from_shared_cache: false })
				},
				Err(error) => {
					tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from database failed");
					Err(error)
				},
			}
		});

		if is_local_cache_hit {
			tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from local cache");
			self.stats.node_cache.local_hits.fetch_add(1, Ordering::Relaxed);
		}

		Ok(&node?
			.expect("you can always insert at least one element into the local cache; qed")
			.node)
	}

	fn get_node(&mut self, hash: &H::Out) -> Option<&NodeOwned<H::Out>> {
		let mut is_local_cache_hit = true;
		self.stats.node_cache.local_fetch_attempts.fetch_add(1, Ordering::Relaxed);

		// First, try to grab the node from the local cache.
		let cached_node = self.local_cache.get_or_insert_fallible(*hash, || {
			is_local_cache_hit = false;

			// It was not in the local cache; try the shared cache.
			self.stats.node_cache.shared_fetch_attempts.fetch_add(1, Ordering::Relaxed);
			if let Some(node) = self.shared_cache.peek_node(hash) {
				self.stats.node_cache.shared_hits.fetch_add(1, Ordering::Relaxed);
				tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from shared cache");

				Ok(NodeCached::<H::Out> { node: node.clone(), is_from_shared_cache: true })
			} else {
				tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from cache failed");

				Err(())
			}
		});

		if is_local_cache_hit {
			tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from local cache");
			self.stats.node_cache.local_hits.fetch_add(1, Ordering::Relaxed);
		}

		match cached_node {
			Ok(Some(cached_node)) => Some(&cached_node.node),
			Ok(None) => {
				unreachable!(
					"you can always insert at least one element into the local cache; qed"
				);
			},
			Err(()) => None,
		}
	}

	fn lookup_value_for_key(&mut self, key: &[u8]) -> Option<&CachedValue<H::Out>> {
		let res = self.value_cache.get(key, &self.shared_cache, &self.stats.value_cache);

		tracing::trace!(
			target: LOG_TARGET,
			key = ?sp_core::hexdisplay::HexDisplay::from(&key),
			found = res.is_some(),
			"Looked up value for key",
		);

		res
	}

	fn cache_value_for_key(&mut self, key: &[u8], data: CachedValue<H::Out>) {
		tracing::trace!(
			target: LOG_TARGET,
			key = ?sp_core::hexdisplay::HexDisplay::from(&key),
			"Caching value for key",
		);

		self.value_cache.insert(key, data);
	}
}

#[cfg(test)]
mod tests {
	use super::*;
	use trie_db::{Bytes, Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut};

	type MemoryDB = crate::MemoryDB<sp_core::Blake2Hasher>;
	type Layout = crate::LayoutV1<sp_core::Blake2Hasher>;
	type Cache = super::SharedTrieCache<sp_core::Blake2Hasher>;
	type Recorder = crate::recorder::Recorder<sp_core::Blake2Hasher>;

	const TEST_DATA: &[(&[u8], &[u8])] =
		&[(b"key1", b"val1"), (b"key2", &[2; 64]), (b"key3", b"val3"), (b"key4", &[4; 64])];
	const CACHE_SIZE_RAW: usize = 1024 * 10;
	const CACHE_SIZE: CacheSize = CacheSize::new(CACHE_SIZE_RAW);

	fn create_trie() -> (MemoryDB, TrieHash<Layout>) {
		let mut db = MemoryDB::default();
		let mut root = Default::default();

		{
			let mut trie = TrieDBMutBuilder::<Layout>::new(&mut db, &mut root).build();
			for (k, v) in TEST_DATA {
				trie.insert(k, v).expect("Inserts data");
			}
		}

		(db, root)
	}

	#[test]
	fn basic_cache_works() {
		let (db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE);
		let local_cache = shared_cache.local_cache();

		{
			let mut cache = local_cache.as_trie_db_cache(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();
			assert_eq!(TEST_DATA[0].1.to_vec(), trie.get(TEST_DATA[0].0).unwrap().unwrap());
		}

		// The local cache wasn't dropped yet, so the shared caches should be empty.
		assert!(shared_cache.read_lock_inner().value_cache().lru.is_empty());
		assert!(shared_cache.read_lock_inner().node_cache().lru.is_empty());

		drop(local_cache);

		// Now the accessed node and value should be in the shared cache.
		assert!(shared_cache.read_lock_inner().node_cache().lru.len() >= 1);
		let cached_data = shared_cache
			.read_lock_inner()
			.value_cache()
			.lru
			.peek(&ValueCacheKey::new_value(TEST_DATA[0].0, root))
			.unwrap()
			.clone();
		assert_eq!(Bytes::from(TEST_DATA[0].1.to_vec()), cached_data.data().flatten().unwrap());

		let fake_data = Bytes::from(&b"fake_data"[..]);

		let local_cache = shared_cache.local_cache();
		shared_cache.write_lock_inner().unwrap().value_cache_mut().lru.insert(
			ValueCacheKey::new_value(TEST_DATA[1].0, root),
			(fake_data.clone(), Default::default()).into(),
		);

		{
			let mut cache = local_cache.as_trie_db_cache(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();

			// We should get the "fake_data", proving the lookup was served from the
			// shared cache and not from the database.
			assert_eq!(b"fake_data".to_vec(), trie.get(TEST_DATA[1].0).unwrap().unwrap());
		}
	}

	#[test]
	fn trie_db_mut_cache_works() {
		let (mut db, root) = create_trie();

		let new_key = b"new_key".to_vec();
		// Use a long value so it is not inlined into the node.
		let new_value = vec![23; 64];

		let shared_cache = Cache::new(CACHE_SIZE);
		let mut new_root = root;

		{
			let local_cache = shared_cache.local_cache();

			let mut cache = local_cache.as_trie_db_mut_cache();

			{
				let mut trie = TrieDBMutBuilder::<Layout>::from_existing(&mut db, &mut new_root)
					.with_cache(&mut cache)
					.build();

				trie.insert(&new_key, &new_value).unwrap();
			}

			cache.merge_into(&local_cache, new_root);
		}

		// After the local cache is dropped, the changes should have been merged
		// into the shared cache under the new storage root.
		let cached_data = shared_cache
			.read_lock_inner()
			.value_cache()
			.lru
			.peek(&ValueCacheKey::new_value(new_key, new_root))
			.unwrap()
			.clone();
		assert_eq!(Bytes::from(new_value), cached_data.data().flatten().unwrap());
	}

	#[test]
	fn trie_db_cache_and_recorder_work_together() {
		let (db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE);

		for i in 0..5 {
			// Clear the caches mid-way to also exercise the uncached paths.
			if i == 2 {
				shared_cache.reset_node_cache();
			} else if i == 3 {
				shared_cache.reset_value_cache();
			}

			let local_cache = shared_cache.local_cache();
			let recorder = Recorder::default();

			{
				let mut cache = local_cache.as_trie_db_cache(root);
				let mut recorder = recorder.as_trie_recorder(root);
				let trie = TrieDBBuilder::<Layout>::new(&db, &root)
					.with_cache(&mut cache)
					.with_recorder(&mut recorder)
					.build();

				for (key, value) in TEST_DATA {
					assert_eq!(*value, trie.get(&key).unwrap().unwrap());
				}
			}

			// The recorded proof must be complete, even when reads were served
			// through the cache.
			let storage_proof = recorder.drain_storage_proof();
			let memory_db: MemoryDB = storage_proof.into_memory_db();

			{
				let trie = TrieDBBuilder::<Layout>::new(&memory_db, &root).build();

				for (key, value) in TEST_DATA {
					assert_eq!(*value, trie.get(&key).unwrap().unwrap());
				}
			}
		}
	}

	#[test]
	fn trie_db_mut_cache_and_recorder_work_together() {
		const DATA_TO_ADD: &[(&[u8], &[u8])] = &[(b"key11", &[45; 78]), (b"key33", &[78; 89])];

		let (db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE);

		for i in 0..5 {
			// Clear the caches mid-way to also exercise the uncached paths.
			if i == 2 {
				shared_cache.reset_node_cache();
			} else if i == 3 {
				shared_cache.reset_value_cache();
			}

			let recorder = Recorder::default();
			let local_cache = shared_cache.local_cache();
			let mut new_root = root;

			{
				let mut db = db.clone();
				let mut cache = local_cache.as_trie_db_cache(root);
				let mut recorder = recorder.as_trie_recorder(root);
				let mut trie = TrieDBMutBuilder::<Layout>::from_existing(&mut db, &mut new_root)
					.with_cache(&mut cache)
					.with_recorder(&mut recorder)
					.build();

				for (key, value) in DATA_TO_ADD {
					trie.insert(key, value).unwrap();
				}
			}

			// Replaying the insertions on top of the recorded proof must yield the
			// same root as the cached run.
			let storage_proof = recorder.drain_storage_proof();
			let mut memory_db: MemoryDB = storage_proof.into_memory_db();
			let mut proof_root = root;

			{
				let mut trie =
					TrieDBMutBuilder::<Layout>::from_existing(&mut memory_db, &mut proof_root)
						.build();

				for (key, value) in DATA_TO_ADD {
					trie.insert(key, value).unwrap();
				}
			}

			assert_eq!(new_root, proof_root)
		}
	}

	#[test]
	fn cache_lru_works() {
		let (db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE);

		{
			let local_cache = shared_cache.local_cache();

			let mut cache = local_cache.as_trie_db_cache(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();

			for (k, _) in TEST_DATA {
				trie.get(k).unwrap().unwrap();
			}
		}

		// All accessed values should now be in the shared value cache.
		assert!(shared_cache
			.read_lock_inner()
			.value_cache()
			.lru
			.iter()
			.map(|d| d.0)
			.all(|l| TEST_DATA.iter().any(|d| &*l.storage_key == d.0)));

		// Run this twice: the first round is served from the value cache, the
		// second round (after the reset below) has to walk the trie nodes again.
		for _ in 0..2 {
			{
				let local_cache = shared_cache.local_cache();

				let mut cache = local_cache.as_trie_db_cache(root);
				let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();

				for (k, _) in TEST_DATA.iter().take(2) {
					trie.get(k).unwrap().unwrap();
				}
			}

			// The just accessed keys should now be the most recently used entries
			// of the shared value cache.
			assert!(shared_cache
				.read_lock_inner()
				.value_cache()
				.lru
				.iter()
				.take(2)
				.map(|d| d.0)
				.all(|l| { TEST_DATA.iter().take(2).any(|d| &*l.storage_key == d.0) }));

			// Reset the value cache, so the next round accesses the nodes again.
			shared_cache.reset_value_cache();
		}

		let most_recently_used_nodes = shared_cache
			.read_lock_inner()
			.node_cache()
			.lru
			.iter()
			.map(|d| *d.0)
			.collect::<Vec<_>>();

		{
			let local_cache = shared_cache.local_cache();

			let mut cache = local_cache.as_trie_db_cache(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();

			for (k, _) in TEST_DATA.iter().skip(2) {
				trie.get(k).unwrap().unwrap();
			}
		}

		// Accessing the remaining keys should have changed the order of the most
		// recently used nodes in the shared node cache.
		assert_ne!(
			most_recently_used_nodes,
			shared_cache
				.read_lock_inner()
				.node_cache()
				.lru
				.iter()
				.map(|d| *d.0)
				.collect::<Vec<_>>()
		);
	}

	#[test]
	fn cache_respects_bounds() {
		let (mut db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE);
		{
			let local_cache = shared_cache.local_cache();

			let mut new_root = root;

			{
				let mut cache = local_cache.as_trie_db_cache(root);
				{
					let mut trie =
						TrieDBMutBuilder::<Layout>::from_existing(&mut db, &mut new_root)
							.with_cache(&mut cache)
							.build();

					let value = vec![10u8; 100];
					// Insert roughly twice as much data as the cache can hold.
					for i in 0..CACHE_SIZE_RAW / 100 * 2 {
						trie.insert(format!("key{}", i).as_bytes(), &value).unwrap();
					}
				}

				cache.merge_into(&local_cache, new_root);
			}
		}

		assert!(shared_cache.used_memory_size() < CACHE_SIZE_RAW);
	}
}