sp_trie/cache/
mod.rs

1// This file is part of Substrate.
2
3// Copyright (C) Parity Technologies (UK) Ltd.
4// SPDX-License-Identifier: Apache-2.0
5
6// Licensed under the Apache License, Version 2.0 (the "License");
7// you may not use this file except in compliance with the License.
8// You may obtain a copy of the License at
9//
10// 	http://www.apache.org/licenses/LICENSE-2.0
11//
12// Unless required by applicable law or agreed to in writing, software
13// distributed under the License is distributed on an "AS IS" BASIS,
14// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15// See the License for the specific language governing permissions and
16// limitations under the License.
17
18//! Trie Cache
19//!
20//! Provides an implementation of the [`TrieCache`](trie_db::TrieCache) trait.
21//! The implementation is split into three types [`SharedTrieCache`], [`LocalTrieCache`] and
22//! [`TrieCache`]. The [`SharedTrieCache`] is the instance that should be kept around for the entire
23//! lifetime of the node. It will store all cached trie nodes and values on a global level. Then
24//! there is the [`LocalTrieCache`] that should be kept around per state instance requested from the
25//! backend. As there are very likely multiple accesses to the state per instance, this
26//! [`LocalTrieCache`] is used to cache the nodes and the values before they are merged back to the
27//! shared instance. Last but not least there is the [`TrieCache`] that is being used per access to
28//! the state. It will use the [`SharedTrieCache`] and the [`LocalTrieCache`] to fulfill cache
29//! requests. If both of them don't provide the requested data it will be inserted into the
30//! [`LocalTrieCache`] and then later into the [`SharedTrieCache`].
31//!
32//! The [`SharedTrieCache`] is bound to some maximum number of bytes. It is ensured that it never
33//! runs above this limit. However as long as data is cached inside a [`LocalTrieCache`] it isn't
34//! taken into account when limiting the [`SharedTrieCache`]. This means that for the lifetime of a
35//! [`LocalTrieCache`] the actual memory usage could be above the allowed maximum.
36
37use crate::{Error, NodeCodec};
38use hash_db::Hasher;
39use nohash_hasher::BuildNoHashHasher;
40use parking_lot::{Mutex, MutexGuard};
41use schnellru::LruMap;
42use shared_cache::{ValueCacheKey, ValueCacheRef};
43use std::{
44	collections::HashMap,
45	sync::{
46		atomic::{AtomicU64, Ordering},
47		Arc,
48	},
49	time::Duration,
50};
51use trie_db::{node::NodeOwned, CachedValue};
52
53mod shared_cache;
54
55pub use shared_cache::SharedTrieCache;
56
57use self::shared_cache::ValueCacheKeyHash;
58
/// The logging target used by all messages emitted by this module.
const LOG_TARGET: &str = "trie-cache";

/// The maximum amount of time we'll wait trying to acquire the shared cache lock
/// when the local cache is dropped and synchronized with the shared cache.
///
/// This is just a failsafe; normally this should never trigger.
const SHARED_CACHE_WRITE_LOCK_TIMEOUT: Duration = Duration::from_millis(100);

/// The maximum number of existing keys in the shared cache that a single local cache
/// can promote to the front of the LRU cache in one go.
///
/// If we have a big shared cache and the local cache hits all of those keys we don't
/// want to spend forever bumping all of them.
const SHARED_NODE_CACHE_MAX_PROMOTED_KEYS: u32 = 1792;
/// Same as [`SHARED_NODE_CACHE_MAX_PROMOTED_KEYS`].
const SHARED_VALUE_CACHE_MAX_PROMOTED_KEYS: u32 = 1792;

/// The maximum portion of the shared cache (in percent) that a single local
/// cache can replace in one go.
///
/// We don't want a single local cache instance to have the ability to replace
/// everything in the shared cache.
const SHARED_NODE_CACHE_MAX_REPLACE_PERCENT: usize = 33;
/// Same as [`SHARED_NODE_CACHE_MAX_REPLACE_PERCENT`].
const SHARED_VALUE_CACHE_MAX_REPLACE_PERCENT: usize = 33;

/// The maximum inline capacity of the local cache, in bytes.
///
/// This is just an upper limit; since the maps are resized in powers of two
/// their actual size will most likely not exactly match this.
const LOCAL_NODE_CACHE_MAX_INLINE_SIZE: usize = 512 * 1024;
/// Same as [`LOCAL_NODE_CACHE_MAX_INLINE_SIZE`].
const LOCAL_VALUE_CACHE_MAX_INLINE_SIZE: usize = 512 * 1024;

/// The maximum size of the memory allocated on the heap by the local cache, in bytes.
///
/// The size of the node cache should always be bigger than the value cache. The value
/// cache is only holding weak references to the actual values found in the nodes and
/// we account for the size of the node as part of the node cache.
const LOCAL_NODE_CACHE_MAX_HEAP_SIZE: usize = 8 * 1024 * 1024;
/// Same as [`LOCAL_NODE_CACHE_MAX_HEAP_SIZE`].
const LOCAL_VALUE_CACHE_MAX_HEAP_SIZE: usize = 2 * 1024 * 1024;
101
/// The size limit (in bytes) of the shared cache.
#[derive(Debug, Clone, Copy)]
pub struct CacheSize(usize);

impl CacheSize {
	/// A cache size `bytes` big.
	pub const fn new(bytes: usize) -> Self {
		Self(bytes)
	}

	/// An unlimited cache size.
	pub const fn unlimited() -> Self {
		Self(usize::MAX)
	}
}
117
118/// A limiter for the local node cache. This makes sure the local cache doesn't grow too big.
119#[derive(Default)]
120pub struct LocalNodeCacheLimiter {
121	/// The current size (in bytes) of data allocated by this cache on the heap.
122	///
123	/// This doesn't include the size of the map itself.
124	current_heap_size: usize,
125}
126
127impl<H> schnellru::Limiter<H, NodeCached<H>> for LocalNodeCacheLimiter
128where
129	H: AsRef<[u8]> + std::fmt::Debug,
130{
131	type KeyToInsert<'a> = H;
132	type LinkType = u32;
133
134	#[inline]
135	fn is_over_the_limit(&self, length: usize) -> bool {
136		// Only enforce the limit if there's more than one element to make sure
137		// we can always add a new element to the cache.
138		if length <= 1 {
139			return false
140		}
141
142		self.current_heap_size > LOCAL_NODE_CACHE_MAX_HEAP_SIZE
143	}
144
145	#[inline]
146	fn on_insert<'a>(
147		&mut self,
148		_length: usize,
149		key: H,
150		cached_node: NodeCached<H>,
151	) -> Option<(H, NodeCached<H>)> {
152		self.current_heap_size += cached_node.heap_size();
153		Some((key, cached_node))
154	}
155
156	#[inline]
157	fn on_replace(
158		&mut self,
159		_length: usize,
160		_old_key: &mut H,
161		_new_key: H,
162		old_node: &mut NodeCached<H>,
163		new_node: &mut NodeCached<H>,
164	) -> bool {
165		debug_assert_eq!(_old_key.as_ref().len(), _new_key.as_ref().len());
166		self.current_heap_size =
167			self.current_heap_size + new_node.heap_size() - old_node.heap_size();
168		true
169	}
170
171	#[inline]
172	fn on_removed(&mut self, _key: &mut H, cached_node: &mut NodeCached<H>) {
173		self.current_heap_size -= cached_node.heap_size();
174	}
175
176	#[inline]
177	fn on_cleared(&mut self) {
178		self.current_heap_size = 0;
179	}
180
181	#[inline]
182	fn on_grow(&mut self, new_memory_usage: usize) -> bool {
183		new_memory_usage <= LOCAL_NODE_CACHE_MAX_INLINE_SIZE
184	}
185}
186
187/// A limiter for the local value cache. This makes sure the local cache doesn't grow too big.
188#[derive(Default)]
189pub struct LocalValueCacheLimiter {
190	/// The current size (in bytes) of data allocated by this cache on the heap.
191	///
192	/// This doesn't include the size of the map itself.
193	current_heap_size: usize,
194}
195
196impl<H> schnellru::Limiter<ValueCacheKey<H>, CachedValue<H>> for LocalValueCacheLimiter
197where
198	H: AsRef<[u8]>,
199{
200	type KeyToInsert<'a> = ValueCacheRef<'a, H>;
201	type LinkType = u32;
202
203	#[inline]
204	fn is_over_the_limit(&self, length: usize) -> bool {
205		// Only enforce the limit if there's more than one element to make sure
206		// we can always add a new element to the cache.
207		if length <= 1 {
208			return false
209		}
210
211		self.current_heap_size > LOCAL_VALUE_CACHE_MAX_HEAP_SIZE
212	}
213
214	#[inline]
215	fn on_insert(
216		&mut self,
217		_length: usize,
218		key: Self::KeyToInsert<'_>,
219		value: CachedValue<H>,
220	) -> Option<(ValueCacheKey<H>, CachedValue<H>)> {
221		self.current_heap_size += key.storage_key.len();
222		Some((key.into(), value))
223	}
224
225	#[inline]
226	fn on_replace(
227		&mut self,
228		_length: usize,
229		_old_key: &mut ValueCacheKey<H>,
230		_new_key: ValueCacheRef<H>,
231		_old_value: &mut CachedValue<H>,
232		_new_value: &mut CachedValue<H>,
233	) -> bool {
234		debug_assert_eq!(_old_key.storage_key.len(), _new_key.storage_key.len());
235		true
236	}
237
238	#[inline]
239	fn on_removed(&mut self, key: &mut ValueCacheKey<H>, _: &mut CachedValue<H>) {
240		self.current_heap_size -= key.storage_key.len();
241	}
242
243	#[inline]
244	fn on_cleared(&mut self) {
245		self.current_heap_size = 0;
246	}
247
248	#[inline]
249	fn on_grow(&mut self, new_memory_usage: usize) -> bool {
250		new_memory_usage <= LOCAL_VALUE_CACHE_MAX_INLINE_SIZE
251	}
252}
253
/// A struct to gather hit/miss stats to aid in debugging the performance of the cache.
#[derive(Default)]
struct HitStats {
	shared_hits: AtomicU64,
	shared_fetch_attempts: AtomicU64,
	local_hits: AtomicU64,
	local_fetch_attempts: AtomicU64,
}

impl std::fmt::Display for HitStats {
	fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
		let shared_hits = self.shared_hits.load(Ordering::Relaxed);
		let shared_attempts = self.shared_fetch_attempts.load(Ordering::Relaxed);
		let local_hits = self.local_hits.load(Ordering::Relaxed);
		let local_attempts = self.local_fetch_attempts.load(Ordering::Relaxed);

		// Nothing was fetched through this cache yet, so there's nothing to report.
		if shared_attempts == 0 && local_hits == 0 {
			return write!(fmt, "empty")
		}

		// Integer percentage of `hits` relative to `attempts`.
		let percent = |hits: u64, attempts: u64| (hits as f32 / attempts as f32 * 100.0) as u32;

		write!(
			fmt,
			"local hit rate = {}% [{}/{}], shared hit rate = {}% [{}/{}]",
			percent(local_hits, local_attempts),
			local_hits,
			local_attempts,
			percent(shared_hits, shared_attempts),
			shared_hits,
			shared_attempts
		)
	}
}
287
/// A struct to gather hit/miss stats for the node cache and the value cache.
#[derive(Default)]
struct TrieHitStats {
	/// Hit/miss stats for trie node lookups.
	node_cache: HitStats,
	/// Hit/miss stats for trie value lookups.
	value_cache: HitStats,
}
294
/// An internal struct to store the cached trie nodes.
pub(crate) struct NodeCached<H> {
	/// The cached node.
	pub node: NodeOwned<H>,
	/// Whether this node was fetched from the shared cache or not.
	pub is_from_shared_cache: bool,
}

impl<H> NodeCached<H> {
	/// Returns the number of bytes allocated on the heap by this node.
	///
	/// The inline size of the `NodeOwned` value itself is subtracted, since only
	/// the heap portion matters for the local cache's memory accounting.
	fn heap_size(&self) -> usize {
		self.node.size_in_bytes() - std::mem::size_of::<NodeOwned<H>>()
	}
}
309
/// The LRU map used by the local cache to store trie nodes, keyed by the node hash.
type NodeCacheMap<H> = LruMap<H, NodeCached<H>, LocalNodeCacheLimiter, schnellru::RandomState>;

/// The LRU map used by the local cache to store trie values.
///
/// A pass-through hasher is used here because [`ValueCacheKey`] carries a
/// precomputed hash (see how `peek_by_hash`/`hash_data` are used below).
type ValueCacheMap<H> = LruMap<
	ValueCacheKey<H>,
	CachedValue<H>,
	LocalValueCacheLimiter,
	BuildNoHashHasher<ValueCacheKey<H>>,
>;

/// The LRU set used to remember which keys of the shared value cache were accessed.
type ValueAccessSet =
	LruMap<ValueCacheKeyHash, (), schnellru::ByLength, BuildNoHashHasher<ValueCacheKeyHash>>;
321
/// The local trie cache.
///
/// This cache should be used per state instance created by the backend. One state instance is
/// referring to the state of one block. It will cache all the accesses that are done to the state
/// which could not be fulfilled by the [`SharedTrieCache`]. These locally cached items are merged
/// back to the shared trie cache when this instance is dropped.
///
/// When using [`Self::as_trie_db_cache`] or [`Self::as_trie_db_mut_cache`], it will lock Mutexes.
/// So, it is important that these methods are not called multiple times, because they otherwise
/// deadlock.
pub struct LocalTrieCache<H: Hasher> {
	/// The shared trie cache that created this instance.
	shared: SharedTrieCache<H>,

	/// The local cache for the trie nodes.
	node_cache: Mutex<NodeCacheMap<H::Out>>,

	/// The local cache for the values.
	value_cache: Mutex<ValueCacheMap<H::Out>>,

	/// Keeps track of all values accessed in the shared cache.
	///
	/// This will be used to ensure that these nodes are brought to the front of the lru when this
	/// local instance is merged back to the shared cache. This can actually lead to collision when
	/// two [`ValueCacheKey`]s with different storage roots and keys map to the same hash. However,
	/// as we only use this set to update the lru position it is fine, even if we bring the wrong
	/// value to the top. The important part is that we always get the correct value from the value
	/// cache for a given key.
	shared_value_cache_access: Mutex<ValueAccessSet>,

	/// Hit/miss statistics gathered over the lifetime of this instance and
	/// logged when it is dropped.
	stats: TrieHitStats,
}
354
355impl<H: Hasher> LocalTrieCache<H> {
356	/// Return self as a [`TrieDB`](trie_db::TrieDB) compatible cache.
357	///
358	/// The given `storage_root` needs to be the storage root of the trie this cache is used for.
359	pub fn as_trie_db_cache(&self, storage_root: H::Out) -> TrieCache<'_, H> {
360		let value_cache = ValueCache::ForStorageRoot {
361			storage_root,
362			local_value_cache: self.value_cache.lock(),
363			shared_value_cache_access: self.shared_value_cache_access.lock(),
364			buffered_value: None,
365		};
366
367		TrieCache {
368			shared_cache: self.shared.clone(),
369			local_cache: self.node_cache.lock(),
370			value_cache,
371			stats: &self.stats,
372		}
373	}
374
375	/// Return self as [`TrieDBMut`](trie_db::TrieDBMut) compatible cache.
376	///
377	/// After finishing all operations with [`TrieDBMut`](trie_db::TrieDBMut) and having obtained
378	/// the new storage root, [`TrieCache::merge_into`] should be called to update this local
379	/// cache instance. If the function is not called, cached data is just thrown away and not
380	/// propagated to the shared cache. So, accessing these new items will be slower, but nothing
381	/// would break because of this.
382	pub fn as_trie_db_mut_cache(&self) -> TrieCache<'_, H> {
383		TrieCache {
384			shared_cache: self.shared.clone(),
385			local_cache: self.node_cache.lock(),
386			value_cache: ValueCache::Fresh(Default::default()),
387			stats: &self.stats,
388		}
389	}
390}
391
392impl<H: Hasher> Drop for LocalTrieCache<H> {
393	fn drop(&mut self) {
394		tracing::debug!(
395			target: LOG_TARGET,
396			"Local node trie cache dropped: {}",
397			self.stats.node_cache
398		);
399
400		tracing::debug!(
401			target: LOG_TARGET,
402			"Local value trie cache dropped: {}",
403			self.stats.value_cache
404		);
405
406		let mut shared_inner = match self.shared.write_lock_inner() {
407			Some(inner) => inner,
408			None => {
409				tracing::warn!(
410					target: LOG_TARGET,
411					"Timeout while trying to acquire a write lock for the shared trie cache"
412				);
413				return
414			},
415		};
416
417		shared_inner.node_cache_mut().update(self.node_cache.get_mut().drain());
418
419		shared_inner.value_cache_mut().update(
420			self.value_cache.get_mut().drain(),
421			self.shared_value_cache_access.get_mut().drain().map(|(key, ())| key),
422		);
423	}
424}
425
/// The abstraction of the value cache for the [`TrieCache`].
enum ValueCache<'a, H: Hasher> {
	/// The value cache is fresh, aka not yet associated to any storage root.
	/// This is used for example when a new trie is being built, to cache new values.
	Fresh(HashMap<Arc<[u8]>, CachedValue<H::Out>>),
	/// The value cache is already bound to a specific storage root.
	ForStorageRoot {
		/// Records which keys of the shared value cache were accessed, so their
		/// LRU position can be bumped when merging back into the shared cache.
		shared_value_cache_access: MutexGuard<'a, ValueAccessSet>,
		/// The locked local value cache of the owning [`LocalTrieCache`].
		local_value_cache: MutexGuard<'a, ValueCacheMap<H::Out>>,
		/// The storage root this cache is bound to.
		storage_root: H::Out,
		// The shared value cache needs to be temporarily locked when reading from it
		// so we need to clone the value that is returned, but we need to be able to
		// return a reference to the value, so we just buffer it here.
		buffered_value: Option<CachedValue<H::Out>>,
	},
}
442
impl<H: Hasher> ValueCache<'_, H> {
	/// Get the value for the given `key`.
	///
	/// Checks the local cache first and falls back to the shared cache, recording
	/// the access and updating the hit/miss `stats` along the way.
	fn get(
		&mut self,
		key: &[u8],
		shared_cache: &SharedTrieCache<H>,
		stats: &HitStats,
	) -> Option<&CachedValue<H::Out>> {
		stats.local_fetch_attempts.fetch_add(1, Ordering::Relaxed);

		match self {
			Self::Fresh(map) =>
				if let Some(value) = map.get(key) {
					stats.local_hits.fetch_add(1, Ordering::Relaxed);
					Some(value)
				} else {
					None
				},
			Self::ForStorageRoot {
				local_value_cache,
				shared_value_cache_access,
				storage_root,
				buffered_value,
			} => {
				// We first need to look up in the local cache and then the shared cache.
				// It can happen that some value is cached in the shared cache, but the
				// weak reference of the data can not be upgraded anymore. This for example
				// happens when the node is dropped that contains the strong reference to the data.
				//
				// So, the logic of the trie would lookup the data and the node and store both
				// in our local caches.

				// The hash incorporates both the storage root and the key, so it can be
				// computed once and reused for both lookups below.
				let hash = ValueCacheKey::hash_data(key, storage_root);

				if let Some(value) = local_value_cache
					.peek_by_hash(hash.raw(), |existing_key, _| {
						existing_key.is_eq(storage_root, key)
					}) {
					stats.local_hits.fetch_add(1, Ordering::Relaxed);

					return Some(value)
				}

				stats.shared_fetch_attempts.fetch_add(1, Ordering::Relaxed);
				if let Some(value) = shared_cache.peek_value_by_hash(hash, storage_root, key) {
					stats.shared_hits.fetch_add(1, Ordering::Relaxed);
					// Remember the access so the entry's LRU position in the shared
					// cache can be bumped when the local cache is merged back.
					shared_value_cache_access.insert(hash, ());
					// Buffer the cloned value so a reference into `self` can be returned.
					*buffered_value = Some(value.clone());
					return buffered_value.as_ref()
				}

				None
			},
		}
	}

	/// Insert some new `value` under the given `key`.
	fn insert(&mut self, key: &[u8], value: CachedValue<H::Out>) {
		match self {
			Self::Fresh(map) => {
				map.insert(key.into(), value);
			},
			Self::ForStorageRoot { local_value_cache, storage_root, .. } => {
				local_value_cache.insert(ValueCacheRef::new(key, *storage_root), value);
			},
		}
	}
}
511
/// The actual [`TrieCache`](trie_db::TrieCache) implementation.
///
/// If this instance was created for using it with a [`TrieDBMut`](trie_db::TrieDBMut), it needs to
/// be merged back into the [`LocalTrieCache`] with [`Self::merge_into`] after all operations are
/// done.
pub struct TrieCache<'a, H: Hasher> {
	/// The shared cache, queried whenever the local cache misses.
	shared_cache: SharedTrieCache<H>,
	/// The locked node cache of the owning [`LocalTrieCache`].
	local_cache: MutexGuard<'a, NodeCacheMap<H::Out>>,
	/// The value cache, either fresh or bound to a specific storage root.
	value_cache: ValueCache<'a, H>,
	/// Hit/miss statistics of the owning [`LocalTrieCache`].
	stats: &'a TrieHitStats,
}
523
524impl<'a, H: Hasher> TrieCache<'a, H> {
525	/// Merge this cache into the given [`LocalTrieCache`].
526	///
527	/// This function is only required to be called when this instance was created through
528	/// [`LocalTrieCache::as_trie_db_mut_cache`], otherwise this method is a no-op. The given
529	/// `storage_root` is the new storage root that was obtained after finishing all operations
530	/// using the [`TrieDBMut`](trie_db::TrieDBMut).
531	pub fn merge_into(self, local: &LocalTrieCache<H>, storage_root: H::Out) {
532		let ValueCache::Fresh(cache) = self.value_cache else { return };
533
534		if !cache.is_empty() {
535			let mut value_cache = local.value_cache.lock();
536			let partial_hash = ValueCacheKey::hash_partial_data(&storage_root);
537
538			cache.into_iter().for_each(|(k, v)| {
539				let hash = ValueCacheKeyHash::from_hasher_and_storage_key(partial_hash.clone(), &k);
540				let k = ValueCacheRef { storage_root, storage_key: &k, hash };
541				value_cache.insert(k, v);
542			});
543		}
544	}
545}
546
impl<'a, H: Hasher> trie_db::TrieCache<NodeCodec<H>> for TrieCache<'a, H> {
	fn get_or_insert_node(
		&mut self,
		hash: H::Out,
		fetch_node: &mut dyn FnMut() -> trie_db::Result<NodeOwned<H::Out>, H::Out, Error<H::Out>>,
	) -> trie_db::Result<&NodeOwned<H::Out>, H::Out, Error<H::Out>> {
		// The closure below only runs on a local cache miss, flipping this flag.
		let mut is_local_cache_hit = true;
		self.stats.node_cache.local_fetch_attempts.fetch_add(1, Ordering::Relaxed);

		// First try to grab the node from the local cache.
		let node = self.local_cache.get_or_insert_fallible(hash, || {
			is_local_cache_hit = false;

			// It was not in the local cache; try the shared cache.
			self.stats.node_cache.shared_fetch_attempts.fetch_add(1, Ordering::Relaxed);
			if let Some(node) = self.shared_cache.peek_node(&hash) {
				self.stats.node_cache.shared_hits.fetch_add(1, Ordering::Relaxed);
				tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from shared cache");

				return Ok(NodeCached::<H::Out> { node: node.clone(), is_from_shared_cache: true })
			}

			// It was not in the shared cache; try fetching it from the database.
			match fetch_node() {
				Ok(node) => {
					tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from database");
					Ok(NodeCached::<H::Out> { node, is_from_shared_cache: false })
				},
				Err(error) => {
					tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from database failed");
					Err(error)
				},
			}
		});

		if is_local_cache_hit {
			tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from local cache");
			self.stats.node_cache.local_hits.fetch_add(1, Ordering::Relaxed);
		}

		// The limiter never evicts below one element, so a successful insert is
		// always retrievable here.
		Ok(&node?
			.expect("you can always insert at least one element into the local cache; qed")
			.node)
	}

	fn get_node(&mut self, hash: &H::Out) -> Option<&NodeOwned<H::Out>> {
		let mut is_local_cache_hit = true;
		self.stats.node_cache.local_fetch_attempts.fetch_add(1, Ordering::Relaxed);

		// First try to grab the node from the local cache.
		let cached_node = self.local_cache.get_or_insert_fallible(*hash, || {
			is_local_cache_hit = false;

			// It was not in the local cache; try the shared cache.
			self.stats.node_cache.shared_fetch_attempts.fetch_add(1, Ordering::Relaxed);
			if let Some(node) = self.shared_cache.peek_node(&hash) {
				self.stats.node_cache.shared_hits.fetch_add(1, Ordering::Relaxed);
				tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from shared cache");

				Ok(NodeCached::<H::Out> { node: node.clone(), is_from_shared_cache: true })
			} else {
				tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from cache failed");

				// Unlike `get_or_insert_node`, there is no database fallback here;
				// a shared cache miss simply results in `None`.
				Err(())
			}
		});

		if is_local_cache_hit {
			tracing::trace!(target: LOG_TARGET, ?hash, "Serving node from local cache");
			self.stats.node_cache.local_hits.fetch_add(1, Ordering::Relaxed);
		}

		match cached_node {
			Ok(Some(cached_node)) => Some(&cached_node.node),
			Ok(None) => {
				// The limiter never evicts below one element, so this can't happen.
				unreachable!(
					"you can always insert at least one element into the local cache; qed"
				);
			},
			Err(()) => None,
		}
	}

	fn lookup_value_for_key(&mut self, key: &[u8]) -> Option<&CachedValue<H::Out>> {
		let res = self.value_cache.get(key, &self.shared_cache, &self.stats.value_cache);

		tracing::trace!(
			target: LOG_TARGET,
			key = ?sp_core::hexdisplay::HexDisplay::from(&key),
			found = res.is_some(),
			"Looked up value for key",
		);

		res
	}

	fn cache_value_for_key(&mut self, key: &[u8], data: CachedValue<H::Out>) {
		tracing::trace!(
			target: LOG_TARGET,
			key = ?sp_core::hexdisplay::HexDisplay::from(&key),
			"Caching value for key",
		);

		self.value_cache.insert(key, data);
	}
}
653
#[cfg(test)]
mod tests {
	use super::*;
	use trie_db::{Bytes, Trie, TrieDBBuilder, TrieDBMutBuilder, TrieHash, TrieMut};

	type MemoryDB = crate::MemoryDB<sp_core::Blake2Hasher>;
	type Layout = crate::LayoutV1<sp_core::Blake2Hasher>;
	type Cache = super::SharedTrieCache<sp_core::Blake2Hasher>;
	type Recorder = crate::recorder::Recorder<sp_core::Blake2Hasher>;

	const TEST_DATA: &[(&[u8], &[u8])] =
		&[(b"key1", b"val1"), (b"key2", &[2; 64]), (b"key3", b"val3"), (b"key4", &[4; 64])];
	const CACHE_SIZE_RAW: usize = 1024 * 10;
	const CACHE_SIZE: CacheSize = CacheSize::new(CACHE_SIZE_RAW);

	/// Creates a fresh in-memory trie filled with [`TEST_DATA`] and returns it
	/// together with its storage root.
	fn create_trie() -> (MemoryDB, TrieHash<Layout>) {
		let mut db = MemoryDB::default();
		let mut root = Default::default();

		{
			let mut trie = TrieDBMutBuilder::<Layout>::new(&mut db, &mut root).build();
			for (k, v) in TEST_DATA {
				trie.insert(k, v).expect("Inserts data");
			}
		}

		(db, root)
	}

	#[test]
	fn basic_cache_works() {
		let (db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE);
		let local_cache = shared_cache.local_cache();

		{
			let mut cache = local_cache.as_trie_db_cache(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();
			assert_eq!(TEST_DATA[0].1.to_vec(), trie.get(TEST_DATA[0].0).unwrap().unwrap());
		}

		// Local cache wasn't dropped yet, so there should be nothing in the shared caches.
		assert!(shared_cache.read_lock_inner().value_cache().lru.is_empty());
		assert!(shared_cache.read_lock_inner().node_cache().lru.is_empty());

		drop(local_cache);

		// Now we should have the cached items in the shared cache.
		assert!(shared_cache.read_lock_inner().node_cache().lru.len() >= 1);
		let cached_data = shared_cache
			.read_lock_inner()
			.value_cache()
			.lru
			.peek(&ValueCacheKey::new_value(TEST_DATA[0].0, root))
			.unwrap()
			.clone();
		assert_eq!(Bytes::from(TEST_DATA[0].1.to_vec()), cached_data.data().flatten().unwrap());

		let fake_data = Bytes::from(&b"fake_data"[..]);

		let local_cache = shared_cache.local_cache();
		shared_cache.write_lock_inner().unwrap().value_cache_mut().lru.insert(
			ValueCacheKey::new_value(TEST_DATA[1].0, root),
			(fake_data.clone(), Default::default()).into(),
		);

		{
			let mut cache = local_cache.as_trie_db_cache(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();

			// We should now get the "fake_data", because we inserted this manually to the cache.
			assert_eq!(b"fake_data".to_vec(), trie.get(TEST_DATA[1].0).unwrap().unwrap());
		}
	}

	#[test]
	fn trie_db_mut_cache_works() {
		let (mut db, root) = create_trie();

		let new_key = b"new_key".to_vec();
		// Use some long value to not have it inlined
		let new_value = vec![23; 64];

		let shared_cache = Cache::new(CACHE_SIZE);
		let mut new_root = root;

		{
			let local_cache = shared_cache.local_cache();

			let mut cache = local_cache.as_trie_db_mut_cache();

			{
				let mut trie = TrieDBMutBuilder::<Layout>::from_existing(&mut db, &mut new_root)
					.with_cache(&mut cache)
					.build();

				trie.insert(&new_key, &new_value).unwrap();
			}

			cache.merge_into(&local_cache, new_root);
		}

		// After the local cache is dropped, all changes should have been merged back to the shared
		// cache.
		let cached_data = shared_cache
			.read_lock_inner()
			.value_cache()
			.lru
			.peek(&ValueCacheKey::new_value(new_key, new_root))
			.unwrap()
			.clone();
		assert_eq!(Bytes::from(new_value), cached_data.data().flatten().unwrap());
	}

	#[test]
	fn trie_db_cache_and_recorder_work_together() {
		let (db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE);

		for i in 0..5 {
			// Clear some of the caches.
			if i == 2 {
				shared_cache.reset_node_cache();
			} else if i == 3 {
				shared_cache.reset_value_cache();
			}

			let local_cache = shared_cache.local_cache();
			let recorder = Recorder::default();

			{
				let mut cache = local_cache.as_trie_db_cache(root);
				let mut recorder = recorder.as_trie_recorder(root);
				let trie = TrieDBBuilder::<Layout>::new(&db, &root)
					.with_cache(&mut cache)
					.with_recorder(&mut recorder)
					.build();

				for (key, value) in TEST_DATA {
					assert_eq!(*value, trie.get(&key).unwrap().unwrap());
				}
			}

			// The recorded proof must be complete: reading all keys from a trie
			// backed only by the proof has to succeed.
			let storage_proof = recorder.drain_storage_proof();
			let memory_db: MemoryDB = storage_proof.into_memory_db();

			{
				let trie = TrieDBBuilder::<Layout>::new(&memory_db, &root).build();

				for (key, value) in TEST_DATA {
					assert_eq!(*value, trie.get(&key).unwrap().unwrap());
				}
			}
		}
	}

	#[test]
	fn trie_db_mut_cache_and_recorder_work_together() {
		const DATA_TO_ADD: &[(&[u8], &[u8])] = &[(b"key11", &[45; 78]), (b"key33", &[78; 89])];

		let (db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE);

		// Run this twice so that we use the data cache in the second run.
		for i in 0..5 {
			// Clear some of the caches.
			if i == 2 {
				shared_cache.reset_node_cache();
			} else if i == 3 {
				shared_cache.reset_value_cache();
			}

			let recorder = Recorder::default();
			let local_cache = shared_cache.local_cache();
			let mut new_root = root;

			{
				let mut db = db.clone();
				let mut cache = local_cache.as_trie_db_cache(root);
				let mut recorder = recorder.as_trie_recorder(root);
				let mut trie = TrieDBMutBuilder::<Layout>::from_existing(&mut db, &mut new_root)
					.with_cache(&mut cache)
					.with_recorder(&mut recorder)
					.build();

				for (key, value) in DATA_TO_ADD {
					trie.insert(key, value).unwrap();
				}
			}

			// Replaying the same insertions on a trie backed only by the recorded
			// proof must produce the same storage root.
			let storage_proof = recorder.drain_storage_proof();
			let mut memory_db: MemoryDB = storage_proof.into_memory_db();
			let mut proof_root = root;

			{
				let mut trie =
					TrieDBMutBuilder::<Layout>::from_existing(&mut memory_db, &mut proof_root)
						.build();

				for (key, value) in DATA_TO_ADD {
					trie.insert(key, value).unwrap();
				}
			}

			assert_eq!(new_root, proof_root)
		}
	}

	#[test]
	fn cache_lru_works() {
		let (db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE);

		{
			let local_cache = shared_cache.local_cache();

			let mut cache = local_cache.as_trie_db_cache(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();

			for (k, _) in TEST_DATA {
				trie.get(k).unwrap().unwrap();
			}
		}

		// Check that all items are there.
		assert!(shared_cache
			.read_lock_inner()
			.value_cache()
			.lru
			.iter()
			.map(|d| d.0)
			.all(|l| TEST_DATA.iter().any(|d| &*l.storage_key == d.0)));

		// Run this in a loop. The first time we check that with the filled value cache,
		// the expected values are at the top of the LRU.
		// The second run is using an empty value cache to ensure that we access the nodes.
		for _ in 0..2 {
			{
				let local_cache = shared_cache.local_cache();

				let mut cache = local_cache.as_trie_db_cache(root);
				let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();

				for (k, _) in TEST_DATA.iter().take(2) {
					trie.get(k).unwrap().unwrap();
				}
			}

			// Ensure that the accessed items are most recently used items of the shared value
			// cache.
			assert!(shared_cache
				.read_lock_inner()
				.value_cache()
				.lru
				.iter()
				.take(2)
				.map(|d| d.0)
				.all(|l| { TEST_DATA.iter().take(2).any(|d| &*l.storage_key == d.0) }));

			// Delete the value cache, so that we access the nodes.
			shared_cache.reset_value_cache();
		}

		let most_recently_used_nodes = shared_cache
			.read_lock_inner()
			.node_cache()
			.lru
			.iter()
			.map(|d| *d.0)
			.collect::<Vec<_>>();

		{
			let local_cache = shared_cache.local_cache();

			let mut cache = local_cache.as_trie_db_cache(root);
			let trie = TrieDBBuilder::<Layout>::new(&db, &root).with_cache(&mut cache).build();

			for (k, _) in TEST_DATA.iter().skip(2) {
				trie.get(k).unwrap().unwrap();
			}
		}

		// Ensure that the most recently used nodes changed as well.
		assert_ne!(
			most_recently_used_nodes,
			shared_cache
				.read_lock_inner()
				.node_cache()
				.lru
				.iter()
				.map(|d| *d.0)
				.collect::<Vec<_>>()
		);
	}

	#[test]
	fn cache_respects_bounds() {
		let (mut db, root) = create_trie();

		let shared_cache = Cache::new(CACHE_SIZE);
		{
			let local_cache = shared_cache.local_cache();

			let mut new_root = root;

			{
				let mut cache = local_cache.as_trie_db_cache(root);
				{
					let mut trie =
						TrieDBMutBuilder::<Layout>::from_existing(&mut db, &mut new_root)
							.with_cache(&mut cache)
							.build();

					let value = vec![10u8; 100];
					// Ensure we add enough data that would overflow the cache.
					for i in 0..CACHE_SIZE_RAW / 100 * 2 {
						trie.insert(format!("key{}", i).as_bytes(), &value).unwrap();
					}
				}

				cache.merge_into(&local_cache, new_root);
			}
		}

		// Even though more data than the limit was written, the shared cache must
		// stay below its configured maximum size.
		assert!(shared_cache.used_memory_size() < CACHE_SIZE_RAW);
	}
}