// bounded_collections/bounded_btree_map.rs

// This file is part of Substrate.

// Copyright (C) 2017-2023 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// 	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Traits, types and structs to support a bounded BTreeMap.

use crate::{Get, TryCollect};
use alloc::collections::BTreeMap;
use codec::{Compact, Decode, DecodeWithMemTracking, Encode, MaxEncodedLen};
use core::{borrow::Borrow, marker::PhantomData, ops::Deref};
#[cfg(feature = "serde")]
use serde::{
	de::{Error, MapAccess, Visitor},
	Deserialize, Deserializer, Serialize,
};
29
/// A bounded map based on a B-Tree.
///
/// B-Trees represent a fundamental compromise between cache-efficiency and actually minimizing
/// the amount of work performed in a search. See [`BTreeMap`] for more details.
///
/// Unlike a standard `BTreeMap`, there is an enforced upper limit to the number of items in the
/// map. All internal operations ensure this bound is respected.
#[cfg_attr(feature = "serde", derive(Serialize), serde(transparent))]
#[derive(Encode, scale_info::TypeInfo)]
#[scale_info(skip_type_params(S))]
pub struct BoundedBTreeMap<K, V, S>(
	BTreeMap<K, V>,
	// `S` only carries the bound at the type level; it holds no data and is therefore
	// excluded from serialization and from the `TypeInfo` type parameters above.
	#[cfg_attr(feature = "serde", serde(skip_serializing))] PhantomData<S>,
);
44
#[cfg(feature = "serde")]
impl<'de, K, V, S: Get<u32>> Deserialize<'de> for BoundedBTreeMap<K, V, S>
where
	K: Deserialize<'de> + Ord,
	V: Deserialize<'de>,
{
	fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
	where
		D: Deserializer<'de>,
	{
		// Create a visitor to visit each element in the map
		struct BTreeMapVisitor<K, V, S>(PhantomData<(K, V, S)>);

		impl<'de, K, V, S> Visitor<'de> for BTreeMapVisitor<K, V, S>
		where
			K: Deserialize<'de> + Ord,
			V: Deserialize<'de>,
			S: Get<u32>,
		{
			type Value = BTreeMap<K, V>;

			fn expecting(&self, formatter: &mut core::fmt::Formatter) -> core::fmt::Result {
				formatter.write_str("a map")
			}

			fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
			where
				A: MapAccess<'de>,
			{
				// The size hint is only advisory (a malicious stream may lie), so it is used
				// for early rejection only; the loop below re-checks the bound per element.
				let size = map.size_hint().unwrap_or(0);
				let max = S::get() as usize;
				if size > max {
					Err(A::Error::custom("map exceeds the size of the bounds"))
				} else {
					let mut values = BTreeMap::new();

					while let Some(key) = map.next_key()? {
						// Enforce the bound while decoding so an oversized input fails fast
						// instead of buffering an unbounded amount of data first.
						if values.len() >= max {
							return Err(A::Error::custom("map exceeds the size of the bounds"));
						}
						let value = map.next_value()?;
						values.insert(key, value);
					}

					Ok(values)
				}
			}
		}

		let visitor: BTreeMapVisitor<K, V, S> = BTreeMapVisitor(PhantomData);
		deserializer.deserialize_map(visitor).map(|v| {
			// The visitor already enforced the bound, so this conversion is not expected to
			// fail; the error is still mapped defensively rather than unwrapped.
			BoundedBTreeMap::<K, V, S>::try_from(v)
				.map_err(|_| Error::custom("failed to create a BoundedBTreeMap from the provided map"))
		})?
	}
}
101
// Struct which allows prepending the compact after reading from an input.
pub(crate) struct PrependCompactInput<'a, I> {
	// The already-encoded `Compact<u32>` length that logically precedes `inner`.
	encoded_len: &'a [u8],
	// How many bytes of `encoded_len` have been served to readers so far.
	read: usize,
	// The remainder of the input, following the length prefix.
	inner: &'a mut I,
}
108
109impl<'a, I: codec::Input> codec::Input for PrependCompactInput<'a, I> {
110	fn remaining_len(&mut self) -> Result<Option<usize>, codec::Error> {
111		let remaining_compact = self.encoded_len.len().saturating_sub(self.read);
112		Ok(self.inner.remaining_len()?.map(|len| len.saturating_add(remaining_compact)))
113	}
114
115	fn read(&mut self, into: &mut [u8]) -> Result<(), codec::Error> {
116		if into.is_empty() {
117			return Ok(());
118		}
119
120		let remaining_compact = self.encoded_len.len().saturating_sub(self.read);
121		if remaining_compact > 0 {
122			let to_read = into.len().min(remaining_compact);
123			into[..to_read].copy_from_slice(&self.encoded_len[self.read..][..to_read]);
124			self.read += to_read;
125
126			if to_read < into.len() {
127				// Buffer not full, keep reading the inner.
128				self.inner.read(&mut into[to_read..])
129			} else {
130				// Buffer was filled by the compact.
131				Ok(())
132			}
133		} else {
134			// Prepended compact has been read, just read from inner.
135			self.inner.read(into)
136		}
137	}
138}
139
impl<K, V, S> Decode for BoundedBTreeMap<K, V, S>
where
	K: Decode + Ord,
	V: Decode,
	S: Get<u32>,
{
	fn decode<I: codec::Input>(input: &mut I) -> Result<Self, codec::Error> {
		// Fail early if the len is too big. This is a compact u32 which we will later put back.
		let compact = <Compact<u32>>::decode(input)?;
		if compact.0 > S::get() {
			return Err("BoundedBTreeMap exceeds its limit".into());
		}
		// Reconstruct the original input by prepending the length we just read, then delegate the decoding to BTreeMap.
		let inner = BTreeMap::decode(&mut PrependCompactInput {
			// NOTE: the temporary `Vec` produced by `encode()` lives until the end of this
			// statement, so borrowing it via `as_ref()` here is sound.
			encoded_len: compact.encode().as_ref(),
			read: 0,
			inner: input,
		})?;
		Ok(Self(inner, PhantomData))
	}

	// Skipping need not enforce the bound; delegate to the unbounded implementation.
	fn skip<I: codec::Input>(input: &mut I) -> Result<(), codec::Error> {
		BTreeMap::<K, V>::skip(input)
	}
}
165
// Marker impl: memory-tracked decoding is delegated entirely to the bounded `Decode` above.
impl<K, V, S> DecodeWithMemTracking for BoundedBTreeMap<K, V, S>
where
	K: DecodeWithMemTracking + Ord,
	V: DecodeWithMemTracking,
	S: Get<u32>,
	BoundedBTreeMap<K, V, S>: Decode,
{
}
174
impl<K, V, S> BoundedBTreeMap<K, V, S>
where
	S: Get<u32>,
{
	/// Get the bound of the type in `usize`.
	// NOTE(review): on targets where `usize` is narrower than `u32` this cast would truncate;
	// in practice targets are at least 32-bit — confirm if that assumption ever changes.
	pub fn bound() -> usize {
		S::get() as usize
	}
}
184
185impl<K, V, S> BoundedBTreeMap<K, V, S>
186where
187	K: Ord,
188	S: Get<u32>,
189{
190	/// Create `Self` from `t` without any checks.
191	fn unchecked_from(t: BTreeMap<K, V>) -> Self {
192		Self(t, Default::default())
193	}
194
195	/// Exactly the same semantics as `BTreeMap::retain`.
196	///
197	/// The is a safe `&mut self` borrow because `retain` can only ever decrease the length of the
198	/// inner map.
199	pub fn retain<F: FnMut(&K, &mut V) -> bool>(&mut self, f: F) {
200		self.0.retain(f)
201	}
202
203	/// Create a new `BoundedBTreeMap`.
204	///
205	/// Does not allocate.
206	pub fn new() -> Self {
207		BoundedBTreeMap(BTreeMap::new(), PhantomData)
208	}
209
210	/// Consume self, and return the inner `BTreeMap`.
211	///
212	/// This is useful when a mutating API of the inner type is desired, and closure-based mutation
213	/// such as provided by [`try_mutate`][Self::try_mutate] is inconvenient.
214	pub fn into_inner(self) -> BTreeMap<K, V> {
215		debug_assert!(self.0.len() <= Self::bound());
216		self.0
217	}
218
219	/// Consumes self and mutates self via the given `mutate` function.
220	///
221	/// If the outcome of mutation is within bounds, `Some(Self)` is returned. Else, `None` is
222	/// returned.
223	///
224	/// This is essentially a *consuming* shorthand [`Self::into_inner`] -> `...` ->
225	/// [`Self::try_from`].
226	pub fn try_mutate(mut self, mut mutate: impl FnMut(&mut BTreeMap<K, V>)) -> Option<Self> {
227		mutate(&mut self.0);
228		(self.0.len() <= Self::bound()).then(move || self)
229	}
230
231	/// Clears the map, removing all elements.
232	pub fn clear(&mut self) {
233		self.0.clear()
234	}
235
236	/// Return a mutable reference to the value corresponding to the key.
237	///
238	/// The key may be any borrowed form of the map's key type, but the ordering on the borrowed
239	/// form _must_ match the ordering on the key type.
240	pub fn get_mut<Q>(&mut self, key: &Q) -> Option<&mut V>
241	where
242		K: Borrow<Q>,
243		Q: Ord + ?Sized,
244	{
245		self.0.get_mut(key)
246	}
247
248	/// Exactly the same semantics as [`BTreeMap::insert`], but returns an `Err` (and is a noop) if
249	/// the new length of the map exceeds `S`.
250	///
251	/// In the `Err` case, returns the inserted pair so it can be further used without cloning.
252	pub fn try_insert(&mut self, key: K, value: V) -> Result<Option<V>, (K, V)> {
253		if self.len() < Self::bound() || self.0.contains_key(&key) {
254			Ok(self.0.insert(key, value))
255		} else {
256			Err((key, value))
257		}
258	}
259
260	/// Remove a key from the map, returning the value at the key if the key was previously in the
261	/// map.
262	///
263	/// The key may be any borrowed form of the map's key type, but the ordering on the borrowed
264	/// form _must_ match the ordering on the key type.
265	pub fn remove<Q>(&mut self, key: &Q) -> Option<V>
266	where
267		K: Borrow<Q>,
268		Q: Ord + ?Sized,
269	{
270		self.0.remove(key)
271	}
272
273	/// Remove a key from the map, returning the value at the key if the key was previously in the
274	/// map.
275	///
276	/// The key may be any borrowed form of the map's key type, but the ordering on the borrowed
277	/// form _must_ match the ordering on the key type.
278	pub fn remove_entry<Q>(&mut self, key: &Q) -> Option<(K, V)>
279	where
280		K: Borrow<Q>,
281		Q: Ord + ?Sized,
282	{
283		self.0.remove_entry(key)
284	}
285
286	/// Gets a mutable iterator over the entries of the map, sorted by key.
287	///
288	/// See [`BTreeMap::iter_mut`] for more information.
289	pub fn iter_mut(&mut self) -> alloc::collections::btree_map::IterMut<K, V> {
290		self.0.iter_mut()
291	}
292
293	/// Consume the map, applying `f` to each of it's values and returning a new map.
294	pub fn map<T, F>(self, mut f: F) -> BoundedBTreeMap<K, T, S>
295	where
296		F: FnMut((&K, V)) -> T,
297	{
298		BoundedBTreeMap::<K, T, S>::unchecked_from(
299			self.0
300				.into_iter()
301				.map(|(k, v)| {
302					let t = f((&k, v));
303					(k, t)
304				})
305				.collect(),
306		)
307	}
308
309	/// Consume the map, applying `f` to each of it's values as long as it returns successfully. If
310	/// an `Err(E)` is ever encountered, the mapping is short circuited and the error is returned;
311	/// otherwise, a new map is returned in the contained `Ok` value.
312	pub fn try_map<T, E, F>(self, mut f: F) -> Result<BoundedBTreeMap<K, T, S>, E>
313	where
314		F: FnMut((&K, V)) -> Result<T, E>,
315	{
316		Ok(BoundedBTreeMap::<K, T, S>::unchecked_from(
317			self.0
318				.into_iter()
319				.map(|(k, v)| (f((&k, v)).map(|t| (k, t))))
320				.collect::<Result<BTreeMap<_, _>, _>>()?,
321		))
322	}
323
324	/// Returns true if this map is full.
325	pub fn is_full(&self) -> bool {
326		self.len() >= Self::bound()
327	}
328}
329
330impl<K, V, S> Default for BoundedBTreeMap<K, V, S>
331where
332	K: Ord,
333	S: Get<u32>,
334{
335	fn default() -> Self {
336		Self::new()
337	}
338}
339
340impl<K, V, S> Clone for BoundedBTreeMap<K, V, S>
341where
342	BTreeMap<K, V>: Clone,
343{
344	fn clone(&self) -> Self {
345		BoundedBTreeMap(self.0.clone(), PhantomData)
346	}
347}
348
349impl<K, V, S> core::fmt::Debug for BoundedBTreeMap<K, V, S>
350where
351	BTreeMap<K, V>: core::fmt::Debug,
352	S: Get<u32>,
353{
354	fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
355		f.debug_tuple("BoundedBTreeMap").field(&self.0).field(&Self::bound()).finish()
356	}
357}
358
// Custom implementation of `Hash` since deriving it would require all generic bounds to also
// implement it.
#[cfg(feature = "std")]
impl<K: std::hash::Hash, V: std::hash::Hash, S> std::hash::Hash for BoundedBTreeMap<K, V, S> {
	fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
		// The phantom bound carries no data; only the inner map participates in the hash.
		std::hash::Hash::hash(&self.0, state);
	}
}
367
368impl<K, V, S1, S2> PartialEq<BoundedBTreeMap<K, V, S1>> for BoundedBTreeMap<K, V, S2>
369where
370	BTreeMap<K, V>: PartialEq,
371	S1: Get<u32>,
372	S2: Get<u32>,
373{
374	fn eq(&self, other: &BoundedBTreeMap<K, V, S1>) -> bool {
375		S1::get() == S2::get() && self.0 == other.0
376	}
377}
378
// `Eq` is sound here because equality is fully delegated to the inner map's total equality.
impl<K, V, S> Eq for BoundedBTreeMap<K, V, S>
where
	BTreeMap<K, V>: Eq,
	S: Get<u32>,
{
}
385
386impl<K, V, S> PartialEq<BTreeMap<K, V>> for BoundedBTreeMap<K, V, S>
387where
388	BTreeMap<K, V>: PartialEq,
389{
390	fn eq(&self, other: &BTreeMap<K, V>) -> bool {
391		self.0 == *other
392	}
393}
394
395impl<K, V, S> PartialOrd for BoundedBTreeMap<K, V, S>
396where
397	BTreeMap<K, V>: PartialOrd,
398	S: Get<u32>,
399{
400	fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
401		self.0.partial_cmp(&other.0)
402	}
403}
404
405impl<K, V, S> Ord for BoundedBTreeMap<K, V, S>
406where
407	BTreeMap<K, V>: Ord,
408	S: Get<u32>,
409{
410	fn cmp(&self, other: &Self) -> core::cmp::Ordering {
411		self.0.cmp(&other.0)
412	}
413}
414
415impl<K, V, S> IntoIterator for BoundedBTreeMap<K, V, S> {
416	type Item = (K, V);
417	type IntoIter = alloc::collections::btree_map::IntoIter<K, V>;
418
419	fn into_iter(self) -> Self::IntoIter {
420		self.0.into_iter()
421	}
422}
423
424impl<'a, K, V, S> IntoIterator for &'a BoundedBTreeMap<K, V, S> {
425	type Item = (&'a K, &'a V);
426	type IntoIter = alloc::collections::btree_map::Iter<'a, K, V>;
427
428	fn into_iter(self) -> Self::IntoIter {
429		self.0.iter()
430	}
431}
432
433impl<'a, K, V, S> IntoIterator for &'a mut BoundedBTreeMap<K, V, S> {
434	type Item = (&'a K, &'a mut V);
435	type IntoIter = alloc::collections::btree_map::IterMut<'a, K, V>;
436
437	fn into_iter(self) -> Self::IntoIter {
438		self.0.iter_mut()
439	}
440}
441
442impl<K, V, S> MaxEncodedLen for BoundedBTreeMap<K, V, S>
443where
444	K: MaxEncodedLen,
445	V: MaxEncodedLen,
446	S: Get<u32>,
447{
448	fn max_encoded_len() -> usize {
449		Self::bound()
450			.saturating_mul(K::max_encoded_len().saturating_add(V::max_encoded_len()))
451			.saturating_add(codec::Compact(S::get()).encoded_size())
452	}
453}
454
// Read-only access to all `BTreeMap` methods via auto-deref. `DerefMut` is deliberately not
// implemented, since it would allow growing the inner map past the bound.
impl<K, V, S> Deref for BoundedBTreeMap<K, V, S>
where
	K: Ord,
{
	type Target = BTreeMap<K, V>;

	fn deref(&self) -> &Self::Target {
		&self.0
	}
}
465
// Cheap shared view of the inner map.
impl<K, V, S> AsRef<BTreeMap<K, V>> for BoundedBTreeMap<K, V, S>
where
	K: Ord,
{
	fn as_ref(&self) -> &BTreeMap<K, V> {
		&self.0
	}
}
474
// Unbounding is always safe, hence a plain `From` (rather than `TryFrom`) is provided.
impl<K, V, S> From<BoundedBTreeMap<K, V, S>> for BTreeMap<K, V>
where
	K: Ord,
{
	fn from(map: BoundedBTreeMap<K, V, S>) -> Self {
		map.0
	}
}
483
484impl<K, V, S> TryFrom<BTreeMap<K, V>> for BoundedBTreeMap<K, V, S>
485where
486	K: Ord,
487	S: Get<u32>,
488{
489	type Error = ();
490
491	fn try_from(value: BTreeMap<K, V>) -> Result<Self, Self::Error> {
492		(value.len() <= Self::bound())
493			.then(move || BoundedBTreeMap(value, PhantomData))
494			.ok_or(())
495	}
496}
497
impl<K, V, S> codec::DecodeLength for BoundedBTreeMap<K, V, S> {
	fn len(self_encoded: &[u8]) -> Result<usize, codec::Error> {
		// `BoundedBTreeMap<K, V, S>` is stored just as a `BTreeMap<K, V>`, which is stored as a
		// `Compact<u32>` with its length followed by an iteration of its items. We can just use
		// the underlying implementation.
		<BTreeMap<K, V> as codec::DecodeLength>::len(self_encoded)
	}
}
506
// The encoding is byte-identical to the inner map's, so a `BoundedBTreeMap` may be used
// wherever an encoded `BTreeMap` is expected.
impl<K, V, S> codec::EncodeLike<BTreeMap<K, V>> for BoundedBTreeMap<K, V, S> where BTreeMap<K, V>: Encode {}
508
509impl<I, K, V, Bound> TryCollect<BoundedBTreeMap<K, V, Bound>> for I
510where
511	K: Ord,
512	I: ExactSizeIterator + Iterator<Item = (K, V)>,
513	Bound: Get<u32>,
514{
515	type Error = &'static str;
516
517	fn try_collect(self) -> Result<BoundedBTreeMap<K, V, Bound>, Self::Error> {
518		if self.len() > Bound::get() as usize {
519			Err("iterator length too big")
520		} else {
521			Ok(BoundedBTreeMap::<K, V, Bound>::unchecked_from(self.collect::<BTreeMap<K, V>>()))
522		}
523	}
524}
525
#[cfg(test)]
mod test {
	use super::*;
	use crate::ConstU32;
	use alloc::{vec, vec::Vec};
	use codec::{CompactLen, Input};

	// Build an unbounded map with unit values from the given keys.
	fn map_from_keys<K>(keys: &[K]) -> BTreeMap<K, ()>
	where
		K: Ord + Copy,
	{
		keys.iter().copied().zip(core::iter::repeat(())).collect()
	}

	// Build a bounded map with unit values from the given keys; panics if `keys` exceeds `S`.
	fn boundedmap_from_keys<K, S>(keys: &[K]) -> BoundedBTreeMap<K, (), S>
	where
		K: Ord + Copy,
		S: Get<u32>,
	{
		map_from_keys(keys).try_into().unwrap()
	}

	#[test]
	fn encoding_same_as_unbounded_map() {
		let b = boundedmap_from_keys::<u32, ConstU32<7>>(&[1, 2, 3, 4, 5, 6]);
		let m = map_from_keys(&[1, 2, 3, 4, 5, 6]);

		assert_eq!(b.encode(), m.encode());
	}

	#[test]
	fn encode_then_decode_gives_original_map() {
		let b = boundedmap_from_keys::<u32, ConstU32<7>>(&[1, 2, 3, 4, 5, 6]);
		let b_encode_decode = BoundedBTreeMap::<u32, (), ConstU32<7>>::decode(&mut &b.encode()[..]).unwrap();

		assert_eq!(b_encode_decode, b);
	}

	#[test]
	fn try_insert_works() {
		let mut bounded = boundedmap_from_keys::<u32, ConstU32<4>>(&[1, 2, 3]);
		bounded.try_insert(0, ()).unwrap();
		assert_eq!(*bounded, map_from_keys(&[1, 0, 2, 3]));

		assert!(bounded.try_insert(9, ()).is_err());
		assert_eq!(*bounded, map_from_keys(&[1, 0, 2, 3]));
	}

	#[test]
	fn deref_coercion_works() {
		let bounded = boundedmap_from_keys::<u32, ConstU32<7>>(&[1, 2, 3]);
		// these methods come from the deref-ed inner map.
		assert_eq!(bounded.len(), 3);
		assert!(bounded.iter().next().is_some());
		assert!(!bounded.is_empty());
	}

	#[test]
	fn try_mutate_works() {
		let bounded = boundedmap_from_keys::<u32, ConstU32<7>>(&[1, 2, 3, 4, 5, 6]);
		let bounded = bounded
			.try_mutate(|v| {
				v.insert(7, ());
			})
			.unwrap();
		assert_eq!(bounded.len(), 7);
		assert!(bounded
			.try_mutate(|v| {
				v.insert(8, ());
			})
			.is_none());
	}

	#[test]
	fn btree_map_eq_works() {
		let bounded = boundedmap_from_keys::<u32, ConstU32<7>>(&[1, 2, 3, 4, 5, 6]);
		assert_eq!(bounded, map_from_keys(&[1, 2, 3, 4, 5, 6]));
	}

	#[test]
	fn too_big_fail_to_decode() {
		let v: Vec<(u32, u32)> = vec![(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)];
		assert_eq!(
			BoundedBTreeMap::<u32, u32, ConstU32<4>>::decode(&mut &v.encode()[..]),
			Err("BoundedBTreeMap exceeds its limit".into()),
		);
	}

	#[test]
	fn dont_consume_more_data_than_bounded_len() {
		let m = map_from_keys(&[1, 2, 3, 4, 5, 6]);
		let data = m.encode();
		let data_input = &mut &data[..];

		BoundedBTreeMap::<u32, u32, ConstU32<4>>::decode(data_input).unwrap_err();
		// Only the compact length prefix may have been consumed by the failed decode.
		// NOTE(review): `compact_len(&(data.len() as u32))` (25 -> 1 byte) happens to equal the
		// prefix length of the 6-entry map here; confirm if fixture sizes ever change.
		assert_eq!(data_input.len(), data.len() - Compact::<u32>::compact_len(&(data.len() as u32)));
	}

	#[test]
	fn unequal_eq_impl_insert_works() {
		// given a struct with a strange notion of equality
		#[derive(Debug)]
		struct Unequal(u32, bool);

		impl PartialEq for Unequal {
			fn eq(&self, other: &Self) -> bool {
				self.0 == other.0
			}
		}
		impl Eq for Unequal {}

		impl Ord for Unequal {
			fn cmp(&self, other: &Self) -> core::cmp::Ordering {
				self.0.cmp(&other.0)
			}
		}

		impl PartialOrd for Unequal {
			fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
				Some(self.cmp(other))
			}
		}

		let mut map = BoundedBTreeMap::<Unequal, u32, ConstU32<4>>::new();

		// when the set is full

		for i in 0..4 {
			map.try_insert(Unequal(i, false), i).unwrap();
		}

		// can't insert a new distinct member
		map.try_insert(Unequal(5, false), 5).unwrap_err();

		// but _can_ insert a distinct member which compares equal, though per the documentation,
		// neither the set length nor the actual member are changed, but the value is
		map.try_insert(Unequal(0, true), 6).unwrap();
		assert_eq!(map.len(), 4);
		let (zero_key, zero_value) = map.get_key_value(&Unequal(0, true)).unwrap();
		assert_eq!(zero_key.0, 0);
		assert_eq!(zero_key.1, false);
		assert_eq!(*zero_value, 6);
	}

	#[test]
	fn eq_works() {
		// of same type
		let b1 = boundedmap_from_keys::<u32, ConstU32<7>>(&[1, 2]);
		let b2 = boundedmap_from_keys::<u32, ConstU32<7>>(&[1, 2]);
		assert_eq!(b1, b2);

		// of different type, but same value and bound.
		crate::parameter_types! {
			B1: u32 = 7;
			B2: u32 = 7;
		}
		let b1 = boundedmap_from_keys::<u32, B1>(&[1, 2]);
		let b2 = boundedmap_from_keys::<u32, B2>(&[1, 2]);
		assert_eq!(b1, b2);
	}

	#[test]
	fn can_be_collected() {
		let b1 = boundedmap_from_keys::<u32, ConstU32<5>>(&[1, 2, 3, 4]);
		let b2: BoundedBTreeMap<u32, (), ConstU32<5>> = b1.iter().map(|(k, v)| (k + 1, *v)).try_collect().unwrap();
		assert_eq!(b2.into_iter().map(|(k, _)| k).collect::<Vec<_>>(), vec![2, 3, 4, 5]);

		// can also be collected into a collection of length 4.
		let b2: BoundedBTreeMap<u32, (), ConstU32<4>> = b1.iter().map(|(k, v)| (k + 1, *v)).try_collect().unwrap();
		assert_eq!(b2.into_iter().map(|(k, _)| k).collect::<Vec<_>>(), vec![2, 3, 4, 5]);

		// can be mutated further into iterators that are `ExactSizedIterator`.
		let b2: BoundedBTreeMap<u32, (), ConstU32<5>> =
			b1.iter().map(|(k, v)| (k + 1, *v)).rev().skip(2).try_collect().unwrap();
		// note that the binary tree will re-sort this, so rev() is not really seen
		assert_eq!(b2.into_iter().map(|(k, _)| k).collect::<Vec<_>>(), vec![2, 3]);

		let b2: BoundedBTreeMap<u32, (), ConstU32<5>> =
			b1.iter().map(|(k, v)| (k + 1, *v)).take(2).try_collect().unwrap();
		assert_eq!(b2.into_iter().map(|(k, _)| k).collect::<Vec<_>>(), vec![2, 3]);

		// but these won't work
		let b2: Result<BoundedBTreeMap<u32, (), ConstU32<3>>, _> = b1.iter().map(|(k, v)| (k + 1, *v)).try_collect();
		assert!(b2.is_err());

		let b2: Result<BoundedBTreeMap<u32, (), ConstU32<1>>, _> =
			b1.iter().map(|(k, v)| (k + 1, *v)).skip(2).try_collect();
		assert!(b2.is_err());
	}

	#[test]
	fn test_iter_mut() {
		let mut b1: BoundedBTreeMap<u8, u8, ConstU32<7>> =
			[1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap();

		let b2: BoundedBTreeMap<u8, u8, ConstU32<7>> =
			[1, 2, 3, 4].into_iter().map(|k| (k, k * 2)).try_collect().unwrap();

		b1.iter_mut().for_each(|(_, v)| *v *= 2);

		assert_eq!(b1, b2);
	}

	#[test]
	fn map_retains_size() {
		let b1 = boundedmap_from_keys::<u32, ConstU32<7>>(&[1, 2]);
		let b2 = b1.clone();

		assert_eq!(b1.len(), b2.map(|(_, _)| 5_u32).len());
	}

	#[test]
	fn map_maps_properly() {
		let b1: BoundedBTreeMap<u32, u32, ConstU32<7>> =
			[1, 2, 3, 4].into_iter().map(|k| (k, k * 2)).try_collect().unwrap();
		let b2: BoundedBTreeMap<u32, u32, ConstU32<7>> =
			[1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap();

		assert_eq!(b1, b2.map(|(_, v)| v * 2));
	}

	#[test]
	fn try_map_retains_size() {
		let b1 = boundedmap_from_keys::<u32, ConstU32<7>>(&[1, 2]);
		let b2 = b1.clone();

		assert_eq!(b1.len(), b2.try_map::<_, (), _>(|(_, _)| Ok(5_u32)).unwrap().len());
	}

	#[test]
	fn try_map_maps_properly() {
		let b1: BoundedBTreeMap<u32, u32, ConstU32<7>> =
			[1, 2, 3, 4].into_iter().map(|k| (k, k * 2)).try_collect().unwrap();
		let b2: BoundedBTreeMap<u32, u32, ConstU32<7>> =
			[1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap();

		assert_eq!(b1, b2.try_map::<_, (), _>(|(_, v)| Ok(v * 2)).unwrap());
	}

	#[test]
	fn try_map_short_circuit() {
		let b1: BoundedBTreeMap<u8, u8, ConstU32<7>> = [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap();

		assert_eq!(Err("overflow"), b1.try_map(|(_, v)| v.checked_mul(100).ok_or("overflow")));
	}

	#[test]
	fn try_map_ok() {
		let b1: BoundedBTreeMap<u8, u8, ConstU32<7>> = [1, 2, 3, 4].into_iter().map(|k| (k, k)).try_collect().unwrap();
		let b2: BoundedBTreeMap<u8, u16, ConstU32<7>> =
			[1, 2, 3, 4].into_iter().map(|k| (k, (k as u16) * 100)).try_collect().unwrap();

		assert_eq!(Ok(b2), b1.try_map(|(_, v)| (v as u16).checked_mul(100_u16).ok_or("overflow")));
	}

	#[test]
	fn prepend_compact_input_works() {
		let encoded_len = Compact(3u32).encode();
		let inner = [2, 3, 4];
		let mut input = PrependCompactInput { encoded_len: encoded_len.as_ref(), read: 0, inner: &mut &inner[..] };
		assert_eq!(input.remaining_len(), Ok(Some(4)));

		// Passing an empty buffer should leave input unchanged.
		let mut empty_buf = [];
		assert_eq!(input.read(&mut empty_buf), Ok(()));
		assert_eq!(input.remaining_len(), Ok(Some(4)));
		assert_eq!(input.read, 0);

		// Passing a correctly-sized buffer will read correctly.
		let mut buf = [0; 4];
		assert_eq!(input.read(&mut buf), Ok(()));
		assert_eq!(buf[0], encoded_len[0]);
		assert_eq!(buf[1..], inner[..]);
		// And the bookkeeping agrees.
		assert_eq!(input.remaining_len(), Ok(Some(0)));
		assert_eq!(input.read, encoded_len.len());

		// And we can't read more.
		assert!(input.read(&mut buf).is_err());
	}

	#[test]
	fn prepend_compact_input_incremental_read_works() {
		let encoded_len = Compact(3u32).encode();
		let inner = [2, 3, 4];
		let mut input = PrependCompactInput { encoded_len: encoded_len.as_ref(), read: 0, inner: &mut &inner[..] };
		assert_eq!(input.remaining_len(), Ok(Some(4)));

		// Compact is first byte - ensure that it fills the buffer when it's more than one.
		let mut buf = [0u8; 2];
		assert_eq!(input.read(&mut buf), Ok(()));
		assert_eq!(buf[0], encoded_len[0]);
		assert_eq!(buf[1], inner[0]);
		assert_eq!(input.remaining_len(), Ok(Some(2)));
		assert_eq!(input.read, encoded_len.len());

		// Check the last two bytes are read correctly.
		assert_eq!(input.read(&mut buf), Ok(()));
		assert_eq!(buf[..], inner[1..]);
		assert_eq!(input.remaining_len(), Ok(Some(0)));
		assert_eq!(input.read, encoded_len.len());

		// And we can't read more.
		assert!(input.read(&mut buf).is_err());
	}

	// Just a test that structs containing `BoundedBTreeMap` can derive `Hash`. (This was broken
	// when it was deriving `Hash`).
	#[test]
	#[cfg(feature = "std")]
	fn container_can_derive_hash() {
		#[derive(Hash, Default)]
		struct Foo {
			bar: u8,
			map: BoundedBTreeMap<String, usize, ConstU32<16>>,
		}
		let _foo = Foo::default();
	}

	#[cfg(feature = "serde")]
	mod serde {
		use super::*;
		use crate::alloc::string::ToString;

		#[test]
		fn test_bounded_btreemap_serializer() {
			let mut map = BoundedBTreeMap::<u32, u32, ConstU32<6>>::new();
			map.try_insert(0, 100).unwrap();
			map.try_insert(1, 101).unwrap();
			map.try_insert(2, 102).unwrap();

			let serialized = serde_json::to_string(&map).unwrap();
			assert_eq!(serialized, r#"{"0":100,"1":101,"2":102}"#);
		}

		#[test]
		fn test_bounded_btreemap_deserializer() {
			let json_str = r#"{"0":100,"1":101,"2":102}"#;
			let map: Result<BoundedBTreeMap<u32, u32, ConstU32<6>>, serde_json::Error> = serde_json::from_str(json_str);
			assert!(map.is_ok());
			let map = map.unwrap();

			assert_eq!(map.len(), 3);
			assert_eq!(map.get(&0), Some(&100));
			assert_eq!(map.get(&1), Some(&101));
			assert_eq!(map.get(&2), Some(&102));
		}

		#[test]
		fn test_bounded_btreemap_deserializer_bound() {
			let json_str = r#"{"0":100,"1":101,"2":102}"#;
			let map: Result<BoundedBTreeMap<u32, u32, ConstU32<3>>, serde_json::Error> = serde_json::from_str(json_str);
			assert!(map.is_ok());
			let map = map.unwrap();

			assert_eq!(map.len(), 3);
			assert_eq!(map.get(&0), Some(&100));
			assert_eq!(map.get(&1), Some(&101));
			assert_eq!(map.get(&2), Some(&102));
		}

		#[test]
		fn test_bounded_btreemap_deserializer_failed() {
			let json_str = r#"{"0":100,"1":101,"2":102,"3":103,"4":104}"#;
			let map: Result<BoundedBTreeMap<u32, u32, ConstU32<4>>, serde_json::Error> = serde_json::from_str(json_str);

			match map {
				Err(e) => {
					assert!(e.to_string().contains("map exceeds the size of the bounds"));
				},
				_ => unreachable!("deserializer must raise error"),
			}
		}
	}

	#[test]
	fn is_full_works() {
		let mut bounded = boundedmap_from_keys::<u32, ConstU32<4>>(&[1, 2, 3]);
		assert!(!bounded.is_full());
		bounded.try_insert(0, ()).unwrap();
		assert_eq!(*bounded, map_from_keys(&[1, 0, 2, 3]));

		assert!(bounded.is_full());
		assert!(bounded.try_insert(9, ()).is_err());
		assert_eq!(*bounded, map_from_keys(&[1, 0, 2, 3]));
	}
}