// orx_v/cached/into_cached.rs
use super::{cache::DefaultCache, Cache, CachedVec};
use crate::{Dim, NVec};
use core::hash::Hash;
/// Converts an `NVec<D, T>` into a cached vector which maintains an internal cache.
///
/// Each time an element is requested, the vector first checks the cache:
/// * if the value is readily available in the cache, the vector returns it,
/// * otherwise, it computes its value, caches it for future use and returns it.
///
/// The cache is often a simple lookup or map, such as the HashMap. However, it might
/// be more advanced such as the least frequently used cache. Any internal structure
/// implementing the [`Cache`] can be used.
///
/// The aim of the cached vector is to improve the execution time where computing an
/// element is expensive. Consider for instance element (i, j) corresponds to the
/// duration between two addresses i and j on a street network which requires running
/// an algorithm to compute. Further assume that:
/// * pre-computing all elements is not justified as we will access only a small portion
///   of the entire matrix, and
/// * we do not know ahead of time which indices we will access.
///
/// Note that a cached vector itself is an `NVec`. Therefore, it abstracts away the internal
/// cache management and allows us to treat it as any other vector.
///
/// In such scenarios, [`IntoCached`] trait makes it very convenient to convert a functional
/// vector into a cached functional vector.
///
/// # Examples
///
/// ```
/// use orx_v::*;
///
/// // assume an expensive api call to compute distance
/// fn api_call_to_get_distance(from: usize, to: usize) -> u64 {
///     match from > to {
///         true => (from - to) as u64,
///         false => (to - from) as u64,
///     }
/// }
///
/// let v2 = V.d2().fun(|[i, j]| api_call_to_get_distance(i, j)).into_cached();
/// assert_eq!(v2.cache_len(), 0);
///
/// // makes the api call; caches and returns the value
/// assert_eq!(v2.at([0, 3]), 3);
/// assert_eq!(v2.cache_len(), 1);
///
/// // does not repeat the api call; returns the value from the cache
/// assert_eq!(v2.at([0, 3]), 3);
/// ```
pub trait IntoCached<D, T>: NVec<D, T>
where
    D: Dim,
    D::Idx: Ord + Hash,
    T: Copy,
{
    /// Converts an `NVec<D, T>` into a cached vector which maintains an internal cache,
    /// starting with an empty [`DefaultCache`].
    ///
    /// Each time an element is requested, the vector first checks the cache:
    /// * if the value is readily available in the cache, the vector returns it,
    /// * otherwise, it computes its value, caches it for future use and returns it.
    ///
    /// The cache is often a simple lookup or map, such as the HashMap. However, it might
    /// be more advanced such as the least frequently used cache. Any internal structure
    /// implementing the [`Cache`] can be used.
    ///
    /// The aim of the cached vector is to improve the execution time where computing an
    /// element is expensive. Consider for instance element (i, j) corresponds to the
    /// duration between two addresses i and j on a street network which requires running
    /// an algorithm to compute. Further assume that:
    /// * pre-computing all elements is not justified as we will access only a small portion
    ///   of the entire matrix, and
    /// * we do not know ahead of time which indices we will access.
    ///
    /// Note that a cached vector itself is an `NVec`. Therefore, it abstracts away the internal
    /// cache management and allows us to treat it as any other vector.
    ///
    /// In such scenarios, [`IntoCached`] trait makes it very convenient to convert a functional
    /// vector into a cached functional vector.
    ///
    /// # Safety
    ///
    /// The cache implementation that `CachedVec` uses by default is
    /// * `HashMap` when std feature is enabled,
    /// * `BTreeMap` in a no-std program.
    ///
    /// The cached vector adds interior mutability to these structures which is currently not
    /// thread-safe.
    /// Practically, this means the following:
    /// * We can use this vector safely by a single thread.
    /// * Using this vector concurrently by multiple threads leads to data races.
    ///   For instance, if the cached vector is an input of a parallelized algorithm,
    ///   we need to provide a different copy of the vector to each thread.
    ///
    /// # Examples
    ///
    /// ```
    /// use orx_v::*;
    ///
    /// // assume an expensive api call to compute distance
    /// fn api_call_to_get_distance(from: usize, to: usize) -> u64 {
    ///     match from > to {
    ///         true => (from - to) as u64,
    ///         false => (to - from) as u64,
    ///     }
    /// }
    ///
    /// let v2 = V.d2().fun(|[i, j]| api_call_to_get_distance(i, j)).into_cached();
    /// assert_eq!(v2.cache_len(), 0);
    ///
    /// // makes the api call; caches and returns the value
    /// assert_eq!(v2.at([0, 3]), 3);
    /// assert_eq!(v2.cache_len(), 1);
    ///
    /// // does not repeat the api call; returns the value from the cache
    /// assert_eq!(v2.at([0, 3]), 3);
    /// ```
    fn into_cached(self) -> CachedVec<D, T, Self, DefaultCache<D, T>> {
        // Delegate to `into_cached_with` so that the construction logic
        // lives in exactly one place; an empty default cache is used.
        self.into_cached_with(DefaultCache::<D, T>::default())
    }

    /// Converts an `NVec<D, T>` into a cached vector which maintains the internal cache
    /// provided as the `cache` argument; entries already present in it are served without
    /// recomputation.
    ///
    /// Each time an element is requested, the vector first checks the cache:
    /// * if the value is readily available in the cache, the vector returns it,
    /// * otherwise, it computes its value, caches it for future use and returns it.
    ///
    /// The cache is often a simple lookup or map, such as the HashMap. However, it might
    /// be more advanced such as the least frequently used cache. Any internal structure
    /// implementing the [`Cache`] can be used.
    ///
    /// The aim of the cached vector is to improve the execution time where computing an
    /// element is expensive. Consider for instance element (i, j) corresponds to the
    /// duration between two addresses i and j on a street network which requires running
    /// an algorithm to compute. Further assume that:
    /// * pre-computing all elements is not justified as we will access only a small portion
    ///   of the entire matrix, and
    /// * we do not know ahead of time which indices we will access.
    ///
    /// Note that a cached vector itself is an `NVec`. Therefore, it abstracts away the internal
    /// cache management and allows us to treat it as any other vector.
    ///
    /// In such scenarios, [`IntoCached`] trait makes it very convenient to convert a functional
    /// vector into a cached functional vector.
    ///
    /// # Examples
    ///
    /// ```
    /// use orx_v::*;
    ///
    /// // assume an expensive api call to compute distance
    /// fn api_call_to_get_distance(from: usize, to: usize) -> u64 {
    ///     match from > to {
    ///         true => (from - to) as u64,
    ///         false => (to - from) as u64,
    ///     }
    /// }
    ///
    /// let cache = DefaultCache::<D2, _>::from_iter([([4, 0], 4)]);
    /// let v2 = V.d2().fun(|[i, j]| api_call_to_get_distance(i, j)).into_cached_with(cache);
    /// assert_eq!(v2.cache_len(), 1);
    ///
    /// // does not make an api call since the element exists in the cache
    /// assert_eq!(v2.at([4, 0]), 4);
    /// assert_eq!(v2.cache_len(), 1);
    ///
    /// // makes the api call; caches and returns the value
    /// assert_eq!(v2.at([0, 3]), 3);
    /// assert_eq!(v2.cache_len(), 2);
    ///
    /// // does not repeat the api call; returns the value from the cache
    /// assert_eq!(v2.at([0, 3]), 3);
    ///
    /// // we can destruct the cached vec into vec & cache
    /// let (funvec, cache) = v2.into_inner();
    /// assert_eq!(cache.len(), 2);
    /// ```
    fn into_cached_with<C: Cache<D::Idx, T>>(self, cache: C) -> CachedVec<D, T, Self, C> {
        CachedVec::new(self, cache)
    }
}