indexmap_amortized/map/
core.rs

1//! This is the core implementation that doesn't depend on the hasher at all.
2//!
3//! The methods of `IndexMapCore` don't use any Hash properties of K.
4//!
//! It's cleaner to separate them out; the compiler can then verify that we are
//! not using `Hash` at all in these methods.
7//!
8//! However, we should probably not let this show in the public API or docs.
9
10mod raw;
11
12use griddle::raw::RawTable;
13
14use atone::vc::Drain;
15
16use core::cmp;
17use core::fmt;
18use core::mem::replace;
19use core::ops::RangeBounds;
20
21use crate::equivalent::Equivalent;
22use crate::util::{enumerate, simplify_range};
23use crate::EntryVec;
24use crate::{Bucket, Entries, HashValue};
25
/// Core of the map that does not depend on S (the hash builder).
///
/// `indices` and `entries` are kept in sync: every stored pair lives at some
/// position in `entries`, and that position is stored in `indices` under the
/// pair's hash.
pub(crate) struct IndexMapCore<K, V> {
    /// indices mapping from the entry hash to its index in `entries`.
    indices: RawTable<usize>,
    /// entries is a dense vec of entries in their order (insertion order).
    entries: EntryVec<Bucket<K, V>>,
}
33
34#[inline(always)]
35fn get_hash<K, V>(entries: &EntryVec<Bucket<K, V>>) -> impl Fn(&usize) -> u64 + '_ {
36    move |&i| entries[i].hash.get()
37}
38
39#[inline]
40fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>(
41    key: &'a Q,
42    entries: &'a EntryVec<Bucket<K, V>>,
43) -> impl Fn(&usize) -> bool + 'a {
44    move |&i| Q::equivalent(key, &entries[i].key)
45}
46
47#[inline]
48fn erase_index(table: &mut RawTable<usize>, hash: HashValue, index: usize) {
49    table.erase_entry(hash.get(), move |&i| i == index);
50}
51
52#[inline]
53fn update_index(table: &mut RawTable<usize>, hash: HashValue, old: usize, new: usize) {
54    let index = table
55        .get_mut(hash.get(), move |&i| i == old)
56        .expect("index not found");
57    *index = new;
58}
59
60impl<K, V> Clone for IndexMapCore<K, V>
61where
62    K: Clone,
63    V: Clone,
64{
65    fn clone(&self) -> Self {
66        let indices = {
67            let hasher = get_hash(&self.entries);
68            self.indices.clone_with_hasher(hasher)
69        };
70        let mut entries = EntryVec::with_capacity(indices.capacity());
71        entries.clone_from(&self.entries);
72        IndexMapCore { indices, entries }
73    }
74
75    fn clone_from(&mut self, other: &Self) {
76        let hasher = get_hash(&other.entries);
77        self.indices.clone_from_with_hasher(&other.indices, hasher);
78        if self.entries.capacity() < other.entries.len() {
79            // If we must resize, match the indices capacity
80            self.reserve_entries();
81        }
82        self.entries.clone_from(&other.entries);
83    }
84}
85
86impl<K, V> fmt::Debug for IndexMapCore<K, V>
87where
88    K: fmt::Debug,
89    V: fmt::Debug,
90{
91    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
92        f.debug_struct("IndexMapCore")
93            .field("indices", &raw::DebugIndices(&self.indices))
94            .field("entries", &self.entries)
95            .finish()
96    }
97}
98
99impl<K, V> Entries for IndexMapCore<K, V> {
100    type Entry = Bucket<K, V>;
101
102    #[inline]
103    fn into_entries(self) -> EntryVec<Self::Entry> {
104        self.entries
105    }
106
107    #[inline]
108    fn as_entries(&self) -> &EntryVec<Self::Entry> {
109        &self.entries
110    }
111
112    #[inline]
113    fn as_entries_mut(&mut self) -> &mut EntryVec<Self::Entry> {
114        &mut self.entries
115    }
116
117    fn with_entries<F>(&mut self, f: F)
118    where
119        F: FnOnce(&mut EntryVec<Self::Entry>),
120    {
121        f(&mut self.entries);
122        self.rebuild_hash_table();
123    }
124}
125
impl<K, V> IndexMapCore<K, V> {
    /// Create an empty core; neither half allocates until the first insert.
    #[inline]
    pub(crate) fn new() -> Self {
        IndexMapCore {
            indices: RawTable::new(),
            entries: EntryVec::new(),
        }
    }

    /// Create a core with room for `n` key-value pairs in both halves.
    #[inline]
    pub(crate) fn with_capacity(n: usize) -> Self {
        IndexMapCore {
            indices: RawTable::with_capacity(n),
            entries: EntryVec::with_capacity(n),
        }
    }

    /// Number of key-value pairs. (`indices` and `entries` always have the
    /// same length, so either could be queried here.)
    #[inline]
    pub(crate) fn len(&self) -> usize {
        self.indices.len()
    }

    /// Capacity before either half would need to reallocate.
    #[inline]
    pub(crate) fn capacity(&self) -> usize {
        cmp::min(self.indices.capacity(), self.entries.capacity())
    }

    /// Remove all key-value pairs, keeping allocated capacity.
    pub(crate) fn clear(&mut self) {
        self.indices.clear();
        self.entries.clear();
    }

    /// Shorten the map to at most `len` pairs, dropping the tail in order.
    /// No-op if the map is already short enough.
    pub(crate) fn truncate(&mut self, len: usize) {
        if len < self.len() {
            // Erase the tail's indices first, while `entries` still holds
            // the hashes that `erase_indices` reads.
            self.erase_indices(len, self.entries.len());
            self.entries.truncate(len);
        }
    }

    /// Remove and yield the pairs in `range`, shifting later pairs down.
    pub(crate) fn drain<R>(&mut self, range: R) -> Drain<'_, Bucket<K, V>>
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.entries.len());
        // Fix up `indices` before `entries` changes, as in `truncate`.
        self.erase_indices(range.start, range.end);
        self.entries.drain(range)
    }

    /// Split the map in two at `at`: `self` keeps `..at`, the returned map
    /// gets `at..` (re-indexed from zero).
    ///
    /// Panics if `at` is out of bounds.
    pub(crate) fn split_off(&mut self, at: usize) -> Self {
        assert!(at <= self.entries.len());
        self.erase_indices(at, self.entries.len());
        let entries = self.entries.split_off(at);

        // Build a fresh index table for the tail. It is sized exactly, so
        // `insert_no_grow` can never trigger a rehash.
        let mut indices = RawTable::with_capacity(entries.len());
        for (i, entry) in enumerate(&entries) {
            indices.insert_no_grow(entry.hash.get(), i, |_| unreachable!());
        }
        Self { indices, entries }
    }

    /// Reserve capacity for `additional` more key-value pairs.
    pub(crate) fn reserve(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        self.reserve_entries();
    }

    /// Reserve entries capacity to match the indices
    fn reserve_entries(&mut self) {
        // Safe subtraction: at every call site the indices were just grown
        // (or cloned) to cover at least the current entry count.
        let additional = self.indices.capacity() - self.entries.len();
        self.entries.reserve_exact(additional);
    }

    /// Shrink the capacity of the map as much as possible.
    pub(crate) fn shrink_to_fit(&mut self) {
        self.indices.shrink_to(0, get_hash(&self.entries));
        self.entries.shrink_to_fit();
    }

    /// Remove the last key-value pair
    pub(crate) fn pop(&mut self) -> Option<(K, V)> {
        if let Some(entry) = self.entries.pop() {
            // After the pop, `len()` is the index the entry occupied.
            let last = self.entries.len();
            erase_index(&mut self.indices, entry.hash, last);
            Some((entry.key, entry.value))
        } else {
            None
        }
    }

    /// Append a key-value pair, *without* checking whether it already exists,
    /// and return the pair's new index.
    fn push(&mut self, hash: HashValue, key: K, value: V) -> usize {
        let i = self.entries.len();
        self.indices.insert(hash.get(), i, get_hash(&self.entries));
        if i == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `Vec::push` just double it.
            self.reserve_entries();
        }
        self.entries.push(Bucket { hash, key, value });
        i
    }

    /// Return the index in `entries` where an equivalent key can be found
    pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        self.indices.get(hash.get(), eq).copied()
    }

    /// Insert the pair, replacing (and returning) any existing value for an
    /// equivalent key. Returns the pair's index in either case.
    pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>)
    where
        K: Eq,
    {
        match self.get_index_of(hash, &key) {
            Some(i) => (i, Some(replace(&mut self.entries[i].value, value))),
            None => (self.push(hash, key, value), None),
        }
    }

    /// Remove an entry by shifting all entries that follow it
    pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.remove_entry(hash.get(), eq) {
            Some(index) => {
                let (key, value) = self.shift_remove_finish(index);
                Some((index, key, value))
            }
            None => None,
        }
    }

    /// Remove an entry by shifting all entries that follow it
    pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(&mut self.indices, entry.hash, index);
                Some(self.shift_remove_finish(index))
            }
            None => None,
        }
    }

    /// Remove an entry by shifting all entries that follow it
    ///
    /// The index should already be removed from `self.indices`.
    fn shift_remove_finish(&mut self, index: usize) -> (K, V) {
        // use Vec::remove, but then we need to update the indices that point
        // to all of the other entries that have to move
        let entry = self.entries.remove(index);

        // correct indices that point to the entries that followed the removed entry.
        // use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let raw_capacity = self.indices.buckets();
        let shifted_entries = self.entries.range(index..);
        if shifted_entries.len() > raw_capacity / 2 {
            // shift all indices greater than `index`
            // (drop releases the borrow of `entries` so that `indices_mut`
            // may borrow `self` mutably)
            drop(shifted_entries);
            for i in self.indices_mut() {
                if *i > index {
                    *i -= 1;
                }
            }
        } else {
            // find each following entry to shift its index
            for (i, entry) in (index + 1..).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, i, i - 1);
            }
        }

        (entry.key, entry.value)
    }

    /// Remove an entry by swapping it with the last
    pub(crate) fn swap_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.remove_entry(hash.get(), eq) {
            Some(index) => {
                let (key, value) = self.swap_remove_finish(index);
                Some((index, key, value))
            }
            None => None,
        }
    }

    /// Remove an entry by swapping it with the last
    pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(&mut self.indices, entry.hash, index);
                Some(self.swap_remove_finish(index))
            }
            None => None,
        }
    }

    /// Finish removing an entry by swapping it with the last
    ///
    /// The index should already be removed from `self.indices`.
    fn swap_remove_finish(&mut self, index: usize) -> (K, V) {
        // use swap_remove, but then we need to update the index that points
        // to the other entry that has to move
        let entry = self.entries.swap_remove(index);

        // correct index that points to the entry that had to swap places
        if let Some(entry) = self.entries.get(index) {
            // was not last element
            // examine new element in `index` and find it in indices
            let last = self.entries.len();
            update_index(&mut self.indices, entry.hash, last, index);
        }

        (entry.key, entry.value)
    }

    /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..`
    ///
    /// All of these items should still be at their original location in `entries`.
    /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`.
    fn erase_indices(&mut self, start: usize, end: usize) {
        let start_entries = self.entries.range(..start);
        let erased_entries = self.entries.range(start..end);
        let shifted_entries = self.entries.range(end..);

        let erased = erased_entries.len();
        let shifted = shifted_entries.len();
        let half_capacity = self.indices.buckets() / 2;

        // Use a heuristic between different strategies
        if erased == 0 {
            // Degenerate case, nothing to do
        } else if start + shifted < half_capacity && start < erased {
            // Reinsert everything, as there are few kept indices
            self.indices.clear();

            // Reinsert stable indices (`..start`, positions unchanged)
            // We should never have to reallocate, so there's no need for a real hasher.
            for (i, entry) in enumerate(start_entries) {
                self.indices
                    .insert_no_grow(entry.hash.get(), i, |_| unreachable!());
            }

            // Reinsert shifted indices (`end..` moved down to `start..`)
            for (i, entry) in (start..).zip(shifted_entries) {
                self.indices
                    .insert_no_grow(entry.hash.get(), i, |_| unreachable!());
            }
        } else if erased + shifted < half_capacity {
            // Find each affected index, as there are few to adjust

            // Find erased indices
            for (i, entry) in (start..).zip(erased_entries) {
                erase_index(&mut self.indices, entry.hash, i);
            }

            // Find shifted indices
            for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, old, new);
            }
        } else {
            // Sweep the whole table for adjustments
            // (drop the range borrows so the sweep can borrow `self` mutably)
            drop(start_entries);
            drop(erased_entries);
            drop(shifted_entries);
            self.erase_indices_sweep(start, end);
        }

        debug_assert_eq!(self.indices.len(), start + shifted);
    }

    /// Keep only the pairs for which `keep` returns true, preserving order,
    /// with mutable access to both key and value.
    pub(crate) fn retain_in_order<F>(&mut self, mut keep: F)
    where
        F: FnMut(&mut K, &mut V) -> bool,
    {
        // Like Vec::retain in self.entries, but with mutable K and V.
        // We swap-shift all the items we want to keep, truncate the rest,
        // then rebuild the raw hash table with the new indexes.
        let len = self.entries.len();
        let mut n_deleted = 0;
        for i in 0..len {
            let will_keep = {
                let entry = &mut self.entries[i];
                keep(&mut entry.key, &mut entry.value)
            };
            if !will_keep {
                n_deleted += 1;
            } else if n_deleted > 0 {
                // Move the kept entry down over the gap left by deletions.
                self.entries.swap(i - n_deleted, i);
            }
        }
        if n_deleted > 0 {
            self.entries.truncate(len - n_deleted);
            self.rebuild_hash_table();
        }
    }

    /// Clear `indices` and repopulate it from `entries`' current positions.
    fn rebuild_hash_table(&mut self) {
        self.indices.clear();
        debug_assert!(self.indices.capacity() >= self.entries.len());
        for (i, entry) in enumerate(&self.entries) {
            // We should never have to reallocate, so there's no need for a real hasher.
            self.indices
                .insert_no_grow(entry.hash.get(), i, |_| unreachable!());
        }
    }

    /// Reverse the order of the pairs in place.
    pub(crate) fn reverse(&mut self) {
        self.entries.reverse();

        // No need to save hash indices, can easily calculate what they should
        // be, given that this is an in-place reversal.
        let len = self.entries.len();
        for i in self.indices_mut() {
            *i = len - *i - 1;
        }
    }
}
451
/// Entry for an existing key-value pair or a vacant location to
/// insert one.
pub enum Entry<'a, K, V> {
    /// Existing slot with equivalent key; wraps an [`OccupiedEntry`].
    Occupied(OccupiedEntry<'a, K, V>),
    /// Vacant slot (no equivalent key in the map); wraps a [`VacantEntry`].
    Vacant(VacantEntry<'a, K, V>),
}
460
461impl<'a, K, V> Entry<'a, K, V> {
462    /// Computes in **O(1)** time (amortized average).
463    pub fn or_insert(self, default: V) -> &'a mut V {
464        match self {
465            Entry::Occupied(entry) => entry.into_mut(),
466            Entry::Vacant(entry) => entry.insert(default),
467        }
468    }
469
470    /// Computes in **O(1)** time (amortized average).
471    pub fn or_insert_with<F>(self, call: F) -> &'a mut V
472    where
473        F: FnOnce() -> V,
474    {
475        match self {
476            Entry::Occupied(entry) => entry.into_mut(),
477            Entry::Vacant(entry) => entry.insert(call()),
478        }
479    }
480
481    pub fn key(&self) -> &K {
482        match *self {
483            Entry::Occupied(ref entry) => entry.key(),
484            Entry::Vacant(ref entry) => entry.key(),
485        }
486    }
487
488    /// Return the index where the key-value pair exists or will be inserted.
489    pub fn index(&self) -> usize {
490        match *self {
491            Entry::Occupied(ref entry) => entry.index(),
492            Entry::Vacant(ref entry) => entry.index(),
493        }
494    }
495
496    /// Modifies the entry if it is occupied.
497    pub fn and_modify<F>(self, f: F) -> Self
498    where
499        F: FnOnce(&mut V),
500    {
501        match self {
502            Entry::Occupied(mut o) => {
503                f(o.get_mut());
504                Entry::Occupied(o)
505            }
506            x => x,
507        }
508    }
509
510    /// Inserts a default-constructed value in the entry if it is vacant and returns a mutable
511    /// reference to it. Otherwise a mutable reference to an already existent value is returned.
512    ///
513    /// Computes in **O(1)** time (amortized average).
514    pub fn or_default(self) -> &'a mut V
515    where
516        V: Default,
517    {
518        match self {
519            Entry::Occupied(entry) => entry.into_mut(),
520            Entry::Vacant(entry) => entry.insert(V::default()),
521        }
522    }
523}
524
525impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Entry<'_, K, V> {
526    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
527        match *self {
528            Entry::Vacant(ref v) => f.debug_tuple(stringify!(Entry)).field(v).finish(),
529            Entry::Occupied(ref o) => f.debug_tuple(stringify!(Entry)).field(o).finish(),
530        }
531    }
532}
533
534pub use self::raw::OccupiedEntry;
535
// Extra methods that don't threaten the unsafe encapsulation.
impl<K, V> OccupiedEntry<'_, K, V> {
    /// Sets the value of the entry to `value`, and returns the entry's old value.
    pub fn insert(&mut self, value: V) -> V {
        replace(self.get_mut(), value)
    }

    /// Remove the key, value pair stored in the map for this entry, and return the value.
    ///
    /// **NOTE:** This is equivalent to `.swap_remove()`.
    pub fn remove(self) -> V {
        self.swap_remove()
    }

    /// Remove the key, value pair stored in the map for this entry, and return the value.
    ///
    /// Like `Vec::swap_remove`, the pair is removed by swapping it with the
    /// last element of the map and popping it off. **This perturbs
    /// the position of what used to be the last element!**
    ///
    /// Computes in **O(1)** time (average).
    pub fn swap_remove(self) -> V {
        self.swap_remove_entry().1
    }

    /// Remove the key, value pair stored in the map for this entry, and return the value.
    ///
    /// Like `Vec::remove`, the pair is removed by shifting all of the
    /// elements that follow it, preserving their relative order.
    /// **This perturbs the index of all of those elements!**
    ///
    /// Computes in **O(n)** time (average).
    pub fn shift_remove(self) -> V {
        self.shift_remove_entry().1
    }

    /// Remove and return the key, value pair stored in the map for this entry
    ///
    /// **NOTE:** This is equivalent to `.swap_remove_entry()`.
    pub fn remove_entry(self) -> (K, V) {
        self.swap_remove_entry()
    }
}
579
580impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for OccupiedEntry<'_, K, V> {
581    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
582        f.debug_struct(stringify!(OccupiedEntry))
583            .field("key", self.key())
584            .field("value", self.get())
585            .finish()
586    }
587}
588
/// A view into a vacant entry in a `IndexMap`.
/// It is part of the [`Entry`] enum.
///
/// [`Entry`]: enum.Entry.html
pub struct VacantEntry<'a, K, V> {
    /// The map core this entry would insert into.
    map: &'a mut IndexMapCore<K, V>,
    /// Precomputed hash of `key`, used on insertion.
    hash: HashValue,
    /// The key that was looked up but not found.
    key: K,
}
598
599impl<'a, K, V> VacantEntry<'a, K, V> {
600    pub fn key(&self) -> &K {
601        &self.key
602    }
603
604    pub fn into_key(self) -> K {
605        self.key
606    }
607
608    /// Return the index where the key-value pair will be inserted.
609    pub fn index(&self) -> usize {
610        self.map.len()
611    }
612
613    pub fn insert(self, value: V) -> &'a mut V {
614        let i = self.map.push(self.hash, self.key, value);
615        &mut self.map.entries[i].value
616    }
617}
618
619impl<K: fmt::Debug, V> fmt::Debug for VacantEntry<'_, K, V> {
620    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
621        f.debug_tuple(stringify!(VacantEntry))
622            .field(self.key())
623            .finish()
624    }
625}
626
#[test]
fn assert_send_sync() {
    // Compile-time check: these types must be Send + Sync.
    fn is_send_sync<T: Send + Sync>() {}
    is_send_sync::<IndexMapCore<i32, i32>>();
    is_send_sync::<Entry<'_, i32, i32>>();
}