mod raw;

use griddle::raw::RawTable;

use atone::vc::Drain;

use core::cmp;
use core::fmt;
use core::mem::replace;
use core::ops::RangeBounds;

use crate::equivalent::Equivalent;
use crate::util::{enumerate, simplify_range};
use crate::EntryVec;
use crate::{Bucket, Entries, HashValue};

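/// Core of the map that does not depend on the hash builder type.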
pub(crate) struct IndexMapCore<K, V> {
    /// indices that map an entry's hash to its position in `entries`
    indices: RawTable<usize>,
    /// the key-value entries, stored in insertion order
    entries: EntryVec<Bucket<K, V>>,
}

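/// Returns a hasher closure that maps a stored index to its entry's saved hash.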
#[inline(always)]
fn get_hash<K, V>(entries: &EntryVec<Bucket<K, V>>) -> impl Fn(&usize) -> u64 + '_ {
    move |&i| entries[i].hash.get()
}

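/// Returns an equality closure that compares `key` against the entry at a given index.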
#[inline]
fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>(
    key: &'a Q,
    entries: &'a EntryVec<Bucket<K, V>>,
) -> impl Fn(&usize) -> bool + 'a {
    move |&i| Q::equivalent(key, &entries[i].key)
}

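/// Removes the table slot that holds `index` for an entry with the given hash.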
#[inline]
fn erase_index(table: &mut RawTable<usize>, hash: HashValue, index: usize) {
    table.erase_entry(hash.get(), move |&i| i == index);
}

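/// Replaces the stored index `old` with `new` for an entry with the given hash.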
#[inline]
fn update_index(table: &mut RawTable<usize>, hash: HashValue, old: usize, new: usize) {
    let index = table
        .get_mut(hash.get(), move |&i| i == old)
        .expect("index not found");
    *index = new;
}

impl<K, V> Clone for IndexMapCore<K, V>
where
    K: Clone,
    V: Clone,
{
    fn clone(&self) -> Self {
        let indices = {
            let hasher = get_hash(&self.entries);
            self.indices.clone_with_hasher(hasher)
        };
        let mut entries = EntryVec::with_capacity(indices.capacity());
        entries.clone_from(&self.entries);
        IndexMapCore { indices, entries }
    }

    fn clone_from(&mut self, other: &Self) {
        let hasher = get_hash(&other.entries);
        self.indices.clone_from_with_hasher(&other.indices, hasher);
        if self.entries.capacity() < other.entries.len() {
            // If we must grow, match the indices capacity.
            self.reserve_entries();
        }
        self.entries.clone_from(&other.entries);
    }
}

impl<K, V> fmt::Debug for IndexMapCore<K, V>
where
    K: fmt::Debug,
    V: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("IndexMapCore")
            .field("indices", &raw::DebugIndices(&self.indices))
            .field("entries", &self.entries)
            .finish()
    }
}

impl<K, V> Entries for IndexMapCore<K, V> {
    type Entry = Bucket<K, V>;

    #[inline]
    fn into_entries(self) -> EntryVec<Self::Entry> {
        self.entries
    }

    #[inline]
    fn as_entries(&self) -> &EntryVec<Self::Entry> {
        &self.entries
    }

    #[inline]
    fn as_entries_mut(&mut self) -> &mut EntryVec<Self::Entry> {
        &mut self.entries
    }

    fn with_entries<F>(&mut self, f: F)
    where
        F: FnOnce(&mut EntryVec<Self::Entry>),
    {
        f(&mut self.entries);
        self.rebuild_hash_table();
    }
}

impl<K, V> IndexMapCore<K, V> {
    #[inline]
    pub(crate) fn new() -> Self {
        IndexMapCore {
            indices: RawTable::new(),
            entries: EntryVec::new(),
        }
    }

    #[inline]
    pub(crate) fn with_capacity(n: usize) -> Self {
        IndexMapCore {
            indices: RawTable::with_capacity(n),
            entries: EntryVec::with_capacity(n),
        }
    }

    #[inline]
    pub(crate) fn len(&self) -> usize {
        self.indices.len()
    }

    #[inline]
    pub(crate) fn capacity(&self) -> usize {
        cmp::min(self.indices.capacity(), self.entries.capacity())
    }

    pub(crate) fn clear(&mut self) {
        self.indices.clear();
        self.entries.clear();
    }

    pub(crate) fn truncate(&mut self, len: usize) {
        if len < self.len() {
            self.erase_indices(len, self.entries.len());
            self.entries.truncate(len);
        }
    }

    pub(crate) fn drain<R>(&mut self, range: R) -> Drain<'_, Bucket<K, V>>
    where
        R: RangeBounds<usize>,
    {
        let range = simplify_range(range, self.entries.len());
        self.erase_indices(range.start, range.end);
        self.entries.drain(range)
    }

    pub(crate) fn split_off(&mut self, at: usize) -> Self {
        assert!(at <= self.entries.len());
        self.erase_indices(at, self.entries.len());
        let entries = self.entries.split_off(at);

        let mut indices = RawTable::with_capacity(entries.len());
        for (i, entry) in enumerate(&entries) {
            indices.insert_no_grow(entry.hash.get(), i, |_| unreachable!());
        }
        Self { indices, entries }
    }

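    /// Reserve capacity for `additional` more key-value pairs.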
    pub(crate) fn reserve(&mut self, additional: usize) {
        self.indices.reserve(additional, get_hash(&self.entries));
        self.reserve_entries();
    }

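    /// Reserve entries capacity to match the indices.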
    fn reserve_entries(&mut self) {
        let additional = self.indices.capacity() - self.entries.len();
        self.entries.reserve_exact(additional);
    }

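    /// Shrink the capacity of the map as much as possible.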
    pub(crate) fn shrink_to_fit(&mut self) {
        self.indices.shrink_to(0, get_hash(&self.entries));
        self.entries.shrink_to_fit();
    }

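    /// Remove the last key-value pair.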
    pub(crate) fn pop(&mut self) -> Option<(K, V)> {
        if let Some(entry) = self.entries.pop() {
            let last = self.entries.len();
            erase_index(&mut self.indices, entry.hash, last);
            Some((entry.key, entry.value))
        } else {
            None
        }
    }

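    /// Append a key-value pair *without* checking whether it already exists,
    /// and return the pair's new index.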
    fn push(&mut self, hash: HashValue, key: K, value: V) -> usize {
        let i = self.entries.len();
        self.indices.insert(hash.get(), i, get_hash(&self.entries));
        if i == self.entries.capacity() {
            // Reserve our own capacity synced to the indices,
            // rather than letting `push` grow the entries on its own.
            self.reserve_entries();
        }
        self.entries.push(Bucket { hash, key, value });
        i
    }

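    /// Return the index in `entries` where an equivalent key can be found.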
    pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        self.indices.get(hash.get(), eq).copied()
    }

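    /// Insert a key-value pair, returning the entry's index and the previous
    /// value if an equivalent key was already present.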
    pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>)
    where
        K: Eq,
    {
        match self.get_index_of(hash, &key) {
            Some(i) => (i, Some(replace(&mut self.entries[i].value, value))),
            None => (self.push(hash, key, value), None),
        }
    }

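    /// Remove an entry by shifting all entries that follow it.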
    pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.remove_entry(hash.get(), eq) {
            Some(index) => {
                let (key, value) = self.shift_remove_finish(index);
                Some((index, key, value))
            }
            None => None,
        }
    }

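    /// Remove an entry by shifting all entries that follow it.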
    pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(&mut self.indices, entry.hash, index);
                Some(self.shift_remove_finish(index))
            }
            None => None,
        }
    }

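    /// Finish removing an entry by shifting all entries that follow it.
    ///
    /// The index should already be removed from `self.indices`.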
    fn shift_remove_finish(&mut self, index: usize) -> (K, V) {
        // Remove the entry, then fix the indices of every entry that moved.
        let entry = self.entries.remove(index);

        // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
        let raw_capacity = self.indices.buckets();
        let shifted_entries = self.entries.range(index..);
        if shifted_entries.len() > raw_capacity / 2 {
            // Drop the `entries` borrow so `indices` can be walked mutably,
            // shifting all indices greater than `index`.
            drop(shifted_entries);
            for i in self.indices_mut() {
                if *i > index {
                    *i -= 1;
                }
            }
        } else {
            // Find each following entry to shift its index.
            for (i, entry) in (index + 1..).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, i, i - 1);
            }
        }

        (entry.key, entry.value)
    }

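    /// Remove an entry by swapping it with the last.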
    pub(crate) fn swap_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
    where
        Q: ?Sized + Equivalent<K>,
    {
        let eq = equivalent(key, &self.entries);
        match self.indices.remove_entry(hash.get(), eq) {
            Some(index) => {
                let (key, value) = self.swap_remove_finish(index);
                Some((index, key, value))
            }
            None => None,
        }
    }

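    /// Remove an entry by swapping it with the last.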
    pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
        match self.entries.get(index) {
            Some(entry) => {
                erase_index(&mut self.indices, entry.hash, index);
                Some(self.swap_remove_finish(index))
            }
            None => None,
        }
    }

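    /// Finish removing an entry by swapping it with the last.
    ///
    /// The index should already be removed from `self.indices`.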
    fn swap_remove_finish(&mut self, index: usize) -> (K, V) {
        // Swap-remove the entry, then fix the index that points to the entry
        // that moved into `index`.
        let entry = self.entries.swap_remove(index);

        // If something now occupies `index`, the removed entry was not the last one.
        if let Some(entry) = self.entries.get(index) {
            // The moved entry used to live at the old last position.
            let last = self.entries.len();
            update_index(&mut self.indices, entry.hash, last, index);
        }

        (entry.key, entry.value)
    }

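    /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..`.
    ///
    /// All of these items should still be at their original location in `entries`;
    /// callers like `drain` remove them from `entries` afterward.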
    fn erase_indices(&mut self, start: usize, end: usize) {
        let start_entries = self.entries.range(..start);
        let erased_entries = self.entries.range(start..end);
        let shifted_entries = self.entries.range(end..);

        let erased = erased_entries.len();
        let shifted = shifted_entries.len();
        let half_capacity = self.indices.buckets() / 2;

        // Use a heuristic between different strategies.
        if erased == 0 {
            // Degenerate case, nothing to do.
        } else if start + shifted < half_capacity && start < erased {
            // Reinsert everything, as there are few kept indices.
            self.indices.clear();

            // Reinsert stable indices.
            for (i, entry) in enumerate(start_entries) {
                self.indices
                    .insert_no_grow(entry.hash.get(), i, |_| unreachable!());
            }

            // Reinsert shifted indices.
            for (i, entry) in (start..).zip(shifted_entries) {
                self.indices
                    .insert_no_grow(entry.hash.get(), i, |_| unreachable!());
            }
        } else if erased + shifted < half_capacity {
            // Find each affected index, as there are few to adjust.

            // Find erased indices.
            for (i, entry) in (start..).zip(erased_entries) {
                erase_index(&mut self.indices, entry.hash, i);
            }

            // Find shifted indices.
            for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) {
                update_index(&mut self.indices, entry.hash, old, new);
            }
        } else {
            // Sweep the whole table for adjustments; drop the `entries` borrows
            // first so the sweep can take `&mut self`.
            drop(start_entries);
            drop(erased_entries);
            drop(shifted_entries);
            self.erase_indices_sweep(start, end);
        }

        debug_assert_eq!(self.indices.len(), start + shifted);
    }

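    /// Keep only the entries for which `keep` returns `true`, preserving
    /// their relative order, then rebuild the hash indices.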
    pub(crate) fn retain_in_order<F>(&mut self, mut keep: F)
    where
        F: FnMut(&mut K, &mut V) -> bool,
    {
        // Like a retain over `entries`, but with mutable access to K and V:
        // swap kept items down over the deleted ones, truncate the rest,
        // then rebuild the raw hash table with the new indices.
        let len = self.entries.len();
        let mut n_deleted = 0;
        for i in 0..len {
            let will_keep = {
                let entry = &mut self.entries[i];
                keep(&mut entry.key, &mut entry.value)
            };
            if !will_keep {
                n_deleted += 1;
            } else if n_deleted > 0 {
                self.entries.swap(i - n_deleted, i);
            }
        }
        if n_deleted > 0 {
            self.entries.truncate(len - n_deleted);
            self.rebuild_hash_table();
        }
    }

    fn rebuild_hash_table(&mut self) {
        self.indices.clear();
        debug_assert!(self.indices.capacity() >= self.entries.len());
        for (i, entry) in enumerate(&self.entries) {
            // Reinsert every entry's index; the table already has enough
            // capacity (see the debug_assert above).
            self.indices
                .insert_no_grow(entry.hash.get(), i, |_| unreachable!());
        }
    }

    pub(crate) fn reverse(&mut self) {
        self.entries.reverse();

        // No need to rebuild the table: each stored index can be remapped
        // directly, since this is an in-place reversal.
        let len = self.entries.len();
        for i in self.indices_mut() {
            *i = len - *i - 1;
        }
    }
}

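/// Entry for an existing key-value pair or a vacant location to insert one.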
pub enum Entry<'a, K, V> {
    /// Existing slot with an equivalent key.
    Occupied(OccupiedEntry<'a, K, V>),
    /// Vacant slot (no equivalent key in the map).
    Vacant(VacantEntry<'a, K, V>),
}

impl<'a, K, V> Entry<'a, K, V> {
    /// Inserts the given default value in the entry if it is vacant and returns
    /// a mutable reference to it. Otherwise a mutable reference to an already
    /// existent value is returned.
    pub fn or_insert(self, default: V) -> &'a mut V {
        match self {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => entry.insert(default),
        }
    }

    /// Inserts the result of the `call` function in the entry if it is vacant
    /// and returns a mutable reference to it. Otherwise a mutable reference to
    /// an already existent value is returned.
    pub fn or_insert_with<F>(self, call: F) -> &'a mut V
    where
        F: FnOnce() -> V,
    {
        match self {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => entry.insert(call()),
        }
    }

    /// Gets a reference to the entry's key, either within the map if occupied,
    /// or else the new key that was used to find the entry.
    pub fn key(&self) -> &K {
        match *self {
            Entry::Occupied(ref entry) => entry.key(),
            Entry::Vacant(ref entry) => entry.key(),
        }
    }

    /// Return the index where the key-value pair exists or will be inserted.
    pub fn index(&self) -> usize {
        match *self {
            Entry::Occupied(ref entry) => entry.index(),
            Entry::Vacant(ref entry) => entry.index(),
        }
    }

    /// Modifies the entry if it is occupied.
    pub fn and_modify<F>(self, f: F) -> Self
    where
        F: FnOnce(&mut V),
    {
        match self {
            Entry::Occupied(mut o) => {
                f(o.get_mut());
                Entry::Occupied(o)
            }
            x => x,
        }
    }

    /// Inserts a default-constructed value in the entry if it is vacant and
    /// returns a mutable reference to it. Otherwise a mutable reference to an
    /// already existent value is returned.
    pub fn or_default(self) -> &'a mut V
    where
        V: Default,
    {
        match self {
            Entry::Occupied(entry) => entry.into_mut(),
            Entry::Vacant(entry) => entry.insert(V::default()),
        }
    }
}

impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Entry<'_, K, V> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            Entry::Vacant(ref v) => f.debug_tuple(stringify!(Entry)).field(v).finish(),
            Entry::Occupied(ref o) => f.debug_tuple(stringify!(Entry)).field(o).finish(),
        }
    }
}

pub use self::raw::OccupiedEntry;

impl<K, V> OccupiedEntry<'_, K, V> {
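    /// Sets the value of the entry to `value`, and returns the entry's old value.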
    pub fn insert(&mut self, value: V) -> V {
        replace(self.get_mut(), value)
    }

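    /// Remove the key, value pair stored in the map for this entry, and return the value.
    ///
    /// **NOTE:** This is equivalent to `.swap_remove()`.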
    pub fn remove(self) -> V {
        self.swap_remove()
    }

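    /// Remove the key, value pair stored in the map for this entry, and return the value.
    ///
    /// The pair is removed by swapping it with the last element of the map and
    /// popping it off. **This perturbs the position of what used to be the last element!**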
    pub fn swap_remove(self) -> V {
        self.swap_remove_entry().1
    }

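    /// Remove the key, value pair stored in the map for this entry, and return the value.
    ///
    /// The pair is removed by shifting all of the elements that follow it,
    /// preserving their relative order. **This perturbs the index of all of those elements!**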
    pub fn shift_remove(self) -> V {
        self.shift_remove_entry().1
    }

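    /// Remove and return the key, value pair stored in the map for this entry.
    ///
    /// **NOTE:** This is equivalent to `.swap_remove_entry()`.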
    pub fn remove_entry(self) -> (K, V) {
        self.swap_remove_entry()
    }
}

impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for OccupiedEntry<'_, K, V> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct(stringify!(OccupiedEntry))
            .field("key", self.key())
            .field("value", self.get())
            .finish()
    }
}

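/// A view into a vacant entry in the map. It is part of the `Entry` enum.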
pub struct VacantEntry<'a, K, V> {
    map: &'a mut IndexMapCore<K, V>,
    hash: HashValue,
    key: K,
}

impl<'a, K, V> VacantEntry<'a, K, V> {
    /// Gets a reference to the key that was used to find the entry.
    pub fn key(&self) -> &K {
        &self.key
    }

    /// Takes ownership of the key.
    pub fn into_key(self) -> K {
        self.key
    }

    /// Return the index where the key-value pair will be inserted.
    pub fn index(&self) -> usize {
        self.map.len()
    }

    /// Inserts the entry's key with the given value, returning a mutable
    /// reference to that value.
    pub fn insert(self, value: V) -> &'a mut V {
        let i = self.map.push(self.hash, self.key, value);
        &mut self.map.entries[i].value
    }
}

impl<K: fmt::Debug, V> fmt::Debug for VacantEntry<'_, K, V> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple(stringify!(VacantEntry))
            .field(self.key())
            .finish()
    }
}

#[test]
fn assert_send_sync() {
    fn assert_send_sync<T: Send + Sync>() {}
    assert_send_sync::<IndexMapCore<i32, i32>>();
    assert_send_sync::<Entry<'_, i32, i32>>();
}