imara_diff/intern.rs

use std::hash::{BuildHasher as _, Hash};
use std::ops::Index;

use hashbrown::hash_table::{Entry, HashTable};
use hashbrown::DefaultHashBuilder as RandomState;

/// A token represented as an interned integer.
///
/// A token represents the smallest possible unit of change during a diff.
/// For text this is usually a line, a word or a single character.
/// All [algorithms](crate::Algorithm) operate on interned tokens instead
/// of using the token data directly.
/// This allows for much better performance by amortizing the cost of hashing/equality.
///
/// While you can intern tokens yourself, it is strongly recommended to use [`InternedInput`].
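///
/// # Example
///
/// A minimal sketch of the `u32` conversions provided below:
///
/// ```
/// # use imara_diff::intern::Token;
/// let token = Token::from(42u32);
/// assert_eq!(u32::from(token), 42);
/// ```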
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
#[repr(transparent)]
pub struct Token(pub u32);

impl From<u32> for Token {
    fn from(token: u32) -> Self {
        Token(token)
    }
}

impl From<Token> for u32 {
    fn from(token: Token) -> Self {
        token.0
    }
}

/// A source of tokens that can be interned by an [`Interner`],
/// for example the lines of a file.
pub trait TokenSource {
    type Token: Hash + Eq;
    type Tokenizer: Iterator<Item = Self::Token>;
    /// Returns an iterator over all tokens in this source.
    fn tokenize(&self) -> Self::Tokenizer;
    /// Returns a rough estimate of the number of tokens, used to
    /// size initial allocations.
    fn estimate_tokens(&self) -> u32;
}
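
// A minimal illustrative implementation of `TokenSource` (a sketch, not part
// of the crate's API): tokenizes a string into lines, assuming lines are the
// desired unit of change.
#[allow(dead_code)]
mod token_source_example {
    use super::TokenSource;

    struct LineSource<'a>(&'a str);

    impl<'a> TokenSource for LineSource<'a> {
        type Token = &'a str;
        type Tokenizer = std::str::Lines<'a>;

        fn tokenize(&self) -> Self::Tokenizer {
            self.0.lines()
        }

        fn estimate_tokens(&self) -> u32 {
            // Guess an average line length of 20 bytes; this only sizes
            // initial allocations, so accuracy is not critical.
            (self.0.len() / 20) as u32 + 1
        }
    }
}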

/// Two lists of interned [tokens](crate::intern::Token) that can be compared with the [`diff`](crate::diff) function.
///
/// A token represents the smallest possible unit of change during a diff.
/// For text this is usually a line, a word or a single character.
/// All [algorithms](crate::Algorithm) operate on interned tokens instead
/// of using the token data directly.
/// This allows for much better performance by amortizing the cost of hashing/equality.
///
/// While you can intern tokens yourself, it is strongly recommended to use this type.
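///
/// # Example
///
/// A minimal sketch, assuming the crate's line-based [`TokenSource`]
/// implementation for `&str`:
///
/// ```
/// # use imara_diff::intern::InternedInput;
/// let input = InternedInput::new("a\nb\nc\n", "a\nc\n");
/// // Identical lines intern to identical tokens on both sides.
/// assert_eq!(input.before[0], input.after[0]);
/// assert_eq!(input.interner.num_tokens(), 3);
/// ```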
#[derive(Default)]
pub struct InternedInput<T> {
    pub before: Vec<Token>,
    pub after: Vec<Token>,
    pub interner: Interner<T>,
}

impl<T> InternedInput<T> {
    /// Clears both token lists and the interner.
    pub fn clear(&mut self) {
        self.before.clear();
        self.after.clear();
        self.interner.clear();
    }
}

impl<T: Eq + Hash> InternedInput<T> {
    /// Creates a new `InternedInput` by tokenizing and interning `before` and `after`.
    pub fn new<I: TokenSource<Token = T>>(before: I, after: I) -> Self {
        let token_estimate_before = before.estimate_tokens() as usize;
        let token_estimate_after = after.estimate_tokens() as usize;
        let mut res = Self {
            before: Vec::with_capacity(token_estimate_before),
            after: Vec::with_capacity(token_estimate_after),
            interner: Interner::new(token_estimate_before + token_estimate_after),
        };
        res.update_before(before.tokenize());
        res.update_after(after.tokenize());
        res
    }

    /// Replaces `self.before` with the interned tokens yielded by `input`.
    ///
    /// Note that this does not erase any tokens from the interner and might therefore be considered
    /// a memory leak. If this function is called often over a long-running process,
    /// consider clearing the interner with [`clear`](crate::intern::Interner::clear).
    pub fn update_before(&mut self, input: impl Iterator<Item = T>) {
        self.before.clear();
        self.before
            .extend(input.map(|token| self.interner.intern(token)));
    }

    /// Replaces `self.after` with the interned tokens yielded by `input`.
    ///
    /// Note that this does not erase any tokens from the interner and might therefore be considered
    /// a memory leak. If this function is called often over a long-running process,
    /// consider clearing the interner with [`clear`](crate::intern::Interner::clear) or
    /// [`erase_tokens_after`](crate::intern::Interner::erase_tokens_after).
    pub fn update_after(&mut self, input: impl Iterator<Item = T>) {
        self.after.clear();
        self.after
            .extend(input.map(|token| self.interner.intern(token)));
    }
}
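
// A minimal usage sketch (not part of the API) of reusing one `InternedInput`
// allocation across many diffs, as suggested by the docs above. Clearing also
// resets the interner so interned tokens do not pile up between iterations.
#[allow(dead_code)]
fn reuse_interned_input(inputs: &[(&'static str, &'static str)]) {
    let mut input: InternedInput<&str> = InternedInput::default();
    for &(before, after) in inputs {
        input.clear();
        input.update_before(before.lines());
        input.update_after(after.lines());
        // ... run a diff over `input.before` / `input.after` here ...
    }
}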

/// An interner that allows for fast access of tokens produced by a [`TokenSource`].
#[derive(Default)]
pub struct Interner<T> {
    /// The interned token data; a [`Token`] is an index into this list.
    tokens: Vec<T>,
    /// Maps token data (by hash/equality) to its interned [`Token`].
    table: HashTable<Token>,
    hasher: RandomState,
}

impl<T> Interner<T> {
    /// Create an Interner with an initial capacity calculated by summing the results of calling
    /// [`estimate_tokens`](crate::intern::TokenSource::estimate_tokens) on `before` and `after`.
    pub fn new_for_token_source<S: TokenSource<Token = T>>(before: &S, after: &S) -> Self {
        Self::new(before.estimate_tokens() as usize + after.estimate_tokens() as usize)
    }

    /// Create an Interner with initial capacity `capacity`.
    pub fn new(capacity: usize) -> Interner<T> {
        Interner {
            tokens: Vec::with_capacity(capacity),
            table: HashTable::with_capacity(capacity),
            hasher: RandomState::default(),
        }
    }

    /// Remove all interned tokens.
    pub fn clear(&mut self) {
        self.table.clear();
        self.tokens.clear();
    }

    /// Returns the total number of **distinct** tokens currently interned.
    pub fn num_tokens(&self) -> u32 {
        self.tokens.len() as u32
    }
}

impl<T: Hash + Eq> Interner<T> {
    /// Intern `token` and return the interned integer.
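    ///
    /// # Example
    ///
    /// A minimal sketch: equal tokens always map to the same [`Token`].
    ///
    /// ```
    /// # use imara_diff::intern::Interner;
    /// let mut interner = Interner::new(2);
    /// let a = interner.intern("a");
    /// assert_eq!(interner.intern("a"), a);
    /// ```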
    pub fn intern(&mut self, token: T) -> Token {
        let hash = self.hasher.hash_one(&token);
        match self.table.entry(
            hash,
            // Equality: compare against the already-interned token data.
            |&it| self.tokens[it.0 as usize] == token,
            // Rehash: recompute the hash of an interned token (used when
            // the table grows).
            |&token| self.hasher.hash_one(&self.tokens[token.0 as usize]),
        ) {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                let interned = Token(self.tokens.len() as u32);
                entry.insert(interned);
                self.tokens.push(token);
                interned
            }
        }
    }

    /// Erases `first_erased_token` and all tokens interned after it from the interner.
    pub fn erase_tokens_after(&mut self, first_erased_token: Token) {
        assert!(first_erased_token.0 <= self.tokens.len() as u32);
        let retained = first_erased_token.0 as usize;
        let erased = self.tokens.len() - retained;
        if retained <= erased {
            // Most tokens are erased: cheaper to rebuild the table from
            // the retained tokens.
            self.table.clear();
            for (i, token) in self.tokens[0..retained].iter().enumerate() {
                let hash = self.hasher.hash_one(token);
                self.table.insert_unique(hash, Token(i as u32), |&token| {
                    self.hasher.hash_one(&self.tokens[token.0 as usize])
                });
            }
        } else {
            // Most tokens are retained: cheaper to remove the erased
            // tokens from the table one by one.
            for (i, token) in self.tokens[retained..].iter().enumerate() {
                let hash = self.hasher.hash_one(token);
                match self
                    .table
                    .find_entry(hash, |token| token.0 == (retained + i) as u32)
                {
                    Ok(occupied) => drop(occupied.remove()),
                    Err(_absent) => unreachable!(),
                }
            }
        }
        self.tokens.truncate(first_erased_token.0 as usize);
    }
}

impl<T> Index<Token> for Interner<T> {
    type Output = T;
    fn index(&self, index: Token) -> &Self::Output {
        &self.tokens[index.0 as usize]
    }
}
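
// Illustrative checks (a sketch, not the crate's own test suite) of the
// `erase_tokens_after` semantics documented above.
#[cfg(test)]
mod erase_example {
    use super::Interner;

    #[test]
    fn erase_tokens_after_truncates_and_rebuilds() {
        let mut interner = Interner::new(4);
        let a = interner.intern("a");
        let b = interner.intern("b");
        interner.intern("c");
        assert_eq!(interner.num_tokens(), 3);
        // Erase `b` and everything interned after it.
        interner.erase_tokens_after(b);
        assert_eq!(interner.num_tokens(), 1);
        // `a` survives, keeps its id, and can still be looked up.
        assert_eq!(interner[a], "a");
        assert_eq!(interner.intern("a"), a);
        // Erased ids are reused: re-interning `b` yields the next free index,
        // which is the same integer `b` had before.
        assert_eq!(interner.intern("b"), b);
    }
}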