//! tracing-log: `interest_cache.rs`

1use ahash::AHasher;
2use log::{Level, Metadata};
3use lru::LruCache;
4use once_cell::sync::Lazy;
5use std::cell::RefCell;
6use std::hash::Hasher;
7use std::sync::atomic::{AtomicUsize, Ordering};
8use std::sync::Mutex;
9
/// The interest cache configuration.
#[derive(Debug)]
pub struct InterestCacheConfig {
    // Minimum verbosity level for which interests are cached; logs less
    // verbose than this bypass the cache entirely (see `try_cache`).
    min_verbosity: Level,
    // Maximum number of entries in the per-thread LRU cache; zero disables
    // caching altogether.
    lru_cache_size: usize,
}
16
17impl Default for InterestCacheConfig {
18    fn default() -> Self {
19        InterestCacheConfig {
20            min_verbosity: Level::Debug,
21            lru_cache_size: 1024,
22        }
23    }
24}
25
26impl InterestCacheConfig {
27    fn disabled() -> Self {
28        Self {
29            lru_cache_size: 0,
30            ..Self::default()
31        }
32    }
33}
34
35impl InterestCacheConfig {
36    /// Sets the minimum logging verbosity for which the cache will apply.
37    ///
38    /// The interest for logs with a lower verbosity than specified here
39    /// will not be cached.
40    ///
41    /// It should be set to the lowest verbosity level for which the majority
42    /// of the logs in your application are usually *disabled*.
43    ///
44    /// In normal circumstances with typical logger usage patterns
45    /// you shouldn't ever have to change this.
46    ///
47    /// By default this is set to `Debug`.
48    pub fn with_min_verbosity(mut self, level: Level) -> Self {
49        self.min_verbosity = level;
50        self
51    }
52
53    /// Sets the number of entries in the LRU cache used to cache interests
54    /// for `log` records.
55    ///
56    /// The bigger the cache, the more unlikely it will be for the interest
57    /// in a given callsite to be recalculated, at the expense of extra
58    /// memory usage per every thread which tries to log events.
59    ///
60    /// Every unique [level] + [target] pair consumes a single slot
61    /// in the cache. Entries will be added to the cache until its size
62    /// reaches the value configured here, and from then on it will evict
63    /// the least recently seen level + target pair when adding a new entry.
64    ///
65    /// The ideal value to set here widely depends on how much exactly
66    /// you're logging, and how diverse the targets are to which you are logging.
67    ///
68    /// If your application spends a significant amount of time filtering logs
69    /// which are *not* getting printed out then increasing this value will most
70    /// likely help.
71    ///
72    /// Setting this to zero will disable the cache.
73    ///
74    /// By default this is set to 1024.
75    ///
76    /// [level]: log::Metadata::level
77    /// [target]: log::Metadata::target
78    pub fn with_lru_cache_size(mut self, size: usize) -> Self {
79        self.lru_cache_size = size;
80        self
81    }
82}
83
/// Cache key identifying a unique (target address, level, target length)
/// combination; see `try_cache` for how it is built.
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
struct Key {
    // Address of the target string's first byte; log targets are usually
    // static strings, so the pointer cheaply identifies the target.
    target_address: usize,
    // The log level packed into the low 3 bits, with the target's length
    // shifted into the remaining bits.
    level_and_length: usize,
}
89
/// Per-thread interest cache state.
struct State {
    // Snapshot of the configured minimum verbosity taken when this state
    // was built.
    min_verbosity: Level,
    // Epoch snapshot; once the global `INTEREST_CACHE_EPOCH` moves past
    // this value the whole state is considered stale and is rebuilt.
    epoch: usize,
    // Maps a `Key` to the target string's hash, with the cached interest
    // packed into the hash's least significant bit.
    cache: LruCache<Key, u64, ahash::RandomState>,
}
95
96impl State {
97    fn new(epoch: usize, config: &InterestCacheConfig) -> Self {
98        State {
99            epoch,
100            min_verbosity: config.min_verbosity,
101            cache: LruCache::new(config.lru_cache_size),
102        }
103    }
104}
105
106// When the logger's filters are reconfigured the interest cache in core is cleared,
107// and we also want to get notified when that happens so that we can clear our cache too.
108//
109// So what we do here is to register a dummy callsite with the core, just so that we can be
// notified when that happens. It doesn't really matter what exactly our dummy callsite
// looks like or whether subscribers will actually be interested in it, since nothing will
// actually be logged from it.
113
// Global epoch counter; bumped whenever core recalculates interests (via the
// sentinel callsite below) or when the cache is reconfigured, so every
// per-thread cache can detect that it is stale.
static INTEREST_CACHE_EPOCH: AtomicUsize = AtomicUsize::new(0);

/// Returns the current epoch. Relaxed ordering is sufficient here: a slightly
/// stale read only delays a cache reset by one call.
fn interest_cache_epoch() -> usize {
    INTEREST_CACHE_EPOCH.load(Ordering::Relaxed)
}
119
/// A dummy callsite registered with `tracing` core purely so that we get
/// notified (through `set_interest`) when the core interest cache is rebuilt;
/// nothing is ever logged through it.
struct SentinelCallsite;
121
impl tracing_core::Callsite for SentinelCallsite {
    /// Invoked by core when interests are recalculated; bump the epoch so
    /// that every per-thread cache resets itself on its next use.
    fn set_interest(&self, _: tracing_core::subscriber::Interest) {
        INTEREST_CACHE_EPOCH.fetch_add(1, Ordering::SeqCst);
    }

    fn metadata(&self) -> &tracing_core::Metadata<'_> {
        &SENTINEL_METADATA
    }
}
131
static SENTINEL_CALLSITE: SentinelCallsite = SentinelCallsite;
// The metadata contents here are arbitrary; they only exist so that the
// sentinel callsite can be registered with `tracing` core.
static SENTINEL_METADATA: tracing_core::Metadata<'static> = tracing_core::Metadata::new(
    "log interest cache",
    "log",
    tracing_core::Level::ERROR,
    None,
    None,
    None,
    tracing_core::field::FieldSet::new(&[], tracing_core::identify_callsite!(&SENTINEL_CALLSITE)),
    tracing_core::metadata::Kind::EVENT,
);
143
// The global cache configuration; caching starts out disabled.
static CONFIG: Lazy<Mutex<InterestCacheConfig>> = Lazy::new(|| {
    // Registering the sentinel here ensures `SentinelCallsite::set_interest`
    // fires whenever the core interest cache is rebuilt.
    tracing_core::callsite::register(&SENTINEL_CALLSITE);
    Mutex::new(InterestCacheConfig::disabled())
});
148
thread_local! {
    // Each thread keeps its own LRU cache so no locking is needed on the hot
    // path; the `CONFIG` mutex is only taken when a state is (re)built.
    static STATE: RefCell<State> = {
        let config = CONFIG.lock().unwrap();
        RefCell::new(State::new(interest_cache_epoch(), &config))
    };
}
155
156pub(crate) fn configure(new_config: Option<InterestCacheConfig>) {
157    *CONFIG.lock().unwrap() = new_config.unwrap_or_else(InterestCacheConfig::disabled);
158    INTEREST_CACHE_EPOCH.fetch_add(1, Ordering::SeqCst);
159}
160
161pub(crate) fn try_cache(metadata: &Metadata<'_>, callback: impl FnOnce() -> bool) -> bool {
162    STATE.with(|state| {
163        let mut state = state.borrow_mut();
164
165        // If the interest cache in core was rebuilt we need to reset the cache here too.
166        let epoch = interest_cache_epoch();
167        if epoch != state.epoch {
168            *state = State::new(epoch, &CONFIG.lock().unwrap());
169        }
170
171        let level = metadata.level();
172        if state.cache.cap() == 0 || level < state.min_verbosity {
173            return callback();
174        }
175
176        let target = metadata.target();
177
178        let mut hasher = AHasher::default();
179        hasher.write(target.as_bytes());
180
181        const HASH_MASK: u64 = !1;
182        const INTEREST_MASK: u64 = 1;
183
184        // We mask out the least significant bit of the hash since we'll use
185        // that space to save the interest.
186        //
187        // Since we use a good hashing function the loss of only a single bit
188        // won't really affect us negatively.
189        let target_hash = hasher.finish() & HASH_MASK;
190
191        // Since log targets are usually static strings we just use the address of the pointer
192        // as the key for our cache.
193        //
194        // We want each level to be cached separately so we also use the level as key, and since
195        // some linkers at certain optimization levels deduplicate strings if their prefix matches
196        // (e.g. "ham" and "hamster" might actually have the same address in memory) we also use the length.
197        let key = Key {
198            target_address: target.as_ptr() as usize,
199            // For extra efficiency we pack both the level and the length into a single field.
200            // The `level` can be between 1 and 5, so it can take at most 3 bits of space.
201            level_and_length: level as usize | target.len().wrapping_shl(3),
202        };
203
204        if let Some(&cached) = state.cache.get(&key) {
205            // And here we make sure that the target actually matches.
206            //
207            // This is just a hash of the target string, so theoretically we're not guaranteed
208            // that it won't collide, however in practice it shouldn't matter as it is quite
209            // unlikely that the target string's address and its length and the level and
210            // the hash will *all* be equal at the same time.
211            //
212            // We could of course actually store the whole target string in our cache,
213            // but we really want to avoid doing that as the necessary memory allocations
214            // would completely tank our performance, especially in cases where the cache's
215            // size is too small so it needs to regularly replace entries.
216            if cached & HASH_MASK == target_hash {
217                return (cached & INTEREST_MASK) != 0;
218            }
219
220            // Realistically we should never land here, unless someone is using a non-static
221            // target string with the same length and level, or is very lucky and found a hash
222            // collision for the cache's key.
223        }
224
225        let interest = callback();
226        state.cache.put(key, target_hash | interest as u64);
227
228        interest
229    })
230}
231
#[cfg(test)]
mod tests {
    use super::*;

    /// Serializes test execution: every test mutates the global `CONFIG` and
    /// the process-wide epoch, so tests must not run concurrently.
    fn lock_for_test() -> impl Drop {
        // We need to make sure only one test runs at a time.
        //
        // `Lazy::new` requires a zero-argument constructor; `Mutex::new`
        // takes the initial value as an argument and doesn't compile here,
        // so use `Mutex::default` instead.
        static LOCK: Lazy<Mutex<()>> = Lazy::new(Mutex::default);

        match LOCK.lock() {
            Ok(guard) => guard,
            // A panic in a previous test shouldn't poison the lock for
            // the remaining ones.
            Err(poison) => poison.into_inner(),
        }
    }

    // Tests run their bodies on freshly spawned threads so that the
    // thread-local `STATE` cache starts out empty every time.

    #[test]
    fn test_when_disabled_the_callback_is_always_called() {
        let _lock = lock_for_test();

        *CONFIG.lock().unwrap() = InterestCacheConfig::disabled();

        std::thread::spawn(|| {
            let metadata = log::MetadataBuilder::new()
                .level(Level::Trace)
                .target("dummy")
                .build();
            let mut count = 0;
            try_cache(&metadata, || {
                count += 1;
                true
            });
            assert_eq!(count, 1);
            try_cache(&metadata, || {
                count += 1;
                true
            });
            assert_eq!(count, 2);
        })
        .join()
        .unwrap();
    }

    #[test]
    fn test_when_enabled_the_callback_is_called_only_once_for_a_high_enough_verbosity() {
        let _lock = lock_for_test();

        *CONFIG.lock().unwrap() = InterestCacheConfig::default().with_min_verbosity(Level::Debug);

        std::thread::spawn(|| {
            let metadata = log::MetadataBuilder::new()
                .level(Level::Debug)
                .target("dummy")
                .build();
            let mut count = 0;
            try_cache(&metadata, || {
                count += 1;
                true
            });
            assert_eq!(count, 1);
            // The second call must be served from the cache.
            try_cache(&metadata, || {
                count += 1;
                true
            });
            assert_eq!(count, 1);
        })
        .join()
        .unwrap();
    }

    #[test]
    fn test_when_core_interest_cache_is_rebuilt_this_cache_is_also_flushed() {
        let _lock = lock_for_test();

        *CONFIG.lock().unwrap() = InterestCacheConfig::default().with_min_verbosity(Level::Debug);

        std::thread::spawn(|| {
            let metadata = log::MetadataBuilder::new()
                .level(Level::Debug)
                .target("dummy")
                .build();
            {
                let mut count = 0;
                try_cache(&metadata, || {
                    count += 1;
                    true
                });
                try_cache(&metadata, || {
                    count += 1;
                    true
                });
                assert_eq!(count, 1);
            }
            // Rebuilding core's interest cache bumps the epoch (via the
            // sentinel callsite), which must flush our cache too.
            tracing_core::callsite::rebuild_interest_cache();
            {
                let mut count = 0;
                try_cache(&metadata, || {
                    count += 1;
                    true
                });
                try_cache(&metadata, || {
                    count += 1;
                    true
                });
                assert_eq!(count, 1);
            }
        })
        .join()
        .unwrap();
    }

    #[test]
    fn test_when_enabled_the_callback_is_always_called_for_a_low_enough_verbosity() {
        let _lock = lock_for_test();

        *CONFIG.lock().unwrap() = InterestCacheConfig::default().with_min_verbosity(Level::Debug);

        std::thread::spawn(|| {
            // `Info` is less verbose than the configured `Debug` minimum,
            // so its interest is never cached.
            let metadata = log::MetadataBuilder::new()
                .level(Level::Info)
                .target("dummy")
                .build();
            let mut count = 0;
            try_cache(&metadata, || {
                count += 1;
                true
            });
            assert_eq!(count, 1);
            try_cache(&metadata, || {
                count += 1;
                true
            });
            assert_eq!(count, 2);
        })
        .join()
        .unwrap();
    }

    #[test]
    fn test_different_log_levels_are_cached_separately() {
        let _lock = lock_for_test();

        *CONFIG.lock().unwrap() = InterestCacheConfig::default().with_min_verbosity(Level::Debug);

        std::thread::spawn(|| {
            let metadata_debug = log::MetadataBuilder::new()
                .level(Level::Debug)
                .target("dummy")
                .build();
            let metadata_trace = log::MetadataBuilder::new()
                .level(Level::Trace)
                .target("dummy")
                .build();
            let mut count_debug = 0;
            let mut count_trace = 0;
            try_cache(&metadata_debug, || {
                count_debug += 1;
                true
            });
            try_cache(&metadata_trace, || {
                count_trace += 1;
                true
            });
            try_cache(&metadata_debug, || {
                count_debug += 1;
                true
            });
            try_cache(&metadata_trace, || {
                count_trace += 1;
                true
            });
            // Same target, different levels: each level computed exactly once.
            assert_eq!(count_debug, 1);
            assert_eq!(count_trace, 1);
        })
        .join()
        .unwrap();
    }

    #[test]
    fn test_different_log_targets_are_cached_separately() {
        let _lock = lock_for_test();

        *CONFIG.lock().unwrap() = InterestCacheConfig::default().with_min_verbosity(Level::Debug);

        std::thread::spawn(|| {
            let metadata_1 = log::MetadataBuilder::new()
                .level(Level::Trace)
                .target("dummy_1")
                .build();
            let metadata_2 = log::MetadataBuilder::new()
                .level(Level::Trace)
                .target("dummy_2")
                .build();
            let mut count_1 = 0;
            let mut count_2 = 0;
            try_cache(&metadata_1, || {
                count_1 += 1;
                true
            });
            try_cache(&metadata_2, || {
                count_2 += 1;
                true
            });
            try_cache(&metadata_1, || {
                count_1 += 1;
                true
            });
            try_cache(&metadata_2, || {
                count_2 += 1;
                true
            });
            // Same level, different targets: each target computed exactly once.
            assert_eq!(count_1, 1);
            assert_eq!(count_2, 1);
        })
        .join()
        .unwrap();
    }

    #[test]
    fn test_when_cache_runs_out_of_space_the_callback_is_called_again() {
        let _lock = lock_for_test();

        // A single-slot cache, so caching a second target evicts the first.
        *CONFIG.lock().unwrap() = InterestCacheConfig::default()
            .with_min_verbosity(Level::Debug)
            .with_lru_cache_size(1);

        std::thread::spawn(|| {
            let metadata_1 = log::MetadataBuilder::new()
                .level(Level::Trace)
                .target("dummy_1")
                .build();
            let metadata_2 = log::MetadataBuilder::new()
                .level(Level::Trace)
                .target("dummy_2")
                .build();
            let mut count = 0;
            try_cache(&metadata_1, || {
                count += 1;
                true
            });
            try_cache(&metadata_1, || {
                count += 1;
                true
            });
            assert_eq!(count, 1);
            // This evicts `metadata_1`'s entry...
            try_cache(&metadata_2, || true);
            // ...so its interest has to be recomputed.
            try_cache(&metadata_1, || {
                count += 1;
                true
            });
            assert_eq!(count, 2);
        })
        .join()
        .unwrap();
    }

    #[test]
    fn test_cache_returns_previously_computed_value() {
        let _lock = lock_for_test();

        *CONFIG.lock().unwrap() = InterestCacheConfig::default().with_min_verbosity(Level::Debug);

        std::thread::spawn(|| {
            let metadata_1 = log::MetadataBuilder::new()
                .level(Level::Trace)
                .target("dummy_1")
                .build();
            let metadata_2 = log::MetadataBuilder::new()
                .level(Level::Trace)
                .target("dummy_2")
                .build();
            // Cached hits must return the stored interest without invoking
            // the callback at all.
            try_cache(&metadata_1, || true);
            assert_eq!(try_cache(&metadata_1, || { unreachable!() }), true);
            try_cache(&metadata_2, || false);
            assert_eq!(try_cache(&metadata_2, || { unreachable!() }), false);
        })
        .join()
        .unwrap();
    }

    #[test]
    fn test_cache_handles_non_static_target_string() {
        let _lock = lock_for_test();

        *CONFIG.lock().unwrap() = InterestCacheConfig::default().with_min_verbosity(Level::Debug);

        std::thread::spawn(|| {
            // Both targets share the same buffer address and length, so they
            // collide on the cache key; the stored target hash must still
            // keep their interests apart.
            let mut target = *b"dummy_1";
            let metadata_1 = log::MetadataBuilder::new()
                .level(Level::Trace)
                .target(std::str::from_utf8(&target).unwrap())
                .build();

            try_cache(&metadata_1, || true);
            assert_eq!(try_cache(&metadata_1, || { unreachable!() }), true);

            *target.last_mut().unwrap() = b'2';
            let metadata_2 = log::MetadataBuilder::new()
                .level(Level::Trace)
                .target(std::str::from_utf8(&target).unwrap())
                .build();

            try_cache(&metadata_2, || false);
            assert_eq!(try_cache(&metadata_2, || { unreachable!() }), false);
        })
        .join()
        .unwrap();
    }
}