ckb_rocksdb/
db_options.rs

// Copyright 2014 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::{
    handle::{ConstHandle, Handle},
    Error,
};

use std::ffi::{CStr, CString};
use std::path::Path;
use std::sync::Arc;

use libc::{self, c_char, c_double, c_int, c_uchar, c_uint, c_void, size_t};

use crate::compaction_filter::{self, CompactionFilterCallback, CompactionFilterFn};
use crate::compaction_filter_factory::{self, CompactionFilterFactory};
use crate::comparator::{self, ComparatorCallback, CompareFn};
use crate::ffi;
use crate::merge_operator::{
    self, full_merge_callback, partial_merge_callback, MergeFn, MergeOperatorCallback,
};
use crate::slice_transform::SliceTransform;
use std::ptr::NonNull;

pub(crate) struct CacheWrapper {
    pub(crate) inner: NonNull<ffi::rocksdb_cache_t>,
}

impl Drop for CacheWrapper {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_cache_destroy(self.inner.as_ptr());
        }
    }
}

#[derive(Clone)]
pub struct Cache(pub(crate) Arc<CacheWrapper>);

impl Cache {
    /// Creates an LRU cache with capacity in bytes.
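    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming `Cache` is re-exported at the crate root
    /// like the other option types; the 64 MiB capacity is an arbitrary
    /// example value:
    ///
    /// ```
    /// use ckb_rocksdb::{BlockBasedOptions, Cache};
    ///
    /// let cache = Cache::new_lru_cache(64 * 1024 * 1024);
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_block_cache(&cache);
    /// assert!(cache.get_usage() <= 64 * 1024 * 1024);
    /// ```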
    pub fn new_lru_cache(capacity: size_t) -> Cache {
        let inner = NonNull::new(unsafe { ffi::rocksdb_cache_create_lru(capacity) }).unwrap();
        Cache(Arc::new(CacheWrapper { inner }))
    }

    /// Creates a HyperClockCache with capacity in bytes.
    ///
    /// `estimated_entry_charge` is an important tuning parameter. The optimal
    /// choice at any given time is
    /// `(cache.get_usage() - 64 * cache.get_table_address_count()) /
    /// cache.get_occupancy_count()`, or approximately `cache.get_usage() /
    /// cache.get_occupancy_count()`.
    ///
    /// However, the value cannot be changed dynamically, so as the cache
    /// composition changes at runtime, the following tradeoffs apply:
    ///
    /// * If the estimate is substantially too high (e.g., 25% higher),
    ///   the cache may have to evict entries to prevent load factors that
    ///   would dramatically affect lookup times.
    /// * If the estimate is substantially too low (e.g., less than half),
    ///   then meta data space overhead is substantially higher.
    ///
    /// The latter is generally preferable, and picking the larger of
    /// block size and meta data block size is a reasonable choice that
    /// errs towards this side.
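    ///
    /// # Examples
    ///
    /// A rough sketch of the advice above, using the expected block size as
    /// the entry charge (both sizes are assumed example values, not
    /// recommendations):
    ///
    /// ```
    /// use ckb_rocksdb::{BlockBasedOptions, Cache};
    ///
    /// let block_size = 16 * 1024;
    /// let cache = Cache::new_hyper_clock_cache(256 * 1024 * 1024, block_size);
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_block_size(block_size);
    /// block_opts.set_block_cache(&cache);
    /// ```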
    pub fn new_hyper_clock_cache(capacity: size_t, estimated_entry_charge: size_t) -> Cache {
        Cache(Arc::new(CacheWrapper {
            inner: NonNull::new(unsafe {
                ffi::rocksdb_cache_create_hyper_clock(capacity, estimated_entry_charge)
            })
            .unwrap(),
        }))
    }

    /// Returns the Cache memory usage
    pub fn get_usage(&self) -> usize {
        unsafe { ffi::rocksdb_cache_get_usage(self.0.inner.as_ptr()) }
    }

    /// Returns pinned memory usage
    pub fn get_pinned_usage(&self) -> usize {
        unsafe { ffi::rocksdb_cache_get_pinned_usage(self.0.inner.as_ptr()) }
    }

    /// Sets cache capacity
    pub fn set_capacity(&mut self, capacity: size_t) {
        unsafe {
            ffi::rocksdb_cache_set_capacity(self.0.inner.as_ptr(), capacity);
        }
    }
}

/// An Env is an interface used by the rocksdb implementation to access
/// operating system functionality like the filesystem etc. Callers
/// may wish to provide a custom Env object when opening a database to
/// get fine-grained control; e.g., to rate limit file system operations.
///
/// All Env implementations are safe for concurrent access from
/// multiple threads without any external synchronization.
///
/// Note: currently, the C API lags behind the C++ API for various settings.
/// See also: `rocksdb/include/env.h`
#[derive(Clone)]
pub struct Env(Arc<EnvWrapper>);

pub(crate) struct EnvWrapper {
    inner: *mut ffi::rocksdb_env_t,
}

impl Drop for EnvWrapper {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_env_destroy(self.inner);
        }
    }
}

impl Env {
    /// Returns default env
    pub fn default_env() -> Result<Self, Error> {
        let env = unsafe { ffi::rocksdb_create_default_env() };
        if env.is_null() {
            Err(Error::new("Could not create default env".to_owned()))
        } else {
            Ok(Self(Arc::new(EnvWrapper { inner: env })))
        }
    }

    /// Returns a new environment that stores its data in memory and delegates
    /// all non-file-storage tasks to base_env.
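    ///
    /// # Examples
    ///
    /// A minimal sketch (handy for tests); this assumes `Env` is re-exported
    /// at the crate root like `Options`:
    ///
    /// ```
    /// use ckb_rocksdb::{Env, Options};
    ///
    /// let mem_env = Env::mem_env().unwrap();
    /// let mut opts = Options::default();
    /// opts.create_if_missing(true);
    /// opts.set_env(&mem_env);
    /// ```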
    pub fn mem_env() -> Result<Self, Error> {
        let env = unsafe { ffi::rocksdb_create_mem_env() };
        if env.is_null() {
            Err(Error::new("Could not create mem env".to_owned()))
        } else {
            Ok(Self(Arc::new(EnvWrapper { inner: env })))
        }
    }

    /// Sets the number of background worker threads of a specific thread pool for this environment.
    /// `LOW` is the default pool.
    ///
    /// Default: 1
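    ///
    /// # Examples
    ///
    /// A short sketch combining the thread pool setters with
    /// `Options::set_env` (same re-export assumption as above):
    ///
    /// ```
    /// use ckb_rocksdb::{Env, Options};
    ///
    /// let mut env = Env::default_env().unwrap();
    /// env.set_background_threads(4);
    /// env.set_high_priority_background_threads(2);
    ///
    /// let mut opts = Options::default();
    /// opts.set_env(&env);
    /// ```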
    pub fn set_background_threads(&mut self, num_threads: c_int) {
        unsafe {
            ffi::rocksdb_env_set_background_threads(self.0.inner, num_threads);
        }
    }

    /// Sets the size of the high priority thread pool that can be used to
    /// prevent compactions from stalling memtable flushes.
    pub fn set_high_priority_background_threads(&mut self, n: c_int) {
        unsafe {
            ffi::rocksdb_env_set_high_priority_background_threads(self.0.inner, n);
        }
    }

    /// Sets the size of the low priority thread pool, which is used for
    /// background compactions.
    pub fn set_low_priority_background_threads(&mut self, n: c_int) {
        unsafe {
            ffi::rocksdb_env_set_low_priority_background_threads(self.0.inner, n);
        }
    }

    /// Sets the size of the bottom priority thread pool, which is used for
    /// bottom-most level compactions.
    pub fn set_bottom_priority_background_threads(&mut self, n: c_int) {
        unsafe {
            ffi::rocksdb_env_set_bottom_priority_background_threads(self.0.inner, n);
        }
    }

    /// Wait for all threads started by StartThread to terminate.
    pub fn join_all_threads(&mut self) {
        unsafe {
            ffi::rocksdb_env_join_all_threads(self.0.inner);
        }
    }

    /// Lowering IO priority for threads from the specified pool.
    pub fn lower_thread_pool_io_priority(&mut self) {
        unsafe {
            ffi::rocksdb_env_lower_thread_pool_io_priority(self.0.inner);
        }
    }

    /// Lowering IO priority for high priority thread pool.
    pub fn lower_high_priority_thread_pool_io_priority(&mut self) {
        unsafe {
            ffi::rocksdb_env_lower_high_priority_thread_pool_io_priority(self.0.inner);
        }
    }

    /// Lowering CPU priority for threads from the specified pool.
    pub fn lower_thread_pool_cpu_priority(&mut self) {
        unsafe {
            ffi::rocksdb_env_lower_thread_pool_cpu_priority(self.0.inner);
        }
    }

    /// Lowering CPU priority for high priority thread pool.
    pub fn lower_high_priority_thread_pool_cpu_priority(&mut self) {
        unsafe {
            ffi::rocksdb_env_lower_high_priority_thread_pool_cpu_priority(self.0.inner);
        }
    }

    fn clone(&self) -> Self {
        Self(self.0.clone())
    }
}

#[derive(Default)]
pub struct OptionsMustOutliveDB {
    pub(crate) env: Option<Env>,
    pub(crate) row_cache: Option<Cache>,
    pub(crate) block_based: Option<BlockBasedOptionsMustOutliveDB>,
}

impl OptionsMustOutliveDB {
    pub(crate) fn clone(&self) -> Self {
        Self {
            env: self.env.as_ref().map(Env::clone),
            row_cache: self.row_cache.as_ref().map(Cache::clone),
            block_based: self
                .block_based
                .as_ref()
                .map(BlockBasedOptionsMustOutliveDB::clone),
        }
    }
}

#[derive(Default)]
pub(crate) struct BlockBasedOptionsMustOutliveDB {
    block_cache: Option<Cache>,
}

impl BlockBasedOptionsMustOutliveDB {
    fn clone(&self) -> Self {
        Self {
            block_cache: self.block_cache.as_ref().map(Cache::clone),
        }
    }
}

/// Database-wide options around performance and behavior.
///
/// Please read [the official tuning guide](https://github.com/facebook/rocksdb/wiki/RocksDB-Tuning-Guide), and most importantly, measure performance under realistic workloads with realistic hardware.
///
/// # Examples
///
/// ```
/// use ckb_rocksdb::{Options, DB, prelude::*};
/// use ckb_rocksdb::DBCompactionStyle;
///
/// fn badly_tuned_for_somebody_elses_disk() -> DB {
///    let path = "path/for/rocksdb/storageX";
///    let mut opts = Options::default();
///    opts.create_if_missing(true);
///    opts.set_max_open_files(10000);
///    opts.set_use_fsync(false);
///    opts.set_bytes_per_sync(8388608);
///    opts.optimize_for_point_lookup(1024);
///    opts.set_table_cache_num_shard_bits(6);
///    opts.set_max_write_buffer_number(32);
///    opts.set_write_buffer_size(536870912);
///    opts.set_target_file_size_base(1073741824);
///    opts.set_min_write_buffer_number_to_merge(4);
///    opts.set_level_zero_stop_writes_trigger(2000);
///    opts.set_level_zero_slowdown_writes_trigger(0);
///    opts.set_compaction_style(DBCompactionStyle::Universal);
///    opts.set_max_background_compactions(4);
///    opts.set_max_background_flushes(4);
///    opts.set_disable_auto_compactions(true);
///
///    DB::open(&opts, path).unwrap()
/// }
/// ```
pub struct Options {
    pub(crate) inner: *mut ffi::rocksdb_options_t,
    pub(crate) outlive: OptionsMustOutliveDB,
}

/// Optionally disable WAL or sync for this write.
///
/// # Examples
///
/// Making an unsafe write of a batch:
///
/// ```
/// use ckb_rocksdb::{DB, Options, WriteBatch, WriteOptions, prelude::*};
///
/// let path = "_path_for_rocksdb_storageZ";
/// {
///     let db = DB::open_default(path).unwrap();
///     let mut batch = WriteBatch::default();
///     batch.put(b"my key", b"my value");
///     batch.put(b"key2", b"value2");
///     batch.put(b"key3", b"value3");
///
///     let mut write_options = WriteOptions::default();
///     write_options.set_sync(false);
///     write_options.disable_wal(true);
///
///     db.write_opt(&batch, &write_options);
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct WriteOptions {
    option_set_sync: Option<bool>,
    option_disable_wal: Option<bool>,
    inner: *mut ffi::rocksdb_writeoptions_t,
}

/// Optionally wait for the memtable flush to be performed.
///
/// # Examples
///
/// Manually flushing the memtable:
///
/// ```
/// use ckb_rocksdb::{DB, Options, FlushOptions, prelude::*};
///
/// let path = "_path_for_rocksdb_storageY";
/// {
///
///     let db = DB::open_default(path).unwrap();
///
///     let mut flush_options = FlushOptions::default();
///     flush_options.set_wait(true);
///
///     db.flush_opt(&flush_options);
/// }
///
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct FlushOptions {
    pub(crate) inner: *mut ffi::rocksdb_flushoptions_t,
}

/// For configuring block-based file storage.
pub struct BlockBasedOptions {
    pub(crate) inner: *mut ffi::rocksdb_block_based_table_options_t,
    outlive: BlockBasedOptionsMustOutliveDB,
}

pub struct ReadOptions {
    option_fill_cache: Option<bool>,
    option_set_iterate_upper_bound: Option<Vec<u8>>,
    option_set_iterate_lower_bound: Option<Vec<u8>>,
    option_set_prefix_same_as_start: Option<bool>,
    option_set_total_order_seek: Option<bool>,
    option_set_readahead_size: Option<usize>,
    inner: *mut ffi::rocksdb_readoptions_t,
}

/// Configuration of cuckoo-based storage.
pub struct CuckooTableOptions {
    pub(crate) inner: *mut ffi::rocksdb_cuckoo_table_options_t,
}

/// For configuring external files ingestion.
///
/// # Examples
///
/// Move files instead of copying them:
///
/// ```
/// use ckb_rocksdb::{ops::{Open, IngestExternalFile}, DB, IngestExternalFileOptions, SstFileWriter, Options};
///
/// let writer_opts = Options::default();
/// let mut writer = SstFileWriter::create(&writer_opts);
/// writer.open("_path_for_sst_file").unwrap();
/// writer.put(b"k1", b"v1").unwrap();
/// writer.finish().unwrap();
///
/// let path = "_path_for_rocksdb_storageY3";
/// {
///   let db = DB::open_default(&path).unwrap();
///   let mut ingest_opts = IngestExternalFileOptions::default();
///   ingest_opts.set_move_files(true);
///   db.ingest_external_file_opts(vec!["_path_for_sst_file"], &ingest_opts).unwrap();
/// }
/// let _ = DB::destroy(&Options::default(), path);
/// ```
pub struct IngestExternalFileOptions {
    pub(crate) inner: *mut ffi::rocksdb_ingestexternalfileoptions_t,
}

// Safety note: auto-implementing Send on most db-related types is prevented by the inner FFI
// pointer. In most cases, however, this pointer is Send-safe because it is never aliased and
// rocksdb internally does not rely on thread-local information for its user-exposed types.
unsafe impl Send for Options {}
unsafe impl Send for WriteOptions {}
unsafe impl Send for BlockBasedOptions {}
unsafe impl Send for CuckooTableOptions {}
unsafe impl Send for ReadOptions {}
unsafe impl Send for IngestExternalFileOptions {}
unsafe impl Send for CacheWrapper {}
unsafe impl Send for EnvWrapper {}

// Sync is similarly safe for many types because they do not expose interior mutability, and their
// use within the rocksdb library is generally behind a const reference
unsafe impl Sync for Options {}
unsafe impl Sync for WriteOptions {}
unsafe impl Sync for BlockBasedOptions {}
unsafe impl Sync for CuckooTableOptions {}
unsafe impl Sync for ReadOptions {}
unsafe impl Sync for IngestExternalFileOptions {}
unsafe impl Sync for CacheWrapper {}
unsafe impl Sync for EnvWrapper {}

impl Drop for Options {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_options_destroy(self.inner);
        }
    }
}

impl Clone for Options {
    fn clone(&self) -> Self {
        let inner = unsafe { ffi::rocksdb_options_create_copy(self.inner) };
        assert!(!inner.is_null(), "Could not copy RocksDB options");

        Self {
            inner,
            outlive: self.outlive.clone(),
        }
    }
}

impl Drop for BlockBasedOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_block_based_options_destroy(self.inner);
        }
    }
}

impl Drop for CuckooTableOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_cuckoo_options_destroy(self.inner);
        }
    }
}

impl Drop for FlushOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_flushoptions_destroy(self.inner);
        }
    }
}

impl Drop for WriteOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_writeoptions_destroy(self.inner);
        }
    }
}

impl Drop for ReadOptions {
    fn drop(&mut self) {
        unsafe { ffi::rocksdb_readoptions_destroy(self.inner) }
    }
}

impl Drop for IngestExternalFileOptions {
    fn drop(&mut self) {
        unsafe {
            ffi::rocksdb_ingestexternalfileoptions_destroy(self.inner);
        }
    }
}

impl BlockBasedOptions {
    /// Approximate size of user data packed per block. Note that the
    /// block size specified here corresponds to uncompressed data. The
    /// actual size of the unit read from disk may be smaller if
    /// compression is enabled. This parameter can be changed dynamically.
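    ///
    /// # Examples
    ///
    /// A minimal sketch; 16 KiB is just an assumed example value:
    ///
    /// ```
    /// use ckb_rocksdb::BlockBasedOptions;
    ///
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_block_size(16 * 1024);
    /// ```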
    pub fn set_block_size(&mut self, size: usize) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_size(self.inner, size);
        }
    }

    /// Block size for partitioned metadata. Currently applied to indexes when
    /// kTwoLevelIndexSearch is used and to filters when partition_filters is used.
    /// Note: Since in the current implementation the filters and index partitions
    /// are aligned, an index/filter block is created when either index or filter
    /// block size reaches the specified limit.
    ///
    /// Note: this limit is currently applied to only index blocks; a filter
    /// partition is cut right after an index block is cut.
    pub fn set_metadata_block_size(&mut self, size: usize) {
        unsafe {
            ffi::rocksdb_block_based_options_set_metadata_block_size(self.inner, size as u64);
        }
    }

    /// Note: currently this option requires kTwoLevelIndexSearch to be set as
    /// well.
    ///
    /// Use partitioned full filters for each SST file. This option is
    /// incompatible with block-based filters.
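    ///
    /// # Examples
    ///
    /// A sketch of a partitioned-filter setup; it assumes the
    /// `BlockBasedIndexType::TwoLevelIndexSearch` variant, and the sizes and
    /// bits-per-key are arbitrary example values:
    ///
    /// ```
    /// use ckb_rocksdb::{BlockBasedIndexType, BlockBasedOptions};
    ///
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_index_type(BlockBasedIndexType::TwoLevelIndexSearch);
    /// block_opts.set_partition_filters(true);
    /// block_opts.set_metadata_block_size(4096);
    /// block_opts.set_bloom_filter(10.0, false);
    /// ```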
    pub fn set_partition_filters(&mut self, size: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_partition_filters(self.inner, size as c_uchar);
        }
    }

    /// Sets global cache for blocks (user data is stored in a set of blocks, and
    /// a block is the unit of reading from disk). Cache must outlive DB instance which uses it.
    ///
    /// If set, use the specified cache for blocks.
    /// By default, rocksdb will automatically create and use an 8MB internal cache.
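    ///
    /// # Examples
    ///
    /// A minimal sketch of sharing one cache between two sets of table
    /// options (same `Cache` re-export assumption as above):
    ///
    /// ```
    /// use ckb_rocksdb::{BlockBasedOptions, Cache};
    ///
    /// let cache = Cache::new_lru_cache(64 * 1024 * 1024);
    /// let mut cf1_opts = BlockBasedOptions::default();
    /// let mut cf2_opts = BlockBasedOptions::default();
    /// cf1_opts.set_block_cache(&cache);
    /// cf2_opts.set_block_cache(&cache);
    /// ```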
    pub fn set_block_cache(&mut self, cache: &Cache) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_cache(self.inner, cache.0.inner.as_ptr());
        }
        self.outlive.block_cache = Some(cache.clone());
    }

    /// Disable block cache
    pub fn disable_cache(&mut self) {
        unsafe {
            ffi::rocksdb_block_based_options_set_no_block_cache(self.inner, true as c_uchar);
        }
    }

    /// Sets a [Bloom filter](https://github.com/facebook/rocksdb/wiki/RocksDB-Bloom-Filter)
    /// policy to reduce disk reads.
    ///
    /// # Examples
    ///
    /// ```
    /// use ckb_rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_bloom_filter(10.0, true);
    /// ```
    pub fn set_bloom_filter(&mut self, bits_per_key: c_double, block_based: bool) {
        unsafe {
            let bloom = if block_based {
                ffi::rocksdb_filterpolicy_create_bloom(bits_per_key)
            } else {
                ffi::rocksdb_filterpolicy_create_bloom_full(bits_per_key)
            };

            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, bloom);
        }
    }

    /// Sets a [Ribbon filter](http://rocksdb.org/blog/2021/12/29/ribbon-filter.html)
    /// policy to reduce disk reads.
    ///
    /// Ribbon filters use less memory in exchange for slightly more CPU usage
    /// compared to an equivalent bloom filter.
    ///
    /// # Examples
    ///
    /// ```
    /// use ckb_rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_ribbon_filter(10.0);
    /// ```
    pub fn set_ribbon_filter(&mut self, bloom_equivalent_bits_per_key: c_double) {
        unsafe {
            let ribbon = ffi::rocksdb_filterpolicy_create_ribbon(bloom_equivalent_bits_per_key);
            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, ribbon);
        }
    }

    /// Sets a hybrid [Ribbon filter](http://rocksdb.org/blog/2021/12/29/ribbon-filter.html)
    /// policy to reduce disk reads.
    ///
    /// Uses Bloom filters before the given level, and Ribbon filters for all
    /// other levels. This combines the memory savings from Ribbon filters
    /// with the lower CPU usage of Bloom filters.
    ///
    /// # Examples
    ///
    /// ```
    /// use ckb_rocksdb::BlockBasedOptions;
    ///
    /// let mut opts = BlockBasedOptions::default();
    /// opts.set_hybrid_ribbon_filter(10.0, 2);
    /// ```
    pub fn set_hybrid_ribbon_filter(
        &mut self,
        bloom_equivalent_bits_per_key: c_double,
        bloom_before_level: c_int,
    ) {
        unsafe {
            let ribbon = ffi::rocksdb_filterpolicy_create_ribbon_hybrid(
                bloom_equivalent_bits_per_key,
                bloom_before_level,
            );
            ffi::rocksdb_block_based_options_set_filter_policy(self.inner, ribbon);
        }
    }

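    /// If true, index and filter blocks are stored in the block cache, where
    /// they compete with data blocks and can be evicted under memory
    /// pressure, instead of being kept in heap memory outside the cache.
    ///
    /// # Examples
    ///
    /// A minimal sketch, usually combined with a shared block cache (same
    /// `Cache` re-export assumption as above):
    ///
    /// ```
    /// use ckb_rocksdb::{BlockBasedOptions, Cache};
    ///
    /// let cache = Cache::new_lru_cache(64 * 1024 * 1024);
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_block_cache(&cache);
    /// block_opts.set_cache_index_and_filter_blocks(true);
    /// block_opts.set_pin_l0_filter_and_index_blocks_in_cache(true);
    /// ```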
    pub fn set_cache_index_and_filter_blocks(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_cache_index_and_filter_blocks(self.inner, v as u8);
        }
    }

    /// Defines the index type to be used for SS-table lookups.
    ///
    /// # Examples
    ///
    /// ```
    /// use ckb_rocksdb::{BlockBasedOptions, BlockBasedIndexType, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_index_type(BlockBasedIndexType::HashSearch);
    /// ```
    pub fn set_index_type(&mut self, index_type: BlockBasedIndexType) {
        let index = index_type as i32;
        unsafe {
            ffi::rocksdb_block_based_options_set_index_type(self.inner, index);
        }
    }

    /// If cache_index_and_filter_blocks is true and the below is true, then
    /// filter and index blocks are stored in the cache, but a reference is
    /// held in the "table reader" object so the blocks are pinned and only
    /// evicted from cache when the table reader is freed.
    ///
    /// Default: false.
    pub fn set_pin_l0_filter_and_index_blocks_in_cache(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache(
                self.inner,
                v as c_uchar,
            );
        }
    }

    /// If cache_index_and_filter_blocks is true and the below is true, then
    /// the top-level index of partitioned filter and index blocks are stored in
    /// the cache, but a reference is held in the "table reader" object so the
    /// blocks are pinned and only evicted from cache when the table reader is
    /// freed. This is not limited to l0 in LSM tree.
    ///
    /// Default: false.
    pub fn set_pin_top_level_index_and_filter(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_pin_top_level_index_and_filter(
                self.inner,
                v as c_uchar,
            );
        }
    }

    /// Format version, reserved for backward compatibility.
    ///
    /// See full [list](https://github.com/facebook/rocksdb/blob/f059c7d9b96300091e07429a60f4ad55dac84859/include/rocksdb/table.h#L249-L274)
    /// of the supported versions.
    ///
    /// Default: 2.
    pub fn set_format_version(&mut self, version: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_format_version(self.inner, version);
        }
    }

    /// Number of keys between restart points for delta encoding of keys.
    /// This parameter can be changed dynamically. Most clients should
    /// leave this parameter alone. The minimum value allowed is 1. Any smaller
    /// value will be silently overwritten with 1.
    ///
    /// Default: 16.
    pub fn set_block_restart_interval(&mut self, interval: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_block_restart_interval(self.inner, interval);
        }
    }

    /// Same as block_restart_interval but used for the index block.
    /// If you don't plan to run RocksDB before version 5.16 and you are
    /// using `index_block_restart_interval` > 1, you should
    /// probably set the `format_version` to >= 4 as it would reduce the index size.
    ///
    /// Default: 1.
    pub fn set_index_block_restart_interval(&mut self, interval: i32) {
        unsafe {
            ffi::rocksdb_block_based_options_set_index_block_restart_interval(self.inner, interval);
        }
    }

    /// Set the data block index type for point lookups:
    ///  `DataBlockIndexType::BinarySearch` to use binary search within the data block.
    ///  `DataBlockIndexType::BinaryAndHash` to use the data block hash index in combination with
    ///  the normal binary search.
    ///
    /// The hash table utilization ratio is adjustable using [`set_data_block_hash_ratio`](#method.set_data_block_hash_ratio), which is
    /// valid only when using `DataBlockIndexType::BinaryAndHash`.
    ///
    /// Default: `BinarySearch`
    ///
    /// # Examples
    ///
    /// ```
    /// use ckb_rocksdb::{BlockBasedOptions, DataBlockIndexType, Options};
    ///
    /// let mut opts = Options::default();
    /// let mut block_opts = BlockBasedOptions::default();
    /// block_opts.set_data_block_index_type(DataBlockIndexType::BinaryAndHash);
    /// block_opts.set_data_block_hash_ratio(0.85);
    /// ```
    pub fn set_data_block_index_type(&mut self, index_type: DataBlockIndexType) {
        let index_t = index_type as i32;
        unsafe {
            ffi::rocksdb_block_based_options_set_data_block_index_type(self.inner, index_t);
        }
    }

    /// Set the data block hash index utilization ratio.
    ///
    /// The smaller the utilization ratio, the fewer hash collisions happen, reducing the risk of a
    /// point lookup falling back to binary search due to collisions. A smaller ratio means faster
    /// lookups at the price of more space overhead.
    ///
    /// Default: 0.75
    pub fn set_data_block_hash_ratio(&mut self, ratio: f64) {
        unsafe {
            ffi::rocksdb_block_based_options_set_data_block_hash_ratio(self.inner, ratio);
        }
    }

    /// If false, place only prefixes in the filter, not whole keys.
    ///
    /// Defaults to true.
    pub fn set_whole_key_filtering(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_block_based_options_set_whole_key_filtering(self.inner, v as u8);
        }
    }
}

impl Default for BlockBasedOptions {
    fn default() -> Self {
        let block_opts = unsafe { ffi::rocksdb_block_based_options_create() };
        assert!(
            !block_opts.is_null(),
            "Could not create RocksDB block based options"
        );

        Self {
            inner: block_opts,
            outlive: BlockBasedOptionsMustOutliveDB::default(),
        }
    }
}

impl CuckooTableOptions {
    /// Determines the utilization of hash tables. Smaller values
    /// result in larger hash tables with fewer collisions.
    /// Default: 0.9
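    ///
    /// # Examples
    ///
    /// A minimal sketch of configuring cuckoo table options (the values are
    /// arbitrary examples); this assumes `CuckooTableOptions` is re-exported
    /// at the crate root:
    ///
    /// ```
    /// use ckb_rocksdb::CuckooTableOptions;
    ///
    /// let mut cuckoo_opts = CuckooTableOptions::default();
    /// cuckoo_opts.set_hash_ratio(0.75);
    /// cuckoo_opts.set_max_search_depth(100);
    /// ```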
    pub fn set_hash_ratio(&mut self, ratio: f64) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_hash_ratio(self.inner, ratio);
        }
    }

    /// A property used by the builder to determine how deep to search for a
    /// path to displace elements in case of collision. See the
    /// Builder.MakeSpaceForKey method. Higher values result in more efficient
    /// hash tables with fewer lookups but take more time to build.
    /// Default: 100
    pub fn set_max_search_depth(&mut self, depth: u32) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_max_search_depth(self.inner, depth);
        }
    }

    /// In case of collision while inserting, the builder
    /// attempts to insert in the next cuckoo_block_size
    /// locations before skipping over to the next Cuckoo hash
    /// function. This makes lookups more cache friendly in case
    /// of collisions.
    /// Default: 5
    pub fn set_cuckoo_block_size(&mut self, size: u32) {
        unsafe {
            ffi::rocksdb_cuckoo_options_set_cuckoo_block_size(self.inner, size);
        }
    }

    /// If this option is enabled, the user key is treated as a uint64_t and its
    /// value is used as the hash value directly. This option changes the
    /// builder's behavior. Readers ignore this option and behave according to
    /// what is specified in the table property.
    /// Default: false
    pub fn set_identity_as_first_hash(&mut self, flag: bool) {
        let v = flag as u8;
        unsafe {
            ffi::rocksdb_cuckoo_options_set_identity_as_first_hash(self.inner, v);
        }
    }

    /// If this option is set to true, modulo is used during hash calculation.
    /// This often yields better space efficiency at the cost of performance.
    /// If this option is set to false, the number of entries in the table is
    /// constrained to be a power of two, and bitwise AND is used to calculate
    /// the hash, which is faster in general.
    /// Default: true
    pub fn set_use_module_hash(&mut self, flag: bool) {
        let v = flag as u8;
        unsafe {
            ffi::rocksdb_cuckoo_options_set_use_module_hash(self.inner, v);
        }
    }
}

impl Default for CuckooTableOptions {
    fn default() -> Self {
        let opts = unsafe { ffi::rocksdb_cuckoo_options_create() };
        assert!(!opts.is_null(), "Could not create RocksDB cuckoo options");

        Self { inner: opts }
    }
}

/// Verbosity of the LOG.
#[derive(Debug, Copy, Clone, PartialEq)]
#[repr(i32)]
pub enum LogLevel {
    Debug = 0,
    Info,
    Warn,
    Error,
    Fatal,
    Header,
}

impl Options {
    /// By default, RocksDB uses only one background thread for flush and
    /// compaction. Calling this function will set it up such that a total of
    /// `parallelism` threads is used. A good value for `parallelism` is the
    /// number of cores. You almost definitely want to call this function if
    /// your system is bottlenecked by RocksDB.
    ///
    /// # Examples
    ///
    /// ```
    /// use ckb_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.increase_parallelism(3);
    /// ```
    pub fn increase_parallelism(&mut self, parallelism: i32) {
        unsafe {
            ffi::rocksdb_options_increase_parallelism(self.inner, parallelism);
        }
    }

    /// Optimize level style compaction.
    ///
    /// Default values for some parameters in `Options` are not optimized for heavy
    /// workloads and big datasets, which means you might observe write stalls under
    /// some conditions.
    ///
    /// This can be used as one of the starting points for tuning RocksDB options in
    /// such cases.
    ///
    /// Internally, it sets `write_buffer_size`, `min_write_buffer_number_to_merge`,
    /// `max_write_buffer_number`, `level0_file_num_compaction_trigger`,
    /// `target_file_size_base`, `max_bytes_for_level_base`, so it can override those
    /// parameters if they were set before.
    ///
    /// It sets buffer sizes so that memory consumption would be constrained by
    /// `memtable_memory_budget`.
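    ///
    /// # Examples
    ///
    /// A minimal sketch; the 512 MiB budget is an assumed example value:
    ///
    /// ```
    /// use ckb_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.optimize_level_style_compaction(512 * 1024 * 1024);
    /// ```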
    pub fn optimize_level_style_compaction(&mut self, memtable_memory_budget: usize) {
        unsafe {
            ffi::rocksdb_options_optimize_level_style_compaction(
                self.inner,
                memtable_memory_budget as u64,
            );
        }
    }

    /// Optimize universal style compaction.
    ///
    /// Default values for some parameters in `Options` are not optimized for heavy
    /// workloads and big datasets, which means you might observe write stalls under
    /// some conditions.
    ///
    /// This can be used as one of the starting points for tuning RocksDB options in
    /// such cases.
    ///
    /// Internally, it sets `write_buffer_size`, `min_write_buffer_number_to_merge`,
    /// `max_write_buffer_number`, `level0_file_num_compaction_trigger`,
    /// `target_file_size_base`, `max_bytes_for_level_base`, so it can override those
    /// parameters if they were set before.
    ///
    /// It sets buffer sizes so that memory consumption would be constrained by
    /// `memtable_memory_budget`.
    pub fn optimize_universal_style_compaction(&mut self, memtable_memory_budget: usize) {
        unsafe {
            ffi::rocksdb_options_optimize_universal_style_compaction(
                self.inner,
                memtable_memory_budget as u64,
            );
        }
    }

    /// If true, the database will be created if it is missing.
    ///
    /// Default: `false`
    ///
    /// # Examples
    ///
    /// ```
    /// use ckb_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.create_if_missing(true);
    /// ```
    pub fn create_if_missing(&mut self, create_if_missing: bool) {
        unsafe {
            ffi::rocksdb_options_set_create_if_missing(self.inner, create_if_missing as c_uchar);
        }
    }

    /// If true, any column families that didn't exist when opening the database
    /// will be created.
    ///
    /// Default: `false`
    ///
    /// # Examples
    ///
    /// ```
    /// use ckb_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.create_missing_column_families(true);
    /// ```
    pub fn create_missing_column_families(&mut self, create_missing_cfs: bool) {
        unsafe {
            ffi::rocksdb_options_set_create_missing_column_families(
                self.inner,
                create_missing_cfs as c_uchar,
            );
        }
    }

    /// Specifies whether an error should be raised if the database already exists.
    ///
    /// Default: false
    pub fn set_error_if_exists(&mut self, enabled: bool) {
        unsafe {
            ffi::rocksdb_options_set_error_if_exists(self.inner, enabled as c_uchar);
        }
    }

    /// Enable/disable paranoid checks.
    ///
    /// If true, the implementation will do aggressive checking of the
    /// data it is processing and will stop early if it detects any
    /// errors. This may have unforeseen ramifications: for example, a
    /// corruption of one DB entry may cause a large number of entries to
    /// become unreadable or for the entire DB to become unopenable.
    /// If any of the writes to the database fails (Put, Delete, Merge, Write),
    /// the database will switch to read-only mode and fail all other
    /// Write operations.
    ///
    /// Default: false
    pub fn set_paranoid_checks(&mut self, enabled: bool) {
        unsafe {
            ffi::rocksdb_options_set_paranoid_checks(self.inner, enabled as c_uchar);
        }
    }

    /// A list of paths where SST files can be put into, each with a target size.
    /// Newer data is placed into paths specified earlier in the vector while
    /// older data gradually moves to paths specified later in the vector.
    ///
    /// For example, if you have a flash device with 10GB allocated for the DB,
    /// as well as a hard drive of 2TB, you should configure it to be:
    ///   [{"/flash_path", 10GB}, {"/hard_drive", 2TB}]
    ///
    /// The system will try to guarantee data under each path is close to but
    /// not larger than the target size. But current and future file sizes used
    /// in determining where to place a file are based on best-effort estimation,
    /// which means there is a chance that the actual size under the directory
    /// is slightly more than the target size under some workloads. Users should
    /// leave some buffer room for those cases.
    ///
    /// If none of the paths has sufficient room to place a file, the file will
    /// be placed in the last path anyway, regardless of the target size.
    ///
    /// Placing newer data in earlier paths is also best-effort. Users should
    /// expect files to be placed in higher levels in some extreme cases.
    ///
    /// If left empty, only one path will be used, which is the `path` passed
    /// when opening the DB.
    ///
    /// Default: empty
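    ///
    /// # Examples
    ///
    /// A sketch, assuming `DBPath` is re-exported at the crate root with a
    /// `DBPath::new(path, target_size)` constructor as in the upstream
    /// rust-rocksdb crate; the paths and sizes are placeholders:
    ///
    /// ```
    /// use ckb_rocksdb::{DBPath, Options};
    ///
    /// let paths = vec![
    ///     DBPath::new("/flash_path", 10 * 1024 * 1024 * 1024).unwrap(),
    ///     DBPath::new("/hard_drive", 2u64 * 1024 * 1024 * 1024 * 1024).unwrap(),
    /// ];
    /// let mut opts = Options::default();
    /// opts.set_db_paths(&paths);
    /// ```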
    pub fn set_db_paths(&mut self, paths: &[DBPath]) {
        let mut paths: Vec<_> = paths
            .iter()
            .map(|path| path.inner as *const ffi::rocksdb_dbpath_t)
            .collect();
        let num_paths = paths.len();
        unsafe {
            ffi::rocksdb_options_set_db_paths(self.inner, paths.as_mut_ptr(), num_paths);
        }
    }

    /// Use the specified object to interact with the environment,
    /// e.g. to read/write files, schedule background work, etc. In the near
    /// future, support for doing storage operations such as read/write files
    /// through env will be deprecated in favor of file_system.
    ///
    /// Default: Env::default()
    pub fn set_env(&mut self, env: &Env) {
        unsafe {
            ffi::rocksdb_options_set_env(self.inner, env.0.inner);
        }
        self.outlive.env = Some(env.clone());
    }

    /// Sets the compression algorithm that will be used for compressing blocks.
    ///
    /// Default: `DBCompressionType::Snappy` (`DBCompressionType::None` if
    /// snappy feature is not enabled).
    ///
    /// # Examples
    ///
    /// ```
    /// use ckb_rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_compression_type(DBCompressionType::Snappy);
    /// ```
    pub fn set_compression_type(&mut self, t: DBCompressionType) {
        unsafe {
            ffi::rocksdb_options_set_compression(self.inner, t as c_int);
        }
    }

    /// Sets the bottom-most compression algorithm that will be used for
    /// compressing blocks at the bottom-most level.
    ///
    /// Note that to actually enable bottom-most compression after setting the
    /// compression type, it needs to be turned on by calling the
    /// [`set_bottommost_compression_options`] or
    /// [`set_bottommost_zstd_max_train_bytes`] method with the `enabled`
    /// argument set to `true`.
    ///
    /// # Examples
    ///
    /// ```
    /// use ckb_rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_bottommost_compression_type(DBCompressionType::Zstd);
    /// opts.set_bottommost_zstd_max_train_bytes(0, true);
    /// ```
    pub fn set_bottommost_compression_type(&mut self, t: DBCompressionType) {
        unsafe {
            ffi::rocksdb_options_set_bottommost_compression(self.inner, t as c_int);
        }
    }

    /// Different levels can have different compression policies. There
    /// are cases where most lower levels would like to use quick compression
    /// algorithms while the higher levels (which have more data) use
    /// compression algorithms that have better compression but could
    /// be slower. This array, if non-empty, should have an entry for
    /// each level of the database; these override the value specified in
    /// the previous field 'compression'.
    ///
    /// # Examples
    ///
    /// ```
    /// use ckb_rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_compression_per_level(&[
    ///     DBCompressionType::None,
    ///     DBCompressionType::None,
    ///     DBCompressionType::Snappy,
    ///     DBCompressionType::Snappy,
    ///     DBCompressionType::Snappy
    /// ]);
    /// ```
    pub fn set_compression_per_level(&mut self, level_types: &[DBCompressionType]) {
        unsafe {
            let mut level_types: Vec<_> = level_types.iter().map(|&t| t as c_int).collect();
            ffi::rocksdb_options_set_compression_per_level(
                self.inner,
                level_types.as_mut_ptr(),
                level_types.len() as size_t,
            );
        }
    }

    /// Maximum size of dictionaries used to prime the compression library.
    /// Enabling dictionary can improve compression ratios when there are
    /// repetitions across data blocks.
    ///
    /// The dictionary is created by sampling the SST file data. If
    /// `zstd_max_train_bytes` is nonzero, the samples are passed through zstd's
    /// dictionary generator. Otherwise, the random samples are used directly as
    /// the dictionary.
    ///
    /// When compression dictionary is disabled, we compress and write each block
    /// before buffering data for the next one. When compression dictionary is
    /// enabled, we buffer all SST file data in-memory so we can sample it, as data
    /// can only be compressed and written after the dictionary has been finalized.
    /// So users of this feature may see increased memory usage.
    ///
    /// Default: `0`
    ///
    /// # Examples
    ///
    /// ```
    /// use ckb_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_compression_options(4, 5, 6, 7);
    /// ```
    pub fn set_compression_options(
        &mut self,
        w_bits: c_int,
        level: c_int,
        strategy: c_int,
        max_dict_bytes: c_int,
    ) {
        unsafe {
            ffi::rocksdb_options_set_compression_options(
                self.inner,
                w_bits,
                level,
                strategy,
                max_dict_bytes,
            );
        }
    }

    /// Sets compression options for blocks at the bottom-most level. The meaning
    /// of all settings is the same as in the [`set_compression_options`] method,
    /// but they affect only the bottom-most compression, which is set using the
    /// [`set_bottommost_compression_type`] method.
    ///
    /// # Examples
    ///
    /// ```
    /// use ckb_rocksdb::{Options, DBCompressionType};
    ///
    /// let mut opts = Options::default();
    /// opts.set_bottommost_compression_type(DBCompressionType::Zstd);
    /// opts.set_bottommost_compression_options(4, 5, 6, 7, true);
    /// ```
    pub fn set_bottommost_compression_options(
        &mut self,
        w_bits: c_int,
        level: c_int,
        strategy: c_int,
        max_dict_bytes: c_int,
        enabled: bool,
    ) {
        unsafe {
            ffi::rocksdb_options_set_bottommost_compression_options(
                self.inner,
                w_bits,
                level,
                strategy,
                max_dict_bytes,
                enabled as c_uchar,
            );
        }
    }

    /// Sets maximum size of training data passed to zstd's dictionary trainer. Using zstd's
    /// dictionary trainer can achieve even better compression ratio improvements than using
    /// `max_dict_bytes` alone.
    ///
    /// The training data will be used to generate a dictionary of max_dict_bytes.
    ///
    /// Default: 0.
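    ///
    /// # Examples
    ///
    /// A sketch of enabling a zstd-trained dictionary; the sizes and levels
    /// below are assumed example values, not recommendations:
    ///
    /// ```
    /// use ckb_rocksdb::{DBCompressionType, Options};
    ///
    /// let mut opts = Options::default();
    /// opts.set_compression_type(DBCompressionType::Zstd);
    /// // A dictionary size must also be configured via `set_compression_options`.
    /// opts.set_compression_options(-14, 3, 0, 16 * 1024);
    /// opts.set_zstd_max_train_bytes(100 * 16 * 1024);
    /// ```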
    pub fn set_zstd_max_train_bytes(&mut self, value: c_int) {
        unsafe {
            ffi::rocksdb_options_set_compression_options_zstd_max_train_bytes(self.inner, value);
        }
    }

    /// Sets maximum size of training data passed to zstd's dictionary trainer
    /// when compressing the bottom-most level. Using zstd's dictionary trainer
    /// can achieve even better compression ratio improvements than using
    /// `max_dict_bytes` alone.
    ///
    /// The training data will be used to generate a dictionary of
    /// `max_dict_bytes`.
    ///
    /// Default: 0.
    pub fn set_bottommost_zstd_max_train_bytes(&mut self, value: c_int, enabled: bool) {
        unsafe {
            ffi::rocksdb_options_set_bottommost_compression_options_zstd_max_train_bytes(
                self.inner,
                value,
                enabled as c_uchar,
            );
        }
    }

    /// If non-zero, we perform bigger reads when doing compaction. If you're
    /// running RocksDB on spinning disks, you should set this to at least 2MB.
    /// That way RocksDB's compaction is doing sequential instead of random reads.
    ///
    /// When non-zero, we also force new_table_reader_for_compaction_inputs to
    /// true.
    ///
    /// Default: `0`
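    ///
    /// # Examples
    ///
    /// A minimal sketch following the 2MB suggestion above for spinning disks:
    ///
    /// ```
    /// use ckb_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_compaction_readahead_size(2 * 1024 * 1024);
    /// ```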
    pub fn set_compaction_readahead_size(&mut self, compaction_readahead_size: usize) {
        unsafe {
            ffi::rocksdb_options_compaction_readahead_size(self.inner, compaction_readahead_size);
        }
    }

    /// Allow RocksDB to pick dynamic base of bytes for levels.
    /// With this feature turned on, RocksDB will automatically adjust max bytes for each level.
    /// The goal of this feature is to have lower bound on size amplification.
    ///
    /// Default: false.
    pub fn set_level_compaction_dynamic_level_bytes(&mut self, v: bool) {
        unsafe {
            ffi::rocksdb_options_set_level_compaction_dynamic_level_bytes(self.inner, v as c_uchar);
        }
    }

    pub fn set_merge_operator_associative<F: MergeFn + Clone>(
        &mut self,
        name: &str,
        full_merge_fn: F,
    ) {
        let cb = Box::new(MergeOperatorCallback {
            name: CString::new(name.as_bytes()).unwrap(),
            full_merge_fn: full_merge_fn.clone(),
            partial_merge_fn: full_merge_fn,
        });

        unsafe {
            let mo = ffi::rocksdb_mergeoperator_create(
                Box::into_raw(cb).cast::<c_void>(),
                Some(merge_operator::destructor_callback::<F, F>),
                Some(full_merge_callback::<F, F>),
                Some(partial_merge_callback::<F, F>),
                Some(merge_operator::delete_callback),
                Some(merge_operator::name_callback::<F, F>),
            );
            ffi::rocksdb_options_set_merge_operator(self.inner, mo);
        }
    }

    pub fn set_merge_operator<F: MergeFn, PF: MergeFn>(
        &mut self,
        name: &str,
        full_merge_fn: F,
        partial_merge_fn: PF,
    ) {
        let cb = Box::new(MergeOperatorCallback {
            name: CString::new(name.as_bytes()).unwrap(),
            full_merge_fn,
            partial_merge_fn,
        });

        unsafe {
            let mo = ffi::rocksdb_mergeoperator_create(
                Box::into_raw(cb).cast::<c_void>(),
                Some(merge_operator::destructor_callback::<F, PF>),
                Some(full_merge_callback::<F, PF>),
                Some(partial_merge_callback::<F, PF>),
                Some(merge_operator::delete_callback),
                Some(merge_operator::name_callback::<F, PF>),
            );
            ffi::rocksdb_options_set_merge_operator(self.inner, mo);
        }
    }

    #[deprecated(
        since = "0.5.0",
        note = "add_merge_operator has been renamed to set_merge_operator"
    )]
    pub fn add_merge_operator<F: MergeFn + Clone>(&mut self, name: &str, merge_fn: F) {
        self.set_merge_operator_associative(name, merge_fn);
    }

    /// Sets a compaction filter used to determine if entries should be kept, changed,
    /// or removed during compaction.
    ///
    /// An example use case is to remove entries with an expired TTL.
    ///
    /// If you take a snapshot of the database, only values written since the last
    /// snapshot will be passed through the compaction filter.
    ///
    /// If multi-threaded compaction is used, `filter_fn` may be called multiple times
    /// simultaneously.
    pub fn set_compaction_filter<F>(&mut self, name: &str, filter_fn: F)
    where
        F: CompactionFilterFn + Send + 'static,
    {
        let cb = Box::new(CompactionFilterCallback {
            name: CString::new(name.as_bytes()).unwrap(),
            filter_fn,
        });

        unsafe {
            let cf = ffi::rocksdb_compactionfilter_create(
                Box::into_raw(cb).cast::<c_void>(),
                Some(compaction_filter::destructor_callback::<CompactionFilterCallback<F>>),
                Some(compaction_filter::filter_callback::<CompactionFilterCallback<F>>),
                Some(compaction_filter::name_callback::<CompactionFilterCallback<F>>),
            );
            ffi::rocksdb_options_set_compaction_filter(self.inner, cf);
        }
    }

    /// This is a factory that provides compaction filter objects which allow
    /// an application to modify/delete a key-value during background compaction.
    ///
    /// A new filter will be created on each compaction run.  If multi-threaded
    /// compaction is being used, each created CompactionFilter will only be used
    /// from a single thread and so does not need to be thread-safe.
    ///
    /// Default: nullptr
    pub fn set_compaction_filter_factory<F>(&mut self, factory: F)
    where
        F: CompactionFilterFactory + 'static,
    {
        let factory = Box::new(factory);

        unsafe {
            let cff = ffi::rocksdb_compactionfilterfactory_create(
                Box::into_raw(factory).cast::<c_void>(),
                Some(compaction_filter_factory::destructor_callback::<F>),
                Some(compaction_filter_factory::create_compaction_filter_callback::<F>),
                Some(compaction_filter_factory::name_callback::<F>),
            );

            ffi::rocksdb_options_set_compaction_filter_factory(self.inner, cff);
        }
    }

    /// Sets the comparator used to define the order of keys in the table.
    /// Default: a comparator that uses lexicographic byte-wise ordering
    ///
    /// The client must ensure that the comparator supplied here has the same
    /// name and orders keys *exactly* the same as the comparator provided to
    /// previous open calls on the same DB.
    pub fn set_comparator(&mut self, name: &str, compare_fn: CompareFn) {
        let cb = Box::new(ComparatorCallback {
            name: CString::new(name.as_bytes()).unwrap(),
            f: compare_fn,
        });

        unsafe {
            let cmp = ffi::rocksdb_comparator_create(
                Box::into_raw(cb).cast::<c_void>(),
                Some(comparator::destructor_callback),
                Some(comparator::compare_callback),
                Some(comparator::name_callback),
            );
            ffi::rocksdb_options_set_comparator(self.inner, cmp);
        }
    }

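    /// Sets the prefix extractor used for prefix seeks and prefix-based
    /// filtering.
    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming a `SliceTransform::create_fixed_prefix`
    /// constructor as in the upstream rust-rocksdb crate; the prefix length
    /// of 3 is an arbitrary example value:
    ///
    /// ```
    /// use ckb_rocksdb::{Options, SliceTransform};
    ///
    /// let mut opts = Options::default();
    /// opts.set_prefix_extractor(SliceTransform::create_fixed_prefix(3));
    /// ```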
    pub fn set_prefix_extractor(&mut self, prefix_extractor: SliceTransform) {
        unsafe {
            ffi::rocksdb_options_set_prefix_extractor(self.inner, prefix_extractor.inner);
        }
    }

    #[deprecated(
        since = "0.5.0",
        note = "add_comparator has been renamed to set_comparator"
    )]
    pub fn add_comparator(&mut self, name: &str, compare_fn: CompareFn) {
        self.set_comparator(name, compare_fn);
    }

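    /// Prepares the options for point-lookup workloads; per the underlying
    /// RocksDB `OptimizeForPointLookup`, `cache_size` is a block cache size
    /// in megabytes.
    ///
    /// # Examples
    ///
    /// A minimal sketch; the 128 MB cache size is an assumed example value:
    ///
    /// ```
    /// use ckb_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.optimize_for_point_lookup(128);
    /// ```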
    pub fn optimize_for_point_lookup(&mut self, cache_size: u64) {
        unsafe {
            ffi::rocksdb_options_optimize_for_point_lookup(self.inner, cache_size);
        }
    }

    /// Sets the optimize_filters_for_hits flag
    ///
    /// Default: `false`
    ///
    /// # Examples
    ///
    /// ```
    /// use ckb_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_optimize_filters_for_hits(true);
    /// ```
    pub fn set_optimize_filters_for_hits(&mut self, optimize_for_hits: bool) {
        unsafe {
            ffi::rocksdb_options_set_optimize_filters_for_hits(
                self.inner,
                optimize_for_hits as c_int,
            );
        }
    }

    /// Sets the periodicity when obsolete files get deleted.
    ///
    /// Files that go out of scope during the compaction process will still be
    /// deleted automatically on every compaction, regardless of this setting.
    ///
    /// Default: 6 hours
1436    pub fn set_delete_obsolete_files_period_micros(&mut self, micros: u64) {
1437        unsafe {
1438            ffi::rocksdb_options_set_delete_obsolete_files_period_micros(self.inner, micros);
1439        }
1440    }
1441
1442    /// Some functions that make it easier to optimize RocksDB
1443    ///
1444    /// Set appropriate parameters for bulk loading.
1445    ///
1446    /// All data will be in level 0 without any automatic compaction.
1447    /// It's recommended to manually call CompactRange(NULL, NULL) before reading
1448    /// from the database, because otherwise the read can be very slow.
1449    pub fn set_prepare_for_bulk_load(&mut self) {
1450        unsafe {
1451            ffi::rocksdb_options_prepare_for_bulk_load(self.inner);
1452        }
1453    }
1454
1455    /// Sets the number of open files that can be used by the DB. You may need to
1456    /// increase this if your database has a large working set. Value `-1` means
1457    /// files opened are always kept open. You can estimate number of files based
1458    /// on target_file_size_base and target_file_size_multiplier for level-based
1459    /// compaction. For universal-style compaction, you can usually set it to `-1`.
1460    ///
1461    /// Default: `-1`
1462    ///
1463    /// # Examples
1464    ///
1465    /// ```
1466    /// use ckb_rocksdb::Options;
1467    ///
1468    /// let mut opts = Options::default();
1469    /// opts.set_max_open_files(10);
1470    /// ```
1471    pub fn set_max_open_files(&mut self, nfiles: c_int) {
1472        unsafe {
1473            ffi::rocksdb_options_set_max_open_files(self.inner, nfiles);
1474        }
1475    }
1476
1477    /// If max_open_files is -1, DB will open all files on DB::Open(). You can
1478    /// use this option to increase the number of threads used to open the files.
1479    /// Default: 16
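    ///
    /// # Examples
    ///
    /// A minimal sketch; the thread count below is illustrative only:
    ///
    /// ```
    /// use ckb_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_max_file_opening_threads(8);
    /// ```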
1480    pub fn set_max_file_opening_threads(&mut self, nthreads: c_int) {
1481        unsafe {
1482            ffi::rocksdb_options_set_max_file_opening_threads(self.inner, nthreads);
1483        }
1484    }
1485
1486    /// If true, then every store to stable storage will issue an fsync.
1487    /// If false, then every store to stable storage will issue an fdatasync.
1488    /// This parameter should be set to true while storing data to
1489    /// a filesystem like ext3 that can lose files after a reboot.
1490    ///
1491    /// Default: `false`
1492    ///
1493    /// # Examples
1494    ///
1495    /// ```
1496    /// use ckb_rocksdb::Options;
1497    ///
1498    /// let mut opts = Options::default();
1499    /// opts.set_use_fsync(true);
1500    /// ```
1501    pub fn set_use_fsync(&mut self, useit: bool) {
1502        unsafe {
1503            ffi::rocksdb_options_set_use_fsync(self.inner, useit as c_int);
1504        }
1505    }
1506
1507    /// Specifies the absolute info LOG dir.
1508    ///
1509    /// If it is empty, the log files will be in the same dir as data.
1510    /// If it is non-empty, the log files will be in the specified dir,
1511    /// and the db data dir's absolute path will be used as the log file
1512    /// name's prefix.
1513    ///
1514    /// Default: empty
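    ///
    /// # Examples
    ///
    /// A minimal sketch; the directory below is illustrative only:
    ///
    /// ```
    /// use ckb_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_db_log_dir("/path/to/log_dir");
    /// ```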
1515    pub fn set_db_log_dir<P: AsRef<Path>>(&mut self, path: P) {
1516        let p = CString::new(path.as_ref().to_string_lossy().as_bytes()).unwrap();
1517        unsafe {
1518            ffi::rocksdb_options_set_db_log_dir(self.inner, p.as_ptr());
1519        }
1520    }
1521
1522    /// Specifies the log level.
1523    /// Consider the `LogLevel` enum for a list of possible levels.
1524    ///
1525    /// Default: Info
1526    ///
1527    /// # Examples
1528    ///
1529    /// ```
1530    /// use ckb_rocksdb::{Options, LogLevel};
1531    ///
1532    /// let mut opts = Options::default();
1533    /// opts.set_log_level(LogLevel::Warn);
1534    /// ```
1535    pub fn set_log_level(&mut self, level: LogLevel) {
1536        unsafe {
1537            ffi::rocksdb_options_set_info_log_level(self.inner, level as c_int);
1538        }
1539    }
1540
1541    /// Allows the OS to incrementally sync files to disk while they are being
1542    /// written, asynchronously, in the background. This operation can be used
1543    /// to smooth out write I/Os over time. Users shouldn't rely on it for
1544    /// persistence guarantees.
1545    /// Issue one request for every bytes_per_sync written. `0` turns it off.
1546    ///
1547    /// Default: `0`
1548    ///
1549    /// You may consider using a rate_limiter to regulate the write rate to the device.
1550    /// When the rate limiter is enabled, bytes_per_sync is automatically set
1551    /// to 1MB.
1552    ///
1553    /// This option applies to table files.
1554    ///
1555    /// # Examples
1556    ///
1557    /// ```
1558    /// use ckb_rocksdb::Options;
1559    ///
1560    /// let mut opts = Options::default();
1561    /// opts.set_bytes_per_sync(1024 * 1024);
1562    /// ```
1563    pub fn set_bytes_per_sync(&mut self, nbytes: u64) {
1564        unsafe {
1565            ffi::rocksdb_options_set_bytes_per_sync(self.inner, nbytes);
1566        }
1567    }
1568
1569    /// Same as bytes_per_sync, but applies to WAL files.
1570    ///
1571    /// Default: 0, turned off
1572    ///
1573    /// Dynamically changeable through SetDBOptions() API.
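    ///
    /// # Examples
    ///
    /// A minimal sketch; the 512 KiB value below is illustrative only:
    ///
    /// ```
    /// use ckb_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_wal_bytes_per_sync(512 * 1024);
    /// ```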
1574    pub fn set_wal_bytes_per_sync(&mut self, nbytes: u64) {
1575        unsafe {
1576            ffi::rocksdb_options_set_wal_bytes_per_sync(self.inner, nbytes);
1577        }
1578    }
1579
1580    /// Sets the maximum buffer size that is used by WritableFileWriter.
1581    ///
1582    /// On Windows, we need to maintain an aligned buffer for writes.
1583    /// We allow the buffer to grow until its size hits the limit in buffered
1584    /// IO and fix the buffer size when using direct IO to ensure alignment of
1585    /// write requests if the logical sector size is unusual.
1586    ///
1587    /// Default: 1024 * 1024 (1 MB)
1588    ///
1589    /// Dynamically changeable through SetDBOptions() API.
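    ///
    /// # Examples
    ///
    /// A minimal sketch; the 4 MiB buffer below is illustrative only:
    ///
    /// ```
    /// use ckb_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_writable_file_max_buffer_size(4 * 1024 * 1024);
    /// ```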
1590    pub fn set_writable_file_max_buffer_size(&mut self, nbytes: u64) {
1591        unsafe {
1592            ffi::rocksdb_options_set_writable_file_max_buffer_size(self.inner, nbytes);
1593        }
1594    }
1595
1596    /// If true, allows multiple writers to update memtables in parallel.
1597    /// Only some memtable factories support concurrent writes; currently it
1598    /// is implemented only for SkipListFactory.  Concurrent memtable writes
1599    /// are not compatible with inplace_update_support or filter_deletes.
1600    /// It is strongly recommended to set enable_write_thread_adaptive_yield
1601    /// if you are going to use this feature.
1602    ///
1603    /// Default: true
1604    ///
1605    /// # Examples
1606    ///
1607    /// ```
1608    /// use ckb_rocksdb::Options;
1609    ///
1610    /// let mut opts = Options::default();
1611    /// opts.set_allow_concurrent_memtable_write(false);
1612    /// ```
1613    pub fn set_allow_concurrent_memtable_write(&mut self, allow: bool) {
1614        unsafe {
1615            ffi::rocksdb_options_set_allow_concurrent_memtable_write(self.inner, allow as c_uchar);
1616        }
1617    }
1618
1619    /// If true, threads synchronizing with the write batch group leader will wait for up to
1620    /// write_thread_max_yield_usec before blocking on a mutex. This can substantially improve
1621    /// throughput for concurrent workloads, regardless of whether allow_concurrent_memtable_write
1622    /// is enabled.
1623    ///
1624    /// Default: true
1625    pub fn set_enable_write_thread_adaptive_yield(&mut self, enabled: bool) {
1626        unsafe {
1627            ffi::rocksdb_options_set_enable_write_thread_adaptive_yield(
1628                self.inner,
1629                enabled as c_uchar,
1630            );
1631        }
1632    }
1633
1634    /// Controls how an iterator's Next() skips over successive entries with the same user key.
1635    ///
1636    /// This number specifies the number of keys (with the same user key)
1637    /// that will be sequentially skipped before a reseek is issued.
1638    ///
1639    /// Default: 8
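    ///
    /// # Examples
    ///
    /// A minimal sketch; the value below is illustrative only:
    ///
    /// ```
    /// use ckb_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_max_sequential_skip_in_iterations(16);
    /// ```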
1640    pub fn set_max_sequential_skip_in_iterations(&mut self, num: u64) {
1641        unsafe {
1642            ffi::rocksdb_options_set_max_sequential_skip_in_iterations(self.inner, num);
1643        }
1644    }
1645
1646    /// Enable direct I/O mode for reading.
1647    /// It may or may not improve performance depending on the use case.
1648    ///
1649    /// Files will be opened in "direct I/O" mode
1650    /// which means that data read from the disk will not be cached or
1651    /// buffered. The hardware buffer of the devices may however still
1652    /// be used. Memory mapped files are not impacted by these parameters.
1653    ///
1654    /// Default: false
1655    ///
1656    /// # Examples
1657    ///
1658    /// ```
1659    /// use ckb_rocksdb::Options;
1660    ///
1661    /// let mut opts = Options::default();
1662    /// opts.set_use_direct_reads(true);
1663    /// ```
1664    pub fn set_use_direct_reads(&mut self, enabled: bool) {
1665        unsafe {
1666            ffi::rocksdb_options_set_use_direct_reads(self.inner, enabled as c_uchar);
1667        }
1668    }
1669
1670    /// Enable direct I/O mode for flush and compaction.
1671    ///
1672    /// Files will be opened in "direct I/O" mode
1673    /// which means that data written to the disk will not be cached or
1674    /// buffered. The hardware buffer of the devices may however still
1675    /// be used. Memory mapped files are not impacted by these parameters.
1676    /// It may or may not improve performance depending on the use case.
1677    ///
1678    /// Default: false
1679    ///
1680    /// # Examples
1681    ///
1682    /// ```
1683    /// use ckb_rocksdb::Options;
1684    ///
1685    /// let mut opts = Options::default();
1686    /// opts.set_use_direct_io_for_flush_and_compaction(true);
1687    /// ```
1688    pub fn set_use_direct_io_for_flush_and_compaction(&mut self, enabled: bool) {
1689        unsafe {
1690            ffi::rocksdb_options_set_use_direct_io_for_flush_and_compaction(
1691                self.inner,
1692                enabled as c_uchar,
1693            );
1694        }
1695    }
1696
1697    /// Enable/disable marking open files as close-on-exec, which controls whether child processes inherit them.
1698    ///
1699    /// Default: true
1700    pub fn set_is_fd_close_on_exec(&mut self, enabled: bool) {
1701        unsafe {
1702            ffi::rocksdb_options_set_is_fd_close_on_exec(self.inner, enabled as c_uchar);
1703        }
1704    }
1705
1706    /// Hints to the OS that it should not buffer disk I/O. Enabling this
1707    /// parameter may improve performance but increases pressure on the
1708    /// system cache.
1709    ///
1710    /// The exact behavior of this parameter is platform dependent.
1711    ///
1712    /// On POSIX systems, after RocksDB reads data from disk it will
1713    /// mark the pages as "unneeded". The operating system may - or may not
1714    /// - evict these pages from memory, reducing pressure on the system
1715    /// cache. If the disk block is requested again this can result in
1716    /// additional disk I/O.
1717    ///
1718    /// On WINDOWS systems, files will be opened in "unbuffered I/O" mode
1719    /// which means that data read from the disk will not be cached or
1720    /// buffered. The hardware buffer of the devices may however still
1721    /// be used. Memory mapped files are not impacted by this parameter.
1722    ///
1723    /// Default: true
1724    ///
1725    /// # Examples
1726    ///
1727    /// ```
1728    /// #[allow(deprecated)]
1729    /// use ckb_rocksdb::Options;
1730    ///
1731    /// let mut opts = Options::default();
1732    /// opts.set_allow_os_buffer(false);
1733    /// ```
1734    #[deprecated(
1735        since = "0.7.0",
1736        note = "replaced with set_use_direct_reads/set_use_direct_io_for_flush_and_compaction methods"
1737    )]
1738    pub fn set_allow_os_buffer(&mut self, is_allow: bool) {
1739        self.set_use_direct_reads(!is_allow);
1740        self.set_use_direct_io_for_flush_and_compaction(!is_allow);
1741    }
1742
1743    /// Sets the number of shards used for table cache.
1744    ///
1745    /// Default: `6`
1746    ///
1747    /// # Examples
1748    ///
1749    /// ```
1750    /// use ckb_rocksdb::Options;
1751    ///
1752    /// let mut opts = Options::default();
1753    /// opts.set_table_cache_num_shard_bits(4);
1754    /// ```
1755    pub fn set_table_cache_num_shard_bits(&mut self, nbits: c_int) {
1756        unsafe {
1757            ffi::rocksdb_options_set_table_cache_numshardbits(self.inner, nbits);
1758        }
1759    }
1760
1761    /// By default target_file_size_multiplier is 1, which means
1762    /// by default files in different levels will have similar size.
1763    ///
1764    /// Dynamically changeable through SetOptions() API
1765    pub fn set_target_file_size_multiplier(&mut self, multiplier: i32) {
1766        unsafe {
1767            ffi::rocksdb_options_set_target_file_size_multiplier(self.inner, multiplier as c_int);
1768        }
1769    }
1770
1771    /// Sets the minimum number of write buffers that will be merged together
1772    /// before writing to storage.  If set to `1`, then
1773    /// all write buffers are flushed to L0 as individual files and this increases
1774    /// read amplification because a get request has to check all of these
1775    /// files. Also, an in-memory merge may result in writing less
1776    /// data to storage if there are duplicate records in each of these
1777    /// individual write buffers.
1778    ///
1779    /// Default: `1`
1780    ///
1781    /// # Examples
1782    ///
1783    /// ```
1784    /// use ckb_rocksdb::Options;
1785    ///
1786    /// let mut opts = Options::default();
1787    /// opts.set_min_write_buffer_number(2);
1788    /// ```
1789    pub fn set_min_write_buffer_number(&mut self, nbuf: c_int) {
1790        unsafe {
1791            ffi::rocksdb_options_set_min_write_buffer_number_to_merge(self.inner, nbuf);
1792        }
1793    }
1794
1795    /// Sets the maximum number of write buffers that are built up in memory.
1796    /// The default and the minimum number is 2, so that when 1 write buffer
1797    /// is being flushed to storage, new writes can continue to the other
1798    /// write buffer.
1799    /// If max_write_buffer_number > 3, writing will be slowed down to
1800    /// options.delayed_write_rate if we are writing to the last write buffer
1801    /// allowed.
1802    ///
1803    /// Default: `2`
1804    ///
1805    /// # Examples
1806    ///
1807    /// ```
1808    /// use ckb_rocksdb::Options;
1809    ///
1810    /// let mut opts = Options::default();
1811    /// opts.set_max_write_buffer_number(4);
1812    /// ```
1813    pub fn set_max_write_buffer_number(&mut self, nbuf: c_int) {
1814        unsafe {
1815            ffi::rocksdb_options_set_max_write_buffer_number(self.inner, nbuf);
1816        }
1817    }
1818
1819    /// Sets the amount of data to build up in memory (backed by an unsorted log
1820    /// on disk) before converting to a sorted on-disk file.
1821    ///
1822    /// Larger values increase performance, especially during bulk loads.
1823    /// Up to max_write_buffer_number write buffers may be held in memory
1824    /// at the same time,
1825    /// so you may wish to adjust this parameter to control memory usage.
1826    /// Also, a larger write buffer will result in a longer recovery time
1827    /// the next time the database is opened.
1828    ///
1829    /// Note that write_buffer_size is enforced per column family.
1830    /// See db_write_buffer_size for sharing memory across column families.
1831    ///
1832    /// Default: `0x4000000` (64MiB)
1833    ///
1834    /// Dynamically changeable through SetOptions() API
1835    ///
1836    /// # Examples
1837    ///
1838    /// ```
1839    /// use ckb_rocksdb::Options;
1840    ///
1841    /// let mut opts = Options::default();
1842    /// opts.set_write_buffer_size(128 * 1024 * 1024);
1843    /// ```
1844    pub fn set_write_buffer_size(&mut self, size: usize) {
1845        unsafe {
1846            ffi::rocksdb_options_set_write_buffer_size(self.inner, size);
1847        }
1848    }
1849
1850    /// Amount of data to build up in memtables across all column
1851    /// families before writing to disk.
1852    ///
1853    /// This is distinct from write_buffer_size, which enforces a limit
1854    /// for a single memtable.
1855    ///
1856    /// This feature is disabled by default. Specify a non-zero value
1857    /// to enable it.
1858    ///
1859    /// Default: 0 (disabled)
1860    ///
1861    /// # Examples
1862    ///
1863    /// ```
1864    /// use ckb_rocksdb::Options;
1865    ///
1866    /// let mut opts = Options::default();
1867    /// opts.set_db_write_buffer_size(128 * 1024 * 1024);
1868    /// ```
1869    pub fn set_db_write_buffer_size(&mut self, size: usize) {
1870        unsafe {
1871            ffi::rocksdb_options_set_db_write_buffer_size(self.inner, size);
1872        }
1873    }
1874
1875    /// Control maximum total data size for a level.
1876    /// max_bytes_for_level_base is the max total for level-1.
1877    /// Maximum number of bytes for level L can be calculated as
1878    /// (max_bytes_for_level_base) * (max_bytes_for_level_multiplier ^ (L-1))
1879    /// For example, if max_bytes_for_level_base is 200MB, and if
1880    /// max_bytes_for_level_multiplier is 10, total data size for level-1
1881    /// will be 200MB, total file size for level-2 will be 2GB,
1882    /// and total file size for level-3 will be 20GB.
1883    ///
1884    /// Default: `0x10000000` (256MiB).
1885    ///
1886    /// Dynamically changeable through SetOptions() API
1887    ///
1888    /// # Examples
1889    ///
1890    /// ```
1891    /// use ckb_rocksdb::Options;
1892    ///
1893    /// let mut opts = Options::default();
1894    /// opts.set_max_bytes_for_level_base(512 * 1024 * 1024);
1895    /// ```
1896    pub fn set_max_bytes_for_level_base(&mut self, size: u64) {
1897        unsafe {
1898            ffi::rocksdb_options_set_max_bytes_for_level_base(self.inner, size);
1899        }
1900    }
1901
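    /// Sets the ratio between the maximum total size of level (L+1) and the
    /// maximum total size of level L; see the level sizing formula in
    /// `set_max_bytes_for_level_base`.
    ///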
1902    /// Default: `10`
1903    ///
1904    /// # Examples
1905    ///
1906    /// ```
1907    /// use ckb_rocksdb::Options;
1908    ///
1909    /// let mut opts = Options::default();
1910    /// opts.set_max_bytes_for_level_multiplier(4.0);
1911    /// ```
1912    pub fn set_max_bytes_for_level_multiplier(&mut self, mul: f64) {
1913        unsafe {
1914            ffi::rocksdb_options_set_max_bytes_for_level_multiplier(self.inner, mul);
1915        }
1916    }
1917
1918    /// The manifest file is rolled over on reaching this limit.
1919    /// The older manifest file will be deleted.
1920    /// The default value is MAX_INT so that roll-over does not take place.
1921    ///
1922    /// # Examples
1923    ///
1924    /// ```
1925    /// use ckb_rocksdb::Options;
1926    ///
1927    /// let mut opts = Options::default();
1928    /// opts.set_max_manifest_file_size(20 * 1024 * 1024);
1929    /// ```
1930    pub fn set_max_manifest_file_size(&mut self, size: usize) {
1931        unsafe {
1932            ffi::rocksdb_options_set_max_manifest_file_size(self.inner, size);
1933        }
1934    }
1935
1936    /// Sets the target file size for compaction.
1937    /// target_file_size_base is per-file size for level-1.
1938    /// Target file size for level L can be calculated by
1939    /// target_file_size_base * (target_file_size_multiplier ^ (L-1))
1940    /// For example, if target_file_size_base is 2MB and
1941    /// target_file_size_multiplier is 10, then each file on level-1 will
1942    /// be 2MB, and each file on level 2 will be 20MB,
1943    /// and each file on level-3 will be 200MB.
1944    ///
1945    /// Default: `0x4000000` (64MiB)
1946    ///
1947    /// Dynamically changeable through SetOptions() API
1948    ///
1949    /// # Examples
1950    ///
1951    /// ```
1952    /// use ckb_rocksdb::Options;
1953    ///
1954    /// let mut opts = Options::default();
1955    /// opts.set_target_file_size_base(128 * 1024 * 1024);
1956    /// ```
1957    pub fn set_target_file_size_base(&mut self, size: u64) {
1958        unsafe {
1959            ffi::rocksdb_options_set_target_file_size_base(self.inner, size);
1960        }
1961    }
1962
1963    /// Sets the minimum number of write buffers that will be merged together
1964    /// before writing to storage.  If set to `1`, then
1965    /// all write buffers are flushed to L0 as individual files and this increases
1966    /// read amplification because a get request has to check all of these
1967    /// files. Also, an in-memory merge may result in writing less
1968    /// data to storage if there are duplicate records in each of these
1969    /// individual write buffers.
1970    ///
1971    /// Default: `1`
1972    ///
1973    /// # Examples
1974    ///
1975    /// ```
1976    /// use ckb_rocksdb::Options;
1977    ///
1978    /// let mut opts = Options::default();
1979    /// opts.set_min_write_buffer_number_to_merge(2);
1980    /// ```
1981    pub fn set_min_write_buffer_number_to_merge(&mut self, to_merge: c_int) {
1982        unsafe {
1983            ffi::rocksdb_options_set_min_write_buffer_number_to_merge(self.inner, to_merge);
1984        }
1985    }
1986
1987    /// Sets the number of files to trigger level-0 compaction. A value < `0` means that
1988    /// level-0 compaction will not be triggered by the number of files at all.
1989    ///
1990    /// Default: `4`
1991    ///
1992    /// Dynamically changeable through SetOptions() API
1993    ///
1994    /// # Examples
1995    ///
1996    /// ```
1997    /// use ckb_rocksdb::Options;
1998    ///
1999    /// let mut opts = Options::default();
2000    /// opts.set_level_zero_file_num_compaction_trigger(8);
2001    /// ```
2002    pub fn set_level_zero_file_num_compaction_trigger(&mut self, n: c_int) {
2003        unsafe {
2004            ffi::rocksdb_options_set_level0_file_num_compaction_trigger(self.inner, n);
2005        }
2006    }
2007
2008    /// Sets the soft limit on number of level-0 files. We start slowing down writes at this
2009    /// point. A value < `0` means that no write slowdown will be triggered by
2010    /// the number of files in level-0.
2011    ///
2012    /// Default: `20`
2013    ///
2014    /// Dynamically changeable through SetOptions() API
2015    ///
2016    /// # Examples
2017    ///
2018    /// ```
2019    /// use ckb_rocksdb::Options;
2020    ///
2021    /// let mut opts = Options::default();
2022    /// opts.set_level_zero_slowdown_writes_trigger(10);
2023    /// ```
2024    pub fn set_level_zero_slowdown_writes_trigger(&mut self, n: c_int) {
2025        unsafe {
2026            ffi::rocksdb_options_set_level0_slowdown_writes_trigger(self.inner, n);
2027        }
2028    }
2029
2030    /// Sets the maximum number of level-0 files.  We stop writes at this point.
2031    ///
2032    /// Default: `24`
2033    ///
2034    /// Dynamically changeable through SetOptions() API
2035    ///
2036    /// # Examples
2037    ///
2038    /// ```
2039    /// use ckb_rocksdb::Options;
2040    ///
2041    /// let mut opts = Options::default();
2042    /// opts.set_level_zero_stop_writes_trigger(48);
2043    /// ```
2044    pub fn set_level_zero_stop_writes_trigger(&mut self, n: c_int) {
2045        unsafe {
2046            ffi::rocksdb_options_set_level0_stop_writes_trigger(self.inner, n);
2047        }
2048    }
2049
2050    /// Sets the compaction style.
2051    ///
2052    /// Default: DBCompactionStyle::Level
2053    ///
2054    /// # Examples
2055    ///
2056    /// ```
2057    /// use ckb_rocksdb::{Options, DBCompactionStyle};
2058    ///
2059    /// let mut opts = Options::default();
2060    /// opts.set_compaction_style(DBCompactionStyle::Universal);
2061    /// ```
2062    pub fn set_compaction_style(&mut self, style: DBCompactionStyle) {
2063        unsafe {
2064            ffi::rocksdb_options_set_compaction_style(self.inner, style as c_int);
2065        }
2066    }
2067
2068    /// Sets the options needed to support Universal Style compactions.
2069    pub fn set_universal_compaction_options(&mut self, uco: &UniversalCompactOptions) {
2070        unsafe {
2071            ffi::rocksdb_options_set_universal_compaction_options(self.inner, uco.inner);
2072        }
2073    }
2074
2075    /// Sets the options for FIFO compaction style.
2076    pub fn set_fifo_compaction_options(&mut self, fco: &FifoCompactOptions) {
2077        unsafe {
2078            ffi::rocksdb_options_set_fifo_compaction_options(self.inner, fco.inner);
2079        }
2080    }
2081
2082    /// Setting unordered_write to true trades the immutability guarantee of
2083    /// snapshots for higher write throughput. This violates the
2084    /// repeatability one expects from ::Get from a snapshot, as well as
2085    /// ::MultiGet and Iterator's consistent-point-in-time view property.
2086    /// If the application cannot tolerate the relaxed guarantees, it can implement
2087    /// its own mechanisms to work around that and yet benefit from the higher
2088    /// throughput. Using TransactionDB with WRITE_PREPARED write policy and
2089    /// two_write_queues=true is one way to achieve immutable snapshots despite
2090    /// unordered_write.
2091    ///
2092    /// By default, i.e., when it is false, rocksdb does not advance the sequence
2093    /// number for new snapshots unless all the writes with lower sequence numbers
2094    /// are already finished. This provides the immutability that we expect from
2095    /// snapshots. Moreover, since Iterator and MultiGet internally depend on
2096    /// snapshots, the snapshot immutability results in Iterator and MultiGet
2097    /// offering a consistent-point-in-time view. If set to true, although the
2098    /// Read-Your-Own-Write property is still provided, the snapshot immutability
2099    /// property is relaxed: the writes issued after the snapshot is obtained (with
2100    /// larger sequence numbers) will still not be visible to the reads from that
2101    /// snapshot; however, there might still be pending writes (with lower sequence
2102    /// numbers) that will change the state visible to the snapshot after they
2103    /// land in the memtable.
2104    ///
2105    /// Default: false
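    ///
    /// # Examples
    ///
    /// A minimal sketch; only enable this if the relaxed snapshot guarantees
    /// described above are acceptable:
    ///
    /// ```
    /// use ckb_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_unordered_write(true);
    /// ```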
2106    pub fn set_unordered_write(&mut self, unordered: bool) {
2107        unsafe {
2108            ffi::rocksdb_options_set_unordered_write(self.inner, unordered as c_uchar);
2109        }
2110    }
2111
2112    /// Sets the maximum number of threads that will
2113    /// concurrently perform a compaction job by breaking it into multiple,
2114    /// smaller ones that are run simultaneously.
2115    ///
2116    /// Default: 1 (i.e. no subcompactions)
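    ///
    /// # Examples
    ///
    /// A minimal sketch; the value below is illustrative, not a tuned
    /// recommendation:
    ///
    /// ```
    /// use ckb_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_max_subcompactions(4);
    /// ```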
2117    pub fn set_max_subcompactions(&mut self, num: u32) {
2118        unsafe {
2119            ffi::rocksdb_options_set_max_subcompactions(self.inner, num);
2120        }
2121    }
2122
2123    /// Sets the maximum number of concurrent background jobs
2124    /// (compactions and flushes).
2125    ///
2126    /// Default: 2
2127    ///
2128    /// Dynamically changeable through SetDBOptions() API.
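    ///
    /// # Examples
    ///
    /// A minimal sketch; the value below is illustrative, not a tuned
    /// recommendation:
    ///
    /// ```
    /// use ckb_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_max_background_jobs(4);
    /// ```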
2129    pub fn set_max_background_jobs(&mut self, jobs: c_int) {
2130        unsafe {
2131            ffi::rocksdb_options_set_max_background_jobs(self.inner, jobs);
2132        }
2133    }
2134
2135    /// Sets the maximum number of concurrent background compaction jobs, submitted to
2136    /// the default LOW priority thread pool.
2137    /// We first try to schedule compactions based on
2138    /// `base_background_compactions`. If the compaction cannot catch up, we
2139    /// will increase number of compaction threads up to
2140    /// `max_background_compactions`.
2141    ///
2142    /// If you're increasing this, also consider increasing number of threads in
2143    /// LOW priority thread pool. For more information, see
2144    /// Env::SetBackgroundThreads
2145    ///
2146    /// Default: `1`
2147    ///
2148    /// # Examples
2149    ///
2150    /// ```
2151    /// use ckb_rocksdb::Options;
2152    ///
2153    /// let mut opts = Options::default();
2154    /// opts.set_max_background_compactions(2);
2155    /// ```
2156    #[deprecated(
2157        since = "0.17.0",
2158        note = "RocksDB automatically decides this based on the value of max_background_jobs"
2159    )]
2160    pub fn set_max_background_compactions(&mut self, n: c_int) {
2161        unsafe {
2162            ffi::rocksdb_options_set_max_background_compactions(self.inner, n);
2163        }
2164    }
2165
2166    /// Sets the maximum number of concurrent background memtable flush jobs, submitted to
2167    /// the HIGH priority thread pool.
2168    ///
2169    /// By default, all background jobs (major compaction and memtable flush) go
2170    /// to the LOW priority pool. If this option is set to a positive number,
2171    /// memtable flush jobs will be submitted to the HIGH priority pool.
2172    /// It is important when the same Env is shared by multiple db instances.
2173    /// Without a separate pool, long running major compaction jobs could
2174    /// potentially block memtable flush jobs of other db instances, leading to
2175    /// unnecessary Put stalls.
2176    ///
2177    /// If you're increasing this, also consider increasing number of threads in
2178    /// HIGH priority thread pool. For more information, see
2179    /// Env::SetBackgroundThreads
2180    ///
2181    /// Default: `1`
2182    ///
2183    /// # Examples
2184    ///
2185    /// ```
2186    /// use ckb_rocksdb::Options;
2187    ///
2188    /// let mut opts = Options::default();
2189    /// opts.set_max_background_flushes(2);
2190    /// ```
2191    #[deprecated(
2192        since = "0.17.0",
2193        note = "RocksDB automatically decides this based on the value of max_background_jobs"
2194    )]
2195    pub fn set_max_background_flushes(&mut self, n: c_int) {
2196        unsafe {
2197            ffi::rocksdb_options_set_max_background_flushes(self.inner, n);
2198        }
2199    }
2200
2201    /// Disables automatic compactions. Manual compactions can still
2202    /// be issued on this column family
2203    ///
2204    /// Default: `false`
2205    ///
2206    /// Dynamically changeable through SetOptions() API
2207    ///
2208    /// # Examples
2209    ///
2210    /// ```
2211    /// use ckb_rocksdb::Options;
2212    ///
2213    /// let mut opts = Options::default();
2214    /// opts.set_disable_auto_compactions(true);
2215    /// ```
2216    pub fn set_disable_auto_compactions(&mut self, disable: bool) {
2217        unsafe {
2218            ffi::rocksdb_options_set_disable_auto_compactions(self.inner, disable as c_int);
2219        }
2220    }
2221
2222    /// Sets the page size for huge pages used by the
2223    /// arena of the memtable.
2224    /// If <= 0, it won't allocate from huge pages but from malloc.
2225    /// Users are responsible for reserving huge pages for it to be allocated. For
2226    /// example:
2227    ///      sysctl -w vm.nr_hugepages=20
2228    /// See the Linux doc Documentation/vm/hugetlbpage.txt.
2229    /// If there aren't enough free huge pages available, it will fall back to
2230    /// malloc.
2231    ///
2232    /// Dynamically changeable through SetOptions() API
2233    pub fn set_memtable_huge_page_size(&mut self, size: size_t) {
2234        unsafe {
2235            ffi::rocksdb_options_set_memtable_huge_page_size(self.inner, size);
2236        }
2237    }
2238
2239    /// Sets the maximum number of successive merge operations on a key in the memtable.
2240    ///
2241    /// When a merge operation is added to the memtable and the maximum number of
2242    /// successive merges is reached, the value of the key will be calculated and
2243    /// inserted into the memtable instead of the merge operation. This will
2244    /// ensure that there are never more than max_successive_merges merge
2245    /// operations in the memtable.
2246    ///
2247    /// Default: 0 (disabled)
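    ///
    /// # Examples
    ///
    /// A minimal sketch; the value below is illustrative, not a tuned
    /// recommendation:
    ///
    /// ```
    /// use ckb_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_max_successive_merges(8);
    /// ```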
2248    pub fn set_max_successive_merges(&mut self, num: usize) {
2249        unsafe {
2250            ffi::rocksdb_options_set_max_successive_merges(self.inner, num);
2251        }
2252    }
2253
2254    /// Control locality of bloom filter probes to improve cache miss rate.
2255    /// This option only applies to memtable prefix bloom and plaintable
2256    /// prefix bloom. It essentially limits the max number of cache lines each
2257    /// bloom filter check can touch.
2258    ///
2259    /// This optimization is turned off when set to 0. The number should never
2260    /// be greater than the number of probes. This option can boost performance
2261    /// for in-memory workloads but should be used with care since it can cause
2262    /// higher false positive rate.
2263    ///
2264    /// Default: 0
2265    pub fn set_bloom_locality(&mut self, v: u32) {
2266        unsafe {
2267            ffi::rocksdb_options_set_bloom_locality(self.inner, v);
2268        }
2269    }
2270
2271    /// Enable/disable thread-safe inplace updates.
2272    ///
2273    /// Updates are applied in place only if
2274    /// * the key exists in the current memtable
2275    /// * sizeof(new_value) <= sizeof(old_value)
2276    /// * old_value for that key is a put i.e. kTypeValue
2277    ///
2278    /// Default: false.
2279    pub fn set_inplace_update_support(&mut self, enabled: bool) {
2280        unsafe {
2281            ffi::rocksdb_options_set_inplace_update_support(self.inner, enabled as c_uchar);
2282        }
2283    }
2284
2285    /// Sets the number of locks used for inplace update.
2286    ///
2287    /// Default: 10000 when inplace_update_support = true, otherwise 0.
2288    pub fn set_inplace_update_locks(&mut self, num: usize) {
2289        unsafe {
2290            ffi::rocksdb_options_set_inplace_update_num_locks(self.inner, num);
2291        }
2292    }
2293
2294    /// Different max-size multipliers for different levels.
2295    /// These are multiplied by max_bytes_for_level_multiplier to arrive
2296    /// at the max-size of each level.
2297    ///
2298    /// Default: 1
2299    ///
2300    /// Dynamically changeable through SetOptions() API
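    ///
    /// # Examples
    ///
    /// A minimal sketch; the per-level multipliers below are illustrative only:
    ///
    /// ```
    /// use ckb_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// // One illustrative multiplier per level.
    /// opts.set_max_bytes_for_level_multiplier_additional(&[1, 1, 1, 2, 2, 4, 4]);
    /// ```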
2301    pub fn set_max_bytes_for_level_multiplier_additional(&mut self, level_values: &[i32]) {
2302        let count = level_values.len();
2303        unsafe {
2304            ffi::rocksdb_options_set_max_bytes_for_level_multiplier_additional(
2305                self.inner,
2306                level_values.as_ptr() as *mut c_int,
2307                count,
2308            );
2309        }
2310    }
2311
2312    /// If true, then DB::Open() will not fetch and check sizes of all sst files.
2313    /// This may significantly speed up startup if there are many sst files,
2314    /// especially when using non-default Env with expensive GetFileSize().
2315    /// We'll still check that all required sst files exist.
2316    /// If paranoid_checks is false, this option is ignored, and sst files are
2317    /// not checked at all.
2318    ///
2319    /// Default: false
2320    pub fn set_skip_checking_sst_file_sizes_on_db_open(&mut self, value: bool) {
2321        unsafe {
2322            ffi::rocksdb_options_set_skip_checking_sst_file_sizes_on_db_open(
2323                self.inner,
2324                value as c_uchar,
2325            );
2326        }
2327    }
2328
2329    /// The total maximum size (in bytes) of write buffers to maintain in memory
2330    /// including copies of buffers that have already been flushed. This parameter
2331    /// only affects trimming of flushed buffers and does not affect flushing.
2332    /// This controls the maximum amount of write history that will be available
2333    /// in memory for conflict checking when Transactions are used. The actual
2334    /// size of write history (flushed Memtables) might be higher than this limit
2335    /// if further trimming will reduce write history total size below this
2336    /// limit. For example, if max_write_buffer_size_to_maintain is set to 64MB,
2337    /// and there are three flushed Memtables, with sizes of 32MB, 20MB, 20MB.
2338    /// Because trimming the next Memtable of size 20MB will reduce total memory
2339    /// usage to 52MB which is below the limit, RocksDB will stop trimming.
2340    ///
2341    /// When using an OptimisticTransactionDB:
2342    /// If this value is too low, some transactions may fail at commit time due
2343    /// to not being able to determine whether there were any write conflicts.
2344    ///
2345    /// When using a TransactionDB:
2346    /// If Transaction::SetSnapshot is used, TransactionDB will read either
2347    /// in-memory write buffers or SST files to do write-conflict checking.
2348    /// Increasing this value can reduce the number of reads to SST files
2349    /// done for conflict detection.
2350    ///
2351    /// Setting this value to 0 will cause write buffers to be freed immediately
2352    /// after they are flushed. If this value is set to -1,
2353    /// 'max_write_buffer_number * write_buffer_size' will be used.
2354    ///
2355    /// Default:
2356    /// If using a TransactionDB/OptimisticTransactionDB, the default value will
2357    /// be set to the value of 'max_write_buffer_number * write_buffer_size'
2358    /// if it is not explicitly set by the user.  Otherwise, the default is 0.
2359    pub fn set_max_write_buffer_size_to_maintain(&mut self, size: i64) {
2360        unsafe {
2361            ffi::rocksdb_options_set_max_write_buffer_size_to_maintain(self.inner, size);
2362        }
2363    }
2364
2365    /// By default, a single write thread queue is maintained. The thread that gets
2366    /// to the head of the queue becomes the write batch group leader and is responsible
2367    /// for writing to the WAL and the memtable for the batch group.
2368    ///
2369    /// If enable_pipelined_write is true, separate write thread queues are
2370    /// maintained for WAL writes and memtable writes. A write thread first enters the WAL
2371    /// writer queue and then the memtable writer queue. Pending threads on the WAL
2372    /// writer queue thus only have to wait for previous writers to finish their
2373    /// WAL writing but not the memtable writing. Enabling the feature may improve
2374    /// write throughput and reduce latency of the prepare phase of two-phase
2375    /// commit.
2376    ///
2377    /// Default: false
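    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use ckb_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_enable_pipelined_write(true);
    /// ```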
2378    pub fn set_enable_pipelined_write(&mut self, value: bool) {
2379        unsafe {
2380            ffi::rocksdb_options_set_enable_pipelined_write(self.inner, value as c_uchar);
2381        }
2382    }
2383
2384    /// Defines the underlying memtable implementation.
2385    /// See official [wiki](https://github.com/facebook/rocksdb/wiki/MemTable) for more information.
2386    /// Defaults to using a skiplist.
2387    ///
2388    /// # Examples
2389    ///
2390    /// ```
2391    /// use ckb_rocksdb::{Options, MemtableFactory};
2392    /// let mut opts = Options::default();
2393    /// let factory = MemtableFactory::HashSkipList {
2394    ///     bucket_count: 1_000_000,
2395    ///     height: 4,
2396    ///     branching_factor: 4,
2397    /// };
2398    ///
2399    /// opts.set_allow_concurrent_memtable_write(false);
2400    /// opts.set_memtable_factory(factory);
2401    /// ```
2402    pub fn set_memtable_factory(&mut self, factory: MemtableFactory) {
2403        match factory {
2404            MemtableFactory::Vector => unsafe {
2405                ffi::rocksdb_options_set_memtable_vector_rep(self.inner);
2406            },
2407            MemtableFactory::HashSkipList {
2408                bucket_count,
2409                height,
2410                branching_factor,
2411            } => unsafe {
2412                ffi::rocksdb_options_set_hash_skip_list_rep(
2413                    self.inner,
2414                    bucket_count,
2415                    height,
2416                    branching_factor,
2417                );
2418            },
2419            MemtableFactory::HashLinkList { bucket_count } => unsafe {
2420                ffi::rocksdb_options_set_hash_link_list_rep(self.inner, bucket_count);
2421            },
2422        };
2423    }
2424
2425    pub fn set_block_based_table_factory(&mut self, factory: &BlockBasedOptions) {
2426        unsafe {
2427            ffi::rocksdb_options_set_block_based_table_factory(self.inner, factory.inner);
2428        }
2429        self.outlive.block_based = Some(factory.outlive.clone());
2430    }
2431
2432    /// Sets the table factory to a CuckooTableFactory (the default table
2433    /// factory is a block-based table factory that provides a default
2434    /// implementation of TableBuilder and TableReader with default
2435    /// BlockBasedTableOptions).
2436    /// See official [wiki](https://github.com/facebook/rocksdb/wiki/CuckooTable-Format) for more information on this table format.
2437    /// # Examples
2438    ///
2439    /// ```
2440    /// use ckb_rocksdb::{Options, CuckooTableOptions};
2441    ///
2442    /// let mut opts = Options::default();
2443    /// let mut factory_opts = CuckooTableOptions::default();
2444    /// factory_opts.set_hash_ratio(0.8);
2445    /// factory_opts.set_max_search_depth(20);
2446    /// factory_opts.set_cuckoo_block_size(10);
2447    /// factory_opts.set_identity_as_first_hash(true);
2448    /// factory_opts.set_use_module_hash(false);
2449    ///
2450    /// opts.set_cuckoo_table_factory(&factory_opts);
2451    /// ```
2452    pub fn set_cuckoo_table_factory(&mut self, factory: &CuckooTableOptions) {
2453        unsafe {
2454            ffi::rocksdb_options_set_cuckoo_table_factory(self.inner, factory.inner);
2455        }
2456    }
2457
2458    // This is a factory that provides TableFactory objects.
2459    // Default: a block-based table factory that provides a default
2460    // implementation of TableBuilder and TableReader with default
2461    // BlockBasedTableOptions.
2462    /// Sets the factory as plain table.
2463    /// See official [wiki](https://github.com/facebook/rocksdb/wiki/PlainTable-Format) for more
2464    /// information.
2465    ///
2466    /// # Examples
2467    ///
2468    /// ```
2469    /// use ckb_rocksdb::{KeyEncodingType, Options, PlainTableFactoryOptions};
2470    ///
2471    /// let mut opts = Options::default();
2472    /// let factory_opts = PlainTableFactoryOptions {
2473    ///   user_key_length: 0,
2474    ///   bloom_bits_per_key: 20,
2475    ///   hash_table_ratio: 0.75,
2476    ///   index_sparseness: 16,
2477    ///   huge_page_tlb_size: 0,
2478    ///   encoding_type: KeyEncodingType::Plain,
2479    ///   full_scan_mode: false,
2480    ///   store_index_in_file: false,
2481    /// };
2482    ///
2483    /// opts.set_plain_table_factory(&factory_opts);
2484    /// ```
2485    pub fn set_plain_table_factory(&mut self, options: &PlainTableFactoryOptions) {
2486        unsafe {
2487            ffi::rocksdb_options_set_plain_table_factory(
2488                self.inner,
2489                options.user_key_length,
2490                options.bloom_bits_per_key,
2491                options.hash_table_ratio,
2492                options.index_sparseness,
2493                options.huge_page_tlb_size,
2494                options.encoding_type as c_char,
2495                c_uchar::from(options.full_scan_mode),
2496                c_uchar::from(options.store_index_in_file),
2497            );
2498        }
2499    }
2500
2501    /// Sets the start level to use compression.
2502    pub fn set_min_level_to_compress(&mut self, lvl: c_int) {
2503        unsafe {
2504            ffi::rocksdb_options_set_min_level_to_compress(self.inner, lvl);
2505        }
2506    }
2507
2508    /// Measure IO stats in compactions and flushes, if `true`.
2509    ///
2510    /// Default: `false`
2511    ///
2512    /// # Examples
2513    ///
2514    /// ```
2515    /// use ckb_rocksdb::Options;
2516    ///
2517    /// let mut opts = Options::default();
2518    /// opts.set_report_bg_io_stats(true);
2519    /// ```
2520    pub fn set_report_bg_io_stats(&mut self, enable: bool) {
2521        unsafe {
2522            ffi::rocksdb_options_set_report_bg_io_stats(self.inner, enable as c_int);
2523        }
2524    }
2525
2526    /// Once write-ahead logs exceed this size, we will start forcing the flush of
2527    /// column families whose memtables are backed by the oldest live WAL file
2528    /// (i.e. the ones that are causing all the space amplification).
2529    ///
2530    /// Default: `0`
2531    ///
2532    /// # Examples
2533    ///
2534    /// ```
2535    /// use ckb_rocksdb::Options;
2536    ///
2537    /// let mut opts = Options::default();
2538    /// // Set max total wal size to 1G.
2539    /// opts.set_max_total_wal_size(1 << 30);
2540    /// ```
2541    pub fn set_max_total_wal_size(&mut self, size: u64) {
2542        unsafe {
2543            ffi::rocksdb_options_set_max_total_wal_size(self.inner, size);
2544        }
2545    }
2546
2547    /// Recovery mode to control the consistency while replaying WAL.
2548    ///
2549    /// Default: DBRecoveryMode::PointInTime
2550    ///
2551    /// # Examples
2552    ///
2553    /// ```
2554    /// use ckb_rocksdb::{Options, DBRecoveryMode};
2555    ///
2556    /// let mut opts = Options::default();
2557    /// opts.set_wal_recovery_mode(DBRecoveryMode::AbsoluteConsistency);
2558    /// ```
2559    pub fn set_wal_recovery_mode(&mut self, mode: DBRecoveryMode) {
2560        unsafe {
2561            ffi::rocksdb_options_set_wal_recovery_mode(self.inner, mode as c_int);
2562        }
2563    }
2564
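    /// Enables collection of internal RocksDB statistics, which can later be
    /// read back with `get_statistics`.
    ///
    /// # Examples
    ///
    /// A minimal sketch:
    ///
    /// ```
    /// use ckb_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.enable_statistics();
    /// if let Some(stats) = opts.get_statistics() {
    ///     println!("{}", stats);
    /// }
    /// ```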
2565    pub fn enable_statistics(&mut self) {
2566        unsafe {
2567            ffi::rocksdb_options_enable_statistics(self.inner);
2568        }
2569    }
2570
2571    pub fn get_statistics(&self) -> Option<String> {
2572        unsafe {
2573            let value = ffi::rocksdb_options_statistics_get_string(self.inner);
2574            if value.is_null() {
2575                return None;
2576            }
2577
2578            // Must have valid UTF-8 format.
2579            let s = CStr::from_ptr(value).to_str().unwrap().to_owned();
2580            ffi::rocksdb_free(value as *mut c_void);
2581            Some(s)
2582        }
2583    }
2584
2585    /// If not zero, dump `rocksdb.stats` to LOG every `stats_dump_period_sec`.
2586    ///
2587    /// Default: `600` (10 mins)
2588    ///
2589    /// # Examples
2590    ///
2591    /// ```
2592    /// use ckb_rocksdb::Options;
2593    ///
2594    /// let mut opts = Options::default();
2595    /// opts.set_stats_dump_period_sec(300);
2596    /// ```
2597    pub fn set_stats_dump_period_sec(&mut self, period: c_uint) {
2598        unsafe {
2599            ffi::rocksdb_options_set_stats_dump_period_sec(self.inner, period);
2600        }
2601    }
2602
2603    /// If not zero, persist `rocksdb.stats` to RocksDB every `stats_persist_period_sec` seconds.
2604    ///
2605    /// Default: `600` (10 mins)
2606    ///
2607    /// # Examples
2608    ///
2609    /// ```
2610    /// use ckb_rocksdb::Options;
2611    ///
2612    /// let mut opts = Options::default();
2613    /// opts.set_stats_persist_period_sec(5);
2614    /// ```
2615    pub fn set_stats_persist_period_sec(&mut self, period: c_uint) {
2616        unsafe {
2617            ffi::rocksdb_options_set_stats_persist_period_sec(self.inner, period);
2618        }
2619    }
2620
2621    /// When set to true, reading SST files will opt out of the filesystem's
2622    /// readahead. Setting this to false may improve sequential iteration
2623    /// performance.
2624    ///
2625    /// Default: `true`
2626    pub fn set_advise_random_on_open(&mut self, advise: bool) {
2627        unsafe {
2628            ffi::rocksdb_options_set_advise_random_on_open(self.inner, advise as c_uchar);
2629        }
2630    }
2631
2632    /// Enable/disable adaptive mutex, which spins in the user space before resorting to kernel.
2633    ///
2634    /// This could reduce context switch when the mutex is not
2635    /// heavily contended. However, if the mutex is hot, we could end up
2636    /// wasting spin time.
2637    ///
2638    /// Default: false
2639    pub fn set_use_adaptive_mutex(&mut self, enabled: bool) {
2640        unsafe {
2641            ffi::rocksdb_options_set_use_adaptive_mutex(self.inner, enabled as c_uchar);
2642        }
2643    }
2644
2645    /// Sets the number of levels for this database.
2646    pub fn set_num_levels(&mut self, n: c_int) {
2647        unsafe {
2648            ffi::rocksdb_options_set_num_levels(self.inner, n);
2649        }
2650    }
2651
2652    /// When a `prefix_extractor` is defined through `opts.set_prefix_extractor` this
2653    /// creates a prefix bloom filter for each memtable with the size of
2654    /// `write_buffer_size * memtable_prefix_bloom_ratio` (capped at 0.25).
2655    ///
2656    /// Default: `0`
2657    ///
2658    /// # Examples
2659    ///
2660    /// ```
2661    /// use ckb_rocksdb::{Options, SliceTransform};
2662    ///
2663    /// let mut opts = Options::default();
2664    /// let transform = SliceTransform::create_fixed_prefix(10);
2665    /// opts.set_prefix_extractor(transform);
2666    /// opts.set_memtable_prefix_bloom_ratio(0.2);
2667    /// ```
2668    pub fn set_memtable_prefix_bloom_ratio(&mut self, ratio: f64) {
2669        unsafe {
2670            ffi::rocksdb_options_set_memtable_prefix_bloom_size_ratio(self.inner, ratio);
2671        }
2672    }
2673
2674    /// Sets the maximum number of bytes in all compacted files.
2675    /// We try to limit the number of bytes in one compaction to be lower than this
2676    /// threshold. But it's not guaranteed.
2677    ///
2678    /// Value 0 will be sanitized.
2679    ///
2680    /// Default: target_file_size_base * 25
2681    pub fn set_max_compaction_bytes(&mut self, nbytes: u64) {
2682        unsafe {
2683            ffi::rocksdb_options_set_max_compaction_bytes(self.inner, nbytes);
2684        }
2685    }
2686
2687    /// Specifies the absolute path of the directory the
2688    /// write-ahead log (WAL) should be written to.
2689    ///
2690    /// Default: same directory as the database
2691    ///
2692    /// # Examples
2693    ///
2694    /// ```
2695    /// use ckb_rocksdb::Options;
2696    ///
2697    /// let mut opts = Options::default();
2698    /// opts.set_wal_dir("/path/to/dir");
2699    /// ```
2700    pub fn set_wal_dir<P: AsRef<Path>>(&mut self, path: P) {
2701        let p = CString::new(path.as_ref().to_string_lossy().as_bytes()).unwrap();
2702        unsafe {
2703            ffi::rocksdb_options_set_wal_dir(self.inner, p.as_ptr());
2704        }
2705    }
2706
2707    /// Sets the WAL ttl in seconds.
2708    ///
2709    /// The following two options affect how archived logs will be deleted.
2710    /// 1. If both set to 0, logs will be deleted asap and will not get into
2711    ///    the archive.
2712    /// 2. If wal_ttl_seconds is 0 and wal_size_limit_mb is not 0,
2713    ///    WAL files will be checked every 10 min and if total size is greater
2714    ///    than wal_size_limit_mb, they will be deleted starting with the
2715    ///    earliest until size_limit is met. All empty files will be deleted.
2716    /// 3. If wal_ttl_seconds is not 0 and wal_size_limit_mb is 0, then
2717    ///    WAL files will be checked every wal_ttl_seconds / 2 and those that
2718    ///    are older than wal_ttl_seconds will be deleted.
2719    /// 4. If both are not 0, WAL files will be checked every 10 min and both
2720    ///    checks will be performed with ttl being first.
2721    ///
2722    /// Default: 0
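    ///
    /// # Examples
    ///
    /// A minimal sketch; the one-day TTL below is illustrative only:
    ///
    /// ```
    /// use ckb_rocksdb::Options;
    ///
    /// let mut opts = Options::default();
    /// opts.set_wal_ttl_seconds(24 * 60 * 60);
    /// ```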
2723    pub fn set_wal_ttl_seconds(&mut self, secs: u64) {
2724        unsafe {
2725            ffi::rocksdb_options_set_WAL_ttl_seconds(self.inner, secs);
2726        }
2727    }
2728
2729    /// Sets the WAL size limit in MB.
2730    ///
2731    /// If the total size of WAL files is greater than wal_size_limit_mb,
2732    /// they will be deleted starting with the earliest until size_limit is met.
2733    ///
2734    /// Default: 0
2735    pub fn set_wal_size_limit_mb(&mut self, size: u64) {
2736        unsafe {
2737            ffi::rocksdb_options_set_WAL_size_limit_MB(self.inner, size);
2738        }
2739    }
2740
2741    /// Sets the number of bytes to preallocate (via fallocate) the manifest files.
2742    ///
2743    /// Default is 4MB, which is reasonable to reduce random IO
2744    /// as well as prevent overallocation for mounts that preallocate
2745    /// large amounts of data (such as xfs's allocsize option).
2746    pub fn set_manifest_preallocation_size(&mut self, size: usize) {
2747        unsafe {
2748            ffi::rocksdb_options_set_manifest_preallocation_size(self.inner, size);
2749        }
2750    }
2751
2752    /// If true, then DB::Open() will not update the statistics used to optimize
2753    /// compaction decision by loading table properties from many files.
2754    /// Turning off this feature will improve DBOpen time especially in disk environment.
2755    ///
2756    /// Default: false
2757    pub fn set_skip_stats_update_on_db_open(&mut self, skip: bool) {
2758        unsafe {
2759            ffi::rocksdb_options_set_skip_stats_update_on_db_open(self.inner, skip as c_uchar);
2760        }
2761    }
2762
2763    /// Specify the maximal number of info log files to be kept.
2764    ///
2765    /// Default: 1000
2766    ///
2767    /// # Examples
2768    ///
2769    /// ```
2770    /// use ckb_rocksdb::Options;
2771    ///
2772    /// let mut options = Options::default();
2773    /// options.set_keep_log_file_num(100);
2774    /// ```
2775    pub fn set_keep_log_file_num(&mut self, nfiles: usize) {
2776        unsafe {
2777            ffi::rocksdb_options_set_keep_log_file_num(self.inner, nfiles);
2778        }
2779    }
2780
2781    /// Allow the OS to mmap file for writing.
2782    ///
2783    /// Default: false
2784    ///
2785    /// # Examples
2786    ///
2787    /// ```
2788    /// use ckb_rocksdb::Options;
2789    ///
2790    /// let mut options = Options::default();
2791    /// options.set_allow_mmap_writes(true);
2792    /// ```
2793    pub fn set_allow_mmap_writes(&mut self, is_enabled: bool) {
2794        unsafe {
2795            ffi::rocksdb_options_set_allow_mmap_writes(self.inner, is_enabled as c_uchar);
2796        }
2797    }
2798
2799    /// Allow the OS to mmap file for reading sst tables.
2800    ///
2801    /// Default: false
2802    ///
2803    /// # Examples
2804    ///
2805    /// ```
2806    /// use ckb_rocksdb::Options;
2807    ///
2808    /// let mut options = Options::default();
2809    /// options.set_allow_mmap_reads(true);
2810    /// ```
2811    pub fn set_allow_mmap_reads(&mut self, is_enabled: bool) {
2812        unsafe {
2813            ffi::rocksdb_options_set_allow_mmap_reads(self.inner, is_enabled as c_uchar);
2814        }
2815    }
2816
2817    /// If enabled, WAL is not flushed automatically after each write. Instead it
2818    /// relies on manual invocation of `DB::flush_wal()` to write the WAL buffer
2819    /// to its file.
2820    ///
2821    /// Default: false
2822    ///
2823    /// # Examples
2824    ///
2825    /// ```
2826    /// use ckb_rocksdb::Options;
2827    ///
2828    /// let mut options = Options::default();
2829    /// options.set_manual_wal_flush(true);
2830    /// ```
2831    pub fn set_manual_wal_flush(&mut self, is_enabled: bool) {
2832        unsafe {
2833            ffi::rocksdb_options_set_manual_wal_flush(self.inner, is_enabled as c_uchar);
2834        }
2835    }
2836
2837    /// Guarantee that all column families are flushed together atomically.
2838    /// This option applies to both manual flushes (`db.flush()`) and automatic
2839    /// background flushes caused when memtables are filled.
2840    ///
2841    /// Note that this is only useful when the WAL is disabled. When using the
2842    /// WAL, writes are always consistent across column families.
2843    ///
2844    /// Default: false
2845    ///
2846    /// # Examples
2847    ///
2848    /// ```
2849    /// use ckb_rocksdb::Options;
2850    ///
2851    /// let mut options = Options::default();
2852    /// options.set_atomic_flush(true);
2853    /// ```
2854    pub fn set_atomic_flush(&mut self, atomic_flush: bool) {
2855        unsafe {
2856            ffi::rocksdb_options_set_atomic_flush(self.inner, atomic_flush as c_uchar);
2857        }
2858    }
2859
2860    /// Sets global cache for table-level rows. Cache must outlive DB instance which uses it.
2861    ///
2862    /// Default: null (disabled)
2863    /// Not supported in ROCKSDB_LITE mode!
2864    pub fn set_row_cache(&mut self, cache: &Cache) {
2865        unsafe {
2866            ffi::rocksdb_options_set_row_cache(self.inner, cache.0.inner.as_ptr());
2867        }
2868        self.outlive.row_cache = Some(cache.clone());
2869    }
2870
2871    /// Used to control the write rate of flush and compaction. Flush has higher
2872    /// priority than compaction.
2873    /// If the rate limiter is enabled, bytes_per_sync is set to 1MB by default.
2874    ///
2875    /// Default: disable
2876    ///
2877    /// # Examples
2878    ///
2879    /// ```
2880    /// use ckb_rocksdb::Options;
2881    ///
2882    /// let mut options = Options::default();
2883    /// options.set_ratelimiter(1024 * 1024, 100 * 1000, 10);
2884    /// ```
2885    pub fn set_ratelimiter(
2886        &mut self,
2887        rate_bytes_per_sec: i64,
2888        refill_period_us: i64,
2889        fairness: i32,
2890    ) {
2891        unsafe {
2892            let ratelimiter =
2893                ffi::rocksdb_ratelimiter_create(rate_bytes_per_sec, refill_period_us, fairness);
2894            // Since limiter is wrapped in shared_ptr, we don't need to
2895            // call rocksdb_ratelimiter_destroy explicitly.
2896            ffi::rocksdb_options_set_ratelimiter(self.inner, ratelimiter);
2897        }
2898    }
2899
2900    /// Sets the maximal size of the info log file.
2901    ///
2902    /// If the log file is larger than `max_log_file_size`, a new info log file
2903    /// will be created. If `max_log_file_size` is equal to zero, all logs will
2904    /// be written to one log file.
2905    ///
2906    /// Default: 0
2907    ///
2908    /// # Examples
2909    ///
2910    /// ```
2911    /// use ckb_rocksdb::Options;
2912    ///
2913    /// let mut options = Options::default();
2914    /// options.set_max_log_file_size(0);
2915    /// ```
2916    pub fn set_max_log_file_size(&mut self, size: usize) {
2917        unsafe {
2918            ffi::rocksdb_options_set_max_log_file_size(self.inner, size);
2919        }
2920    }
2921
2922    /// Sets the time for the info log file to roll (in seconds).
2923    ///
2924    /// If specified with non-zero value, log file will be rolled
2925    /// if it has been active longer than `log_file_time_to_roll`.
2926    /// Default: 0 (disabled)
2927    pub fn set_log_file_time_to_roll(&mut self, secs: usize) {
2928        unsafe {
2929            ffi::rocksdb_options_set_log_file_time_to_roll(self.inner, secs);
2930        }
2931    }
2932
2933    /// Controls the recycling of log files.
2934    ///
2935    /// If non-zero, previously written log files will be reused for new logs,
2936    /// overwriting the old data. The value indicates how many such files we will
2937    /// keep around at any point in time for later use. This is more efficient
2938    /// because the blocks are already allocated and fdatasync does not need to
2939    /// update the inode after each write.
2940    ///
2941    /// Default: 0
2942    ///
2943    /// # Examples
2944    ///
2945    /// ```
2946    /// use ckb_rocksdb::Options;
2947    ///
2948    /// let mut options = Options::default();
2949    /// options.set_recycle_log_file_num(5);
2950    /// ```
2951    pub fn set_recycle_log_file_num(&mut self, num: usize) {
2952        unsafe {
2953            ffi::rocksdb_options_set_recycle_log_file_num(self.inner, num);
2954        }
2955    }
2956
2957    /// Sets the threshold at which all writes will be slowed down to at least delayed_write_rate if the
2958    /// estimated bytes needed for compaction exceed this threshold.
2959    ///
2960    /// Default: 64GB
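    ///
    /// # Examples
    ///
    /// A minimal sketch of setting this soft limit together with the matching
    /// hard limit defined below; the values are illustrative, not recommendations.
    ///
    /// ```
    /// use ckb_rocksdb::Options;
    ///
    /// let mut options = Options::default();
    /// // Slow writes down once ~64 GiB of compaction debt is estimated...
    /// options.set_soft_pending_compaction_bytes_limit(64 * 1024 * 1024 * 1024);
    /// // ...and stop them entirely at ~256 GiB.
    /// options.set_hard_pending_compaction_bytes_limit(256 * 1024 * 1024 * 1024);
    /// ```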
2961    pub fn set_soft_pending_compaction_bytes_limit(&mut self, limit: usize) {
2962        unsafe {
2963            ffi::rocksdb_options_set_soft_pending_compaction_bytes_limit(self.inner, limit);
2964        }
2965    }
2966
2967    /// Sets the bytes threshold at which all writes are stopped if the estimated bytes needed for compaction
2968    /// exceed this threshold.
2969    ///
2970    /// Default: 256GB
2971    pub fn set_hard_pending_compaction_bytes_limit(&mut self, limit: usize) {
2972        unsafe {
2973            ffi::rocksdb_options_set_hard_pending_compaction_bytes_limit(self.inner, limit);
2974        }
2975    }
2976
2977    /// Sets the size of one block in arena memory allocation.
2978    ///
2979    /// If <= 0, a proper value is automatically calculated (usually 1/10 of
2980    /// write_buffer_size).
2981    ///
2982    /// Default: 0
2983    pub fn set_arena_block_size(&mut self, size: usize) {
2984        unsafe {
2985            ffi::rocksdb_options_set_arena_block_size(self.inner, size);
2986        }
2987    }
2988
2989    /// If true, then print malloc stats together with rocksdb.stats when printing to LOG.
2990    ///
2991    /// Default: false
2992    pub fn set_dump_malloc_stats(&mut self, enabled: bool) {
2993        unsafe {
2994            ffi::rocksdb_options_set_dump_malloc_stats(self.inner, enabled as c_uchar);
2995        }
2996    }
2997
2998    /// Enables the whole-key bloom filter in the memtable. Note this only takes effect
2999    /// if memtable_prefix_bloom_size_ratio is not 0. Enabling whole-key filtering
3000    /// can potentially reduce CPU usage for point lookups.
3001    ///
3002    /// Default: false (disable)
3003    ///
3004    /// Dynamically changeable through SetOptions() API
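    ///
    /// # Examples
    ///
    /// A minimal sketch; remember that this setting only has an effect once a
    /// non-zero memtable prefix bloom size ratio has also been configured.
    ///
    /// ```
    /// use ckb_rocksdb::Options;
    ///
    /// let mut options = Options::default();
    /// options.set_memtable_whole_key_filtering(true);
    /// ```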
3005    pub fn set_memtable_whole_key_filtering(&mut self, whole_key_filter: bool) {
3006        unsafe {
3007            ffi::rocksdb_options_set_memtable_whole_key_filtering(
3008                self.inner,
3009                whole_key_filter as c_uchar,
3010            );
3011        }
3012    }
3013}
3014
3015impl Default for Options {
3016    fn default() -> Self {
3017        unsafe {
3018            let opts = ffi::rocksdb_options_create();
3019            assert!(!opts.is_null(), "Could not create RocksDB options");
3020
3021            Self {
3022                inner: opts,
3023                outlive: OptionsMustOutliveDB::default(),
3024            }
3025        }
3026    }
3027}
3028
3029impl FlushOptions {
3030    pub fn new() -> FlushOptions {
3031        FlushOptions::default()
3032    }
3033
3034    /// Waits until the flush is done.
3035    ///
3036    /// Default: true
3037    ///
3038    /// # Examples
3039    ///
3040    /// ```
3041    /// use ckb_rocksdb::FlushOptions;
3042    ///
3043    /// let mut options = FlushOptions::default();
3044    /// options.set_wait(false);
3045    /// ```
3046    pub fn set_wait(&mut self, wait: bool) {
3047        unsafe {
3048            ffi::rocksdb_flushoptions_set_wait(self.inner, wait as c_uchar);
3049        }
3050    }
3051}
3052
3053impl Default for FlushOptions {
3054    fn default() -> FlushOptions {
3055        let flush_opts = unsafe { ffi::rocksdb_flushoptions_create() };
3056        if flush_opts.is_null() {
3057            panic!("Could not create RocksDB flush options");
3058        }
3059        FlushOptions { inner: flush_opts }
3060    }
3061}
3062
3063impl WriteOptions {
3064    pub fn new() -> WriteOptions {
3065        WriteOptions::default()
3066    }
3067
3068    /// Sets the sync mode. If true, the write will be flushed
3069    /// from the operating system buffer cache before the write is considered complete.
3070    /// If this flag is true, writes will be slower.
3071    ///
3072    /// Default: false
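    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming `WriteOptions` is exported at the crate root
    /// like the other option types used in this file's examples.
    ///
    /// ```
    /// use ckb_rocksdb::WriteOptions;
    ///
    /// // Force each write to be synced to stable storage before it completes.
    /// let mut write_opts = WriteOptions::default();
    /// write_opts.set_sync(true);
    /// ```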
3073    pub fn set_sync(&mut self, sync: bool) {
3074        unsafe {
3075            ffi::rocksdb_writeoptions_set_sync(self.inner, sync as c_uchar);
3076        };
3077        self.option_set_sync = Some(sync);
3078    }
3079
3080    /// Sets whether the WAL should be disabled.
3081    /// If true, writes will not first go to the write-ahead log,
3082    /// and the write may be lost after a crash.
3083    ///
3084    /// Default: false
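    ///
    /// # Examples
    ///
    /// A minimal sketch (same `WriteOptions` export assumption as for `set_sync`);
    /// data written with the WAL disabled can be lost if the process crashes.
    ///
    /// ```
    /// use ckb_rocksdb::WriteOptions;
    ///
    /// let mut write_opts = WriteOptions::default();
    /// write_opts.disable_wal(true);
    /// ```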
3085    pub fn disable_wal(&mut self, disable: bool) {
3086        unsafe {
3087            ffi::rocksdb_writeoptions_disable_WAL(self.inner, disable as c_int);
3088        }
3089        self.option_disable_wal = Some(disable);
3090    }
3091
3092    pub(crate) fn input_or_default(
3093        input: Option<&WriteOptions>,
3094        default_writeopts: &mut Option<WriteOptions>,
3095    ) -> Result<*mut ffi::rocksdb_writeoptions_t, Error> {
3096        if default_writeopts.is_none() {
3097            default_writeopts.replace(WriteOptions::default());
3098        }
3099
3100        let wo_handle = input
3101            .or(default_writeopts.as_ref())
3102            .ok_or_else(|| Error::new("Unable to extract write options.".to_string()))?
3103            .handle();
3104
3105        Ok(wo_handle)
3106    }
3107}
3108
3109impl Default for WriteOptions {
3110    fn default() -> WriteOptions {
3111        let write_opts = unsafe { ffi::rocksdb_writeoptions_create() };
3112        if write_opts.is_null() {
3113            panic!("Could not create RocksDB write options");
3114        }
3115        WriteOptions {
3116            option_set_sync: None,
3117            option_disable_wal: None,
3118            inner: write_opts,
3119        }
3120    }
3121}
3122
3123impl Clone for WriteOptions {
3124    fn clone(&self) -> WriteOptions {
3125        let mut ops = WriteOptions::default();
3126        if let Some(set_sync) = self.option_set_sync {
3127            ops.set_sync(set_sync);
3128        };
3129        if let Some(disable_wal) = self.option_disable_wal {
3130            ops.disable_wal(disable_wal);
3131        };
3132        ops
3133    }
3134}
3135
3136impl ReadOptions {
3137    // TODO add snapshot setting here
3138    // TODO add snapshot wrapper structs with proper destructors;
3139    // that struct needs an "iterator" impl too.
3140
3141    /// Specifies whether the "data block"/"index block"/"filter block"
3142    /// read for this iteration should be cached in memory.
3143    /// Callers may wish to set this field to false for bulk scans.
3144    ///
3145    /// Default: true
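    ///
    /// # Examples
    ///
    /// A minimal sketch of turning block caching off for a bulk scan.
    ///
    /// ```
    /// use ckb_rocksdb::ReadOptions;
    ///
    /// let mut read_opts = ReadOptions::default();
    /// read_opts.fill_cache(false);
    /// ```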
3146    pub fn fill_cache(&mut self, v: bool) {
3147        unsafe {
3148            ffi::rocksdb_readoptions_set_fill_cache(self.inner, v as c_uchar);
3149        }
3150        self.option_fill_cache = Some(v);
3151    }
3152
3153    /// Sets the snapshot which should be used for the read.
3154    /// The snapshot must belong to the DB that is being read and must
3155    /// not have been released.
3156    pub fn set_snapshot<T>(&mut self, snapshot: &T)
3157    where
3158        T: ConstHandle<ffi::rocksdb_snapshot_t>,
3159    {
3160        unsafe {
3161            ffi::rocksdb_readoptions_set_snapshot(self.inner, snapshot.const_handle());
3162        }
3163    }
3164
3165    /// Sets the upper bound for an iterator.
3166    /// The upper bound itself is not included in the iteration result.
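    ///
    /// # Examples
    ///
    /// A minimal sketch: with these options an iterator stops before the key
    /// `"k5"`, since the bound is exclusive.
    ///
    /// ```
    /// use ckb_rocksdb::ReadOptions;
    ///
    /// let mut read_opts = ReadOptions::default();
    /// read_opts.set_iterate_upper_bound(b"k5".to_vec());
    /// ```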
3167    pub fn set_iterate_upper_bound<K: AsRef<[u8]>>(&mut self, key: K) {
3168        self.option_set_iterate_upper_bound = Some(key.as_ref().to_vec());
3169        let key = self.option_set_iterate_upper_bound.as_ref().unwrap();
3170        unsafe {
3171            ffi::rocksdb_readoptions_set_iterate_upper_bound(
3172                self.inner,
3173                key.as_ptr() as *const c_char,
3174                key.len() as size_t,
3175            );
3176        }
3177    }
3178
3179    /// Sets the lower bound for an iterator.
3180    pub fn set_iterate_lower_bound<K: AsRef<[u8]>>(&mut self, key: K) {
3181        self.option_set_iterate_lower_bound = Some(key.as_ref().to_vec());
3182        let key = self.option_set_iterate_lower_bound.as_ref().unwrap();
3183        unsafe {
3184            ffi::rocksdb_readoptions_set_iterate_lower_bound(
3185                self.inner,
3186                key.as_ptr() as *const c_char,
3187                key.len() as size_t,
3188            );
3189        }
3190    }
3191
3192    /// Enforce that the iterator only iterates over the same
3193    /// prefix as the seek.
3194    /// This option is effective only for prefix seeks, i.e. prefix_extractor is
3195    /// non-null for the column family and total_order_seek is false.  Unlike
3196    /// iterate_upper_bound, prefix_same_as_start only works within a prefix
3197    /// but in both directions.
3198    ///
3199    /// Default: false
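    ///
    /// # Examples
    ///
    /// A minimal sketch; for this to take effect, the column family must also
    /// have a prefix extractor configured and total_order_seek must be false.
    ///
    /// ```
    /// use ckb_rocksdb::ReadOptions;
    ///
    /// let mut read_opts = ReadOptions::default();
    /// read_opts.set_prefix_same_as_start(true);
    /// ```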
3200    pub fn set_prefix_same_as_start(&mut self, v: bool) {
3201        unsafe { ffi::rocksdb_readoptions_set_prefix_same_as_start(self.inner, v as c_uchar) }
3202        self.option_set_prefix_same_as_start = Some(v);
3203    }
3204
3205    /// Enables a total order seek regardless of the index format (e.g. hash index)
3206    /// used in the table. Some table formats (e.g. plain table) may not support
3207    /// this option.
3208    ///
3209    /// If true when calling Get(), the prefix bloom filter is also skipped when
3210    /// reading from a block-based table. This provides a way to read existing data
3211    /// after changing the implementation of the prefix extractor.
3212    pub fn set_total_order_seek(&mut self, v: bool) {
3213        unsafe { ffi::rocksdb_readoptions_set_total_order_seek(self.inner, v as c_uchar) }
3214        self.option_set_total_order_seek = Some(v);
3215    }
3216
3217    /// If non-zero, an iterator will create a new table reader which
3218    /// performs reads of the given size. Using a large size (> 2MB) can
3219    /// improve the performance of forward iteration on spinning disks.
3220    /// Default: 0
3221    ///
3222    /// ```
3223    /// use ckb_rocksdb::{ReadOptions};
3224    ///
3225    /// let mut opts = ReadOptions::default();
3226    /// opts.set_readahead_size(4_194_304); // 4mb
3227    /// ```
3228    pub fn set_readahead_size(&mut self, v: usize) {
3229        unsafe {
3230            ffi::rocksdb_readoptions_set_readahead_size(self.inner, v as size_t);
3231        }
3232        self.option_set_readahead_size = Some(v);
3233    }
3234
3235    /// Asynchronously prefetch some data.
3236    ///
3237    /// Used for sequential reads and internal automatic prefetching.
3238    ///
3239    /// Default: `false`
3240    pub fn set_async_io(&mut self, v: bool) {
3241        unsafe {
3242            ffi::rocksdb_readoptions_set_async_io(self.inner, c_uchar::from(v));
3243        }
3244    }
3245
3246    pub fn input_or_default(
3247        input: Option<&ReadOptions>,
3248        default_readopts: &mut Option<ReadOptions>,
3249    ) -> Result<*mut ffi::rocksdb_readoptions_t, Error> {
3250        if input.is_none() && default_readopts.is_none() {
3251            default_readopts.replace(ReadOptions::default());
3252        }
3253
3254        let ro_handle = input
3255            .or(default_readopts.as_ref())
3256            .ok_or_else(|| Error::new("Unable to extract read options.".to_string()))?
3257            .handle();
3258
3259        if ro_handle.is_null() {
3260            return Err(Error::new(
3261                "Unable to create RocksDB read options. \
3262                 This is a fairly trivial call, and its \
3263                 failure may be indicative of a \
3264                 mis-compiled or mis-loaded RocksDB \
3265                 library."
3266                    .to_string(),
3267            ));
3268        }
3269
3270        Ok(ro_handle)
3271    }
3272}
3273
3274impl Default for ReadOptions {
3275    fn default() -> ReadOptions {
3276        unsafe {
3277            ReadOptions {
3278                option_fill_cache: None,
3279                option_set_iterate_upper_bound: None,
3280                option_set_iterate_lower_bound: None,
3281                option_set_prefix_same_as_start: None,
3282                option_set_total_order_seek: None,
3283                option_set_readahead_size: None,
3284                inner: ffi::rocksdb_readoptions_create(),
3285            }
3286        }
3287    }
3288}
3289
3290impl Clone for ReadOptions {
3291    fn clone(&self) -> ReadOptions {
3292        let mut ops = ReadOptions::default();
3293        if let Some(fill_cache) = self.option_fill_cache {
3294            ops.fill_cache(fill_cache);
3295        };
3296        if let Some(set_iterate_upper_bound) = &self.option_set_iterate_upper_bound {
3297            ops.set_iterate_upper_bound(set_iterate_upper_bound);
3298        };
3299        if let Some(set_iterate_lower_bound) = &self.option_set_iterate_lower_bound {
3300            ops.set_iterate_lower_bound(set_iterate_lower_bound);
3301        };
3302        if let Some(set_prefix_same_as_start) = self.option_set_prefix_same_as_start {
3303            ops.set_prefix_same_as_start(set_prefix_same_as_start);
3304        };
3305        if let Some(set_total_order_seek) = self.option_set_total_order_seek {
3306            ops.set_total_order_seek(set_total_order_seek);
3307        };
3308        if let Some(set_readahead_size) = self.option_set_readahead_size {
3309            ops.set_readahead_size(set_readahead_size)
3310        };
3311        ops
3312    }
3313}
3314
3315impl IngestExternalFileOptions {
3316    /// Can be set to true to move the files instead of copying them.
3317    pub fn set_move_files(&mut self, v: bool) {
3318        unsafe {
3319            ffi::rocksdb_ingestexternalfileoptions_set_move_files(self.inner, v as c_uchar);
3320        }
3321    }
3322
3323    /// If set to false, keys from an ingested file could appear in existing snapshots
3324    /// that were created before the file was ingested.
3325    pub fn set_snapshot_consistency(&mut self, v: bool) {
3326        unsafe {
3327            ffi::rocksdb_ingestexternalfileoptions_set_snapshot_consistency(
3328                self.inner,
3329                v as c_uchar,
3330            );
3331        }
3332    }
3333
3334    /// If set to false, IngestExternalFile() will fail if the file key range
3335    /// overlaps with existing keys or tombstones in the DB.
3336    pub fn set_allow_global_seqno(&mut self, v: bool) {
3337        unsafe {
3338            ffi::rocksdb_ingestexternalfileoptions_set_allow_global_seqno(self.inner, v as c_uchar);
3339        }
3340    }
3341
3342    /// If set to false and the file key range overlaps with the memtable key range
3343    /// (memtable flush required), IngestExternalFile will fail.
3344    pub fn set_allow_blocking_flush(&mut self, v: bool) {
3345        unsafe {
3346            ffi::rocksdb_ingestexternalfileoptions_set_allow_blocking_flush(
3347                self.inner,
3348                v as c_uchar,
3349            );
3350        }
3351    }
3352
3353    /// Set to true if you would like duplicate keys in the file being ingested
3354    /// to be skipped rather than overwriting existing data under that key.
3355    /// Use case: back-filling historical data into the database without
3356    /// overwriting the existing, newer version of that data.
3357    /// This option can only be used if the DB has been running
3358    /// with allow_ingest_behind=true since the dawn of time.
3359    /// All files will be ingested at the bottommost level with seqno=0.
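    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming `IngestExternalFileOptions` is exported at the
    /// crate root; the target DB must additionally have been opened with
    /// allow_ingest_behind=true for the ingestion itself to succeed.
    ///
    /// ```
    /// use ckb_rocksdb::IngestExternalFileOptions;
    ///
    /// let mut ingest_opts = IngestExternalFileOptions::default();
    /// ingest_opts.set_ingest_behind(true);
    /// ```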
3360    pub fn set_ingest_behind(&mut self, v: bool) {
3361        unsafe {
3362            ffi::rocksdb_ingestexternalfileoptions_set_ingest_behind(self.inner, v as c_uchar);
3363        }
3364    }
3365
3366    pub fn input_or_default(
3367        input: Option<&IngestExternalFileOptions>,
3368        default_opts: &mut Option<IngestExternalFileOptions>,
3369    ) -> Result<*mut ffi::rocksdb_ingestexternalfileoptions_t, Error> {
3370        if input.is_none() && default_opts.is_none() {
3371            default_opts.replace(IngestExternalFileOptions::default());
3372        }
3373
3374        let handle = input
3375            .or(default_opts.as_ref())
3376            .ok_or_else(|| {
3377                Error::new("Unable to extract ingest external file options.".to_string())
3378            })?
3379            .handle();
3380
3381        if handle.is_null() {
3382            return Err(Error::new(
3383                "Unable to create RocksDB ingest external file options. \
3384                 This is a fairly trivial call, and its \
3385                 failure may be indicative of a \
3386                 mis-compiled or mis-loaded RocksDB \
3387                 library."
3388                    .to_string(),
3389            ));
3390        }
3391
3392        Ok(handle)
3393    }
3394}
3395
3396impl Default for IngestExternalFileOptions {
3397    fn default() -> Self {
3398        unsafe {
3399            Self {
3400                inner: ffi::rocksdb_ingestexternalfileoptions_create(),
3401            }
3402        }
3403    }
3404}
3405
3406/// Used by BlockBasedOptions::set_index_type.
3407pub enum BlockBasedIndexType {
3408    /// A space efficient index block that is optimized for
3409    /// binary-search-based index.
3410    BinarySearch,
3411
3412    /// The hash index, if enabled, will perform a hash lookup if
3413    /// a prefix extractor has been provided through Options::set_prefix_extractor.
3414    HashSearch,
3415
3416    /// A two-level index implementation. Both levels are binary search indexes.
3417    TwoLevelIndexSearch,
3418}
3419
3420/// Used by BlockBasedOptions::set_data_block_index_type.
3421#[repr(C)]
3422pub enum DataBlockIndexType {
3423    /// Use binary search when performing point lookup for keys in data blocks.
3424    /// This is the default.
3425    BinarySearch = 0,
3426
3427    /// Appends a compact hash table to the end of the data block for efficient indexing. Backwards
3428    /// compatible with databases created without this feature. Once turned on, existing data will
3429    /// be gradually converted to the hash index format.
3430    BinaryAndHash = 1,
3431}
3432
3433/// Defines the underlying memtable implementation.
3434/// See https://github.com/facebook/rocksdb/wiki/MemTable for more information.
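///
/// # Examples
///
/// A minimal sketch of selecting a hash-skip-list memtable, mirroring the usage
/// exercised in this crate's tests; the bucket count is an illustrative value.
///
/// ```
/// use ckb_rocksdb::{MemtableFactory, Options};
///
/// let mut options = Options::default();
/// options.set_memtable_factory(MemtableFactory::HashSkipList {
///     bucket_count: 1_000_000,
///     height: 4,
///     branching_factor: 4,
/// });
/// ```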
3435pub enum MemtableFactory {
3436    Vector,
3437    HashSkipList {
3438        bucket_count: usize,
3439        height: i32,
3440        branching_factor: i32,
3441    },
3442    HashLinkList {
3443        bucket_count: usize,
3444    },
3445}
3446
3447/// Used in [`PlainTableFactoryOptions`].
3448#[derive(Default, Debug, Copy, Clone, PartialEq, Eq)]
3449pub enum KeyEncodingType {
3450    /// Always write full keys.
3451    #[default]
3452    Plain = 0,
3453    /// Find opportunities to write the same prefix for multiple rows.
3454    Prefix = 1,
3455}
3456
3457/// Used with DBOptions::set_plain_table_factory.
3458/// See https://github.com/facebook/rocksdb/wiki/PlainTable-Format.
3459///
3460/// Defaults:
3461///  user_key_length: 0 (variable length)
3462///  bloom_bits_per_key: 10
3463///  hash_table_ratio: 0.75
3464///  index_sparseness: 16
3465///  huge_page_tlb_size: 0
3466///  encoding_type: KeyEncodingType::Plain
3467///  full_scan_mode: false
3468///  store_index_in_file: false
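///
/// # Examples
///
/// A minimal sketch that simply spells out the defaults listed above; it assumes
/// `PlainTableFactoryOptions` and `KeyEncodingType` are exported at the crate root.
///
/// ```
/// use ckb_rocksdb::{KeyEncodingType, PlainTableFactoryOptions};
///
/// let plain_opts = PlainTableFactoryOptions {
///     user_key_length: 0,
///     bloom_bits_per_key: 10,
///     hash_table_ratio: 0.75,
///     index_sparseness: 16,
///     huge_page_tlb_size: 0,
///     encoding_type: KeyEncodingType::Plain,
///     full_scan_mode: false,
///     store_index_in_file: false,
/// };
/// ```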
3469pub struct PlainTableFactoryOptions {
3470    pub user_key_length: u32,
3471    pub bloom_bits_per_key: i32,
3472    pub hash_table_ratio: f64,
3473    pub index_sparseness: usize,
3474    pub huge_page_tlb_size: usize,
3475    pub encoding_type: KeyEncodingType,
3476    pub full_scan_mode: bool,
3477    pub store_index_in_file: bool,
3478}
3479
3480#[derive(Debug, Copy, Clone, PartialEq)]
3481pub enum DBCompressionType {
3482    None = ffi::rocksdb_no_compression as isize,
3483    Snappy = ffi::rocksdb_snappy_compression as isize,
3484    Zlib = ffi::rocksdb_zlib_compression as isize,
3485    Bz2 = ffi::rocksdb_bz2_compression as isize,
3486    Lz4 = ffi::rocksdb_lz4_compression as isize,
3487    Lz4hc = ffi::rocksdb_lz4hc_compression as isize,
3488    Zstd = ffi::rocksdb_zstd_compression as isize,
3489}
3490
3491#[derive(Debug, Copy, Clone, PartialEq)]
3492pub enum DBCompactionStyle {
3493    Level = ffi::rocksdb_level_compaction as isize,
3494    Universal = ffi::rocksdb_universal_compaction as isize,
3495    Fifo = ffi::rocksdb_fifo_compaction as isize,
3496}
3497
3498#[derive(Debug, Copy, Clone, PartialEq)]
3499pub enum DBRecoveryMode {
3500    TolerateCorruptedTailRecords = ffi::rocksdb_tolerate_corrupted_tail_records_recovery as isize,
3501    AbsoluteConsistency = ffi::rocksdb_absolute_consistency_recovery as isize,
3502    PointInTime = ffi::rocksdb_point_in_time_recovery as isize,
3503    SkipAnyCorruptedRecord = ffi::rocksdb_skip_any_corrupted_records_recovery as isize,
3504}
3505
3506pub struct FifoCompactOptions {
3507    pub(crate) inner: *mut ffi::rocksdb_fifo_compaction_options_t,
3508}
3509
3510impl Default for FifoCompactOptions {
3511    fn default() -> Self {
3512        let opts = unsafe { ffi::rocksdb_fifo_compaction_options_create() };
3513        assert!(
3514            !opts.is_null(),
3515            "Could not create RocksDB Fifo Compaction Options"
3516        );
3517
3518        Self { inner: opts }
3519    }
3520}
3521
3522impl Drop for FifoCompactOptions {
3523    fn drop(&mut self) {
3524        unsafe {
3525            ffi::rocksdb_fifo_compaction_options_destroy(self.inner);
3526        }
3527    }
3528}
3529
3530impl FifoCompactOptions {
3531    /// Sets the maximum total size of table files.
3532    ///
3533    /// Once the total sum of table file sizes reaches this, we will delete the oldest
3534    /// table file.
3535    ///
3536    /// Default: 1GB
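    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming `FifoCompactOptions` is exported at the crate
    /// root; the 4 GiB limit is an arbitrary illustrative value.
    ///
    /// ```
    /// use ckb_rocksdb::FifoCompactOptions;
    ///
    /// let mut fifo_opts = FifoCompactOptions::default();
    /// fifo_opts.set_max_table_files_size(4 * 1024 * 1024 * 1024);
    /// ```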
3537    pub fn set_max_table_files_size(&mut self, nbytes: u64) {
3538        unsafe {
3539            ffi::rocksdb_fifo_compaction_options_set_max_table_files_size(self.inner, nbytes);
3540        }
3541    }
3542}
3543
3544#[derive(Debug, Copy, Clone, PartialEq)]
3545#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
3546pub enum UniversalCompactionStopStyle {
3547    Similar = ffi::rocksdb_similar_size_compaction_stop_style as isize,
3548    Total = ffi::rocksdb_total_size_compaction_stop_style as isize,
3549}
3550
3551pub struct UniversalCompactOptions {
3552    pub(crate) inner: *mut ffi::rocksdb_universal_compaction_options_t,
3553}
3554
3555impl Default for UniversalCompactOptions {
3556    fn default() -> Self {
3557        let opts = unsafe { ffi::rocksdb_universal_compaction_options_create() };
3558        assert!(
3559            !opts.is_null(),
3560            "Could not create RocksDB Universal Compaction Options"
3561        );
3562
3563        Self { inner: opts }
3564    }
3565}
3566
3567impl Drop for UniversalCompactOptions {
3568    fn drop(&mut self) {
3569        unsafe {
3570            ffi::rocksdb_universal_compaction_options_destroy(self.inner);
3571        }
3572    }
3573}
3574
3575impl UniversalCompactOptions {
3576    /// Sets the percentage flexibility while comparing file size.
3577    /// If the candidate file(s) size is 1% smaller than the next file's size,
3578    /// then the next file is included in this candidate set.
3579    ///
3580    /// Default: 1
3581    pub fn set_size_ratio(&mut self, ratio: c_int) {
3582        unsafe {
3583            ffi::rocksdb_universal_compaction_options_set_size_ratio(self.inner, ratio);
3584        }
3585    }
3586
3587    /// Sets the minimum number of files in a single compaction run.
3588    ///
3589    /// Default: 2
3590    pub fn set_min_merge_width(&mut self, num: c_int) {
3591        unsafe {
3592            ffi::rocksdb_universal_compaction_options_set_min_merge_width(self.inner, num);
3593        }
3594    }
3595
3596    /// Sets the maximum number of files in a single compaction run.
3597    ///
3598    /// Default: UINT_MAX
3599    pub fn set_max_merge_width(&mut self, num: c_int) {
3600        unsafe {
3601            ffi::rocksdb_universal_compaction_options_set_max_merge_width(self.inner, num);
3602        }
3603    }
3604
3605    /// Sets the size amplification.
3606    ///
3607    /// It is defined as the amount (in percentage) of
3608    /// additional storage needed to store a single byte of data in the database.
3609    /// For example, a size amplification of 2% means that a database that
3610    /// contains 100 bytes of user data may occupy up to 102 bytes of
3611    /// physical storage. By this definition, a fully compacted database has
3612    /// a size amplification of 0%. RocksDB uses the following heuristic
3613    /// to calculate size amplification: it assumes that all files excluding
3614    /// the earliest file contribute to the size amplification.
3615    ///
3616    /// Default: 200, which means that a 100 byte database could require up to 300 bytes of storage.
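    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming `UniversalCompactOptions` is exported at the
    /// crate root; 150 is an arbitrary illustrative value.
    ///
    /// ```
    /// use ckb_rocksdb::UniversalCompactOptions;
    ///
    /// let mut uni_opts = UniversalCompactOptions::default();
    /// uni_opts.set_max_size_amplification_percent(150);
    /// ```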
3617    pub fn set_max_size_amplification_percent(&mut self, v: c_int) {
3618        unsafe {
3619            ffi::rocksdb_universal_compaction_options_set_max_size_amplification_percent(
3620                self.inner, v,
3621            );
3622        }
3623    }
3624
3625    /// Sets the percentage of compression size.
3626    ///
3627    /// If this option is set to -1, all the output files
3628    /// will follow the compression type specified.
3629    ///
3630    /// If this option is not negative, we will try to make sure compressed
3631    /// size is just above this value. In normal cases, at least this percentage
3632    /// of data will be compressed.
3633    /// When we are compacting to a new file, here is the criterion for whether
3634    /// it needs to be compressed: assuming the following list of files sorted
3635    /// by generation time:
3636    ///    A1...An B1...Bm C1...Ct
3637    /// where A1 is the newest and Ct is the oldest, and we are going to compact
3638    /// B1...Bm, we calculate the total size of all the files as total_size, as
3639    /// well as the total size of C1...Ct as total_C, the compaction output file
3640    /// will be compressed iff
3641    ///   total_C / total_size < this percentage
3642    ///
3643    /// Default: -1
3644    pub fn set_compression_size_percent(&mut self, v: c_int) {
3645        unsafe {
3646            ffi::rocksdb_universal_compaction_options_set_compression_size_percent(self.inner, v);
3647        }
3648    }
3649
3650    /// Sets the algorithm used to stop picking files into a single compaction run.
3651    ///
3652    /// Default: UniversalCompactionStopStyle::Total
3653    pub fn set_stop_style(&mut self, style: UniversalCompactionStopStyle) {
3654        unsafe {
3655            ffi::rocksdb_universal_compaction_options_set_stop_style(self.inner, style as c_int);
3656        }
3657    }
3658}
3659
3660#[derive(Debug, Copy, Clone, PartialEq)]
3661#[cfg_attr(feature = "serde1", derive(serde::Serialize, serde::Deserialize))]
3662#[repr(u8)]
3663pub enum BottommostLevelCompaction {
3664    /// Skip bottommost level compaction
3665    Skip = 0,
3666    /// Only compact bottommost level if there is a compaction filter
3667    /// This is the default option
3668    IfHaveCompactionFilter,
3669    /// Always compact bottommost level
3670    Force,
3671    /// Always compact bottommost level but in bottommost level avoid
3672    /// double-compacting files created in the same compaction
3673    ForceOptimized,
3674}
3675
3676pub struct CompactOptions {
3677    pub(crate) inner: *mut ffi::rocksdb_compactoptions_t,
3678}
3679
3680impl Default for CompactOptions {
3681    fn default() -> Self {
3682        let opts = unsafe { ffi::rocksdb_compactoptions_create() };
3683        assert!(!opts.is_null(), "Could not create RocksDB Compact Options");
3684
3685        Self { inner: opts }
3686    }
3687}
3688
3689impl Drop for CompactOptions {
3690    fn drop(&mut self) {
3691        unsafe {
3692            ffi::rocksdb_compactoptions_destroy(self.inner);
3693        }
3694    }
3695}
3696
3697impl CompactOptions {
3698    /// If more than one thread calls manual compaction,
3699    /// only one will actually schedule it while the other threads will simply wait
3700    /// for the scheduled manual compaction to complete. If exclusive_manual_compaction
3701    /// is set to true, the call will disable scheduling of automatic compaction jobs
3702    /// and wait for existing automatic compaction jobs to finish.
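    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming `CompactOptions` is exported at the crate root.
    ///
    /// ```
    /// use ckb_rocksdb::CompactOptions;
    ///
    /// let mut compact_opts = CompactOptions::default();
    /// compact_opts.set_exclusive_manual_compaction(true);
    /// ```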
3703    pub fn set_exclusive_manual_compaction(&mut self, v: bool) {
3704        unsafe {
3705            ffi::rocksdb_compactoptions_set_exclusive_manual_compaction(self.inner, v as c_uchar);
3706        }
3707    }
3708
3709    /// Sets bottommost level compaction.
3710    pub fn set_bottommost_level_compaction(&mut self, lvl: BottommostLevelCompaction) {
3711        unsafe {
3712            ffi::rocksdb_compactoptions_set_bottommost_level_compaction(self.inner, lvl as c_uchar);
3713        }
3714    }
3715
3716    /// If true, compacted files will be moved to the minimum level capable
3717    /// of holding the data, or to the given level (if a non-negative target_level is specified).
3718    pub fn set_change_level(&mut self, v: bool) {
3719        unsafe {
3720            ffi::rocksdb_compactoptions_set_change_level(self.inner, v as c_uchar);
3721        }
3722    }
3723
3724    /// If change_level is true and target_level has a non-negative value, compacted
3725    /// files will be moved to target_level.
3726    pub fn set_target_level(&mut self, lvl: c_int) {
3727        unsafe {
3728            ffi::rocksdb_compactoptions_set_target_level(self.inner, lvl);
3729        }
3730    }
3731}
3732
3733/// Represents a path where SST files can be put
3734pub struct DBPath {
3735    pub(crate) inner: *mut ffi::rocksdb_dbpath_t,
3736}
3737
3738impl DBPath {
3739    /// Create a new path
3740    pub fn new<P: AsRef<Path>>(path: P, target_size: u64) -> Result<Self, Error> {
3741        let p = CString::new(path.as_ref().to_string_lossy().as_bytes()).unwrap();
3742        let dbpath = unsafe { ffi::rocksdb_dbpath_create(p.as_ptr(), target_size) };
3743        if dbpath.is_null() {
3744            Err(Error::new(format!(
3745                "Could not create path for storing sst files at location: {}",
3746                path.as_ref().to_string_lossy()
3747            )))
3748        } else {
3749            Ok(DBPath { inner: dbpath })
3750        }
3751    }
3752}
3753
3754impl Drop for DBPath {
3755    fn drop(&mut self) {
3756        unsafe {
3757            ffi::rocksdb_dbpath_destroy(self.inner);
3758        }
3759    }
3760}
3761
3762impl ConstHandle<ffi::rocksdb_options_t> for Options {
3763    fn const_handle(&self) -> *const ffi::rocksdb_options_t {
3764        self.inner
3765    }
3766}
3767
3768impl Handle<ffi::rocksdb_options_t> for Options {
3769    fn handle(&self) -> *mut ffi::rocksdb_options_t {
3770        self.inner
3771    }
3772}
3773
3774impl Handle<ffi::rocksdb_readoptions_t> for ReadOptions {
3775    fn handle(&self) -> *mut ffi::rocksdb_readoptions_t {
3776        self.inner
3777    }
3778}
3779
3780impl Handle<ffi::rocksdb_writeoptions_t> for WriteOptions {
3781    fn handle(&self) -> *mut ffi::rocksdb_writeoptions_t {
3782        self.inner
3783    }
3784}
3785
3786impl Handle<ffi::rocksdb_ingestexternalfileoptions_t> for IngestExternalFileOptions {
3787    fn handle(&self) -> *mut ffi::rocksdb_ingestexternalfileoptions_t {
3788        self.inner
3789    }
3790}
3791
3792#[cfg(test)]
3793mod tests {
3794    use crate::{MemtableFactory, Options};
3795
3796    #[test]
3797    fn test_enable_statistics() {
3798        let mut opts = Options::default();
3799        opts.enable_statistics();
3800        opts.set_stats_dump_period_sec(60);
3801        assert!(opts.get_statistics().is_some());
3802
3803        let opts = Options::default();
3804        assert!(opts.get_statistics().is_none());
3805    }
3806
3807    #[test]
3808    fn test_set_memtable_factory() {
3809        let mut opts = Options::default();
3810        opts.set_memtable_factory(MemtableFactory::Vector);
3811        opts.set_memtable_factory(MemtableFactory::HashLinkList { bucket_count: 100 });
3812        opts.set_memtable_factory(MemtableFactory::HashSkipList {
3813            bucket_count: 100,
3814            height: 4,
3815            branching_factor: 4,
3816        });
3817    }
3818
3819    #[test]
3820    fn test_set_stats_persist_period_sec() {
3821        let mut opts = Options::default();
3822        opts.enable_statistics();
3823        opts.set_stats_persist_period_sec(5);
3824        assert!(opts.get_statistics().is_some());
3825
3826        let opts = Options::default();
3827        assert!(opts.get_statistics().is_none());
3828    }
3829}