Add some compaction-related interfaces

Signed-off-by: Jason Volk <jason@zemos.net>
This commit is contained in:
Jason Volk 2025-01-18 12:05:07 +00:00
parent 8ab825b12c
commit dda27ffcb1
7 changed files with 188 additions and 8 deletions

View file

@ -18,9 +18,16 @@ use std::{
};
use conduwuit::{debug, info, warn, Err, Result};
use rocksdb::{AsColumnFamilyRef, BoundColumnFamily, DBCommon, DBWithThreadMode, MultiThreaded};
use rocksdb::{
AsColumnFamilyRef, BoundColumnFamily, DBCommon, DBWithThreadMode, MultiThreaded,
WaitForCompactOptions,
};
use crate::{pool::Pool, result, Context};
use crate::{
pool::Pool,
util::{map_err, result},
Context,
};
pub struct Engine {
pub(super) read_only: bool,
@ -55,12 +62,22 @@ impl Engine {
#[tracing::instrument(skip(self), level = "debug")]
pub fn flush(&self) -> Result {
	// Sync the write-ahead log only; memtables are not flushed here.
	let res = self.db.flush_wal(false);
	result(res)
}
#[tracing::instrument(skip(self), level = "debug")]
#[tracing::instrument(skip(self), level = "info")]
pub fn sort(&self) -> Result {
	// Flush all memtables to SST files using default flush options.
	let flush_opts = rocksdb::FlushOptions::default();
	result(self.db.flush_opt(&flush_opts))
}
#[tracing::instrument(skip(self), level = "info")]
pub fn wait_compactions(&self) -> Result {
	// Block until all running/pending background compactions complete.
	let mut options = WaitForCompactOptions::default();
	// Do not trigger an additional flush as part of the wait.
	options.set_flush(false);
	// Error out instead of hanging if compactions have been paused.
	options.set_abort_on_pause(true);
	// NOTE(review): 0 presumably means no timeout (wait indefinitely) — confirm
	// against the rocksdb WaitForCompactOptions documentation.
	options.set_timeout(0);

	self.db.wait_for_compact(&options).map_err(map_err)
}
/// Query for database property by null-terminated name which is expected to
/// have a result with an integer representation. This is intended for
/// low-overhead programmatic use.

View file

@ -1,3 +1,4 @@
pub mod compact;
mod contains;
mod count;
mod get;

View file

@ -0,0 +1,62 @@
use conduwuit::{implement, Err, Result};
use rocksdb::{BottommostLevelCompaction, CompactOptions};
use crate::keyval::KeyBuf;
/// Parameters for a manual database compaction (see `compact_blocking`).
#[derive(Clone, Debug, Default)]
pub struct Options {
	/// Key range to start and stop compaction.
	pub range: (Option<KeyBuf>, Option<KeyBuf>),
	/// (None, None) - all levels to all necessary levels
	/// (None, Some(1)) - compact all levels into level 1
	/// (Some(1), None) - compact level 1 into level 1
	/// (Some(_), Some(_)) - currently unsupported
	pub level: (Option<usize>, Option<usize>),
	/// run compaction until complete. if false only one pass is made, and the
	/// results of that pass are not further recompacted.
	pub exhaustive: bool,
	/// waits for other compactions to complete, then runs this compaction
	/// exclusively before allowing automatic compactions to resume.
	pub exclusive: bool,
}
#[implement(super::Map)]
#[tracing::instrument(
	name = "compact",
	level = "info",
	skip(self),
	fields(%self),
)]
/// Run a manual compaction over this column family, blocking until the pass
/// completes. Behavior is controlled by `Options`: `range` bounds the keys,
/// `level` selects source/target levels, `exhaustive` forces recompaction of
/// the bottommost level, and `exclusive` serializes against automatic
/// compactions.
///
/// Returns an error if both levels are specified in `opts.level`, or if a
/// level value does not fit the target integer type.
pub fn compact_blocking(&self, opts: Options) -> Result {
	let mut co = CompactOptions::default();
	co.set_exclusive_manual_compaction(opts.exclusive);
	co.set_bottommost_level_compaction(match opts.exhaustive {
		| true => BottommostLevelCompaction::Force,
		| false => BottommostLevelCompaction::ForceOptimized,
	});

	match opts.level {
		| (None, None) => {
			// Compact all levels; -1 lets rocksdb choose the destination level.
			co.set_change_level(true);
			co.set_target_level(-1);
		},
		| (None, Some(level)) => {
			// Compact all levels into the one specified target level.
			co.set_change_level(true);
			co.set_target_level(level.try_into()?);
		},
		| (Some(level), None) => {
			// Compact the specified level into itself.
			co.set_change_level(false);
			co.set_target_level(level.try_into()?);
		},
		| (Some(_), Some(_)) => return Err!("compacting between specific levels not supported"),
	};

	self.db
		.db
		.compact_range_cf_opt(&self.cf(), opts.range.0, opts.range.1, &co);

	Ok(())
}

View file

@ -30,12 +30,12 @@ pub use self::{
deserialized::Deserialized,
handle::Handle,
keyval::{serialize_key, serialize_val, KeyVal, Slice},
map::Map,
map::{compact, Map},
ser::{serialize, serialize_to, serialize_to_vec, Interfix, Json, Separator, SEP},
};
pub(crate) use self::{
engine::{context::Context, Engine},
util::{or_else, result},
util::or_else,
};
use crate::maps::{Maps, MapsKey, MapsVal};