split remaining map suites

Signed-off-by: Jason Volk <jason@zemos.net>
Jason Volk 2024-09-29 07:37:43 +00:00 committed by strawberry
parent 4496cf2d5b
commit 5192927a53
9 changed files with 205 additions and 180 deletions


@@ -1,7 +1,10 @@
mod count;
mod get;
mod insert;
mod keys;
mod keys_from;
mod keys_prefix;
mod remove;
mod rev_keys;
mod rev_keys_from;
mod rev_keys_prefix;
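The operations deleted from this file below are re-homed into the per-operation modules listed above. A minimal sketch of what one extracted suite could look like, assuming the submodules simply re-open impl Map the way the existing keys/rev_keys modules do; the file name and exact body are illustrative, not the committed code:

// remove.rs (hypothetical): the deletion suite moved out of map.rs.
// Submodules of `map` can still reach the private fields of Map.
use std::fmt::Debug;

use serde::Serialize;

use crate::{ser, util::or_else};

use super::Map;

impl Map {
	pub fn del<K>(&self, key: &K)
	where
		K: Serialize + ?Sized + Debug,
	{
		let mut buf = Vec::<u8>::with_capacity(64);
		let key = ser::serialize(&mut buf, key).expect("failed to serialize deletion key");
		self.remove(&key);
	}

	pub fn remove<K>(&self, key: &K)
	where
		K: AsRef<[u8]> + ?Sized + Debug,
	{
		self.db
			.db
			.delete_cf_opt(&self.cf(), key, &self.write_options)
			.or_else(or_else)
			.expect("database remove error");
	}
}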
@@ -18,23 +21,14 @@ use std::{
fmt,
fmt::{Debug, Display},
future::Future,
io::Write,
pin::Pin,
sync::Arc,
};
use conduit::{err, Result};
use futures::future;
use rocksdb::{AsColumnFamilyRef, ColumnFamily, ReadOptions, WriteBatchWithTransaction, WriteOptions};
use serde::Serialize;
use conduit::Result;
use rocksdb::{AsColumnFamilyRef, ColumnFamily, ReadOptions, WriteOptions};
use crate::{
keyval::{OwnedKey, OwnedVal},
ser,
util::{map_err, or_else},
watchers::Watchers,
Engine, Handle,
};
use crate::{watchers::Watchers, Engine};
pub struct Map {
name: String,
@@ -57,146 +51,6 @@ impl Map {
}))
}
#[tracing::instrument(skip(self), fields(%self), level = "trace")]
pub fn del<K>(&self, key: &K)
where
K: Serialize + ?Sized + Debug,
{
let mut buf = Vec::<u8>::with_capacity(64);
self.bdel(key, &mut buf);
}
#[tracing::instrument(skip(self, buf), fields(%self), level = "trace")]
pub fn bdel<K, B>(&self, key: &K, buf: &mut B)
where
K: Serialize + ?Sized + Debug,
B: Write + AsRef<[u8]>,
{
let key = ser::serialize(buf, key).expect("failed to serialize deletion key");
self.remove(&key);
}
#[tracing::instrument(level = "trace")]
pub fn remove<K>(&self, key: &K)
where
K: AsRef<[u8]> + ?Sized + Debug,
{
let write_options = &self.write_options;
self.db
.db
.delete_cf_opt(&self.cf(), key, write_options)
.or_else(or_else)
.expect("database remove error");
if !self.db.corked() {
self.db.flush().expect("database flush error");
}
}
#[tracing::instrument(skip(self, value), fields(%self), level = "trace")]
pub fn insert<K, V>(&self, key: &K, value: &V)
where
K: AsRef<[u8]> + ?Sized + Debug,
V: AsRef<[u8]> + ?Sized,
{
let write_options = &self.write_options;
self.db
.db
.put_cf_opt(&self.cf(), key, value, write_options)
.or_else(or_else)
.expect("database insert error");
if !self.db.corked() {
self.db.flush().expect("database flush error");
}
self.watchers.wake(key.as_ref());
}
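For reference, a hedged sketch of how the write side pairs with the watcher wake-up from a caller's point of view; the key and value literals are made up and the watch_prefix call refers to the method further down this file:

// Hypothetical caller: a write on a matching key wakes any registered
// prefix watcher.
async fn write_and_notify(map: &Map) {
	let woken = map.watch_prefix(b"user:"); // register before writing
	map.insert(b"user:alice", b"online");   // put_cf_opt + watchers.wake()
	woken.await;                            // completes because the prefix matched
}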
#[tracing::instrument(skip(self), fields(%self), level = "trace")]
pub fn insert_batch<'a, I, K, V>(&'a self, iter: I)
where
I: Iterator<Item = &'a (K, V)> + Send + Debug,
K: AsRef<[u8]> + Sized + Debug + 'a,
V: AsRef<[u8]> + Sized + 'a,
{
let mut batch = WriteBatchWithTransaction::<false>::default();
for (key, val) in iter {
batch.put_cf(&self.cf(), key.as_ref(), val.as_ref());
}
let write_options = &self.write_options;
self.db
.db
.write_opt(batch, write_options)
.or_else(or_else)
.expect("database insert batch error");
if !self.db.corked() {
self.db.flush().expect("database flush error");
}
}
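A small usage sketch for the batched path, with made-up keys and values; the point is one WriteBatch and one flush instead of one per key:

// Hypothetical batched write: the whole iterator lands in a single WriteBatch.
fn seed(map: &Map) {
	let pairs: Vec<(Vec<u8>, Vec<u8>)> = (0..3u8)
		.map(|i| (vec![b'k', i], vec![b'v', i]))
		.collect();
	map.insert_batch(pairs.iter());
}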
#[tracing::instrument(skip(self), fields(%self), level = "trace")]
pub fn qry<K>(&self, key: &K) -> impl Future<Output = Result<Handle<'_>>> + Send
where
K: Serialize + ?Sized + Debug,
{
let mut buf = Vec::<u8>::with_capacity(64);
self.bqry(key, &mut buf)
}
#[tracing::instrument(skip(self, buf), fields(%self), level = "trace")]
pub fn bqry<K, B>(&self, key: &K, buf: &mut B) -> impl Future<Output = Result<Handle<'_>>> + Send
where
K: Serialize + ?Sized + Debug,
B: Write + AsRef<[u8]>,
{
let key = ser::serialize(buf, key).expect("failed to serialize query key");
let val = self.get(key);
future::ready(val)
}
#[tracing::instrument(skip(self), fields(%self), level = "trace")]
pub fn get<K>(&self, key: &K) -> Result<Handle<'_>>
where
K: AsRef<[u8]> + ?Sized + Debug,
{
self.db
.db
.get_pinned_cf_opt(&self.cf(), key, &self.read_options)
.map_err(map_err)?
.map(Handle::from)
.ok_or(err!(Request(NotFound("Not found in database"))))
}
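For contrast between the two read paths, a hedged sketch; the tuple key and the Result plumbing are assumptions. qry/bqry serialize a structured key into a scratch buffer first, while get expects bytes already in column format:

// Hypothetical reads: qry() goes through the serializer, get() takes raw bytes.
async fn read_both(map: &Map) -> Result<()> {
	let _structured = map.qry(&("alice", 42_u64)).await?; // Serialize -> buf -> get()
	let _raw = map.get(b"alice")?;                        // bytes used verbatim
	Ok(())
}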
#[tracing::instrument(skip(self), fields(%self), level = "trace")]
pub fn multi_get<'a, I, K>(&self, keys: I) -> Vec<Option<OwnedVal>>
where
I: Iterator<Item = &'a K> + ExactSizeIterator + Send + Debug,
K: AsRef<[u8]> + Sized + Debug + 'a,
{
// Optimization can be `true` if key vector is pre-sorted **by the column
// comparator**.
const SORTED: bool = false;
let mut ret: Vec<Option<OwnedVal>> = Vec::with_capacity(keys.len());
let read_options = &self.read_options;
for res in self
.db
.db
.batched_multi_get_cf_opt(&self.cf(), keys, SORTED, read_options)
{
match res {
Ok(Some(res)) => ret.push(Some((*res).to_vec())),
Ok(None) => ret.push(None),
Err(e) => or_else(e).expect("database multiget error"),
}
}
ret
}
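A usage sketch for the batched read, with hypothetical keys; results come back positionally, one Option per input key, so absent rows are None rather than an error:

// Hypothetical multi-key read: output order mirrors the input iterator.
fn which_exist(map: &Map, users: &[&str]) -> Vec<bool> {
	let keys: Vec<&[u8]> = users.iter().map(|u| u.as_bytes()).collect();
	map.multi_get(keys.iter())
		.iter()
		.map(Option::is_some)
		.collect()
}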
#[inline]
pub fn watch_prefix<'a, K>(&'a self, prefix: &K) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>>
where
@@ -230,10 +84,7 @@ fn open(db: &Arc<Engine>, name: &str) -> Result<Arc<ColumnFamily>> {
let bounded_ptr = Arc::into_raw(bounded_arc);
let cf_ptr = bounded_ptr.cast::<ColumnFamily>();
// SAFETY: After thorough contemplation this appears to be the best solution,
// even by a significant margin.
//
// BACKGROUND: Column family handles out of RocksDB are basic pointers and can
// SAFETY: Column family handles out of RocksDB are basic pointers and can
// be invalidated: 1. when the database closes. 2. when the column is dropped or
// closed. rust_rocksdb wraps this for us by storing handles in their own
// `RwLock<BTreeMap>` map and returning an Arc<BoundColumnFamily<'_>>` to