use tokio for threadpool mgmt
Signed-off-by: Jason Volk <jason@zemos.net>
parent 89a158ab0b
commit 9a9c071e82

4 changed files with 155 additions and 91 deletions
@@ -16,7 +16,7 @@ pub struct Database {
 impl Database {
 	/// Load an existing database or create a new one.
 	pub async fn open(server: &Arc<Server>) -> Result<Arc<Self>> {
-		let db = Engine::open(server)?;
+		let db = Engine::open(server).await?;
 		Ok(Arc::new(Self {
 			db: db.clone(),
 			maps: maps::open(&db)?,
@@ -23,7 +23,7 @@ use crate::{
 };

 pub struct Engine {
-	server: Arc<Server>,
+	pub(crate) server: Arc<Server>,
 	row_cache: Cache,
 	col_cache: RwLock<HashMap<String, Cache>>,
 	opts: Options,
@@ -40,7 +40,7 @@ pub(crate) type Db = DBWithThreadMode<MultiThreaded>;

 impl Engine {
 	#[tracing::instrument(skip_all)]
-	pub(crate) fn open(server: &Arc<Server>) -> Result<Arc<Self>> {
+	pub(crate) async fn open(server: &Arc<Server>) -> Result<Arc<Self>> {
 		let config = &server.config;
 		let cache_capacity_bytes = config.db_cache_capacity_mb * 1024.0 * 1024.0;

@@ -119,7 +119,7 @@ impl Engine {
 			corks: AtomicU32::new(0),
 			read_only: config.rocksdb_read_only,
 			secondary: config.rocksdb_secondary,
-			pool: Pool::new(&pool_opts)?,
+			pool: Pool::new(server, &pool_opts).await?,
 		}))
 	}

@@ -305,7 +305,7 @@ pub(crate) fn repair(db_opts: &Options, path: &PathBuf) -> Result<()> {
 	Ok(())
 }

-#[tracing::instrument(skip_all, name = "rocksdb", level = "debug")]
+#[tracing::instrument(skip(msg), name = "rocksdb", level = "trace")]
 pub(crate) fn handle_log(level: LogLevel, msg: &str) {
 	let msg = msg.trim();
 	if msg.starts_with("Options") {
@@ -325,7 +325,7 @@ impl Drop for Engine {
 	fn drop(&mut self) {
 		const BLOCKING: bool = true;

-		debug!("Joining request threads...");
+		debug!("Shutting down request pool...");
 		self.pool.close();

 		debug!("Waiting for background tasks to finish...");
@@ -2,7 +2,7 @@ use std::{convert::AsRef, fmt::Debug, io::Write, sync::Arc};

 use arrayvec::ArrayVec;
 use conduit::{err, implement, utils::IterStream, Err, Result};
-use futures::{future, Future, FutureExt, Stream};
+use futures::{future, Future, FutureExt, Stream, StreamExt};
 use rocksdb::DBPinnableSlice;
 use serde::Serialize;

@@ -54,6 +54,18 @@ where
 	self.get(key)
 }

+#[implement(super::Map)]
+#[tracing::instrument(skip(self, keys), fields(%self), level = "trace")]
+pub fn get_batch<'a, I, K>(self: &'a Arc<Self>, keys: I) -> impl Stream<Item = Result<Handle<'_>>> + Send + 'a
+where
+	I: Iterator<Item = &'a K> + ExactSizeIterator + Debug + Send + 'a,
+	K: AsRef<[u8]> + Debug + Send + ?Sized + Sync + 'a,
+{
+	keys.stream()
+		.map(move |key| self.get(key))
+		.buffered(self.db.server.config.db_pool_workers.saturating_mul(2))
+}
+
 /// Fetch a value from the database into cache, returning a reference-handle
 /// asynchronously. The key is referenced directly to perform the query.
 #[implement(super::Map)]
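The new get_batch turns the key iterator into a stream of get() futures and caps concurrency with buffered(), sized at twice the configured db_pool_workers. Below is a minimal, self-contained sketch of that buffered-stream pattern, assuming only the futures and tokio crates; the fetch function and the limit of 2 are illustrative stand-ins, not this crate's API:

use futures::{stream, StreamExt};

// Hypothetical async lookup standing in for Map::get().
async fn fetch(key: &str) -> Result<String, &'static str> {
	Ok(format!("value for {key}"))
}

#[tokio::main]
async fn main() {
	let keys = ["a", "b", "c", "d"];

	// Map each key to a future, then keep at most 2 lookups in flight
	// at once while still yielding results in input order.
	let results: Vec<_> = stream::iter(keys)
		.map(|key| fetch(key))
		.buffered(2)
		.collect()
		.await;

	assert_eq!(results.len(), keys.len());
}

buffered() preserves input order; buffer_unordered() would trade ordering for throughput.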
@@ -80,17 +92,8 @@ where
 }

 #[implement(super::Map)]
-#[tracing::instrument(skip(self, keys), fields(%self), level = "trace")]
-pub fn get_batch<'a, I, K>(&self, keys: I) -> impl Stream<Item = Result<Handle<'_>>>
-where
-	I: Iterator<Item = &'a K> + ExactSizeIterator + Debug + Send,
-	K: AsRef<[u8]> + Debug + Send + ?Sized + Sync + 'a,
-{
-	self.get_batch_blocking(keys).stream()
-}
-
-#[implement(super::Map)]
-pub fn get_batch_blocking<'a, I, K>(&self, keys: I) -> impl Iterator<Item = Result<Handle<'_>>>
+#[tracing::instrument(skip(self, keys), name = "batch_blocking", level = "trace")]
+pub(crate) fn get_batch_blocking<'a, I, K>(&self, keys: I) -> impl Iterator<Item = Result<Handle<'_>>> + Send
 where
 	I: Iterator<Item = &'a K> + ExactSizeIterator + Debug + Send,
 	K: AsRef<[u8]> + Debug + Send + ?Sized + Sync + 'a,
|
@ -111,6 +114,7 @@ where
|
||||||
/// The key is referenced directly to perform the query. This is a thread-
|
/// The key is referenced directly to perform the query. This is a thread-
|
||||||
/// blocking call.
|
/// blocking call.
|
||||||
#[implement(super::Map)]
|
#[implement(super::Map)]
|
||||||
|
#[tracing::instrument(skip(self, key), name = "blocking", level = "trace")]
|
||||||
pub fn get_blocking<K>(&self, key: &K) -> Result<Handle<'_>>
|
pub fn get_blocking<K>(&self, key: &K) -> Result<Handle<'_>>
|
||||||
where
|
where
|
||||||
K: AsRef<[u8]> + ?Sized,
|
K: AsRef<[u8]> + ?Sized,
|
||||||
|
@ -125,7 +129,7 @@ where
|
||||||
|
|
||||||
/// Fetch a value from the cache without I/O.
|
/// Fetch a value from the cache without I/O.
|
||||||
#[implement(super::Map)]
|
#[implement(super::Map)]
|
||||||
#[tracing::instrument(skip(self, key), fields(%self), level = "trace")]
|
#[tracing::instrument(skip(self, key), name = "cache", level = "trace")]
|
||||||
pub(crate) fn get_cached<K>(&self, key: &K) -> Result<Option<Handle<'_>>>
|
pub(crate) fn get_cached<K>(&self, key: &K) -> Result<Option<Handle<'_>>>
|
||||||
where
|
where
|
||||||
K: AsRef<[u8]> + Debug + ?Sized,
|
K: AsRef<[u8]> + Debug + ?Sized,
|
||||||
|
|
|
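The instrument attributes in this file switch from fields(%self) to explicit span names (batch_blocking, blocking, cache) at trace level. A small sketch of how such an attribute is declared, assuming the tracing and tracing-subscriber crates; the function and field names here are invented for illustration:

use tracing::instrument;

// Span named explicitly instead of deriving the name from the function;
// `skip` keeps the argument out of the recorded fields, while `fields`
// can still derive a value from it.
#[instrument(name = "cache", level = "trace", skip(key), fields(len = key.len()))]
fn get_cached(key: &str) -> Option<String> {
	tracing::trace!("cache probe");
	None
}

fn main() {
	tracing_subscriber::fmt()
		.with_max_level(tracing::Level::TRACE)
		.init();
	let _ = get_cached("alpha");
}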
@@ -1,20 +1,25 @@
 use std::{
-	convert::identity,
 	mem::take,
-	sync::{Arc, Mutex},
-	thread::JoinHandle,
+	sync::{
+		atomic::{AtomicUsize, Ordering},
+		Arc,
+	},
 };

-use async_channel::{bounded, Receiver, Sender};
-use conduit::{debug, defer, err, implement, Result};
+use async_channel::{bounded, Receiver, RecvError, Sender};
+use conduit::{debug, debug_warn, defer, err, implement, result::DebugInspect, Result, Server};
 use futures::channel::oneshot;
+use tokio::{sync::Mutex, task::JoinSet};

 use crate::{keyval::KeyBuf, Handle, Map};

 pub(crate) struct Pool {
-	workers: Mutex<Vec<JoinHandle<()>>>,
-	recv: Receiver<Cmd>,
-	send: Sender<Cmd>,
+	server: Arc<Server>,
+	workers: Mutex<JoinSet<()>>,
+	queue: Sender<Cmd>,
+	busy: AtomicUsize,
+	busy_max: AtomicUsize,
+	queued_max: AtomicUsize,
 }

 pub(crate) struct Opts {
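The new busy, busy_max, and queued_max fields are lock-free AtomicUsize gauges updated with relaxed ordering; fetch_max records a high-water mark without taking a mutex. A minimal sketch of that counter pattern (the Gauge type and method names are invented for illustration, not part of this patch):

use std::sync::atomic::{AtomicUsize, Ordering};

// High-water-mark gauge, similar in spirit to busy/busy_max.
#[derive(Default)]
struct Gauge {
	current: AtomicUsize,
	max: AtomicUsize,
}

impl Gauge {
	fn enter(&self) {
		// Increment the live count and fold it into the recorded maximum.
		let now = self.current.fetch_add(1, Ordering::Relaxed) + 1;
		self.max.fetch_max(now, Ordering::Relaxed);
	}

	fn leave(&self) {
		self.current.fetch_sub(1, Ordering::Relaxed);
	}
}

fn main() {
	let busy = Gauge::default();
	busy.enter();
	busy.enter();
	busy.leave();
	assert_eq!(busy.current.load(Ordering::Relaxed), 1);
	assert_eq!(busy.max.load(Ordering::Relaxed), 2);
}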
@@ -22,10 +27,6 @@ pub(crate) struct Opts {
 	pub(crate) worker_num: usize,
 }

-const QUEUE_LIMIT: (usize, usize) = (1, 8192);
-const WORKER_LIMIT: (usize, usize) = (1, 512);
-const WORKER_THREAD_NAME: &str = "conduwuit:db";
-
 #[derive(Debug)]
 pub(crate) enum Cmd {
 	Get(Get),
@@ -40,83 +41,111 @@ pub(crate) struct Get {

 type ResultSender = oneshot::Sender<Result<Handle<'static>>>;

-#[implement(Pool)]
-pub(crate) fn new(opts: &Opts) -> Result<Arc<Self>> {
-	let queue_size = opts.queue_size.clamp(QUEUE_LIMIT.0, QUEUE_LIMIT.1);
+const QUEUE_LIMIT: (usize, usize) = (1, 3072);
+const WORKER_LIMIT: (usize, usize) = (1, 512);
+
+impl Drop for Pool {
+	fn drop(&mut self) {
+		debug_assert!(self.queue.is_empty(), "channel must be empty on drop");
+		debug_assert!(self.queue.is_closed(), "channel should be closed on drop");
+	}
+}
+
+#[implement(Pool)]
+pub(crate) async fn new(server: &Arc<Server>, opts: &Opts) -> Result<Arc<Self>> {
+	let queue_size = opts.queue_size.clamp(QUEUE_LIMIT.0, QUEUE_LIMIT.1);
 	let (send, recv) = bounded(queue_size);
 	let pool = Arc::new(Self {
-		workers: Vec::new().into(),
-		recv,
-		send,
+		server: server.clone(),
+		workers: JoinSet::new().into(),
+		queue: send,
+		busy: AtomicUsize::default(),
+		busy_max: AtomicUsize::default(),
+		queued_max: AtomicUsize::default(),
 	});

 	let worker_num = opts.worker_num.clamp(WORKER_LIMIT.0, WORKER_LIMIT.1);
-	pool.spawn_until(worker_num)?;
+	pool.spawn_until(recv, worker_num).await?;

 	Ok(pool)
 }

 #[implement(Pool)]
-fn spawn_until(self: &Arc<Self>, max: usize) -> Result {
-	let mut workers = self.workers.lock()?;
+pub(crate) async fn _shutdown(self: &Arc<Self>) {
+	if !self.queue.is_closed() {
+		self.close();
+	}
+
+	let workers = take(&mut *self.workers.lock().await);
+	debug!(workers = workers.len(), "Waiting for workers to join...");
+
+	workers.join_all().await;
+	debug_assert!(self.queue.is_empty(), "channel is not empty");
+}
+
+#[implement(Pool)]
+pub(crate) fn close(&self) {
+	debug_assert!(!self.queue.is_closed(), "channel already closed");
+	debug!(
+		senders = self.queue.sender_count(),
+		receivers = self.queue.receiver_count(),
+		"Closing pool channel"
+	);
+
+	let closing = self.queue.close();
+	debug_assert!(closing, "channel is not closing");
+}
+
+#[implement(Pool)]
+async fn spawn_until(self: &Arc<Self>, recv: Receiver<Cmd>, max: usize) -> Result {
+	let mut workers = self.workers.lock().await;
 	while workers.len() < max {
-		self.clone().spawn_one(&mut workers)?;
+		self.spawn_one(&mut workers, recv.clone())?;
 	}

 	Ok(())
 }

 #[implement(Pool)]
-fn spawn_one(self: Arc<Self>, workers: &mut Vec<JoinHandle<()>>) -> Result<usize> {
-	use std::thread::Builder;
-
+fn spawn_one(self: &Arc<Self>, workers: &mut JoinSet<()>, recv: Receiver<Cmd>) -> Result {
 	let id = workers.len();

-	debug!(?id, "spawning {WORKER_THREAD_NAME}...");
-	let thread = Builder::new()
-		.name(WORKER_THREAD_NAME.into())
-		.spawn(move || self.worker(id))?;
+	debug!(?id, "spawning");
+	let self_ = self.clone();
+	let _abort = workers.spawn_blocking_on(move || self_.worker(id, recv), self.server.runtime());

-	workers.push(thread);
-
-	Ok(id)
+	Ok(())
 }

 #[implement(Pool)]
-pub(crate) fn close(self: &Arc<Self>) {
-	debug!(
-		senders = %self.send.sender_count(),
-		receivers = %self.send.receiver_count(),
-		"Closing pool channel"
-	);
-	let closing = self.send.close();
-	debug_assert!(closing, "channel is not closing");
-
-	debug!("Shutting down pool...");
-	let mut workers = self.workers.lock().expect("locked");
-
-	debug!(
-		workers = %workers.len(),
-		"Waiting for workers to join..."
-	);
-	take(&mut *workers)
-		.into_iter()
-		.map(JoinHandle::join)
-		.try_for_each(identity)
-		.expect("failed to join worker threads");
-
-	debug_assert!(self.send.is_empty(), "channel is not empty");
-}
-
-#[implement(Pool)]
-#[tracing::instrument(skip(self, cmd), level = "trace")]
+#[tracing::instrument(
+	level = "trace",
+	skip(self, cmd),
+	fields(
+		task = ?tokio::task::try_id(),
+		receivers = self.queue.receiver_count(),
+		senders = self.queue.sender_count(),
+		queued = self.queue.len(),
+		queued_max = self.queued_max.load(Ordering::Relaxed),
+	),
+)]
 pub(crate) async fn execute(&self, mut cmd: Cmd) -> Result<Handle<'_>> {
 	let (send, recv) = oneshot::channel();
 	Self::prepare(&mut cmd, send);

-	self.send
+	if cfg!(debug_assertions) {
+		self.queued_max
+			.fetch_max(self.queue.len(), Ordering::Relaxed);
+	}
+
+	if self.queue.is_full() {
+		debug_warn!(
+			capacity = ?self.queue.capacity(),
+			"pool queue is full"
+		);
+	}
+
+	self.queue
 		.send(cmd)
 		.await
 		.map_err(|e| err!(error!("send failed {e:?}")))?;
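Worker threads are no longer hand-built with std::thread::Builder; they are spawned as blocking tasks into a tokio JoinSet on the server's runtime, and shutdown reduces to closing the queue and awaiting join_all. The sketch below shows that lifecycle under similar assumptions (a bounded async_channel of commands, tokio's multi-thread runtime); the Cmd type and worker body are placeholders rather than the pool's real code:

use async_channel::{bounded, Receiver};
use tokio::task::JoinSet;

// Placeholder command type standing in for the pool's Cmd.
#[derive(Debug)]
struct Cmd(u64);

// Blocking worker: loops until the channel is closed and drained.
fn worker(id: usize, recv: Receiver<Cmd>) {
	while let Ok(cmd) = recv.recv_blocking() {
		println!("worker {id} handling {cmd:?}");
	}
}

#[tokio::main(flavor = "multi_thread")]
async fn main() {
	let (send, recv) = bounded::<Cmd>(64);
	let mut workers = JoinSet::new();

	// Spawn blocking workers onto the current runtime's handle.
	let handle = tokio::runtime::Handle::current();
	for id in 0..4 {
		let recv = recv.clone();
		let _abort = workers.spawn_blocking_on(move || worker(id, recv), &handle);
	}

	for i in 0..16 {
		send.send(Cmd(i)).await.expect("queue open");
	}

	// Shutdown: close the queue, then wait for every worker to return.
	send.close();
	workers.join_all().await;
}

Closing the Sender lets each blocked recv_blocking() drain what remains and then return Err, so the JoinSet empties without any explicit cancellation.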
@@ -136,30 +165,61 @@ fn prepare(cmd: &mut Cmd, send: ResultSender) {
 }

 #[implement(Pool)]
-#[tracing::instrument(skip(self))]
-fn worker(self: Arc<Self>, id: usize) {
-	debug!(?id, "worker spawned");
-	defer! {{ debug!(?id, "worker finished"); }}
-	self.worker_loop(id);
+#[tracing::instrument(skip(self, recv))]
+fn worker(self: Arc<Self>, id: usize, recv: Receiver<Cmd>) {
+	debug!("worker spawned");
+	defer! {{ debug!("worker finished"); }}
+
+	self.worker_loop(&recv);
 }

 #[implement(Pool)]
-fn worker_loop(&self, id: usize) {
-	while let Ok(mut cmd) = self.recv.recv_blocking() {
-		self.worker_handle(id, &mut cmd);
+fn worker_loop(&self, recv: &Receiver<Cmd>) {
+	// initial +1 needed prior to entering wait
+	self.busy.fetch_add(1, Ordering::Relaxed);
+
+	while let Ok(mut cmd) = self.worker_wait(recv) {
+		self.worker_handle(&mut cmd);
 	}
 }

 #[implement(Pool)]
-fn worker_handle(&self, id: usize, cmd: &mut Cmd) {
+#[tracing::instrument(
+	name = "wait",
+	level = "trace",
+	skip_all,
+	fields(
+		receivers = recv.receiver_count(),
+		senders = recv.sender_count(),
+		queued = recv.len(),
+		busy = self.busy.load(Ordering::Relaxed),
+		busy_max = self.busy_max.fetch_max(
+			self.busy.fetch_sub(1, Ordering::Relaxed),
+			Ordering::Relaxed
+		),
+	),
+)]
+fn worker_wait(&self, recv: &Receiver<Cmd>) -> Result<Cmd, RecvError> {
+	recv.recv_blocking().debug_inspect(|_| {
+		self.busy.fetch_add(1, Ordering::Relaxed);
+	})
+}
+
+#[implement(Pool)]
+fn worker_handle(&self, cmd: &mut Cmd) {
 	match cmd {
-		Cmd::Get(get) => self.handle_get(id, get),
+		Cmd::Get(cmd) => self.handle_get(cmd),
 	}
 }

 #[implement(Pool)]
-#[tracing::instrument(skip(self, cmd), fields(%cmd.map), level = "trace")]
-fn handle_get(&self, id: usize, cmd: &mut Get) {
+#[tracing::instrument(
+	name = "get",
+	level = "trace",
+	skip_all,
+	fields(%cmd.map),
+)]
+fn handle_get(&self, cmd: &mut Get) {
 	debug_assert!(!cmd.key.is_empty(), "querying for empty key");

 	// Obtain the result channel.
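Each queued Get still carries a futures oneshot sender: execute() pushes the command onto the queue and awaits the receiver, while the blocking worker fulfils the sender once the lookup completes. A stripped-down sketch of that request/reply handshake, with simplified types and error handling that are not the crate's real signatures:

use async_channel::{bounded, Receiver, Sender};
use futures::channel::oneshot;

// Simplified command: a key plus the oneshot used to return the result.
struct Get {
	key: String,
	res: Option<oneshot::Sender<Result<String, String>>>,
}

// Blocking worker loop: runs on its own thread and drains the queue.
fn worker(recv: Receiver<Get>) {
	while let Ok(mut cmd) = recv.recv_blocking() {
		// Stand-in for the blocking RocksDB lookup.
		let value = Ok(format!("value for {}", cmd.key));
		// Complete the caller's oneshot; ignore a dropped receiver.
		let _ = cmd.res.take().expect("result sender").send(value);
	}
}

// Async caller: queue the command, then await the oneshot reply.
async fn execute(queue: &Sender<Get>, key: &str) -> Result<String, String> {
	let (send, recv) = oneshot::channel();
	queue
		.send(Get { key: key.into(), res: Some(send) })
		.await
		.map_err(|e| format!("send failed: {e}"))?;

	recv.await.map_err(|e| format!("request canceled: {e}"))?
}

#[tokio::main]
async fn main() {
	let (send, recv) = bounded::<Get>(8);
	let thread = std::thread::spawn(move || worker(recv));

	let value = execute(&send, "alpha").await.expect("lookup");
	println!("{value}");

	send.close();
	thread.join().expect("worker join");
}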