Database Refactor

combine service/users data with mod unit

split sliding sync related out of service/users

instrument database entry points

remove increment methods from database interface

de-wrap all database get() calls

de-wrap all database insert() calls

de-wrap all database remove() calls
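
De-wrapping means the map primitives stop returning `Result` at every call site, so the trailing `?` disappears throughout (visible in the diff below). A minimal sketch of the resulting interface shape, with stub bodies standing in for the real storage engine:

    /// Sketch only: the real Map wraps a storage engine; bodies are stubs.
    pub struct Map;

    impl Map {
        /// Before: fn insert(&self, key: &[u8], val: &[u8]) -> Result<()>
        /// After: infallible at the interface; callers drop their `?`.
        pub fn insert(&self, _key: &[u8], _val: &[u8]) {}

        /// Before: fn remove(&self, key: &[u8]) -> Result<()>
        pub fn remove(&self, _key: &[u8]) {}
    }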

refactor database interface for async streaming

add query key serializer for database

implement Debug for result handle

add query deserializer for database

add deserialization trait for option handle
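
Together these let a call site go from raw bytes to a typed value in one chain, as in `self.global.qry("version").await.deserialized()` in the diff below. A minimal sketch of the `Deserialized` extension-trait idea; the `Handle` type and the JSON error plumbing here are assumptions for illustration:

    use serde::de::DeserializeOwned;

    /// Hypothetical raw-bytes handle returned by a database query.
    pub struct Handle(Vec<u8>);

    /// Extension trait: decode a query result directly into a typed value.
    pub trait Deserialized {
        fn deserialized<T: DeserializeOwned>(self) -> Result<T, serde_json::Error>;
    }

    impl Deserialized for Result<Handle, serde_json::Error> {
        fn deserialized<T: DeserializeOwned>(self) -> Result<T, serde_json::Error> {
            self.and_then(|handle| serde_json::from_slice(&handle.0))
        }
    }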

start a stream utils suite
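
One plausible starting utility, hedged as illustration only (the trait name and shape are assumptions): an extension trait that lifts ordinary iterators into `futures` streams so synchronous sources can feed async combinators:

    use futures::{stream, Stream};

    /// Hypothetical stream-utils helper: any iterator becomes a Stream.
    pub trait IterStream: Iterator + Sized {
        fn stream(self) -> impl Stream<Item = Self::Item>;
    }

    impl<I: Iterator + Sized> IterStream for I {
        fn stream(self) -> impl Stream<Item = Self::Item> {
            stream::iter(self)
        }
    }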

de-wrap/asyncify/type-query count_one_time_keys()

de-wrap/asyncify users count

add admin query users command suite

de-wrap/asyncify users exists

de-wrap/partially asyncify user filter related

asyncify/de-wrap users device/keys related

asyncify/de-wrap user auth/misc related

asyncify/de-wrap users blurhash

asyncify/de-wrap account_data get; merge Data into Service

partially asyncify/de-wrap uiaa; merge Data into Service

partially asyncify/de-wrap transaction_ids get; merge Data into Service

partially asyncify/de-wrap key_backups; merge Data into Service

asyncify/de-wrap pusher service getters; merge Data into Service
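
The recurring shape across these services: a blocking `fn -> Result<Option<T>>` on a separate `Data` struct becomes an `async fn -> Result<T>` on the `Service` itself, with `Err` standing in for "not found". A hedged sketch, with illustrative types and error type:

    use std::collections::HashMap;

    /// Illustrative stand-in for a merged Data-into-Service struct.
    pub struct Service {
        store: HashMap<String, Vec<u8>>,
    }

    impl Service {
        /// Before (on Data): fn get(&self, key: &str) -> Result<Option<Vec<u8>>>
        /// After (on Service): absence is an Err, so callers chain combinators
        /// instead of matching a nested Result<Option<_>>.
        pub async fn get(&self, key: &str) -> Result<Vec<u8>, &'static str> {
            self.store.get(key).cloned().ok_or("not found")
        }
    }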

asyncify/de-wrap rooms alias getters/some iterators

asyncify/de-wrap rooms directory getters/iterator

partially asyncify/de-wrap rooms lazy-loading

partially asyncify/de-wrap rooms metadata

asyncify/de-wrap rooms outlier

asyncify/de-wrap rooms pdu_metadata

de-wrap/partially asyncify rooms read receipt

de-wrap rooms search service

de-wrap/partially asyncify rooms user service

partially de-wrap rooms state_compressor

de-wrap rooms state_cache

de-wrap room state et al

de-wrap rooms timeline service

additional users device/keys related work

de-wrap/asyncify sender

asyncify services

refactor database to TryFuture/TryStream

refactor services for TryFuture/TryStream
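
In this style a stream of `Result` items is driven through `try_*` combinators instead of being collected and unwrapped by hand. A minimal, self-contained example of the pattern (not code from this commit):

    use futures::{stream, StreamExt, TryStreamExt};

    /// A Stream of Result items is a TryStream; try_collect short-circuits
    /// on the first Err instead of wrapping every item at the call site.
    async fn parse_all() -> Result<Vec<u64>, std::num::ParseIntError> {
        stream::iter(["1", "2", "3"])
            .map(str::parse::<u64>)
            .try_collect()
            .await
    }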

asyncify api handlers

additional asyncification for admin module

abstract stream related; support reverse streams
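
Supporting reverse streams makes iteration direction a parameter rather than a duplicated code path; a hypothetical sketch over an in-memory snapshot (the `Dir` enum and function are assumptions, not this commit's API):

    use futures::{stream, Stream};

    /// Hypothetical direction flag for range iteration.
    pub enum Dir { Forward, Reverse }

    /// Stream keys from an ordered snapshot in either direction.
    pub fn stream_keys(keys: Vec<u64>, dir: Dir) -> impl Stream<Item = u64> {
        let iter: Box<dyn Iterator<Item = u64> + Send> = match dir {
            Dir::Forward => Box::new(keys.into_iter()),
            Dir::Reverse => Box::new(keys.into_iter().rev()),
        };
        stream::iter(iter)
    }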

additional stream conversions

asyncify state-res related

Signed-off-by: Jason Volk <jason@zemos.net>
Jason Volk <jason@zemos.net>, 2024-08-08 17:18:30 +00:00 (committed by strawberry)
parent 6001014078
commit 946ca364e0
203 changed files with 12202 additions and 10709 deletions


@@ -4,8 +4,8 @@ use std::{
 };
 
 use conduit::{trace, utils, Error, Result, Server};
-use database::{Database, Map};
-use futures_util::{stream::FuturesUnordered, StreamExt};
+use database::{Database, Deserialized, Map};
+use futures::{pin_mut, stream::FuturesUnordered, FutureExt, StreamExt};
 use ruma::{
 	api::federation::discovery::{ServerSigningKeys, VerifyKey},
 	signatures::Ed25519KeyPair,
@@ -83,7 +83,7 @@ impl Data {
 			.checked_add(1)
 			.expect("counter must not overflow u64");
 
-		self.global.insert(COUNTER, &counter.to_be_bytes())?;
+		self.global.insert(COUNTER, &counter.to_be_bytes());
 
 		Ok(*counter)
 	}
@@ -102,7 +102,7 @@ impl Data {
 	fn stored_count(global: &Arc<Map>) -> Result<u64> {
 		global
-			.get(COUNTER)?
+			.get(COUNTER)
 			.as_deref()
 			.map_or(Ok(0_u64), utils::u64_from_bytes)
 	}
 
@@ -133,36 +133,18 @@ impl Data {
 		futures.push(self.userroomid_highlightcount.watch_prefix(&userid_prefix));
 
 		// Events for rooms we are in
-		for room_id in self
-			.services
-			.state_cache
-			.rooms_joined(user_id)
-			.filter_map(Result::ok)
-		{
-			let short_roomid = self
-				.services
-				.short
-				.get_shortroomid(&room_id)
-				.ok()
-				.flatten()
-				.expect("room exists")
-				.to_be_bytes()
-				.to_vec();
+		let rooms_joined = self.services.state_cache.rooms_joined(user_id);
+		pin_mut!(rooms_joined);
+		while let Some(room_id) = rooms_joined.next().await {
+			let Ok(short_roomid) = self.services.short.get_shortroomid(room_id).await else {
+				continue;
+			};
 
 			let roomid_bytes = room_id.as_bytes().to_vec();
 			let mut roomid_prefix = roomid_bytes.clone();
 			roomid_prefix.push(0xFF);
 
-			// PDUs
-			futures.push(self.pduid_pdu.watch_prefix(&short_roomid));
-
-			// EDUs
-			futures.push(Box::pin(async move {
-				let _result = self.services.typing.wait_for_update(&room_id).await;
-			}));
-
-			futures.push(self.readreceiptid_readreceipt.watch_prefix(&roomid_prefix));
-
 			// Key changes
 			futures.push(self.keychangeid_userid.watch_prefix(&roomid_prefix));
@@ -174,6 +156,19 @@ impl Data {
 			self.roomusertype_roomuserdataid
 				.watch_prefix(&roomuser_prefix),
 			);
+
+			// PDUs
+			let short_roomid = short_roomid.to_be_bytes().to_vec();
+			futures.push(self.pduid_pdu.watch_prefix(&short_roomid));
+
+			// EDUs
+			let typing_room_id = room_id.to_owned();
+			let typing_wait_for_update = async move {
+				self.services.typing.wait_for_update(&typing_room_id).await;
+			};
+			futures.push(typing_wait_for_update.boxed());
+
+			futures.push(self.readreceiptid_readreceipt.watch_prefix(&roomid_prefix));
 		}
 
 		let mut globaluserdata_prefix = vec![0xFF];
@@ -190,12 +185,14 @@ impl Data {
 		// One time keys
 		futures.push(self.userid_lastonetimekeyupdate.watch_prefix(&userid_bytes));
 
-		futures.push(Box::pin(async move {
+		// Server shutdown
+		let server_shutdown = async move {
 			while self.services.server.running() {
-				let _result = self.services.server.signal.subscribe().recv().await;
+				self.services.server.signal.subscribe().recv().await.ok();
 			}
-		}));
+		};
+		futures.push(server_shutdown.boxed());
 
 		if !self.services.server.running() {
 			return Ok(());
 		}
@@ -209,10 +206,10 @@ impl Data {
 	}
 
 	pub fn load_keypair(&self) -> Result<Ed25519KeyPair> {
-		let keypair_bytes = self.global.get(b"keypair")?.map_or_else(
-			|| {
+		let keypair_bytes = self.global.get(b"keypair").map_or_else(
+			|_| {
 				let keypair = utils::generate_keypair();
-				self.global.insert(b"keypair", &keypair)?;
+				self.global.insert(b"keypair", &keypair);
 				Ok::<_, Error>(keypair)
 			},
 			|val| Ok(val.to_vec()),
@@ -241,7 +238,10 @@ impl Data {
 	}
 
 	#[inline]
-	pub fn remove_keypair(&self) -> Result<()> { self.global.remove(b"keypair") }
+	pub fn remove_keypair(&self) -> Result<()> {
+		self.global.remove(b"keypair");
+		Ok(())
+	}
 
 	/// TODO: the key valid until timestamp (`valid_until_ts`) is only honored
 	/// in room version > 4
@@ -250,15 +250,15 @@ impl Data {
 	///
 	/// This doesn't actually check that the keys provided are newer than the
 	/// old set.
-	pub fn add_signing_key(
+	pub async fn add_signing_key(
 		&self, origin: &ServerName, new_keys: ServerSigningKeys,
-	) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
+	) -> BTreeMap<OwnedServerSigningKeyId, VerifyKey> {
 		// Not atomic, but this is not critical
-		let signingkeys = self.server_signingkeys.get(origin.as_bytes())?;
+		let signingkeys = self.server_signingkeys.qry(origin).await;
 
 		let mut keys = signingkeys
-			.and_then(|keys| serde_json::from_slice(&keys).ok())
-			.unwrap_or_else(|| {
+			.and_then(|keys| serde_json::from_slice(&keys).map_err(Into::into))
+			.unwrap_or_else(|_| {
 				// Just insert "now", it doesn't matter
 				ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now())
 			});
@@ -275,7 +275,7 @@ impl Data {
 		self.server_signingkeys.insert(
 			origin.as_bytes(),
 			&serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"),
-		)?;
+		);
 
 		let mut tree = keys.verify_keys;
 		tree.extend(
@ -284,45 +284,38 @@ impl Data {
.map(|old| (old.0, VerifyKey::new(old.1.key))),
);
Ok(tree)
tree
}
/// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found
/// for the server.
pub fn verify_keys_for(&self, origin: &ServerName) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
let signingkeys = self
.signing_keys_for(origin)?
.map_or_else(BTreeMap::new, |keys: ServerSigningKeys| {
pub async fn verify_keys_for(&self, origin: &ServerName) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
self.signing_keys_for(origin).await.map_or_else(
|_| Ok(BTreeMap::new()),
|keys: ServerSigningKeys| {
let mut tree = keys.verify_keys;
tree.extend(
keys.old_verify_keys
.into_iter()
.map(|old| (old.0, VerifyKey::new(old.1.key))),
);
tree
});
Ok(signingkeys)
Ok(tree)
},
)
}
pub fn signing_keys_for(&self, origin: &ServerName) -> Result<Option<ServerSigningKeys>> {
let signingkeys = self
.server_signingkeys
.get(origin.as_bytes())?
.and_then(|bytes| serde_json::from_slice(&bytes).ok());
Ok(signingkeys)
pub async fn signing_keys_for(&self, origin: &ServerName) -> Result<ServerSigningKeys> {
self.server_signingkeys
.qry(origin)
.await
.deserialized_json()
}
pub fn database_version(&self) -> Result<u64> {
self.global.get(b"version")?.map_or(Ok(0), |version| {
utils::u64_from_bytes(&version).map_err(|_| Error::bad_database("Database version id is invalid."))
})
}
pub async fn database_version(&self) -> u64 { self.global.qry("version").await.deserialized().unwrap_or(0) }
#[inline]
pub fn bump_database_version(&self, new_version: u64) -> Result<()> {
self.global.insert(b"version", &new_version.to_be_bytes())?;
self.global.insert(b"version", &new_version.to_be_bytes());
Ok(())
}