Database Refactor

combine service/users data w/ mod unit

split sliding sync related out of service/users

instrument database entry points

remove increment crap from database interface

de-wrap all database get() calls

de-wrap all database insert() calls

de-wrap all database remove() calls

refactor database interface for async streaming

add query key serializer for database

implement Debug for result handle

add query deserializer for database

add deserialization trait for option handle

start a stream utils suite

de-wrap/asyncify/type-query count_one_time_keys()

de-wrap/asyncify users count

add admin query users command suite

de-wrap/asyncify users exists

de-wrap/partially asyncify user filter related

asyncify/de-wrap users device/keys related

asyncify/de-wrap user auth/misc related

asyncify/de-wrap users blurhash

asyncify/de-wrap account_data get; merge Data into Service

partial asyncify/de-wrap uiaa; merge Data into Service

partially asyncify/de-wrap transaction_ids get; merge Data into Service

partially asyncify/de-wrap key_backups; merge Data into Service

asyncify/de-wrap pusher service getters; merge Data into Service

asyncify/de-wrap rooms alias getters/some iterators

asyncify/de-wrap rooms directory getters/iterator

partially asyncify/de-wrap rooms lazy-loading

partially asyncify/de-wrap rooms metadata

asyncify/dewrap rooms outlier

asyncify/dewrap rooms pdu_metadata

dewrap/partially asyncify rooms read receipt

de-wrap rooms search service

de-wrap/partially asyncify rooms user service

partial de-wrap rooms state_compressor

de-wrap rooms state_cache

de-wrap room state et al

de-wrap rooms timeline service

additional users device/keys related

de-wrap/asyncify sender

asyncify services

refactor database to TryFuture/TryStream

refactor services for TryFuture/TryStream

asyncify api handlers

additional asyncification for admin module

abstract stream related; support reverse streams

additional stream conversions

asyncify state-res related

Signed-off-by: Jason Volk <jason@zemos.net>
Jason Volk authored 2024-08-08 17:18:30 +00:00, committed by strawberry
parent 6001014078
commit 946ca364e0
203 changed files with 12202 additions and 10709 deletions
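The recurring "de-wrap/asyncify" change from the log above is visible in the diff below: blocking getters that returned Result<Option<T>> become async getters returning Result<T>, and hand-built 0xFF-separated byte keys give way to typed query-key tuples. A minimal before/after sketch of that shape, lifted from the key_backups getter shown in the diff (types and imports are as in that file; qry()/deserialized_json() are the new database helpers as they appear there):

// Before: synchronous wrapper delegating to a separate Data layer, Option-wrapped result.
pub fn get_backup(&self, user_id: &UserId, version: &str) -> Result<Option<Raw<BackupAlgorithm>>> {
	self.db.get_backup(user_id, version)
}

// After: async and de-wrapped; the key is a serialized tuple and a missing row
// surfaces as an Err from the query instead of Ok(None).
pub async fn get_backup(&self, user_id: &UserId, version: &str) -> Result<Raw<BackupAlgorithm>> {
	let key = (user_id, version);
	self.db.backupid_algorithm.qry(&key).await.deserialized_json()
}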

@@ -1,93 +1,319 @@
-mod data;
use std::{collections::BTreeMap, sync::Arc};
-use conduit::Result;
-use data::Data;
+use conduit::{
+err, implement, utils,
+utils::stream::{ReadyExt, TryIgnore},
+Err, Error, Result,
+};
+use database::{Deserialized, Ignore, Interfix, Map};
+use futures::StreamExt;
use ruma::{
api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup},
serde::Raw,
OwnedRoomId, RoomId, UserId,
};
use crate::{globals, Dep};
pub struct Service {
db: Data,
services: Services,
}
struct Data {
backupid_algorithm: Arc<Map>,
backupid_etag: Arc<Map>,
backupkeyid_backup: Arc<Map>,
}
struct Services {
globals: Dep<globals::Service>,
}
impl crate::Service for Service {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
Ok(Arc::new(Self {
-db: Data::new(&args),
+db: Data {
+backupid_algorithm: args.db["backupid_algorithm"].clone(),
+backupid_etag: args.db["backupid_etag"].clone(),
+backupkeyid_backup: args.db["backupkeyid_backup"].clone(),
+},
services: Services {
globals: args.depend::<globals::Service>("globals"),
},
}))
}
fn name(&self) -> &str { crate::service::make_name(std::module_path!()) }
}
-impl Service {
-pub fn create_backup(&self, user_id: &UserId, backup_metadata: &Raw<BackupAlgorithm>) -> Result<String> {
-self.db.create_backup(user_id, backup_metadata)
-}
-pub fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> {
-self.db.delete_backup(user_id, version)
-}
-pub fn update_backup(
-&self, user_id: &UserId, version: &str, backup_metadata: &Raw<BackupAlgorithm>,
-) -> Result<String> {
-self.db.update_backup(user_id, version, backup_metadata)
-}
-pub fn get_latest_backup_version(&self, user_id: &UserId) -> Result<Option<String>> {
-self.db.get_latest_backup_version(user_id)
-}
-pub fn get_latest_backup(&self, user_id: &UserId) -> Result<Option<(String, Raw<BackupAlgorithm>)>> {
-self.db.get_latest_backup(user_id)
-}
-pub fn get_backup(&self, user_id: &UserId, version: &str) -> Result<Option<Raw<BackupAlgorithm>>> {
-self.db.get_backup(user_id, version)
-}
-pub fn add_key(
-&self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, key_data: &Raw<KeyBackupData>,
-) -> Result<()> {
-self.db
-.add_key(user_id, version, room_id, session_id, key_data)
-}
-pub fn count_keys(&self, user_id: &UserId, version: &str) -> Result<usize> { self.db.count_keys(user_id, version) }
-pub fn get_etag(&self, user_id: &UserId, version: &str) -> Result<String> { self.db.get_etag(user_id, version) }
-pub fn get_all(&self, user_id: &UserId, version: &str) -> Result<BTreeMap<OwnedRoomId, RoomKeyBackup>> {
-self.db.get_all(user_id, version)
-}
-pub fn get_room(
-&self, user_id: &UserId, version: &str, room_id: &RoomId,
-) -> Result<BTreeMap<String, Raw<KeyBackupData>>> {
-self.db.get_room(user_id, version, room_id)
-}
-pub fn get_session(
-&self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str,
-) -> Result<Option<Raw<KeyBackupData>>> {
-self.db.get_session(user_id, version, room_id, session_id)
-}
-pub fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> {
-self.db.delete_all_keys(user_id, version)
-}
-pub fn delete_room_keys(&self, user_id: &UserId, version: &str, room_id: &RoomId) -> Result<()> {
-self.db.delete_room_keys(user_id, version, room_id)
-}
-pub fn delete_room_key(&self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str) -> Result<()> {
-self.db
-.delete_room_key(user_id, version, room_id, session_id)
-}
-}
+#[implement(Service)]
+pub fn create_backup(&self, user_id: &UserId, backup_metadata: &Raw<BackupAlgorithm>) -> Result<String> {
+let version = self.services.globals.next_count()?.to_string();
+let mut key = user_id.as_bytes().to_vec();
+key.push(0xFF);
+key.extend_from_slice(version.as_bytes());
+self.db.backupid_algorithm.insert(
+&key,
+&serde_json::to_vec(backup_metadata).expect("BackupAlgorithm::to_vec always works"),
+);
+self.db
+.backupid_etag
+.insert(&key, &self.services.globals.next_count()?.to_be_bytes());
+Ok(version)
+}
#[implement(Service)]
pub async fn delete_backup(&self, user_id: &UserId, version: &str) {
let mut key = user_id.as_bytes().to_vec();
key.push(0xFF);
key.extend_from_slice(version.as_bytes());
self.db.backupid_algorithm.remove(&key);
self.db.backupid_etag.remove(&key);
let key = (user_id, version, Interfix);
self.db
.backupkeyid_backup
.keys_raw_prefix(&key)
.ignore_err()
.ready_for_each(|outdated_key| self.db.backupkeyid_backup.remove(outdated_key))
.await;
}
#[implement(Service)]
pub async fn update_backup(
&self, user_id: &UserId, version: &str, backup_metadata: &Raw<BackupAlgorithm>,
) -> Result<String> {
let key = (user_id, version);
if self.db.backupid_algorithm.qry(&key).await.is_err() {
return Err!(Request(NotFound("Tried to update nonexistent backup.")));
}
let mut key = user_id.as_bytes().to_vec();
key.push(0xFF);
key.extend_from_slice(version.as_bytes());
self.db
.backupid_algorithm
.insert(&key, backup_metadata.json().get().as_bytes());
self.db
.backupid_etag
.insert(&key, &self.services.globals.next_count()?.to_be_bytes());
Ok(version.to_owned())
}
#[implement(Service)]
pub async fn get_latest_backup_version(&self, user_id: &UserId) -> Result<String> {
let mut prefix = user_id.as_bytes().to_vec();
prefix.push(0xFF);
let mut last_possible_key = prefix.clone();
last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes());
self.db
.backupid_algorithm
.rev_raw_keys_from(&last_possible_key)
.ignore_err()
.ready_take_while(move |key| key.starts_with(&prefix))
.next()
.await
.ok_or_else(|| err!(Request(NotFound("No backup versions found"))))
.and_then(|key| {
utils::string_from_bytes(
key.rsplit(|&b| b == 0xFF)
.next()
.expect("rsplit always returns an element"),
)
.map_err(|_| Error::bad_database("backupid_algorithm key is invalid."))
})
}
#[implement(Service)]
pub async fn get_latest_backup(&self, user_id: &UserId) -> Result<(String, Raw<BackupAlgorithm>)> {
let mut prefix = user_id.as_bytes().to_vec();
prefix.push(0xFF);
let mut last_possible_key = prefix.clone();
last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes());
self.db
.backupid_algorithm
.rev_raw_stream_from(&last_possible_key)
.ignore_err()
.ready_take_while(move |(key, _)| key.starts_with(&prefix))
.next()
.await
.ok_or_else(|| err!(Request(NotFound("No backup found"))))
.and_then(|(key, val)| {
let version = utils::string_from_bytes(
key.rsplit(|&b| b == 0xFF)
.next()
.expect("rsplit always returns an element"),
)
.map_err(|_| Error::bad_database("backupid_algorithm key is invalid."))?;
let algorithm = serde_json::from_slice(val)
.map_err(|_| Error::bad_database("Algorithm in backupid_algorithm is invalid."))?;
Ok((version, algorithm))
})
}
#[implement(Service)]
pub async fn get_backup(&self, user_id: &UserId, version: &str) -> Result<Raw<BackupAlgorithm>> {
let key = (user_id, version);
self.db
.backupid_algorithm
.qry(&key)
.await
.deserialized_json()
}
#[implement(Service)]
pub async fn add_key(
&self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, key_data: &Raw<KeyBackupData>,
) -> Result<()> {
let key = (user_id, version);
if self.db.backupid_algorithm.qry(&key).await.is_err() {
return Err!(Request(NotFound("Tried to update nonexistent backup.")));
}
let mut key = user_id.as_bytes().to_vec();
key.push(0xFF);
key.extend_from_slice(version.as_bytes());
self.db
.backupid_etag
.insert(&key, &self.services.globals.next_count()?.to_be_bytes());
key.push(0xFF);
key.extend_from_slice(room_id.as_bytes());
key.push(0xFF);
key.extend_from_slice(session_id.as_bytes());
self.db
.backupkeyid_backup
.insert(&key, key_data.json().get().as_bytes());
Ok(())
}
#[implement(Service)]
pub async fn count_keys(&self, user_id: &UserId, version: &str) -> usize {
let prefix = (user_id, version);
self.db
.backupkeyid_backup
.keys_raw_prefix(&prefix)
.count()
.await
}
#[implement(Service)]
pub async fn get_etag(&self, user_id: &UserId, version: &str) -> String {
let key = (user_id, version);
self.db
.backupid_etag
.qry(&key)
.await
.deserialized::<u64>()
.as_ref()
.map(ToString::to_string)
.expect("Backup has no etag.")
}
#[implement(Service)]
pub async fn get_all(&self, user_id: &UserId, version: &str) -> BTreeMap<OwnedRoomId, RoomKeyBackup> {
type KeyVal<'a> = ((Ignore, Ignore, &'a RoomId, &'a str), &'a [u8]);
let mut rooms = BTreeMap::<OwnedRoomId, RoomKeyBackup>::new();
let default = || RoomKeyBackup {
sessions: BTreeMap::new(),
};
let prefix = (user_id, version, Interfix);
self.db
.backupkeyid_backup
.stream_prefix(&prefix)
.ignore_err()
.ready_for_each(|((_, _, room_id, session_id), value): KeyVal<'_>| {
let key_data = serde_json::from_slice(value).expect("Invalid KeyBackupData JSON");
rooms
.entry(room_id.into())
.or_insert_with(default)
.sessions
.insert(session_id.into(), key_data);
})
.await;
rooms
}
#[implement(Service)]
pub async fn get_room(
&self, user_id: &UserId, version: &str, room_id: &RoomId,
) -> BTreeMap<String, Raw<KeyBackupData>> {
type KeyVal<'a> = ((Ignore, Ignore, Ignore, &'a str), &'a [u8]);
let prefix = (user_id, version, room_id, Interfix);
self.db
.backupkeyid_backup
.stream_prefix(&prefix)
.ignore_err()
.map(|((.., session_id), value): KeyVal<'_>| {
let session_id = session_id.to_owned();
let key_backup_data = serde_json::from_slice(value).expect("Invalid KeyBackupData JSON");
(session_id, key_backup_data)
})
.collect()
.await
}
#[implement(Service)]
pub async fn get_session(
&self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str,
) -> Result<Raw<KeyBackupData>> {
let key = (user_id, version, room_id, session_id);
self.db
.backupkeyid_backup
.qry(&key)
.await
.deserialized_json()
}
#[implement(Service)]
pub async fn delete_all_keys(&self, user_id: &UserId, version: &str) {
let key = (user_id, version, Interfix);
self.db
.backupkeyid_backup
.keys_raw_prefix(&key)
.ignore_err()
.ready_for_each(|outdated_key| self.db.backupkeyid_backup.remove(outdated_key))
.await;
}
#[implement(Service)]
pub async fn delete_room_keys(&self, user_id: &UserId, version: &str, room_id: &RoomId) {
let key = (user_id, version, room_id, Interfix);
self.db
.backupkeyid_backup
.keys_raw_prefix(&key)
.ignore_err()
.ready_for_each(|outdated_key| self.db.backupkeyid_backup.remove(outdated_key))
.await;
}
#[implement(Service)]
pub async fn delete_room_key(&self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str) {
let key = (user_id, version, room_id, session_id);
self.db
.backupkeyid_backup
.keys_raw_prefix(&key)
.ignore_err()
.ready_for_each(|outdated_key| self.db.backupkeyid_backup.remove(outdated_key))
.await;
}
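On the caller side (the api handlers asyncified later in this series), the getters are now awaited and a missing backup arrives as an error rather than Ok(None), while count_keys and get_etag return plain values. A hypothetical call site, assuming a services handle with a key_backups field; only the awaited calls and their return shapes follow the refactored service above:

// Hypothetical handler fragment: the function name and the `services` accessor are
// illustrative, not from this commit; the method signatures match the code above.
async fn backup_summary(services: &Services, user_id: &UserId, version: &str) -> Result<()> {
	// Was: services.key_backups.get_backup(user_id, version)? yielding an Option to unwrap.
	let _algorithm: Raw<BackupAlgorithm> = services.key_backups.get_backup(user_id, version).await?;
	// count_keys and get_etag no longer return Result at all.
	let _count: usize = services.key_backups.count_keys(user_id, version).await;
	let _etag: String = services.key_backups.get_etag(user_id, version).await;
	Ok(())
}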