Database Refactor

combine service/users data w/ mod unit

split sliding sync related out of service/users

instrument database entry points
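
A hedged sketch of what instrumenting an entry point can look like, using the same tracing attribute style that appears in the diff below; the Map type here is a stand-in, not the crate's real handle:

use tracing::instrument;

/// Stub standing in for a real database map handle.
pub struct Map;

impl Map {
    /// Instrumented entry point: each call opens a debug-level span named after
    /// the function, so database access shows up in traces without logging raw
    /// key bytes.
    #[instrument(skip(self, key), level = "debug")]
    pub fn get(&self, key: &[u8]) -> Option<Vec<u8>> {
        let _ = key;
        None // placeholder body for this sketch
    }
}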

remove increment crap from database interface

de-wrap all database get() calls

de-wrap all database insert() calls

de-wrap all database remove() calls
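
These three de-wrap passes apply one pattern, visible in the diff further down: storage operations that cannot meaningfully fail stop returning Result, so call sites drop their ?/unwrap noise. Before/after, excerpted from the pdu_metadata service shown below:

// Before: an infallible write still returned Result<()>, forcing `?` or
// unwrap on every caller.
pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc<EventId>]) -> Result<()> {
    self.db.mark_as_referenced(room_id, event_ids)
}

// After: the wrapper is gone and the call reads straight through.
pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc<EventId>]) {
    self.db.mark_as_referenced(room_id, event_ids);
}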

refactor database interface for async streaming
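
A hedged sketch of the shape such an interface takes; the type and method names are illustrative, not the crate's actual API:

use futures::Stream;

/// Illustrative async map handle: point lookups become awaitable, range scans
/// become lazy streams instead of eagerly collected Vecs.
struct AsyncMap;

impl AsyncMap {
    /// A single lookup is awaited rather than blocking the calling task.
    async fn get(&self, _key: &[u8]) -> Option<Vec<u8>> {
        None // placeholder for the sketch
    }

    /// A scan yields entries lazily; callers drive it with StreamExt combinators.
    fn iter_from(&self, _from: &[u8]) -> impl Stream<Item = (Vec<u8>, Vec<u8>)> {
        futures::stream::empty::<(Vec<u8>, Vec<u8>)>()
    }
}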

add query key serializer for database
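
The idea behind a query key serializer, sketched with assumed details (the 0xFF separator and the function name are illustrative): compound keys such as (room_id, event_id) become one ordered byte key, so call sites stop hand-concatenating buffers:

/// Join key components with a separator byte (0xFF assumed here).
fn serialize_key<'a, I>(parts: I) -> Vec<u8>
where
    I: IntoIterator<Item = &'a [u8]>,
{
    let mut out = Vec::new();
    for (i, part) in parts.into_iter().enumerate() {
        if i > 0 {
            out.push(0xFF); // component separator
        }
        out.extend_from_slice(part);
    }
    out
}

// Usage sketch: serialize_key([room_id.as_bytes(), event_id.as_bytes()])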

implement Debug for result handle

add query deserializer for database

add deserialization trait for option handle
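
A hedged sketch of what a deserialization trait over an optional result handle can provide: one bound that turns stored bytes into a typed value, implemented for both the handle and Option<handle> so lookups deserialize in a single step. All names and the JSON codec are illustrative:

use serde::de::DeserializeOwned;

/// Stand-in for the database's raw result handle.
struct Handle(Vec<u8>);

trait Deserialized {
    fn deserialized<T: DeserializeOwned>(self) -> Option<T>;
}

impl Deserialized for Handle {
    fn deserialized<T: DeserializeOwned>(self) -> Option<T> {
        serde_json::from_slice(&self.0).ok() // JSON stands in for the real codec
    }
}

impl Deserialized for Option<Handle> {
    fn deserialized<T: DeserializeOwned>(self) -> Option<T> {
        self.and_then(|handle| handle.deserialized())
    }
}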

start a stream utils suite
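
These utilities are what the diff below leans on (IterStream's .stream(), async filter_map over the result): adapters that lift plain iterators into futures streams so async filters can be applied. A minimal sketch of that kind of extension trait:

use futures::stream::{self, Iter};

/// Lift any iterator into a Stream so async combinators (filter_map, then, ...)
/// can be chained onto otherwise synchronous data.
trait IterStream: Iterator + Sized {
    fn stream(self) -> Iter<Self> {
        stream::iter(self)
    }
}

impl<I: Iterator> IterStream for I {}

// Usage sketch: vec.into_iter().stream().filter_map(|item| async move { ... })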

de-wrap/asyncify/type-query count_one_time_keys()

de-wrap/asyncify users count

add admin query users command suite

de-wrap/asyncify users exists

de-wrap/partially asyncify user filter related

asyncify/de-wrap users device/keys related

asyncify/de-wrap user auth/misc related

asyncify/de-wrap users blurhash

asyncify/de-wrap account_data get; merge Data into Service
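
"Merge Data into Service", which recurs through the next several items, means the separate Data layer in each service's data.rs is folded into the Service itself, so the raw map handles become fields of the service and the pass-through methods disappear. A structural sketch with illustrative field names:

use std::sync::Arc;

struct Map; // stub standing in for a database map handle

// Before: data.rs owned the handles and the service only held `db: Data`.
struct Data {
    roomuserdataid_accountdata: Arc<Map>, // illustrative map name
}
struct ServiceBefore {
    db: Data,
}

// After: the handles live on the Service directly; data.rs is deleted.
struct ServiceAfter {
    roomuserdataid_accountdata: Arc<Map>,
}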

partial asyncify/de-wrap uiaa; merge Data into Service

partially asyncify/de-wrap transaction_ids get; merge Data into Service

partially asyncify/de-wrap key_backups; merge Data into Service

asyncify/de-wrap pusher service getters; merge Data into Service

asyncify/de-wrap rooms alias getters/some iterators

asyncify/de-wrap rooms directory getters/iterator

partially asyncify/de-wrap rooms lazy-loading

partially asyncify/de-wrap rooms metadata

asyncify/de-wrap rooms outlier

asyncify/de-wrap rooms pdu_metadata

de-wrap/partially asyncify rooms read receipt

de-wrap rooms search service

de-wrap/partially asyncify rooms user service

partial de-wrap rooms state_compressor

de-wrap rooms state_cache

de-wrap room state et al

de-wrap rooms timeline service

additional users device/keys related

de-wrap/asyncify sender

asyncify services

refactor database to TryFuture/TryStream
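
TryFuture/TryStream are the futures-rs abstractions for futures and streams whose items are Results; moving the database onto them lets callers use the Try combinators instead of manual matching and collecting. An illustrative snippet (assumes the futures crate and a Tokio runtime, not the project's own types):

use futures::{stream, TryStreamExt};

#[tokio::main]
async fn main() -> Result<(), std::io::Error> {
    // A stream of Result items, as a TryStream-returning database scan would yield.
    let rows = stream::iter(vec![Ok::<_, std::io::Error>(1u64), Ok(2), Ok(3)]);

    // Try-combinators short-circuit on the first Err and keep the Ok path terse.
    let doubled: Vec<u64> = rows.map_ok(|n| n * 2).try_collect().await?;

    assert_eq!(doubled, vec![2, 4, 6]);
    Ok(())
}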

refactor services for TryFuture/TryStream

asyncify api handlers

additional asyncification for admin module

abstract stream related; support reverse streams

additional stream conversions

asyncify state-res related

Signed-off-by: Jason Volk <jason@zemos.net>
Jason Volk, 2024-08-08 17:18:30 +00:00, committed by strawberry
parent 6001014078
commit 946ca364e0
203 changed files with 12202 additions and 10709 deletions

@@ -1,8 +1,8 @@
mod data;
use std::sync::Arc;
use conduit::{PduCount, PduEvent, Result};
use conduit::{utils::stream::IterStream, PduCount, Result};
use futures::StreamExt;
use ruma::{
api::{client::relations::get_relating_events, Direction},
events::{relation::RelationType, TimelineEventType},
@@ -10,7 +10,7 @@ use ruma::{
};
use serde::Deserialize;
use self::data::Data;
use self::data::{Data, PdusIterItem};
use crate::{rooms, Dep};
pub struct Service {
@@ -51,21 +51,19 @@ impl crate::Service for Service {
impl Service {
#[tracing::instrument(skip(self, from, to), level = "debug")]
pub fn add_relation(&self, from: PduCount, to: PduCount) -> Result<()> {
pub fn add_relation(&self, from: PduCount, to: PduCount) {
match (from, to) {
(PduCount::Normal(f), PduCount::Normal(t)) => self.db.add_relation(f, t),
_ => {
// TODO: Relations with backfilled pdus
Ok(())
},
}
}
#[allow(clippy::too_many_arguments)]
pub fn paginate_relations_with_filter(
&self, sender_user: &UserId, room_id: &RoomId, target: &EventId, filter_event_type: &Option<TimelineEventType>,
filter_rel_type: &Option<RelationType>, from: &Option<String>, to: &Option<String>, limit: &Option<UInt>,
pub async fn paginate_relations_with_filter(
&self, sender_user: &UserId, room_id: &RoomId, target: &EventId, filter_event_type: Option<TimelineEventType>,
filter_rel_type: Option<RelationType>, from: Option<&String>, to: Option<&String>, limit: Option<UInt>,
recurse: bool, dir: Direction,
) -> Result<get_relating_events::v1::Response> {
let from = match from {
@@ -76,7 +74,7 @@ impl Service {
},
};
let to = to.as_ref().and_then(|t| PduCount::try_from_string(t).ok());
let to = to.and_then(|t| PduCount::try_from_string(t).ok());
// Use limit or else 10, with maximum 100
let limit = limit
@@ -92,30 +90,32 @@ impl Service {
1
};
let relations_until = &self.relations_until(sender_user, room_id, target, from, depth)?;
let events: Vec<_> = relations_until // TODO: should be relations_after
.iter()
.filter(|(_, pdu)| {
filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
&& if let Ok(content) =
serde_json::from_str::<ExtractRelatesToEventId>(pdu.content.get())
{
filter_rel_type
.as_ref()
.map_or(true, |r| &content.relates_to.rel_type == r)
} else {
false
}
})
.take(limit)
.filter(|(_, pdu)| {
self.services
.state_accessor
.user_can_see_event(sender_user, room_id, &pdu.event_id)
.unwrap_or(false)
})
.take_while(|(k, _)| Some(k) != to.as_ref()) // Stop at `to`
.collect();
let relations_until: Vec<PdusIterItem> = self
.relations_until(sender_user, room_id, target, from, depth)
.await?;
// TODO: should be relations_after
let events: Vec<_> = relations_until
.into_iter()
.filter(move |(_, pdu): &PdusIterItem| {
if !filter_event_type.as_ref().map_or(true, |t| pdu.kind == *t) {
return false;
}
let Ok(content) = serde_json::from_str::<ExtractRelatesToEventId>(pdu.content.get()) else {
return false;
};
filter_rel_type
.as_ref()
.map_or(true, |r| *r == content.relates_to.rel_type)
})
.take(limit)
.take_while(|(k, _)| Some(*k) != to)
.stream()
.filter_map(|item| self.visibility_filter(sender_user, item))
.collect()
.await;
let next_token = events.last().map(|(count, _)| count).copied();
@@ -125,9 +125,9 @@ impl Service {
.map(|(_, pdu)| pdu.to_message_like_event())
.collect(),
Direction::Backward => events
.into_iter()
.rev() // relations are always most recent first
.map(|(_, pdu)| pdu.to_message_like_event())
.collect(),
};
@@ -135,68 +135,85 @@ impl Service {
chunk: events_chunk,
next_batch: next_token.map(|t| t.stringify()),
prev_batch: Some(from.stringify()),
recursion_depth: if recurse {
Some(depth.into())
} else {
None
},
recursion_depth: recurse.then_some(depth.into()),
})
}
pub fn relations_until<'a>(
&'a self, user_id: &'a UserId, room_id: &'a RoomId, target: &'a EventId, until: PduCount, max_depth: u8,
) -> Result<Vec<(PduCount, PduEvent)>> {
let room_id = self.services.short.get_or_create_shortroomid(room_id)?;
#[allow(unknown_lints)]
#[allow(clippy::manual_unwrap_or_default)]
let target = match self.services.timeline.get_pdu_count(target)? {
Some(PduCount::Normal(c)) => c,
async fn visibility_filter(&self, sender_user: &UserId, item: PdusIterItem) -> Option<PdusIterItem> {
let (_, pdu) = &item;
self.services
.state_accessor
.user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id)
.await
.then_some(item)
}
pub async fn relations_until(
&self, user_id: &UserId, room_id: &RoomId, target: &EventId, until: PduCount, max_depth: u8,
) -> Result<Vec<PdusIterItem>> {
let room_id = self.services.short.get_or_create_shortroomid(room_id).await;
let target = match self.services.timeline.get_pdu_count(target).await {
Ok(PduCount::Normal(c)) => c,
// TODO: Support backfilled relations
_ => 0, // This will result in an empty iterator
};
self.db
let mut pdus: Vec<PdusIterItem> = self
.db
.relations_until(user_id, room_id, target, until)
.map(|mut relations| {
let mut pdus: Vec<_> = (*relations).into_iter().filter_map(Result::ok).collect();
let mut stack: Vec<_> = pdus.clone().iter().map(|pdu| (pdu.to_owned(), 1)).collect();
.collect()
.await;
while let Some(stack_pdu) = stack.pop() {
let target = match stack_pdu.0 .0 {
PduCount::Normal(c) => c,
// TODO: Support backfilled relations
PduCount::Backfilled(_) => 0, // This will result in an empty iterator
};
let mut stack: Vec<_> = pdus.clone().into_iter().map(|pdu| (pdu, 1)).collect();
if let Ok(relations) = self.db.relations_until(user_id, room_id, target, until) {
for relation in relations.flatten() {
if stack_pdu.1 < max_depth {
stack.push((relation.clone(), stack_pdu.1.saturating_add(1)));
}
while let Some(stack_pdu) = stack.pop() {
let target = match stack_pdu.0 .0 {
PduCount::Normal(c) => c,
// TODO: Support backfilled relations
PduCount::Backfilled(_) => 0, // This will result in an empty iterator
};
pdus.push(relation);
}
}
let relations: Vec<PdusIterItem> = self
.db
.relations_until(user_id, room_id, target, until)
.collect()
.await;
for relation in relations {
if stack_pdu.1 < max_depth {
stack.push((relation.clone(), stack_pdu.1.saturating_add(1)));
}
pdus.sort_by(|a, b| a.0.cmp(&b.0));
pdus
})
pdus.push(relation);
}
}
pdus.sort_by(|a, b| a.0.cmp(&b.0));
Ok(pdus)
}
#[inline]
#[tracing::instrument(skip_all, level = "debug")]
pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc<EventId>]) -> Result<()> {
self.db.mark_as_referenced(room_id, event_ids)
pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc<EventId>]) {
self.db.mark_as_referenced(room_id, event_ids);
}
#[inline]
#[tracing::instrument(skip(self), level = "debug")]
pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result<bool> {
self.db.is_event_referenced(room_id, event_id)
pub async fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> bool {
self.db.is_event_referenced(room_id, event_id).await
}
#[inline]
#[tracing::instrument(skip(self), level = "debug")]
pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { self.db.mark_event_soft_failed(event_id) }
pub fn mark_event_soft_failed(&self, event_id: &EventId) { self.db.mark_event_soft_failed(event_id) }
#[inline]
#[tracing::instrument(skip(self), level = "debug")]
pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result<bool> { self.db.is_event_soft_failed(event_id) }
pub async fn is_event_soft_failed(&self, event_id: &EventId) -> bool {
self.db.is_event_soft_failed(event_id).await
}
}