refactor for stronger RawPduId type

implement standard traits for PduCount

enable serde for arrayvec

typedef various shortids (see the sketch below)

pducount simplifications

split parts of pdu_metadata service to core/pdu and api/relations

remove some yields; improve var names/syntax

tweak types for timeline limit arguments

Signed-off-by: Jason Volk <jason@zemos.net>
Jason Volk 2024-11-02 06:12:54 +00:00
parent 2e4d9cb37c
commit 9da523c004
41 changed files with 796 additions and 573 deletions
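The diffs below replace bare u64 shortids in the auth_chain service with the ShortEventId alias from rooms::short. As orientation only, and assuming the alias is a plain typedef over the same integer (the cache keys stay Vec<u64> and .copied() still compiles), the shortid typedefs presumably look roughly like this:

// Assumed sketch of the typedefs named by "typedef various shortids";
// not copied from the repository. A shortid is a compact integer handle
// that stands in for a full Matrix identifier in the database.
pub type ShortId = u64;

// Short handle for an event ID, as used by the auth_chain hunks below.
pub type ShortEventId = ShortId;

// Hypothetical further aliases following the same pattern.
pub type ShortRoomId = ShortId;
pub type ShortStateKey = ShortId;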

@@ -7,9 +7,11 @@ use conduit::{err, utils, utils::math::usize_from_f64, Err, Result};
 use database::Map;
 use lru_cache::LruCache;
 
+use crate::rooms::short::ShortEventId;
+
 pub(super) struct Data {
 	shorteventid_authchain: Arc<Map>,
-	pub(super) auth_chain_cache: Mutex<LruCache<Vec<u64>, Arc<[u64]>>>,
+	pub(super) auth_chain_cache: Mutex<LruCache<Vec<u64>, Arc<[ShortEventId]>>>,
 }
 
 impl Data {
@@ -24,7 +26,7 @@ impl Data {
 		}
 	}
 
-	pub(super) async fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result<Arc<[u64]>> {
+	pub(super) async fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result<Arc<[ShortEventId]>> {
 		debug_assert!(!key.is_empty(), "auth_chain key must not be empty");
 
 		// Check RAM cache
@@ -63,7 +65,7 @@ impl Data {
 		Ok(chain)
 	}
 
-	pub(super) fn cache_auth_chain(&self, key: Vec<u64>, auth_chain: Arc<[u64]>) {
+	pub(super) fn cache_auth_chain(&self, key: Vec<u64>, auth_chain: Arc<[ShortEventId]>) {
 		debug_assert!(!key.is_empty(), "auth_chain key must not be empty");
 
 		// Only persist single events in db
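The data-layer hunks above only change the cached value type; the lookup order implied by their comments stays the same: check the in-memory LRU first, then fall back to the persistent shorteventid_authchain map, which stores chains only for single events. A minimal, self-contained sketch of that two-tier pattern, using stand-in types rather than the service's real Data struct:

// Self-contained sketch of the lookup order suggested by the hunks above;
// not the service's actual code.
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

use lru_cache::LruCache;

type ShortEventId = u64; // assumed alias; see the sketch earlier

struct AuthChainCache {
	// RAM cache keyed by a vector of shortids (a single event or a whole chunk).
	ram: Mutex<LruCache<Vec<u64>, Arc<[ShortEventId]>>>,
	// Stand-in for the persistent shorteventid_authchain map.
	db: Mutex<HashMap<ShortEventId, Vec<ShortEventId>>>,
}

impl AuthChainCache {
	fn new(capacity: usize) -> Self {
		Self {
			ram: Mutex::new(LruCache::new(capacity)),
			db: Mutex::new(HashMap::new()),
		}
	}

	fn get(&self, key: &[u64]) -> Option<Arc<[ShortEventId]>> {
		// Check RAM cache first.
		if let Some(hit) = self.ram.lock().unwrap().get_mut(key) {
			return Some(Arc::clone(hit));
		}

		// Only single events are persisted, so only single-event keys can be
		// recovered from the database; chunk keys must be recomputed.
		if let [single] = key {
			if let Some(chain) = self.db.lock().unwrap().get(single) {
				let chain: Arc<[ShortEventId]> = chain.iter().copied().collect();
				self.ram.lock().unwrap().insert(key.to_vec(), Arc::clone(&chain));
				return Some(chain);
			}
		}

		None
	}
}

Persisting only single-event chains presumably keeps the database map small; multi-event chunk keys are cheap to recompute and live only in RAM.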

@@ -10,7 +10,7 @@ use futures::Stream;
 use ruma::{EventId, RoomId};
 
 use self::data::Data;
-use crate::{rooms, Dep};
+use crate::{rooms, rooms::short::ShortEventId, Dep};
 
 pub struct Service {
 	services: Services,
@@ -64,7 +64,7 @@ impl Service {
 	}
 
 	#[tracing::instrument(skip_all, name = "auth_chain")]
-	pub async fn get_auth_chain(&self, room_id: &RoomId, starting_events: &[&EventId]) -> Result<Vec<u64>> {
+	pub async fn get_auth_chain(&self, room_id: &RoomId, starting_events: &[&EventId]) -> Result<Vec<ShortEventId>> {
 		const NUM_BUCKETS: usize = 50; //TODO: change possible w/o disrupting db?
 		const BUCKET: BTreeSet<(u64, &EventId)> = BTreeSet::new();
@@ -97,7 +97,7 @@ impl Service {
 				continue;
 			}
 
-			let chunk_key: Vec<u64> = chunk.iter().map(|(short, _)| short).copied().collect();
+			let chunk_key: Vec<ShortEventId> = chunk.iter().map(|(short, _)| short).copied().collect();
 			if let Ok(cached) = self.get_cached_eventid_authchain(&chunk_key).await {
 				trace!("Found cache entry for whole chunk");
 				full_auth_chain.extend(cached.iter().copied());
@@ -156,7 +156,7 @@ impl Service {
 	}
 
 	#[tracing::instrument(skip(self, room_id))]
-	async fn get_auth_chain_inner(&self, room_id: &RoomId, event_id: &EventId) -> Result<HashSet<u64>> {
+	async fn get_auth_chain_inner(&self, room_id: &RoomId, event_id: &EventId) -> Result<HashSet<ShortEventId>> {
 		let mut todo = vec![Arc::from(event_id)];
 		let mut found = HashSet::new();
@@ -195,19 +195,19 @@ impl Service {
 	}
 
 	#[inline]
-	pub async fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result<Arc<[u64]>> {
+	pub async fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result<Arc<[ShortEventId]>> {
 		self.db.get_cached_eventid_authchain(key).await
 	}
 
 	#[tracing::instrument(skip(self), level = "debug")]
-	pub fn cache_auth_chain(&self, key: Vec<u64>, auth_chain: &HashSet<u64>) {
-		let val = auth_chain.iter().copied().collect::<Arc<[u64]>>();
+	pub fn cache_auth_chain(&self, key: Vec<u64>, auth_chain: &HashSet<ShortEventId>) {
+		let val = auth_chain.iter().copied().collect::<Arc<[ShortEventId]>>();
 		self.db.cache_auth_chain(key, val);
 	}
 
 	#[tracing::instrument(skip(self), level = "debug")]
-	pub fn cache_auth_chain_vec(&self, key: Vec<u64>, auth_chain: &Vec<u64>) {
-		let val = auth_chain.iter().copied().collect::<Arc<[u64]>>();
+	pub fn cache_auth_chain_vec(&self, key: Vec<u64>, auth_chain: &Vec<ShortEventId>) {
+		let val = auth_chain.iter().copied().collect::<Arc<[ShortEventId]>>();
 		self.db.cache_auth_chain(key, val);
 	}
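With the new signatures, callers of the service receive Vec<ShortEventId> instead of Vec<u64>. A hypothetical call site; the helper name and its placement relative to super::Service are assumptions, not part of this commit:

// Hypothetical caller of the auth_chain service after this change.
use conduit::Result;
use ruma::{EventId, RoomId};

use crate::rooms::short::ShortEventId;

async fn auth_chain_len(
	auth_chain: &super::Service, // assumed location of the Service above
	room_id: &RoomId,
	starting_events: &[&EventId],
) -> Result<usize> {
	// Now a Vec<ShortEventId> rather than a bare Vec<u64>; the alias documents
	// intent while remaining the same integer underneath.
	let chain: Vec<ShortEventId> = auth_chain
		.get_auth_chain(room_id, starting_events)
		.await?;

	Ok(chain.len())
}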