refactor for stronger RawPduId type
implement standard traits for PduCount
enable serde for arrayvec
typedef various shortid's
pducount simplifications
split parts of pdu_metadata service to core/pdu and api/relations
remove some yields; improve var names/syntax
tweak types for limit timeline limit arguments

Signed-off-by: Jason Volk <jason@zemos.net>
parent 2e4d9cb37c
commit 9da523c004
41 changed files with 796 additions and 573 deletions
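For orientation before the diff: `PduId` is the structured pair of a room's short id and a timeline `PduCount`, and `RawPduId` is the fixed-width byte form used as the `pduid_pdu` / `eventid_pduid` key. Below is a minimal sketch of how such types could fit together, reconstructed only from the usage visible in this commit; the `ShortRoomId` stand-in, the byte packing, and the handling of backfilled counts are assumptions, not the actual implementation.

```rust
use std::mem::size_of;

// Hypothetical stand-in; in the real tree this comes from rooms::short.
type ShortRoomId = u64;

/// Timeline position; the diff builds backfilled positions as a negative
/// offset (`validated!(0 - count)`).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum PduCount {
    Backfilled(i64),
    Normal(u64),
}

/// Structured id: (shortroomid, shorteventid).
#[derive(Clone, Copy, Debug)]
struct PduId {
    shortroomid: ShortRoomId,
    shorteventid: PduCount,
}

// Two big-endian u64s, mirroring PduId::LEN in the deleted pduid.rs.
const RAW_PDU_ID_LEN: usize = size_of::<u64>() * 2;

/// Fixed-width key form stored in `pduid_pdu` / `eventid_pduid`.
#[derive(Clone, Copy, Debug)]
struct RawPduId([u8; RAW_PDU_ID_LEN]);

impl From<PduId> for RawPduId {
    fn from(id: PduId) -> Self {
        let PduCount::Normal(shorteventid) = id.shorteventid else {
            // Backfilled ids carry an extra zero-marker segment in the old
            // key layout visible in this diff; omitted to keep the sketch short.
            unimplemented!("backfilled ids omitted from this sketch");
        };

        let mut buf = [0_u8; RAW_PDU_ID_LEN];
        buf[..8].copy_from_slice(&id.shortroomid.to_be_bytes());
        buf[8..].copy_from_slice(&shorteventid.to_be_bytes());
        Self(buf)
    }
}

impl From<&[u8]> for RawPduId {
    fn from(bytes: &[u8]) -> Self {
        Self(bytes.try_into().expect("pdu id of unexpected length"))
    }
}

fn main() {
    let id = PduId { shortroomid: 7, shorteventid: PduCount::Normal(42) };
    let raw = RawPduId::from(id);
    assert_eq!(&raw.0[..8], &7_u64.to_be_bytes());
    assert_eq!(&raw.0[8..], &42_u64.to_be_bytes());
}
```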
@@ -1,14 +1,13 @@
 use std::{
     collections::{hash_map, HashMap},
-    mem::size_of,
     sync::Arc,
 };

 use conduit::{
-    err, expected,
+    at, err,
     result::{LogErr, NotFound},
     utils,
-    utils::{future::TryExtExt, stream::TryIgnore, u64_from_u8, ReadyExt},
+    utils::{future::TryExtExt, stream::TryIgnore, ReadyExt},
     Err, PduCount, PduEvent, Result,
 };
 use database::{Database, Deserialized, Json, KeyVal, Map};

@@ -16,7 +15,8 @@ use futures::{Stream, StreamExt};
 use ruma::{CanonicalJsonObject, EventId, OwnedRoomId, OwnedUserId, RoomId, UserId};
 use tokio::sync::Mutex;

-use crate::{rooms, Dep};
+use super::{PduId, RawPduId};
+use crate::{rooms, rooms::short::ShortRoomId, Dep};

 pub(super) struct Data {
     eventid_outlierpdu: Arc<Map>,

@@ -58,30 +58,25 @@ impl Data {
             .lasttimelinecount_cache
             .lock()
             .await
-            .entry(room_id.to_owned())
+            .entry(room_id.into())
         {
-            hash_map::Entry::Vacant(v) => {
-                if let Some(last_count) = self
-                    .pdus_until(sender_user, room_id, PduCount::max())
-                    .await?
-                    .next()
-                    .await
-                {
-                    Ok(*v.insert(last_count.0))
-                } else {
-                    Ok(PduCount::Normal(0))
-                }
-            },
             hash_map::Entry::Occupied(o) => Ok(*o.get()),
+            hash_map::Entry::Vacant(v) => Ok(self
+                .pdus_until(sender_user, room_id, PduCount::max())
+                .await?
+                .next()
+                .await
+                .map(at!(0))
+                .filter(|&count| matches!(count, PduCount::Normal(_)))
+                .map_or_else(PduCount::max, |count| *v.insert(count))),
         }
     }

     /// Returns the `count` of this pdu's id.
     pub(super) async fn get_pdu_count(&self, event_id: &EventId) -> Result<PduCount> {
-        self.eventid_pduid
-            .get(event_id)
+        self.get_pdu_id(event_id)
             .await
-            .map(|pdu_id| pdu_count(&pdu_id))
+            .map(|pdu_id| pdu_id.pdu_count())
     }

     /// Returns the json of a pdu.
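The new `.map(at!(0))` above replaces pulling the first element out of each `(PduCount, PduEvent)` item by hand. `at!` is imported from conduit's utils; presumably it expands to a closure that projects the given tuple index, roughly like the sketch below (the real macro may differ).

```rust
// Sketch of a tuple-projection macro in the spirit of conduit's `at!`.
macro_rules! at {
    ($idx:tt) => {
        |t| t.$idx
    };
}

fn main() {
    let items = vec![(1_u64, "a"), (2, "b")];
    // Equivalent to `.map(|t| t.0)`.
    let firsts: Vec<u64> = items.into_iter().map(at!(0)).collect();
    assert_eq!(firsts, vec![1, 2]);
}
```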
@@ -102,8 +97,11 @@ impl Data {

     /// Returns the pdu's id.
     #[inline]
-    pub(super) async fn get_pdu_id(&self, event_id: &EventId) -> Result<database::Handle<'_>> {
-        self.eventid_pduid.get(event_id).await
+    pub(super) async fn get_pdu_id(&self, event_id: &EventId) -> Result<RawPduId> {
+        self.eventid_pduid
+            .get(event_id)
+            .await
+            .map(|handle| RawPduId::from(&*handle))
     }

     /// Returns the pdu directly from `eventid_pduid` only.

@@ -154,34 +152,40 @@ impl Data {
     /// Returns the pdu.
     ///
     /// This does __NOT__ check the outliers `Tree`.
-    pub(super) async fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result<PduEvent> {
+    pub(super) async fn get_pdu_from_id(&self, pdu_id: &RawPduId) -> Result<PduEvent> {
         self.pduid_pdu.get(pdu_id).await.deserialized()
     }

     /// Returns the pdu as a `BTreeMap<String, CanonicalJsonValue>`.
-    pub(super) async fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result<CanonicalJsonObject> {
+    pub(super) async fn get_pdu_json_from_id(&self, pdu_id: &RawPduId) -> Result<CanonicalJsonObject> {
         self.pduid_pdu.get(pdu_id).await.deserialized()
     }

-    pub(super) async fn append_pdu(&self, pdu_id: &[u8], pdu: &PduEvent, json: &CanonicalJsonObject, count: u64) {
+    pub(super) async fn append_pdu(
+        &self, pdu_id: &RawPduId, pdu: &PduEvent, json: &CanonicalJsonObject, count: PduCount,
+    ) {
+        debug_assert!(matches!(count, PduCount::Normal(_)), "PduCount not Normal");
+
         self.pduid_pdu.raw_put(pdu_id, Json(json));
         self.lasttimelinecount_cache
             .lock()
             .await
-            .insert(pdu.room_id.clone(), PduCount::Normal(count));
+            .insert(pdu.room_id.clone(), count);

         self.eventid_pduid.insert(pdu.event_id.as_bytes(), pdu_id);
         self.eventid_outlierpdu.remove(pdu.event_id.as_bytes());
     }

-    pub(super) fn prepend_backfill_pdu(&self, pdu_id: &[u8], event_id: &EventId, json: &CanonicalJsonObject) {
+    pub(super) fn prepend_backfill_pdu(&self, pdu_id: &RawPduId, event_id: &EventId, json: &CanonicalJsonObject) {
         self.pduid_pdu.raw_put(pdu_id, Json(json));
         self.eventid_pduid.insert(event_id, pdu_id);
         self.eventid_outlierpdu.remove(event_id);
     }

     /// Removes a pdu and creates a new one with the same id.
-    pub(super) async fn replace_pdu(&self, pdu_id: &[u8], pdu_json: &CanonicalJsonObject, _pdu: &PduEvent) -> Result {
+    pub(super) async fn replace_pdu(
+        &self, pdu_id: &RawPduId, pdu_json: &CanonicalJsonObject, _pdu: &PduEvent,
+    ) -> Result {
         if self.pduid_pdu.get(pdu_id).await.is_not_found() {
             return Err!(Request(NotFound("PDU does not exist.")));
         }
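These signatures now take `&RawPduId` yet still pass it straight to `Map::get` / `raw_put` as the key. That only works if the newtype exposes its bytes to the database layer; one plausible way, shown here as an assumption rather than the commit's actual mechanism, is a plain `AsRef<[u8]>` impl:

```rust
struct RawPduId([u8; 16]);

impl AsRef<[u8]> for RawPduId {
    // Lets any API that accepts byte-slice keys take a &RawPduId
    // without an intermediate Vec<u8> allocation.
    fn as_ref(&self) -> &[u8] { &self.0 }
}
```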
@@ -197,13 +201,14 @@ impl Data {
     pub(super) async fn pdus_until<'a>(
         &'a self, user_id: &'a UserId, room_id: &'a RoomId, until: PduCount,
     ) -> Result<impl Stream<Item = PdusIterItem> + Send + 'a> {
-        let (prefix, current) = self.count_to_id(room_id, until, 1, true).await?;
+        let current = self.count_to_id(room_id, until, true).await?;
+        let prefix = current.shortroomid();
         let stream = self
             .pduid_pdu
             .rev_raw_stream_from(&current)
             .ignore_err()
             .ready_take_while(move |(key, _)| key.starts_with(&prefix))
-            .map(move |item| Self::each_pdu(item, user_id));
+            .map(|item| Self::each_pdu(item, user_id));

         Ok(stream)
     }

@@ -211,7 +216,8 @@ impl Data {
     pub(super) async fn pdus_after<'a>(
         &'a self, user_id: &'a UserId, room_id: &'a RoomId, from: PduCount,
     ) -> Result<impl Stream<Item = PdusIterItem> + Send + 'a> {
-        let (prefix, current) = self.count_to_id(room_id, from, 1, false).await?;
+        let current = self.count_to_id(room_id, from, false).await?;
+        let prefix = current.shortroomid();
         let stream = self
             .pduid_pdu
             .raw_stream_from(&current)

@@ -223,6 +229,8 @@ impl Data {
     }

     fn each_pdu((pdu_id, pdu): KeyVal<'_>, user_id: &UserId) -> PdusIterItem {
+        let pdu_id: RawPduId = pdu_id.into();
+
         let mut pdu =
             serde_json::from_slice::<PduEvent>(pdu).expect("PduEvent in pduid_pdu database column is invalid JSON");

@@ -231,9 +239,8 @@ impl Data {
         }

         pdu.add_age().log_err().ok();
-        let count = pdu_count(pdu_id);

-        (count, pdu)
+        (pdu_id.pdu_count(), pdu)
     }

     pub(super) fn increment_notification_counts(
@@ -256,56 +263,25 @@ impl Data {
         }
     }

-    pub(super) async fn count_to_id(
-        &self, room_id: &RoomId, count: PduCount, offset: u64, subtract: bool,
-    ) -> Result<(Vec<u8>, Vec<u8>)> {
-        let prefix = self
+    async fn count_to_id(&self, room_id: &RoomId, count: PduCount, subtract: bool) -> Result<RawPduId> {
+        let shortroomid: ShortRoomId = self
             .services
             .short
             .get_shortroomid(room_id)
             .await
-            .map_err(|e| err!(Request(NotFound("Room {room_id:?} not found: {e:?}"))))?
-            .to_be_bytes()
-            .to_vec();
+            .map_err(|e| err!(Request(NotFound("Room {room_id:?} not found: {e:?}"))))?;

-        let mut pdu_id = prefix.clone();
         // +1 so we don't send the base event
-        let count_raw = match count {
-            PduCount::Normal(x) => {
-                if subtract {
-                    x.saturating_sub(offset)
-                } else {
-                    x.saturating_add(offset)
-                }
-            },
-            PduCount::Backfilled(x) => {
-                pdu_id.extend_from_slice(&0_u64.to_be_bytes());
-                let num = u64::MAX.saturating_sub(x);
-                if subtract {
-                    num.saturating_sub(offset)
-                } else {
-                    num.saturating_add(offset)
-                }
+        let pdu_id = PduId {
+            shortroomid,
+            shorteventid: if subtract {
+                count.checked_sub(1)?
+            } else {
+                count.checked_add(1)?
             },
         };
-        pdu_id.extend_from_slice(&count_raw.to_be_bytes());

-        Ok((prefix, pdu_id))
-    }
-}
-
-/// Returns the `count` of this pdu's id.
-pub(super) fn pdu_count(pdu_id: &[u8]) -> PduCount {
-    const STRIDE: usize = size_of::<u64>();
-
-    let pdu_id_len = pdu_id.len();
-    let last_u64 = u64_from_u8(&pdu_id[expected!(pdu_id_len - STRIDE)..]);
-    let second_last_u64 = u64_from_u8(&pdu_id[expected!(pdu_id_len - 2 * STRIDE)..expected!(pdu_id_len - STRIDE)]);
-
-    if second_last_u64 == 0 {
-        PduCount::Backfilled(u64::MAX.saturating_sub(last_u64))
-    } else {
-        PduCount::Normal(last_u64)
+        Ok(pdu_id.into())
     }
 }
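The removed free `pdu_count()` helper decoded the count straight out of the raw key bytes; that job now lives behind `RawPduId::pdu_count()`, with `shortroomid()` supplying the range-scan prefix for `pdus_until` / `pdus_after`. Below is a sketch of how the same decode could hang off the new type, assuming the pre-refactor key layout (normal keys are `shortroomid ++ count`, backfilled keys carry an extra zeroed marker segment) and the pre-refactor unsigned `Backfilled` representation; the real method may decode a different layout.

```rust
use std::mem::size_of;

// Pre-refactor representation, kept only so the sketch is self-contained.
enum PduCount {
    Normal(u64),
    Backfilled(u64),
}

// Growable buffer because the pre-refactor keys came in two lengths
// (16 bytes for normal, 24 for backfilled).
struct RawPduId(Vec<u8>);

impl RawPduId {
    /// First 8 bytes: the big-endian ShortRoomId prefix that
    /// `pdus_until` / `pdus_after` use to bound their range scans.
    fn shortroomid(&self) -> [u8; 8] {
        self.0[..8].try_into().expect("pdu id shorter than a ShortRoomId prefix")
    }

    /// The same decode the removed free function performed: a zeroed
    /// second-to-last u64 marks a backfilled key.
    fn pdu_count(&self) -> PduCount {
        const STRIDE: usize = size_of::<u64>();
        let len = self.0.len();
        let last = u64::from_be_bytes(self.0[len - STRIDE..].try_into().unwrap());
        let second_last =
            u64::from_be_bytes(self.0[len - 2 * STRIDE..len - STRIDE].try_into().unwrap());

        if second_last == 0 {
            PduCount::Backfilled(u64::MAX.saturating_sub(last))
        } else {
            PduCount::Normal(last)
        }
    }
}

fn main() {
    // A normal-timeline key: shortroomid 7, count 42.
    let mut key = 7_u64.to_be_bytes().to_vec();
    key.extend_from_slice(&42_u64.to_be_bytes());

    let id = RawPduId(key);
    assert_eq!(id.shortroomid(), 7_u64.to_be_bytes());
    assert!(matches!(id.pdu_count(), PduCount::Normal(42)));
}
```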
@@ -1,5 +1,4 @@
 mod data;
-mod pduid;

 use std::{
     cmp,

@@ -15,6 +14,7 @@ use conduit::{
     utils::{stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt},
     validated, warn, Err, Error, Result, Server,
 };
+pub use conduit::{PduId, RawPduId};
 use futures::{future, future::ready, Future, FutureExt, Stream, StreamExt, TryStreamExt};
 use ruma::{
     api::federation,

@@ -39,13 +39,13 @@ use serde::Deserialize;
 use serde_json::value::{to_raw_value, RawValue as RawJsonValue};

 use self::data::Data;
-pub use self::{
-    data::PdusIterItem,
-    pduid::{PduId, RawPduId},
-};
+pub use self::data::PdusIterItem;
 use crate::{
-    account_data, admin, appservice, appservice::NamespaceRegex, globals, pusher, rooms,
-    rooms::state_compressor::CompressedStateEvent, sending, server_keys, users, Dep,
+    account_data, admin, appservice,
+    appservice::NamespaceRegex,
+    globals, pusher, rooms,
+    rooms::{short::ShortRoomId, state_compressor::CompressedStateEvent},
+    sending, server_keys, users, Dep,
 };

 // Update Relationships
@@ -229,9 +229,7 @@ impl Service {

     /// Returns the pdu's id.
     #[inline]
-    pub async fn get_pdu_id(&self, event_id: &EventId) -> Result<database::Handle<'_>> {
-        self.db.get_pdu_id(event_id).await
-    }
+    pub async fn get_pdu_id(&self, event_id: &EventId) -> Result<RawPduId> { self.db.get_pdu_id(event_id).await }

     /// Returns the pdu.
     ///

@@ -256,16 +254,16 @@ impl Service {
     /// Returns the pdu.
     ///
     /// This does __NOT__ check the outliers `Tree`.
-    pub async fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result<PduEvent> { self.db.get_pdu_from_id(pdu_id).await }
+    pub async fn get_pdu_from_id(&self, pdu_id: &RawPduId) -> Result<PduEvent> { self.db.get_pdu_from_id(pdu_id).await }

     /// Returns the pdu as a `BTreeMap<String, CanonicalJsonValue>`.
-    pub async fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result<CanonicalJsonObject> {
+    pub async fn get_pdu_json_from_id(&self, pdu_id: &RawPduId) -> Result<CanonicalJsonObject> {
         self.db.get_pdu_json_from_id(pdu_id).await
     }

     /// Removes a pdu and creates a new one with the same id.
     #[tracing::instrument(skip(self), level = "debug")]
-    pub async fn replace_pdu(&self, pdu_id: &[u8], pdu_json: &CanonicalJsonObject, pdu: &PduEvent) -> Result<()> {
+    pub async fn replace_pdu(&self, pdu_id: &RawPduId, pdu_json: &CanonicalJsonObject, pdu: &PduEvent) -> Result<()> {
         self.db.replace_pdu(pdu_id, pdu_json, pdu).await
     }

@@ -282,7 +280,7 @@ impl Service {
         mut pdu_json: CanonicalJsonObject,
         leaves: Vec<OwnedEventId>,
         state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex
-    ) -> Result<Vec<u8>> {
+    ) -> Result<RawPduId> {
         // Coalesce database writes for the remainder of this scope.
         let _cork = self.db.db.cork_and_flush();

@@ -359,9 +357,12 @@ impl Service {
             .user
             .reset_notification_counts(&pdu.sender, &pdu.room_id);

-        let count2 = self.services.globals.next_count().unwrap();
-        let mut pdu_id = shortroomid.to_be_bytes().to_vec();
-        pdu_id.extend_from_slice(&count2.to_be_bytes());
+        let count2 = PduCount::Normal(self.services.globals.next_count().unwrap());
+        let pdu_id: RawPduId = PduId {
+            shortroomid,
+            shorteventid: count2,
+        }
+        .into();

         // Insert pdu
         self.db.append_pdu(&pdu_id, pdu, &pdu_json, count2).await;
@@ -544,7 +545,7 @@ impl Service {
             if let Ok(related_pducount) = self.get_pdu_count(&content.relates_to.event_id).await {
                 self.services
                     .pdu_metadata
-                    .add_relation(PduCount::Normal(count2), related_pducount);
+                    .add_relation(count2, related_pducount);
             }
         }

@@ -558,7 +559,7 @@ impl Service {
                 if let Ok(related_pducount) = self.get_pdu_count(&in_reply_to.event_id).await {
                     self.services
                         .pdu_metadata
-                        .add_relation(PduCount::Normal(count2), related_pducount);
+                        .add_relation(count2, related_pducount);
                 }
             },
             Relation::Thread(thread) => {

@@ -580,7 +581,7 @@ impl Service {
         {
             self.services
                 .sending
-                .send_pdu_appservice(appservice.registration.id.clone(), pdu_id.clone())?;
+                .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?;
             continue;
         }

@@ -596,7 +597,7 @@ impl Service {
                 if state_key_uid == appservice_uid {
                     self.services
                         .sending
-                        .send_pdu_appservice(appservice.registration.id.clone(), pdu_id.clone())?;
+                        .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?;
                     continue;
                 }
             }

@@ -623,7 +624,7 @@ impl Service {
         {
             self.services
                 .sending
-                .send_pdu_appservice(appservice.registration.id.clone(), pdu_id.clone())?;
+                .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?;
         }
     }

@@ -935,7 +936,7 @@ impl Service {
         state_ids_compressed: Arc<HashSet<CompressedStateEvent>>,
         soft_fail: bool,
         state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex
-    ) -> Result<Option<Vec<u8>>> {
+    ) -> Result<Option<RawPduId>> {
         // We append to state before appending the pdu, so we don't have a moment in
         // time with the pdu without it's state. This is okay because append_pdu can't
         // fail.

@@ -993,7 +994,7 @@ impl Service {

     /// Replace a PDU with the redacted form.
     #[tracing::instrument(skip(self, reason))]
-    pub async fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent, shortroomid: u64) -> Result<()> {
+    pub async fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent, shortroomid: ShortRoomId) -> Result {
         // TODO: Don't reserialize, keep original json
         let Ok(pdu_id) = self.get_pdu_id(event_id).await else {
             // If event does not exist, just noop

@@ -1133,7 +1134,6 @@ impl Service {

         // Skip the PDU if we already have it as a timeline event
         if let Ok(pdu_id) = self.get_pdu_id(&event_id).await {
-            let pdu_id = pdu_id.to_vec();
             debug!("We already know {event_id} at {pdu_id:?}");
             return Ok(());
         }
@@ -1158,11 +1158,13 @@ impl Service {

         let insert_lock = self.mutex_insert.lock(&room_id).await;

-        let max = u64::MAX;
-        let count = self.services.globals.next_count().unwrap();
-        let mut pdu_id = shortroomid.to_be_bytes().to_vec();
-        pdu_id.extend_from_slice(&0_u64.to_be_bytes());
-        pdu_id.extend_from_slice(&(validated!(max - count)).to_be_bytes());
+        let count: i64 = self.services.globals.next_count().unwrap().try_into()?;
+
+        let pdu_id: RawPduId = PduId {
+            shortroomid,
+            shorteventid: PduCount::Backfilled(validated!(0 - count)),
+        }
+        .into();

         // Insert pdu
         self.db.prepend_backfill_pdu(&pdu_id, &event_id, &value);
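Note the change of shape in the backfill arithmetic: the old code baked "n events behind the origin" directly into the key as a zero marker plus `u64::MAX - n`, while the new code records `PduCount::Backfilled(0 - n)` and leaves the byte layout to the `PduId` to `RawPduId` conversion. A tiny check relating the two representations; the mapping inside the conversion is an assumption, not taken from the commit:

```rust
fn main() {
    // e.g. the fifth event fetched through backfill
    let count: u64 = 5;

    // Pre-refactor key suffix: a zero marker segment, then a descending value.
    let old_suffix = (u64::MAX - count).to_be_bytes();

    // Post-refactor: the position is just a signed offset behind the origin,
    // i.e. PduCount::Backfilled(-5).
    let new_offset: i64 = 0 - count as i64;

    // The descending suffix is recoverable from the signed offset when the
    // structured id gets packed into bytes (assumed mapping).
    assert_eq!(old_suffix, (u64::MAX - new_offset.unsigned_abs()).to_be_bytes());
}
```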
@@ -1246,16 +1248,3 @@ async fn check_pdu_for_admin_room(&self, pdu: &PduEvent, sender: &UserId) -> Res

     Ok(())
 }
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn comparisons() {
-        assert!(PduCount::Normal(1) < PduCount::Normal(2));
-        assert!(PduCount::Backfilled(2) < PduCount::Backfilled(1));
-        assert!(PduCount::Normal(1) > PduCount::Backfilled(1));
-        assert!(PduCount::Backfilled(1) < PduCount::Normal(1));
-    }
-}
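The removed `comparisons()` test pinned down PduCount's ordering: normal counts ascend, backfilled counts sort in reverse of their magnitude, and any backfilled count sorts before any normal one. Given the commit message's "implement standard traits for PduCount", one way those assertions fall out for free is to declare the backfilled variant first and give it a signed offset, then derive the ordering. This is a sketch, not necessarily what the commit actually does:

```rust
// Variant order matters: derived Ord sorts Backfilled(_) < Normal(_),
// and the signed offset makes "further back" compare as smaller.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum PduCount {
    Backfilled(i64),
    Normal(u64),
}

fn main() {
    // The removed assertions, restated with negative backfill offsets.
    assert!(PduCount::Normal(1) < PduCount::Normal(2));
    assert!(PduCount::Backfilled(-2) < PduCount::Backfilled(-1));
    assert!(PduCount::Normal(1) > PduCount::Backfilled(-1));
    assert!(PduCount::Backfilled(-1) < PduCount::Normal(1));
}
```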
@@ -1,13 +0,0 @@
-use crate::rooms::short::{ShortEventId, ShortRoomId};
-
-#[derive(Clone, Copy)]
-pub struct PduId {
-    _room_id: ShortRoomId,
-    _event_id: ShortEventId,
-}
-
-pub type RawPduId = [u8; PduId::LEN];
-
-impl PduId {
-    pub const LEN: usize = size_of::<ShortRoomId>() + size_of::<ShortEventId>();
-}
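This deleted module is what the commit title calls out: `RawPduId` used to be a bare `[u8; PduId::LEN]` alias, so any 16-byte array, or any other alias of that array type, was accepted wherever a pdu id was expected, and the compiler could not tell them apart. A newtype wrapper, as sketched earlier, closes that hole; roughly (hypothetical names):

```rust
// Before: just an alias; the compiler sees only [u8; 16].
type WeakRawPduId = [u8; 16];

// After: a distinct nominal type.
struct RawPduId([u8; 16]);

fn take_pdu_id(_id: RawPduId) {}

fn main() {
    let some_other_key: WeakRawPduId = [0; 16];

    // With the alias this would have been accepted silently;
    // with the newtype it only compiles through an explicit conversion.
    take_pdu_id(RawPduId(some_other_key));
}
```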