make pdu stuff async, remove unnecessary db version check

Signed-off-by: strawberry <strawberry@pupbrain.dev>
strawberry 2023-11-25 18:29:38 -05:00
parent 4d7b5eb759
commit 6958c720d0
14 changed files with 805 additions and 645 deletions
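
Every hunk below follows the same pattern: the PDU append path (`append_pdu`, `append_incoming_pdu`, `build_and_append_pdu`, `update_membership`) becomes `async fn`, each call site gains `.await?`, and the per-room insert lock is taken with `.lock().await` instead of `.lock().unwrap()`. A minimal sketch of that conversion, assuming the tokio runtime; the `Timeline` type and method bodies here are illustrative only, not Conduit's actual code:

```rust
use tokio::sync::Mutex;

struct Timeline {
    // one lock guarding inserts; the real service keeps one per room
    insert_lock: Mutex<()>,
}

impl Timeline {
    // was `pub fn append_pdu(..)` taking the lock with `.lock().unwrap()`;
    // now an async fn that awaits the tokio mutex instead of blocking
    async fn append_pdu(&self, pdu: &str) -> Result<Vec<u8>, ()> {
        let _insert_lock = self.insert_lock.lock().await;
        // ... persist the PDU while the guard is held ...
        Ok(pdu.as_bytes().to_vec())
    }

    // callers become async too and propagate the `.await?`
    async fn append_incoming_pdu(&self, pdu: &str) -> Result<Option<Vec<u8>>, ()> {
        let pdu_id = self.append_pdu(pdu).await?;
        Ok(Some(pdu_id))
    }
}

#[tokio::main]
async fn main() {
    let timeline = Timeline { insert_lock: Mutex::new(()) };
    let pdu_id = timeline.append_incoming_pdu("event").await.unwrap();
    println!("{pdu_id:?}");
}
```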


@@ -860,14 +860,18 @@ impl Service {
         debug!("Starting soft fail auth check");

         if soft_fail {
-            services().rooms.timeline.append_incoming_pdu(
-                &incoming_pdu,
-                val,
-                extremities.iter().map(|e| (**e).to_owned()).collect(),
-                state_ids_compressed,
-                soft_fail,
-                &state_lock,
-            )?;
+            services()
+                .rooms
+                .timeline
+                .append_incoming_pdu(
+                    &incoming_pdu,
+                    val,
+                    extremities.iter().map(|e| (**e).to_owned()).collect(),
+                    state_ids_compressed,
+                    soft_fail,
+                    &state_lock,
+                )
+                .await?;

             // Soft fail, we keep the event as an outlier but don't add it to the timeline
             warn!("Event was soft failed: {:?}", incoming_pdu);
@@ -888,14 +892,18 @@ impl Service {
         // We use the `state_at_event` instead of `state_after` so we accurately
         // represent the state for this event.
-        let pdu_id = services().rooms.timeline.append_incoming_pdu(
-            &incoming_pdu,
-            val,
-            extremities.iter().map(|e| (**e).to_owned()).collect(),
-            state_ids_compressed,
-            soft_fail,
-            &state_lock,
-        )?;
+        let pdu_id = services()
+            .rooms
+            .timeline
+            .append_incoming_pdu(
+                &incoming_pdu,
+                val,
+                extremities.iter().map(|e| (**e).to_owned()).collect(),
+                state_ids_compressed,
+                soft_fail,
+                &state_lock,
+            )
+            .await?;

         debug!("Appended incoming pdu");


@@ -205,7 +205,7 @@ impl Service {
             )
             .await
         {
-            warn!("Got response from {server} for /hierarchy\n{response:?}");
+            debug!("Got response from {server} for /hierarchy\n{response:?}");
             let chunk = SpaceHierarchyRoomsChunk {
                 canonical_alias: response.room.canonical_alias,
                 name: response.room.name,

@@ -80,14 +80,11 @@ impl Service {
                     Err(_) => continue,
                 };

-                services().rooms.state_cache.update_membership(
-                    room_id,
-                    &user_id,
-                    membership,
-                    &pdu.sender,
-                    None,
-                    false,
-                )?;
+                services()
+                    .rooms
+                    .state_cache
+                    .update_membership(room_id, &user_id, membership, &pdu.sender, None, false)
+                    .await?;
             }
             TimelineEventType::SpaceChild => {
                 services()

@@ -25,7 +25,7 @@ pub struct Service {
 impl Service {
     /// Update current membership data.
     #[tracing::instrument(skip(self, last_state))]
-    pub fn update_membership(
+    pub async fn update_membership(
         &self,
         room_id: &RoomId,
         user_id: &UserId,

@@ -213,7 +213,7 @@ impl Service {
     ///
     /// Returns pdu id
     #[tracing::instrument(skip(self, pdu, pdu_json, leaves))]
-    pub fn append_pdu<'a>(
+    pub async fn append_pdu<'a>(
         &self,
         pdu: &PduEvent,
         mut pdu_json: CanonicalJsonObject,
@@ -279,7 +279,7 @@ impl Service {
                 .entry(pdu.room_id.clone())
                 .or_default(),
         );
-        let insert_lock = mutex_insert.lock().unwrap();
+        let insert_lock = mutex_insert.lock().await;

         let count1 = services().globals.next_count()?;
         // Mark as read first so the sending client doesn't get a notification even if appending
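
The `mutex_insert` being locked above comes from a per-room map, so appends to different rooms do not serialize behind one global lock. A rough sketch of that shape, assuming tokio's `RwLock` and `Mutex`; `RoomLocks` and `insert_lock_for` are hypothetical names for this sketch, not the project's:

```rust
use std::{collections::HashMap, sync::Arc};
use tokio::sync::{Mutex, RwLock};

// one Arc<Mutex<()>> per room, created on demand
type RoomLocks = RwLock<HashMap<String, Arc<Mutex<()>>>>;

async fn insert_lock_for(locks: &RoomLocks, room_id: &str) -> Arc<Mutex<()>> {
    locks
        .write()
        .await
        .entry(room_id.to_owned())
        .or_default()
        .clone()
}

#[tokio::main]
async fn main() {
    let locks = RoomLocks::default();
    let mutex_insert = insert_lock_for(&locks, "!room:example.org").await;
    // `.lock().await` yields to the executor while waiting instead of
    // blocking the worker thread as `.lock().unwrap()` on a std mutex would
    let _insert_lock = mutex_insert.lock().await;
    // ... allocate the next count and append the PDU while holding the lock ...
}
```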
@@ -422,14 +422,18 @@ impl Service {
                     // Update our membership info, we do this here incase a user is invited
                     // and immediately leaves we need the DB to record the invite event for auth
-                    services().rooms.state_cache.update_membership(
-                        &pdu.room_id,
-                        &target_user_id,
-                        content.membership,
-                        &pdu.sender,
-                        invite_state,
-                        true,
-                    )?;
+                    services()
+                        .rooms
+                        .state_cache
+                        .update_membership(
+                            &pdu.room_id,
+                            &target_user_id,
+                            content.membership,
+                            &pdu.sender,
+                            invite_state,
+                            true,
+                        )
+                        .await?;
                 }
             }
             TimelineEventType::RoomMessage => {
@@ -655,7 +659,7 @@ impl Service {
             .as_ref()
             .map(|create_event| {
                 serde_json::from_str(create_event.content.get()).map_err(|e| {
-                    warn!("Invalid create event: {}", e);
+                    warn!("Invalid database create event: {}", e);
                     Error::bad_database("Invalid create event in db.")
                 })
             })
@@ -809,7 +813,7 @@ impl Service {
     /// Creates a new persisted data unit and adds it to a room. This function takes a
     /// roomid_mutex_state, meaning that only this function is able to mutate the room state.
    #[tracing::instrument(skip(self, state_lock))]
-    pub fn build_and_append_pdu(
+    pub async fn build_and_append_pdu(
         &self,
         pdu_builder: PduBuilder,
         sender: &UserId,
@@ -909,14 +913,16 @@ impl Service {
         // pdu without it's state. This is okay because append_pdu can't fail.
         let statehashid = services().rooms.state.append_to_state(&pdu)?;

-        let pdu_id = self.append_pdu(
-            &pdu,
-            pdu_json,
-            // Since this PDU references all pdu_leaves we can update the leaves
-            // of the room
-            vec![(*pdu.event_id).to_owned()],
-            state_lock,
-        )?;
+        let pdu_id = self
+            .append_pdu(
+                &pdu,
+                pdu_json,
+                // Since this PDU references all pdu_leaves we can update the leaves
+                // of the room
+                vec![(*pdu.event_id).to_owned()],
+                state_lock,
+            )
+            .await?;

         // We set the room state after inserting the pdu, so that we never have a moment in time
         // where events in the current room state do not exist
@@ -954,7 +960,7 @@ impl Service {
     /// Append the incoming event setting the state snapshot to the state from the
     /// server that sent the event.
     #[tracing::instrument(skip_all)]
-    pub fn append_incoming_pdu<'a>(
+    pub async fn append_incoming_pdu<'a>(
         &self,
         pdu: &PduEvent,
         pdu_json: CanonicalJsonObject,
@@ -984,11 +990,11 @@ impl Service {
             return Ok(None);
         }

-        let pdu_id =
-            services()
-                .rooms
-                .timeline
-                .append_pdu(pdu, pdu_json, new_room_leaves, state_lock)?;
+        let pdu_id = services()
+            .rooms
+            .timeline
+            .append_pdu(pdu, pdu_json, new_room_leaves, state_lock)
+            .await?;

         Ok(Some(pdu_id))
     }
@@ -1169,7 +1175,7 @@ impl Service {
                 .entry(room_id.clone())
                 .or_default(),
         );
-        let insert_lock = mutex_insert.lock().unwrap();
+        let insert_lock = mutex_insert.lock().await;

         let count = services().globals.next_count()?;
         let mut pdu_id = shortroomid.to_be_bytes().to_vec();
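
The guard swap goes hand in hand with making these functions async: a `std::sync::MutexGuard` is not `Send`, so holding it across an `.await` would make the resulting futures unusable with `tokio::spawn`, while tokio's async mutex guard is meant to be held across await points. A small illustration under those assumptions, with made-up names:

```rust
use std::sync::Arc;

// holding a tokio::sync::MutexGuard across an .await keeps the future Send;
// a std::sync::MutexGuard held across the same .await would not
async fn bump(counter: Arc<tokio::sync::Mutex<u64>>) {
    let mut count = counter.lock().await;
    *count += 1;
    tokio::task::yield_now().await; // await point while the guard is held
}

#[tokio::main]
async fn main() {
    let counter = Arc::new(tokio::sync::Mutex::new(0));
    // tokio::spawn requires a Send future; this compiles with the async
    // mutex, but would fail to compile with std::sync::Mutex in `bump`
    tokio::spawn(bump(Arc::clone(&counter))).await.unwrap();
    println!("count = {}", *counter.lock().await);
}
```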