refactor incoming extremities retention; broad filter, single pass
Signed-off-by: Jason Volk <jason@zemos.net>
parent 31c2968bb2
commit 1a8482b3b4

4 changed files with 74 additions and 68 deletions
@@ -1,6 +1,7 @@
 mod data;
 
 use std::{
+	borrow::Borrow,
 	cmp,
 	collections::{BTreeMap, HashSet},
 	fmt::Write,
@@ -260,14 +261,16 @@ impl Service {
 	///
 	/// Returns pdu id
 	#[tracing::instrument(level = "debug", skip_all)]
-	pub async fn append_pdu(
-		&self,
-		pdu: &PduEvent,
+	pub async fn append_pdu<'a, Leafs>(
+		&'a self,
+		pdu: &'a PduEvent,
 		mut pdu_json: CanonicalJsonObject,
-		leaves: Vec<OwnedEventId>,
-		state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room state
-		                              * mutex */
-	) -> Result<RawPduId> {
+		leafs: Leafs,
+		state_lock: &'a RoomMutexGuard,
+	) -> Result<RawPduId>
+	where
+		Leafs: Iterator<Item = &'a EventId> + Send + 'a,
+	{
 		// Coalesce database writes for the remainder of this scope.
 		let _cork = self.db.db.cork_and_flush();
 
@@ -335,7 +338,7 @@ impl Service {
 
 		self.services
 			.state
-			.set_forward_extremities(&pdu.room_id, leaves, state_lock)
+			.set_forward_extremities(&pdu.room_id, leafs, state_lock)
 			.await;
 
 		let insert_lock = self.mutex_insert.lock(&pdu.room_id).await;
@@ -819,8 +822,7 @@ impl Service {
 		pdu_builder: PduBuilder,
 		sender: &UserId,
 		room_id: &RoomId,
-		state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room state
-		                              * mutex */
+		state_lock: &RoomMutexGuard,
 	) -> Result<OwnedEventId> {
 		let (pdu, pdu_json) = self
 			.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)
@@ -896,7 +898,7 @@ impl Service {
 				pdu_json,
 				// Since this PDU references all pdu_leaves we can update the leaves
 				// of the room
-				vec![(*pdu.event_id).to_owned()],
+				once(pdu.event_id.borrow()),
 				state_lock,
 			)
 			.boxed()
@@ -943,16 +945,18 @@ impl Service {
 	/// Append the incoming event setting the state snapshot to the state from
 	/// the server that sent the event.
 	#[tracing::instrument(level = "debug", skip_all)]
-	pub async fn append_incoming_pdu(
-		&self,
-		pdu: &PduEvent,
+	pub async fn append_incoming_pdu<'a, Leafs>(
+		&'a self,
+		pdu: &'a PduEvent,
 		pdu_json: CanonicalJsonObject,
-		new_room_leaves: Vec<OwnedEventId>,
+		new_room_leafs: Leafs,
 		state_ids_compressed: Arc<HashSet<CompressedStateEvent>>,
 		soft_fail: bool,
-		state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room state
-		                              * mutex */
-	) -> Result<Option<RawPduId>> {
+		state_lock: &'a RoomMutexGuard,
+	) -> Result<Option<RawPduId>>
+	where
+		Leafs: Iterator<Item = &'a EventId> + Send + 'a,
+	{
 		// We append to state before appending the pdu, so we don't have a moment in
 		// time with the pdu without it's state. This is okay because append_pdu can't
 		// fail.
@@ -968,14 +972,14 @@ impl Service {
 
 			self.services
 				.state
-				.set_forward_extremities(&pdu.room_id, new_room_leaves, state_lock)
+				.set_forward_extremities(&pdu.room_id, new_room_leafs, state_lock)
 				.await;
 
 			return Ok(None);
 		}
 
 		let pdu_id = self
-			.append_pdu(pdu, pdu_json, new_room_leaves, state_lock)
+			.append_pdu(pdu, pdu_json, new_room_leafs, state_lock)
 			.await?;
 
 		Ok(Some(pdu_id))
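The hunks above change `append_pdu` and `append_incoming_pdu` to take their forward-extremity leaves as a generic borrowing iterator, `Leafs: Iterator<Item = &'a EventId> + Send + 'a`, instead of an owned `Vec<OwnedEventId>`. Callers now lend event IDs rather than cloning them into a fresh vector; the single-event call site becomes `once(pdu.event_id.borrow())` where it previously allocated `vec![(*pdu.event_id).to_owned()]`. Below is a minimal sketch of that pattern, with `String`/`&str` standing in for ruma's `OwnedEventId`/`EventId`; all names and values are illustrative, not from this commit.

use std::iter::once;

// The shape of the refactored API: a borrowing iterator of IDs.
fn set_forward_extremities<'a, Leafs>(room_id: &str, leafs: Leafs)
where
	Leafs: Iterator<Item = &'a str> + Send + 'a,
{
	// One pass over the borrowed leaves; no intermediate Vec is built.
	for leaf in leafs {
		println!("retain {leaf} as a forward extremity of {room_id}");
	}
}

fn main() {
	// A caller holding owned IDs lends them through a borrowing iterator.
	let leaves: Vec<String> = vec!["$a:example.org".into(), "$b:example.org".into()];
	set_forward_extremities("!room:example.org", leaves.iter().map(String::as_str));

	// A caller with exactly one new leaf uses iter::once, mirroring
	// once(pdu.event_id.borrow()) in the hunk at line 896.
	let event_id = String::from("$c:example.org");
	set_forward_extremities("!room:example.org", once(event_id.as_str()));
}

The `Send + 'a` bound mirrors the diff; presumably the iterator is held across `.await` points inside these async methods, so it must be `Send` for their futures to remain `Send`.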