apply new rustfmt.toml changes, fix some clippy lints

Signed-off-by: strawberry <strawberry@puppygock.gay>
strawberry 2024-12-15 00:05:47 -05:00
parent 0317cc8cc5
commit 77e0b76408
296 changed files with 7147 additions and 4300 deletions
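
The reformatting visible throughout this diff (match arms prefixed with a leading "|", a wrap width of roughly 100 columns instead of ~120, and a trailing struct literal kept on the call line, as in send_federation_request(origin, get_event::v1::Request { ... })) is the style a rustfmt.toml along these lines would produce. The commit's actual option values are not shown on this page, so the excerpt below is a hedged sketch, not the committed file:

    # rustfmt.toml (hypothetical excerpt; exact values are assumptions)
    max_width = 100                      # assumed: new lines wrap near 100 columns
    match_arm_leading_pipes = "Always"   # produces the "| Ok(res) => { ... }" match arms below
    overflow_delimited_expr = true       # lets a final struct literal overflow into the call parentheses

overflow_delimited_expr is an unstable rustfmt option, so this style needs a nightly rustfmt (for example via cargo +nightly fmt).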

@@ -5,10 +5,14 @@ use std::{
};
use conduwuit::{
debug, debug_error, implement, info, pdu, trace, utils::math::continue_exponential_backoff_secs, warn, PduEvent,
debug, debug_error, implement, info, pdu, trace,
utils::math::continue_exponential_backoff_secs, warn, PduEvent,
};
use futures::TryFutureExt;
use ruma::{api::federation::event::get_event, CanonicalJsonValue, EventId, RoomId, RoomVersionId, ServerName};
use ruma::{
api::federation::event::get_event, CanonicalJsonValue, EventId, RoomId, RoomVersionId,
ServerName,
};
/// Find the event and auth it. Once the event is validated (steps 1 - 8)
/// it is appended to the outliers Tree.
@@ -21,7 +25,11 @@ use ruma::{api::federation::event::get_event, CanonicalJsonValue, EventId, RoomI
/// d. TODO: Ask other servers over federation?
#[implement(super::Service)]
pub(super) async fn fetch_and_handle_outliers<'a>(
&self, origin: &'a ServerName, events: &'a [Arc<EventId>], create_event: &'a PduEvent, room_id: &'a RoomId,
&self,
origin: &'a ServerName,
events: &'a [Arc<EventId>],
create_event: &'a PduEvent,
room_id: &'a RoomId,
room_version_id: &'a RoomVersionId,
) -> Vec<(Arc<PduEvent>, Option<BTreeMap<String, CanonicalJsonValue>>)> {
let back_off = |id| match self
@@ -32,10 +40,12 @@ pub(super) async fn fetch_and_handle_outliers<'a>(
.expect("locked")
.entry(id)
{
hash_map::Entry::Vacant(e) => {
| hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
},
hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1.saturating_add(1)),
| hash_map::Entry::Occupied(mut e) => {
*e.get_mut() = (Instant::now(), e.get().1.saturating_add(1));
},
};
let mut events_with_auth_events = Vec::with_capacity(events.len());
@@ -67,7 +77,12 @@ pub(super) async fn fetch_and_handle_outliers<'a>(
// Exponential backoff
const MIN_DURATION: u64 = 5 * 60;
const MAX_DURATION: u64 = 60 * 60 * 24;
if continue_exponential_backoff_secs(MIN_DURATION, MAX_DURATION, time.elapsed(), *tries) {
if continue_exponential_backoff_secs(
MIN_DURATION,
MAX_DURATION,
time.elapsed(),
*tries,
) {
info!("Backing off from {next_id}");
continue;
}
@@ -86,18 +101,16 @@ pub(super) async fn fetch_and_handle_outliers<'a>(
match self
.services
.sending
.send_federation_request(
origin,
get_event::v1::Request {
event_id: (*next_id).to_owned(),
include_unredacted_content: None,
},
)
.send_federation_request(origin, get_event::v1::Request {
event_id: (*next_id).to_owned(),
include_unredacted_content: None,
})
.await
{
Ok(res) => {
| Ok(res) => {
debug!("Got {next_id} over federation");
let Ok((calculated_event_id, value)) = pdu::gen_event_id_canonical_json(&res.pdu, room_version_id)
let Ok((calculated_event_id, value)) =
pdu::gen_event_id_canonical_json(&res.pdu, room_version_id)
else {
back_off((*next_id).to_owned());
continue;
@@ -105,15 +118,18 @@ pub(super) async fn fetch_and_handle_outliers<'a>(
if calculated_event_id != *next_id {
warn!(
"Server didn't return event id we requested: requested: {next_id}, we got \
{calculated_event_id}. Event: {:?}",
"Server didn't return event id we requested: requested: {next_id}, \
we got {calculated_event_id}. Event: {:?}",
&res.pdu
);
}
if let Some(auth_events) = value.get("auth_events").and_then(|c| c.as_array()) {
if let Some(auth_events) = value.get("auth_events").and_then(|c| c.as_array())
{
for auth_event in auth_events {
if let Ok(auth_event) = serde_json::from_value(auth_event.clone().into()) {
if let Ok(auth_event) =
serde_json::from_value(auth_event.clone().into())
{
let a: Arc<EventId> = auth_event;
todo_auth_events.push(a);
} else {
@@ -127,7 +143,7 @@ pub(super) async fn fetch_and_handle_outliers<'a>(
events_in_reverse_order.push((next_id.clone(), value));
events_all.insert(next_id);
},
Err(e) => {
| Err(e) => {
debug_error!("Failed to fetch event {next_id}: {e}");
back_off((*next_id).to_owned());
},
@@ -158,20 +174,32 @@ pub(super) async fn fetch_and_handle_outliers<'a>(
// Exponential backoff
const MIN_DURATION: u64 = 5 * 60;
const MAX_DURATION: u64 = 60 * 60 * 24;
if continue_exponential_backoff_secs(MIN_DURATION, MAX_DURATION, time.elapsed(), *tries) {
if continue_exponential_backoff_secs(
MIN_DURATION,
MAX_DURATION,
time.elapsed(),
*tries,
) {
debug!("Backing off from {next_id}");
continue;
}
}
match Box::pin(self.handle_outlier_pdu(origin, create_event, &next_id, room_id, value.clone(), true)).await
match Box::pin(self.handle_outlier_pdu(
origin,
create_event,
&next_id,
room_id,
value.clone(),
true,
))
.await
{
Ok((pdu, json)) => {
| Ok((pdu, json)) =>
if next_id == *id {
pdus.push((pdu, Some(json)));
}
},
Err(e) => {
},
| Err(e) => {
warn!("Authentication of event {next_id} failed: {e:?}");
back_off(next_id.into());
},

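The backoff guards in the hunks above pass MIN_DURATION (5 minutes), MAX_DURATION (24 hours), the elapsed time, and the retry count to conduwuit's continue_exponential_backoff_secs helper. As a rough illustration of the kind of check such a helper performs, here is a hypothetical sketch (named continue_backoff_sketch on purpose; the real implementation in conduwuit::utils::math may differ):

    use std::time::Duration;

    // Keep backing off while the time since the last failure is still shorter
    // than min(min_secs * 2^tries, max_secs).
    fn continue_backoff_sketch(min_secs: u64, max_secs: u64, elapsed: Duration, tries: u32) -> bool {
        let backoff_secs = min_secs
            .saturating_mul(2u64.saturating_pow(tries))
            .min(max_secs);
        elapsed < Duration::from_secs(backoff_secs)
    }
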
@@ -8,7 +8,8 @@ use futures::{future, FutureExt};
use ruma::{
int,
state_res::{self},
uint, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName,
uint, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId,
ServerName,
};
use super::check_room_id;
@@ -17,7 +18,11 @@ use super::check_room_id;
#[allow(clippy::type_complexity)]
#[tracing::instrument(skip_all)]
pub(super) async fn fetch_prev(
&self, origin: &ServerName, create_event: &PduEvent, room_id: &RoomId, room_version_id: &RoomVersionId,
&self,
origin: &ServerName,
create_event: &PduEvent,
room_id: &RoomId,
room_version_id: &RoomVersionId,
initial_set: Vec<Arc<EventId>>,
) -> Result<(
Vec<Arc<EventId>>,
@@ -35,7 +40,13 @@ pub(super) async fn fetch_prev(
self.services.server.check_running()?;
if let Some((pdu, mut json_opt)) = self
.fetch_and_handle_outliers(origin, &[prev_event_id.clone()], create_event, room_id, room_version_id)
.fetch_and_handle_outliers(
origin,
&[prev_event_id.clone()],
create_event,
room_id,
room_version_id,
)
.boxed()
.await
.pop()
@@ -67,7 +78,8 @@ pub(super) async fn fetch_prev(
}
}
graph.insert(prev_event_id.clone(), pdu.prev_events.iter().cloned().collect());
graph
.insert(prev_event_id.clone(), pdu.prev_events.iter().cloned().collect());
} else {
// Time based check failed
graph.insert(prev_event_id.clone(), HashSet::new());

@@ -6,7 +6,8 @@ use std::{
use conduwuit::{debug, implement, warn, Err, Error, PduEvent, Result};
use futures::FutureExt;
use ruma::{
api::federation::event::get_room_state_ids, events::StateEventType, EventId, RoomId, RoomVersionId, ServerName,
api::federation::event::get_room_state_ids, events::StateEventType, EventId, RoomId,
RoomVersionId, ServerName,
};
/// Call /state_ids to find out what the state at this pdu is. We trust the
@@ -15,20 +16,21 @@ use ruma::{
#[implement(super::Service)]
#[tracing::instrument(skip(self, create_event, room_version_id))]
pub(super) async fn fetch_state(
&self, origin: &ServerName, create_event: &PduEvent, room_id: &RoomId, room_version_id: &RoomVersionId,
&self,
origin: &ServerName,
create_event: &PduEvent,
room_id: &RoomId,
room_version_id: &RoomVersionId,
event_id: &EventId,
) -> Result<Option<HashMap<u64, Arc<EventId>>>> {
debug!("Fetching state ids");
let res = self
.services
.sending
.send_federation_request(
origin,
get_room_state_ids::v1::Request {
room_id: room_id.to_owned(),
event_id: (*event_id).to_owned(),
},
)
.send_federation_request(origin, get_room_state_ids::v1::Request {
room_id: room_id.to_owned(),
event_id: (*event_id).to_owned(),
})
.await
.inspect_err(|e| warn!("Fetching state for event failed: {e}"))?;
@@ -58,14 +60,13 @@ pub(super) async fn fetch_state(
.await;
match state.entry(shortstatekey) {
hash_map::Entry::Vacant(v) => {
| hash_map::Entry::Vacant(v) => {
v.insert(Arc::from(&*pdu.event_id));
},
hash_map::Entry::Occupied(_) => {
| hash_map::Entry::Occupied(_) =>
return Err(Error::bad_database(
"State event's type and state_key combination exists multiple times.",
))
},
)),
}
}

@@ -7,7 +7,8 @@ use std::{
use conduwuit::{debug, err, implement, warn, Error, Result};
use futures::{FutureExt, TryFutureExt};
use ruma::{
api::client::error::ErrorKind, events::StateEventType, CanonicalJsonValue, EventId, RoomId, ServerName, UserId,
api::client::error::ErrorKind, events::StateEventType, CanonicalJsonValue, EventId, RoomId,
ServerName, UserId,
};
use super::{check_room_id, get_room_version_id};
@@ -43,8 +44,12 @@ use crate::rooms::timeline::RawPduId;
#[implement(super::Service)]
#[tracing::instrument(skip(self, origin, value, is_timeline_event), name = "pdu")]
pub async fn handle_incoming_pdu<'a>(
&self, origin: &'a ServerName, room_id: &'a RoomId, event_id: &'a EventId,
value: BTreeMap<String, CanonicalJsonValue>, is_timeline_event: bool,
&self,
origin: &'a ServerName,
room_id: &'a RoomId,
event_id: &'a EventId,
value: BTreeMap<String, CanonicalJsonValue>,
is_timeline_event: bool,
) -> Result<Option<RawPduId>> {
// 1. Skip the PDU if we already have it as a timeline event
if let Ok(pdu_id) = self.services.timeline.get_pdu_id(event_id).await {
@@ -144,10 +149,10 @@ pub async fn handle_incoming_pdu<'a>(
.expect("locked")
.entry(prev_id.into())
{
Entry::Vacant(e) => {
| Entry::Vacant(e) => {
e.insert((now, 1));
},
Entry::Occupied(mut e) => {
| Entry::Occupied(mut e) => {
*e.get_mut() = (now, e.get().1.saturating_add(1));
},
};

@@ -17,8 +17,13 @@ use super::{check_room_id, get_room_version_id, to_room_version};
#[implement(super::Service)]
#[allow(clippy::too_many_arguments)]
pub(super) async fn handle_outlier_pdu<'a>(
&self, origin: &'a ServerName, create_event: &'a PduEvent, event_id: &'a EventId, room_id: &'a RoomId,
mut value: CanonicalJsonObject, auth_events_known: bool,
&self,
origin: &'a ServerName,
create_event: &'a PduEvent,
event_id: &'a EventId,
room_id: &'a RoomId,
mut value: CanonicalJsonObject,
auth_events_known: bool,
) -> Result<(Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)> {
// 1. Remove unsigned field
value.remove("unsigned");
@@ -34,8 +39,8 @@ pub(super) async fn handle_outlier_pdu<'a>(
.verify_event(&value, Some(&room_version_id))
.await
{
Ok(ruma::signatures::Verified::All) => value,
Ok(ruma::signatures::Verified::Signatures) => {
| Ok(ruma::signatures::Verified::All) => value,
| Ok(ruma::signatures::Verified::Signatures) => {
// Redact
debug_info!("Calculated hash does not match (redaction): {event_id}");
let Ok(obj) = ruma::canonical_json::redact(value, &room_version_id, None) else {
@@ -44,24 +49,26 @@ pub(super) async fn handle_outlier_pdu<'a>(
// Skip the PDU if it is redacted and we already have it as an outlier event
if self.services.timeline.pdu_exists(event_id).await {
return Err!(Request(InvalidParam("Event was redacted and we already knew about it")));
return Err!(Request(InvalidParam(
"Event was redacted and we already knew about it"
)));
}
obj
},
Err(e) => {
| Err(e) =>
return Err!(Request(InvalidParam(debug_error!(
"Signature verification failed for {event_id}: {e}"
))))
},
)))),
};
// Now that we have checked the signature and hashes we can add the eventID and
// convert to our PduEvent type
val.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned()));
let incoming_pdu =
serde_json::from_value::<PduEvent>(serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"))
.map_err(|_| Error::bad_database("Event is not a valid PDU."))?;
let incoming_pdu = serde_json::from_value::<PduEvent>(
serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"),
)
.map_err(|_| Error::bad_database("Event is not a valid PDU."))?;
check_room_id(room_id, &incoming_pdu)?;
@@ -108,10 +115,10 @@ pub(super) async fn handle_outlier_pdu<'a>(
.clone()
.expect("all auth events have state keys"),
)) {
hash_map::Entry::Vacant(v) => {
| hash_map::Entry::Vacant(v) => {
v.insert(auth_event);
},
hash_map::Entry::Occupied(_) => {
| hash_map::Entry::Occupied(_) => {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Auth event's type and state_key combination exists multiple times.",

@@ -4,7 +4,9 @@ use std::{
time::Instant,
};
use conduwuit::{debug, implement, utils::math::continue_exponential_backoff_secs, Error, PduEvent, Result};
use conduwuit::{
debug, implement, utils::math::continue_exponential_backoff_secs, Error, PduEvent, Result,
};
use ruma::{api::client::error::ErrorKind, CanonicalJsonValue, EventId, RoomId, ServerName};
#[implement(super::Service)]
@@ -15,15 +17,23 @@ use ruma::{api::client::error::ErrorKind, CanonicalJsonValue, EventId, RoomId, S
name = "prev"
)]
pub(super) async fn handle_prev_pdu<'a>(
&self, origin: &'a ServerName, event_id: &'a EventId, room_id: &'a RoomId,
eventid_info: &mut HashMap<Arc<EventId>, (Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)>,
create_event: &Arc<PduEvent>, first_pdu_in_room: &Arc<PduEvent>, prev_id: &EventId,
&self,
origin: &'a ServerName,
event_id: &'a EventId,
room_id: &'a RoomId,
eventid_info: &mut HashMap<
Arc<EventId>,
(Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>),
>,
create_event: &Arc<PduEvent>,
first_pdu_in_room: &Arc<PduEvent>,
prev_id: &EventId,
) -> Result {
// Check for disabled again because it might have changed
if self.services.metadata.is_disabled(room_id).await {
debug!(
"Federation of room {room_id} is currently disabled on this server. Request by origin {origin} and event \
ID {event_id}"
"Federation of room {room_id} is currently disabled on this server. Request by \
origin {origin} and event ID {event_id}"
);
return Err(Error::BadRequest(
ErrorKind::forbidden(),

@@ -23,8 +23,8 @@ use conduwuit::{
};
use futures::TryFutureExt;
use ruma::{
events::room::create::RoomCreateEventContent, state_res::RoomVersion, EventId, OwnedEventId, OwnedRoomId, RoomId,
RoomVersionId,
events::room::create::RoomCreateEventContent, state_res::RoomVersion, EventId, OwnedEventId,
OwnedRoomId, RoomId, RoomVersionId,
};
use crate::{globals, rooms, sending, server_keys, Dep};
@@ -69,8 +69,10 @@ impl crate::Service for Service {
pdu_metadata: args.depend::<rooms::pdu_metadata::Service>("rooms::pdu_metadata"),
short: args.depend::<rooms::short::Service>("rooms::short"),
state: args.depend::<rooms::state::Service>("rooms::state"),
state_accessor: args.depend::<rooms::state_accessor::Service>("rooms::state_accessor"),
state_compressor: args.depend::<rooms::state_compressor::Service>("rooms::state_compressor"),
state_accessor: args
.depend::<rooms::state_accessor::Service>("rooms::state_accessor"),
state_compressor: args
.depend::<rooms::state_compressor::Service>("rooms::state_compressor"),
timeline: args.depend::<rooms::timeline::Service>("rooms::timeline"),
server: args.server.clone(),
},
@@ -95,7 +97,9 @@ impl crate::Service for Service {
}
impl Service {
async fn event_exists(&self, event_id: Arc<EventId>) -> bool { self.services.timeline.pdu_exists(&event_id).await }
async fn event_exists(&self, event_id: Arc<EventId>) -> bool {
self.services.timeline.pdu_exists(&event_id).await
}
async fn event_fetch(&self, event_id: Arc<EventId>) -> Option<Arc<PduEvent>> {
self.services

@@ -3,9 +3,13 @@ use ruma::{CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, R
use serde_json::value::RawValue as RawJsonValue;
#[implement(super::Service)]
pub async fn parse_incoming_pdu(&self, pdu: &RawJsonValue) -> Result<(OwnedEventId, CanonicalJsonObject, OwnedRoomId)> {
let value = serde_json::from_str::<CanonicalJsonObject>(pdu.get())
.map_err(|e| err!(BadServerResponse(debug_warn!("Error parsing incoming event {e:?}"))))?;
pub async fn parse_incoming_pdu(
&self,
pdu: &RawJsonValue,
) -> Result<(OwnedEventId, CanonicalJsonObject, OwnedRoomId)> {
let value = serde_json::from_str::<CanonicalJsonObject>(pdu.get()).map_err(|e| {
err!(BadServerResponse(debug_warn!("Error parsing incoming event {e:?}")))
})?;
let room_id: OwnedRoomId = value
.get("room_id")
@@ -20,8 +24,9 @@ pub async fn parse_incoming_pdu(&self, pdu: &RawJsonValue) -> Result<(OwnedEvent
.await
.map_err(|_| err!("Server is not in room {room_id}"))?;
let (event_id, value) = gen_event_id_canonical_json(pdu, &room_version_id)
.map_err(|e| err!(Request(InvalidParam("Could not convert event to canonical json: {e}"))))?;
let (event_id, value) = gen_event_id_canonical_json(pdu, &room_version_id).map_err(|e| {
err!(Request(InvalidParam("Could not convert event to canonical json: {e}")))
})?;
Ok((event_id, value, room_id))
}

@@ -20,7 +20,10 @@ use crate::rooms::state_compressor::CompressedStateEvent;
#[implement(super::Service)]
#[tracing::instrument(skip_all, name = "resolve")]
pub async fn resolve_state(
&self, room_id: &RoomId, room_version_id: &RoomVersionId, incoming_state: HashMap<u64, Arc<EventId>>,
&self,
room_id: &RoomId,
room_version_id: &RoomVersionId,
incoming_state: HashMap<u64, Arc<EventId>>,
) -> Result<Arc<HashSet<CompressedStateEvent>>> {
debug!("Loading current room state ids");
let current_sstatehash = self
@@ -76,10 +79,16 @@ pub async fn resolve_state(
let event_fetch = |event_id| self.event_fetch(event_id);
let event_exists = |event_id| self.event_exists(event_id);
let state = state_res::resolve(room_version_id, &fork_states, &auth_chain_sets, &event_fetch, &event_exists)
.boxed()
.await
.map_err(|e| err!(Database(error!("State resolution failed: {e:?}"))))?;
let state = state_res::resolve(
room_version_id,
&fork_states,
&auth_chain_sets,
&event_fetch,
&event_exists,
)
.boxed()
.await
.map_err(|e| err!(Database(error!("State resolution failed: {e:?}"))))?;
drop(lock);

@@ -21,7 +21,8 @@ use ruma::{
// request and build the state from a known point and resolve if > 1 prev_event
#[tracing::instrument(skip_all, name = "state")]
pub(super) async fn state_at_incoming_degree_one(
&self, incoming_pdu: &Arc<PduEvent>,
&self,
incoming_pdu: &Arc<PduEvent>,
) -> Result<Option<HashMap<u64, Arc<EventId>>>> {
let prev_event = &*incoming_pdu.prev_events[0];
let Ok(prev_event_sstatehash) = self
@@ -70,7 +71,10 @@ pub(super) async fn state_at_incoming_degree_one(
#[implement(super::Service)]
#[tracing::instrument(skip_all, name = "state")]
pub(super) async fn state_at_incoming_resolved(
&self, incoming_pdu: &Arc<PduEvent>, room_id: &RoomId, room_version_id: &RoomVersionId,
&self,
incoming_pdu: &Arc<PduEvent>,
room_id: &RoomId,
room_version_id: &RoomVersionId,
) -> Result<Option<HashMap<u64, Arc<EventId>>>> {
debug!("Calculating state at event using state res");
let mut extremity_sstatehashes = HashMap::with_capacity(incoming_pdu.prev_events.len());
@@ -157,10 +161,16 @@ pub(super) async fn state_at_incoming_resolved(
let event_fetch = |event_id| self.event_fetch(event_id);
let event_exists = |event_id| self.event_exists(event_id);
let result = state_res::resolve(room_version_id, &fork_states, &auth_chain_sets, &event_fetch, &event_exists)
.boxed()
.await
.map_err(|e| err!(Database(warn!(?e, "State resolution on prev events failed."))));
let result = state_res::resolve(
room_version_id,
&fork_states,
&auth_chain_sets,
&event_fetch,
&event_exists,
)
.boxed()
.await
.map_err(|e| err!(Database(warn!(?e, "State resolution on prev events failed."))));
drop(lock);

@@ -19,8 +19,12 @@ use crate::rooms::{state_compressor::HashSetCompressStateEvent, timeline::RawPdu
#[implement(super::Service)]
pub(super) async fn upgrade_outlier_to_timeline_pdu(
&self, incoming_pdu: Arc<PduEvent>, val: BTreeMap<String, CanonicalJsonValue>, create_event: &PduEvent,
origin: &ServerName, room_id: &RoomId,
&self,
incoming_pdu: Arc<PduEvent>,
val: BTreeMap<String, CanonicalJsonValue>,
create_event: &PduEvent,
origin: &ServerName,
room_id: &RoomId,
) -> Result<Option<RawPduId>> {
// Skip the PDU if we already have it as a timeline event
if let Ok(pduid) = self
@@ -63,7 +67,8 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu(
.await?;
}
let state_at_incoming_event = state_at_incoming_event.expect("we always set this to some above");
let state_at_incoming_event =
state_at_incoming_event.expect("we always set this to some above");
let room_version = to_room_version(&room_version_id);
debug!("Performing auth check");
@@ -124,24 +129,34 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu(
!auth_check
|| incoming_pdu.kind == TimelineEventType::RoomRedaction
&& match room_version_id {
V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => {
| V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => {
if let Some(redact_id) = &incoming_pdu.redacts {
!self
.services
.state_accessor
.user_can_redact(redact_id, &incoming_pdu.sender, &incoming_pdu.room_id, true)
.user_can_redact(
redact_id,
&incoming_pdu.sender,
&incoming_pdu.room_id,
true,
)
.await?
} else {
false
}
},
_ => {
| _ => {
let content: RoomRedactionEventContent = incoming_pdu.get_content()?;
if let Some(redact_id) = &content.redacts {
!self
.services
.state_accessor
.user_can_redact(redact_id, &incoming_pdu.sender, &incoming_pdu.room_id, true)
.user_can_redact(
redact_id,
&incoming_pdu.sender,
&incoming_pdu.room_id,
true,
)
.await?
} else {
false
@@ -229,11 +244,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu(
// Set the new room state to the resolved state
debug!("Forcing new room state");
let HashSetCompressStateEvent {
shortstatehash,
added,
removed,
} = self
let HashSetCompressStateEvent { shortstatehash, added, removed } = self
.services
.state_compressor
.save_state(room_id, new_room_state)