commit 6458f4b195
parent 5a335933b8
Author: Jason Volk <jason@zemos.net>
Date: 2024-12-28 00:57:02 +00:00
Committed by: strawberry

    refactor various Arc<EventId> to OwnedEventId

    Signed-off-by: Jason Volk <jason@zemos.net>

29 changed files with 142 additions and 152 deletions
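As background for the diff below: ruma's `OwnedEventId` is an owned event-id type that is already cheap to clone (reference-counted when ruma is built with its `Arc` identifier storage), so hand-wrapping `EventId` in `Arc` buys nothing. A minimal sketch of the calling pattern this enables follows; the helper and the event id are illustrative, not taken from this commit:

```rust
use ruma::{EventId, OwnedEventId};

// Illustrative helper (not from this commit): collects event ids without
// hand-rolled Arc<EventId> wrappers.
fn remember(id: &EventId, seen: &mut Vec<OwnedEventId>) {
    // to_owned() yields an OwnedEventId; cloning one is cheap (a refcount
    // bump under ruma's Arc identifier storage), much like Arc::clone was.
    seen.push(id.to_owned());
}

fn main() {
    let id: OwnedEventId =
        EventId::parse("$event:example.org").expect("valid event id");
    let mut seen = Vec::new();
    // &OwnedEventId derefs to &EventId, so borrowing call sites still work.
    remember(&id, &mut seen);
    assert_eq!(seen[0], id);
}
```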


@@ -10,7 +10,7 @@ use conduwuit::{
 };
 use futures::TryFutureExt;
 use ruma::{
-    api::federation::event::get_event, CanonicalJsonValue, EventId, RoomId, RoomVersionId,
+    api::federation::event::get_event, CanonicalJsonValue, OwnedEventId, RoomId, RoomVersionId,
     ServerName,
 };
@@ -27,7 +27,7 @@ use ruma::{
 pub(super) async fn fetch_and_handle_outliers<'a>(
     &self,
     origin: &'a ServerName,
-    events: &'a [Arc<EventId>],
+    events: &'a [OwnedEventId],
     create_event: &'a PduEvent,
     room_id: &'a RoomId,
     room_version_id: &'a RoomVersionId,
@@ -62,7 +62,7 @@ pub(super) async fn fetch_and_handle_outliers<'a>(
         // c. Ask origin server over federation
         // We also handle its auth chain here so we don't get a stack overflow in
         // handle_outlier_pdu.
-        let mut todo_auth_events = vec![Arc::clone(id)];
+        let mut todo_auth_events = vec![id.clone()];
         let mut events_in_reverse_order = Vec::with_capacity(todo_auth_events.len());
         let mut events_all = HashSet::with_capacity(todo_auth_events.len());
         while let Some(next_id) = todo_auth_events.pop() {
@@ -124,14 +124,15 @@ pub(super) async fn fetch_and_handle_outliers<'a>(
                     );
                 }
-                if let Some(auth_events) = value.get("auth_events").and_then(|c| c.as_array())
+                if let Some(auth_events) = value
+                    .get("auth_events")
+                    .and_then(CanonicalJsonValue::as_array)
                 {
                     for auth_event in auth_events {
                         if let Ok(auth_event) =
-                            serde_json::from_value(auth_event.clone().into())
+                            serde_json::from_value::<OwnedEventId>(auth_event.clone().into())
                         {
-                            let a: Arc<EventId> = auth_event;
-                            todo_auth_events.push(a);
+                            todo_auth_events.push(auth_event);
                         } else {
                             warn!("Auth event id is not valid");
                         }
@@ -201,7 +202,7 @@ pub(super) async fn fetch_and_handle_outliers<'a>(
                 },
                 | Err(e) => {
                     warn!("Authentication of event {next_id} failed: {e:?}");
-                    back_off(next_id.into());
+                    back_off(next_id);
                 },
             }
         }


@@ -8,7 +8,7 @@ use futures::{future, FutureExt};
 use ruma::{
     int,
     state_res::{self},
-    uint, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId,
+    uint, CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId,
     ServerName,
 };
@@ -23,14 +23,14 @@ pub(super) async fn fetch_prev(
     create_event: &PduEvent,
     room_id: &RoomId,
     room_version_id: &RoomVersionId,
-    initial_set: Vec<Arc<EventId>>,
+    initial_set: Vec<OwnedEventId>,
 ) -> Result<(
-    Vec<Arc<EventId>>,
-    HashMap<Arc<EventId>, (Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)>,
+    Vec<OwnedEventId>,
+    HashMap<OwnedEventId, (Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)>,
 )> {
-    let mut graph: HashMap<Arc<EventId>, _> = HashMap::with_capacity(initial_set.len());
+    let mut graph: HashMap<OwnedEventId, _> = HashMap::with_capacity(initial_set.len());
     let mut eventid_info = HashMap::new();
-    let mut todo_outlier_stack: Vec<Arc<EventId>> = initial_set;
+    let mut todo_outlier_stack: Vec<OwnedEventId> = initial_set;
     let first_pdu_in_room = self.services.timeline.first_pdu_in_room(room_id).await?;


@@ -1,15 +1,14 @@
-use std::{
-    collections::{hash_map, HashMap},
-    sync::Arc,
-};
+use std::collections::{hash_map, HashMap};
 use conduwuit::{debug, implement, warn, Err, Error, PduEvent, Result};
 use futures::FutureExt;
 use ruma::{
-    api::federation::event::get_room_state_ids, events::StateEventType, EventId, RoomId,
-    RoomVersionId, ServerName,
+    api::federation::event::get_room_state_ids, events::StateEventType, EventId, OwnedEventId,
+    RoomId, RoomVersionId, ServerName,
 };
 use crate::rooms::short::ShortStateKey;
 /// Call /state_ids to find out what the state at this pdu is. We trust the
 /// server's response to some extend (sic), but we still do a lot of checks
 /// on the events
@@ -22,31 +21,25 @@ pub(super) async fn fetch_state(
     room_id: &RoomId,
     room_version_id: &RoomVersionId,
     event_id: &EventId,
-) -> Result<Option<HashMap<u64, Arc<EventId>>>> {
+) -> Result<Option<HashMap<u64, OwnedEventId>>> {
     debug!("Fetching state ids");
     let res = self
         .services
         .sending
         .send_federation_request(origin, get_room_state_ids::v1::Request {
             room_id: room_id.to_owned(),
-            event_id: (*event_id).to_owned(),
+            event_id: event_id.to_owned(),
         })
         .await
         .inspect_err(|e| warn!("Fetching state for event failed: {e}"))?;
     debug!("Fetching state events");
-    let collect = res
-        .pdu_ids
-        .iter()
-        .map(|x| Arc::from(&**x))
-        .collect::<Vec<_>>();
     let state_vec = self
-        .fetch_and_handle_outliers(origin, &collect, create_event, room_id, room_version_id)
+        .fetch_and_handle_outliers(origin, &res.pdu_ids, create_event, room_id, room_version_id)
         .boxed()
         .await;
-    let mut state: HashMap<_, Arc<EventId>> = HashMap::with_capacity(state_vec.len());
+    let mut state: HashMap<ShortStateKey, OwnedEventId> = HashMap::with_capacity(state_vec.len());
     for (pdu, _) in state_vec {
         let state_key = pdu
             .state_key
@@ -61,10 +54,10 @@ pub(super) async fn fetch_state(
         match state.entry(shortstatekey) {
             | hash_map::Entry::Vacant(v) => {
-                v.insert(Arc::from(&*pdu.event_id));
+                v.insert(pdu.event_id.clone());
             },
             | hash_map::Entry::Occupied(_) =>
-                return Err(Error::bad_database(
+                return Err!(Database(
                     "State event's type and state_key combination exists multiple times.",
                 )),
         }
@@ -77,7 +70,7 @@ pub(super) async fn fetch_state(
         .get_shortstatekey(&StateEventType::RoomCreate, "")
         .await?;
-    if state.get(&create_shortstatekey).map(AsRef::as_ref) != Some(&create_event.event_id) {
+    if state.get(&create_shortstatekey) != Some(&create_event.event_id) {
         return Err!(Database("Incoming event refers to wrong create event."));
     }


@@ -147,7 +147,7 @@ pub async fn handle_incoming_pdu<'a>(
             .bad_event_ratelimiter
             .write()
             .expect("locked")
-            .entry(prev_id.into())
+            .entry(prev_id)
         {
             | Entry::Vacant(e) => {
                 e.insert((now, 1));


@@ -79,19 +79,13 @@ pub(super) async fn handle_outlier_pdu<'a>(
     // the auth events are also rejected "due to auth events"
     // NOTE: Step 5 is not applied anymore because it failed too often
     debug!("Fetching auth events");
-    Box::pin(
-        self.fetch_and_handle_outliers(
-            origin,
-            &incoming_pdu
-                .auth_events
-                .iter()
-                .map(|x| Arc::from(&**x))
-                .collect::<Vec<Arc<EventId>>>(),
-            create_event,
-            room_id,
-            &room_version_id,
-        ),
-    )
+    Box::pin(self.fetch_and_handle_outliers(
+        origin,
+        &incoming_pdu.auth_events,
+        create_event,
+        room_id,
+        &room_version_id,
+    ))
     .await;
 }


@@ -5,9 +5,9 @@ use std::{
 };
 use conduwuit::{
-    debug, implement, utils::math::continue_exponential_backoff_secs, Error, PduEvent, Result,
+    debug, implement, utils::math::continue_exponential_backoff_secs, Err, PduEvent, Result,
 };
-use ruma::{api::client::error::ErrorKind, CanonicalJsonValue, EventId, RoomId, ServerName};
+use ruma::{CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName};
 #[implement(super::Service)]
 #[allow(clippy::type_complexity)]
@@ -22,7 +22,7 @@ pub(super) async fn handle_prev_pdu<'a>(
     event_id: &'a EventId,
     room_id: &'a RoomId,
     eventid_info: &mut HashMap<
-        Arc<EventId>,
+        OwnedEventId,
         (Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>),
     >,
     create_event: &Arc<PduEvent>,
@@ -31,14 +31,10 @@ pub(super) async fn handle_prev_pdu<'a>(
 ) -> Result {
     // Check for disabled again because it might have changed
     if self.services.metadata.is_disabled(room_id).await {
-        debug!(
+        return Err!(Request(Forbidden(debug_warn!(
             "Federaton of room {room_id} is currently disabled on this server. Request by \
              origin {origin} and event ID {event_id}"
-        );
-        return Err(Error::BadRequest(
-            ErrorKind::forbidden(),
-            "Federation of this room is currently disabled on this server.",
-        ));
+        ))));
     }
     if let Some((time, tries)) = self


@@ -23,7 +23,7 @@ use conduwuit::{
 };
 use futures::TryFutureExt;
 use ruma::{
-    events::room::create::RoomCreateEventContent, state_res::RoomVersion, EventId, OwnedEventId,
+    events::room::create::RoomCreateEventContent, state_res::RoomVersion, OwnedEventId,
     OwnedRoomId, RoomId, RoomVersionId,
 };
@@ -97,11 +97,11 @@ impl crate::Service for Service {
 }
 impl Service {
-    async fn event_exists(&self, event_id: Arc<EventId>) -> bool {
+    async fn event_exists(&self, event_id: OwnedEventId) -> bool {
         self.services.timeline.pdu_exists(&event_id).await
     }
-    async fn event_fetch(&self, event_id: Arc<EventId>) -> Option<Arc<PduEvent>> {
+    async fn event_fetch(&self, event_id: OwnedEventId) -> Option<Arc<PduEvent>> {
         self.services
             .timeline
             .get_pdu(&event_id)


@@ -12,7 +12,7 @@ use conduwuit::{
 use futures::{FutureExt, StreamExt, TryFutureExt};
 use ruma::{
     state_res::{self, StateMap},
-    EventId, RoomId, RoomVersionId,
+    OwnedEventId, RoomId, RoomVersionId,
 };
 use crate::rooms::state_compressor::CompressedStateEvent;
@@ -23,7 +23,7 @@ pub async fn resolve_state(
     &self,
     room_id: &RoomId,
     room_version_id: &RoomVersionId,
-    incoming_state: HashMap<u64, Arc<EventId>>,
+    incoming_state: HashMap<u64, OwnedEventId>,
 ) -> Result<Arc<HashSet<CompressedStateEvent>>> {
     debug!("Loading current room state ids");
     let current_sstatehash = self
@@ -44,7 +44,7 @@ pub async fn resolve_state(
     for state in &fork_states {
         let starting_events = state.values().map(Borrow::borrow);
-        let auth_chain: HashSet<Arc<EventId>> = self
+        let auth_chain: HashSet<OwnedEventId> = self
             .services
             .auth_chain
             .get_event_ids(room_id, starting_events)
@@ -56,7 +56,7 @@ pub async fn resolve_state(
     }
     debug!("Loading fork states");
-    let fork_states: Vec<StateMap<Arc<EventId>>> = fork_states
+    let fork_states: Vec<StateMap<OwnedEventId>> = fork_states
         .into_iter()
         .stream()
         .wide_then(|fork_state| {
@@ -113,9 +113,9 @@ pub async fn resolve_state(
 pub async fn state_resolution(
     &self,
     room_version: &RoomVersionId,
-    state_sets: &[StateMap<Arc<EventId>>],
-    auth_chain_sets: &Vec<HashSet<Arc<EventId>>>,
-) -> Result<StateMap<Arc<EventId>>> {
+    state_sets: &[StateMap<OwnedEventId>],
+    auth_chain_sets: &Vec<HashSet<OwnedEventId>>,
+) -> Result<StateMap<OwnedEventId>> {
     //TODO: ???
     let _lock = self.services.globals.stateres_mutex.lock();


@@ -11,7 +11,7 @@ use conduwuit::{
     PduEvent, Result,
 };
 use futures::{FutureExt, StreamExt};
-use ruma::{state_res::StateMap, EventId, RoomId, RoomVersionId};
+use ruma::{state_res::StateMap, OwnedEventId, RoomId, RoomVersionId};
 // TODO: if we know the prev_events of the incoming event we can avoid the
 #[implement(super::Service)]
@@ -20,8 +20,8 @@ use ruma::{state_res::StateMap, EventId, RoomId, RoomVersionId};
 pub(super) async fn state_at_incoming_degree_one(
     &self,
     incoming_pdu: &Arc<PduEvent>,
-) -> Result<Option<HashMap<u64, Arc<EventId>>>> {
-    let prev_event = &*incoming_pdu.prev_events[0];
+) -> Result<Option<HashMap<u64, OwnedEventId>>> {
+    let prev_event = &incoming_pdu.prev_events[0];
     let Ok(prev_event_sstatehash) = self
         .services
         .state_accessor
@@ -56,7 +56,7 @@ pub(super) async fn state_at_incoming_degree_one(
             .get_or_create_shortstatekey(&prev_pdu.kind.to_string().into(), state_key)
             .await;
-        state.insert(shortstatekey, Arc::from(prev_event));
+        state.insert(shortstatekey, prev_event.clone());
         // Now it's the state after the pdu
     }
@@ -72,7 +72,7 @@ pub(super) async fn state_at_incoming_resolved(
     incoming_pdu: &Arc<PduEvent>,
     room_id: &RoomId,
     room_version_id: &RoomVersionId,
-) -> Result<Option<HashMap<u64, Arc<EventId>>>> {
+) -> Result<Option<HashMap<u64, OwnedEventId>>> {
     debug!("Calculating state at event using state res");
     let mut extremity_sstatehashes = HashMap::with_capacity(incoming_pdu.prev_events.len());
@@ -142,7 +142,7 @@ pub(super) async fn state_at_incoming_resolved(
         starting_events.push(id.borrow());
     }
-    let auth_chain: HashSet<Arc<EventId>> = self
+    let auth_chain: HashSet<OwnedEventId> = self
         .services
         .auth_chain
         .get_event_ids(room_id, starting_events.into_iter())


@@ -282,7 +282,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu(
     }
     trace!("Appending pdu to timeline");
-    extremities.insert(incoming_pdu.event_id.clone().into());
+    extremities.insert(incoming_pdu.event_id.clone());
     // Now that the event has passed all auth it is added into the timeline.
     // We use the `state_at_event` instead of `state_after` so we accurately