Refactor server_keys service/interface and related callsites
Signed-off-by: Jason Volk <jason@zemos.net>
Signed-off-by: strawberry <strawberry@puppygock.gay>
parent d82ea331cf
commit c0939c3e9a

30 changed files with 1025 additions and 1378 deletions
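The gist of the refactor, visible in the hunks below: call sites stop threading a shared pub_key_map (an RwLock<BTreeMap<String, BTreeMap<String, Base64>>>) through the entire federation pipeline and instead call into the server_keys service (for example verify_event and hash_and_sign_event), which owns the key cache itself. A minimal sketch of the before/after calling convention, using made-up stand-in types rather than conduwuit's real ones:

use std::collections::BTreeMap;
use std::sync::RwLock;

// Hypothetical stand-ins; only the calling convention matters for this sketch.
type PubKeyMap = RwLock<BTreeMap<String, BTreeMap<String, String>>>;

// Before: every function in the chain takes the shared key map as an extra argument.
fn handle_incoming_pdu_old(event: &str, pub_key_map: &PubKeyMap) -> bool {
    let keys = pub_key_map.read().expect("lock poisoned");
    keys.contains_key("example.org") && !event.is_empty()
}

// After: a server_keys-style service owns the cache, and callers just invoke a method on it.
struct ServerKeys {
    keys: PubKeyMap,
}

impl ServerKeys {
    fn verify_event(&self, event: &str) -> bool {
        let keys = self.keys.read().expect("lock poisoned");
        keys.contains_key("example.org") && !event.is_empty()
    }
}

fn main() {
    let mut map = BTreeMap::new();
    map.insert("example.org".to_owned(), BTreeMap::new());

    let shared: PubKeyMap = RwLock::new(map.clone());
    let service = ServerKeys { keys: RwLock::new(map) };

    assert_eq!(handle_incoming_pdu_old("pdu", &shared), service.verify_event("pdu"));
}

The effect shows up repeatedly in the diff: every pub_key_map parameter, along with the tokio::sync::RwLock and serde::Base64 imports that existed only to carry it, simply disappears.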
@@ -28,12 +28,10 @@ use ruma::{
         StateEventType, TimelineEventType,
     },
     int,
-    serde::Base64,
     state_res::{self, EventTypeExt, RoomVersion, StateMap},
-    uint, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId,
-    ServerName, UserId,
+    uint, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId,
+    RoomId, RoomVersionId, ServerName, UserId,
 };
-use tokio::sync::RwLock;
 
 use super::state_compressor::CompressedStateEvent;
 use crate::{globals, rooms, sending, server_keys, Dep};
@@ -129,11 +127,10 @@ impl Service {
     /// 13. Use state resolution to find new room state
     /// 14. Check if the event passes auth based on the "current state" of the
     ///     room, if not soft fail it
-    #[tracing::instrument(skip(self, origin, value, is_timeline_event, pub_key_map), name = "pdu")]
+    #[tracing::instrument(skip(self, origin, value, is_timeline_event), name = "pdu")]
     pub async fn handle_incoming_pdu<'a>(
         &self, origin: &'a ServerName, room_id: &'a RoomId, event_id: &'a EventId,
         value: BTreeMap<String, CanonicalJsonValue>, is_timeline_event: bool,
-        pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
     ) -> Result<Option<Vec<u8>>> {
         // 1. Skip the PDU if we already have it as a timeline event
         if let Ok(pdu_id) = self.services.timeline.get_pdu_id(event_id).await {
@@ -177,7 +174,7 @@ impl Service {
         let first_pdu_in_room = self.services.timeline.first_pdu_in_room(room_id).await?;
 
         let (incoming_pdu, val) = self
-            .handle_outlier_pdu(origin, &create_event, event_id, room_id, value, false, pub_key_map)
+            .handle_outlier_pdu(origin, &create_event, event_id, room_id, value, false)
             .boxed()
             .await?;
 
@@ -200,7 +197,6 @@ impl Service {
                 &create_event,
                 room_id,
                 &room_version_id,
-                pub_key_map,
                 incoming_pdu.prev_events.clone(),
             )
             .await?;
@@ -212,7 +208,6 @@ impl Service {
                     origin,
                     event_id,
                     room_id,
-                    pub_key_map,
                     &mut eventid_info,
                     &create_event,
                     &first_pdu_in_room,
@@ -250,7 +245,7 @@ impl Service {
             .insert(room_id.to_owned(), (event_id.to_owned(), start_time));
 
         let r = self
-            .upgrade_outlier_to_timeline_pdu(incoming_pdu, val, &create_event, origin, room_id, pub_key_map)
+            .upgrade_outlier_to_timeline_pdu(incoming_pdu, val, &create_event, origin, room_id)
             .await;
 
         self.federation_handletime
@@ -264,12 +259,11 @@ impl Service {
     #[allow(clippy::type_complexity)]
     #[allow(clippy::too_many_arguments)]
     #[tracing::instrument(
-        skip(self, origin, event_id, room_id, pub_key_map, eventid_info, create_event, first_pdu_in_room),
+        skip(self, origin, event_id, room_id, eventid_info, create_event, first_pdu_in_room),
         name = "prev"
     )]
     pub async fn handle_prev_pdu<'a>(
         &self, origin: &'a ServerName, event_id: &'a EventId, room_id: &'a RoomId,
-        pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
         eventid_info: &mut HashMap<Arc<EventId>, (Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)>,
         create_event: &Arc<PduEvent>, first_pdu_in_room: &Arc<PduEvent>, prev_id: &EventId,
     ) -> Result<()> {
@@ -318,7 +312,7 @@ impl Service {
             .expect("locked")
             .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time));
 
-        self.upgrade_outlier_to_timeline_pdu(pdu, json, create_event, origin, room_id, pub_key_map)
+        self.upgrade_outlier_to_timeline_pdu(pdu, json, create_event, origin, room_id)
             .await?;
 
         self.federation_handletime
@@ -338,8 +332,7 @@ impl Service {
     #[allow(clippy::too_many_arguments)]
     async fn handle_outlier_pdu<'a>(
         &self, origin: &'a ServerName, create_event: &'a PduEvent, event_id: &'a EventId, room_id: &'a RoomId,
-        mut value: BTreeMap<String, CanonicalJsonValue>, auth_events_known: bool,
-        pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
+        mut value: CanonicalJsonObject, auth_events_known: bool,
     ) -> Result<(Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)> {
         // 1. Remove unsigned field
         value.remove("unsigned");
@@ -349,14 +342,13 @@ impl Service {
         // 2. Check signatures, otherwise drop
         // 3. check content hash, redact if doesn't match
         let room_version_id = Self::get_room_version_id(create_event)?;
-
-        let guard = pub_key_map.read().await;
-        let mut val = match ruma::signatures::verify_event(&guard, &value, &room_version_id) {
-            Err(e) => {
-                // Drop
-                warn!("Dropping bad event {event_id}: {e}");
-                return Err!(Request(InvalidParam("Signature verification failed")));
-            },
+        let mut val = match self
+            .services
+            .server_keys
+            .verify_event(&value, Some(&room_version_id))
+            .await
+        {
+            Ok(ruma::signatures::Verified::All) => value,
             Ok(ruma::signatures::Verified::Signatures) => {
                 // Redact
                 debug_info!("Calculated hash does not match (redaction): {event_id}");
@@ -371,11 +363,13 @@ impl Service {
 
                 obj
             },
-            Ok(ruma::signatures::Verified::All) => value,
+            Err(e) => {
+                return Err!(Request(InvalidParam(debug_error!(
+                    "Signature verification failed for {event_id}: {e}"
+                ))))
+            },
         };
 
-        drop(guard);
-
         // Now that we have checked the signature and hashes we can add the eventID and
         // convert to our PduEvent type
         val.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned()));
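For context on the match above: ruma::signatures reports verification through the Verified type, which distinguishes a fully verified event (All) from one whose signatures are valid but whose content hash is not (Signatures); that is why the second arm redacts the event instead of rejecting it outright. A rough, simplified sketch of that dispatch (illustrative only, not the service code):

use ruma::signatures::Verified;

// Maps a verification outcome to what the handler does with the incoming PDU.
fn disposition(outcome: Result<Verified, ruma::signatures::Error>) -> &'static str {
    match outcome {
        // Signatures and content hash both check out: keep the event as received.
        Ok(Verified::All) => "accept",
        // Signatures are valid but the content hash is not: redact before using it.
        Ok(Verified::Signatures) => "redact",
        // Verification failed outright: drop the event.
        Err(_) => "reject",
    }
}

fn main() {
    assert_eq!(disposition(Ok(Verified::All)), "accept");
}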
@@ -404,7 +398,6 @@ impl Service {
                     create_event,
                     room_id,
                     &room_version_id,
-                    pub_key_map,
                 ),
             )
             .await;
@@ -487,7 +480,7 @@ impl Service {
 
     pub async fn upgrade_outlier_to_timeline_pdu(
         &self, incoming_pdu: Arc<PduEvent>, val: BTreeMap<String, CanonicalJsonValue>, create_event: &PduEvent,
-        origin: &ServerName, room_id: &RoomId, pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
+        origin: &ServerName, room_id: &RoomId,
     ) -> Result<Option<Vec<u8>>> {
         // Skip the PDU if we already have it as a timeline event
         if let Ok(pduid) = self
@@ -526,14 +519,7 @@ impl Service {
 
         if state_at_incoming_event.is_none() {
             state_at_incoming_event = self
-                .fetch_state(
-                    origin,
-                    create_event,
-                    room_id,
-                    &room_version_id,
-                    pub_key_map,
-                    &incoming_pdu.event_id,
-                )
+                .fetch_state(origin, create_event, room_id, &room_version_id, &incoming_pdu.event_id)
                 .await?;
         }
 
@@ -1021,10 +1007,10 @@ impl Service {
     /// Call /state_ids to find out what the state at this pdu is. We trust the
     /// server's response to some extend (sic), but we still do a lot of checks
     /// on the events
-    #[tracing::instrument(skip(self, pub_key_map, create_event, room_version_id))]
+    #[tracing::instrument(skip(self, create_event, room_version_id))]
     async fn fetch_state(
         &self, origin: &ServerName, create_event: &PduEvent, room_id: &RoomId, room_version_id: &RoomVersionId,
-        pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>, event_id: &EventId,
+        event_id: &EventId,
     ) -> Result<Option<HashMap<u64, Arc<EventId>>>> {
         debug!("Fetching state ids");
         let res = self
@@ -1048,7 +1034,7 @@ impl Service {
             .collect::<Vec<_>>();
 
         let state_vec = self
-            .fetch_and_handle_outliers(origin, &collect, create_event, room_id, room_version_id, pub_key_map)
+            .fetch_and_handle_outliers(origin, &collect, create_event, room_id, room_version_id)
             .boxed()
             .await;
 
@@ -1102,7 +1088,7 @@ impl Service {
     /// d. TODO: Ask other servers over federation?
     pub async fn fetch_and_handle_outliers<'a>(
         &self, origin: &'a ServerName, events: &'a [Arc<EventId>], create_event: &'a PduEvent, room_id: &'a RoomId,
-        room_version_id: &'a RoomVersionId, pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
+        room_version_id: &'a RoomVersionId,
     ) -> Vec<(Arc<PduEvent>, Option<BTreeMap<String, CanonicalJsonValue>>)> {
         let back_off = |id| match self
             .services
@@ -1222,22 +1208,6 @@ impl Service {
                 events_with_auth_events.push((id, None, events_in_reverse_order));
             }
 
-            // We go through all the signatures we see on the PDUs and their unresolved
-            // dependencies and fetch the corresponding signing keys
-            self.services
-                .server_keys
-                .fetch_required_signing_keys(
-                    events_with_auth_events
-                        .iter()
-                        .flat_map(|(_id, _local_pdu, events)| events)
-                        .map(|(_event_id, event)| event),
-                    pub_key_map,
-                )
-                .await
-                .unwrap_or_else(|e| {
-                    warn!("Could not fetch all signatures for PDUs from {origin}: {e:?}");
-                });
-
             let mut pdus = Vec::with_capacity(events_with_auth_events.len());
             for (id, local_pdu, events_in_reverse_order) in events_with_auth_events {
                 // a. Look in the main timeline (pduid_pdu tree)
@@ -1266,16 +1236,8 @@ impl Service {
                         }
                     }
 
-                    match Box::pin(self.handle_outlier_pdu(
-                        origin,
-                        create_event,
-                        &next_id,
-                        room_id,
-                        value.clone(),
-                        true,
-                        pub_key_map,
-                    ))
-                    .await
+                    match Box::pin(self.handle_outlier_pdu(origin, create_event, &next_id, room_id, value.clone(), true))
+                        .await
                     {
                         Ok((pdu, json)) => {
                             if next_id == *id {
@@ -1296,7 +1258,7 @@ impl Service {
     #[tracing::instrument(skip_all)]
     async fn fetch_prev(
         &self, origin: &ServerName, create_event: &PduEvent, room_id: &RoomId, room_version_id: &RoomVersionId,
-        pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>, initial_set: Vec<Arc<EventId>>,
+        initial_set: Vec<Arc<EventId>>,
     ) -> Result<(
         Vec<Arc<EventId>>,
         HashMap<Arc<EventId>, (Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)>,
@@ -1311,14 +1273,7 @@ impl Service {
 
         while let Some(prev_event_id) = todo_outlier_stack.pop() {
             if let Some((pdu, mut json_opt)) = self
-                .fetch_and_handle_outliers(
-                    origin,
-                    &[prev_event_id.clone()],
-                    create_event,
-                    room_id,
-                    room_version_id,
-                    pub_key_map,
-                )
+                .fetch_and_handle_outliers(origin, &[prev_event_id.clone()], create_event, room_id, room_version_id)
                 .boxed()
                 .await
                 .pop()

@@ -16,7 +16,7 @@ use conduit::{
 };
 use futures::{future, future::ready, Future, FutureExt, Stream, StreamExt, TryStreamExt};
 use ruma::{
-    api::{client::error::ErrorKind, federation},
+    api::federation,
     canonical_json::to_canonical_value,
     events::{
         push_rules::PushRulesEvent,
@@ -30,14 +30,12 @@ use ruma::{
         GlobalAccountDataEventType, StateEventType, TimelineEventType,
     },
     push::{Action, Ruleset, Tweak},
-    serde::Base64,
     state_res::{self, Event, RoomVersion},
     uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName,
     RoomId, RoomVersionId, ServerName, UserId,
 };
 use serde::Deserialize;
 use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
-use tokio::sync::RwLock;
 
 use self::data::Data;
 pub use self::data::PdusIterItem;
@@ -784,21 +782,15 @@ impl Service {
             to_canonical_value(self.services.globals.server_name()).expect("server name is a valid CanonicalJsonValue"),
         );
 
-        match ruma::signatures::hash_and_sign_event(
-            self.services.globals.server_name().as_str(),
-            self.services.globals.keypair(),
-            &mut pdu_json,
-            &room_version_id,
-        ) {
-            Ok(()) => {},
-            Err(e) => {
-                return match e {
-                    ruma::signatures::Error::PduSize => {
-                        Err(Error::BadRequest(ErrorKind::TooLarge, "Message is too long"))
-                    },
-                    _ => Err(Error::BadRequest(ErrorKind::Unknown, "Signing event failed")),
-                }
-            },
+        if let Err(e) = self
+            .services
+            .server_keys
+            .hash_and_sign_event(&mut pdu_json, &room_version_id)
+        {
+            return match e {
+                Error::Signatures(ruma::signatures::Error::PduSize) => Err!(Request(TooLarge("Message is too long"))),
+                _ => Err!(Request(Unknown("Signing event failed"))),
+            };
         }
 
         // Generate event id
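The new call above drops the explicit server name and keypair arguments, so the server_keys service evidently supplies them from its own state. A hypothetical sketch of such a wrapper (struct and field names are illustrative, not conduwuit's actual implementation), delegating to the same ruma::signatures::hash_and_sign_event the old code called directly:

use ruma::{signatures::Ed25519KeyPair, CanonicalJsonObject, OwnedServerName, RoomVersionId};

// Illustrative service shape: it holds the origin name and signing keypair,
// so call sites only hand over the PDU JSON and the room version.
struct ServerKeys {
    server_name: OwnedServerName,
    keypair: Ed25519KeyPair,
}

impl ServerKeys {
    fn hash_and_sign_event(
        &self,
        pdu_json: &mut CanonicalJsonObject,
        room_version: &RoomVersionId,
    ) -> Result<(), ruma::signatures::Error> {
        // Same underlying ruma call as before; only the argument plumbing moved.
        ruma::signatures::hash_and_sign_event(self.server_name.as_str(), &self.keypair, pdu_json, room_version)
    }
}

In the diff itself the error surfaces wrapped as Error::Signatures(..), which is what the match in the hunk above unwraps before mapping PduSize to a TooLarge request error.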
@@ -1106,9 +1098,8 @@ impl Service {
             .await;
         match response {
             Ok(response) => {
-                let pub_key_map = RwLock::new(BTreeMap::new());
                 for pdu in response.pdus {
-                    if let Err(e) = self.backfill_pdu(backfill_server, pdu, &pub_key_map).await {
+                    if let Err(e) = self.backfill_pdu(backfill_server, pdu).await {
                         warn!("Failed to add backfilled pdu in room {room_id}: {e}");
                     }
                 }
@@ -1124,11 +1115,8 @@ impl Service {
         Ok(())
     }
 
-    #[tracing::instrument(skip(self, pdu, pub_key_map))]
-    pub async fn backfill_pdu(
-        &self, origin: &ServerName, pdu: Box<RawJsonValue>,
-        pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
-    ) -> Result<()> {
+    #[tracing::instrument(skip(self, pdu))]
+    pub async fn backfill_pdu(&self, origin: &ServerName, pdu: Box<RawJsonValue>) -> Result<()> {
         let (event_id, value, room_id) = self.services.event_handler.parse_incoming_pdu(&pdu).await?;
 
         // Lock so we cannot backfill the same pdu twice at the same time
@@ -1146,14 +1134,9 @@ impl Service {
             return Ok(());
         }
 
-        self.services
-            .server_keys
-            .fetch_required_signing_keys([&value], pub_key_map)
-            .await?;
-
         self.services
             .event_handler
-            .handle_incoming_pdu(origin, &room_id, &event_id, value, false, pub_key_map)
+            .handle_incoming_pdu(origin, &room_id, &event_id, value, false)
             .await?;
 
         let value = self