Hot-Reloading Refactor
Signed-off-by: Jason Volk <jason@zemos.net>
parent ae1a4fd283
commit 6c1434c165
212 changed files with 5679 additions and 4206 deletions
src/service/rooms/event_handler/mod.rs
@@ -1,11 +1,17 @@
+mod parse_incoming_pdu;
+mod signing_keys;
+
+pub struct Service;
+
 use std::{
 	cmp,
-	collections::{hash_map, HashSet},
+	collections::{hash_map, BTreeMap, HashMap, HashSet},
 	pin::Pin,
 	sync::Arc,
 	time::{Duration, Instant},
 };
 use futures_util::Future;
+pub use parse_incoming_pdu::parse_incoming_pdu;
 use ruma::{
 	api::{
 		client::error::ErrorKind,
@@ -24,14 +30,7 @@ use tokio::sync::RwLock;
 use tracing::{debug, error, info, trace, warn};
 
 use super::state_compressor::CompressedStateEvent;
-use crate::{
-	debug_error, debug_info,
-	service::{pdu, Arc, BTreeMap, HashMap, Result},
-	services, Error, PduEvent,
-};
-
-mod signing_keys;
-pub(crate) struct Service;
+use crate::{debug_error, debug_info, pdu, services, Error, PduEvent, Result};
 
 // We use some AsyncRecursiveType hacks here so we can call async funtion
 // recursively.
@@ -70,7 +69,7 @@ impl Service {
 	/// 14. Check if the event passes auth based on the "current state" of the
 	/// room, if not soft fail it
 	#[tracing::instrument(skip(self, origin, value, is_timeline_event, pub_key_map), name = "pdu")]
-	pub(crate) async fn handle_incoming_pdu<'a>(
+	pub async fn handle_incoming_pdu<'a>(
 		&self, origin: &'a ServerName, room_id: &'a RoomId, event_id: &'a EventId,
 		value: BTreeMap<String, CanonicalJsonValue>, is_timeline_event: bool,
 		pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
@@ -207,7 +206,7 @@ impl Service {
 		skip(self, origin, event_id, room_id, pub_key_map, eventid_info, create_event, first_pdu_in_room),
 		name = "prev"
 	)]
-	pub(crate) async fn handle_prev_pdu<'a>(
+	pub async fn handle_prev_pdu<'a>(
 		&self, origin: &'a ServerName, event_id: &'a EventId, room_id: &'a RoomId,
 		pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
 		eventid_info: &mut HashMap<Arc<EventId>, (Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)>,
@@ -427,7 +426,7 @@ impl Service {
 		})
 	}
 
-	pub(crate) async fn upgrade_outlier_to_timeline_pdu(
+	pub async fn upgrade_outlier_to_timeline_pdu(
 		&self, incoming_pdu: Arc<PduEvent>, val: BTreeMap<String, CanonicalJsonValue>, create_event: &PduEvent,
 		origin: &ServerName, room_id: &RoomId, pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
 	) -> Result<Option<Vec<u8>>> {
@@ -748,7 +747,7 @@ impl Service {
 	// TODO: if we know the prev_events of the incoming event we can avoid the
 	// request and build the state from a known point and resolve if > 1 prev_event
 	#[tracing::instrument(skip_all, name = "state")]
-	pub(crate) async fn state_at_incoming_degree_one(
+	pub async fn state_at_incoming_degree_one(
 		&self, incoming_pdu: &Arc<PduEvent>,
 	) -> Result<Option<HashMap<u64, Arc<EventId>>>> {
 		let prev_event = &*incoming_pdu.prev_events[0];
@@ -796,7 +795,7 @@ impl Service {
 	}
 
 	#[tracing::instrument(skip_all, name = "state")]
-	pub(crate) async fn state_at_incoming_resolved(
+	pub async fn state_at_incoming_resolved(
 		&self, incoming_pdu: &Arc<PduEvent>, room_id: &RoomId, room_version_id: &RoomVersionId,
 	) -> Result<Option<HashMap<u64, Arc<EventId>>>> {
 		debug!("Calculating state at event using state res");
@@ -988,7 +987,7 @@ impl Service {
 	/// b. Look at outlier pdu tree
 	/// c. Ask origin server over federation
 	/// d. TODO: Ask other servers over federation?
-	pub(crate) fn fetch_and_handle_outliers<'a>(
+	pub fn fetch_and_handle_outliers<'a>(
 		&'a self, origin: &'a ServerName, events: &'a [Arc<EventId>], create_event: &'a PduEvent, room_id: &'a RoomId,
 		room_version_id: &'a RoomVersionId, pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
 	) -> AsyncRecursiveCanonicalJsonVec<'a> {
@@ -1275,7 +1274,7 @@ impl Service {
 
 	/// Returns Ok if the acl allows the server
 	#[tracing::instrument(skip_all)]
-	pub(crate) fn acl_check(&self, server_name: &ServerName, room_id: &RoomId) -> Result<()> {
+	pub fn acl_check(&self, server_name: &ServerName, room_id: &RoomId) -> Result<()> {
 		let acl_event = if let Some(acl) =
 			services()
 				.rooms
src/service/rooms/event_handler/parse_incoming_pdu.rs (new file, 31 lines)
@@ -0,0 +1,31 @@
+use ruma::{api::client::error::ErrorKind, CanonicalJsonObject, OwnedEventId, OwnedRoomId, RoomId};
+use serde_json::value::RawValue as RawJsonValue;
+use tracing::warn;
+
+use crate::{service::pdu::gen_event_id_canonical_json, services, Error, Result};
+
+pub fn parse_incoming_pdu(pdu: &RawJsonValue) -> Result<(OwnedEventId, CanonicalJsonObject, OwnedRoomId)> {
+	let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
+		warn!("Error parsing incoming event {:?}: {:?}", pdu, e);
+		Error::BadServerResponse("Invalid PDU in server response")
+	})?;
+
+	let room_id: OwnedRoomId = value
+		.get("room_id")
+		.and_then(|id| RoomId::parse(id.as_str()?).ok())
+		.ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Invalid room id in pdu"))?;
+
+	let Ok(room_version_id) = services().rooms.state.get_room_version(&room_id) else {
+		return Err(Error::Err(format!("Server is not in room {room_id}")));
+	};
+
+	let Ok((event_id, value)) = gen_event_id_canonical_json(pdu, &room_version_id) else {
+		// Event could not be converted to canonical json
+		return Err(Error::BadRequest(
+			ErrorKind::InvalidParam,
+			"Could not convert event to canonical json.",
+		));
+	};
+
+	Ok((event_id, value, room_id))
+}
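For orientation, a minimal caller sketch follows. It is not part of this commit: the handler function, variable names, and the assumption that `parse_incoming_pdu` is reached through the `event_handler` service re-export are illustrative only, based on the signatures shown in the hunks above.

// Hypothetical usage sketch, not from this diff: feeding raw federation PDUs
// through the newly extracted parse_incoming_pdu and the now-`pub`
// handle_incoming_pdu. Paths and the surrounding handler are assumptions.
use std::collections::BTreeMap;

use serde_json::value::RawValue as RawJsonValue;
use tokio::sync::RwLock;
use tracing::warn;

use crate::service::rooms::event_handler::parse_incoming_pdu;
use crate::{services, Result};

async fn handle_raw_pdus(origin: &ruma::ServerName, raw_pdus: &[Box<RawJsonValue>]) -> Result<()> {
	// Key map threaded through the handler: server name -> key id -> Base64 key.
	// In practice it would be populated first (e.g. via fetch_required_signing_keys);
	// that step is omitted here.
	let pub_key_map = RwLock::new(BTreeMap::new());

	for pdu in raw_pdus {
		// Extracted helper: returns (event_id, canonical JSON object, room_id).
		let (event_id, value, room_id) = match parse_incoming_pdu(pdu) {
			Ok(parsed) => parsed,
			Err(e) => {
				warn!("skipping unparsable PDU: {e}");
				continue;
			},
		};

		// Signature of handle_incoming_pdu as changed in this commit.
		services()
			.rooms
			.event_handler
			.handle_incoming_pdu(origin, &room_id, &event_id, value, true, &pub_key_map)
			.await?;
	}

	Ok(())
}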
src/service/rooms/event_handler/signing_keys.rs
@@ -1,5 +1,5 @@
 use std::{
-	collections::HashSet,
+	collections::{BTreeMap, HashMap, HashSet},
 	time::{Duration, SystemTime},
 };
 
@@ -21,13 +21,10 @@ use serde_json::value::RawValue as RawJsonValue;
 use tokio::sync::{RwLock, RwLockWriteGuard};
 use tracing::{debug, error, info, trace, warn};
 
-use crate::{
-	service::{BTreeMap, HashMap, Result},
-	services, Error,
-};
+use crate::{services, Error, Result};
 
 impl super::Service {
-	pub(crate) async fn fetch_required_signing_keys<'a, E>(
+	pub async fn fetch_required_signing_keys<'a, E>(
 		&'a self, events: E, pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
 	) -> Result<()>
 	where
@@ -265,7 +262,7 @@ impl super::Service {
 		Ok(())
 	}
 
-	pub(crate) async fn fetch_join_signing_keys(
+	pub async fn fetch_join_signing_keys(
 		&self, event: &create_join_event::v2::Response, room_version: &RoomVersionId,
 		pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
 	) -> Result<()> {
@@ -342,7 +339,7 @@ impl super::Service {
 	/// Search the DB for the signing keys of the given server, if we don't have
 	/// them fetch them from the server and save to our DB.
 	#[tracing::instrument(skip_all)]
-	pub(crate) async fn fetch_signing_keys_for_server(
+	pub async fn fetch_signing_keys_for_server(
 		&self, origin: &ServerName, signature_ids: Vec<String>,
 	) -> Result<BTreeMap<String, Base64>> {
 		let contains_all_ids = |keys: &BTreeMap<String, Base64>| signature_ids.iter().all(|id| keys.contains_key(id));
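As a companion sketch (again not taken from this commit), the now-public fetch_signing_keys_for_server can be called directly when only one server's keys are needed; the accessor path and the surrounding function are assumptions for illustration.

// Hypothetical sketch, not from this diff: asking the event_handler service
// for specific signing keys of one server. Per the doc comment above, the DB
// is consulted first and the origin server is only contacted on a miss.
use std::collections::BTreeMap;

use ruma::{serde::Base64, ServerName};

use crate::{services, Result};

async fn signing_keys_for(origin: &ServerName, signature_ids: Vec<String>) -> Result<BTreeMap<String, Base64>> {
	// signature_ids are key IDs such as "ed25519:<version>" taken from a
	// PDU's `signatures` section (assumed caller responsibility).
	services()
		.rooms
		.event_handler
		.fetch_signing_keys_for_server(origin, signature_ids)
		.await
}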