Refactor server_keys service/interface and related callsites

Signed-off-by: Jason Volk <jason@zemos.net>
Signed-off-by: strawberry <strawberry@puppygock.gay>
This commit is contained in:
Jason Volk 2024-10-11 18:57:59 +00:00 committed by strawberry
parent d82ea331cf
commit c0939c3e9a
30 changed files with 1025 additions and 1378 deletions

View file

@ -1,17 +1,16 @@
use std::{
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
collections::{BTreeMap, HashMap, HashSet},
net::IpAddr,
sync::Arc,
time::Instant,
};
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduit::{
debug, debug_error, debug_warn, err, error, info,
debug, debug_info, debug_warn, err, error, info, pdu,
pdu::{gen_event_id_canonical_json, PduBuilder},
trace, utils,
utils::{math::continue_exponential_backoff_secs, IterStream, ReadyExt},
utils::{IterStream, ReadyExt},
warn, Err, Error, PduEvent, Result,
};
use futures::{FutureExt, StreamExt};
@ -36,13 +35,10 @@ use ruma::{
},
StateEventType,
},
serde::Base64,
state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName,
OwnedUserId, RoomId, RoomVersionId, ServerName, UserId,
state_res, CanonicalJsonObject, CanonicalJsonValue, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId,
RoomVersionId, ServerName, UserId,
};
use serde_json::value::RawValue as RawJsonValue;
use service::{appservice::RegistrationInfo, rooms::state::RoomMutexGuard, Services};
use tokio::sync::RwLock;
use crate::{client::full_user_deactivate, Ruma};
@ -670,20 +666,22 @@ pub async fn join_room_by_id_helper(
if local_join {
join_room_by_id_helper_local(services, sender_user, room_id, reason, servers, third_party_signed, state_lock)
.boxed()
.await
.await?;
} else {
// Ask a remote server if we are not participating in this room
join_room_by_id_helper_remote(services, sender_user, room_id, reason, servers, third_party_signed, state_lock)
.boxed()
.await
.await?;
}
Ok(join_room_by_id::v3::Response::new(room_id.to_owned()))
}
#[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_remote")]
async fn join_room_by_id_helper_remote(
services: &Services, sender_user: &UserId, room_id: &RoomId, reason: Option<String>, servers: &[OwnedServerName],
_third_party_signed: Option<&ThirdPartySigned>, state_lock: RoomMutexGuard,
) -> Result<join_room_by_id::v3::Response> {
) -> Result {
info!("Joining {room_id} over federation.");
let (make_join_response, remote_server) = make_join_request(services, sender_user, room_id, servers).await?;
@ -751,43 +749,33 @@ async fn join_room_by_id_helper_remote(
// In order to create a compatible ref hash (EventID) the `hashes` field needs
// to be present
ruma::signatures::hash_and_sign_event(
services.globals.server_name().as_str(),
services.globals.keypair(),
&mut join_event_stub,
&room_version_id,
)
.expect("event is valid, we just created it");
services
.server_keys
.hash_and_sign_event(&mut join_event_stub, &room_version_id)?;
// Generate event id
let event_id = format!(
"${}",
ruma::signatures::reference_hash(&join_event_stub, &room_version_id)
.expect("ruma can calculate reference hashes")
);
let event_id = <&EventId>::try_from(event_id.as_str()).expect("ruma's reference hashes are valid event ids");
let event_id = pdu::gen_event_id(&join_event_stub, &room_version_id)?;
// Add event_id back
join_event_stub.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned()));
join_event_stub.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into()));
// It has enough fields to be called a proper event now
let mut join_event = join_event_stub;
info!("Asking {remote_server} for send_join in room {room_id}");
let send_join_request = federation::membership::create_join_event::v2::Request {
room_id: room_id.to_owned(),
event_id: event_id.clone(),
omit_members: false,
pdu: services
.sending
.convert_to_outgoing_federation_event(join_event.clone())
.await,
};
let send_join_response = services
.sending
.send_federation_request(
&remote_server,
federation::membership::create_join_event::v2::Request {
room_id: room_id.to_owned(),
event_id: event_id.to_owned(),
omit_members: false,
pdu: services
.sending
.convert_to_outgoing_federation_event(join_event.clone())
.await,
},
)
.send_federation_request(&remote_server, send_join_request)
.await?;
info!("send_join finished");
@ -805,7 +793,7 @@ async fn join_room_by_id_helper_remote(
// validate and send signatures
_ => {
if let Some(signed_raw) = &send_join_response.room_state.event {
info!(
debug_info!(
"There is a signed event. This room is probably using restricted joins. Adding signature to \
our event"
);
@ -862,25 +850,25 @@ async fn join_room_by_id_helper_remote(
.await;
info!("Parsing join event");
let parsed_join_pdu = PduEvent::from_id_val(event_id, join_event.clone())
let parsed_join_pdu = PduEvent::from_id_val(&event_id, join_event.clone())
.map_err(|e| err!(BadServerResponse("Invalid join event PDU: {e:?}")))?;
let mut state = HashMap::new();
let pub_key_map = RwLock::new(BTreeMap::new());
info!("Fetching join signing keys");
info!("Acquiring server signing keys for response events");
let resp_events = &send_join_response.room_state;
let resp_state = &resp_events.state;
let resp_auth = &resp_events.auth_chain;
services
.server_keys
.fetch_join_signing_keys(&send_join_response, &room_version_id, &pub_key_map)
.await?;
.acquire_events_pubkeys(resp_auth.iter().chain(resp_state.iter()))
.await;
info!("Going through send_join response room_state");
for result in send_join_response
.room_state
.state
.iter()
.map(|pdu| validate_and_add_event_id(services, pdu, &room_version_id, &pub_key_map))
{
let mut state = HashMap::new();
for result in send_join_response.room_state.state.iter().map(|pdu| {
services
.server_keys
.validate_and_add_event_id(pdu, &room_version_id)
}) {
let Ok((event_id, value)) = result.await else {
continue;
};
@ -902,12 +890,11 @@ async fn join_room_by_id_helper_remote(
}
info!("Going through send_join response auth_chain");
for result in send_join_response
.room_state
.auth_chain
.iter()
.map(|pdu| validate_and_add_event_id(services, pdu, &room_version_id, &pub_key_map))
{
for result in send_join_response.room_state.auth_chain.iter().map(|pdu| {
services
.server_keys
.validate_and_add_event_id(pdu, &room_version_id)
}) {
let Ok((event_id, value)) = result.await else {
continue;
};
@ -937,29 +924,22 @@ async fn join_room_by_id_helper_remote(
return Err!(Request(Forbidden("Auth check failed")));
}
info!("Saving state from send_join");
info!("Compressing state from send_join");
let compressed = state
.iter()
.stream()
.then(|(&k, id)| services.rooms.state_compressor.compress_state_event(k, id))
.collect()
.await;
debug!("Saving compressed state");
let (statehash_before_join, new, removed) = services
.rooms
.state_compressor
.save_state(
room_id,
Arc::new(
state
.into_iter()
.stream()
.then(|(k, id)| async move {
services
.rooms
.state_compressor
.compress_state_event(k, &id)
.await
})
.collect()
.await,
),
)
.save_state(room_id, Arc::new(compressed))
.await?;
debug!("Forcing state for new room");
services
.rooms
.state
@ -1002,14 +982,14 @@ async fn join_room_by_id_helper_remote(
.state
.set_room_state(room_id, statehash_after_join, &state_lock);
Ok(join_room_by_id::v3::Response::new(room_id.to_owned()))
Ok(())
}
#[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_local")]
async fn join_room_by_id_helper_local(
services: &Services, sender_user: &UserId, room_id: &RoomId, reason: Option<String>, servers: &[OwnedServerName],
_third_party_signed: Option<&ThirdPartySigned>, state_lock: RoomMutexGuard,
) -> Result<join_room_by_id::v3::Response> {
) -> Result {
debug!("We can join locally");
let join_rules_event_content = services
@ -1089,7 +1069,7 @@ async fn join_room_by_id_helper_local(
)
.await
{
Ok(_event_id) => return Ok(join_room_by_id::v3::Response::new(room_id.to_owned())),
Ok(_) => return Ok(()),
Err(e) => e,
};
@ -1159,24 +1139,15 @@ async fn join_room_by_id_helper_local(
// In order to create a compatible ref hash (EventID) the `hashes` field needs
// to be present
ruma::signatures::hash_and_sign_event(
services.globals.server_name().as_str(),
services.globals.keypair(),
&mut join_event_stub,
&room_version_id,
)
.expect("event is valid, we just created it");
services
.server_keys
.hash_and_sign_event(&mut join_event_stub, &room_version_id)?;
// Generate event id
let event_id = format!(
"${}",
ruma::signatures::reference_hash(&join_event_stub, &room_version_id)
.expect("ruma can calculate reference hashes")
);
let event_id = <&EventId>::try_from(event_id.as_str()).expect("ruma's reference hashes are valid event ids");
let event_id = pdu::gen_event_id(&join_event_stub, &room_version_id)?;
// Add event_id back
join_event_stub.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned()));
join_event_stub.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into()));
// It has enough fields to be called a proper event now
let join_event = join_event_stub;
@ -1187,7 +1158,7 @@ async fn join_room_by_id_helper_local(
&remote_server,
federation::membership::create_join_event::v2::Request {
room_id: room_id.to_owned(),
event_id: event_id.to_owned(),
event_id: event_id.clone(),
omit_members: false,
pdu: services
.sending
@ -1214,15 +1185,10 @@ async fn join_room_by_id_helper_local(
}
drop(state_lock);
let pub_key_map = RwLock::new(BTreeMap::new());
services
.server_keys
.fetch_required_signing_keys([&signed_value], &pub_key_map)
.await?;
services
.rooms
.event_handler
.handle_incoming_pdu(&remote_server, room_id, &signed_event_id, signed_value, true, &pub_key_map)
.handle_incoming_pdu(&remote_server, room_id, &signed_event_id, signed_value, true)
.await?;
} else {
return Err(error);
@ -1231,7 +1197,7 @@ async fn join_room_by_id_helper_local(
return Err(error);
}
Ok(join_room_by_id::v3::Response::new(room_id.to_owned()))
Ok(())
}
async fn make_join_request(
@ -1301,62 +1267,6 @@ async fn make_join_request(
make_join_response_and_server
}
/// Validate a raw PDU received from a remote server: parse it to canonical
/// JSON, derive its event ID from the reference hash, check the event against
/// the backoff ratelimiter, and verify its signatures with `pub_key_map`.
///
/// On success returns the derived event ID together with the canonical JSON
/// object (with the `event_id` field inserted). On signature failure the
/// event is recorded in the bad-event ratelimiter before the error is
/// returned, so repeated failures back off exponentially.
pub async fn validate_and_add_event_id(
services: &Services, pdu: &RawJsonValue, room_version: &RoomVersionId,
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<(OwnedEventId, CanonicalJsonObject)> {
// Parse the raw JSON into a canonical object; reject malformed PDUs.
let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get())
.map_err(|e| err!(BadServerResponse(debug_error!("Invalid PDU in server response: {e:?}"))))?;
// The event ID is not trusted from the wire; it is recomputed as the
// reference hash of the event content, prefixed with '$'.
let event_id = EventId::parse(format!(
"${}",
ruma::signatures::reference_hash(&value, room_version).expect("ruma can calculate reference hashes")
))
.expect("ruma's reference hashes are valid event ids");
// Record a (timestamp, try-count) entry for this event so subsequent
// failures for the same ID are rate-limited.
let back_off = |id| async {
match services
.globals
.bad_event_ratelimiter
.write()
.expect("locked")
.entry(id)
{
Entry::Vacant(e) => {
e.insert((Instant::now(), 1));
},
Entry::Occupied(mut e) => {
// Saturating add: the try counter must never wrap.
*e.get_mut() = (Instant::now(), e.get().1.saturating_add(1));
},
}
};
// If this event has failed before, honour the exponential backoff window
// before attempting verification again.
if let Some((time, tries)) = services
.globals
.bad_event_ratelimiter
.read()
.expect("locked")
.get(&event_id)
{
// Exponential backoff
const MIN: u64 = 60 * 5;
const MAX: u64 = 60 * 60 * 24;
if continue_exponential_backoff_secs(MIN, MAX, time.elapsed(), *tries) {
return Err!(BadServerResponse("bad event {event_id:?}, still backing off"));
}
}
// Verify the event signatures against the provided public-key map; on
// failure, bump the backoff entry before returning the error.
if let Err(e) = ruma::signatures::verify_event(&*pub_key_map.read().await, &value, room_version) {
debug_error!("Event {event_id} failed verification {pdu:#?}");
let e = Err!(BadServerResponse(debug_error!("Event {event_id} failed verification: {e:?}")));
back_off(event_id).await;
return e;
}
// Only now is the recomputed event_id attached to the object (signature
// and hash checks must run without it).
value.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned()));
Ok((event_id, value))
}
pub(crate) async fn invite_helper(
services: &Services, sender_user: &UserId, user_id: &UserId, room_id: &RoomId, reason: Option<String>,
is_direct: bool,
@ -1423,8 +1333,6 @@ pub(crate) async fn invite_helper(
)
.await?;
let pub_key_map = RwLock::new(BTreeMap::new());
// We do not add the event_id field to the pdu here because of signature and
// hashes checks
let Ok((event_id, value)) = gen_event_id_canonical_json(&response.event, &room_version_id) else {
@ -1452,15 +1360,10 @@ pub(crate) async fn invite_helper(
)
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?;
services
.server_keys
.fetch_required_signing_keys([&value], &pub_key_map)
.await?;
let pdu_id: Vec<u8> = services
.rooms
.event_handler
.handle_incoming_pdu(&origin, room_id, &event_id, value, true, &pub_key_map)
.handle_incoming_pdu(&origin, room_id, &event_id, value, true)
.await?
.ok_or(Error::BadRequest(
ErrorKind::InvalidParam,
@ -1714,24 +1617,15 @@ async fn remote_leave_room(services: &Services, user_id: &UserId, room_id: &Room
// In order to create a compatible ref hash (EventID) the `hashes` field needs
// to be present
ruma::signatures::hash_and_sign_event(
services.globals.server_name().as_str(),
services.globals.keypair(),
&mut leave_event_stub,
&room_version_id,
)
.expect("event is valid, we just created it");
services
.server_keys
.hash_and_sign_event(&mut leave_event_stub, &room_version_id)?;
// Generate event id
let event_id = EventId::parse(format!(
"${}",
ruma::signatures::reference_hash(&leave_event_stub, &room_version_id)
.expect("ruma can calculate reference hashes")
))
.expect("ruma's reference hashes are valid event ids");
let event_id = pdu::gen_event_id(&leave_event_stub, &room_version_id)?;
// Add event_id back
leave_event_stub.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned()));
leave_event_stub.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into()));
// It has enough fields to be called a proper event now
let leave_event = leave_event_stub;

View file

@ -52,7 +52,7 @@ pub(super) use keys::*;
pub(super) use media::*;
pub(super) use media_legacy::*;
pub(super) use membership::*;
pub use membership::{join_room_by_id_helper, leave_all_rooms, leave_room, validate_and_add_event_id};
pub use membership::{join_room_by_id_helper, leave_all_rooms, leave_room};
pub(super) use message::*;
pub(super) use openid::*;
pub(super) use presence::*;

View file

@ -48,7 +48,7 @@ where
async fn from_request(request: hyper::Request<Body>, services: &State) -> Result<Self, Self::Rejection> {
let mut request = request::from(services, request).await?;
let mut json_body = serde_json::from_slice::<CanonicalJsonValue>(&request.body).ok();
let auth = auth::auth(services, &mut request, &json_body, &T::METADATA).await?;
let auth = auth::auth(services, &mut request, json_body.as_ref(), &T::METADATA).await?;
Ok(Self {
body: make_body::<T>(services, &mut request, &mut json_body, &auth)?,
origin: auth.origin,

View file

@ -1,19 +1,20 @@
use std::collections::BTreeMap;
use axum::RequestPartsExt;
use axum_extra::{
headers::{authorization::Bearer, Authorization},
typed_header::TypedHeaderRejectionReason,
TypedHeader,
};
use conduit::{debug_info, warn, Err, Error, Result};
use conduit::{debug_error, err, warn, Err, Error, Result};
use http::uri::PathAndQuery;
use ruma::{
api::{client::error::ErrorKind, AuthScheme, Metadata},
server_util::authorization::XMatrix,
CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId,
CanonicalJsonObject, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId,
};
use service::{
server_keys::{PubKeyMap, PubKeys},
Services,
};
use service::Services;
use super::request::Request;
use crate::service::appservice::RegistrationInfo;
@ -33,7 +34,7 @@ pub(super) struct Auth {
}
pub(super) async fn auth(
services: &Services, request: &mut Request, json_body: &Option<CanonicalJsonValue>, metadata: &Metadata,
services: &Services, request: &mut Request, json_body: Option<&CanonicalJsonValue>, metadata: &Metadata,
) -> Result<Auth> {
let bearer: Option<TypedHeader<Authorization<Bearer>>> = request.parts.extract().await?;
let token = match &bearer {
@ -151,27 +152,24 @@ pub(super) async fn auth(
}
async fn auth_appservice(services: &Services, request: &Request, info: Box<RegistrationInfo>) -> Result<Auth> {
let user_id = request
let user_id_default =
|| UserId::parse_with_server_name(info.registration.sender_localpart.as_str(), services.globals.server_name());
let Ok(user_id) = request
.query
.user_id
.clone()
.map_or_else(
|| {
UserId::parse_with_server_name(
info.registration.sender_localpart.as_str(),
services.globals.server_name(),
)
},
UserId::parse,
)
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?;
.map_or_else(user_id_default, UserId::parse)
else {
return Err!(Request(InvalidUsername("Username is invalid.")));
};
if !info.is_user_match(&user_id) {
return Err(Error::BadRequest(ErrorKind::Exclusive, "User is not in namespace."));
return Err!(Request(Exclusive("User is not in namespace.")));
}
if !services.users.exists(&user_id).await {
return Err(Error::BadRequest(ErrorKind::forbidden(), "User does not exist."));
return Err!(Request(Forbidden("User does not exist.")));
}
Ok(Auth {
@ -182,118 +180,104 @@ async fn auth_appservice(services: &Services, request: &Request, info: Box<Regis
})
}
async fn auth_server(
services: &Services, request: &mut Request, json_body: &Option<CanonicalJsonValue>,
) -> Result<Auth> {
async fn auth_server(services: &Services, request: &mut Request, body: Option<&CanonicalJsonValue>) -> Result<Auth> {
type Member = (String, CanonicalJsonValue);
type Object = CanonicalJsonObject;
type Value = CanonicalJsonValue;
let x_matrix = parse_x_matrix(request).await?;
auth_server_checks(services, &x_matrix)?;
let destination = services.globals.server_name();
let origin = &x_matrix.origin;
#[allow(clippy::or_fun_call)]
let signature_uri = request
.parts
.uri
.path_and_query()
.unwrap_or(&PathAndQuery::from_static("/"))
.to_string();
let signature: [Member; 1] = [(x_matrix.key.to_string(), Value::String(x_matrix.sig.to_string()))];
let signatures: [Member; 1] = [(origin.to_string(), Value::Object(signature.into()))];
let authorization: [Member; 5] = [
("destination".into(), Value::String(destination.into())),
("method".into(), Value::String(request.parts.method.to_string())),
("origin".into(), Value::String(origin.to_string())),
("signatures".into(), Value::Object(signatures.into())),
("uri".into(), Value::String(signature_uri)),
];
let mut authorization: Object = authorization.into();
if let Some(body) = body {
authorization.insert("content".to_owned(), body.clone());
}
let key = services
.server_keys
.get_verify_key(origin, &x_matrix.key)
.await
.map_err(|e| err!(Request(Forbidden(warn!("Failed to fetch signing keys: {e}")))))?;
let keys: PubKeys = [(x_matrix.key.to_string(), key.key)].into();
let keys: PubKeyMap = [(origin.to_string(), keys)].into();
if let Err(e) = ruma::signatures::verify_json(&keys, authorization) {
debug_error!("Failed to verify federation request from {origin}: {e}");
if request.parts.uri.to_string().contains('@') {
warn!(
"Request uri contained '@' character. Make sure your reverse proxy gives Conduit the raw uri (apache: \
use nocanon)"
);
}
return Err!(Request(Forbidden("Failed to verify X-Matrix signatures.")));
}
Ok(Auth {
origin: origin.to_owned().into(),
sender_user: None,
sender_device: None,
appservice_info: None,
})
}
fn auth_server_checks(services: &Services, x_matrix: &XMatrix) -> Result<()> {
if !services.server.config.allow_federation {
return Err!(Config("allow_federation", "Federation is disabled."));
}
let TypedHeader(Authorization(x_matrix)) = request
.parts
.extract::<TypedHeader<Authorization<XMatrix>>>()
.await
.map_err(|e| {
warn!("Missing or invalid Authorization header: {e}");
let msg = match e.reason() {
TypedHeaderRejectionReason::Missing => "Missing Authorization header.",
TypedHeaderRejectionReason::Error(_) => "Invalid X-Matrix signatures.",
_ => "Unknown header-related error",
};
Error::BadRequest(ErrorKind::forbidden(), msg)
})?;
let destination = services.globals.server_name();
if x_matrix.destination.as_deref() != Some(destination) {
return Err!(Request(Forbidden("Invalid destination.")));
}
let origin = &x_matrix.origin;
if services
.server
.config
.forbidden_remote_server_names
.contains(origin)
{
debug_info!("Refusing to accept inbound federation request to {origin}");
return Err!(Request(Forbidden("Federation with this homeserver is not allowed.")));
return Err!(Request(Forbidden(debug_warn!("Federation requests from {origin} denied."))));
}
let signatures =
BTreeMap::from_iter([(x_matrix.key.clone(), CanonicalJsonValue::String(x_matrix.sig.to_string()))]);
let signatures = BTreeMap::from_iter([(
origin.as_str().to_owned(),
CanonicalJsonValue::Object(
signatures
.into_iter()
.map(|(k, v)| (k.to_string(), v))
.collect(),
),
)]);
let server_destination = services.globals.server_name().as_str().to_owned();
if let Some(destination) = x_matrix.destination.as_ref() {
if destination != &server_destination {
return Err(Error::BadRequest(ErrorKind::forbidden(), "Invalid authorization."));
}
}
#[allow(clippy::or_fun_call)]
let signature_uri = CanonicalJsonValue::String(
request
.parts
.uri
.path_and_query()
.unwrap_or(&PathAndQuery::from_static("/"))
.to_string(),
);
let mut request_map = BTreeMap::from_iter([
(
"method".to_owned(),
CanonicalJsonValue::String(request.parts.method.to_string()),
),
("uri".to_owned(), signature_uri),
("origin".to_owned(), CanonicalJsonValue::String(origin.as_str().to_owned())),
("destination".to_owned(), CanonicalJsonValue::String(server_destination)),
("signatures".to_owned(), CanonicalJsonValue::Object(signatures)),
]);
if let Some(json_body) = json_body {
request_map.insert("content".to_owned(), json_body.clone());
};
let keys_result = services
.server_keys
.fetch_signing_keys_for_server(origin, vec![x_matrix.key.to_string()])
.await;
let keys = keys_result.map_err(|e| {
warn!("Failed to fetch signing keys: {e}");
Error::BadRequest(ErrorKind::forbidden(), "Failed to fetch signing keys.")
})?;
let pub_key_map = BTreeMap::from_iter([(origin.as_str().to_owned(), keys)]);
match ruma::signatures::verify_json(&pub_key_map, &request_map) {
Ok(()) => Ok(Auth {
origin: Some(origin.clone()),
sender_user: None,
sender_device: None,
appservice_info: None,
}),
Err(e) => {
warn!("Failed to verify json request from {origin}: {e}\n{request_map:?}");
if request.parts.uri.to_string().contains('@') {
warn!(
"Request uri contained '@' character. Make sure your reverse proxy gives Conduit the raw uri \
(apache: use nocanon)"
);
}
Err(Error::BadRequest(
ErrorKind::forbidden(),
"Failed to verify X-Matrix signatures.",
))
},
}
Ok(())
}
/// Extract and parse the `X-Matrix` Authorization header from a federation
/// request, mapping any header-extraction failure to a Forbidden error with
/// a reason appropriate to how the header was malformed or absent.
async fn parse_x_matrix(request: &mut Request) -> Result<XMatrix> {
	let extracted = request
		.parts
		.extract::<TypedHeader<Authorization<XMatrix>>>()
		.await;

	match extracted {
		Ok(TypedHeader(Authorization(x_matrix))) => Ok(x_matrix),
		Err(e) => {
			// Distinguish a missing header from one that failed to parse.
			let msg = match e.reason() {
				TypedHeaderRejectionReason::Missing => "Missing Authorization header.",
				TypedHeaderRejectionReason::Error(_) => "Invalid X-Matrix signatures.",
				_ => "Unknown header-related error",
			};

			Err(err!(Request(Forbidden(warn!("{msg}: {e}")))))
		},
	}
}

View file

@ -85,13 +85,10 @@ pub(crate) async fn create_invite_route(
.acl_check(invited_user.server_name(), &body.room_id)
.await?;
ruma::signatures::hash_and_sign_event(
services.globals.server_name().as_str(),
services.globals.keypair(),
&mut signed_event,
&body.room_version,
)
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?;
services
.server_keys
.hash_and_sign_event(&mut signed_event, &body.room_version)
.map_err(|e| err!(Request(InvalidParam("Failed to sign event: {e}"))))?;
// Generate event id
let event_id = EventId::parse(format!(

View file

@ -1,20 +1,16 @@
use std::{
collections::BTreeMap,
time::{Duration, SystemTime},
};
use std::{collections::BTreeMap, time::Duration};
use axum::{extract::State, response::IntoResponse, Json};
use conduit::{utils::timepoint_from_now, Result};
use ruma::{
api::{
federation::discovery::{get_server_keys, ServerSigningKeys, VerifyKey},
federation::discovery::{get_server_keys, ServerSigningKeys},
OutgoingResponse,
},
serde::{Base64, Raw},
MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId,
serde::Raw,
MilliSecondsSinceUnixEpoch,
};
use crate::Result;
/// # `GET /_matrix/key/v2/server`
///
/// Gets the public signing keys of this server.
@ -24,47 +20,33 @@ use crate::Result;
// Response type for this endpoint is Json because we need to calculate a
// signature for the response
pub(crate) async fn get_server_keys_route(State(services): State<crate::State>) -> Result<impl IntoResponse> {
let verify_keys: BTreeMap<OwnedServerSigningKeyId, VerifyKey> = BTreeMap::from([(
format!("ed25519:{}", services.globals.keypair().version())
.try_into()
.expect("found invalid server signing keys in DB"),
VerifyKey {
key: Base64::new(services.globals.keypair().public_key().to_vec()),
},
)]);
let server_name = services.globals.server_name();
let verify_keys = services.server_keys.verify_keys_for(server_name).await;
let server_key = ServerSigningKeys {
verify_keys,
server_name: server_name.to_owned(),
valid_until_ts: valid_until_ts(),
old_verify_keys: BTreeMap::new(),
signatures: BTreeMap::new(),
};
let mut response = serde_json::from_slice(
get_server_keys::v2::Response {
server_key: Raw::new(&ServerSigningKeys {
server_name: services.globals.server_name().to_owned(),
verify_keys,
old_verify_keys: BTreeMap::new(),
signatures: BTreeMap::new(),
valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time(
SystemTime::now()
.checked_add(Duration::from_secs(86400 * 7))
.expect("valid_until_ts should not get this high"),
)
.expect("time is valid"),
})
.expect("static conversion, no errors"),
}
.try_into_http_response::<Vec<u8>>()
.unwrap()
.body(),
)
.unwrap();
let response = get_server_keys::v2::Response {
server_key: Raw::new(&server_key)?,
}
.try_into_http_response::<Vec<u8>>()?;
ruma::signatures::sign_json(
services.globals.server_name().as_str(),
services.globals.keypair(),
&mut response,
)
.unwrap();
let mut response = serde_json::from_slice(response.body())?;
services.server_keys.sign_json(&mut response)?;
Ok(Json(response))
}
/// Expiry timestamp advertised for our server signing keys: one week
/// (7 * 86400 seconds) from the current time.
fn valid_until_ts() -> MilliSecondsSinceUnixEpoch {
	let week = Duration::from_secs(60 * 60 * 24 * 7);
	let when = timepoint_from_now(week).expect("SystemTime should not overflow");

	MilliSecondsSinceUnixEpoch::from_system_time(when).expect("UInt should not overflow")
}
/// # `GET /_matrix/key/v2/server/{keyId}`
///
/// Gets the public signing keys of this server.

View file

@ -21,7 +21,6 @@ use ruma::{
OwnedEventId, ServerName,
};
use serde_json::value::RawValue as RawJsonValue;
use tokio::sync::RwLock;
use crate::{
services::Services,
@ -109,22 +108,6 @@ async fn handle_pdus(
// and hashes checks
}
// We go through all the signatures we see on the PDUs and fetch the
// corresponding signing keys
let pub_key_map = RwLock::new(BTreeMap::new());
if !parsed_pdus.is_empty() {
services
.server_keys
.fetch_required_signing_keys(parsed_pdus.iter().map(|(_event_id, event, _room_id)| event), &pub_key_map)
.await
.unwrap_or_else(|e| warn!("Could not fetch all signatures for PDUs from {origin}: {e:?}"));
debug!(
elapsed = ?txn_start_time.elapsed(),
"Fetched signing keys"
);
}
let mut resolved_map = BTreeMap::new();
for (event_id, value, room_id) in parsed_pdus {
let pdu_start_time = Instant::now();
@ -134,17 +117,18 @@ async fn handle_pdus(
.mutex_federation
.lock(&room_id)
.await;
resolved_map.insert(
event_id.clone(),
services
.rooms
.event_handler
.handle_incoming_pdu(origin, &room_id, &event_id, value, true, &pub_key_map)
.handle_incoming_pdu(origin, &room_id, &event_id, value, true)
.await
.map(|_| ()),
);
drop(mutex_lock);
drop(mutex_lock);
debug!(
pdu_elapsed = ?pdu_start_time.elapsed(),
txn_elapsed = ?txn_start_time.elapsed(),

View file

@ -1,6 +1,6 @@
#![allow(deprecated)]
use std::{borrow::Borrow, collections::BTreeMap};
use std::borrow::Borrow;
use axum::extract::State;
use conduit::{err, pdu::gen_event_id_canonical_json, utils::IterStream, warn, Error, Result};
@ -15,7 +15,6 @@ use ruma::{
};
use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
use service::Services;
use tokio::sync::RwLock;
use crate::Ruma;
@ -43,9 +42,6 @@ async fn create_join_event(
.await
.map_err(|_| err!(Request(NotFound("Event state not found."))))?;
let pub_key_map = RwLock::new(BTreeMap::new());
// let mut auth_cache = EventMap::new();
// We do not add the event_id field to the pdu here because of signature and
// hashes checks
let room_version_id = services.rooms.state.get_room_version(room_id).await?;
@ -137,20 +133,12 @@ async fn create_join_event(
.await
.unwrap_or_default()
{
ruma::signatures::hash_and_sign_event(
services.globals.server_name().as_str(),
services.globals.keypair(),
&mut value,
&room_version_id,
)
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?;
services
.server_keys
.hash_and_sign_event(&mut value, &room_version_id)
.map_err(|e| err!(Request(InvalidParam("Failed to sign event: {e}"))))?;
}
services
.server_keys
.fetch_required_signing_keys([&value], &pub_key_map)
.await?;
let origin: OwnedServerName = serde_json::from_value(
serde_json::to_value(
value
@ -171,7 +159,7 @@ async fn create_join_event(
let pdu_id: Vec<u8> = services
.rooms
.event_handler
.handle_incoming_pdu(&origin, room_id, &event_id, value.clone(), true, &pub_key_map)
.handle_incoming_pdu(&origin, room_id, &event_id, value.clone(), true)
.await?
.ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Could not accept as timeline event."))?;

View file

@ -1,7 +1,5 @@
#![allow(deprecated)]
use std::collections::BTreeMap;
use axum::extract::State;
use conduit::{utils::ReadyExt, Error, Result};
use ruma::{
@ -13,7 +11,6 @@ use ruma::{
OwnedServerName, OwnedUserId, RoomId, ServerName,
};
use serde_json::value::RawValue as RawJsonValue;
use tokio::sync::RwLock;
use crate::{
service::{pdu::gen_event_id_canonical_json, Services},
@ -60,8 +57,6 @@ async fn create_leave_event(
.acl_check(origin, room_id)
.await?;
let pub_key_map = RwLock::new(BTreeMap::new());
// We do not add the event_id field to the pdu here because of signature and
// hashes checks
let room_version_id = services.rooms.state.get_room_version(room_id).await?;
@ -154,21 +149,17 @@ async fn create_leave_event(
)
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "origin is not a server name."))?;
services
.server_keys
.fetch_required_signing_keys([&value], &pub_key_map)
.await?;
let mutex_lock = services
.rooms
.event_handler
.mutex_federation
.lock(room_id)
.await;
let pdu_id: Vec<u8> = services
.rooms
.event_handler
.handle_incoming_pdu(&origin, room_id, &event_id, value, true, &pub_key_map)
.handle_incoming_pdu(&origin, room_id, &event_id, value, true)
.await?
.ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Could not accept as timeline event."))?;