Compare commits


20 commits

Author SHA1 Message Date
strawberry
2fad03597a a
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-03-17 17:38:59 -04:00
strawberry
7f22f0e3a6 keypair logging adjustments
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-03-17 15:09:36 -04:00
strawberry
a0161ed7c1 config option to allow incoming remote read receipts
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-03-17 12:16:04 -04:00
strawberry
41d9e24c03 ignore deactivated users and remote user profiles wih forbidden_usernames
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-03-17 12:11:24 -04:00
strawberry
3ac5368578 bump conduwuit version to 0.1.8
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-03-17 10:50:39 -04:00
Jason Volk
d2bb3dc93f add flush suite to sending service; trigger on read receipts.
Signed-off-by: Jason Volk <jason@zemos.net>
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-03-17 10:50:39 -04:00
Jason Volk
3af303e52b complete federation destination caching preempting getaddrinfo(3).
fixed some clippy lints and spacing adjusted

Signed-off-by: Jason Volk <jason@zemos.net>
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-03-17 10:50:39 -04:00
Jason Volk
72c97434b0 add remove_batch with transaction to database abstraction.
adjusted to make building sqlite happy again

Signed-off-by: Jason Volk <jason@zemos.net>
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-03-17 10:50:39 -04:00
Jason Volk
73c42991e9 clear dns and tls-override caches from !admin command.
Signed-off-by: Jason Volk <jason@zemos.net>
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-03-17 10:50:39 -04:00
strawberry
e982428f07 bump async-trait and ruma
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-03-17 10:50:39 -04:00
strawberry
70b1bdd655 slight inclusive wording changes
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-03-17 10:50:39 -04:00
strawberry
6d4163d410 track media uploads by user
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-03-17 10:50:39 -04:00
strawberry
a33b33cab5 document forbidden room aliases and usernames
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-03-17 10:50:39 -04:00
Matthias Ahouansou
c14b28b408 feat(spaces): hierarchy over federation
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-03-17 10:50:30 -04:00
strawberry
8972487691 check allow_federation in send_federation_request
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-03-16 16:07:42 -04:00
Matthias Ahouansou
aec63c29e1 refactor: check if federation is disabled inside the authcheck where possible
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-03-16 16:05:52 -04:00
Matthias Ahouansou
72182f3714 fix: avoid panics when admin room is not available
Co-authored-by: strawberry <strawberry@puppygock.gay>
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-03-16 14:11:03 -04:00
strawberry
94b4d584a6 admin command to see a room's full state from our database
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-03-16 13:23:57 -04:00
strawberry
41f27dc949 slight wording updates
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-03-16 13:23:57 -04:00
strawberry
29f5b58098 remove rocksdb optimize_level_style_compaction
Signed-off-by: strawberry <strawberry@puppygock.gay>
2024-03-16 13:23:57 -04:00
30 changed files with 1770 additions and 706 deletions

7
Cargo.lock generated
View file

@ -96,9 +96,9 @@ dependencies = [
[[package]]
name = "async-trait"
version = "0.1.77"
version = "0.1.78"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9"
checksum = "461abc97219de0eaaf81fe3ef974a540158f3d079c2ab200f891f1a2ef201e85"
dependencies = [
"proc-macro2",
"quote",
@ -412,7 +412,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b"
[[package]]
name = "conduit"
version = "0.7.0-alpha+conduwuit-0.1.7"
version = "0.7.0-alpha+conduwuit-0.1.8"
dependencies = [
"argon2",
"async-trait",
@ -2062,6 +2062,7 @@ dependencies = [
"tokio-rustls",
"tokio-socks",
"tower-service",
"trust-dns-resolver",
"url",
"wasm-bindgen",
"wasm-bindgen-futures",

View file

@ -6,7 +6,7 @@ authors = ["strawberry <strawberry@puppygock.gay>", "timokoesters <timo@koesters
homepage = "https://puppygock.gay/conduwuit"
repository = "https://gitlab.com/girlbossceo/conduwuit"
readme = "README.md"
version = "0.7.0-alpha+conduwuit-0.1.7"
version = "0.7.0-alpha+conduwuit-0.1.8"
edition = "2021"
# See also `rust-toolchain.toml`
@ -48,7 +48,7 @@ serde_html_form = "0.2.5"
hmac = "0.12.1"
sha-1 = "0.10.1"
async-trait = "0.1.77"
async-trait = "0.1.78"
# used for checking if an IP is in specific subnets / CIDR ranges easier
ipaddress = "0.1.3"
@ -106,6 +106,7 @@ default-features = false
features = [
"rustls-tls-native-roots",
"socks",
"trust-dns",
]
# all the serde stuff

View file

@ -161,7 +161,19 @@ registration_token = "change this token for something specific to your server"
# controls whether non-admin local users are forbidden from sending room invites (local and remote),
# and if non-admin users can receive remote room invites. admins are always allowed to send and receive all room invites.
# defaults to false
# block_non_admin_invites = falsse
# block_non_admin_invites = false
# List of forbidden username patterns/strings. Values in this list are matched as *contains*.
# This is checked upon username availability check, registration, and startup as warnings if any local users in your database
# have a forbidden username.
# No default.
# forbidden_usernames = []
# List of forbidden room aliases and room IDs as patterns/strings. Values in this list are matched as *contains*.
# This is checked upon room alias creation, custom room ID creation if used, and startup as warnings if any room aliases
# in your database have a forbidden room alias/ID.
# No default.
# forbidden_room_names = []
# Set this to true to allow your server's public room directory to be federated.
# Set this to false to protect against /publicRooms spiders, but will forbid external users
@ -345,7 +357,7 @@ url_preview_check_root_domain = false
### Presence
### Presence / Typing Indicators / Read Receipts
# Config option to control local (your server only) presence updates/requests. Defaults to false.
# Note that presence on conduwuit is very fast unlike Synapse's.
@ -373,6 +385,9 @@ url_preview_check_root_domain = false
# Config option to control how many seconds before presence updates that you are offline. Defaults to 30 minutes.
#presence_offline_timeout_s = 1800
# Config option to control whether we should receive remote incoming read receipts.
# Defaults to true.
#allow_incoming_read_receipts = true
# Other options not in [global]:
@ -387,4 +402,4 @@ url_preview_check_root_domain = false
# Whether to listen and allow for HTTP and HTTPS connections (insecure!)
# This config option is only available if conduwuit was built with `axum_dual_protocol` feature (not default feature)
# Defaults to false
#dual_protocol = false
#dual_protocol = false
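
As an illustrative sketch only (the values below are hypothetical, not defaults, and are not part of this diff): the options documented above could be set under [global] like this, keeping in mind that forbidden_usernames and forbidden_room_names are matched as *contains* patterns and that allow_incoming_read_receipts defaults to true.

forbidden_usernames = ["admin", "abuse"]
forbidden_room_names = ["badroom"]
allow_incoming_read_receipts = false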

View file

@ -275,10 +275,14 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
// If this is the first real user, grant them admin privileges except for guest
// users Note: the server user, @conduit:servername, is generated first
if services().users.count()? == 2 && !is_guest {
services().admin.make_user_admin(&user_id, displayname).await?;
if !is_guest {
if let Some(admin_room) = services().admin.get_admin_room()? {
if services().rooms.state_cache.room_joined_count(&admin_room)? == Some(1) {
services().admin.make_user_admin(&user_id, displayname).await?;
warn!("Granting {} admin privileges as the first user", user_id);
warn!("Granting {} admin privileges as the first user", user_id);
}
}
}
Ok(register::v3::Response {

View file

@ -138,6 +138,8 @@ pub async fn get_media_preview_v1_route(
/// - Some metadata will be saved in the database
/// - Media will be saved in the media/ directory
pub async fn create_content_route(body: Ruma<create_content::v3::Request>) -> Result<create_content::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let mxc = format!(
"mxc://{}/{}",
services().globals.server_name(),
@ -147,6 +149,7 @@ pub async fn create_content_route(body: Ruma<create_content::v3::Request>) -> Re
services()
.media
.create(
Some(sender_user.clone()),
mxc.clone(),
body.filename.as_ref().map(|filename| "inline; filename=".to_owned() + filename).as_deref(),
body.content_type.as_deref(),
@ -175,6 +178,8 @@ pub async fn create_content_route(body: Ruma<create_content::v3::Request>) -> Re
pub async fn create_content_v1_route(
body: Ruma<create_content::v3::Request>,
) -> Result<RumaResponse<create_content::v3::Response>> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let mxc = format!(
"mxc://{}/{}",
services().globals.server_name(),
@ -184,6 +189,7 @@ pub async fn create_content_v1_route(
services()
.media
.create(
Some(sender_user.clone()),
mxc.clone(),
body.filename.as_ref().map(|filename| "inline; filename=".to_owned() + filename).as_deref(),
body.content_type.as_deref(),
@ -231,6 +237,7 @@ pub async fn get_remote_content(
services()
.media
.create(
None,
mxc.to_owned(),
content_response.content_disposition.as_deref(),
content_response.content_type.as_deref(),
@ -484,6 +491,7 @@ pub async fn get_content_thumbnail_route(
services()
.media
.upload_thumbnail(
None,
mxc,
None,
get_thumbnail_response.content_type.as_deref(),
@ -566,6 +574,7 @@ pub async fn get_content_thumbnail_v1_route(
services()
.media
.upload_thumbnail(
None,
mxc,
None,
get_thumbnail_response.content_type.as_deref(),
@ -589,7 +598,7 @@ async fn download_image(client: &reqwest::Client, url: &str) -> Result<UrlPrevie
utils::random_string(MXC_LENGTH)
);
services().media.create(mxc.clone(), None, None, &image).await?;
services().media.create(None, mxc.clone(), None, None, &image).await?;
let (width, height) = match ImgReader::new(Cursor::new(&image)).with_guessed_format() {
Err(_) => (None, None),

View file

@ -81,6 +81,8 @@ pub async fn set_read_marker_route(body: Ruma<set_read_marker::v3::Request>) ->
room_id: body.room_id.clone(),
},
)?;
services().sending.flush_room(&body.room_id)?;
}
Ok(set_read_marker::v3::Response {})
@ -136,6 +138,8 @@ pub async fn create_receipt_route(body: Ruma<create_receipt::v3::Request>) -> Re
room_id: body.room_id.clone(),
},
)?;
services().sending.flush_room(&body.room_id)?;
},
create_receipt::v3::ReceiptType::ReadPrivate => {
let count = services()

View file

@ -1,6 +1,11 @@
use ruma::api::client::space::get_hierarchy;
use std::str::FromStr;
use crate::{services, Result, Ruma};
use ruma::{
api::client::{error::ErrorKind, space::get_hierarchy},
UInt,
};
use crate::{service::rooms::spaces::PagnationToken, services, Error, Result, Ruma};
/// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy``
///
@ -9,11 +14,32 @@ use crate::{services, Result, Ruma};
pub async fn get_hierarchy_route(body: Ruma<get_hierarchy::v1::Request>) -> Result<get_hierarchy::v1::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let skip = body.from.as_ref().and_then(|s| s.parse::<usize>().ok()).unwrap_or(0);
let limit = body.limit.unwrap_or_else(|| UInt::from(10_u32)).min(UInt::from(100_u32));
let limit = body.limit.map_or(10, u64::from).min(100) as usize;
let max_depth = body.max_depth.unwrap_or_else(|| UInt::from(3_u32)).min(UInt::from(10_u32));
let max_depth = body.max_depth.map_or(3, u64::from).min(10) as usize + 1; // +1 to skip the space room itself
let key = body.from.as_ref().and_then(|s| PagnationToken::from_str(s).ok());
services().rooms.spaces.get_hierarchy(sender_user, &body.room_id, limit, skip, max_depth, body.suggested_only).await
// Should prevent unexpeded behaviour in (bad) clients
if let Some(ref token) = key {
if token.suggested_only != body.suggested_only || token.max_depth != max_depth {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"suggested_only and max_depth cannot change on paginated requests",
));
}
}
services()
.rooms
.spaces
.get_client_hierarchy(
sender_user,
&body.room_id,
u64::from(limit) as usize,
key.map_or(0, |token| u64::from(token.skip) as usize),
u64::from(max_depth) as usize,
body.suggested_only,
)
.await
}

View file

@ -75,7 +75,7 @@ pub async fn send_state_event_for_empty_key_route(
.into())
}
/// # `GET /_matrix/client/r0/rooms/{roomid}/state`
/// # `GET /_matrix/client/v3/rooms/{roomid}/state`
///
/// Get all state events for a room.
///

View file

@ -153,6 +153,10 @@ where
// treat non-appservice registrations as None authentication
AuthScheme::AppserviceToken => (None, None, None, false),
AuthScheme::ServerSignatures => {
if !services().globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled."));
}
let TypedHeader(Authorization(x_matrix)) =
parts.extract::<TypedHeader<Authorization<XMatrix>>>().await.map_err(|e| {
warn!("Missing or invalid Authorization header: {}", e);

View file

@ -28,6 +28,7 @@ use ruma::{
keys::{claim_keys, get_keys},
membership::{create_invite, create_join_event, prepare_join_event},
query::{get_profile_information, get_room_information},
space::get_hierarchy,
transactions::{
edu::{DeviceListUpdateContent, DirectDeviceContent, Edu, SigningKeyUpdateContent},
send_transaction_message,
@ -364,7 +365,10 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDe
None => {
if let Some(pos) = destination_str.find(':') {
debug!("2: Hostname with included port");
let (host, port) = destination_str.split_at(pos);
query_and_cache_override(host, host, port.parse::<u16>().unwrap_or(8448)).await;
FedDest::Named(host.to_owned(), port.to_owned())
} else {
debug!("Requesting well known for {destination}");
@ -377,30 +381,23 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDe
None => {
if let Some(pos) = delegated_hostname.find(':') {
debug!("3.2: Hostname with port in .well-known file");
let (host, port) = delegated_hostname.split_at(pos);
query_and_cache_override(host, host, port.parse::<u16>().unwrap_or(8448)).await;
FedDest::Named(host.to_owned(), port.to_owned())
} else {
debug!("Delegated hostname has no port in this branch");
if let Some(hostname_override) = query_srv_record(&delegated_hostname).await {
debug!("3.3: SRV lookup successful");
let force_port = hostname_override.port();
if let Ok(override_ip) = services()
.globals
.dns_resolver()
.lookup_ip(hostname_override.hostname())
.await
{
services().globals.tls_name_override.write().unwrap().insert(
delegated_hostname.clone(),
(override_ip.iter().collect(), force_port.unwrap_or(8448)),
);
} else {
debug!(
"Using SRV record {}, but could not resolve to IP",
hostname_override.hostname()
);
}
let force_port = hostname_override.port();
query_and_cache_override(
&delegated_hostname,
&hostname_override.hostname(),
force_port.unwrap_or(8448),
)
.await;
if let Some(port) = force_port {
FedDest::Named(delegated_hostname, format!(":{port}"))
@ -409,6 +406,7 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDe
}
} else {
debug!("3.4: No SRV records, just use the hostname from .well-known");
query_and_cache_override(&delegated_hostname, &delegated_hostname, 8448).await;
add_port_to_hostname(&delegated_hostname)
}
}
@ -420,21 +418,14 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDe
match query_srv_record(&destination_str).await {
Some(hostname_override) => {
debug!("4: SRV record found");
let force_port = hostname_override.port();
if let Ok(override_ip) =
services().globals.dns_resolver().lookup_ip(hostname_override.hostname()).await
{
services().globals.tls_name_override.write().unwrap().insert(
hostname.clone(),
(override_ip.iter().collect(), force_port.unwrap_or(8448)),
);
} else {
debug!(
"Using SRV record {}, but could not resolve to IP",
hostname_override.hostname()
);
}
let force_port = hostname_override.port();
query_and_cache_override(
&hostname,
&hostname_override.hostname(),
force_port.unwrap_or(8448),
)
.await;
if let Some(port) = force_port {
FedDest::Named(hostname.clone(), format!(":{port}"))
@ -444,6 +435,7 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDe
},
None => {
debug!("5: No SRV record found");
query_and_cache_override(&destination_str, &destination_str, 8448).await;
add_port_to_hostname(&destination_str)
},
}
@ -452,7 +444,6 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDe
}
},
};
debug!("Actual destination: {actual_destination:?}");
// Can't use get_ip_with_port here because we don't want to add a port
// to an IP address if it wasn't specified
@ -466,9 +457,29 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDe
} else {
FedDest::Named(hostname, ":8448".to_owned())
};
debug!("Actual destination: {actual_destination:?} hostname: {hostname:?}");
(actual_destination, hostname)
}
async fn query_and_cache_override(overname: &'_ str, hostname: &'_ str, port: u16) {
match services().globals.dns_resolver().lookup_ip(hostname.to_owned()).await {
Ok(override_ip) => {
debug!("Caching result of {:?} overriding {:?}", hostname, overname);
services()
.globals
.tls_name_override
.write()
.unwrap()
.insert(overname.to_owned(), (override_ip.iter().collect(), port));
},
Err(e) => {
debug!("Got {:?} for {:?} to override {:?}", e.kind(), hostname, overname);
},
}
}
async fn query_srv_record(hostname: &'_ str) -> Option<FedDest> {
fn handle_successful_srv(srv: &SrvLookup) -> Option<FedDest> {
srv.iter().next().map(|result| {
@ -500,6 +511,10 @@ async fn query_srv_record(hostname: &'_ str) -> Option<FedDest> {
}
async fn request_well_known(destination: &str) -> Option<String> {
if !services().globals.tls_name_override.read().unwrap().contains_key(destination) {
query_and_cache_override(destination, destination, 8448).await;
}
let response = services()
.globals
.default_client()
@ -619,10 +634,6 @@ pub async fn get_server_keys_deprecated_route() -> impl IntoResponse { get_serve
pub async fn get_public_rooms_filtered_route(
body: Ruma<get_public_rooms_filtered::v1::Request>,
) -> Result<get_public_rooms_filtered::v1::Response> {
if !services().globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled."));
}
if !services().globals.allow_public_room_directory_over_federation() {
return Err(Error::bad_config("Room directory is not public."));
}
@ -650,10 +661,6 @@ pub async fn get_public_rooms_filtered_route(
pub async fn get_public_rooms_route(
body: Ruma<get_public_rooms::v1::Request>,
) -> Result<get_public_rooms::v1::Response> {
if !services().globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled."));
}
if !services().globals.allow_public_room_directory_over_federation() {
return Err(Error::bad_config("Room directory is not public."));
}
@ -707,10 +714,6 @@ pub fn parse_incoming_pdu(pdu: &RawJsonValue) -> Result<(OwnedEventId, Canonical
pub async fn send_transaction_message_route(
body: Ruma<send_transaction_message::v1::Request>,
) -> Result<send_transaction_message::v1::Response> {
if !services().globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled."));
}
let sender_servername = body.sender_servername.as_ref().expect("server is authenticated");
let mut resolved_map = BTreeMap::new();
@ -820,6 +823,10 @@ pub async fn send_transaction_message_route(
}
},
Edu::Receipt(receipt) => {
if !services().globals.allow_incoming_read_receipts() {
continue;
}
for (room_id, room_updates) in receipt.receipts {
for (user_id, user_updates) in room_updates.read {
if let Some((event_id, _)) = user_updates
@ -946,10 +953,6 @@ pub async fn send_transaction_message_route(
/// - Only works if a user of this server is currently invited or joined the
/// room
pub async fn get_event_route(body: Ruma<get_event::v1::Request>) -> Result<get_event::v1::Response> {
if !services().globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled."));
}
let sender_servername = body.sender_servername.as_ref().expect("server is authenticated");
let event = services().rooms.timeline.get_pdu_json(&body.event_id)?.ok_or_else(|| {
@ -985,10 +988,6 @@ pub async fn get_event_route(body: Ruma<get_event::v1::Request>) -> Result<get_e
/// Retrieves events from before the sender joined the room, if the room's
/// history visibility allows.
pub async fn get_backfill_route(body: Ruma<get_backfill::v1::Request>) -> Result<get_backfill::v1::Response> {
if !services().globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled."));
}
let sender_servername = body.sender_servername.as_ref().expect("server is authenticated");
debug!("Got backfill request from: {}", sender_servername);
@ -1041,10 +1040,6 @@ pub async fn get_backfill_route(body: Ruma<get_backfill::v1::Request>) -> Result
pub async fn get_missing_events_route(
body: Ruma<get_missing_events::v1::Request>,
) -> Result<get_missing_events::v1::Response> {
if !services().globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled."));
}
let sender_servername = body.sender_servername.as_ref().expect("server is authenticated");
if !services().rooms.state_cache.server_in_room(sender_servername, &body.room_id)? {
@ -1118,10 +1113,6 @@ pub async fn get_missing_events_route(
pub async fn get_event_authorization_route(
body: Ruma<get_event_authorization::v1::Request>,
) -> Result<get_event_authorization::v1::Response> {
if !services().globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled."));
}
let sender_servername = body.sender_servername.as_ref().expect("server is authenticated");
if !services().rooms.state_cache.server_in_room(sender_servername, &body.room_id)? {
@ -1157,10 +1148,6 @@ pub async fn get_event_authorization_route(
///
/// Retrieves the current state of the room.
pub async fn get_room_state_route(body: Ruma<get_room_state::v1::Request>) -> Result<get_room_state::v1::Response> {
if !services().globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled."));
}
let sender_servername = body.sender_servername.as_ref().expect("server is authenticated");
if !services().rooms.state_cache.server_in_room(sender_servername, &body.room_id)? {
@ -1211,10 +1198,6 @@ pub async fn get_room_state_route(body: Ruma<get_room_state::v1::Request>) -> Re
pub async fn get_room_state_ids_route(
body: Ruma<get_room_state_ids::v1::Request>,
) -> Result<get_room_state_ids::v1::Response> {
if !services().globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled."));
}
let sender_servername = body.sender_servername.as_ref().expect("server is authenticated");
if !services().rooms.state_cache.server_in_room(sender_servername, &body.room_id)? {
@ -1253,10 +1236,6 @@ pub async fn get_room_state_ids_route(
pub async fn create_join_event_template_route(
body: Ruma<prepare_join_event::v1::Request>,
) -> Result<prepare_join_event::v1::Response> {
if !services().globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled."));
}
if !services().rooms.metadata.exists(&body.room_id)? {
return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server."));
}
@ -1343,10 +1322,6 @@ pub async fn create_join_event_template_route(
async fn create_join_event(
sender_servername: &ServerName, room_id: &RoomId, pdu: &RawJsonValue,
) -> Result<create_join_event::v1::RoomState> {
if !services().globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled."));
}
if !services().rooms.metadata.exists(room_id)? {
return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server."));
}
@ -1500,10 +1475,6 @@ pub async fn create_join_event_v2_route(
///
/// Invites a remote user to a room.
pub async fn create_invite_route(body: Ruma<create_invite::v2::Request>) -> Result<create_invite::v2::Response> {
if !services().globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled."));
}
let sender_servername = body.sender_servername.as_ref().expect("server is authenticated");
services().rooms.event_handler.acl_check(sender_servername, &body.room_id)?;
@ -1587,7 +1558,7 @@ pub async fn create_invite_route(body: Ruma<create_invite::v2::Request>) -> Resu
let mut event: JsonObject = serde_json::from_str(body.event.get())
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event bytes."))?;
event.insert("event_id".to_owned(), "$dummy".into());
event.insert("event_id".to_owned(), "$placeholder".into());
let pdu: PduEvent = serde_json::from_value(event.into()).map_err(|e| {
warn!("Invalid invite event: {}", e);
@ -1622,10 +1593,6 @@ pub async fn create_invite_route(body: Ruma<create_invite::v2::Request>) -> Resu
///
/// Gets information on all devices of the user.
pub async fn get_devices_route(body: Ruma<get_devices::v1::Request>) -> Result<get_devices::v1::Response> {
if !services().globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled."));
}
if body.user_id.server_name() != services().globals.server_name() {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
@ -1673,10 +1640,6 @@ pub async fn get_devices_route(body: Ruma<get_devices::v1::Request>) -> Result<g
pub async fn get_room_information_route(
body: Ruma<get_room_information::v1::Request>,
) -> Result<get_room_information::v1::Response> {
if !services().globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled."));
}
let room_id = services()
.rooms
.alias
@ -1695,10 +1658,6 @@ pub async fn get_room_information_route(
pub async fn get_profile_information_route(
body: Ruma<get_profile_information::v1::Request>,
) -> Result<get_profile_information::v1::Response> {
if !services().globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled."));
}
if body.user_id.server_name() != services().globals.server_name() {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
@ -1738,10 +1697,6 @@ pub async fn get_profile_information_route(
///
/// Gets devices and identity keys for the given users.
pub async fn get_keys_route(body: Ruma<get_keys::v1::Request>) -> Result<get_keys::v1::Response> {
if !services().globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled."));
}
if body.device_keys.iter().any(|(u, _)| u.server_name() != services().globals.server_name()) {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
@ -1768,10 +1723,6 @@ pub async fn get_keys_route(body: Ruma<get_keys::v1::Request>) -> Result<get_key
///
/// Claims one-time keys.
pub async fn claim_keys_route(body: Ruma<claim_keys::v1::Request>) -> Result<claim_keys::v1::Response> {
if !services().globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled."));
}
if body.one_time_keys.iter().any(|(u, _)| u.server_name() != services().globals.server_name()) {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
@ -1788,6 +1739,10 @@ pub async fn claim_keys_route(body: Ruma<claim_keys::v1::Request>) -> Result<cla
/// # `GET /.well-known/matrix/server`
pub async fn well_known_server_route() -> Result<impl IntoResponse> {
if !services().globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled."));
}
let server_url = match services().globals.well_known_server() {
Some(url) => url.clone(),
None => return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")),
@ -1798,6 +1753,20 @@ pub async fn well_known_server_route() -> Result<impl IntoResponse> {
})))
}
/// # `GET /_matrix/federation/v1/hierarchy/{roomId}`
///
/// Gets the space tree in a depth-first manner to locate child rooms of a given
/// space.
pub async fn get_hierarchy_route(body: Ruma<get_hierarchy::v1::Request>) -> Result<get_hierarchy::v1::Response> {
let sender_servername = body.sender_servername.as_ref().expect("server is authenticated");
if services().rooms.metadata.exists(&body.room_id)? {
services().rooms.spaces.get_federation_hierarchy(&body.room_id, sender_servername, body.suggested_only).await
} else {
Err(Error::BadRequest(ErrorKind::NotFound, "Room does not exist."))
}
}
#[cfg(test)]
mod tests {
use super::{add_port_to_hostname, get_ip_with_port, FedDest};

View file

@ -2,7 +2,7 @@
use std::path::PathBuf;
use clap::Parser;
use clap::{Parser, Subcommand};
/// Commandline arguments
#[derive(Parser, Debug)]
@ -11,6 +11,43 @@ pub struct Args {
#[arg(short, long)]
/// Optional argument to the path of a conduwuit config TOML file
pub config: Option<PathBuf>,
#[clap(subcommand)]
/// Optional subcommand to export the homeserver signing key and exit
pub signing_key: Option<SigningKey>,
}
#[derive(Debug, Subcommand)]
pub enum SigningKey {
/// Filesystem path to export the homeserver signing key to.
/// The output will be: `ed25519 <version> <keypair base64 encoded>` which
/// is Synapse's format
ExportPath {
path: PathBuf,
},
/// Filesystem path for conduwuit to attempt to read and import the
/// homeserver signing key. The expected format is Synapse's format:
/// `ed25519 <version> <keypair base64 encoded>`
ImportPath {
path: PathBuf,
#[arg(long)]
/// Optional argument to import the key but don't overwrite our signing
/// key, and instead add it to `old_verify_keys`. This field tells other
/// servers that this is our old public key that can still be used to
/// sign old events.
///
/// See https://spec.matrix.org/v1.9/server-server-api/#get_matrixkeyv2server for more details.
add_to_old_public_keys: bool,
#[arg(long)]
/// Timestamp (`expired_ts`) in seconds since UNIX epoch that the old
/// homeserver signing key stopped being used.
///
/// See https://spec.matrix.org/v1.9/server-server-api/#get_matrixkeyv2server for more details.
timestamp: u64,
},
}
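
For illustration only: per the doc comments above, both the export and import subcommands use Synapse's single-line signing key format. A hypothetical key file in that format (the version string and base64 payload here are placeholders, not real key material) would contain one line such as:

ed25519 a_version AAAAexamplebase64payloadnotrealkeymaterialAAAA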
/// Parse commandline arguments into structured data

View file

@ -144,6 +144,9 @@ pub struct Config {
#[serde(default = "default_presence_offline_timeout_s")]
pub presence_offline_timeout_s: u64,
#[serde(default = "true_fn")]
pub allow_incoming_read_receipts: bool,
#[serde(default)]
pub zstd_compression: bool,
@ -282,6 +285,10 @@ impl fmt::Display for Config {
"Allow local presence requests (updates)",
&self.allow_local_presence.to_string(),
),
(
"Allow incoming remote read receipts",
&self.allow_incoming_read_receipts.to_string(),
),
(
"Block non-admin room invites (local and remote, admins can still send and receive invites)",
&self.block_non_admin_invites.to_string(),

View file

@ -43,6 +43,10 @@ pub(crate) trait KvTree: Send + Sync {
fn remove(&self, key: &[u8]) -> Result<()>;
#[allow(dead_code)]
#[cfg(feature = "rocksdb")]
fn remove_batch(&self, _iter: &mut dyn Iterator<Item = Vec<u8>>) -> Result<()> { unimplemented!() }
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a>;
fn iter_from<'a>(&'a self, from: &[u8], backwards: bool) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a>;

View file

@ -101,7 +101,6 @@ fn db_options(rocksdb_cache: &rust_rocksdb::Cache, config: &Config) -> rust_rock
threads.try_into().expect("Failed to convert \"rocksdb_parallelism_threads\" usize into i32"),
);
db_opts.set_compression_type(rocksdb_compression_algo);
db_opts.optimize_level_style_compaction(10 * 1024 * 1024);
// https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning
db_opts.set_level_compaction_dynamic_level_bytes(true);
@ -252,6 +251,18 @@ impl KvTree for RocksDbEngineTree<'_> {
Ok(self.db.rocks.delete_cf_opt(&self.cf(), key, &writeoptions)?)
}
fn remove_batch(&self, iter: &mut dyn Iterator<Item = Vec<u8>>) -> Result<()> {
let writeoptions = rust_rocksdb::WriteOptions::default();
let mut batch = WriteBatchWithTransaction::<false>::default();
for key in iter {
batch.delete_cf(&self.cf(), key);
}
Ok(self.db.rocks.write_opt(batch, &writeoptions)?)
}
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
let mut readoptions = rust_rocksdb::ReadOptions::default();
readoptions.set_total_order_seek(true);

View file

@ -8,6 +8,7 @@ use ruma::{
signatures::Ed25519KeyPair,
DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, UserId,
};
use tracing::debug;
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
@ -52,7 +53,7 @@ impl service::globals::Data for KeyValueDatabase {
let mut futures = FuturesUnordered::new();
// Return when *any* user changed his key
// Return when *any* user changed their key
// TODO: only send for user they share a room with
futures.push(self.todeviceid_events.watch_prefix(&userdeviceid_prefix));
@ -185,7 +186,9 @@ lasttimelinecount_cache: {lasttimelinecount_cache}\n"
fn load_keypair(&self) -> Result<Ed25519KeyPair> {
let keypair_bytes = self.global.get(b"keypair")?.map_or_else(
|| {
debug!("No keypair found in database, assuming this is a new deployment and generating one.");
let keypair = utils::generate_keypair();
debug!("Generated keypair bytes: {:?}", keypair);
self.global.insert(b"keypair", &keypair)?;
Ok::<_, Error>(keypair)
},
@ -200,6 +203,7 @@ lasttimelinecount_cache: {lasttimelinecount_cache}\n"
)
.map_err(|_| Error::bad_database("Invalid version bytes in keypair."))
.and_then(|version| {
debug!("Keypair version: {version}");
// 2. key
parts
.next()
@ -207,8 +211,11 @@ lasttimelinecount_cache: {lasttimelinecount_cache}\n"
.map(|key| (version, key))
})
.and_then(|(version, key)| {
Ed25519KeyPair::from_der(key, version)
.map_err(|_| Error::bad_database("Private or public keys are invalid."))
debug!("Keypair bytes: {:?}", key);
let keypair = Ed25519KeyPair::from_der(key, version)
.map_err(|_| Error::bad_database("Private or public keys are invalid."));
debug!("Private and public key: {keypair:?}");
keypair
})
}

View file

@ -4,12 +4,14 @@ use tracing::debug;
use crate::{
database::KeyValueDatabase,
service::{self, media::UrlPreviewData},
utils, Error, Result,
utils::string_from_bytes,
Error, Result,
};
impl service::media::Data for KeyValueDatabase {
fn create_file_metadata(
&self, mxc: String, width: u32, height: u32, content_disposition: Option<&str>, content_type: Option<&str>,
&self, sender_user: Option<&str>, mxc: String, width: u32, height: u32, content_disposition: Option<&str>,
content_type: Option<&str>,
) -> Result<Vec<u8>> {
let mut key = mxc.as_bytes().to_vec();
key.push(0xFF);
@ -22,6 +24,12 @@ impl service::media::Data for KeyValueDatabase {
self.mediaid_file.insert(&key, &[])?;
if let Some(user) = sender_user {
let key = mxc.as_bytes().to_vec();
let user = user.as_bytes().to_vec();
self.mediaid_user.insert(&key, &user)?;
}
Ok(key)
}
@ -31,13 +39,22 @@ impl service::media::Data for KeyValueDatabase {
let mut prefix = mxc.as_bytes().to_vec();
prefix.push(0xFF);
debug!("MXC db prefix: {:?}", prefix);
debug!("MXC db prefix: {prefix:?}");
for (key, _) in self.mediaid_file.scan_prefix(prefix) {
debug!("Deleting key: {:?}", key);
self.mediaid_file.remove(&key)?;
}
for (key, value) in self.mediaid_user.scan_prefix(mxc.as_bytes().to_vec()) {
if key == mxc.as_bytes().to_vec() {
let user = string_from_bytes(&value).unwrap_or_default();
debug!("Deleting key \"{key:?}\" which was uploaded by user {user}");
self.mediaid_user.remove(&key)?;
}
}
Ok(())
}
@ -85,7 +102,7 @@ impl service::media::Data for KeyValueDatabase {
let content_type = parts
.next()
.map(|bytes| {
utils::string_from_bytes(bytes)
string_from_bytes(bytes)
.map_err(|_| Error::bad_database("Content type in mediaid_file is invalid unicode."))
})
.transpose()?;
@ -97,7 +114,7 @@ impl service::media::Data for KeyValueDatabase {
None
} else {
Some(
utils::string_from_bytes(content_disposition_bytes)
string_from_bytes(content_disposition_bytes)
.map_err(|_| Error::bad_database("Content Disposition in mediaid_file is invalid unicode."))?,
)
};

View file

@ -268,6 +268,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
}))
}
/// Returns the number of users which are currently in a room
#[tracing::instrument(skip(self))]
fn room_joined_count(&self, room_id: &RoomId) -> Result<Option<u64>> {
self.roomid_joinedcount
@ -276,6 +277,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
.transpose()
}
/// Returns the number of users which are currently invited to a room
#[tracing::instrument(skip(self))]
fn room_invited_count(&self, room_id: &RoomId) -> Result<Option<u64>> {
self.roomid_invitedcount

View file

@ -90,6 +90,10 @@ impl service::sending::Data for KeyValueDatabase {
fn mark_as_active(&self, events: &[(SendingEventType, Vec<u8>)]) -> Result<()> {
for (e, key) in events {
if key.is_empty() {
continue;
}
let value = if let SendingEventType::Edu(value) = &e {
&**value
} else {

View file

@ -157,6 +157,7 @@ pub struct KeyValueDatabase {
//pub media: media::Media,
pub(super) mediaid_file: Arc<dyn KvTree>, // MediaId = MXC + WidthHeight + ContentDisposition + ContentType
pub(super) url_previews: Arc<dyn KvTree>,
pub(super) mediaid_user: Arc<dyn KvTree>,
//pub key_backups: key_backups::KeyBackups,
pub(super) backupid_algorithm: Arc<dyn KvTree>, // BackupId = UserId + Version(Count)
pub(super) backupid_etag: Arc<dyn KvTree>, // BackupId = UserId + Version(Count)
@ -365,6 +366,7 @@ impl KeyValueDatabase {
roomusertype_roomuserdataid: builder.open_tree("roomusertype_roomuserdataid")?,
mediaid_file: builder.open_tree("mediaid_file")?,
url_previews: builder.open_tree("url_previews")?,
mediaid_user: builder.open_tree("mediaid_user")?,
backupid_algorithm: builder.open_tree("backupid_algorithm")?,
backupid_etag: builder.open_tree("backupid_etag")?,
backupkeyid_backup: builder.open_tree("backupkeyid_backup")?,
@ -931,8 +933,13 @@ impl KeyValueDatabase {
{
let patterns = &services().globals.config.forbidden_usernames;
if !patterns.is_empty() {
for user in services().users.iter() {
let user_id = user?;
for user_id in services()
.users
.iter()
.filter_map(std::result::Result::ok)
.filter(|user| !services().users.is_deactivated(user).unwrap_or(true))
.filter(|user| user.server_name() == services().globals.server_name())
{
let matches = patterns.matches(user_id.localpart());
if matches.matched_any() {
warn!(

View file

@ -15,8 +15,12 @@ use axum::{
use axum_server::{bind, bind_rustls, tls_rustls::RustlsConfig, Handle as ServerHandle};
#[cfg(feature = "axum_dual_protocol")]
use axum_server_dual_protocol::ServerExt;
use conduit::api::{client_server, server_server};
use base64::{engine::general_purpose, Engine as _};
pub use conduit::*; // Re-export everything from the library crate
use conduit::{
api::{client_server, server_server},
clap::{Args, SigningKey},
};
use either::Either::{Left, Right};
use figment::{
providers::{Env, Format, Toml},
@ -28,12 +32,15 @@ use http::{
};
#[cfg(unix)]
use hyperlocal::SocketIncoming;
use ruma::api::{
client::{
error::{Error as RumaError, ErrorBody, ErrorKind},
uiaa::UiaaResponse,
use ruma::{
api::{
client::{
error::{Error as RumaError, ErrorBody, ErrorKind},
uiaa::UiaaResponse,
},
IncomingRequest,
},
IncomingRequest,
serde::Base64,
};
#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))]
use tikv_jemallocator::Jemalloc;
@ -73,7 +80,7 @@ async fn main() {
} else if args.config.is_some() {
Figment::new()
.merge(
Toml::file(args.config.expect(
Toml::file(args.config.as_ref().expect(
"conduwuit config commandline argument was specified, but appears to be invalid. This should be \
set to the path of a valid TOML file.",
))
@ -169,8 +176,16 @@ async fn main() {
let config = &services().globals.config;
/* ad-hoc config validation/checks */
/* homeserver signing keypair subcommand stuff */
if let Some(subcommands) = &args.signing_key {
if signing_key_operations(subcommands).await.is_ok() {
return;
}
}
debug!("Ed25519KeyPair: {:?}", services().globals.keypair());
/* ad-hoc config validation/checks */
if config.unix_socket_path.is_some() && !cfg!(unix) {
error!(
"UNIX socket support is only available on *nix platforms. Please remove \"unix_socket_path\" from your \
@ -739,6 +754,7 @@ fn routes() -> Router {
.ruma_route(server_server::get_profile_information_route)
.ruma_route(server_server::get_keys_route)
.ruma_route(server_server::claim_keys_route)
.ruma_route(server_server::get_hierarchy_route)
.route("/_matrix/client/r0/rooms/:room_id/initialSync", get(initial_sync))
.route("/_matrix/client/v3/rooms/:room_id/initialSync", get(initial_sync))
.route("/client/server.json", get(client_server::syncv3_client_server_json))
@ -911,3 +927,36 @@ fn maximize_fd_limit() -> Result<(), nix::errno::Errno> {
Ok(())
}
/// Homeserver signing key commands/operations
async fn signing_key_operations(subcommands: &SigningKey) -> Result<()> {
match subcommands {
SigningKey::ExportPath {
path,
} => {
let mut file = tokio::fs::File::create(path).await?;
let mut content = String::new();
content.push_str("ed25519 ");
let version = services().globals.keypair().version();
content.push_str(version);
content.push(' ');
let keypair = services().globals.keypair();
debug!("Ed25519KeyPair: {:?}", keypair);
//let key_base64 = Base64::new(key);
Ok(())
},
SigningKey::ImportPath {
path,
add_to_old_public_keys,
timestamp,
} => {
unimplemented!()
},
}
}

View file

@ -396,6 +396,21 @@ enum DebugCommand {
server: Box<ServerName>,
},
/// - Gets all the room state events for the specified room.
///
/// This is functionally equivalent to `GET
/// /_matrix/client/v3/rooms/{roomid}/state`, except the admin command does
/// *not* check if the sender user is allowed to see state events. This is
/// done because it's implied that server admins here have database access
/// and can see/get room info themselves anyways if they were malicious
/// admins.
///
/// Of course the check is still done on the actual client API.
GetRoomState {
/// Room ID
room_id: Box<RoomId>,
},
/// - Forces device lists for all local and remote users to be updated (as
/// having new keys available)
ForceDeviceListUpdates,
@ -458,60 +473,50 @@ impl Service {
let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name()))
.expect("@conduit:server_name is valid");
let conduit_room = services()
.rooms
.alias
.resolve_local_alias(
format!("#admins:{}", services().globals.server_name())
.as_str()
.try_into()
.expect("#admins:server_name is a valid room alias"),
)
.expect("Database data for admin room alias must be valid")
.expect("Admin room must exist");
if let Ok(Some(conduit_room)) = services().admin.get_admin_room() {
loop {
tokio::select! {
Some(event) = receiver.recv() => {
let (mut message_content, reply) = match event {
AdminRoomEvent::SendMessage(content) => (content, None),
AdminRoomEvent::ProcessMessage(room_message, reply_id) => {
(self.process_admin_message(room_message).await, Some(reply_id))
}
};
loop {
tokio::select! {
Some(event) = receiver.recv() => {
let (mut message_content, reply) = match event {
AdminRoomEvent::SendMessage(content) => (content, None),
AdminRoomEvent::ProcessMessage(room_message, reply_id) => {
(self.process_admin_message(room_message).await, Some(reply_id))
let mutex_state = Arc::clone(
services().globals
.roomid_mutex_state
.write()
.await
.entry(conduit_room.clone())
.or_default(),
);
let state_lock = mutex_state.lock().await;
if let Some(reply) = reply {
message_content.relates_to = Some(Reply { in_reply_to: InReplyTo { event_id: reply.into() } });
}
};
let mutex_state = Arc::clone(
services().globals
.roomid_mutex_state
.write()
.await
.entry(conduit_room.clone())
.or_default(),
);
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMessage,
content: to_raw_value(&message_content)
.expect("event is valid, we just created it"),
unsigned: None,
state_key: None,
redacts: None,
},
&conduit_user,
&conduit_room,
&state_lock)
.await
.unwrap();
let state_lock = mutex_state.lock().await;
if let Some(reply) = reply {
message_content.relates_to = Some(Reply { in_reply_to: InReplyTo { event_id: reply.into() } });
drop(state_lock);
}
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMessage,
content: to_raw_value(&message_content)
.expect("event is valid, we just created it"),
unsigned: None,
state_key: None,
redacts: None,
},
&conduit_user,
&conduit_room,
&state_lock)
.await
.unwrap();
drop(state_lock);
}
}
}
@ -1096,14 +1101,13 @@ impl Service {
format!("#admins:{}", services().globals.server_name())
.try_into()
.expect("#admins:server_name is a valid alias name");
let admin_room_id = services()
.rooms
.alias
.resolve_local_alias(&admin_room_alias)?
.expect("Admin room must exist");
if room.to_string().eq(&admin_room_id) || room.to_string().eq(&admin_room_alias) {
return Ok(RoomMessageEventContent::text_plain("Not allowed to ban the admin room."));
if let Some(admin_room_id) = services().admin.get_admin_room()? {
if room.to_string().eq(&admin_room_id) || room.to_string().eq(&admin_room_alias) {
return Ok(RoomMessageEventContent::text_plain(
"Not allowed to ban the admin room.",
));
}
}
let room_id = if room.is_room_id() {
@ -1267,23 +1271,15 @@ impl Service {
let mut room_ban_count = 0;
let mut room_ids: Vec<&RoomId> = Vec::new();
let admin_room_alias: Box<RoomAliasId> =
format!("#admins:{}", services().globals.server_name())
.try_into()
.expect("#admins:server_name is a valid alias name");
let admin_room_id = services()
.rooms
.alias
.resolve_local_alias(&admin_room_alias)?
.expect("Admin room must exist");
for &room_id in &rooms_s {
match <&RoomId>::try_from(room_id) {
Ok(owned_room_id) => {
// silently ignore deleting admin room
if owned_room_id.eq(&admin_room_id) {
info!("User specified admin room in bulk ban list, ignoring");
continue;
if let Some(admin_room_id) = services().admin.get_admin_room()? {
if owned_room_id.eq(&admin_room_id) {
info!("User specified admin room in bulk ban list, ignoring");
continue;
}
}
room_ids.push(owned_room_id);
@ -2061,6 +2057,41 @@ impl Service {
},
}
},
DebugCommand::GetRoomState {
room_id,
} => {
let room_state = services()
.rooms
.state_accessor
.room_state_full(&room_id)
.await?
.values()
.map(|pdu| pdu.to_state_event())
.collect::<Vec<_>>();
if room_state.is_empty() {
return Ok(RoomMessageEventContent::text_plain(
"Unable to find room state in our database (vector is empty)",
));
}
let json_text = serde_json::to_string_pretty(&room_state).map_err(|e| {
error!("Failed converting room state vector in our database to pretty JSON: {e}");
Error::bad_database(
"Failed to convert room state events to pretty JSON, possible invalid room state events \
in our database",
)
})?;
return Ok(RoomMessageEventContent::text_html(
format!("{}\n```json\n{}\n```", "Found full room state", json_text),
format!(
"<p>{}</p>\n<pre><code class=\"language-json\">{}\n</code></pre>\n",
"Found full room state",
HtmlEscape(&json_text)
),
));
},
DebugCommand::ForceDeviceListUpdates => {
// Force E2EE device list updates for all users
for user_id in services().users.iter().filter_map(std::result::Result::ok) {
@ -2393,105 +2424,113 @@ impl Service {
Ok(())
}
/// Gets the room ID of the admin room
///
/// Errors are propagated from the database, and will have None if there is
/// no admin room
pub(crate) fn get_admin_room(&self) -> Result<Option<OwnedRoomId>> {
let admin_room_alias: Box<RoomAliasId> = format!("#admins:{}", services().globals.server_name())
.try_into()
.expect("#admins:server_name is a valid alias name");
services().rooms.alias.resolve_local_alias(&admin_room_alias)
}
/// Invite the user to the conduit admin room.
///
/// In conduit, this is equivalent to granting admin privileges.
pub(crate) async fn make_user_admin(&self, user_id: &UserId, displayname: String) -> Result<()> {
let admin_room_alias: Box<RoomAliasId> = format!("#admins:{}", services().globals.server_name())
.try_into()
.expect("#admins:server_name is a valid alias name");
let room_id = services().rooms.alias.resolve_local_alias(&admin_room_alias)?.expect("Admin room must exist");
if let Some(room_id) = services().admin.get_admin_room()? {
let mutex_state =
Arc::clone(services().globals.roomid_mutex_state.write().await.entry(room_id.clone()).or_default());
let state_lock = mutex_state.lock().await;
let mutex_state =
Arc::clone(services().globals.roomid_mutex_state.write().await.entry(room_id.clone()).or_default());
let state_lock = mutex_state.lock().await;
// Use the server user to grant the new admin's power level
let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name())
.expect("@conduit:server_name is valid");
// Use the server user to grant the new admin's power level
let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name())
.expect("@conduit:server_name is valid");
// Invite and join the real user
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Invite,
displayname: None,
avatar_url: None,
is_direct: None,
third_party_invite: None,
blurhash: None,
reason: None,
join_authorized_via_users_server: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(user_id.to_string()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Join,
displayname: Some(displayname),
avatar_url: None,
is_direct: None,
third_party_invite: None,
blurhash: None,
reason: None,
join_authorized_via_users_server: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(user_id.to_string()),
redacts: None,
},
user_id,
&room_id,
&state_lock,
)
.await?;
// Invite and join the real user
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Invite,
displayname: None,
avatar_url: None,
is_direct: None,
third_party_invite: None,
blurhash: None,
reason: None,
join_authorized_via_users_server: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(user_id.to_string()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMember,
content: to_raw_value(&RoomMemberEventContent {
membership: MembershipState::Join,
displayname: Some(displayname),
avatar_url: None,
is_direct: None,
third_party_invite: None,
blurhash: None,
reason: None,
join_authorized_via_users_server: None,
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some(user_id.to_string()),
redacts: None,
},
user_id,
&room_id,
&state_lock,
)
.await?;
// Set power level
let mut users = BTreeMap::new();
users.insert(conduit_user.clone(), 100.into());
users.insert(user_id.to_owned(), 100.into());
// Set power level
let mut users = BTreeMap::new();
users.insert(conduit_user.clone(), 100.into());
users.insert(user_id.to_owned(), 100.into());
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomPowerLevels,
content: to_raw_value(&RoomPowerLevelsEventContent {
users,
..Default::default()
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;
services()
.rooms
.timeline
.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomPowerLevels,
content: to_raw_value(&RoomPowerLevelsEventContent {
users,
..Default::default()
})
.expect("event is valid, we just created it"),
unsigned: None,
state_key: Some("".to_owned()),
redacts: None,
},
&conduit_user,
&room_id,
&state_lock,
)
.await?;
// Send welcome message
services().rooms.timeline.build_and_append_pdu(
// Send welcome message
services().rooms.timeline.build_and_append_pdu(
PduBuilder {
event_type: TimelineEventType::RoomMessage,
content: to_raw_value(&RoomMessageEventContent::text_html(
@ -2508,7 +2547,10 @@ impl Service {
&state_lock,
).await?;
Ok(())
Ok(())
} else {
Ok(())
}
}
}

View file

@ -152,7 +152,7 @@ impl Service<'_> {
let keypair = match keypair {
Ok(k) => k,
Err(e) => {
error!("Keypair invalid. Deleting...");
error!("Homeserver signing keypair in database is invalid. Deleting...");
db.remove_keypair()?;
return Err(e);
},
@ -359,6 +359,8 @@ impl Service<'_> {
pub fn presence_offline_timeout_s(&self) -> u64 { self.config.presence_offline_timeout_s }
pub fn allow_incoming_read_receipts(&self) -> bool { self.config.allow_incoming_read_receipts }
pub fn rocksdb_log_level(&self) -> &String { &self.config.rocksdb_log_level }
pub fn rocksdb_max_log_file_size(&self) -> usize { self.config.rocksdb_max_log_file_size }
@ -495,6 +497,7 @@ fn reqwest_client_builder(config: &Config) -> Result<reqwest::ClientBuilder> {
});
let mut reqwest_client_builder = reqwest::Client::builder()
.trust_dns(true)
.pool_max_idle_per_host(0)
.connect_timeout(Duration::from_secs(60))
.timeout(Duration::from_secs(60 * 5))
@ -522,6 +525,7 @@ fn url_preview_reqwest_client_builder(config: &Config) -> Result<reqwest::Client
});
let mut reqwest_client_builder = reqwest::Client::builder()
.trust_dns(true)
.pool_max_idle_per_host(0)
.connect_timeout(Duration::from_secs(60))
.timeout(Duration::from_secs(60 * 5))

View file

@ -2,7 +2,8 @@ use crate::Result;
pub trait Data: Send + Sync {
fn create_file_metadata(
&self, mxc: String, width: u32, height: u32, content_disposition: Option<&str>, content_type: Option<&str>,
&self, sender_user: Option<&str>, mxc: String, width: u32, height: u32, content_disposition: Option<&str>,
content_type: Option<&str>,
) -> Result<Vec<u8>>;
fn delete_file_mxc(&self, mxc: String) -> Result<()>;

View file

@ -3,7 +3,7 @@ use std::{collections::HashMap, io::Cursor, sync::Arc, time::SystemTime};
pub(crate) use data::Data;
use image::imageops::FilterType;
use ruma::OwnedMxcUri;
use ruma::{OwnedMxcUri, OwnedUserId};
use serde::Serialize;
use tokio::{
fs::{self, File},
@ -45,10 +45,15 @@ pub struct Service {
impl Service {
/// Uploads a file.
pub async fn create(
&self, mxc: String, content_disposition: Option<&str>, content_type: Option<&str>, file: &[u8],
&self, sender_user: Option<OwnedUserId>, mxc: String, content_disposition: Option<&str>,
content_type: Option<&str>, file: &[u8],
) -> Result<()> {
// Width, Height = 0 if it's not a thumbnail
let key = self.db.create_file_metadata(mxc, 0, 0, content_disposition, content_type)?;
let key = if let Some(user) = sender_user {
self.db.create_file_metadata(Some(user.as_str()), mxc, 0, 0, content_disposition, content_type)?
} else {
self.db.create_file_metadata(None, mxc, 0, 0, content_disposition, content_type)?
};
let path;
@ -106,11 +111,17 @@ impl Service {
}
/// Uploads or replaces a file thumbnail.
#[allow(clippy::too_many_arguments)]
pub async fn upload_thumbnail(
&self, mxc: String, content_disposition: Option<&str>, content_type: Option<&str>, width: u32, height: u32,
file: &[u8],
&self, sender_user: Option<OwnedUserId>, mxc: String, content_disposition: Option<&str>,
content_type: Option<&str>, width: u32, height: u32, file: &[u8],
) -> Result<()> {
let key = self.db.create_file_metadata(mxc, width, height, content_disposition, content_type)?;
let key = if let Some(user) = sender_user {
self.db.create_file_metadata(Some(user.as_str()), mxc, width, height, content_disposition, content_type)?
} else {
self.db.create_file_metadata(None, mxc, width, height, content_disposition, content_type)?
};
let path;
#[allow(clippy::unnecessary_operation)] // error[E0658]: attributes on expressions are experimental
@ -183,8 +194,8 @@ impl Service {
debug!("Full MXC key from database: {:?}", key);
// we need to get the MXC URL from the first part of the key (the first 0xff /
// 255 push) this code does look kinda crazy but blame conduit for using magic
// keys
// 255 push). this is all necessary because of conduit using magic keys for
// media
let mut parts = key.split(|&b| b == 0xFF);
let mxc = parts
.next()
@ -403,6 +414,7 @@ impl Service {
// Save thumbnail in database so we don't have to generate it again next time
let thumbnail_key = self.db.create_file_metadata(
None,
mxc,
width,
height,

View file

@ -134,7 +134,7 @@ impl Services<'_> {
db,
},
spaces: rooms::spaces::Service {
roomid_spacechunk_cache: Mutex::new(LruCache::new(
roomid_spacehierarchy_cache: Mutex::new(LruCache::new(
(100.0 * config.conduit_cache_capacity_modifier) as usize,
)),
},
@ -175,7 +175,7 @@ impl Services<'_> {
let user_visibility_cache = self.rooms.state_accessor.user_visibility_cache.lock().unwrap().len();
let stateinfo_cache = self.rooms.state_compressor.stateinfo_cache.lock().unwrap().len();
let lasttimelinecount_cache = self.rooms.timeline.lasttimelinecount_cache.lock().await.len();
let roomid_spacechunk_cache = self.rooms.spaces.roomid_spacechunk_cache.lock().await.len();
let roomid_spacehierarchy_cache = self.rooms.spaces.roomid_spacehierarchy_cache.lock().await.len();
format!(
"\
@ -184,7 +184,7 @@ server_visibility_cache: {server_visibility_cache}
user_visibility_cache: {user_visibility_cache}
stateinfo_cache: {stateinfo_cache}
lasttimelinecount_cache: {lasttimelinecount_cache}
roomid_spacechunk_cache: {roomid_spacechunk_cache}"
roomid_spacehierarchy_cache: {roomid_spacehierarchy_cache}"
)
}
@ -205,7 +205,13 @@ roomid_spacechunk_cache: {roomid_spacechunk_cache}"
self.rooms.timeline.lasttimelinecount_cache.lock().await.clear();
}
if amount > 5 {
self.rooms.spaces.roomid_spacechunk_cache.lock().await.clear();
self.rooms.spaces.roomid_spacehierarchy_cache.lock().await.clear();
}
if amount > 6 {
self.globals.tls_name_override.write().unwrap().clear();
}
if amount > 7 {
self.globals.dns_resolver().clear_cache();
}
}
}
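As with the other LRU caches built in this constructor, the renamed space-hierarchy cache scales its capacity by conduit_cache_capacity_modifier from a base of 100 entries. For illustration (the function name is made up; the formula is the one shown above):

// Illustration of the capacity formula used above; 100.0 is this cache's base size.
fn space_hierarchy_cache_capacity(conduit_cache_capacity_modifier: f64) -> usize {
    (100.0 * conduit_cache_capacity_modifier) as usize
}

// e.g. a modifier of 1.0 keeps 100 entries, 2.5 keeps 250.

Note also that the clear levels added further down are cumulative: an amount of 8 or more clears the space-hierarchy cache, the TLS name overrides, and the DNS cache in one pass.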

File diff suppressed because it is too large


View file

@ -74,7 +74,7 @@ impl Service {
.await?;
},
TimelineEventType::SpaceChild => {
services().rooms.spaces.roomid_spacechunk_cache.lock().await.remove(&pdu.room_id);
services().rooms.spaces.roomid_spacehierarchy_cache.lock().await.remove(&pdu.room_id);
},
_ => continue,
}

View file

@ -26,7 +26,7 @@ use ruma::{
state_res,
state_res::{Event, RoomVersion},
uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName,
RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
RoomId, RoomVersionId, ServerName, UserId,
};
use serde::Deserialize;
use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
@ -418,7 +418,7 @@ impl Service {
},
TimelineEventType::SpaceChild => {
if let Some(_state_key) = &pdu.state_key {
services().rooms.spaces.roomid_spacechunk_cache.lock().await.remove(&pdu.room_id);
services().rooms.spaces.roomid_spacehierarchy_cache.lock().await.remove(&pdu.room_id);
}
},
TimelineEventType::RoomMember => {
@ -461,10 +461,6 @@ impl Service {
if let Some(body) = content.body {
services().rooms.search.index_pdu(shortroomid, &pdu_id, &body)?;
let admin_room = services().rooms.alias.resolve_local_alias(
<&RoomAliasId>::try_from(format!("#admins:{}", services().globals.server_name()).as_str())
.expect("#admins:server_name is a valid room alias"),
)?;
let server_user = format!("@conduit:{}", services().globals.server_name());
let to_conduit = body.starts_with(&format!("{server_user}: "))
@ -477,8 +473,10 @@ impl Service {
// the administrator can execute commands as conduit
let from_conduit = pdu.sender == server_user && services().globals.emergency_password().is_none();
if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) {
services().admin.process_message(body, pdu.event_id.clone());
if let Some(admin_room) = services().admin.get_admin_room()? {
if to_conduit && !from_conduit && admin_room == pdu.room_id {
services().admin.process_message(body, pdu.event_id.clone());
}
}
}
},
@ -720,84 +718,82 @@ impl Service {
) -> Result<Arc<EventId>> {
let (pdu, pdu_json) = self.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)?;
let admin_room = services().rooms.alias.resolve_local_alias(
<&RoomAliasId>::try_from(format!("#admins:{}", services().globals.server_name()).as_str())
.expect("#admins:server_name is a valid room alias"),
)?;
if admin_room.filter(|v| v == room_id).is_some() {
match pdu.event_type() {
TimelineEventType::RoomEncryption => {
warn!("Encryption is not allowed in the admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Encryption is not allowed in the admins room.",
));
},
TimelineEventType::RoomMember => {
#[derive(Deserialize)]
struct ExtractMembership {
membership: MembershipState,
}
let target = pdu.state_key().filter(|v| v.starts_with('@')).unwrap_or(sender.as_str());
let server_name = services().globals.server_name();
let server_user = format!("@conduit:{server_name}");
let content = serde_json::from_str::<ExtractMembership>(pdu.content.get())
.map_err(|_| Error::bad_database("Invalid content in pdu."))?;
if content.membership == MembershipState::Leave {
if target == server_user {
warn!("Conduit user cannot leave from admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Conduit user cannot leave from admins room.",
));
if let Some(admin_room) = services().admin.get_admin_room()? {
if admin_room == room_id {
match pdu.event_type() {
TimelineEventType::RoomEncryption => {
warn!("Encryption is not allowed in the admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Encryption is not allowed in the admins room.",
));
},
TimelineEventType::RoomMember => {
#[derive(Deserialize)]
struct ExtractMembership {
membership: MembershipState,
}
let count = services()
.rooms
.state_cache
.room_members(room_id)
.filter_map(std::result::Result::ok)
.filter(|m| m.server_name() == server_name)
.filter(|m| m != target)
.count();
if count < 2 {
warn!("Last admin cannot leave from admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Last admin cannot leave from admins room.",
));
}
}
let target = pdu.state_key().filter(|v| v.starts_with('@')).unwrap_or(sender.as_str());
let server_name = services().globals.server_name();
let server_user = format!("@conduit:{server_name}");
let content = serde_json::from_str::<ExtractMembership>(pdu.content.get())
.map_err(|_| Error::bad_database("Invalid content in pdu."))?;
if content.membership == MembershipState::Ban && pdu.state_key().is_some() {
if target == server_user {
warn!("Conduit user cannot be banned in admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Conduit user cannot be banned in admins room.",
));
if content.membership == MembershipState::Leave {
if target == server_user {
warn!("Conduit user cannot leave from admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Conduit user cannot leave from admins room.",
));
}
let count = services()
.rooms
.state_cache
.room_members(room_id)
.filter_map(std::result::Result::ok)
.filter(|m| m.server_name() == server_name)
.filter(|m| m != target)
.count();
if count < 2 {
warn!("Last admin cannot leave from admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Last admin cannot leave from admins room.",
));
}
}
let count = services()
.rooms
.state_cache
.room_members(room_id)
.filter_map(std::result::Result::ok)
.filter(|m| m.server_name() == server_name)
.filter(|m| m != target)
.count();
if count < 2 {
warn!("Last admin cannot be banned in admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Last admin cannot be banned in admins room.",
));
if content.membership == MembershipState::Ban && pdu.state_key().is_some() {
if target == server_user {
warn!("Conduit user cannot be banned in admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Conduit user cannot be banned in admins room.",
));
}
let count = services()
.rooms
.state_cache
.room_members(room_id)
.filter_map(std::result::Result::ok)
.filter(|m| m.server_name() == server_name)
.filter(|m| m != target)
.count();
if count < 2 {
warn!("Last admin cannot be banned in admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Last admin cannot be banned in admins room.",
));
}
}
}
},
_ => {},
},
_ => {},
}
}
}
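The alias lookup that used to be inlined here (resolving #admins:server_name and expect-ing the alias to parse) is now behind services().admin.get_admin_room()?, which returns an Option so these paths stop panicking when the admin room is unavailable. Based on the removed lines, a plausible shape for that helper, sketched with stand-in parameters rather than the real service plumbing:

use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId};

// Sketch of a panic-free admin-room lookup mirroring the removed inline code.
// `resolve_local_alias` and `server_name` stand in for the existing service calls;
// the real implementation lives in the (suppressed) admin service diff.
fn get_admin_room(
    server_name: &str,
    resolve_local_alias: impl Fn(&RoomAliasId) -> Result<Option<OwnedRoomId>, String>,
) -> Result<Option<OwnedRoomId>, String> {
    let alias: OwnedRoomAliasId = format!("#admins:{server_name}")
        .try_into()
        .map_err(|_| "#admins alias failed to parse".to_owned())?;
    resolve_local_alias(&alias)
}

Returning Result<Option<_>> instead of expect-ing is what lets the call sites above degrade to "no admin room" rather than crashing, matching the "avoid panics when admin room is not available" commit.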
@ -1007,12 +1003,12 @@ impl Service {
return Ok(());
},
Err(e) => {
warn!("{backfill_server} could not provide backfill: {e}");
warn!("{backfill_server} failed to provide backfill: {e}");
},
}
}
info!("No servers could backfill");
info!("No servers could backfill, but backfill was needed");
Ok(())
}

View file

@ -25,7 +25,7 @@ use ruma::{
},
device_id,
events::{push_rules::PushRulesEvent, receipt::ReceiptType, AnySyncEphemeralRoomEvent, GlobalAccountDataEventType},
push, uint, MilliSecondsSinceUnixEpoch, OwnedServerName, OwnedUserId, ServerName, UInt, UserId,
push, uint, MilliSecondsSinceUnixEpoch, OwnedServerName, OwnedUserId, RoomId, ServerName, UInt, UserId,
};
use tokio::{
select,
@ -80,6 +80,7 @@ impl OutgoingKind {
pub enum SendingEventType {
Pdu(Vec<u8>), // pduid
Edu(Vec<u8>), // pdu json
Flush, // none
}
pub struct Service {
@ -237,9 +238,11 @@ impl Service {
events.push(e);
}
} else {
self.db.mark_as_active(&new_events)?;
for (e, _) in new_events {
events.push(e);
if !new_events.is_empty() {
self.db.mark_as_active(&new_events)?;
for (e, _) in new_events {
events.push(e);
}
}
if let OutgoingKind::Normal(server_name) = outgoing_kind {
@ -360,11 +363,11 @@ impl Service {
for user_id in device_list_changes {
// Empty prev id forces synapse to resync: https://github.com/matrix-org/synapse/blob/98aec1cc9da2bd6b8e34ffb282c85abf9b8b42ca/synapse/handlers/device.py#L767
// Because synapse resyncs, we can just insert dummy data
// Because synapse resyncs, we can just insert placeholder data
let edu = Edu::DeviceListUpdate(DeviceListUpdateContent {
user_id,
device_id: device_id!("dummy").to_owned(),
device_display_name: Some("Dummy".to_owned()),
device_id: device_id!("placeholder").to_owned(),
device_display_name: Some("Placeholder".to_owned()),
stream_id: uint!(1),
prev_id: Vec::new(),
deleted: None,
@ -421,6 +424,29 @@ impl Service {
Ok(())
}
#[tracing::instrument(skip(self, room_id))]
pub fn flush_room(&self, room_id: &RoomId) -> Result<()> {
let servers: HashSet<OwnedServerName> =
services().rooms.state_cache.room_servers(room_id).filter_map(std::result::Result::ok).collect();
self.flush_servers(servers.into_iter())
}
#[tracing::instrument(skip(self, servers))]
pub fn flush_servers<I: Iterator<Item = OwnedServerName>>(&self, servers: I) -> Result<()> {
let requests = servers
.into_iter()
.filter(|server| server != services().globals.server_name())
.map(OutgoingKind::Normal)
.collect::<Vec<_>>();
for outgoing_kind in requests.into_iter() {
self.sender.send((outgoing_kind, SendingEventType::Flush, Vec::<u8>::new())).unwrap();
}
Ok(())
}
/// Cleanup event data
/// Used for instance after we remove an appservice registration
#[tracing::instrument(skip(self))]
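Per the "add flush suite to sending service; trigger on read receipts" commit, a flush is a content-free wake-up: flush_room collects every server participating in the room, drops our own name, and queues SendingEventType::Flush for each destination so the sender task retries whatever is already pending. A standalone sketch of that fan-out, with stand-in types rather than conduwuit's (only the shape mirrors the methods above):

use std::collections::HashSet;

// Stand-in types; only the fan-out logic mirrors flush_room/flush_servers above.
#[derive(Hash, PartialEq, Eq)]
struct ServerName(String);

enum SendingEventType {
    Flush, // no payload: just wake the destination's sender task
}

fn flush_room(
    our_name: &ServerName,
    room_servers: impl Iterator<Item = ServerName>,
    mut enqueue: impl FnMut(ServerName, SendingEventType),
) {
    let servers: HashSet<ServerName> = room_servers.collect();
    for server in servers.into_iter().filter(|s| s != our_name) {
        enqueue(server, SendingEventType::Flush);
    }
}

In conduwuit itself the trigger would simply be a call such as services().sending.flush_room(&room_id)? from the read-receipt path, which is what the flush-on-read-receipts part of the commit message describes.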
@ -461,6 +487,9 @@ impl Service {
SendingEventType::Edu(_) => {
// Appservices don't need EDUs (?)
},
SendingEventType::Flush => {
// flush only; no new content
},
}
}
@ -480,6 +509,7 @@ impl Service {
.iter()
.map(|e| match e {
SendingEventType::Edu(b) | SendingEventType::Pdu(b) => &**b,
SendingEventType::Flush => &[],
})
.collect::<Vec<_>>(),
)))
@ -521,6 +551,9 @@ impl Service {
SendingEventType::Edu(_) => {
// Push gateways don't need EDUs (?)
},
SendingEventType::Flush => {
// flush only; no new content
},
}
}
@ -601,6 +634,9 @@ impl Service {
edu_jsons.push(raw);
}
},
SendingEventType::Flush => {
// flush only; no new content
},
}
}
@ -618,6 +654,7 @@ impl Service {
.iter()
.map(|e| match e {
SendingEventType::Edu(b) | SendingEventType::Pdu(b) => &**b,
SendingEventType::Flush => &[],
})
.collect::<Vec<_>>(),
)))
@ -647,6 +684,10 @@ impl Service {
where
T: OutgoingRequest + Debug,
{
if !services().globals.allow_federation() {
return Err(Error::bad_config("Federation is disabled."));
}
if destination.is_ip_literal() || IPAddress::is_valid(destination.host()) {
info!(
"Destination {} is an IP literal, checking against IP range denylist.",

View file

@ -11,6 +11,7 @@ use argon2::{password_hash::SaltString, PasswordHasher};
use rand::prelude::*;
use ring::digest;
use ruma::{canonical_json::try_from_json_map, CanonicalJsonError, CanonicalJsonObject, OwnedUserId};
use tracing::debug;
use crate::{services, Error, Result};
@ -30,8 +31,11 @@ pub(crate) fn increment(old: Option<&[u8]>) -> Option<Vec<u8>> {
Some(number.to_be_bytes().to_vec())
}
/// Generates a new homeserver signing key. First 8 bytes are the version (a
/// random alphanumeric string), the rest are generated by Ed25519KeyPair
pub fn generate_keypair() -> Vec<u8> {
let mut value = random_string(8).as_bytes().to_vec();
debug!("Keypair version bytes: {value:?}");
value.push(0xFF);
value.extend_from_slice(
&ruma::signatures::Ed25519KeyPair::generate().expect("Ed25519KeyPair generation always works (?)"),
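The stored blob is therefore <8 alphanumeric version bytes> 0xFF <Ed25519 key document>, and the new debug! line logs only the front half of it. A hedged sketch of the matching load step, splitting at the first 0xFF; the from_der call assumes the generated document is DER/PKCS#8 as in current ruma, so treat this as illustrative rather than conduit's actual loader:

use ruma::signatures::Ed25519KeyPair;

// Sketch: split a stored keypair blob back into (version, key document) at the
// first 0xFF byte, mirroring the layout generate_keypair() writes above.
fn load_keypair(bytes: &[u8]) -> Result<Ed25519KeyPair, String> {
    let mut parts = bytes.splitn(2, |&b| b == 0xFF);
    let version = parts
        .next()
        .and_then(|v| std::str::from_utf8(v).ok())
        .ok_or("invalid version bytes")?;
    let document = parts.next().ok_or("missing key document")?;
    Ed25519KeyPair::from_der(document, version.to_owned()).map_err(|e| e.to_string())
}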
@ -58,6 +62,7 @@ pub fn user_id_from_bytes(bytes: &[u8]) -> Result<OwnedUserId> {
.map_err(|_| Error::bad_database("Failed to parse user id from bytes"))
}
/// Generates a random *alphanumeric* string
pub fn random_string(length: usize) -> String {
thread_rng().sample_iter(&rand::distributions::Alphanumeric).take(length).map(char::from).collect()
}