From 50acfe783289e6b9b8deb20b3c34f32653f61f11 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 29 Jan 2025 08:39:44 +0000 Subject: [PATCH 001/310] flatten auth chain iterations Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 7 +- src/api/server/event_auth.rs | 4 +- src/api/server/send_join.rs | 2 - src/api/server/state.rs | 2 - src/api/server/state_ids.rs | 8 +- src/service/rooms/auth_chain/mod.rs | 154 +++++++++--------- .../rooms/event_handler/resolve_state.rs | 15 +- .../rooms/event_handler/state_at_incoming.rs | 9 +- 8 files changed, 90 insertions(+), 111 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index cd892ded..4e0ce2e3 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -6,8 +6,9 @@ use std::{ }; use conduwuit::{ - debug_error, err, info, trace, utils, utils::string::EMPTY, warn, Error, PduEvent, PduId, - RawPduId, Result, + debug_error, err, info, trace, utils, + utils::{stream::ReadyExt, string::EMPTY}, + warn, Error, PduEvent, PduId, RawPduId, Result, }; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ @@ -54,7 +55,7 @@ pub(super) async fn get_auth_chain( .rooms .auth_chain .event_ids_iter(room_id, once(event_id.as_ref())) - .await? + .ready_filter_map(Result::ok) .count() .await; diff --git a/src/api/server/event_auth.rs b/src/api/server/event_auth.rs index 93e867a0..49dcd718 100644 --- a/src/api/server/event_auth.rs +++ b/src/api/server/event_auth.rs @@ -1,7 +1,7 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; -use conduwuit::{Error, Result}; +use conduwuit::{utils::stream::ReadyExt, Error, Result}; use futures::StreamExt; use ruma::{ api::{client::error::ErrorKind, federation::authorization::get_event_authorization}, @@ -48,7 +48,7 @@ pub(crate) async fn get_event_authorization_route( .rooms .auth_chain .event_ids_iter(room_id, once(body.event_id.borrow())) - .await? + .ready_filter_map(Result::ok) .filter_map(|id| async move { services.rooms.timeline.get_pdu_json(&id).await.ok() }) .then(|pdu| services.sending.convert_to_outgoing_federation_event(pdu)) .collect() diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index 2b8a0eef..e81d7672 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -238,8 +238,6 @@ async fn create_join_event( .rooms .auth_chain .event_ids_iter(room_id, starting_events) - .await? - .map(Ok) .broad_and_then(|event_id| async move { services.rooms.timeline.get_pdu_json(&event_id).await }) diff --git a/src/api/server/state.rs b/src/api/server/state.rs index eab1f138..b16e61a0 100644 --- a/src/api/server/state.rs +++ b/src/api/server/state.rs @@ -56,8 +56,6 @@ pub(crate) async fn get_room_state_route( .rooms .auth_chain .event_ids_iter(&body.room_id, once(body.event_id.borrow())) - .await? - .map(Ok) .and_then(|id| async move { services.rooms.timeline.get_pdu_json(&id).await }) .and_then(|pdu| { services diff --git a/src/api/server/state_ids.rs b/src/api/server/state_ids.rs index 4973dd3a..7d0440bf 100644 --- a/src/api/server/state_ids.rs +++ b/src/api/server/state_ids.rs @@ -2,7 +2,7 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; use conduwuit::{at, err, Result}; -use futures::StreamExt; +use futures::{StreamExt, TryStreamExt}; use ruma::{api::federation::event::get_room_state_ids, OwnedEventId}; use super::AccessCheck; @@ -44,10 +44,8 @@ pub(crate) async fn get_room_state_ids_route( .rooms .auth_chain .event_ids_iter(&body.room_id, once(body.event_id.borrow())) - .await? 
- .map(|id| (*id).to_owned()) - .collect() - .await; + .try_collect() + .await?; Ok(get_room_state_ids::v1::Response { auth_chain_ids, pdu_ids }) } diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index df2663b2..0ff96846 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -4,6 +4,7 @@ use std::{ collections::{BTreeSet, HashSet, VecDeque}, fmt::Debug, sync::Arc, + time::Instant, }; use conduwuit::{ @@ -14,7 +15,7 @@ use conduwuit::{ }, validated, warn, Err, Result, }; -use futures::{Stream, StreamExt, TryFutureExt, TryStreamExt}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{EventId, OwnedEventId, RoomId}; use self::data::Data; @@ -30,6 +31,8 @@ struct Services { timeline: Dep, } +type Bucket<'a> = BTreeSet<(u64, &'a EventId)>; + impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { @@ -45,42 +48,22 @@ impl crate::Service for Service { } #[implement(Service)] -pub async fn event_ids_iter<'a, I>( +pub fn event_ids_iter<'a, I>( &'a self, - room_id: &RoomId, + room_id: &'a RoomId, starting_events: I, -) -> Result + Send + '_> +) -> impl Stream> + Send + 'a where I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, { - let stream = self - .get_event_ids(room_id, starting_events) - .await? - .into_iter() - .stream(); - - Ok(stream) -} - -#[implement(Service)] -pub async fn get_event_ids<'a, I>( - &'a self, - room_id: &RoomId, - starting_events: I, -) -> Result> -where - I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, -{ - let chain = self.get_auth_chain(room_id, starting_events).await?; - let event_ids = self - .services - .short - .multi_get_eventid_from_short(chain.into_iter().stream()) - .ready_filter_map(Result::ok) - .collect() - .await; - - Ok(event_ids) + self.get_auth_chain(room_id, starting_events) + .map_ok(|chain| { + self.services + .short + .multi_get_eventid_from_short(chain.into_iter().stream()) + .ready_filter(Result::is_ok) + }) + .try_flatten_stream() } #[implement(Service)] @@ -94,9 +77,9 @@ where I: Iterator + Clone + Debug + ExactSizeIterator + Send + 'a, { const NUM_BUCKETS: usize = 50; //TODO: change possible w/o disrupting db? 
- const BUCKET: BTreeSet<(u64, &EventId)> = BTreeSet::new(); + const BUCKET: Bucket<'_> = BTreeSet::new(); - let started = std::time::Instant::now(); + let started = Instant::now(); let mut starting_ids = self .services .short @@ -120,53 +103,7 @@ where let full_auth_chain: Vec = buckets .into_iter() .try_stream() - .broad_and_then(|chunk| async move { - let chunk_key: Vec = chunk.iter().map(at!(0)).collect(); - - if chunk_key.is_empty() { - return Ok(Vec::new()); - } - - if let Ok(cached) = self.get_cached_eventid_authchain(&chunk_key).await { - return Ok(cached.to_vec()); - } - - let chunk_cache: Vec<_> = chunk - .into_iter() - .try_stream() - .broad_and_then(|(shortid, event_id)| async move { - if let Ok(cached) = self.get_cached_eventid_authchain(&[shortid]).await { - return Ok(cached.to_vec()); - } - - let auth_chain = self.get_auth_chain_inner(room_id, event_id).await?; - self.cache_auth_chain_vec(vec![shortid], auth_chain.as_slice()); - debug!( - ?event_id, - elapsed = ?started.elapsed(), - "Cache missed event" - ); - - Ok(auth_chain) - }) - .try_collect() - .map_ok(|chunk_cache: Vec<_>| chunk_cache.into_iter().flatten().collect()) - .map_ok(|mut chunk_cache: Vec<_>| { - chunk_cache.sort_unstable(); - chunk_cache.dedup(); - chunk_cache - }) - .await?; - - self.cache_auth_chain_vec(chunk_key, chunk_cache.as_slice()); - debug!( - chunk_cache_length = ?chunk_cache.len(), - elapsed = ?started.elapsed(), - "Cache missed chunk", - ); - - Ok(chunk_cache) - }) + .broad_and_then(|chunk| self.get_auth_chain_outer(room_id, started, chunk)) .try_collect() .map_ok(|auth_chain: Vec<_>| auth_chain.into_iter().flatten().collect()) .map_ok(|mut full_auth_chain: Vec<_>| { @@ -174,6 +111,7 @@ where full_auth_chain.dedup(); full_auth_chain }) + .boxed() .await?; debug!( @@ -185,6 +123,60 @@ where Ok(full_auth_chain) } +#[implement(Service)] +async fn get_auth_chain_outer( + &self, + room_id: &RoomId, + started: Instant, + chunk: Bucket<'_>, +) -> Result> { + let chunk_key: Vec = chunk.iter().map(at!(0)).collect(); + + if chunk_key.is_empty() { + return Ok(Vec::new()); + } + + if let Ok(cached) = self.get_cached_eventid_authchain(&chunk_key).await { + return Ok(cached.to_vec()); + } + + let chunk_cache: Vec<_> = chunk + .into_iter() + .try_stream() + .broad_and_then(|(shortid, event_id)| async move { + if let Ok(cached) = self.get_cached_eventid_authchain(&[shortid]).await { + return Ok(cached.to_vec()); + } + + let auth_chain = self.get_auth_chain_inner(room_id, event_id).await?; + self.cache_auth_chain_vec(vec![shortid], auth_chain.as_slice()); + debug!( + ?event_id, + elapsed = ?started.elapsed(), + "Cache missed event" + ); + + Ok(auth_chain) + }) + .try_collect() + .map_ok(|chunk_cache: Vec<_>| chunk_cache.into_iter().flatten().collect()) + .map_ok(|mut chunk_cache: Vec<_>| { + chunk_cache.sort_unstable(); + chunk_cache.dedup(); + chunk_cache + }) + .await?; + + self.cache_auth_chain_vec(chunk_key, chunk_cache.as_slice()); + debug!( + chunk_cache_length = ?chunk_cache.len(), + elapsed = ?started.elapsed(), + "Cache missed chunk", + ); + + Ok(chunk_cache) +} + #[implement(Service)] #[tracing::instrument(name = "inner", level = "trace", skip(self, room_id))] async fn get_auth_chain_inner( diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 1fd91ac6..03f7e822 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -44,18 +44,11 @@ pub async fn resolve_state( let 
auth_chain_sets: Vec> = fork_states .iter() .try_stream() - .wide_and_then(|state| async move { - let starting_events = state.values().map(Borrow::borrow); - - let auth_chain = self - .services + .wide_and_then(|state| { + self.services .auth_chain - .get_event_ids(room_id, starting_events) - .await? - .into_iter() - .collect(); - - Ok(auth_chain) + .event_ids_iter(room_id, state.values().map(Borrow::borrow)) + .try_collect() }) .try_collect() .await?; diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 7ef047ab..8730232a 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -10,7 +10,7 @@ use conduwuit::{ utils::stream::{BroadbandExt, IterStream}, PduEvent, Result, }; -use futures::{FutureExt, StreamExt}; +use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{state_res::StateMap, OwnedEventId, RoomId, RoomVersionId}; // TODO: if we know the prev_events of the incoming event we can avoid the @@ -140,10 +140,9 @@ pub(super) async fn state_at_incoming_resolved( let auth_chain: HashSet = self .services .auth_chain - .get_event_ids(room_id, starting_events.into_iter()) - .await? - .into_iter() - .collect(); + .event_ids_iter(room_id, starting_events.into_iter()) + .try_collect() + .await?; auth_chain_sets.push(auth_chain); fork_states.push(state); From 8658a4c2d009c627b305af08b8e296f41f3bc1c5 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 28 Jan 2025 19:25:56 -0500 Subject: [PATCH 002/310] misc nix CI fixes that might speed it up a bit Signed-off-by: June Clementine Strawberry Signed-off-by: strawberry --- .github/workflows/ci.yml | 4 +-- bin/complement | 4 ++- flake.nix | 6 ++--- nix/pkgs/complement/config.toml | 19 +++++++++++--- nix/pkgs/complement/default.nix | 14 ++++++++-- nix/pkgs/main/default.nix | 45 ++++++++++++++++++++------------- src/router/serve/tls.rs | 4 +-- 7 files changed, 65 insertions(+), 31 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 35d60aa1..9a3d518d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -88,8 +88,8 @@ jobs: ssh -q website "echo test" || ssh -q website "echo test" echo "Creating commit rev directory on web server" - ssh -q website "rm -rf /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/" || ssh -q website "rm -rf /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/" - ssh -q website "mkdir -v /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/" || ssh -q website "mkdir -v /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/" + ssh -q website "rm -rf /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/" || ssh -q website "rm -rf /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/" || true + ssh -q website "mkdir -v /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/" || ssh -q website "mkdir -v /var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/" || true echo "SSH_WEBSITE=1" >> "$GITHUB_ENV" diff --git a/bin/complement b/bin/complement index a1db4b32..118a4df3 100755 --- a/bin/complement +++ b/bin/complement @@ -34,7 +34,9 @@ toplevel="$(git rev-parse --show-toplevel)" pushd "$toplevel" > /dev/null -bin/nix-build-and-cache just .#linux-complement +#bin/nix-build-and-cache just .#linux-complement +bin/nix-build-and-cache just .#complement +#nom 
build .#complement docker load < result popd > /dev/null diff --git a/flake.nix b/flake.nix index 920d3d14..165cf372 100644 --- a/flake.nix +++ b/flake.nix @@ -169,10 +169,10 @@ # used for rust caching in CI to speed it up sccache - - # needed so we can get rid of gcc and other unused deps that bloat OCI images - removeReferencesTo ] + # valgrind is unavailable in static contexts + # used for CI and complement + ++ (if !stdenv.hostPlatform.isStatic then [ "valgrind" ] else []) # liburing is Linux-exclusive ++ lib.optional stdenv.hostPlatform.isLinux liburing # needed to build Rust applications on macOS diff --git a/nix/pkgs/complement/config.toml b/nix/pkgs/complement/config.toml index f20abee2..039f9c97 100644 --- a/nix/pkgs/complement/config.toml +++ b/nix/pkgs/complement/config.toml @@ -17,19 +17,30 @@ ip_range_denylist = [] url_preview_domain_contains_allowlist = ["*"] url_preview_domain_explicit_denylist = ["*"] media_compat_file_link = false -media_startup_check = false -prune_missing_media = false +media_startup_check = true +prune_missing_media = true log_colors = false admin_room_notices = false allow_check_for_updates = false -allow_unstable_room_versions = true rocksdb_log_level = "debug" rocksdb_max_log_files = 1 rocksdb_recovery_mode = 0 rocksdb_paranoid_file_checks = true log_guest_registrations = false allow_legacy_media = true -startup_netburst = false +startup_netburst = true + +# valgrind makes things so slow +dns_timeout = 60 +dns_attempts = 20 +request_conn_timeout = 60 +request_timeout = 120 +well_known_conn_timeout = 60 +well_known_timeout = 60 +federation_idle_timeout = 300 +sender_timeout = 300 +sender_idle_timeout = 300 +sender_retry_backoff_limit = 300 [global.tls] certs = "/certificate.crt" diff --git a/nix/pkgs/complement/default.nix b/nix/pkgs/complement/default.nix index e35cbf04..d7407ad9 100644 --- a/nix/pkgs/complement/default.nix +++ b/nix/pkgs/complement/default.nix @@ -9,19 +9,22 @@ , openssl , stdenv , tini +, valgrind , writeShellScriptBin }: let main' = main.override { profile = "test"; + #profile = "release-debuginfo"; all_features = true; disable_release_max_log_level = true; disable_features = [ - # no reason to use jemalloc for complement, just has compatibility/build issues "jemalloc" "jemalloc_stats" "jemalloc_prof" + "jemalloc_conf" + "io_uring" # console/CLI stuff isn't used or relevant for complement "console" "tokio_console" @@ -29,7 +32,7 @@ let "sentry_telemetry" "perf_measurements" # the containers don't use or need systemd signal support - "systemd" + #"systemd" # this is non-functional on nix for some reason "hardened_malloc" # dont include experimental features @@ -44,6 +47,13 @@ let "url_preview" ]; }; + # TODO: figure out why a suspicious amounnt of complement tests fail with valgrind only under complement. + # maybe issue with direct TLS mode? + #${lib.getExe' valgrind "valgrind"} \ + #--leak-check=no \ + #--undef-value-errors=no \ + #--exit-on-first-error=yes \ + #--error-exitcode=1 \ start = writeShellScriptBin "start" '' set -euxo pipefail diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix index d7424d11..26f4d1a4 100644 --- a/nix/pkgs/main/default.nix +++ b/nix/pkgs/main/default.nix @@ -82,7 +82,7 @@ rust-jemalloc-sys' = (rust-jemalloc-sys.override { buildDepsOnlyEnv = let rocksdb' = (rocksdb.override { - jemalloc = rust-jemalloc-sys'; + jemalloc = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys'; # rocksdb fails to build with prefixed jemalloc, which is required on # darwin due to [1]. 
In this case, fall back to building rocksdb with # libc malloc. This should not cause conflicts, because all of the @@ -103,6 +103,11 @@ buildDepsOnlyEnv = ++ [ "-DPORTABLE=haswell" ]) else ([ "-DPORTABLE=1" ]) ) ++ old.cmakeFlags; + # outputs has "tools" which we dont need or use + outputs = [ "out" ]; + + # preInstall hooks has stuff for messing with ldb/sst_dump which we dont need or use + preInstall = ""; }); in { @@ -156,6 +161,19 @@ commonAttrs = { ]; }; + # This is redundant with CI + doCheck = false; + + cargoTestCommand = "cargo test --locked "; + cargoExtraArgs = "--no-default-features --locked " + + lib.optionalString + (features'' != []) + "--features " + (builtins.concatStringsSep "," features''); + cargoTestExtraArgs = "--no-default-features --locked " + + lib.optionalString + (features'' != []) + "--features " + (builtins.concatStringsSep "," features''); + dontStrip = profile == "dev" || profile == "test"; dontPatchELF = profile == "dev" || profile == "test"; @@ -181,9 +199,6 @@ commonAttrs = { # differing values for `NIX_CFLAGS_COMPILE`, which contributes to spurious # rebuilds of bindgen and its depedents. jq - - # needed so we can get rid of gcc and other unused deps that bloat OCI images - removeReferencesTo ] # needed to build Rust applications on macOS ++ lib.optionals stdenv.hostPlatform.isDarwin [ @@ -195,13 +210,6 @@ commonAttrs = { # https://discourse.nixos.org/t/compile-a-rust-binary-on-macos-dbcrossbar/8612 pkgsBuildHost.darwin.apple_sdk.frameworks.Security ]; - - # for some reason gcc and other weird deps are added to OCI images and bloats it up - # - # - postInstall = with pkgsBuildHost; '' - find "$out" -type f -exec remove-references-to -t ${stdenv.cc} -t ${gcc} -t ${llvm} -t ${rustc.unwrapped} -t ${rustc} '{}' + - ''; }; in @@ -210,15 +218,18 @@ craneLib.buildPackage ( commonAttrs // { env = buildDepsOnlyEnv; }); - cargoExtraArgs = "--no-default-features " + # This is redundant with CI + doCheck = false; + + cargoTestCommand = "cargo test --locked "; + cargoExtraArgs = "--no-default-features --locked " + + lib.optionalString + (features'' != []) + "--features " + (builtins.concatStringsSep "," features''); + cargoTestExtraArgs = "--no-default-features --locked " + lib.optionalString (features'' != []) "--features " + (builtins.concatStringsSep "," features''); - - # This is redundant with CI - cargoTestCommand = ""; - cargoCheckCommand = ""; - doCheck = false; env = buildPackageEnv; diff --git a/src/router/serve/tls.rs b/src/router/serve/tls.rs index 9d3fbd3b..f8e903c6 100644 --- a/src/router/serve/tls.rs +++ b/src/router/serve/tls.rs @@ -20,11 +20,11 @@ pub(super) async fn serve( let certs = tls .certs .as_ref() - .ok_or(err!(Config("tls.certs", "Missing required value in tls config section")))?; + .ok_or_else(|| err!(Config("tls.certs", "Missing required value in tls config section")))?; let key = tls .key .as_ref() - .ok_or(err!(Config("tls.key", "Missing required value in tls config section")))?; + .ok_or_else(|| err!(Config("tls.key", "Missing required value in tls config section")))?; // we use ring for ruma and hashing state, but aws-lc-rs is the new default. // without this, TLS mode will panic. 
From 3c8376d897e6a1b9b6b61f5ada05b2afec1ab937 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 29 Jan 2025 23:07:12 +0000 Subject: [PATCH 003/310] parallelize state-res pre-gathering Signed-off-by: Jason Volk --- .../rooms/event_handler/resolve_state.rs | 63 +++---- .../rooms/event_handler/state_at_incoming.rs | 173 +++++++++--------- 2 files changed, 123 insertions(+), 113 deletions(-) diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 03f7e822..c3de5f2f 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -5,11 +5,11 @@ use std::{ }; use conduwuit::{ - debug, err, implement, + err, implement, trace, utils::stream::{automatic_width, IterStream, ReadyExt, TryWidebandExt, WidebandExt}, - Result, + Error, Result, }; -use futures::{FutureExt, StreamExt, TryStreamExt}; +use futures::{future::try_join, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{ state_res::{self, StateMap}, OwnedEventId, RoomId, RoomVersionId, @@ -25,13 +25,13 @@ pub async fn resolve_state( room_version_id: &RoomVersionId, incoming_state: HashMap, ) -> Result>> { - debug!("Loading current room state ids"); + trace!("Loading current room state ids"); let current_sstatehash = self .services .state .get_room_shortstatehash(room_id) - .await - .map_err(|e| err!(Database(error!("No state for {room_id:?}: {e:?}"))))?; + .map_err(|e| err!(Database(error!("No state for {room_id:?}: {e:?}")))) + .await?; let current_state_ids: HashMap<_, _> = self .services @@ -40,8 +40,9 @@ pub async fn resolve_state( .collect() .await; + trace!("Loading fork states"); let fork_states = [current_state_ids, incoming_state]; - let auth_chain_sets: Vec> = fork_states + let auth_chain_sets = fork_states .iter() .try_stream() .wide_and_then(|state| { @@ -50,36 +51,33 @@ pub async fn resolve_state( .event_ids_iter(room_id, state.values().map(Borrow::borrow)) .try_collect() }) - .try_collect() - .await?; + .try_collect::>>(); - debug!("Loading fork states"); - let fork_states: Vec> = fork_states - .into_iter() + let fork_states = fork_states + .iter() .stream() - .wide_then(|fork_state| async move { + .wide_then(|fork_state| { let shortstatekeys = fork_state.keys().copied().stream(); - - let event_ids = fork_state.values().cloned().stream().boxed(); - + let event_ids = fork_state.values().cloned().stream(); self.services .short .multi_get_statekey_from_short(shortstatekeys) .zip(event_ids) .ready_filter_map(|(ty_sk, id)| Some((ty_sk.ok()?, id))) .collect() - .await }) - .collect() - .await; + .map(Ok::<_, Error>) + .try_collect::>>(); - debug!("Resolving state"); + let (fork_states, auth_chain_sets) = try_join(fork_states, auth_chain_sets).await?; + + trace!("Resolving state"); let state = self - .state_resolution(room_version_id, &fork_states, &auth_chain_sets) + .state_resolution(room_version_id, fork_states.iter(), &auth_chain_sets) .boxed() .await?; - debug!("State resolution done."); + trace!("State resolution done."); let state_events: Vec<_> = state .iter() .stream() @@ -92,7 +90,7 @@ pub async fn resolve_state( .collect() .await; - debug!("Compressing state..."); + trace!("Compressing state..."); let new_room_state: HashSet<_> = self .services .state_compressor @@ -109,20 +107,23 @@ pub async fn resolve_state( #[implement(super::Service)] #[tracing::instrument(name = "ruma", level = "debug", skip_all)] -pub async fn state_resolution( - &self, - room_version: &RoomVersionId, - state_sets: 
&[StateMap], - auth_chain_sets: &[HashSet], -) -> Result> { +pub async fn state_resolution<'a, StateSets>( + &'a self, + room_version: &'a RoomVersionId, + state_sets: StateSets, + auth_chain_sets: &'a [HashSet], +) -> Result> +where + StateSets: Iterator> + Clone + Send, +{ state_res::resolve( room_version, - state_sets.iter(), + state_sets, auth_chain_sets, &|event_id| self.event_fetch(event_id), &|event_id| self.event_exists(event_id), automatic_width(), ) - .await .map_err(|e| err!(error!("State resolution failed: {e:?}"))) + .await } diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 8730232a..8ae6354c 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -1,18 +1,20 @@ use std::{ borrow::Borrow, collections::{HashMap, HashSet}, + iter::Iterator, sync::Arc, }; use conduwuit::{ - debug, err, implement, - result::LogErr, - utils::stream::{BroadbandExt, IterStream}, + debug, err, implement, trace, + utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryWidebandExt}, PduEvent, Result, }; -use futures::{FutureExt, StreamExt, TryStreamExt}; +use futures::{future::try_join, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{state_res::StateMap, OwnedEventId, RoomId, RoomVersionId}; +use crate::rooms::short::ShortStateHash; + // TODO: if we know the prev_events of the incoming event we can avoid the #[implement(super::Service)] // request and build the state from a known point and resolve if > 1 prev_event @@ -70,86 +72,44 @@ pub(super) async fn state_at_incoming_resolved( room_id: &RoomId, room_version_id: &RoomVersionId, ) -> Result>> { - debug!("Calculating state at event using state res"); - let mut extremity_sstatehashes = HashMap::with_capacity(incoming_pdu.prev_events.len()); - - let mut okay = true; - for prev_eventid in &incoming_pdu.prev_events { - let Ok(prev_event) = self.services.timeline.get_pdu(prev_eventid).await else { - okay = false; - break; - }; - - let Ok(sstatehash) = self - .services - .state_accessor - .pdu_shortstatehash(prev_eventid) - .await - else { - okay = false; - break; - }; - - extremity_sstatehashes.insert(sstatehash, prev_event); - } - - if !okay { + trace!("Calculating extremity statehashes..."); + let Ok(extremity_sstatehashes) = incoming_pdu + .prev_events + .iter() + .try_stream() + .broad_and_then(|prev_eventid| { + self.services + .timeline + .get_pdu(prev_eventid) + .map_ok(move |prev_event| (prev_eventid, prev_event)) + }) + .broad_and_then(|(prev_eventid, prev_event)| { + self.services + .state_accessor + .pdu_shortstatehash(prev_eventid) + .map_ok(move |sstatehash| (sstatehash, prev_event)) + }) + .try_collect::>() + .await + else { return Ok(None); - } + }; - let mut fork_states = Vec::with_capacity(extremity_sstatehashes.len()); - let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); - for (sstatehash, prev_event) in extremity_sstatehashes { - let mut leaf_state: HashMap<_, _> = self - .services - .state_accessor - .state_full_ids(sstatehash) - .collect() - .await; - - if let Some(state_key) = &prev_event.state_key { - let shortstatekey = self - .services - .short - .get_or_create_shortstatekey(&prev_event.kind.to_string().into(), state_key) - .await; - - let event_id = &prev_event.event_id; - leaf_state.insert(shortstatekey, event_id.clone()); - // Now it's the state after the pdu - } - - let mut state = StateMap::with_capacity(leaf_state.len()); - 
let mut starting_events = Vec::with_capacity(leaf_state.len()); - for (k, id) in &leaf_state { - if let Ok((ty, st_key)) = self - .services - .short - .get_statekey_from_short(*k) - .await - .log_err() - { - // FIXME: Undo .to_string().into() when StateMap - // is updated to use StateEventType - state.insert((ty.to_string().into(), st_key), id.clone()); - } - - starting_events.push(id.borrow()); - } - - let auth_chain: HashSet = self - .services - .auth_chain - .event_ids_iter(room_id, starting_events.into_iter()) + trace!("Calculating fork states..."); + let (fork_states, auth_chain_sets): (Vec>, Vec>) = + extremity_sstatehashes + .into_iter() + .try_stream() + .wide_and_then(|(sstatehash, prev_event)| { + self.state_at_incoming_fork(room_id, sstatehash, prev_event) + }) .try_collect() + .map_ok(Vec::into_iter) + .map_ok(Iterator::unzip) .await?; - auth_chain_sets.push(auth_chain); - fork_states.push(state); - } - let Ok(new_state) = self - .state_resolution(room_version_id, &fork_states, &auth_chain_sets) + .state_resolution(room_version_id, fork_states.iter(), &auth_chain_sets) .boxed() .await else { @@ -157,16 +117,65 @@ pub(super) async fn state_at_incoming_resolved( }; new_state - .iter() + .into_iter() .stream() - .broad_then(|((event_type, state_key), event_id)| { + .broad_then(|((event_type, state_key), event_id)| async move { self.services .short - .get_or_create_shortstatekey(event_type, state_key) - .map(move |shortstatekey| (shortstatekey, event_id.clone())) + .get_or_create_shortstatekey(&event_type, &state_key) + .map(move |shortstatekey| (shortstatekey, event_id)) + .await }) .collect() .map(Some) .map(Ok) .await } + +#[implement(super::Service)] +async fn state_at_incoming_fork( + &self, + room_id: &RoomId, + sstatehash: ShortStateHash, + prev_event: PduEvent, +) -> Result<(StateMap, HashSet)> { + let mut leaf_state: HashMap<_, _> = self + .services + .state_accessor + .state_full_ids(sstatehash) + .collect() + .await; + + if let Some(state_key) = &prev_event.state_key { + let shortstatekey = self + .services + .short + .get_or_create_shortstatekey(&prev_event.kind.to_string().into(), state_key) + .await; + + let event_id = &prev_event.event_id; + leaf_state.insert(shortstatekey, event_id.clone()); + // Now it's the state after the pdu + } + + let auth_chain = self + .services + .auth_chain + .event_ids_iter(room_id, leaf_state.values().map(Borrow::borrow)) + .try_collect(); + + let fork_state = leaf_state + .iter() + .stream() + .broad_then(|(k, id)| { + self.services + .short + .get_statekey_from_short(*k) + .map_ok(|(ty, sk)| ((ty, sk), id.clone())) + }) + .ready_filter_map(Result::ok) + .collect() + .map(Ok); + + try_join(fork_state, auth_chain).await +} From 31c2968bb29e7447e56531333fb330da4ac08ede Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 29 Jan 2025 21:10:33 +0000 Subject: [PATCH 004/310] move db files command w/ filter args; misc related cleanup Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 66 +++++++++++++++++++++++++++++------- src/admin/debug/mod.rs | 8 +++++ src/admin/server/commands.rs | 15 +++----- src/admin/server/mod.rs | 3 -- src/database/engine/files.rs | 35 +++++-------------- src/service/globals/data.rs | 3 -- 6 files changed, 75 insertions(+), 55 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 4e0ce2e3..dcf9879c 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -7,7 +7,10 @@ use std::{ use conduwuit::{ debug_error, err, info, trace, utils, - 
utils::{stream::ReadyExt, string::EMPTY}, + utils::{ + stream::{IterStream, ReadyExt}, + string::EMPTY, + }, warn, Error, PduEvent, PduId, RawPduId, Result, }; use futures::{FutureExt, StreamExt, TryStreamExt}; @@ -640,6 +643,7 @@ pub(super) async fn force_set_room_state_from_server( room_id: room_id.clone().into(), event_id: first_pdu.event_id.clone(), }) + .boxed() .await?; for pdu in remote_state_response.pdus.clone() { @@ -648,6 +652,7 @@ pub(super) async fn force_set_room_state_from_server( .rooms .event_handler .parse_incoming_pdu(&pdu) + .boxed() .await { | Ok(t) => t, @@ -711,6 +716,7 @@ pub(super) async fn force_set_room_state_from_server( .rooms .event_handler .resolve_state(&room_id, &room_version, state) + .boxed() .await?; info!("Forcing new room state"); @@ -946,21 +952,57 @@ pub(super) async fn database_stats( property: Option, map: Option, ) -> Result { - let property = property.unwrap_or_else(|| "rocksdb.stats".to_owned()); let map_name = map.as_ref().map_or(EMPTY, String::as_str); + let property = property.unwrap_or_else(|| "rocksdb.stats".to_owned()); + self.services + .db + .iter() + .filter(|(&name, _)| map_name.is_empty() || map_name == name) + .try_stream() + .try_for_each(|(&name, map)| { + let res = map.property(&property).expect("invalid property"); + writeln!(self, "##### {name}:\n```\n{}\n```", res.trim()) + }) + .await?; - let mut out = String::new(); - for (&name, map) in self.services.db.iter() { - if !map_name.is_empty() && map_name != name { - continue; - } + Ok(RoomMessageEventContent::notice_plain("")) +} - let res = map.property(&property)?; - let res = res.trim(); - writeln!(out, "##### {name}:\n```\n{res}\n```")?; - } +#[admin_command] +pub(super) async fn database_files( + &self, + map: Option, + level: Option, +) -> Result { + let mut files: Vec<_> = self.services.db.db.file_list().collect::>()?; - Ok(RoomMessageEventContent::notice_markdown(out)) + files.sort_by_key(|f| f.name.clone()); + + writeln!(self, "| lev | sst | keys | dels | size | column |").await?; + writeln!(self, "| ---: | :--- | ---: | ---: | ---: | :--- |").await?; + files + .into_iter() + .filter(|file| { + map.as_deref() + .is_none_or(|map| map == file.column_family_name) + }) + .filter(|file| level.as_ref().is_none_or(|&level| level == file.level)) + .try_stream() + .try_for_each(|file| { + writeln!( + self, + "| {} | {:<13} | {:7}+ | {:4}- | {:9} | {} |", + file.level, + file.name, + file.num_entries, + file.num_deletions, + file.size, + file.column_family_name, + ) + }) + .await?; + + Ok(RoomMessageEventContent::notice_plain("")) } #[admin_command] diff --git a/src/admin/debug/mod.rs b/src/admin/debug/mod.rs index 07f7296b..db04ccf4 100644 --- a/src/admin/debug/mod.rs +++ b/src/admin/debug/mod.rs @@ -226,6 +226,14 @@ pub(super) enum DebugCommand { /// - Trim memory usage TrimMemory, + /// - List database files + DatabaseFiles { + map: Option, + + #[arg(long)] + level: Option, + }, + /// - Developer test stubs #[command(subcommand)] #[allow(non_snake_case)] diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index 910dce6e..d4cfa7d5 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -92,7 +92,7 @@ pub(super) async fn clear_caches(&self) -> Result { #[admin_command] pub(super) async fn list_backups(&self) -> Result { - let result = self.services.globals.db.backup_list()?; + let result = self.services.db.db.backup_list()?; if result.is_empty() { Ok(RoomMessageEventContent::text_plain("No backups found.")) @@ -103,31 +103,24 @@ 
pub(super) async fn list_backups(&self) -> Result { #[admin_command] pub(super) async fn backup_database(&self) -> Result { - let globals = Arc::clone(&self.services.globals); + let db = Arc::clone(&self.services.db); let mut result = self .services .server .runtime() - .spawn_blocking(move || match globals.db.backup() { + .spawn_blocking(move || match db.db.backup() { | Ok(()) => String::new(), | Err(e) => e.to_string(), }) .await?; if result.is_empty() { - result = self.services.globals.db.backup_list()?; + result = self.services.db.db.backup_list()?; } Ok(RoomMessageEventContent::notice_markdown(result)) } -#[admin_command] -pub(super) async fn list_database_files(&self) -> Result { - let result = self.services.globals.db.file_list()?; - - Ok(RoomMessageEventContent::notice_markdown(result)) -} - #[admin_command] pub(super) async fn admin_notice(&self, message: Vec) -> Result { let message = message.join(" "); diff --git a/src/admin/server/mod.rs b/src/admin/server/mod.rs index 3f3d6c5e..60615365 100644 --- a/src/admin/server/mod.rs +++ b/src/admin/server/mod.rs @@ -46,9 +46,6 @@ pub(super) enum ServerCommand { /// - List database backups ListBackups, - /// - List database files - ListDatabaseFiles, - /// - Send a message to the admin room. AdminNotice { message: Vec, diff --git a/src/database/engine/files.rs b/src/database/engine/files.rs index f603c57b..33d6fdc4 100644 --- a/src/database/engine/files.rs +++ b/src/database/engine/files.rs @@ -1,32 +1,15 @@ -use std::fmt::Write; - use conduwuit::{implement, Result}; +use rocksdb::LiveFile as SstFile; use super::Engine; +use crate::util::map_err; #[implement(Engine)] -pub fn file_list(&self) -> Result { - match self.db.live_files() { - | Err(e) => Ok(String::from(e)), - | Ok(mut files) => { - files.sort_by_key(|f| f.name.clone()); - let mut res = String::new(); - writeln!(res, "| lev | sst | keys | dels | size | column |")?; - writeln!(res, "| ---: | :--- | ---: | ---: | ---: | :--- |")?; - for file in files { - writeln!( - res, - "| {} | {:<13} | {:7}+ | {:4}- | {:9} | {} |", - file.level, - file.name, - file.num_entries, - file.num_deletions, - file.size, - file.column_family_name, - )?; - } - - Ok(res) - }, - } +pub fn file_list(&self) -> impl Iterator> + Send { + self.db + .live_files() + .map_err(map_err) + .into_iter() + .flat_map(Vec::into_iter) + .map(Ok) } diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 07b4ac2c..39cb9be1 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -79,7 +79,4 @@ impl Data { #[inline] pub fn backup_list(&self) -> Result { self.db.db.backup_list() } - - #[inline] - pub fn file_list(&self) -> Result { self.db.db.file_list() } } From 1a8482b3b4865a7f38c342929489ba925a98e05c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 30 Jan 2025 04:39:24 +0000 Subject: [PATCH 005/310] refactor incoming extremities retention; broad filter, single pass Signed-off-by: Jason Volk --- src/api/client/membership.rs | 7 +- .../event_handler/upgrade_outlier_pdu.rs | 74 +++++++++---------- src/service/rooms/state/mod.rs | 17 +++-- src/service/rooms/timeline/mod.rs | 44 ++++++----- 4 files changed, 74 insertions(+), 68 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index fccb9b53..d80aff0c 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -1,6 +1,7 @@ use std::{ borrow::Borrow, collections::{BTreeMap, HashMap, HashSet}, + iter::once, net::IpAddr, sync::Arc, }; @@ -1216,7 +1217,7 @@ async fn 
join_room_by_id_helper_remote( .append_pdu( &parsed_join_pdu, join_event, - vec![(*parsed_join_pdu.event_id).to_owned()], + once(parsed_join_pdu.event_id.borrow()), &state_lock, ) .await?; @@ -2195,7 +2196,7 @@ async fn knock_room_helper_local( .append_pdu( &parsed_knock_pdu, knock_event, - vec![(*parsed_knock_pdu.event_id).to_owned()], + once(parsed_knock_pdu.event_id.borrow()), &state_lock, ) .await?; @@ -2394,7 +2395,7 @@ async fn knock_room_helper_remote( .append_pdu( &parsed_knock_pdu, knock_event, - vec![(*parsed_knock_pdu.event_id).to_owned()], + once(parsed_knock_pdu.event_id.borrow()), &state_lock, ) .await?; diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index f0c8f0c5..ca351981 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -1,14 +1,18 @@ use std::{ borrow::Borrow, collections::{BTreeMap, HashSet}, + iter::once, sync::Arc, time::Instant, }; -use conduwuit::{debug, debug_info, err, implement, trace, warn, Err, Error, PduEvent, Result}; -use futures::{future::ready, StreamExt}; +use conduwuit::{ + debug, debug_info, err, implement, trace, + utils::stream::{BroadbandExt, ReadyExt}, + warn, Err, PduEvent, Result, +}; +use futures::{future::ready, FutureExt, StreamExt}; use ruma::{ - api::client::error::ErrorKind, events::{room::redaction::RoomRedactionEventContent, StateEventType, TimelineEventType}, state_res::{self, EventTypeExt}, CanonicalJsonValue, RoomId, RoomVersionId, ServerName, @@ -174,42 +178,34 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( // Now we calculate the set of extremities this room has after the incoming // event has been applied. We start with the previous extremities (aka leaves) trace!("Calculating extremities"); - let mut extremities: HashSet<_> = self + let extremities: Vec<_> = self .services .state .get_forward_extremities(room_id) .map(ToOwned::to_owned) + .ready_filter(|event_id| { + // Remove any that are referenced by this incoming event's prev_events + !incoming_pdu.prev_events.contains(event_id) + }) + .broad_filter_map(|event_id| async move { + // Only keep those extremities were not referenced yet + self.services + .pdu_metadata + .is_event_referenced(room_id, &event_id) + .await + .eq(&false) + .then_some(event_id) + }) .collect() .await; - // Remove any forward extremities that are referenced by this incoming event's - // prev_events - trace!( - "Calculated {} extremities; checking against {} prev_events", + debug!( + "Retained {} extremities checked against {} prev_events", extremities.len(), incoming_pdu.prev_events.len() ); - for prev_event in &incoming_pdu.prev_events { - extremities.remove(&(**prev_event)); - } - // Only keep those extremities were not referenced yet - let mut retained = HashSet::new(); - for id in &extremities { - if !self - .services - .pdu_metadata - .is_event_referenced(room_id, id) - .await - { - retained.insert(id.clone()); - } - } - - extremities.retain(|id| retained.contains(id)); - debug!("Retained {} extremities. 
Compressing state", extremities.len()); - - let state_ids_compressed: HashSet<_> = self + let state_ids_compressed: Arc> = self .services .state_compressor .compress_state_events( @@ -218,10 +214,9 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( .map(|(ssk, eid)| (ssk, eid.borrow())), ) .collect() + .map(Arc::new) .await; - let state_ids_compressed = Arc::new(state_ids_compressed); - if incoming_pdu.state_key.is_some() { debug!("Event is a state-event. Deriving new room state"); @@ -260,12 +255,14 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( // if not soft fail it if soft_fail { debug!("Soft failing event"); + let extremities = extremities.iter().map(Borrow::borrow); + self.services .timeline .append_incoming_pdu( &incoming_pdu, val, - extremities.iter().map(|e| (**e).to_owned()).collect(), + extremities, state_ids_compressed, soft_fail, &state_lock, @@ -273,27 +270,30 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( .await?; // Soft fail, we keep the event as an outlier but don't add it to the timeline - warn!("Event was soft failed: {incoming_pdu:?}"); self.services .pdu_metadata .mark_event_soft_failed(&incoming_pdu.event_id); - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event has been soft failed")); + warn!("Event was soft failed: {incoming_pdu:?}"); + return Err!(Request(InvalidParam("Event has been soft failed"))); } - trace!("Appending pdu to timeline"); - extremities.insert(incoming_pdu.event_id.clone()); - // Now that the event has passed all auth it is added into the timeline. // We use the `state_at_event` instead of `state_after` so we accurately // represent the state for this event. + trace!("Appending pdu to timeline"); + let extremities = extremities + .iter() + .map(Borrow::borrow) + .chain(once(incoming_pdu.event_id.borrow())); + let pdu_id = self .services .timeline .append_incoming_pdu( &incoming_pdu, val, - extremities.into_iter().collect(), + extremities, state_ids_compressed, soft_fail, &state_lock, diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index fd303667..8cb4e586 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -398,13 +398,14 @@ impl Service { .ignore_err() } - pub async fn set_forward_extremities( - &self, - room_id: &RoomId, - event_ids: Vec, - _state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room - * state mutex */ - ) { + pub async fn set_forward_extremities<'a, I>( + &'a self, + room_id: &'a RoomId, + event_ids: I, + _state_lock: &'a RoomMutexGuard, + ) where + I: Iterator + Send + 'a, + { let prefix = (room_id, Interfix); self.db .roomid_pduleaves @@ -413,7 +414,7 @@ impl Service { .ready_for_each(|key| self.db.roomid_pduleaves.remove(key)) .await; - for event_id in &event_ids { + for event_id in event_ids { let key = (room_id, event_id); self.db.roomid_pduleaves.put_raw(key, event_id); } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index bf585a6b..8b3b67a7 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,6 +1,7 @@ mod data; use std::{ + borrow::Borrow, cmp, collections::{BTreeMap, HashSet}, fmt::Write, @@ -260,14 +261,16 @@ impl Service { /// /// Returns pdu id #[tracing::instrument(level = "debug", skip_all)] - pub async fn append_pdu( - &self, - pdu: &PduEvent, + pub async fn append_pdu<'a, Leafs>( + &'a self, + pdu: &'a PduEvent, mut pdu_json: CanonicalJsonObject, - leaves: Vec, - state_lock: &RoomMutexGuard, /* Take mutex guard to make 
sure users get the room state - * mutex */ - ) -> Result { + leafs: Leafs, + state_lock: &'a RoomMutexGuard, + ) -> Result + where + Leafs: Iterator + Send + 'a, + { // Coalesce database writes for the remainder of this scope. let _cork = self.db.db.cork_and_flush(); @@ -335,7 +338,7 @@ impl Service { self.services .state - .set_forward_extremities(&pdu.room_id, leaves, state_lock) + .set_forward_extremities(&pdu.room_id, leafs, state_lock) .await; let insert_lock = self.mutex_insert.lock(&pdu.room_id).await; @@ -819,8 +822,7 @@ impl Service { pdu_builder: PduBuilder, sender: &UserId, room_id: &RoomId, - state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room state - * mutex */ + state_lock: &RoomMutexGuard, ) -> Result { let (pdu, pdu_json) = self .create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock) @@ -896,7 +898,7 @@ impl Service { pdu_json, // Since this PDU references all pdu_leaves we can update the leaves // of the room - vec![(*pdu.event_id).to_owned()], + once(pdu.event_id.borrow()), state_lock, ) .boxed() @@ -943,16 +945,18 @@ impl Service { /// Append the incoming event setting the state snapshot to the state from /// the server that sent the event. #[tracing::instrument(level = "debug", skip_all)] - pub async fn append_incoming_pdu( - &self, - pdu: &PduEvent, + pub async fn append_incoming_pdu<'a, Leafs>( + &'a self, + pdu: &'a PduEvent, pdu_json: CanonicalJsonObject, - new_room_leaves: Vec, + new_room_leafs: Leafs, state_ids_compressed: Arc>, soft_fail: bool, - state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room state - * mutex */ - ) -> Result> { + state_lock: &'a RoomMutexGuard, + ) -> Result> + where + Leafs: Iterator + Send + 'a, + { // We append to state before appending the pdu, so we don't have a moment in // time with the pdu without it's state. This is okay because append_pdu can't // fail. @@ -968,14 +972,14 @@ impl Service { self.services .state - .set_forward_extremities(&pdu.room_id, new_room_leaves, state_lock) + .set_forward_extremities(&pdu.room_id, new_room_leafs, state_lock) .await; return Ok(None); } let pdu_id = self - .append_pdu(pdu, pdu_json, new_room_leaves, state_lock) + .append_pdu(pdu, pdu_json, new_room_leafs, state_lock) .await?; Ok(Some(pdu_id)) From ff8bbd4cfa6ad9426bd9efbe610547dd89030c85 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 30 Jan 2025 05:14:45 +0000 Subject: [PATCH 006/310] untwist the redaction check stanza Signed-off-by: Jason Volk --- src/core/pdu/redact.rs | 18 +++++++ .../event_handler/upgrade_outlier_pdu.rs | 52 ++++--------------- 2 files changed, 28 insertions(+), 42 deletions(-) diff --git a/src/core/pdu/redact.rs b/src/core/pdu/redact.rs index 5d33eeca..7c332719 100644 --- a/src/core/pdu/redact.rs +++ b/src/core/pdu/redact.rs @@ -90,3 +90,21 @@ pub fn copy_redacts(&self) -> (Option, Box) { (self.redacts.clone(), self.content.clone()) } + +#[implement(super::Pdu)] +#[must_use] +pub fn redacts_id(&self, room_version: &RoomVersionId) -> Option { + use RoomVersionId::*; + + if self.kind != TimelineEventType::RoomRedaction { + return None; + } + + match *room_version { + | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => self.redacts.clone(), + | _ => + self.get_content::() + .ok()? 
+ .redacts, + } +} diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index ca351981..03697558 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -13,9 +13,9 @@ use conduwuit::{ }; use futures::{future::ready, FutureExt, StreamExt}; use ruma::{ - events::{room::redaction::RoomRedactionEventContent, StateEventType, TimelineEventType}, + events::StateEventType, state_res::{self, EventTypeExt}, - CanonicalJsonValue, RoomId, RoomVersionId, ServerName, + CanonicalJsonValue, RoomId, ServerName, }; use super::{get_room_version_id, to_room_version}; @@ -127,46 +127,14 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( // Soft fail check before doing state res debug!("Performing soft-fail check"); - let soft_fail = { - use RoomVersionId::*; - - !auth_check - || incoming_pdu.kind == TimelineEventType::RoomRedaction - && match room_version_id { - | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { - if let Some(redact_id) = &incoming_pdu.redacts { - !self - .services - .state_accessor - .user_can_redact( - redact_id, - &incoming_pdu.sender, - &incoming_pdu.room_id, - true, - ) - .await? - } else { - false - } - }, - | _ => { - let content: RoomRedactionEventContent = incoming_pdu.get_content()?; - if let Some(redact_id) = &content.redacts { - !self - .services - .state_accessor - .user_can_redact( - redact_id, - &incoming_pdu.sender, - &incoming_pdu.room_id, - true, - ) - .await? - } else { - false - } - }, - } + let soft_fail = match (auth_check, incoming_pdu.redacts_id(&room_version_id)) { + | (false, _) => true, + | (true, None) => false, + | (true, Some(redact_id)) => + self.services + .state_accessor + .user_can_redact(&redact_id, &incoming_pdu.sender, &incoming_pdu.room_id, true) + .await?, }; // 13. 
Use state resolution to find new room state From 69837671bbc02b1cfba351e1c1321be506ef88b1 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 30 Jan 2025 09:28:34 +0000 Subject: [PATCH 007/310] simplify request handler task base Signed-off-by: Jason Volk --- src/core/metrics/mod.rs | 4 --- src/router/layers.rs | 33 ++++++++++++---------- src/router/request.rs | 59 +++++++-------------------------------- src/router/run.rs | 1 - src/router/serve/plain.rs | 7 ----- src/router/serve/unix.rs | 7 ++++- 6 files changed, 35 insertions(+), 76 deletions(-) diff --git a/src/core/metrics/mod.rs b/src/core/metrics/mod.rs index f2022166..8f7a5571 100644 --- a/src/core/metrics/mod.rs +++ b/src/core/metrics/mod.rs @@ -19,8 +19,6 @@ pub struct Metrics { runtime_intervals: std::sync::Mutex>, // TODO: move stats - pub requests_spawn_active: AtomicU32, - pub requests_spawn_finished: AtomicU32, pub requests_handle_active: AtomicU32, pub requests_handle_finished: AtomicU32, pub requests_panic: AtomicU32, @@ -48,8 +46,6 @@ impl Metrics { #[cfg(tokio_unstable)] runtime_intervals: std::sync::Mutex::new(runtime_intervals), - requests_spawn_active: AtomicU32::new(0), - requests_spawn_finished: AtomicU32::new(0), requests_handle_active: AtomicU32::new(0), requests_handle_finished: AtomicU32::new(0), requests_panic: AtomicU32::new(0), diff --git a/src/router/layers.rs b/src/router/layers.rs index 96bca4fd..c5227c22 100644 --- a/src/router/layers.rs +++ b/src/router/layers.rs @@ -5,7 +5,7 @@ use axum::{ Router, }; use axum_client_ip::SecureClientIpSource; -use conduwuit::{error, Result, Server}; +use conduwuit::{debug, error, Result, Server}; use conduwuit_api::router::state::Guard; use conduwuit_service::Services; use http::{ @@ -50,7 +50,6 @@ pub(crate) fn build(services: &Arc) -> Result<(Router, Guard)> { let layers = layers .layer(SetSensitiveHeadersLayer::new([header::AUTHORIZATION])) - .layer(axum::middleware::from_fn_with_state(Arc::clone(services), request::spawn)) .layer( TraceLayer::new_for_http() .make_span_with(tracing_span::<_>) @@ -196,20 +195,26 @@ fn catch_panic( } fn tracing_span(request: &http::Request) -> tracing::Span { - let path = request.extensions().get::().map_or_else( - || { - request - .uri() - .path_and_query() - .expect("all requests have a path") - .as_str() - }, - truncated_matched_path, - ); + let path = request + .extensions() + .get::() + .map_or_else(|| request_path_str(request), truncated_matched_path); - let method = request.method(); + tracing::span! 
{ + parent: None, + debug::INFO_SPAN_LEVEL, + "router", + method = %request.method(), + %path, + } +} - tracing::debug_span!(parent: None, "router", %method, %path) +fn request_path_str(request: &http::Request) -> &str { + request + .uri() + .path_and_query() + .expect("all requests have a path") + .as_str() } fn truncated_matched_path(path: &MatchedPath) -> &str { diff --git a/src/router/request.rs b/src/router/request.rs index ca063338..f7b94417 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -8,48 +8,6 @@ use conduwuit::{debug, debug_error, debug_warn, err, error, trace, Result}; use conduwuit_service::Services; use http::{Method, StatusCode, Uri}; -#[tracing::instrument( - parent = None, - level = "trace", - skip_all, - fields( - handled = %services - .server - .metrics - .requests_spawn_finished - .fetch_add(1, Ordering::Relaxed), - active = %services - .server - .metrics - .requests_spawn_active - .fetch_add(1, Ordering::Relaxed), - ) -)] -pub(crate) async fn spawn( - State(services): State>, - req: http::Request, - next: axum::middleware::Next, -) -> Result { - let server = &services.server; - - #[cfg(debug_assertions)] - conduwuit::defer! {{ - _ = server - .metrics - .requests_spawn_active - .fetch_sub(1, Ordering::Relaxed); - }}; - - if !server.running() { - debug_warn!("unavailable pending shutdown"); - return Err(StatusCode::SERVICE_UNAVAILABLE); - } - - let fut = next.run(req); - let task = server.runtime().spawn(fut); - task.await.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) -} - #[tracing::instrument( level = "debug", skip_all, @@ -71,17 +29,15 @@ pub(crate) async fn handle( req: http::Request, next: axum::middleware::Next, ) -> Result { - let server = &services.server; - #[cfg(debug_assertions)] conduwuit::defer! {{ - _ = server + _ = services.server .metrics .requests_handle_active .fetch_sub(1, Ordering::Relaxed); }}; - if !server.running() { + if !services.server.running() { debug_warn!( method = %req.method(), uri = %req.uri(), @@ -91,10 +47,15 @@ pub(crate) async fn handle( return Err(StatusCode::SERVICE_UNAVAILABLE); } - let uri = req.uri().clone(); let method = req.method().clone(); - let result = next.run(req).await; - handle_result(&method, &uri, result) + let uri = req.uri().clone(); + services + .server + .runtime() + .spawn(next.run(req)) + .await + .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) + .and_then(|result| handle_result(&method, &uri, result)) } fn handle_result(method: &Method, uri: &Uri, result: Response) -> Result { diff --git a/src/router/run.rs b/src/router/run.rs index ea8a7666..605168b8 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ -125,7 +125,6 @@ async fn handle_shutdown(server: Arc, tx: Sender<()>, handle: axum_serve let timeout = Duration::from_secs(36); debug!( ?timeout, - spawn_active = ?server.metrics.requests_spawn_active.load(Ordering::Relaxed), handle_active = ?server.metrics.requests_handle_active.load(Ordering::Relaxed), "Notifying for graceful shutdown" ); diff --git a/src/router/serve/plain.rs b/src/router/serve/plain.rs index 0e971f3c..535282b9 100644 --- a/src/router/serve/plain.rs +++ b/src/router/serve/plain.rs @@ -24,27 +24,20 @@ pub(super) async fn serve( info!("Listening on {addrs:?}"); while join_set.join_next().await.is_some() {} - let spawn_active = server.metrics.requests_spawn_active.load(Ordering::Relaxed); let handle_active = server .metrics .requests_handle_active .load(Ordering::Relaxed); debug_info!( - spawn_finished = server - .metrics - .requests_spawn_finished - 
.load(Ordering::Relaxed), handle_finished = server .metrics .requests_handle_finished .load(Ordering::Relaxed), panics = server.metrics.requests_panic.load(Ordering::Relaxed), - spawn_active, handle_active, "Stopped listening on {addrs:?}", ); - debug_assert!(spawn_active == 0, "active request tasks are not joined"); debug_assert!(handle_active == 0, "active request handles still pending"); Ok(()) diff --git a/src/router/serve/unix.rs b/src/router/serve/unix.rs index 6855b34c..6a030c30 100644 --- a/src/router/serve/unix.rs +++ b/src/router/serve/unix.rs @@ -159,7 +159,12 @@ async fn fini(server: &Arc, listener: UnixListener, mut tasks: JoinSet<( drop(listener); debug!("Waiting for requests to finish..."); - while server.metrics.requests_spawn_active.load(Ordering::Relaxed) > 0 { + while server + .metrics + .requests_handle_active + .load(Ordering::Relaxed) + .gt(&0) + { tokio::select! { task = tasks.join_next() => if task.is_none() { break; }, () = sleep(FINI_POLL_INTERVAL) => {}, From f698254c412b5a142567f6b0ad710aa212c9b34d Mon Sep 17 00:00:00 2001 From: morguldir Date: Fri, 31 Jan 2025 02:36:14 +0100 Subject: [PATCH 008/310] make registration tokens reloadable, and allow configuring multiple Signed-off-by: morguldir --- conduwuit-example.toml | 5 +++-- src/admin/room/alias.rs | 15 +++++++++------ src/core/config/mod.rs | 5 +++-- src/service/uiaa/mod.rs | 35 ++++++++++++++++++++++++++--------- 4 files changed, 41 insertions(+), 19 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 4062ba99..3fd95044 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -406,8 +406,9 @@ # #registration_token = -# Path to a file on the system that gets read for the registration token. -# this config option takes precedence/priority over "registration_token". +# Path to a file on the system that gets read for additional registration +# tokens. Multiple tokens can be added if you separate them with +# whitespace # # conduwuit must be able to access the file, and it must not be empty # diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs index 9710cfc8..d3b956e1 100644 --- a/src/admin/room/alias.rs +++ b/src/admin/room/alias.rs @@ -72,7 +72,7 @@ pub(super) async fn reprocess( ))), }; match command { - | RoomAliasCommand::Set { force, room_id, .. } => + | RoomAliasCommand::Set { force, room_id, .. } => { match (force, services.rooms.alias.resolve_local_alias(&room_alias).await) { | (true, Ok(id)) => { match services.rooms.alias.set_alias( @@ -106,8 +106,9 @@ pub(super) async fn reprocess( ))), } }, - }, - | RoomAliasCommand::Remove { .. } => + } + }, + | RoomAliasCommand::Remove { .. } => { match services.rooms.alias.resolve_local_alias(&room_alias).await { | Ok(id) => match services .rooms @@ -124,15 +125,17 @@ pub(super) async fn reprocess( }, | Err(_) => Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), - }, - | RoomAliasCommand::Which { .. } => + } + }, + | RoomAliasCommand::Which { .. } => { match services.rooms.alias.resolve_local_alias(&room_alias).await { | Ok(id) => Ok(RoomMessageEventContent::text_plain(format!( "Alias resolves to {id}" ))), | Err(_) => Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), - }, + } + }, | RoomAliasCommand::List { .. 
} => unreachable!(), } }, diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 415c9ba9..ff038975 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -510,8 +510,9 @@ pub struct Config { /// display: sensitive pub registration_token: Option, - /// Path to a file on the system that gets read for the registration token. - /// this config option takes precedence/priority over "registration_token". + /// Path to a file on the system that gets read for additional registration + /// tokens. Multiple tokens can be added if you separate them with + /// whitespace /// /// conduwuit must be able to access the file, and it must not be empty /// diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index f7e55251..7084f32a 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -1,5 +1,5 @@ use std::{ - collections::BTreeMap, + collections::{BTreeMap, HashSet}, sync::{Arc, RwLock}, }; @@ -17,7 +17,7 @@ use ruma::{ CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedUserId, UserId, }; -use crate::{globals, users, Dep}; +use crate::{config, globals, users, Dep}; pub struct Service { userdevicesessionid_uiaarequest: RwLock, @@ -28,6 +28,7 @@ pub struct Service { struct Services { globals: Dep, users: Dep, + config: Dep, } struct Data { @@ -49,6 +50,7 @@ impl crate::Service for Service { services: Services { globals: args.depend::("globals"), users: args.depend::("users"), + config: args.depend::("config"), }, })) } @@ -56,6 +58,26 @@ impl crate::Service for Service { fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } +#[implement(Service)] +pub async fn read_tokens(&self) -> Result> { + let mut tokens = HashSet::new(); + if let Some(file) = &self.services.config.registration_token_file.as_ref() { + match std::fs::read_to_string(file) { + | Ok(text) => { + text.split_ascii_whitespace().for_each(|token| { + tokens.insert(token.to_owned()); + }); + }, + | Err(e) => error!("Failed to read the registration token file: {e}"), + } + }; + if let Some(token) = &self.services.config.registration_token { + tokens.insert(token.to_owned()); + } + + Ok(tokens) +} + /// Creates a new Uiaa session. Make sure the session token is unique. #[implement(Service)] pub fn create( @@ -152,13 +174,8 @@ pub async fn try_auth( uiaainfo.completed.push(AuthType::Password); }, | AuthData::RegistrationToken(t) => { - if self - .services - .globals - .registration_token - .as_ref() - .is_some_and(|reg_token| t.token.trim() == reg_token) - { + let tokens = self.read_tokens().await?; + if tokens.contains(t.token.trim()) { uiaainfo.completed.push(AuthType::RegistrationToken); } else { uiaainfo.auth_error = Some(ruma::api::client::error::StandardErrorBody { From e161e5dd61b006056ef35fbd034492130bffe150 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 00:54:00 +0000 Subject: [PATCH 009/310] add pair_of! macro Signed-off-by: Jason Volk --- src/core/utils/mod.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 1a4b52da..c2d8ed45 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -84,6 +84,17 @@ macro_rules! apply { }; } +#[macro_export] +macro_rules! pair_of { + ($decl:ty) => { + ($decl, $decl) + }; + + ($init:expr) => { + ($init, $init) + }; +} + /// Functor for truthy #[macro_export] macro_rules! 
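A minimal usage sketch for the pair_of! macro just added (the variable names here are illustrative, not from the patch): the first arm expands in type position, the second in expression position; the incremental-sync rework later in this series uses exactly that shape with pair_of!(HashSet<_>).

    let totals: pair_of!(u64) = pair_of!(0);
    assert_eq!(totals, (0, 0));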
is_true { From 4ff1155bf0aefddd02e34ed9c709db25c0da3ecd Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 01:23:27 +0000 Subject: [PATCH 010/310] reroll encrypted_room branch in incremental sync state Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 150 ++++++++++++++++++-------------------- 1 file changed, 69 insertions(+), 81 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index cd4dfc90..f5b612e4 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -6,7 +6,7 @@ use std::{ use axum::extract::State; use conduwuit::{ - at, err, error, extract_variant, is_equal_to, + at, err, error, extract_variant, is_equal_to, pair_of, pdu::EventHash, result::FlatOk, utils::{ @@ -16,7 +16,7 @@ use conduwuit::{ stream::{BroadbandExt, Tools, WidebandExt}, BoolExt, IterStream, ReadyExt, TryFutureExtExt, }, - Error, PduCount, PduEvent, Result, + PduCount, PduEvent, Result, }; use conduwuit_service::{ rooms::{ @@ -64,6 +64,8 @@ struct StateChanges { invited_member_count: Option, joined_since_last_sync: bool, state_events: Vec, + device_list_updates: HashSet, + left_encrypted_users: HashSet, } type PresenceUpdates = HashMap; @@ -325,18 +327,16 @@ pub(crate) async fn build_sync_events( // If the user doesn't share an encrypted room with the target anymore, we need // to tell them - let device_list_left = left_encrypted_users + let device_list_left: HashSet<_> = left_encrypted_users .into_iter() .stream() .broad_filter_map(|user_id| async move { - let no_shared_encrypted_room = - !share_encrypted_room(services, sender_user, &user_id, None).await; - no_shared_encrypted_room.then_some(user_id) - }) - .ready_fold(HashSet::new(), |mut device_list_left, user_id| { - device_list_left.insert(user_id); - device_list_left + share_encrypted_room(services, sender_user, &user_id, None) + .await + .eq(&false) + .then_some(user_id) }) + .collect() .await; let response = sync_events::v3::Response { @@ -730,14 +730,14 @@ async fn load_joined_room( .into(); let witness = witness.await; - let mut device_list_updates = HashSet::::new(); - let mut left_encrypted_users = HashSet::::new(); let StateChanges { heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events, + mut device_list_updates, + left_encrypted_users, } = if no_state_changes { StateChanges::default() } else { @@ -747,8 +747,6 @@ async fn load_joined_room( room_id, full_state, filter, - &mut device_list_updates, - &mut left_encrypted_users, since_shortstatehash, current_shortstatehash, joined_since_last_sync, @@ -919,8 +917,6 @@ async fn calculate_state_changes( room_id: &RoomId, full_state: bool, filter: &FilterDefinition, - device_list_updates: &mut HashSet, - left_encrypted_users: &mut HashSet, since_shortstatehash: Option, current_shortstatehash: ShortStateHash, joined_since_last_sync: bool, @@ -944,8 +940,6 @@ async fn calculate_state_changes( room_id, full_state, filter, - device_list_updates, - left_encrypted_users, since_shortstatehash, current_shortstatehash, joined_since_last_sync, @@ -1013,6 +1007,7 @@ async fn calculate_state_initial( invited_member_count, joined_since_last_sync: true, state_events, + ..Default::default() }) } @@ -1024,8 +1019,6 @@ async fn calculate_state_incremental( room_id: &RoomId, full_state: bool, _filter: &FilterDefinition, - device_list_updates: &mut HashSet, - left_encrypted_users: &mut HashSet, since_shortstatehash: Option, current_shortstatehash: ShortStateHash, joined_since_last_sync: bool, @@ -1063,79 +1056,72 @@ async fn 
calculate_state_incremental( .await; } - let encrypted_room = services - .rooms - .state_accessor - .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") - .is_ok(); - let since_encryption = services .rooms .state_accessor .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "") .is_ok(); - let (encrypted_room, since_encryption) = join(encrypted_room, since_encryption).await; + let encrypted_room = services + .rooms + .state_accessor + .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") + .is_ok() + .await; - // Calculations: - let new_encrypted_room = encrypted_room && !since_encryption; + let (mut device_list_updates, left_encrypted_users) = delta_state_events + .iter() + .stream() + .ready_filter(|_| encrypted_room) + .ready_filter(|state_event| state_event.kind == RoomMember) + .ready_filter_map(|state_event| { + let content = state_event.get_content().ok()?; + let user_id = state_event.state_key.as_ref()?.parse().ok()?; + Some((content, user_id)) + }) + .ready_filter(|(_, user_id): &(RoomMemberEventContent, OwnedUserId)| { + user_id != sender_user + }) + .fold_default(|(mut dlu, mut leu): pair_of!(HashSet<_>), (content, user_id)| async move { + use MembershipState::*; + + let shares_encrypted_room = + |user_id| share_encrypted_room(services, sender_user, user_id, Some(room_id)); + + match content.membership { + | Join if !shares_encrypted_room(&user_id).await => dlu.insert(user_id), + | Leave => leu.insert(user_id), + | _ => false, + }; + + (dlu, leu) + }) + .await; + + // If the user is in a new encrypted room, give them all joined users + let new_encrypted_room = encrypted_room && !since_encryption.await; + if joined_since_last_sync && encrypted_room || new_encrypted_room { + services + .rooms + .state_cache + .room_members(room_id) + .ready_filter(|&user_id| sender_user != user_id) + .map(ToOwned::to_owned) + .broad_filter_map(|user_id| async move { + share_encrypted_room(services, sender_user, &user_id, Some(room_id)) + .await + .or_some(user_id) + }) + .ready_for_each(|user_id| { + device_list_updates.insert(user_id); + }) + .await; + } let send_member_count = delta_state_events .iter() .any(|event| event.kind == RoomMember); - if encrypted_room { - for state_event in &delta_state_events { - if state_event.kind != RoomMember { - continue; - } - - if let Some(state_key) = &state_event.state_key { - let user_id = UserId::parse(state_key) - .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; - - if user_id == sender_user { - continue; - } - - let content: RoomMemberEventContent = state_event.get_content()?; - - match content.membership { - | MembershipState::Join => { - // A new user joined an encrypted room - if !share_encrypted_room(services, sender_user, user_id, Some(room_id)) - .await - { - device_list_updates.insert(user_id.into()); - } - }, - | MembershipState::Leave => { - // Write down users that have left encrypted rooms we are in - left_encrypted_users.insert(user_id.into()); - }, - | _ => {}, - } - } - } - } - - if joined_since_last_sync && encrypted_room || new_encrypted_room { - let updates: Vec = services - .rooms - .state_cache - .room_members(room_id) - .ready_filter(|user_id| sender_user != *user_id) - .filter_map(|user_id| { - share_encrypted_room(services, sender_user, user_id, Some(room_id)) - .map(|res| res.or_some(user_id.to_owned())) - }) - .collect() - .await; - - // If the user is in a new encrypted room, give them all joined users - device_list_updates.extend(updates); - } - let 
(joined_member_count, invited_member_count, heroes) = if send_member_count { calculate_counts(services, room_id, sender_user).await? } else { @@ -1148,6 +1134,8 @@ async fn calculate_state_incremental( invited_member_count, joined_since_last_sync, state_events: delta_state_events, + device_list_updates, + left_encrypted_users, }) } From 4e0cedbe5122c478e63e26b3f5156475629ada3e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 05:05:32 +0000 Subject: [PATCH 011/310] simplify v3 sync presence collecting Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 55 +++++++-------------------------------- 1 file changed, 10 insertions(+), 45 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index f5b612e4..cd95fa42 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -1,6 +1,6 @@ use std::{ cmp::{self}, - collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet}, time::Duration, }; @@ -45,7 +45,7 @@ use ruma::{ uiaa::UiaaResponse, }, events::{ - presence::PresenceEvent, + presence::{PresenceEvent, PresenceEventContent}, room::member::{MembershipState, RoomMemberEventContent}, AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType::*, @@ -68,7 +68,7 @@ struct StateChanges { left_encrypted_users: HashSet, } -type PresenceUpdates = HashMap; +type PresenceUpdates = HashMap; /// # `GET /_matrix/client/r0/sync` /// @@ -351,9 +351,11 @@ pub(crate) async fn build_sync_events( next_batch: next_batch.to_string(), presence: Presence { events: presence_updates - .unwrap_or_default() - .into_values() - .map(|v| Raw::new(&v).expect("PresenceEvent always serializes successfully")) + .into_iter() + .flat_map(IntoIterator::into_iter) + .map(|(sender, content)| PresenceEvent { content, sender }) + .map(|ref event| Raw::new(event)) + .filter_map(Result::ok) .collect(), }, rooms: Rooms { @@ -390,45 +392,8 @@ async fn process_presence_updates( .map_ok(move |event| (user_id, event)) .ok() }) - .ready_fold(PresenceUpdates::new(), |mut updates, (user_id, event)| { - match updates.entry(user_id.into()) { - | Entry::Vacant(slot) => { - let mut new_event = event; - new_event.content.last_active_ago = match new_event.content.currently_active { - | Some(true) => None, - | _ => new_event.content.last_active_ago, - }; - - slot.insert(new_event); - }, - | Entry::Occupied(mut slot) => { - let curr_event = slot.get_mut(); - let curr_content = &mut curr_event.content; - let new_content = event.content; - - // Update existing presence event with more info - curr_content.presence = new_content.presence; - curr_content.status_msg = new_content - .status_msg - .or_else(|| curr_content.status_msg.take()); - curr_content.displayname = new_content - .displayname - .or_else(|| curr_content.displayname.take()); - curr_content.avatar_url = new_content - .avatar_url - .or_else(|| curr_content.avatar_url.take()); - curr_content.currently_active = new_content - .currently_active - .or(curr_content.currently_active); - curr_content.last_active_ago = match curr_content.currently_active { - | Some(true) => None, - | _ => new_content.last_active_ago.or(curr_content.last_active_ago), - }; - }, - }; - - updates - }) + .map(|(user_id, event)| (user_id.to_owned(), event.content)) + .collect() .await } From a4ef04cd1427ea2eeb474775e7c4c86937d063ab Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 08:31:58 +0000 Subject: [PATCH 012/310] fix room join completion taking wrong sync branch 
Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index cd95fa42..e3f559f5 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -887,7 +887,7 @@ async fn calculate_state_changes( joined_since_last_sync: bool, witness: Option<&Witness>, ) -> Result { - if since_shortstatehash.is_none() || joined_since_last_sync { + if since_shortstatehash.is_none() { calculate_state_initial( services, sender_user, From 6983798487ec563be83a3ba8739afa9977d98741 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 08:34:32 +0000 Subject: [PATCH 013/310] implement lazy-loading for incremental sync Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 182 ++++++++++++++++++++------------------ 1 file changed, 98 insertions(+), 84 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index e3f559f5..49246514 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -55,7 +55,10 @@ use ruma::{ }; use super::{load_timeline, share_encrypted_room}; -use crate::{client::ignored_filter, Ruma, RumaResponse}; +use crate::{ + client::{ignored_filter, lazy_loading_witness}, + Ruma, RumaResponse, +}; #[derive(Default)] struct StateChanges { @@ -633,10 +636,6 @@ async fn load_joined_room( }) .into(); - let no_state_changes = timeline_pdus.is_empty() - && (since_shortstatehash.is_none() - || since_shortstatehash.is_some_and(is_equal_to!(current_shortstatehash))); - let since_sender_member: OptionFuture<_> = since_shortstatehash .map(|short| { services @@ -658,11 +657,7 @@ async fn load_joined_room( let lazy_loading_enabled = filter.room.state.lazy_load_options.is_enabled() || filter.room.timeline.lazy_load_options.is_enabled(); - let generate_witness = - lazy_loading_enabled && (since_shortstatehash.is_none() || joined_since_last_sync); - - let lazy_reset = lazy_loading_enabled && since_shortstatehash.is_none(); - + let lazy_reset = since_shortstatehash.is_none(); let lazy_loading_context = &lazy_loading::Context { user_id: sender_user, device_id: sender_device, @@ -677,24 +672,10 @@ async fn load_joined_room( .into(); lazy_load_reset.await; - let witness: Option = generate_witness.then(|| { - timeline_pdus - .iter() - .map(|(_, pdu)| pdu.sender.clone()) - .chain(receipt_events.keys().cloned()) - .collect() - }); - - let witness: OptionFuture<_> = witness - .map(|witness| { - services - .rooms - .lazy_loading - .witness_retain(witness, lazy_loading_context) - }) + let witness: OptionFuture<_> = lazy_loading_enabled + .then(|| lazy_loading_witness(services, lazy_loading_context, timeline_pdus.iter())) .into(); - let witness = witness.await; let StateChanges { heroes, joined_member_count, @@ -703,23 +684,19 @@ async fn load_joined_room( state_events, mut device_list_updates, left_encrypted_users, - } = if no_state_changes { - StateChanges::default() - } else { - calculate_state_changes( - services, - sender_user, - room_id, - full_state, - filter, - since_shortstatehash, - current_shortstatehash, - joined_since_last_sync, - witness.as_ref(), - ) - .boxed() - .await? 
- }; + } = calculate_state_changes( + services, + sender_user, + room_id, + full_state, + filter, + since_shortstatehash, + current_shortstatehash, + joined_since_last_sync, + witness.await.as_ref(), + ) + .boxed() + .await?; let account_data_events = services .account_data @@ -908,6 +885,7 @@ async fn calculate_state_changes( since_shortstatehash, current_shortstatehash, joined_since_last_sync, + witness, ) .await } @@ -920,7 +898,7 @@ async fn calculate_state_initial( sender_user: &UserId, room_id: &RoomId, full_state: bool, - filter: &FilterDefinition, + _filter: &FilterDefinition, current_shortstatehash: ShortStateHash, witness: Option<&Witness>, ) -> Result { @@ -938,20 +916,14 @@ async fn calculate_state_initial( .zip(event_ids.into_iter().stream()) .ready_filter_map(|item| Some((item.0.ok()?, item.1))) .ready_filter_map(|((event_type, state_key), event_id)| { - let lazy_load_enabled = filter.room.state.lazy_load_options.is_enabled() - || filter.room.timeline.lazy_load_options.is_enabled(); - - if lazy_load_enabled + let lazy = !full_state && event_type == StateEventType::RoomMember - && !full_state && state_key.as_str().try_into().is_ok_and(|user_id: &UserId| { sender_user != user_id && witness.is_some_and(|witness| !witness.contains(user_id)) - }) { - return None; - } + }); - Some(event_id) + lazy.or_some(event_id) }) .broad_filter_map(|event_id: OwnedEventId| async move { services.rooms.timeline.get_pdu(&event_id).await.ok() @@ -978,7 +950,7 @@ async fn calculate_state_initial( #[tracing::instrument(name = "incremental", level = "trace", skip_all)] #[allow(clippy::too_many_arguments)] -async fn calculate_state_incremental( +async fn calculate_state_incremental<'a>( services: &Services, sender_user: &UserId, room_id: &RoomId, @@ -987,39 +959,80 @@ async fn calculate_state_incremental( since_shortstatehash: Option, current_shortstatehash: ShortStateHash, joined_since_last_sync: bool, + witness: Option<&'a Witness>, ) -> Result { - // Incremental /sync - let since_shortstatehash = - since_shortstatehash.expect("missing since_shortstatehash on incremental sync"); + let since_shortstatehash = since_shortstatehash.unwrap_or(current_shortstatehash); - let mut delta_state_events = Vec::new(); + let state_changed = since_shortstatehash != current_shortstatehash; - if since_shortstatehash != current_shortstatehash { - let current_state_ids = services + let state_get_id = |user_id: &'a UserId| { + services .rooms .state_accessor - .state_full_ids(current_shortstatehash) - .collect(); + .state_get_id(current_shortstatehash, &StateEventType::RoomMember, user_id.as_str()) + .ok() + }; - let since_state_ids = services - .rooms - .state_accessor - .state_full_ids(since_shortstatehash) - .collect(); + let lazy_state_ids: OptionFuture<_> = witness + .map(|witness| { + witness + .iter() + .stream() + .broad_filter_map(|user_id| state_get_id(user_id)) + .collect::>() + }) + .into(); - let (current_state_ids, since_state_ids): ( - HashMap<_, OwnedEventId>, - HashMap<_, OwnedEventId>, - ) = join(current_state_ids, since_state_ids).await; + let current_state_ids: OptionFuture<_> = state_changed + .then(|| { + services + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .collect::>() + }) + .into(); - current_state_ids - .iter() - .stream() - .ready_filter(|(key, id)| full_state || since_state_ids.get(key) != Some(id)) - .wide_filter_map(|(_, id)| services.rooms.timeline.get_pdu(id).ok()) - .ready_for_each(|pdu| delta_state_events.push(pdu)) - .await; - } + let since_state_ids: 
OptionFuture<_> = (state_changed && !full_state) + .then(|| { + services + .rooms + .state_accessor + .state_full_ids(since_shortstatehash) + .collect::>() + }) + .into(); + + let lazy_state_ids = lazy_state_ids + .map(Option::into_iter) + .map(|iter| iter.flat_map(Vec::into_iter)) + .map(IterStream::stream) + .flatten_stream(); + + let ref since_state_ids = since_state_ids.shared(); + let delta_state_events = current_state_ids + .map(Option::into_iter) + .map(|iter| iter.flat_map(Vec::into_iter)) + .map(IterStream::stream) + .flatten_stream() + .filter_map(|(shortstatekey, event_id): (u64, OwnedEventId)| async move { + since_state_ids + .clone() + .await + .is_none_or(|since_state| since_state.get(&shortstatekey) != Some(&event_id)) + .then_some(event_id) + }) + .chain(lazy_state_ids) + .broad_filter_map(|event_id: OwnedEventId| async move { + services + .rooms + .timeline + .get_pdu(&event_id) + .await + .map(move |pdu| (event_id, pdu)) + .ok() + }) + .collect::>(); let since_encryption = services .rooms @@ -1031,11 +1044,12 @@ async fn calculate_state_incremental( .rooms .state_accessor .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") - .is_ok() - .await; + .is_ok(); + + let (delta_state_events, encrypted_room) = join(delta_state_events, encrypted_room).await; let (mut device_list_updates, left_encrypted_users) = delta_state_events - .iter() + .values() .stream() .ready_filter(|_| encrypted_room) .ready_filter(|state_event| state_event.kind == RoomMember) @@ -1084,7 +1098,7 @@ async fn calculate_state_incremental( } let send_member_count = delta_state_events - .iter() + .values() .any(|event| event.kind == RoomMember); let (joined_member_count, invited_member_count, heroes) = if send_member_count { @@ -1098,9 +1112,9 @@ async fn calculate_state_incremental( joined_member_count, invited_member_count, joined_since_last_sync, - state_events: delta_state_events, device_list_updates, left_encrypted_users, + state_events: delta_state_events.into_values().collect(), }) } From 09bc71caaba40321ec0f987574a94e788175c4f9 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 09:08:13 +0000 Subject: [PATCH 014/310] fix missed concurrent fetch opportunities in sender (ffd0fd42424a) Signed-off-by: Jason Volk --- src/service/sending/sender.rs | 41 +++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 363bb994..f19b69da 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -13,7 +13,12 @@ use conduwuit::{ debug, err, error, result::LogErr, trace, - utils::{calculate_hash, continue_exponential_backoff_secs, stream::IterStream, ReadyExt}, + utils::{ + calculate_hash, continue_exponential_backoff_secs, + future::TryExtExt, + stream::{BroadbandExt, IterStream, WidebandExt}, + ReadyExt, + }, warn, Error, Result, }; use futures::{ @@ -474,20 +479,25 @@ impl Service { since: (u64, u64), max_edu_count: &AtomicU64, ) -> Option { - let server_rooms = self.services.state_cache.server_rooms(server_name); - - pin_mut!(server_rooms); let mut num = 0; - let mut receipts = BTreeMap::::new(); - while let Some(room_id) = server_rooms.next().await { - let receipt_map = self - .select_edus_receipts_room(room_id, since, max_edu_count, &mut num) - .await; + let receipts: BTreeMap = self + .services + .state_cache + .server_rooms(server_name) + .map(ToOwned::to_owned) + .broad_filter_map(|room_id| async move { + let receipt_map = self + 
.select_edus_receipts_room(&room_id, since, max_edu_count, &mut num) + .await; - if !receipt_map.read.is_empty() { - receipts.insert(room_id.into(), receipt_map); - } - } + receipt_map + .read + .is_empty() + .eq(&false) + .then_some((room_id, receipt_map)) + }) + .collect() + .await; if receipts.is_empty() { return None; @@ -820,9 +830,8 @@ impl Service { | _ => None, }) .stream() - .then(|pdu_id| self.services.timeline.get_pdu_json_from_id(pdu_id)) - .ready_filter_map(Result::ok) - .then(|pdu| self.convert_to_outgoing_federation_event(pdu)) + .wide_filter_map(|pdu_id| self.services.timeline.get_pdu_json_from_id(pdu_id).ok()) + .wide_then(|pdu| self.convert_to_outgoing_federation_event(pdu)) .collect() .await; From 2fa9621f3a358740917af7a55c5d0be1e1d79ae4 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 11:54:06 +0000 Subject: [PATCH 015/310] flatten state_full_shortids Signed-off-by: Jason Volk --- src/service/rooms/state/mod.rs | 60 +++++++++++-------------- src/service/rooms/state_accessor/mod.rs | 47 +++++++++---------- 2 files changed, 48 insertions(+), 59 deletions(-) diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 8cb4e586..1b0d0d58 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -429,60 +429,54 @@ impl Service { sender: &UserId, state_key: Option<&str>, content: &serde_json::value::RawValue, - ) -> Result>> { + ) -> Result> { let Ok(shortstatehash) = self.get_room_shortstatehash(room_id).await else { return Ok(HashMap::new()); }; - let mut sauthevents: HashMap<_, _> = - state_res::auth_types_for_event(kind, sender, state_key, content)? - .iter() - .stream() - .broad_filter_map(|(event_type, state_key)| { - self.services - .short - .get_shortstatekey(event_type, state_key) - .map_ok(move |ssk| (ssk, (event_type, state_key))) - .map(Result::ok) - }) - .map(|(ssk, (event_type, state_key))| { - (ssk, (event_type.to_owned(), state_key.to_owned())) - }) - .collect() - .await; + let auth_types = state_res::auth_types_for_event(kind, sender, state_key, content)?; + + let sauthevents: HashMap<_, _> = auth_types + .iter() + .stream() + .broad_filter_map(|(event_type, state_key)| { + self.services + .short + .get_shortstatekey(event_type, state_key) + .map_ok(move |ssk| (ssk, (event_type, state_key))) + .map(Result::ok) + }) + .collect() + .await; let (state_keys, event_ids): (Vec<_>, Vec<_>) = self .services .state_accessor .state_full_shortids(shortstatehash) - .await - .map_err(|e| err!(Database(error!(?room_id, ?shortstatehash, "{e:?}"))))? 
- .into_iter() - .filter_map(|(shortstatekey, shorteventid)| { + .ready_filter_map(Result::ok) + .ready_filter_map(|(shortstatekey, shorteventid)| { sauthevents - .remove(&shortstatekey) - .map(|(event_type, state_key)| ((event_type, state_key), shorteventid)) + .get(&shortstatekey) + .map(|(ty, sk)| ((ty, sk), shorteventid)) }) - .unzip(); + .unzip() + .await; - let auth_pdus = self - .services + self.services .short .multi_get_eventid_from_short(event_ids.into_iter().stream()) .zip(state_keys.into_iter().stream()) - .ready_filter_map(|(event_id, tsk)| Some((tsk, event_id.ok()?))) - .broad_filter_map(|(tsk, event_id): (_, OwnedEventId)| async move { + .ready_filter_map(|(event_id, (ty, sk))| Some(((ty, sk), event_id.ok()?))) + .broad_filter_map(|((ty, sk), event_id): (_, OwnedEventId)| async move { self.services .timeline .get_pdu(&event_id) .await - .map(Arc::new) - .map(move |pdu| (tsk, pdu)) + .map(move |pdu| (((*ty).clone(), (*sk).clone()), pdu)) .ok() }) .collect() - .await; - - Ok(auth_pdus) + .map(Ok) + .await } } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 0f5520bb..98aac138 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -1,6 +1,7 @@ use std::{ borrow::Borrow, fmt::Write, + ops::Deref, sync::{Arc, Mutex as StdMutex, Mutex}, }; @@ -10,8 +11,7 @@ use conduwuit::{ utils, utils::{ math::{usize_from_f64, Expected}, - stream::BroadbandExt, - IterStream, ReadyExt, + stream::{BroadbandExt, IterStream, ReadyExt, TryExpect}, }, Err, Error, PduEvent, Result, }; @@ -158,12 +158,8 @@ impl Service { ) -> impl Stream + Send + '_ { let short_ids = self .state_full_shortids(shortstatehash) - .map(|result| result.expect("missing shortstatehash")) - .map(Vec::into_iter) - .map(|iter| iter.map(at!(1))) - .map(IterStream::stream) - .flatten_stream() - .boxed(); + .expect_ok() + .map(at!(1)); self.services .short @@ -187,9 +183,8 @@ impl Service { { let shortids = self .state_full_shortids(shortstatehash) - .map(|result| result.expect("missing shortstatehash")) - .map(|vec| vec.into_iter().unzip()) - .boxed() + .expect_ok() + .unzip() .shared(); let shortstatekeys = shortids @@ -255,25 +250,25 @@ impl Service { } #[inline] - pub async fn state_full_shortids( + pub fn state_full_shortids( &self, shortstatehash: ShortStateHash, - ) -> Result> { - let shortids = self - .services + ) -> impl Stream> + Send + '_ { + self.services .state_compressor .load_shortstatehash_info(shortstatehash) - .await - .map_err(|e| err!(Database("Missing state IDs: {e}")))? 
- .pop() - .expect("there is always one layer") - .full_state - .iter() - .copied() - .map(parse_compressed_state_event) - .collect(); - - Ok(shortids) + .map_err(|e| err!(Database("Missing state IDs: {e}"))) + .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) + .map_ok(|full_state| { + full_state + .deref() + .iter() + .copied() + .map(parse_compressed_state_event) + .collect() + }) + .map_ok(|vec: Vec<_>| vec.into_iter().try_stream()) + .try_flatten_stream() } /// Returns a single PDU from `room_id` with key (`event_type`, From ea49b60273c987cc673c3aad439c6fbb50bb795f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 1 Feb 2025 22:28:09 +0000 Subject: [PATCH 016/310] add Option support to database deserializer Signed-off-by: Jason Volk --- src/database/de.rs | 23 ++++-- src/database/tests.rs | 159 +++++++++++++++++++++++++++++++++++++++++- 2 files changed, 176 insertions(+), 6 deletions(-) diff --git a/src/database/de.rs b/src/database/de.rs index 7cc8f00a..8e914fcc 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -22,7 +22,7 @@ pub(crate) fn from_slice<'a, T>(buf: &'a [u8]) -> Result where T: Deserialize<'a>, { - let mut deserializer = Deserializer { buf, pos: 0, seq: false }; + let mut deserializer = Deserializer { buf, pos: 0, rec: 0, seq: false }; T::deserialize(&mut deserializer).debug_inspect(|_| { deserializer @@ -35,6 +35,7 @@ where pub(crate) struct Deserializer<'de> { buf: &'de [u8], pos: usize, + rec: usize, seq: bool, } @@ -107,7 +108,7 @@ impl<'de> Deserializer<'de> { /// consumed None is returned instead. #[inline] fn record_peek_byte(&self) -> Option { - let started = self.pos != 0; + let started = self.pos != 0 || self.rec > 0; let buf = &self.buf[self.pos..]; debug_assert!( !started || buf[0] == Self::SEP, @@ -121,13 +122,14 @@ impl<'de> Deserializer<'de> { /// the start of the next record. (Case for some sequences) #[inline] fn record_start(&mut self) { - let started = self.pos != 0; + let started = self.pos != 0 || self.rec > 0; debug_assert!( !started || self.buf[self.pos] == Self::SEP, "Missing expected record separator at current position" ); self.inc_pos(started.into()); + self.inc_rec(1); } /// Consume all remaining bytes, which may include record separators, @@ -157,6 +159,9 @@ impl<'de> Deserializer<'de> { debug_assert!(self.pos <= self.buf.len(), "pos out of range"); } + #[inline] + fn inc_rec(&mut self, n: usize) { self.rec = self.rec.saturating_add(n); } + /// Unconsumed input bytes. 
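The Option handling added to deserialize_option below, together with the new tests, implies a simple wire rule: fields are separated by 0xFF and a None field is just an empty segment, so adjacent separators stand for a skipped value. A hedged round-trip sketch of that rule (assuming room_id and user_id are parsed as in the tests):

    let val: (Option<&RoomId>, Option<&EventId>, Option<&UserId>) =
        (Some(room_id), None, Some(user_id));
    let bytes = serialize_to_vec(&val).expect("serialize");
    // bytes == room_id bytes ++ [0xFF, 0xFF] ++ user_id bytes
    let back: (Option<&RoomId>, Option<&EventId>, Option<&UserId>) =
        de::from_slice(&bytes).expect("deserialize");
    assert_eq!(val, back);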
#[inline] fn remaining(&self) -> Result { @@ -270,8 +275,16 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { } #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] - fn deserialize_option>(self, _visitor: V) -> Result { - unhandled!("deserialize Option not implemented") + fn deserialize_option>(self, visitor: V) -> Result { + if self + .buf + .get(self.pos) + .is_none_or(|b| *b == Deserializer::SEP) + { + visitor.visit_none() + } else { + visitor.visit_some(self) + } } #[cfg_attr(unabridged, tracing::instrument(level = "trace", skip_all))] diff --git a/src/database/tests.rs b/src/database/tests.rs index 2f143698..e6c85983 100644 --- a/src/database/tests.rs +++ b/src/database/tests.rs @@ -3,7 +3,7 @@ use std::fmt::Debug; use arrayvec::ArrayVec; -use conduwuit::ruma::{serde::Raw, RoomId, UserId}; +use conduwuit::ruma::{serde::Raw, EventId, RoomId, UserId}; use serde::Serialize; use crate::{ @@ -389,3 +389,160 @@ fn de_complex() { assert_eq!(arr, key, "deserialization of serialization does not match"); } + +#[test] +fn serde_tuple_option_value_some() { + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.extend_from_slice(room_id.as_bytes()); + aa.push(0xFF); + aa.extend_from_slice(user_id.as_bytes()); + + let bb: (&RoomId, Option<&UserId>) = (room_id, Some(user_id)); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (&RoomId, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(bb.1, cc.1); + assert_eq!(cc.0, bb.0); +} + +#[test] +fn serde_tuple_option_value_none() { + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.extend_from_slice(room_id.as_bytes()); + aa.push(0xFF); + + let bb: (&RoomId, Option<&UserId>) = (room_id, None); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (&RoomId, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(None, cc.1); + assert_eq!(cc.0, bb.0); +} + +#[test] +fn serde_tuple_option_none_value() { + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.push(0xFF); + aa.extend_from_slice(user_id.as_bytes()); + + let bb: (Option<&RoomId>, &UserId) = (None, user_id); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, &UserId) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(None, cc.0); + assert_eq!(cc.1, bb.1); +} + +#[test] +fn serde_tuple_option_some_value() { + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.extend_from_slice(room_id.as_bytes()); + aa.push(0xFF); + aa.extend_from_slice(user_id.as_bytes()); + + let bb: (Option<&RoomId>, &UserId) = (Some(room_id), user_id); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, &UserId) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(bb.0, cc.0); + assert_eq!(cc.1, bb.1); +} + +#[test] +fn serde_tuple_option_some_some() { + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + let user_id: &UserId = 
"@user:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.extend_from_slice(room_id.as_bytes()); + aa.push(0xFF); + aa.extend_from_slice(user_id.as_bytes()); + + let bb: (Option<&RoomId>, Option<&UserId>) = (Some(room_id), Some(user_id)); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(cc.0, bb.0); + assert_eq!(bb.1, cc.1); +} + +#[test] +fn serde_tuple_option_none_none() { + let aa = vec![0xFF]; + + let bb: (Option<&RoomId>, Option<&UserId>) = (None, None); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(cc.0, bb.0); + assert_eq!(None, cc.1); +} + +#[test] +fn serde_tuple_option_some_none_some() { + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + + let mut aa = Vec::::new(); + aa.extend_from_slice(room_id.as_bytes()); + aa.push(0xFF); + aa.push(0xFF); + aa.extend_from_slice(user_id.as_bytes()); + + let bb: (Option<&RoomId>, Option<&EventId>, Option<&UserId>) = + (Some(room_id), None, Some(user_id)); + + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, Option<&EventId>, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(bb.0, cc.0); + assert_eq!(None, cc.1); + assert_eq!(bb.1, cc.1); + assert_eq!(bb.2, cc.2); +} + +#[test] +fn serde_tuple_option_none_none_none() { + let aa = vec![0xFF, 0xFF]; + + let bb: (Option<&RoomId>, Option<&EventId>, Option<&UserId>) = (None, None, None); + let bbs = serialize_to_vec(&bb).expect("failed to serialize tuple"); + assert_eq!(aa, bbs); + + let cc: (Option<&RoomId>, Option<&EventId>, Option<&UserId>) = + de::from_slice(&bbs).expect("failed to deserialize tuple"); + + assert_eq!(None, cc.0); + assert_eq!(bb, cc); +} From 4add39d0fedcbe7946c6dfffac33d1e48111ea8b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 15:50:09 +0000 Subject: [PATCH 017/310] cache compressed state in a sorted structure for logarithmic queries with partial keys Signed-off-by: Jason Volk --- src/api/client/membership.rs | 9 +- .../rooms/event_handler/resolve_state.rs | 6 +- .../event_handler/upgrade_outlier_pdu.rs | 15 ++- src/service/rooms/state/mod.rs | 28 +++--- src/service/rooms/state_accessor/mod.rs | 99 ++++++++++++++----- src/service/rooms/state_compressor/mod.rs | 30 +++--- src/service/rooms/timeline/mod.rs | 4 +- 7 files changed, 118 insertions(+), 73 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index d80aff0c..449d44d5 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -46,7 +46,10 @@ use ruma::{ use service::{ appservice::RegistrationInfo, pdu::gen_event_id, - rooms::{state::RoomMutexGuard, state_compressor::HashSetCompressStateEvent}, + rooms::{ + state::RoomMutexGuard, + state_compressor::{CompressedState, HashSetCompressStateEvent}, + }, Services, }; @@ -1169,7 +1172,7 @@ async fn join_room_by_id_helper_remote( } info!("Compressing state from send_join"); - let compressed: HashSet<_> = services + let compressed: CompressedState = services .rooms .state_compressor .compress_state_events(state.iter().map(|(ssk, eid)| (ssk, 
eid.borrow()))) @@ -2340,7 +2343,7 @@ async fn knock_room_helper_remote( } info!("Compressing state from send_knock"); - let compressed: HashSet<_> = services + let compressed: CompressedState = services .rooms .state_compressor .compress_state_events(state_map.iter().map(|(ssk, eid)| (ssk, eid.borrow()))) diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index c3de5f2f..4d99b088 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -15,7 +15,7 @@ use ruma::{ OwnedEventId, RoomId, RoomVersionId, }; -use crate::rooms::state_compressor::CompressedStateEvent; +use crate::rooms::state_compressor::CompressedState; #[implement(super::Service)] #[tracing::instrument(name = "resolve", level = "debug", skip_all)] @@ -24,7 +24,7 @@ pub async fn resolve_state( room_id: &RoomId, room_version_id: &RoomVersionId, incoming_state: HashMap, -) -> Result>> { +) -> Result> { trace!("Loading current room state ids"); let current_sstatehash = self .services @@ -91,7 +91,7 @@ pub async fn resolve_state( .await; trace!("Compressing state..."); - let new_room_state: HashSet<_> = self + let new_room_state: CompressedState = self .services .state_compressor .compress_state_events( diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index 03697558..132daca7 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -1,10 +1,4 @@ -use std::{ - borrow::Borrow, - collections::{BTreeMap, HashSet}, - iter::once, - sync::Arc, - time::Instant, -}; +use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::Instant}; use conduwuit::{ debug, debug_info, err, implement, trace, @@ -19,7 +13,10 @@ use ruma::{ }; use super::{get_room_version_id, to_room_version}; -use crate::rooms::{state_compressor::HashSetCompressStateEvent, timeline::RawPduId}; +use crate::rooms::{ + state_compressor::{CompressedState, HashSetCompressStateEvent}, + timeline::RawPduId, +}; #[implement(super::Service)] pub(super) async fn upgrade_outlier_to_timeline_pdu( @@ -173,7 +170,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( incoming_pdu.prev_events.len() ); - let state_ids_compressed: Arc> = self + let state_ids_compressed: Arc = self .services .state_compressor .compress_state_events( diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 1b0d0d58..de90a89c 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,9 +1,4 @@ -use std::{ - collections::{HashMap, HashSet}, - fmt::Write, - iter::once, - sync::Arc, -}; +use std::{collections::HashMap, fmt::Write, iter::once, sync::Arc}; use conduwuit::{ err, @@ -33,7 +28,7 @@ use crate::{ globals, rooms, rooms::{ short::{ShortEventId, ShortStateHash}, - state_compressor::{parse_compressed_state_event, CompressedStateEvent}, + state_compressor::{parse_compressed_state_event, CompressedState}, }, Dep, }; @@ -102,10 +97,9 @@ impl Service { &self, room_id: &RoomId, shortstatehash: u64, - statediffnew: Arc>, - _statediffremoved: Arc>, - state_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room state - * mutex */ + statediffnew: Arc, + _statediffremoved: Arc, + state_lock: &RoomMutexGuard, ) -> Result { let event_ids = statediffnew .iter() @@ -176,7 +170,7 @@ impl Service { &self, event_id: &EventId, room_id: &RoomId, - 
state_ids_compressed: Arc>, + state_ids_compressed: Arc, ) -> Result { const KEY_LEN: usize = size_of::(); const VAL_LEN: usize = size_of::(); @@ -209,12 +203,12 @@ impl Service { let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed + let statediffnew: CompressedState = state_ids_compressed .difference(&parent_stateinfo.full_state) .copied() .collect(); - let statediffremoved: HashSet<_> = parent_stateinfo + let statediffremoved: CompressedState = parent_stateinfo .full_state .difference(&state_ids_compressed) .copied() @@ -222,7 +216,7 @@ impl Service { (Arc::new(statediffnew), Arc::new(statediffremoved)) } else { - (state_ids_compressed, Arc::new(HashSet::new())) + (state_ids_compressed, Arc::new(CompressedState::new())) }; self.services.state_compressor.save_state_from_diff( shortstatehash, @@ -300,10 +294,10 @@ impl Service { // TODO: statehash with deterministic inputs let shortstatehash = self.services.globals.next_count()?; - let mut statediffnew = HashSet::new(); + let mut statediffnew = CompressedState::new(); statediffnew.insert(new); - let mut statediffremoved = HashSet::new(); + let mut statediffremoved = CompressedState::new(); if let Some(replaces) = replaces { statediffremoved.insert(*replaces); } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 98aac138..8b56c8b6 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -11,6 +11,7 @@ use conduwuit::{ utils, utils::{ math::{usize_from_f64, Expected}, + result::FlatOk, stream::{BroadbandExt, IterStream, ReadyExt, TryExpect}, }, Err, Error, PduEvent, Result, @@ -47,7 +48,7 @@ use crate::{ rooms::{ short::{ShortEventId, ShortStateHash, ShortStateKey}, state::RoomMutexGuard, - state_compressor::parse_compressed_state_event, + state_compressor::{compress_state_event, parse_compressed_state_event}, }, Dep, }; @@ -220,36 +221,88 @@ impl Service { Id: for<'de> Deserialize<'de> + Sized + ToOwned, ::Owned: Borrow, { - let shortstatekey = self - .services - .short - .get_shortstatekey(event_type, state_key) + let shorteventid = self + .state_get_shortid(shortstatehash, event_type, state_key) .await?; - let full_state = self - .services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .await - .map_err(|e| err!(Database(error!(?event_type, ?state_key, "Missing state: {e:?}"))))? - .pop() - .expect("there is always one layer") - .full_state; - - let compressed = full_state - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .ok_or(err!(Database("No shortstatekey in compressed state")))?; - - let (_, shorteventid) = parse_compressed_state_event(*compressed); - self.services .short .get_eventid_from_short(shorteventid) .await } - #[inline] + /// Returns a single EventId from `room_id` with key (`event_type`, + /// `state_key`). 
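The lookups below lean on the new sorted representation: each entry is a fixed 16-byte key whose leading half encodes the shortstatekey (the 0 and u64::MAX range bounds imply a key-major layout), so all events for one state key are contiguous in the BTreeSet and a partial-key query is a logarithmic range scan. A standalone sketch of that idea, modelling the layout rather than reusing the patch's own helpers:

    use std::collections::BTreeSet;

    fn compress(shortstatekey: u64, shorteventid: u64) -> [u8; 16] {
        let mut key = [0_u8; 16];
        key[..8].copy_from_slice(&shortstatekey.to_be_bytes());
        key[8..].copy_from_slice(&shorteventid.to_be_bytes());
        key
    }

    fn main() {
        let mut full_state: BTreeSet<[u8; 16]> = BTreeSet::new();
        full_state.insert(compress(7, 12_345));

        // Fix the shortstatekey, span the whole shorteventid range.
        let (start, end) = (compress(7, 0), compress(7, u64::MAX));
        assert!(full_state.range(start..end).next().is_some());
    }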
+ #[tracing::instrument(skip(self), level = "debug")] + pub async fn state_get_shortid( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, + ) -> Result { + let shortstatekey = self + .services + .short + .get_shortstatekey(event_type, state_key) + .await?; + + let start = compress_state_event(shortstatekey, 0); + let end = compress_state_event(shortstatekey, u64::MAX); + self.services + .state_compressor + .load_shortstatehash_info(shortstatehash) + .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) + .map_ok(|full_state| { + full_state + .range(start..end) + .next() + .copied() + .map(parse_compressed_state_event) + .map(at!(1)) + .ok_or(err!(Request(NotFound("Not found in room state")))) + }) + .await? + } + + #[tracing::instrument(skip(self), level = "debug")] + pub async fn state_contains( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, + ) -> bool { + let Ok(shortstatekey) = self + .services + .short + .get_shortstatekey(event_type, state_key) + .await + else { + return false; + }; + + self.state_contains_shortstatekey(shortstatehash, shortstatekey) + .await + } + + #[tracing::instrument(skip(self), level = "debug")] + pub async fn state_contains_shortstatekey( + &self, + shortstatehash: ShortStateHash, + shortstatekey: ShortStateKey, + ) -> bool { + let start = compress_state_event(shortstatekey, 0); + let end = compress_state_event(shortstatekey, u64::MAX); + + self.services + .state_compressor + .load_shortstatehash_info(shortstatehash) + .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) + .map_ok(|full_state| full_state.range(start..end).next().copied()) + .await + .flat_ok() + .is_some() + } + pub fn state_full_shortids( &self, shortstatehash: ShortStateHash, diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 532df360..3d68dff6 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -1,5 +1,5 @@ use std::{ - collections::{HashMap, HashSet}, + collections::{BTreeSet, HashMap}, fmt::{Debug, Write}, mem::size_of, sync::{Arc, Mutex}, @@ -63,8 +63,8 @@ type StateInfoLruCache = LruCache; type ShortStateInfoVec = Vec; type ParentStatesVec = Vec; -pub(crate) type CompressedState = HashSet; -pub(crate) type CompressedStateEvent = [u8; 2 * size_of::()]; +pub type CompressedState = BTreeSet; +pub type CompressedStateEvent = [u8; 2 * size_of::()]; impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { @@ -249,8 +249,8 @@ impl Service { pub fn save_state_from_diff( &self, shortstatehash: ShortStateHash, - statediffnew: Arc>, - statediffremoved: Arc>, + statediffnew: Arc, + statediffremoved: Arc, diff_to_sibling: usize, mut parent_states: ParentStatesVec, ) -> Result { @@ -363,7 +363,7 @@ impl Service { pub async fn save_state( &self, room_id: &RoomId, - new_state_ids_compressed: Arc>, + new_state_ids_compressed: Arc, ) -> Result { let previous_shortstatehash = self .services @@ -396,12 +396,12 @@ impl Service { let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = new_state_ids_compressed + let statediffnew: CompressedState = new_state_ids_compressed .difference(&parent_stateinfo.full_state) .copied() .collect(); - let statediffremoved: HashSet<_> = parent_stateinfo + let statediffremoved: CompressedState = parent_stateinfo .full_state 
.difference(&new_state_ids_compressed) .copied() @@ -409,7 +409,7 @@ impl Service { (Arc::new(statediffnew), Arc::new(statediffremoved)) } else { - (new_state_ids_compressed, Arc::new(HashSet::new())) + (new_state_ids_compressed, Arc::new(CompressedState::new())) }; if !already_existed { @@ -448,11 +448,11 @@ impl Service { .take_if(|parent| *parent != 0); debug_assert!(value.len() % STRIDE == 0, "value not aligned to stride"); - let num_values = value.len() / STRIDE; + let _num_values = value.len() / STRIDE; let mut add_mode = true; - let mut added = HashSet::with_capacity(num_values); - let mut removed = HashSet::with_capacity(num_values); + let mut added = CompressedState::new(); + let mut removed = CompressedState::new(); let mut i = STRIDE; while let Some(v) = value.get(i..expected!(i + 2 * STRIDE)) { @@ -469,8 +469,6 @@ impl Service { i = expected!(i + 2 * STRIDE); } - added.shrink_to_fit(); - removed.shrink_to_fit(); Ok(StateDiff { parent, added: Arc::new(added), @@ -507,7 +505,7 @@ impl Service { #[inline] #[must_use] -fn compress_state_event( +pub(crate) fn compress_state_event( shortstatekey: ShortStateKey, shorteventid: ShortEventId, ) -> CompressedStateEvent { @@ -523,7 +521,7 @@ fn compress_state_event( #[inline] #[must_use] -pub fn parse_compressed_state_event( +pub(crate) fn parse_compressed_state_event( compressed_event: CompressedStateEvent, ) -> (ShortStateKey, ShortEventId) { use utils::u64_from_u8; diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 8b3b67a7..a913034d 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -49,7 +49,7 @@ use crate::{ account_data, admin, appservice, appservice::NamespaceRegex, globals, pusher, rooms, - rooms::{short::ShortRoomId, state_compressor::CompressedStateEvent}, + rooms::{short::ShortRoomId, state_compressor::CompressedState}, sending, server_keys, users, Dep, }; @@ -950,7 +950,7 @@ impl Service { pdu: &'a PduEvent, pdu_json: CanonicalJsonObject, new_room_leafs: Leafs, - state_ids_compressed: Arc>, + state_ids_compressed: Arc, soft_fail: bool, state_lock: &'a RoomMutexGuard, ) -> Result> From 7ce782ddf4cb6989caff7a3781cfc667183b9b63 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 1 Feb 2025 01:17:28 +0000 Subject: [PATCH 018/310] fix jemalloc cfgs lacking msvc conditions Signed-off-by: Jason Volk --- src/core/config/check.rs | 2 +- src/database/pool.rs | 9 ++++++--- src/main/runtime.rs | 6 +++--- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/src/core/config/check.rs b/src/core/config/check.rs index 988d4143..5532c5a2 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -38,7 +38,7 @@ pub fn check(config: &Config) -> Result { )); } - if cfg!(all(feature = "hardened_malloc", feature = "jemalloc")) { + if cfg!(all(feature = "hardened_malloc", feature = "jemalloc", not(target_env = "msvc"))) { debug_warn!( "hardened_malloc and jemalloc compile-time features are both enabled, this causes \ jemalloc to be used." 
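A brief note on the hunk above: the warning's predicate now matches the gates used where the allocator is actually selected (see the pool.rs and runtime.rs hunks that follow), since the added not(target_env = "msvc") conditions indicate jemalloc is not wired up on MSVC targets. A hypothetical helper capturing the shared predicate, not present in the patch:

    const fn jemalloc_in_use() -> bool {
        cfg!(all(feature = "jemalloc", not(target_env = "msvc")))
    }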
diff --git a/src/database/pool.rs b/src/database/pool.rs index 86516c31..c753855a 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -13,7 +13,7 @@ use std::{ use async_channel::{QueueStrategy, Receiver, RecvError, Sender}; use conduwuit::{ debug, debug_warn, err, error, implement, - result::{DebugInspect, LogDebugErr}, + result::DebugInspect, trace, utils::sys::compute::{get_affinity, nth_core_available, set_affinity}, Error, Result, Server, @@ -290,9 +290,12 @@ fn worker_init(&self, id: usize) { // affinity is empty (no-op) if there's only one queue set_affinity(affinity.clone()); - #[cfg(feature = "jemalloc")] + #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] if affinity.clone().count() == 1 && conduwuit::alloc::je::is_affine_arena() { - use conduwuit::alloc::je::this_thread::{arena_id, set_arena}; + use conduwuit::{ + alloc::je::this_thread::{arena_id, set_arena}, + result::LogDebugErr, + }; let id = affinity.clone().next().expect("at least one id"); diff --git a/src/main/runtime.rs b/src/main/runtime.rs index 9f4f60f8..02b9931f 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -122,7 +122,7 @@ fn set_worker_affinity() { set_worker_mallctl(id); } -#[cfg(feature = "jemalloc")] +#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] fn set_worker_mallctl(id: usize) { use conduwuit::alloc::je::{ is_affine_arena, @@ -143,7 +143,7 @@ fn set_worker_mallctl(id: usize) { } } -#[cfg(not(feature = "jemalloc"))] +#[cfg(any(not(feature = "jemalloc"), target_env = "msvc"))] fn set_worker_mallctl(_: usize) {} #[tracing::instrument( @@ -189,7 +189,7 @@ fn thread_park() { } fn gc_on_park() { - #[cfg(feature = "jemalloc")] + #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] conduwuit::alloc::je::this_thread::decay() .log_debug_err() .ok(); From b4d22bd05e3cf81476669cc2e37eef60eeade07e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 1 Feb 2025 23:41:05 +0000 Subject: [PATCH 019/310] remove unnecessary cf arc refcnt workaround log errors and panics propagating through the request task join Signed-off-by: Jason Volk --- src/database/engine.rs | 6 ++--- src/database/engine/open.rs | 6 ++--- src/database/map.rs | 12 ++++----- src/database/map/open.rs | 5 +--- src/router/request.rs | 52 ++++++++++++++++++++++++++++--------- 5 files changed, 53 insertions(+), 28 deletions(-) diff --git a/src/database/engine.rs b/src/database/engine.rs index be3d62cf..22e2b9c8 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -30,13 +30,13 @@ use crate::{ }; pub struct Engine { + pub(crate) db: Db, + pub(crate) pool: Arc, + pub(crate) ctx: Arc, pub(super) read_only: bool, pub(super) secondary: bool, pub(crate) checksums: bool, corks: AtomicU32, - pub(crate) db: Db, - pub(crate) pool: Arc, - pub(crate) ctx: Arc, } pub(crate) type Db = DBWithThreadMode; diff --git a/src/database/engine/open.rs b/src/database/engine/open.rs index ad724765..59dabce1 100644 --- a/src/database/engine/open.rs +++ b/src/database/engine/open.rs @@ -56,13 +56,13 @@ pub(crate) async fn open(ctx: Arc, desc: &[Descriptor]) -> Result, - cf: Arc, watchers: Watchers, - write_options: WriteOptions, + cf: Arc, + db: Arc, read_options: ReadOptions, cache_read_options: ReadOptions, + write_options: WriteOptions, } impl Map { pub(crate) fn open(db: &Arc, name: &'static str) -> Result> { Ok(Arc::new(Self { name, - db: db.clone(), - cf: open::open(db, name), watchers: Watchers::default(), - write_options: write_options_default(db), + cf: open::open(db, name), + db: db.clone(), read_options: 
read_options_default(db), cache_read_options: cache_read_options_default(db), + write_options: write_options_default(db), })) } diff --git a/src/database/map/open.rs b/src/database/map/open.rs index 6ecec044..07f7a0c6 100644 --- a/src/database/map/open.rs +++ b/src/database/map/open.rs @@ -30,8 +30,5 @@ pub(super) fn open(db: &Arc, name: &str) -> Arc { // lifetime parameter. We should not hold this handle, even in its Arc, after // closing the database (dropping `Engine`). Since `Arc` is a sibling // member along with this handle in `Map`, that is prevented. - unsafe { - Arc::increment_strong_count(cf_ptr); - Arc::from_raw(cf_ptr) - } + unsafe { Arc::from_raw(cf_ptr) } } diff --git a/src/router/request.rs b/src/router/request.rs index f7b94417..19cd751b 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -1,4 +1,7 @@ -use std::sync::{atomic::Ordering, Arc}; +use std::{ + fmt::Debug, + sync::{atomic::Ordering, Arc}, +}; use axum::{ extract::State, @@ -12,16 +15,16 @@ use http::{Method, StatusCode, Uri}; level = "debug", skip_all, fields( - handled = %services - .server - .metrics - .requests_handle_finished - .fetch_add(1, Ordering::Relaxed), active = %services .server .metrics .requests_handle_active .fetch_add(1, Ordering::Relaxed), + handled = %services + .server + .metrics + .requests_handle_finished + .load(Ordering::Relaxed), ) )] pub(crate) async fn handle( @@ -31,6 +34,10 @@ pub(crate) async fn handle( ) -> Result { #[cfg(debug_assertions)] conduwuit::defer! {{ + _ = services.server + .metrics + .requests_handle_finished + .fetch_add(1, Ordering::Relaxed); _ = services.server .metrics .requests_handle_active @@ -47,21 +54,35 @@ pub(crate) async fn handle( return Err(StatusCode::SERVICE_UNAVAILABLE); } - let method = req.method().clone(); let uri = req.uri().clone(); - services + let method = req.method().clone(); + let services_ = services.clone(); + let task = services .server .runtime() - .spawn(next.run(req)) - .await - .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) - .and_then(|result| handle_result(&method, &uri, result)) + .spawn(async move { execute(services_, req, next).await }); + + task.await + .map_err(unhandled) + .and_then(move |result| handle_result(&method, &uri, result)) +} + +async fn execute( + // we made a safety contract that Services will not go out of scope + // during the request; this ensures a reference is accounted for at + // the base frame of the task regardless of its detachment. 
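// Spawning the handler as its own runtime task also isolates panics: if the
// handler panics, `task.await` in handle() resolves to a JoinError, which the
// new `unhandled` helper logs and converts into a plain 500 response.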
+ _services: Arc, + req: http::Request, + next: axum::middleware::Next, +) -> Response { + next.run(req).await } fn handle_result(method: &Method, uri: &Uri, result: Response) -> Result { let status = result.status(); let reason = status.canonical_reason().unwrap_or("Unknown Reason"); let code = status.as_u16(); + if status.is_server_error() { error!(method = ?method, uri = ?uri, "{code} {reason}"); } else if status.is_client_error() { @@ -78,3 +99,10 @@ fn handle_result(method: &Method, uri: &Uri, result: Response) -> Result(e: Error) -> StatusCode { + error!("unhandled error or panic during request: {e:?}"); + + StatusCode::INTERNAL_SERVER_ERROR +} From bd6d4bc58f45251313b33e65947a4131ea9114e7 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 2 Feb 2025 10:07:00 +0000 Subject: [PATCH 020/310] enforce timeout on request layers Signed-off-by: Jason Volk --- Cargo.toml | 3 ++- conduwuit-example.toml | 12 ++++++++++++ src/core/config/mod.rs | 24 ++++++++++++++++++++++++ src/router/layers.rs | 4 ++++ src/router/request.rs | 23 ++++++++++++++++++----- 5 files changed, 60 insertions(+), 6 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index c4af4a7c..1cf787c6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -127,12 +127,13 @@ version = "0.6.2" default-features = false features = [ "add-extension", + "catch-panic", "cors", "sensitive-headers", "set-header", + "timeout", "trace", "util", - "catch-panic", ] [workspace.dependencies.rustls] diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 3fd95044..f4f42365 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -377,6 +377,18 @@ # #pusher_idle_timeout = 15 +# Maximum time to receive a request from a client (seconds). +# +#client_receive_timeout = 75 + +# Maximum time to process a request received from a client (seconds). +# +#client_request_timeout = 180 + +# Maximum time to transmit a response to a client (seconds) +# +#client_response_timeout = 120 + # Enables registration. If set to false, no users can register on this # server. # diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index ff038975..b8cfd91b 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -480,6 +480,24 @@ pub struct Config { #[serde(default = "default_pusher_idle_timeout")] pub pusher_idle_timeout: u64, + /// Maximum time to receive a request from a client (seconds). + /// + /// default: 75 + #[serde(default = "default_client_receive_timeout")] + pub client_receive_timeout: u64, + + /// Maximum time to process a request received from a client (seconds). + /// + /// default: 180 + #[serde(default = "default_client_request_timeout")] + pub client_request_timeout: u64, + + /// Maximum time to transmit a response to a client (seconds) + /// + /// default: 120 + #[serde(default = "default_client_response_timeout")] + pub client_response_timeout: u64, + /// Enables registration. If set to false, no users can register on this /// server. 
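Illustrative sketch (not part of the patch, assuming the serde and toml crates; the Timeouts struct is a hypothetical miniature of the Config struct): the serde default-function pattern these timeout options rely on. A value omitted from the config file falls back to the function named in the attribute; an explicit value overrides it.

    use serde::Deserialize;

    #[derive(Deserialize)]
    struct Timeouts {
        #[serde(default = "default_client_receive_timeout")]
        client_receive_timeout: u64,
    }

    fn default_client_receive_timeout() -> u64 { 75 }

    fn main() {
        // omitted from the config: the default function supplies the value
        let defaulted: Timeouts = toml::from_str("").unwrap();
        assert_eq!(defaulted.client_receive_timeout, 75);

        // explicitly configured: the default is ignored
        let set: Timeouts = toml::from_str("client_receive_timeout = 30").unwrap();
        assert_eq!(set.client_receive_timeout, 30);
    }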
/// @@ -2170,3 +2188,9 @@ fn default_stream_width_default() -> usize { 32 } fn default_stream_width_scale() -> f32 { 1.0 } fn default_stream_amplification() -> usize { 1024 } + +fn default_client_receive_timeout() -> u64 { 75 } + +fn default_client_request_timeout() -> u64 { 180 } + +fn default_client_response_timeout() -> u64 { 120 } diff --git a/src/router/layers.rs b/src/router/layers.rs index c5227c22..e8a8b7e8 100644 --- a/src/router/layers.rs +++ b/src/router/layers.rs @@ -18,6 +18,7 @@ use tower_http::{ cors::{self, CorsLayer}, sensitive_headers::SetSensitiveHeadersLayer, set_header::SetResponseHeaderLayer, + timeout::{RequestBodyTimeoutLayer, ResponseBodyTimeoutLayer, TimeoutLayer}, trace::{DefaultOnFailure, DefaultOnRequest, DefaultOnResponse, TraceLayer}, }; use tracing::Level; @@ -59,6 +60,9 @@ pub(crate) fn build(services: &Arc) -> Result<(Router, Guard)> { ) .layer(axum::middleware::from_fn_with_state(Arc::clone(services), request::handle)) .layer(SecureClientIpSource::ConnectInfo.into_extension()) + .layer(ResponseBodyTimeoutLayer::new(Duration::from_secs(server.config.client_response_timeout))) + .layer(RequestBodyTimeoutLayer::new(Duration::from_secs(server.config.client_receive_timeout))) + .layer(TimeoutLayer::new(Duration::from_secs(server.config.client_request_timeout))) .layer(SetResponseHeaderLayer::if_not_present( HeaderName::from_static("origin-agent-cluster"), // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin-Agent-Cluster HeaderValue::from_static("?1"), diff --git a/src/router/request.rs b/src/router/request.rs index 19cd751b..68ea742c 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -10,8 +10,10 @@ use axum::{ use conduwuit::{debug, debug_error, debug_warn, err, error, trace, Result}; use conduwuit_service::Services; use http::{Method, StatusCode, Uri}; +use tracing::Span; #[tracing::instrument( + name = "request", level = "debug", skip_all, fields( @@ -57,23 +59,34 @@ pub(crate) async fn handle( let uri = req.uri().clone(); let method = req.method().clone(); let services_ = services.clone(); - let task = services - .server - .runtime() - .spawn(async move { execute(services_, req, next).await }); + let parent = Span::current(); + let task = services.server.runtime().spawn(async move { + tokio::select! { + response = execute(&services_, req, next, parent) => response, + () = services_.server.until_shutdown() => + StatusCode::SERVICE_UNAVAILABLE.into_response(), + } + }); task.await .map_err(unhandled) .and_then(move |result| handle_result(&method, &uri, result)) } +#[tracing::instrument( + name = "handle", + level = "debug", + parent = parent, + skip_all, +)] async fn execute( // we made a safety contract that Services will not go out of scope // during the request; this ensures a reference is accounted for at // the base frame of the task regardless of its detachment. 
- _services: Arc, + _services: &Arc, req: http::Request, next: axum::middleware::Next, + parent: Span, ) -> Response { next.run(req).await } From ffe3b0faf2740faa53415a661466c19d4fe722ad Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 2 Feb 2025 10:43:02 +0000 Subject: [PATCH 021/310] make shutdown grace periods configurable Signed-off-by: Jason Volk --- conduwuit-example.toml | 8 ++++++++ src/core/config/mod.rs | 16 ++++++++++++++++ src/router/request.rs | 13 +++++++++++-- src/router/run.rs | 3 ++- src/service/sending/sender.rs | 5 ++--- 5 files changed, 39 insertions(+), 6 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index f4f42365..3e64522c 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -389,6 +389,14 @@ # #client_response_timeout = 120 +# Grace period for clean shutdown of client requests (seconds). +# +#client_shutdown_timeout = 10 + +# Grace period for clean shutdown of federation requests (seconds). +# +#sender_shutdown_timeout = 5 + # Enables registration. If set to false, no users can register on this # server. # diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index b8cfd91b..ff80d1cf 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -498,6 +498,18 @@ pub struct Config { #[serde(default = "default_client_response_timeout")] pub client_response_timeout: u64, + /// Grace period for clean shutdown of client requests (seconds). + /// + /// default: 10 + #[serde(default = "default_client_shutdown_timeout")] + pub client_shutdown_timeout: u64, + + /// Grace period for clean shutdown of federation requests (seconds). + /// + /// default: 5 + #[serde(default = "default_sender_shutdown_timeout")] + pub sender_shutdown_timeout: u64, + /// Enables registration. If set to false, no users can register on this /// server. /// @@ -2194,3 +2206,7 @@ fn default_client_receive_timeout() -> u64 { 75 } fn default_client_request_timeout() -> u64 { 180 } fn default_client_response_timeout() -> u64 { 120 } + +fn default_client_shutdown_timeout() -> u64 { 15 } + +fn default_sender_shutdown_timeout() -> u64 { 5 } diff --git a/src/router/request.rs b/src/router/request.rs index 68ea742c..e0373646 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -1,6 +1,7 @@ use std::{ fmt::Debug, sync::{atomic::Ordering, Arc}, + time::Duration, }; use axum::{ @@ -9,7 +10,9 @@ use axum::{ }; use conduwuit::{debug, debug_error, debug_warn, err, error, trace, Result}; use conduwuit_service::Services; +use futures::FutureExt; use http::{Method, StatusCode, Uri}; +use tokio::time::sleep; use tracing::Span; #[tracing::instrument( @@ -63,8 +66,14 @@ pub(crate) async fn handle( let task = services.server.runtime().spawn(async move { tokio::select! 
{ response = execute(&services_, req, next, parent) => response, - () = services_.server.until_shutdown() => - StatusCode::SERVICE_UNAVAILABLE.into_response(), + response = services_.server.until_shutdown() + .then(|()| { + let timeout = services_.server.config.client_shutdown_timeout; + let timeout = Duration::from_secs(timeout); + sleep(timeout) + }) + .map(|()| StatusCode::SERVICE_UNAVAILABLE) + .map(IntoResponse::into_response) => response, } }); diff --git a/src/router/run.rs b/src/router/run.rs index 605168b8..26701735 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ -122,7 +122,8 @@ async fn handle_shutdown(server: Arc, tx: Sender<()>, handle: axum_serve error!("failed sending shutdown transaction to channel: {e}"); } - let timeout = Duration::from_secs(36); + let timeout = server.config.client_shutdown_timeout; + let timeout = Duration::from_secs(timeout); debug!( ?timeout, handle_active = ?server.metrics.requests_handle_active.load(Ordering::Relaxed), diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index f19b69da..3e86de2d 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -67,8 +67,6 @@ type SendingFuture<'a> = BoxFuture<'a, SendingResult>; type SendingFutures<'a> = FuturesUnordered>; type CurTransactionStatus = HashMap; -const CLEANUP_TIMEOUT_MS: u64 = 3500; - const SELECT_PRESENCE_LIMIT: usize = 256; const SELECT_RECEIPT_LIMIT: usize = 256; const SELECT_EDU_LIMIT: usize = EDU_LIMIT - 2; @@ -216,8 +214,9 @@ impl Service { time::{sleep_until, Instant}, }; + let timeout = self.server.config.sender_shutdown_timeout; + let timeout = Duration::from_secs(timeout); let now = Instant::now(); - let timeout = Duration::from_millis(CLEANUP_TIMEOUT_MS); let deadline = now.checked_add(timeout).unwrap_or(now); loop { trace!("Waiting for {} requests to complete...", futures.len()); From a774afe8370bd6eed3deed6e663229e8457d73c7 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 2 Feb 2025 08:59:14 +0000 Subject: [PATCH 022/310] modernize remove_to_device_events Signed-off-by: Jason Volk --- src/service/users/mod.rs | 43 ++++++++++++++++------------------------ 1 file changed, 17 insertions(+), 26 deletions(-) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index b2d3a94a..e5caed47 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,12 +1,12 @@ -use std::{collections::BTreeMap, mem, mem::size_of, sync::Arc}; +use std::{collections::BTreeMap, mem, sync::Arc}; use conduwuit::{ debug_warn, err, trace, utils::{self, stream::TryIgnore, string::Unquoted, ReadyExt}, Err, Error, Result, Server, }; -use database::{Database, Deserialized, Ignore, Interfix, Json, Map}; -use futures::{FutureExt, Stream, StreamExt, TryFutureExt}; +use database::{Deserialized, Ignore, Interfix, Json, Map}; +use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{ api::client::{device::Device, error::ErrorKind, filter::FilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, @@ -28,7 +28,6 @@ pub struct Service { struct Services { server: Arc, - db: Arc, account_data: Dep, admin: Dep, globals: Dep, @@ -64,7 +63,6 @@ impl crate::Service for Service { Ok(Arc::new(Self { services: Services { server: args.server.clone(), - db: args.db.clone(), account_data: args.depend::("account_data"), admin: args.depend::("admin"), globals: args.depend::("globals"), @@ -801,35 +799,28 @@ impl Service { .map(|(_, val): (Ignore, Raw)| val) } - pub async fn remove_to_device_events( + pub async fn remove_to_device_events( 
&self, user_id: &UserId, device_id: &DeviceId, - until: u64, - ) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xFF); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xFF); + until: Until, + ) where + Until: Into> + Send, + { + type Key<'a> = (&'a UserId, &'a DeviceId, u64); - let mut last = prefix.clone(); - last.extend_from_slice(&until.to_be_bytes()); - - let _cork = self.services.db.cork_and_flush(); + let until = until.into().unwrap_or(u64::MAX); + let from = (user_id, device_id, until); self.db .todeviceid_events - .rev_raw_keys_from(&last) // this includes last + .rev_keys_from(&from) .ignore_err() - .ready_take_while(move |key| key.starts_with(&prefix)) - .map(|key| { - let len = key.len(); - let start = len.saturating_sub(size_of::()); - let count = utils::u64_from_u8(&key[start..len]); - (key, count) + .ready_take_while(move |(user_id_, device_id_, _): &Key<'_>| { + user_id == *user_id_ && device_id == *device_id_ + }) + .ready_for_each(|key: Key<'_>| { + self.db.todeviceid_events.del(key); }) - .ready_take_while(move |(_, count)| *count <= until) - .ready_for_each(|(key, _)| self.db.todeviceid_events.remove(&key)) - .boxed() .await; } From 5e59ce37c4799c24723997326e1ccc26bb3345b0 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 31 Jan 2025 13:51:39 +0000 Subject: [PATCH 023/310] snapshot sync results at next_batch upper-bound Signed-off-by: Jason Volk --- src/admin/query/account_data.rs | 2 +- src/admin/query/users.rs | 2 +- src/api/client/sync/v3.rs | 10 +++++----- src/api/client/sync/v4.rs | 13 +++++++++---- src/api/client/sync/v5.rs | 8 ++++---- src/service/account_data/mod.rs | 12 +++++++----- src/service/users/mod.rs | 18 ++++++++++++++---- 7 files changed, 41 insertions(+), 24 deletions(-) diff --git a/src/admin/query/account_data.rs b/src/admin/query/account_data.rs index b75d8234..bb8ddeff 100644 --- a/src/admin/query/account_data.rs +++ b/src/admin/query/account_data.rs @@ -41,7 +41,7 @@ async fn changes_since( let results: Vec<_> = self .services .account_data - .changes_since(room_id.as_deref(), &user_id, since) + .changes_since(room_id.as_deref(), &user_id, since, None) .collect() .await; let query_time = timer.elapsed(); diff --git a/src/admin/query/users.rs b/src/admin/query/users.rs index 3715ac25..c517d9dd 100644 --- a/src/admin/query/users.rs +++ b/src/admin/query/users.rs @@ -413,7 +413,7 @@ async fn get_to_device_events( let result = self .services .users - .get_to_device_events(&user_id, &device_id) + .get_to_device_events(&user_id, &device_id, None, None) .collect::>() .await; let query_time = timer.elapsed(); diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 49246514..b548aa23 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -290,20 +290,20 @@ pub(crate) async fn build_sync_events( let account_data = services .account_data - .changes_since(None, sender_user, since) + .changes_since(None, sender_user, since, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) .collect(); // Look for device list updates of this account let keys_changed = services .users - .keys_changed(sender_user, since, None) + .keys_changed(sender_user, since, Some(next_batch)) .map(ToOwned::to_owned) .collect::>(); let to_device_events = services .users - .get_to_device_events(sender_user, sender_device) + .get_to_device_events(sender_user, sender_device, Some(since), Some(next_batch)) .collect::>(); let device_one_time_keys_count = services @@ -700,14 +700,14 @@ 
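Illustrative sketch (not part of the patch, standard library only): the snapshot window used by these sync changes. Only entries counted strictly after `since` and no later than `next_batch` are returned, so the response stays consistent even if new events arrive while it is being assembled.

    fn in_window(count: u64, since: u64, next_batch: Option<u64>) -> bool {
        count > since && next_batch.map_or(true, |to| count <= to)
    }

    fn main() {
        let events = [(1_u64, "a"), (5, "b"), (9, "c"), (12, "d")];
        let (since, next_batch) = (4, Some(10));

        let snapshot: Vec<&str> = events
            .iter()
            .filter(|(count, _)| in_window(*count, since, next_batch))
            .map(|&(_, event)| event)
            .collect();

        assert_eq!(snapshot, ["b", "c"]);
    }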
async fn load_joined_room( let account_data_events = services .account_data - .changes_since(Some(room_id), sender_user, since) + .changes_since(Some(room_id), sender_user, since, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) .collect(); // Look for device list updates in this room let device_updates = services .users - .room_keys_changed(room_id, since, None) + .room_keys_changed(room_id, since, Some(next_batch)) .map(|(user_id, _)| user_id) .map(ToOwned::to_owned) .collect::>(); diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index b7967498..66793ba1 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -153,7 +153,7 @@ pub(crate) async fn sync_events_v4_route( if body.extensions.account_data.enabled.unwrap_or(false) { account_data.global = services .account_data - .changes_since(None, sender_user, globalsince) + .changes_since(None, sender_user, globalsince, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) .collect() .await; @@ -164,7 +164,7 @@ pub(crate) async fn sync_events_v4_route( room.clone(), services .account_data - .changes_since(Some(&room), sender_user, globalsince) + .changes_since(Some(&room), sender_user, globalsince, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) .collect() .await, @@ -531,7 +531,7 @@ pub(crate) async fn sync_events_v4_route( room_id.to_owned(), services .account_data - .changes_since(Some(room_id), sender_user, *roomsince) + .changes_since(Some(room_id), sender_user, *roomsince, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) .collect() .await, @@ -779,7 +779,12 @@ pub(crate) async fn sync_events_v4_route( Some(sync_events::v4::ToDevice { events: services .users - .get_to_device_events(sender_user, &sender_device) + .get_to_device_events( + sender_user, + &sender_device, + Some(globalsince), + Some(next_batch), + ) .collect() .await, next_batch: next_batch.to_string(), diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index 66647f0e..e7b5fe74 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -390,7 +390,7 @@ async fn process_rooms( room_id.to_owned(), services .account_data - .changes_since(Some(room_id), sender_user, *roomsince) + .changes_since(Some(room_id), sender_user, *roomsince, Some(next_batch)) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) .collect() .await, @@ -644,7 +644,7 @@ async fn collect_account_data( account_data.global = services .account_data - .changes_since(None, sender_user, globalsince) + .changes_since(None, sender_user, globalsince, None) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global)) .collect() .await; @@ -655,7 +655,7 @@ async fn collect_account_data( room.clone(), services .account_data - .changes_since(Some(room), sender_user, globalsince) + .changes_since(Some(room), sender_user, globalsince, None) .ready_filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room)) .collect() .await, @@ -876,7 +876,7 @@ async fn collect_to_device( next_batch: next_batch.to_string(), events: services .users - .get_to_device_events(sender_user, sender_device) + .get_to_device_events(sender_user, sender_device, None, Some(next_batch)) .collect() .await, }) diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index ddbc15a4..5a943f88 100644 --- a/src/service/account_data/mod.rs +++ 
b/src/service/account_data/mod.rs @@ -5,7 +5,7 @@ use conduwuit::{ utils::{result::LogErr, stream::TryIgnore, ReadyExt}, Err, Result, }; -use database::{Deserialized, Handle, Interfix, Json, Map}; +use database::{Deserialized, Handle, Ignore, Json, Map}; use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{ events::{ @@ -131,18 +131,20 @@ pub fn changes_since<'a>( room_id: Option<&'a RoomId>, user_id: &'a UserId, since: u64, + to: Option, ) -> impl Stream + Send + 'a { - let prefix = (room_id, user_id, Interfix); - let prefix = database::serialize_key(prefix).expect("failed to serialize prefix"); + type Key<'a> = (Option<&'a RoomId>, &'a UserId, u64, Ignore); // Skip the data that's exactly at since, because we sent that last time let first_possible = (room_id, user_id, since.saturating_add(1)); self.db .roomuserdataid_accountdata - .stream_from_raw(&first_possible) + .stream_from(&first_possible) .ignore_err() - .ready_take_while(move |(k, _)| k.starts_with(&prefix)) + .ready_take_while(move |((room_id_, user_id_, count, _), _): &(Key<'_>, _)| { + room_id == *room_id_ && user_id == *user_id_ && to.is_none_or(|to| *count <= to) + }) .map(move |(_, v)| { match room_id { | Some(_) => serde_json::from_slice::>(v) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index e5caed47..68b87541 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,7 +1,7 @@ use std::{collections::BTreeMap, mem, sync::Arc}; use conduwuit::{ - debug_warn, err, trace, + at, debug_warn, err, trace, utils::{self, stream::TryIgnore, string::Unquoted, ReadyExt}, Err, Error, Result, Server, }; @@ -790,13 +790,23 @@ impl Service { &'a self, user_id: &'a UserId, device_id: &'a DeviceId, + since: Option, + to: Option, ) -> impl Stream> + Send + 'a { - let prefix = (user_id, device_id, Interfix); + type Key<'a> = (&'a UserId, &'a DeviceId, u64); + + let from = (user_id, device_id, since.map_or(0, |since| since.saturating_add(1))); + self.db .todeviceid_events - .stream_prefix(&prefix) + .stream_from(&from) .ignore_err() - .map(|(_, val): (Ignore, Raw)| val) + .ready_take_while(move |((user_id_, device_id_, count), _): &(Key<'_>, _)| { + user_id == *user_id_ + && device_id == *device_id_ + && to.is_none_or(|to| *count <= to) + }) + .map(at!(1)) } pub async fn remove_to_device_events( From 32f990fc72c6bfbf4a869dac9f5b2b88ee334684 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 2 Feb 2025 23:19:35 +0000 Subject: [PATCH 024/310] fix the panic counter in the tower layer Signed-off-by: Jason Volk --- src/router/layers.rs | 18 +++++++------- src/router/request.rs | 56 ++++++++++++++++++++----------------------- 2 files changed, 35 insertions(+), 39 deletions(-) diff --git a/src/router/layers.rs b/src/router/layers.rs index e8a8b7e8..7ebec16e 100644 --- a/src/router/layers.rs +++ b/src/router/layers.rs @@ -49,6 +49,7 @@ pub(crate) fn build(services: &Arc) -> Result<(Router, Guard)> { ))] let layers = layers.layer(compression_layer(server)); + let services_ = services.clone(); let layers = layers .layer(SetSensitiveHeadersLayer::new([header::AUTHORIZATION])) .layer( @@ -89,7 +90,7 @@ pub(crate) fn build(services: &Arc) -> Result<(Router, Guard)> { )) .layer(cors_layer(server)) .layer(body_limit_layer(server)) - .layer(CatchPanicLayer::custom(catch_panic)); + .layer(CatchPanicLayer::custom(move |panic| catch_panic(panic, services_.clone()))); let (router, guard) = router::build(services); Ok((router.layer(layers), guard)) @@ -167,15 +168,14 @@ fn body_limit_layer(server: &Server) -> 
DefaultBodyLimit { #[allow(clippy::needless_pass_by_value)] fn catch_panic( err: Box, + services: Arc, ) -> http::Response> { - //TODO: XXX - /* - conduwuit_service::services() - .server - .metrics - .requests_panic - .fetch_add(1, std::sync::atomic::Ordering::Release); - */ + services + .server + .metrics + .requests_panic + .fetch_add(1, std::sync::atomic::Ordering::Release); + let details = if let Some(s) = err.downcast_ref::() { s.clone() } else if let Some(s) = err.downcast_ref::<&str>() { diff --git a/src/router/request.rs b/src/router/request.rs index e0373646..b6c22d45 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -15,40 +15,12 @@ use http::{Method, StatusCode, Uri}; use tokio::time::sleep; use tracing::Span; -#[tracing::instrument( - name = "request", - level = "debug", - skip_all, - fields( - active = %services - .server - .metrics - .requests_handle_active - .fetch_add(1, Ordering::Relaxed), - handled = %services - .server - .metrics - .requests_handle_finished - .load(Ordering::Relaxed), - ) -)] +#[tracing::instrument(name = "request", level = "debug", skip_all)] pub(crate) async fn handle( State(services): State>, req: http::Request, next: axum::middleware::Next, ) -> Result { - #[cfg(debug_assertions)] - conduwuit::defer! {{ - _ = services.server - .metrics - .requests_handle_finished - .fetch_add(1, Ordering::Relaxed); - _ = services.server - .metrics - .requests_handle_active - .fetch_sub(1, Ordering::Relaxed); - }}; - if !services.server.running() { debug_warn!( method = %req.method(), @@ -87,16 +59,40 @@ pub(crate) async fn handle( level = "debug", parent = parent, skip_all, + fields( + active = %services + .server + .metrics + .requests_handle_active + .fetch_add(1, Ordering::Relaxed), + handled = %services + .server + .metrics + .requests_handle_finished + .load(Ordering::Relaxed), + ) )] async fn execute( // we made a safety contract that Services will not go out of scope // during the request; this ensures a reference is accounted for at // the base frame of the task regardless of its detachment. - _services: &Arc, + services: &Arc, req: http::Request, next: axum::middleware::Next, parent: Span, ) -> Response { + #[cfg(debug_assertions)] + conduwuit::defer! 
{{ + _ = services.server + .metrics + .requests_handle_finished + .fetch_add(1, Ordering::Relaxed); + _ = services.server + .metrics + .requests_handle_active + .fetch_sub(1, Ordering::Relaxed); + }}; + next.run(req).await } From da4b94d80dc9939ad385860af764ed1a1837b84e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 2 Feb 2025 22:13:27 +0000 Subject: [PATCH 025/310] trap panics when running in gdb Signed-off-by: Jason Volk --- src/core/debug.rs | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/src/core/debug.rs b/src/core/debug.rs index ca0f2f2e..8a5eccfd 100644 --- a/src/core/debug.rs +++ b/src/core/debug.rs @@ -1,6 +1,6 @@ #![allow(clippy::disallowed_macros)] -use std::{any::Any, panic}; +use std::{any::Any, env, panic, sync::LazyLock}; // Export debug proc_macros pub use conduwuit_macros::recursion_depth; @@ -58,16 +58,26 @@ pub const INFO_SPAN_LEVEL: Level = if cfg!(debug_assertions) { Level::DEBUG }; -pub fn set_panic_trap() { +pub static DEBUGGER: LazyLock = + LazyLock::new(|| env::var("_").unwrap_or_default().ends_with("gdb")); + +#[cfg_attr(debug_assertions, crate::ctor)] +#[cfg_attr(not(debug_assertions), allow(dead_code))] +fn set_panic_trap() { + if !*DEBUGGER { + return; + } + let next = panic::take_hook(); panic::set_hook(Box::new(move |info| { panic_handler(info, &next); })); } -#[inline(always)] +#[cold] +#[inline(never)] #[allow(deprecated_in_future)] -fn panic_handler(info: &panic::PanicHookInfo<'_>, next: &dyn Fn(&panic::PanicHookInfo<'_>)) { +pub fn panic_handler(info: &panic::PanicHookInfo<'_>, next: &dyn Fn(&panic::PanicHookInfo<'_>)) { trap(); next(info); } From 106bcd30b75b6846be197fc5431063b0b82c4336 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 2 Feb 2025 07:40:08 +0000 Subject: [PATCH 026/310] optimize incremental sync state diff Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 366 +++++++++-------- src/service/rooms/state_accessor/mod.rs | 523 +++++++++++++----------- 2 files changed, 474 insertions(+), 415 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index b548aa23..a97e4329 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -7,13 +7,13 @@ use std::{ use axum::extract::State; use conduwuit::{ at, err, error, extract_variant, is_equal_to, pair_of, - pdu::EventHash, + pdu::{Event, EventHash}, + ref_at, result::FlatOk, utils::{ self, - future::OptionExt, math::ruma_from_u64, - stream::{BroadbandExt, Tools, WidebandExt}, + stream::{BroadbandExt, Tools, TryExpect, WidebandExt}, BoolExt, IterStream, ReadyExt, TryFutureExtExt, }, PduCount, PduEvent, Result, @@ -53,19 +53,16 @@ use ruma::{ serde::Raw, uint, DeviceId, EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId, }; +use service::rooms::short::{ShortEventId, ShortStateKey}; use super::{load_timeline, share_encrypted_room}; -use crate::{ - client::{ignored_filter, lazy_loading_witness}, - Ruma, RumaResponse, -}; +use crate::{client::ignored_filter, Ruma, RumaResponse}; #[derive(Default)] struct StateChanges { heroes: Option>, joined_member_count: Option, invited_member_count: Option, - joined_since_last_sync: bool, state_events: Vec, device_list_updates: HashSet, left_encrypted_users: HashSet, @@ -625,6 +622,40 @@ async fn load_joined_room( .await?; let (timeline_pdus, limited) = timeline; + let initial = since_shortstatehash.is_none(); + let lazy_loading_enabled = filter.room.state.lazy_load_options.is_enabled() + || filter.room.timeline.lazy_load_options.is_enabled(); + + let 
lazy_loading_context = &lazy_loading::Context { + user_id: sender_user, + device_id: sender_device, + room_id, + token: Some(since), + options: Some(&filter.room.state.lazy_load_options), + }; + + // Reset lazy loading because this is an initial sync + let lazy_load_reset: OptionFuture<_> = initial + .then(|| services.rooms.lazy_loading.reset(lazy_loading_context)) + .into(); + + lazy_load_reset.await; + let witness: OptionFuture<_> = lazy_loading_enabled + .then(|| { + let witness: Witness = timeline_pdus + .iter() + .map(ref_at!(1)) + .map(Event::sender) + .map(Into::into) + .chain(receipt_events.keys().map(Into::into)) + .collect(); + + services + .rooms + .lazy_loading + .witness_retain(witness, lazy_loading_context) + }) + .into(); let last_notification_read: OptionFuture<_> = timeline_pdus .is_empty() @@ -646,41 +677,20 @@ async fn load_joined_room( }) .into(); + let (last_notification_read, since_sender_member, witness) = + join3(last_notification_read, since_sender_member, witness).await; + let joined_since_last_sync = since_sender_member - .await .flatten() .is_none_or(|content: RoomMemberEventContent| { content.membership != MembershipState::Join }); - let lazy_loading_enabled = filter.room.state.lazy_load_options.is_enabled() - || filter.room.timeline.lazy_load_options.is_enabled(); - - let lazy_reset = since_shortstatehash.is_none(); - let lazy_loading_context = &lazy_loading::Context { - user_id: sender_user, - device_id: sender_device, - room_id, - token: None, - options: Some(&filter.room.state.lazy_load_options), - }; - - // Reset lazy loading because this is an initial sync - let lazy_load_reset: OptionFuture<_> = lazy_reset - .then(|| services.rooms.lazy_loading.reset(lazy_loading_context)) - .into(); - - lazy_load_reset.await; - let witness: OptionFuture<_> = lazy_loading_enabled - .then(|| lazy_loading_witness(services, lazy_loading_context, timeline_pdus.iter())) - .into(); - let StateChanges { heroes, joined_member_count, invited_member_count, - joined_since_last_sync, state_events, mut device_list_updates, left_encrypted_users, @@ -693,7 +703,7 @@ async fn load_joined_room( since_shortstatehash, current_shortstatehash, joined_since_last_sync, - witness.await.as_ref(), + witness.as_ref(), ) .boxed() .await?; @@ -719,28 +729,7 @@ async fn load_joined_room( .map(|(_, pdu)| pdu.to_sync_room_event()) .collect(); - let typing_events = services - .rooms - .typing - .last_typing_update(room_id) - .and_then(|count| async move { - if count <= since { - return Ok(Vec::>::new()); - } - - let typings = services - .rooms - .typing - .typings_all(room_id, sender_user) - .await?; - - Ok(vec![serde_json::from_str(&serde_json::to_string(&typings)?)?]) - }) - .unwrap_or(Vec::new()); - - let send_notification_counts = last_notification_read - .is_none_or(|&count| count > since) - .await; + let send_notification_counts = last_notification_read.is_none_or(|count| count > since); let notification_count: OptionFuture<_> = send_notification_counts .then(|| { @@ -764,8 +753,27 @@ async fn load_joined_room( }) .into(); - let events = join3(room_events, account_data_events, typing_events); + let typing_events = services + .rooms + .typing + .last_typing_update(room_id) + .and_then(|count| async move { + if count <= since { + return Ok(Vec::>::new()); + } + + let typings = services + .rooms + .typing + .typings_all(room_id, sender_user) + .await?; + + Ok(vec![serde_json::from_str(&serde_json::to_string(&typings)?)?]) + }) + .unwrap_or(Vec::new()); + let unread_notifications = 
join(notification_count, highlight_count); + let events = join3(room_events, account_data_events, typing_events); let (unread_notifications, events, device_updates) = join3(unread_notifications, events, device_updates) .boxed() @@ -942,7 +950,6 @@ async fn calculate_state_initial( heroes, joined_member_count, invited_member_count, - joined_since_last_sync: true, state_events, ..Default::default() }) @@ -952,7 +959,7 @@ async fn calculate_state_initial( #[allow(clippy::too_many_arguments)] async fn calculate_state_incremental<'a>( services: &Services, - sender_user: &UserId, + sender_user: &'a UserId, room_id: &RoomId, full_state: bool, _filter: &FilterDefinition, @@ -965,102 +972,130 @@ async fn calculate_state_incremental<'a>( let state_changed = since_shortstatehash != current_shortstatehash; - let state_get_id = |user_id: &'a UserId| { - services - .rooms - .state_accessor - .state_get_id(current_shortstatehash, &StateEventType::RoomMember, user_id.as_str()) - .ok() - }; - - let lazy_state_ids: OptionFuture<_> = witness - .map(|witness| { - witness - .iter() - .stream() - .broad_filter_map(|user_id| state_get_id(user_id)) - .collect::>() - }) - .into(); - - let current_state_ids: OptionFuture<_> = state_changed - .then(|| { - services - .rooms - .state_accessor - .state_full_ids(current_shortstatehash) - .collect::>() - }) - .into(); - - let since_state_ids: OptionFuture<_> = (state_changed && !full_state) - .then(|| { - services - .rooms - .state_accessor - .state_full_ids(since_shortstatehash) - .collect::>() - }) - .into(); - - let lazy_state_ids = lazy_state_ids - .map(Option::into_iter) - .map(|iter| iter.flat_map(Vec::into_iter)) - .map(IterStream::stream) - .flatten_stream(); - - let ref since_state_ids = since_state_ids.shared(); - let delta_state_events = current_state_ids - .map(Option::into_iter) - .map(|iter| iter.flat_map(Vec::into_iter)) - .map(IterStream::stream) - .flatten_stream() - .filter_map(|(shortstatekey, event_id): (u64, OwnedEventId)| async move { - since_state_ids - .clone() - .await - .is_none_or(|since_state| since_state.get(&shortstatekey) != Some(&event_id)) - .then_some(event_id) - }) - .chain(lazy_state_ids) - .broad_filter_map(|event_id: OwnedEventId| async move { - services - .rooms - .timeline - .get_pdu(&event_id) - .await - .map(move |pdu| (event_id, pdu)) - .ok() - }) - .collect::>(); - - let since_encryption = services - .rooms - .state_accessor - .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "") - .is_ok(); - let encrypted_room = services .rooms .state_accessor .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "") - .is_ok(); + .is_ok() + .await; - let (delta_state_events, encrypted_room) = join(delta_state_events, encrypted_room).await; + let state_get_shorteventid = |user_id: &'a UserId| { + services + .rooms + .state_accessor + .state_get_shortid( + current_shortstatehash, + &StateEventType::RoomMember, + user_id.as_str(), + ) + .ok() + }; - let (mut device_list_updates, left_encrypted_users) = delta_state_events - .values() + let lazy_state_ids: OptionFuture<_> = witness + .filter(|_| !full_state && !encrypted_room) + .map(|witness| { + witness + .iter() + .stream() + .broad_filter_map(|user_id| state_get_shorteventid(user_id)) + .into_future() + }) + .into(); + + let state_diff: OptionFuture<_> = (!full_state && state_changed) + .then(|| { + services + .rooms + .state_accessor + .state_added((since_shortstatehash, current_shortstatehash)) + .boxed() + .into_future() + }) + .into(); + + let 
current_state_ids: OptionFuture<_> = full_state + .then(|| { + services + .rooms + .state_accessor + .state_full_shortids(current_shortstatehash) + .expect_ok() + .boxed() + .into_future() + }) + .into(); + + let lazy_state_ids = lazy_state_ids + .map(|opt| { + opt.map(|(curr, next)| { + let opt = curr; + let iter = Option::into_iter(opt); + IterStream::stream(iter).chain(next) + }) + }) + .map(Option::into_iter) + .map(IterStream::stream) + .flatten_stream() + .flatten(); + + let state_diff_ids = state_diff + .map(|opt| { + opt.map(|(curr, next)| { + let opt = curr; + let iter = Option::into_iter(opt); + IterStream::stream(iter).chain(next) + }) + }) + .map(Option::into_iter) + .map(IterStream::stream) + .flatten_stream() + .flatten(); + + let state_events = current_state_ids + .map(|opt| { + opt.map(|(curr, next)| { + let opt = curr; + let iter = Option::into_iter(opt); + IterStream::stream(iter).chain(next) + }) + }) + .map(Option::into_iter) + .map(IterStream::stream) + .flatten_stream() + .flatten() + .chain(state_diff_ids) + .broad_filter_map(|(shortstatekey, shorteventid)| async move { + if witness.is_none() || encrypted_room { + return Some(shorteventid); + } + + lazy_filter(services, sender_user, shortstatekey, shorteventid).await + }) + .chain(lazy_state_ids) + .broad_filter_map(|shorteventid| { + services + .rooms + .short + .get_eventid_from_short(shorteventid) + .ok() + }) + .broad_filter_map(|event_id: OwnedEventId| async move { + services.rooms.timeline.get_pdu(&event_id).await.ok() + }) + .collect::>() + .await; + + let (device_list_updates, left_encrypted_users) = state_events + .iter() .stream() .ready_filter(|_| encrypted_room) .ready_filter(|state_event| state_event.kind == RoomMember) .ready_filter_map(|state_event| { - let content = state_event.get_content().ok()?; - let user_id = state_event.state_key.as_ref()?.parse().ok()?; + let content: RoomMemberEventContent = state_event.get_content().ok()?; + let user_id: OwnedUserId = state_event.state_key.as_ref()?.parse().ok()?; + Some((content, user_id)) }) - .ready_filter(|(_, user_id): &(RoomMemberEventContent, OwnedUserId)| { - user_id != sender_user - }) .fold_default(|(mut dlu, mut leu): pair_of!(HashSet<_>), (content, user_id)| async move { use MembershipState::*; @@ -1068,8 +1103,9 @@ async fn calculate_state_incremental<'a>( |user_id| share_encrypted_room(services, sender_user, user_id, Some(room_id)); match content.membership { - | Join if !shares_encrypted_room(&user_id).await => dlu.insert(user_id), | Leave => leu.insert(user_id), + | Join if joined_since_last_sync || !shares_encrypted_room(&user_id).await => + dlu.insert(user_id), | _ => false, }; @@ -1077,29 +1113,7 @@ async fn calculate_state_incremental<'a>( }) .await; - // If the user is in a new encrypted room, give them all joined users - let new_encrypted_room = encrypted_room && !since_encryption.await; - if joined_since_last_sync && encrypted_room || new_encrypted_room { - services - .rooms - .state_cache - .room_members(room_id) - .ready_filter(|&user_id| sender_user != user_id) - .map(ToOwned::to_owned) - .broad_filter_map(|user_id| async move { - share_encrypted_room(services, sender_user, &user_id, Some(room_id)) - .await - .or_some(user_id) - }) - .ready_for_each(|user_id| { - device_list_updates.insert(user_id); - }) - .await; - } - - let send_member_count = delta_state_events - .values() - .any(|event| event.kind == RoomMember); + let send_member_count = state_events.iter().any(|event| event.kind == RoomMember); let (joined_member_count, 
invited_member_count, heroes) = if send_member_count { calculate_counts(services, room_id, sender_user).await? @@ -1111,13 +1125,29 @@ async fn calculate_state_incremental<'a>( heroes, joined_member_count, invited_member_count, - joined_since_last_sync, + state_events, device_list_updates, left_encrypted_users, - state_events: delta_state_events.into_values().collect(), }) } +async fn lazy_filter( + services: &Services, + sender_user: &UserId, + shortstatekey: ShortStateKey, + shorteventid: ShortEventId, +) -> Option { + let (event_type, state_key) = services + .rooms + .short + .get_statekey_from_short(shortstatekey) + .await + .ok()?; + + (event_type != StateEventType::RoomMember || state_key == sender_user.as_str()) + .then_some(shorteventid) +} + async fn calculate_counts( services: &Services, room_id: &RoomId, diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 8b56c8b6..bed8d210 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -6,7 +6,7 @@ use std::{ }; use conduwuit::{ - at, err, error, + at, err, error, pair_of, pdu::PduBuilder, utils, utils::{ @@ -17,7 +17,7 @@ use conduwuit::{ Err, Error, PduEvent, Result, }; use database::{Deserialized, Map}; -use futures::{FutureExt, Stream, StreamExt, TryFutureExt}; +use futures::{future::try_join, FutureExt, Stream, StreamExt, TryFutureExt}; use lru_cache::LruCache; use ruma::{ events::{ @@ -48,7 +48,7 @@ use crate::{ rooms::{ short::{ShortEventId, ShortStateHash, ShortStateKey}, state::RoomMutexGuard, - state_compressor::{compress_state_event, parse_compressed_state_event}, + state_compressor::{compress_state_event, parse_compressed_state_event, CompressedState}, }, Dep, }; @@ -143,6 +143,256 @@ impl crate::Service for Service { } impl Service { + /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). + pub async fn room_state_get_content( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, + ) -> Result + where + T: for<'de> Deserialize<'de>, + { + self.room_state_get(room_id, event_type, state_key) + .await + .and_then(|event| event.get_content()) + } + + /// Returns the full room state. + #[tracing::instrument(skip(self), level = "debug")] + pub fn room_state_full<'a>( + &'a self, + room_id: &'a RoomId, + ) -> impl Stream> + Send + 'a { + self.services + .state + .get_room_shortstatehash(room_id) + .map_ok(|shortstatehash| self.state_full(shortstatehash).map(Ok)) + .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) + .try_flatten_stream() + } + + /// Returns the full room state pdus + #[tracing::instrument(skip(self), level = "debug")] + pub fn room_state_full_pdus<'a>( + &'a self, + room_id: &'a RoomId, + ) -> impl Stream> + Send + 'a { + self.services + .state + .get_room_shortstatehash(room_id) + .map_ok(|shortstatehash| self.state_full_pdus(shortstatehash).map(Ok)) + .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) + .try_flatten_stream() + } + + /// Returns a single EventId from `room_id` with key (`event_type`, + /// `state_key`). 
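Illustrative sketch (deliberately simplified, not the actual implementation): the lazy-loading idea behind the member filtering above. Non-member state always passes through; m.room.member events are kept only for users in the witness set, i.e. the senders that actually appear in the returned timeline. The real code operates on short IDs and additionally special-cases the requesting user and encrypted rooms.

    use std::collections::HashSet;

    struct StateEvent {
        kind: &'static str,
        state_key: &'static str,
    }

    fn lazy_filter<'a>(events: &'a [StateEvent], witness: &HashSet<&str>) -> Vec<&'a StateEvent> {
        events
            .iter()
            .filter(|event| event.kind != "m.room.member" || witness.contains(event.state_key))
            .collect()
    }

    fn main() {
        let events = [
            StateEvent { kind: "m.room.name", state_key: "" },
            StateEvent { kind: "m.room.member", state_key: "@alice:example.org" },
            StateEvent { kind: "m.room.member", state_key: "@bob:example.org" },
        ];
        let witness: HashSet<&str> = HashSet::from(["@alice:example.org"]);

        // room name plus alice's membership; bob's membership is withheld
        assert_eq!(lazy_filter(&events, &witness).len(), 2);
    }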
+ #[tracing::instrument(skip(self), level = "debug")] + pub async fn room_state_get_id( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, + ) -> Result + where + Id: for<'de> Deserialize<'de> + Sized + ToOwned, + ::Owned: Borrow, + { + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_get_id(shortstatehash, event_type, state_key)) + .await + } + + /// Returns a single PDU from `room_id` with key (`event_type`, + /// `state_key`). + #[tracing::instrument(skip(self), level = "debug")] + pub async fn room_state_get( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, + ) -> Result { + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_get(shortstatehash, event_type, state_key)) + .await + } + + /// The user was a joined member at this state (potentially in the past) + #[inline] + async fn user_was_joined(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { + self.user_membership(shortstatehash, user_id).await == MembershipState::Join + } + + /// The user was an invited or joined room member at this state (potentially + /// in the past) + #[inline] + async fn user_was_invited(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { + let s = self.user_membership(shortstatehash, user_id).await; + s == MembershipState::Join || s == MembershipState::Invite + } + + /// Get membership for given user in state + async fn user_membership( + &self, + shortstatehash: ShortStateHash, + user_id: &UserId, + ) -> MembershipState { + self.state_get_content(shortstatehash, &StateEventType::RoomMember, user_id.as_str()) + .await + .map_or(MembershipState::Leave, |c: RoomMemberEventContent| c.membership) + } + + /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). + pub async fn state_get_content( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, + ) -> Result + where + T: for<'de> Deserialize<'de>, + { + self.state_get(shortstatehash, event_type, state_key) + .await + .and_then(|event| event.get_content()) + } + + #[tracing::instrument(skip(self), level = "debug")] + pub async fn state_contains( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, + ) -> bool { + let Ok(shortstatekey) = self + .services + .short + .get_shortstatekey(event_type, state_key) + .await + else { + return false; + }; + + self.state_contains_shortstatekey(shortstatehash, shortstatekey) + .await + } + + #[tracing::instrument(skip(self), level = "debug")] + pub async fn state_contains_shortstatekey( + &self, + shortstatehash: ShortStateHash, + shortstatekey: ShortStateKey, + ) -> bool { + let start = compress_state_event(shortstatekey, 0); + let end = compress_state_event(shortstatekey, u64::MAX); + + self.load_full_state(shortstatehash) + .map_ok(|full_state| full_state.range(start..end).next().copied()) + .await + .flat_ok() + .is_some() + } + + /// Returns a single PDU from `room_id` with key (`event_type`, + /// `state_key`). + pub async fn state_get( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, + ) -> Result { + self.state_get_id(shortstatehash, event_type, state_key) + .and_then(|event_id: OwnedEventId| async move { + self.services.timeline.get_pdu(&event_id).await + }) + .await + } + + /// Returns a single EventId from `room_id` with key (`event_type`, + /// `state_key`). 
+ #[tracing::instrument(skip(self), level = "debug")] + pub async fn state_get_id( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, + ) -> Result + where + Id: for<'de> Deserialize<'de> + Sized + ToOwned, + ::Owned: Borrow, + { + let shorteventid = self + .state_get_shortid(shortstatehash, event_type, state_key) + .await?; + + self.services + .short + .get_eventid_from_short(shorteventid) + .await + } + + /// Returns a single EventId from `room_id` with key (`event_type`, + /// `state_key`). + #[tracing::instrument(skip(self), level = "debug")] + pub async fn state_get_shortid( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, + ) -> Result { + let shortstatekey = self + .services + .short + .get_shortstatekey(event_type, state_key) + .await?; + + let start = compress_state_event(shortstatekey, 0); + let end = compress_state_event(shortstatekey, u64::MAX); + self.load_full_state(shortstatehash) + .map_ok(|full_state| { + full_state + .range(start..end) + .next() + .copied() + .map(parse_compressed_state_event) + .map(at!(1)) + .ok_or(err!(Request(NotFound("Not found in room state")))) + }) + .await? + } + + /// Returns the state events removed between the interval (present in .0 but + /// not in .1) + #[inline] + pub fn state_removed( + &self, + shortstatehash: pair_of!(ShortStateHash), + ) -> impl Stream + Send + '_ { + self.state_added((shortstatehash.1, shortstatehash.0)) + } + + /// Returns the state events added between the interval (present in .1 but + /// not in .0) + #[tracing::instrument(skip(self), level = "debug")] + pub fn state_added<'a>( + &'a self, + shortstatehash: pair_of!(ShortStateHash), + ) -> impl Stream + Send + 'a { + let a = self.load_full_state(shortstatehash.0); + let b = self.load_full_state(shortstatehash.1); + try_join(a, b) + .map_ok(|(a, b)| b.difference(&a).copied().collect::>()) + .map_ok(IterStream::try_stream) + .try_flatten_stream() + .expect_ok() + .map(parse_compressed_state_event) + } + pub fn state_full( &self, shortstatehash: ShortStateHash, @@ -208,110 +458,11 @@ impl Service { .ready_filter_map(|(event_id, shortstatekey)| Some((shortstatekey, event_id.ok()?))) } - /// Returns a single EventId from `room_id` with key (`event_type`, - /// `state_key`). - #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_get_id( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - Id: for<'de> Deserialize<'de> + Sized + ToOwned, - ::Owned: Borrow, - { - let shorteventid = self - .state_get_shortid(shortstatehash, event_type, state_key) - .await?; - - self.services - .short - .get_eventid_from_short(shorteventid) - .await - } - - /// Returns a single EventId from `room_id` with key (`event_type`, - /// `state_key`). 
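Illustrative sketch (standard library only, plain u64 values standing in for compressed state entries): the incremental diff above reduces to a set difference between the compressed state of the two snapshots, which is what state_added and state_removed compute.

    use std::collections::BTreeSet;

    // entries present in `current` but absent from `since`, i.e. state added in between
    fn state_added(since: &BTreeSet<u64>, current: &BTreeSet<u64>) -> Vec<u64> {
        current.difference(since).copied().collect()
    }

    fn main() {
        let since = BTreeSet::from([1, 2, 3]);
        let current = BTreeSet::from([2, 3, 4, 5]);

        assert_eq!(state_added(&since, &current), [4, 5]);
        // swapping the arguments yields the removed entries instead
        assert_eq!(state_added(&current, &since), [1]);
    }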
- #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_get_shortid( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - let shortstatekey = self - .services - .short - .get_shortstatekey(event_type, state_key) - .await?; - - let start = compress_state_event(shortstatekey, 0); - let end = compress_state_event(shortstatekey, u64::MAX); - self.services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) - .map_ok(|full_state| { - full_state - .range(start..end) - .next() - .copied() - .map(parse_compressed_state_event) - .map(at!(1)) - .ok_or(err!(Request(NotFound("Not found in room state")))) - }) - .await? - } - - #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_contains( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> bool { - let Ok(shortstatekey) = self - .services - .short - .get_shortstatekey(event_type, state_key) - .await - else { - return false; - }; - - self.state_contains_shortstatekey(shortstatehash, shortstatekey) - .await - } - - #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_contains_shortstatekey( - &self, - shortstatehash: ShortStateHash, - shortstatekey: ShortStateKey, - ) -> bool { - let start = compress_state_event(shortstatekey, 0); - let end = compress_state_event(shortstatekey, u64::MAX); - - self.services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) - .map_ok(|full_state| full_state.range(start..end).next().copied()) - .await - .flat_ok() - .is_some() - } - pub fn state_full_shortids( &self, shortstatehash: ShortStateHash, ) -> impl Stream> + Send + '_ { - self.services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .map_err(|e| err!(Database("Missing state IDs: {e}"))) - .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) + self.load_full_state(shortstatehash) .map_ok(|full_state| { full_state .deref() @@ -324,59 +475,32 @@ impl Service { .try_flatten_stream() } - /// Returns a single PDU from `room_id` with key (`event_type`, - /// `state_key`). - pub async fn state_get( + async fn load_full_state( &self, shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - self.state_get_id(shortstatehash, event_type, state_key) - .and_then(|event_id: OwnedEventId| async move { - self.services.timeline.get_pdu(&event_id).await + ) -> Result> { + self.services + .state_compressor + .load_shortstatehash_info(shortstatehash) + .map_err(|e| err!(Database("Missing state IDs: {e}"))) + .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) + .await + } + + /// Returns the state hash for this pdu. + pub async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { + const BUFSIZE: usize = size_of::(); + + self.services + .short + .get_shorteventid(event_id) + .and_then(|shorteventid| { + self.db + .shorteventid_shortstatehash + .aqry::(&shorteventid) }) .await - } - - /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). 
- pub async fn state_get_content( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - T: for<'de> Deserialize<'de>, - { - self.state_get(shortstatehash, event_type, state_key) - .await - .and_then(|event| event.get_content()) - } - - /// Get membership for given user in state - async fn user_membership( - &self, - shortstatehash: ShortStateHash, - user_id: &UserId, - ) -> MembershipState { - self.state_get_content(shortstatehash, &StateEventType::RoomMember, user_id.as_str()) - .await - .map_or(MembershipState::Leave, |c: RoomMemberEventContent| c.membership) - } - - /// The user was a joined member at this state (potentially in the past) - #[inline] - async fn user_was_joined(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { - self.user_membership(shortstatehash, user_id).await == MembershipState::Join - } - - /// The user was an invited or joined room member at this state (potentially - /// in the past) - #[inline] - async fn user_was_invited(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { - let s = self.user_membership(shortstatehash, user_id).await; - s == MembershipState::Join || s == MembershipState::Invite + .deserialized() } /// Whether a server is allowed to see an event through federation, based on @@ -521,101 +645,6 @@ impl Service { } } - /// Returns the state hash for this pdu. - pub async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { - const BUFSIZE: usize = size_of::(); - - self.services - .short - .get_shorteventid(event_id) - .and_then(|shorteventid| { - self.db - .shorteventid_shortstatehash - .aqry::(&shorteventid) - }) - .await - .deserialized() - } - - /// Returns the full room state. - #[tracing::instrument(skip(self), level = "debug")] - pub fn room_state_full<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream> + Send + 'a { - self.services - .state - .get_room_shortstatehash(room_id) - .map_ok(|shortstatehash| self.state_full(shortstatehash).map(Ok)) - .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) - .try_flatten_stream() - } - - /// Returns the full room state pdus - #[tracing::instrument(skip(self), level = "debug")] - pub fn room_state_full_pdus<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream> + Send + 'a { - self.services - .state - .get_room_shortstatehash(room_id) - .map_ok(|shortstatehash| self.state_full_pdus(shortstatehash).map(Ok)) - .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) - .try_flatten_stream() - } - - /// Returns a single EventId from `room_id` with key (`event_type`, - /// `state_key`). - #[tracing::instrument(skip(self), level = "debug")] - pub async fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - Id: for<'de> Deserialize<'de> + Sized + ToOwned, - ::Owned: Borrow, - { - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_get_id(shortstatehash, event_type, state_key)) - .await - } - - /// Returns a single PDU from `room_id` with key (`event_type`, - /// `state_key`). 
- #[tracing::instrument(skip(self), level = "debug")] - pub async fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_get(shortstatehash, event_type, state_key)) - .await - } - - /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). - pub async fn room_state_get_content( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - T: for<'de> Deserialize<'de>, - { - self.room_state_get(room_id, event_type, state_key) - .await - .and_then(|event| event.get_content()) - } - pub async fn get_name(&self, room_id: &RoomId) -> Result { self.room_state_get_content(room_id, &StateEventType::RoomName, "") .await From b3271e0d653de1c585b1b5db95447045b0453b06 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 2 Feb 2025 17:27:39 +0000 Subject: [PATCH 027/310] split state_accessor Signed-off-by: Jason Volk --- src/service/rooms/state_accessor/mod.rs | 634 +----------------- .../rooms/state_accessor/room_state.rs | 90 +++ .../rooms/state_accessor/server_can.rs | 73 ++ src/service/rooms/state_accessor/state.rs | 320 +++++++++ src/service/rooms/state_accessor/user_can.rs | 187 ++++++ 5 files changed, 684 insertions(+), 620 deletions(-) create mode 100644 src/service/rooms/state_accessor/room_state.rs create mode 100644 src/service/rooms/state_accessor/server_can.rs create mode 100644 src/service/rooms/state_accessor/state.rs create mode 100644 src/service/rooms/state_accessor/user_can.rs diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index bed8d210..b7952ce6 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -1,23 +1,19 @@ +mod room_state; +mod server_can; +mod state; +mod user_can; + use std::{ - borrow::Borrow, fmt::Write, - ops::Deref, sync::{Arc, Mutex as StdMutex, Mutex}, }; use conduwuit::{ - at, err, error, pair_of, - pdu::PduBuilder, - utils, - utils::{ - math::{usize_from_f64, Expected}, - result::FlatOk, - stream::{BroadbandExt, IterStream, ReadyExt, TryExpect}, - }, - Err, Error, PduEvent, Result, + err, utils, + utils::math::{usize_from_f64, Expected}, + Result, }; -use database::{Deserialized, Map}; -use futures::{future::try_join, FutureExt, Stream, StreamExt, TryFutureExt}; +use database::Map; use lru_cache::LruCache; use ruma::{ events::{ @@ -29,29 +25,19 @@ use ruma::{ guest_access::{GuestAccess, RoomGuestAccessEventContent}, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent, RoomMembership}, - member::{MembershipState, RoomMemberEventContent}, + member::RoomMemberEventContent, name::RoomNameEventContent, - power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, topic::RoomTopicEventContent, }, - StateEventType, TimelineEventType, + StateEventType, }, room::RoomType, space::SpaceRoomJoinRule, - EventEncryptionAlgorithm, EventId, JsOption, OwnedEventId, OwnedRoomAliasId, OwnedRoomId, - OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, + EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, OwnedRoomId, OwnedServerName, + OwnedUserId, RoomId, UserId, }; -use serde::Deserialize; -use crate::{ - rooms, - rooms::{ - short::{ShortEventId, ShortStateHash, ShortStateKey}, - state::RoomMutexGuard, - state_compressor::{compress_state_event, parse_compressed_state_event, 
CompressedState}, - }, - Dep, -}; +use crate::{rooms, rooms::short::ShortStateHash, Dep}; pub struct Service { pub server_visibility_cache: Mutex>, @@ -143,508 +129,6 @@ impl crate::Service for Service { } impl Service { - /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). - pub async fn room_state_get_content( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - T: for<'de> Deserialize<'de>, - { - self.room_state_get(room_id, event_type, state_key) - .await - .and_then(|event| event.get_content()) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self), level = "debug")] - pub fn room_state_full<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream> + Send + 'a { - self.services - .state - .get_room_shortstatehash(room_id) - .map_ok(|shortstatehash| self.state_full(shortstatehash).map(Ok)) - .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) - .try_flatten_stream() - } - - /// Returns the full room state pdus - #[tracing::instrument(skip(self), level = "debug")] - pub fn room_state_full_pdus<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream> + Send + 'a { - self.services - .state - .get_room_shortstatehash(room_id) - .map_ok(|shortstatehash| self.state_full_pdus(shortstatehash).map(Ok)) - .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) - .try_flatten_stream() - } - - /// Returns a single EventId from `room_id` with key (`event_type`, - /// `state_key`). - #[tracing::instrument(skip(self), level = "debug")] - pub async fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - Id: for<'de> Deserialize<'de> + Sized + ToOwned, - ::Owned: Borrow, - { - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_get_id(shortstatehash, event_type, state_key)) - .await - } - - /// Returns a single PDU from `room_id` with key (`event_type`, - /// `state_key`). - #[tracing::instrument(skip(self), level = "debug")] - pub async fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - self.services - .state - .get_room_shortstatehash(room_id) - .and_then(|shortstatehash| self.state_get(shortstatehash, event_type, state_key)) - .await - } - - /// The user was a joined member at this state (potentially in the past) - #[inline] - async fn user_was_joined(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { - self.user_membership(shortstatehash, user_id).await == MembershipState::Join - } - - /// The user was an invited or joined room member at this state (potentially - /// in the past) - #[inline] - async fn user_was_invited(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { - let s = self.user_membership(shortstatehash, user_id).await; - s == MembershipState::Join || s == MembershipState::Invite - } - - /// Get membership for given user in state - async fn user_membership( - &self, - shortstatehash: ShortStateHash, - user_id: &UserId, - ) -> MembershipState { - self.state_get_content(shortstatehash, &StateEventType::RoomMember, user_id.as_str()) - .await - .map_or(MembershipState::Leave, |c: RoomMemberEventContent| c.membership) - } - - /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). 
- pub async fn state_get_content( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - T: for<'de> Deserialize<'de>, - { - self.state_get(shortstatehash, event_type, state_key) - .await - .and_then(|event| event.get_content()) - } - - #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_contains( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> bool { - let Ok(shortstatekey) = self - .services - .short - .get_shortstatekey(event_type, state_key) - .await - else { - return false; - }; - - self.state_contains_shortstatekey(shortstatehash, shortstatekey) - .await - } - - #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_contains_shortstatekey( - &self, - shortstatehash: ShortStateHash, - shortstatekey: ShortStateKey, - ) -> bool { - let start = compress_state_event(shortstatekey, 0); - let end = compress_state_event(shortstatekey, u64::MAX); - - self.load_full_state(shortstatehash) - .map_ok(|full_state| full_state.range(start..end).next().copied()) - .await - .flat_ok() - .is_some() - } - - /// Returns a single PDU from `room_id` with key (`event_type`, - /// `state_key`). - pub async fn state_get( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - self.state_get_id(shortstatehash, event_type, state_key) - .and_then(|event_id: OwnedEventId| async move { - self.services.timeline.get_pdu(&event_id).await - }) - .await - } - - /// Returns a single EventId from `room_id` with key (`event_type`, - /// `state_key`). - #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_get_id( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result - where - Id: for<'de> Deserialize<'de> + Sized + ToOwned, - ::Owned: Borrow, - { - let shorteventid = self - .state_get_shortid(shortstatehash, event_type, state_key) - .await?; - - self.services - .short - .get_eventid_from_short(shorteventid) - .await - } - - /// Returns a single EventId from `room_id` with key (`event_type`, - /// `state_key`). - #[tracing::instrument(skip(self), level = "debug")] - pub async fn state_get_shortid( - &self, - shortstatehash: ShortStateHash, - event_type: &StateEventType, - state_key: &str, - ) -> Result { - let shortstatekey = self - .services - .short - .get_shortstatekey(event_type, state_key) - .await?; - - let start = compress_state_event(shortstatekey, 0); - let end = compress_state_event(shortstatekey, u64::MAX); - self.load_full_state(shortstatehash) - .map_ok(|full_state| { - full_state - .range(start..end) - .next() - .copied() - .map(parse_compressed_state_event) - .map(at!(1)) - .ok_or(err!(Request(NotFound("Not found in room state")))) - }) - .await? 
- } - - /// Returns the state events removed between the interval (present in .0 but - /// not in .1) - #[inline] - pub fn state_removed( - &self, - shortstatehash: pair_of!(ShortStateHash), - ) -> impl Stream + Send + '_ { - self.state_added((shortstatehash.1, shortstatehash.0)) - } - - /// Returns the state events added between the interval (present in .1 but - /// not in .0) - #[tracing::instrument(skip(self), level = "debug")] - pub fn state_added<'a>( - &'a self, - shortstatehash: pair_of!(ShortStateHash), - ) -> impl Stream + Send + 'a { - let a = self.load_full_state(shortstatehash.0); - let b = self.load_full_state(shortstatehash.1); - try_join(a, b) - .map_ok(|(a, b)| b.difference(&a).copied().collect::>()) - .map_ok(IterStream::try_stream) - .try_flatten_stream() - .expect_ok() - .map(parse_compressed_state_event) - } - - pub fn state_full( - &self, - shortstatehash: ShortStateHash, - ) -> impl Stream + Send + '_ { - self.state_full_pdus(shortstatehash) - .ready_filter_map(|pdu| { - Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu)) - }) - } - - pub fn state_full_pdus( - &self, - shortstatehash: ShortStateHash, - ) -> impl Stream + Send + '_ { - let short_ids = self - .state_full_shortids(shortstatehash) - .expect_ok() - .map(at!(1)); - - self.services - .short - .multi_get_eventid_from_short(short_ids) - .ready_filter_map(Result::ok) - .broad_filter_map(move |event_id: OwnedEventId| async move { - self.services.timeline.get_pdu(&event_id).await.ok() - }) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self), level = "debug")] - pub fn state_full_ids<'a, Id>( - &'a self, - shortstatehash: ShortStateHash, - ) -> impl Stream + Send + 'a - where - Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned + 'a, - ::Owned: Borrow, - { - let shortids = self - .state_full_shortids(shortstatehash) - .expect_ok() - .unzip() - .shared(); - - let shortstatekeys = shortids - .clone() - .map(at!(0)) - .map(Vec::into_iter) - .map(IterStream::stream) - .flatten_stream(); - - let shorteventids = shortids - .map(at!(1)) - .map(Vec::into_iter) - .map(IterStream::stream) - .flatten_stream(); - - self.services - .short - .multi_get_eventid_from_short(shorteventids) - .zip(shortstatekeys) - .ready_filter_map(|(event_id, shortstatekey)| Some((shortstatekey, event_id.ok()?))) - } - - pub fn state_full_shortids( - &self, - shortstatehash: ShortStateHash, - ) -> impl Stream> + Send + '_ { - self.load_full_state(shortstatehash) - .map_ok(|full_state| { - full_state - .deref() - .iter() - .copied() - .map(parse_compressed_state_event) - .collect() - }) - .map_ok(|vec: Vec<_>| vec.into_iter().try_stream()) - .try_flatten_stream() - } - - async fn load_full_state( - &self, - shortstatehash: ShortStateHash, - ) -> Result> { - self.services - .state_compressor - .load_shortstatehash_info(shortstatehash) - .map_err(|e| err!(Database("Missing state IDs: {e}"))) - .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) - .await - } - - /// Returns the state hash for this pdu. 
- pub async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { - const BUFSIZE: usize = size_of::(); - - self.services - .short - .get_shorteventid(event_id) - .and_then(|shorteventid| { - self.db - .shorteventid_shortstatehash - .aqry::(&shorteventid) - }) - .await - .deserialized() - } - - /// Whether a server is allowed to see an event through federation, based on - /// the room's history_visibility at that event's state. - #[tracing::instrument(skip_all, level = "trace")] - pub async fn server_can_see_event( - &self, - origin: &ServerName, - room_id: &RoomId, - event_id: &EventId, - ) -> bool { - let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { - return true; - }; - - if let Some(visibility) = self - .server_visibility_cache - .lock() - .expect("locked") - .get_mut(&(origin.to_owned(), shortstatehash)) - { - return *visibility; - } - - let history_visibility = self - .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "") - .await - .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { - c.history_visibility - }); - - let current_server_members = self - .services - .state_cache - .room_members(room_id) - .ready_filter(|member| member.server_name() == origin); - - let visibility = match history_visibility { - | HistoryVisibility::WorldReadable | HistoryVisibility::Shared => true, - | HistoryVisibility::Invited => { - // Allow if any member on requesting server was AT LEAST invited, else deny - current_server_members - .any(|member| self.user_was_invited(shortstatehash, member)) - .await - }, - | HistoryVisibility::Joined => { - // Allow if any member on requested server was joined, else deny - current_server_members - .any(|member| self.user_was_joined(shortstatehash, member)) - .await - }, - | _ => { - error!("Unknown history visibility {history_visibility}"); - false - }, - }; - - self.server_visibility_cache - .lock() - .expect("locked") - .insert((origin.to_owned(), shortstatehash), visibility); - - visibility - } - - /// Whether a user is allowed to see an event, based on - /// the room's history_visibility at that event's state. 
- #[tracing::instrument(skip_all, level = "trace")] - pub async fn user_can_see_event( - &self, - user_id: &UserId, - room_id: &RoomId, - event_id: &EventId, - ) -> bool { - let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { - return true; - }; - - if let Some(visibility) = self - .user_visibility_cache - .lock() - .expect("locked") - .get_mut(&(user_id.to_owned(), shortstatehash)) - { - return *visibility; - } - - let currently_member = self.services.state_cache.is_joined(user_id, room_id).await; - - let history_visibility = self - .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "") - .await - .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { - c.history_visibility - }); - - let visibility = match history_visibility { - | HistoryVisibility::WorldReadable => true, - | HistoryVisibility::Shared => currently_member, - | HistoryVisibility::Invited => { - // Allow if any member on requesting server was AT LEAST invited, else deny - self.user_was_invited(shortstatehash, user_id).await - }, - | HistoryVisibility::Joined => { - // Allow if any member on requested server was joined, else deny - self.user_was_joined(shortstatehash, user_id).await - }, - | _ => { - error!("Unknown history visibility {history_visibility}"); - false - }, - }; - - self.user_visibility_cache - .lock() - .expect("locked") - .insert((user_id.to_owned(), shortstatehash), visibility); - - visibility - } - - /// Whether a user is allowed to see an event, based on - /// the room's history_visibility at that event's state. - #[tracing::instrument(skip_all, level = "trace")] - pub async fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> bool { - if self.services.state_cache.is_joined(user_id, room_id).await { - return true; - } - - let history_visibility = self - .room_state_get_content(room_id, &StateEventType::RoomHistoryVisibility, "") - .await - .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { - c.history_visibility - }); - - match history_visibility { - | HistoryVisibility::Invited => - self.services.state_cache.is_invited(user_id, room_id).await, - | HistoryVisibility::WorldReadable => true, - | _ => false, - } - } - pub async fn get_name(&self, room_id: &RoomId) -> Result { self.room_state_get_content(room_id, &StateEventType::RoomName, "") .await @@ -669,28 +153,6 @@ impl Service { .await } - pub async fn user_can_invite( - &self, - room_id: &RoomId, - sender: &UserId, - target_user: &UserId, - state_lock: &RoomMutexGuard, - ) -> bool { - self.services - .timeline - .create_hash_and_sign_event( - PduBuilder::state( - target_user.into(), - &RoomMemberEventContent::new(MembershipState::Invite), - ), - sender, - room_id, - state_lock, - ) - .await - .is_ok() - } - /// Checks if guests are able to view room content without joining pub async fn is_world_readable(&self, room_id: &RoomId) -> bool { self.room_state_get_content(room_id, &StateEventType::RoomHistoryVisibility, "") @@ -726,74 +188,6 @@ impl Service { .map(|c: RoomTopicEventContent| c.topic) } - /// Checks if a given user can redact a given event - /// - /// If federation is true, it allows redaction events from any user of the - /// same server as the original event sender - pub async fn user_can_redact( - &self, - redacts: &EventId, - sender: &UserId, - room_id: &RoomId, - federation: bool, - ) -> Result { - let redacting_event = self.services.timeline.get_pdu(redacts).await; - - if redacting_event - .as_ref() - .is_ok_and(|pdu| pdu.kind == 
TimelineEventType::RoomCreate) - { - return Err!(Request(Forbidden("Redacting m.room.create is not safe, forbidding."))); - } - - if redacting_event - .as_ref() - .is_ok_and(|pdu| pdu.kind == TimelineEventType::RoomServerAcl) - { - return Err!(Request(Forbidden( - "Redacting m.room.server_acl will result in the room being inaccessible for \ - everyone (empty allow key), forbidding." - ))); - } - - if let Ok(pl_event_content) = self - .room_state_get_content::( - room_id, - &StateEventType::RoomPowerLevels, - "", - ) - .await - { - let pl_event: RoomPowerLevels = pl_event_content.into(); - Ok(pl_event.user_can_redact_event_of_other(sender) - || pl_event.user_can_redact_own_event(sender) - && if let Ok(redacting_event) = redacting_event { - if federation { - redacting_event.sender.server_name() == sender.server_name() - } else { - redacting_event.sender == sender - } - } else { - false - }) - } else { - // Falling back on m.room.create to judge power level - if let Ok(room_create) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "") - .await - { - Ok(room_create.sender == sender - || redacting_event - .as_ref() - .is_ok_and(|redacting_event| redacting_event.sender == sender)) - } else { - Err(Error::bad_database( - "No m.room.power_levels or m.room.create events in database for room", - )) - } - } - } - /// Returns the join rule (`SpaceRoomJoinRule`) for a given room pub async fn get_join_rule( &self, diff --git a/src/service/rooms/state_accessor/room_state.rs b/src/service/rooms/state_accessor/room_state.rs new file mode 100644 index 00000000..98a82cea --- /dev/null +++ b/src/service/rooms/state_accessor/room_state.rs @@ -0,0 +1,90 @@ +use std::borrow::Borrow; + +use conduwuit::{err, implement, PduEvent, Result}; +use futures::{Stream, StreamExt, TryFutureExt}; +use ruma::{events::StateEventType, EventId, RoomId}; +use serde::Deserialize; + +/// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). +#[implement(super::Service)] +pub async fn room_state_get_content( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, +) -> Result +where + T: for<'de> Deserialize<'de>, +{ + self.room_state_get(room_id, event_type, state_key) + .await + .and_then(|event| event.get_content()) +} + +/// Returns the full room state. +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn room_state_full<'a>( + &'a self, + room_id: &'a RoomId, +) -> impl Stream> + Send + 'a { + self.services + .state + .get_room_shortstatehash(room_id) + .map_ok(|shortstatehash| self.state_full(shortstatehash).map(Ok)) + .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) + .try_flatten_stream() +} + +/// Returns the full room state pdus +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn room_state_full_pdus<'a>( + &'a self, + room_id: &'a RoomId, +) -> impl Stream> + Send + 'a { + self.services + .state + .get_room_shortstatehash(room_id) + .map_ok(|shortstatehash| self.state_full_pdus(shortstatehash).map(Ok)) + .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) + .try_flatten_stream() +} + +/// Returns a single EventId from `room_id` with key (`event_type`, +/// `state_key`). 
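+///
+/// A hypothetical usage sketch (the `services.rooms.state_accessor` handle,
+/// the event type and the `OwnedEventId` result type are illustrative
+/// assumptions, not requirements of this API):
+///
+/// ```ignore
+/// let create_id: OwnedEventId = services
+///     .rooms
+///     .state_accessor
+///     .room_state_get_id(room_id, &StateEventType::RoomCreate, "")
+///     .await?;
+/// ```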
+#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub async fn room_state_get_id( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, +) -> Result +where + Id: for<'de> Deserialize<'de> + Sized + ToOwned, + ::Owned: Borrow, +{ + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_get_id(shortstatehash, event_type, state_key)) + .await +} + +/// Returns a single PDU from `room_id` with key (`event_type`, +/// `state_key`). +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub async fn room_state_get( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, +) -> Result { + self.services + .state + .get_room_shortstatehash(room_id) + .and_then(|shortstatehash| self.state_get(shortstatehash, event_type, state_key)) + .await +} diff --git a/src/service/rooms/state_accessor/server_can.rs b/src/service/rooms/state_accessor/server_can.rs new file mode 100644 index 00000000..4d834227 --- /dev/null +++ b/src/service/rooms/state_accessor/server_can.rs @@ -0,0 +1,73 @@ +use conduwuit::{error, implement, utils::stream::ReadyExt}; +use futures::StreamExt; +use ruma::{ + events::{ + room::history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + StateEventType, + }, + EventId, RoomId, ServerName, +}; + +/// Whether a server is allowed to see an event through federation, based on +/// the room's history_visibility at that event's state. +#[implement(super::Service)] +#[tracing::instrument(skip_all, level = "trace")] +pub async fn server_can_see_event( + &self, + origin: &ServerName, + room_id: &RoomId, + event_id: &EventId, +) -> bool { + let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { + return true; + }; + + if let Some(visibility) = self + .server_visibility_cache + .lock() + .expect("locked") + .get_mut(&(origin.to_owned(), shortstatehash)) + { + return *visibility; + } + + let history_visibility = self + .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "") + .await + .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { + c.history_visibility + }); + + let current_server_members = self + .services + .state_cache + .room_members(room_id) + .ready_filter(|member| member.server_name() == origin); + + let visibility = match history_visibility { + | HistoryVisibility::WorldReadable | HistoryVisibility::Shared => true, + | HistoryVisibility::Invited => { + // Allow if any member on requesting server was AT LEAST invited, else deny + current_server_members + .any(|member| self.user_was_invited(shortstatehash, member)) + .await + }, + | HistoryVisibility::Joined => { + // Allow if any member on requested server was joined, else deny + current_server_members + .any(|member| self.user_was_joined(shortstatehash, member)) + .await + }, + | _ => { + error!("Unknown history visibility {history_visibility}"); + false + }, + }; + + self.server_visibility_cache + .lock() + .expect("locked") + .insert((origin.to_owned(), shortstatehash), visibility); + + visibility +} diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs new file mode 100644 index 00000000..c47a5693 --- /dev/null +++ b/src/service/rooms/state_accessor/state.rs @@ -0,0 +1,320 @@ +use std::{borrow::Borrow, ops::Deref, sync::Arc}; + +use conduwuit::{ + at, err, implement, pair_of, + utils::{ + result::FlatOk, + stream::{BroadbandExt, IterStream, ReadyExt, 
TryExpect}, + }, + PduEvent, Result, +}; +use database::Deserialized; +use futures::{future::try_join, FutureExt, Stream, StreamExt, TryFutureExt}; +use ruma::{ + events::{ + room::member::{MembershipState, RoomMemberEventContent}, + StateEventType, + }, + EventId, OwnedEventId, UserId, +}; +use serde::Deserialize; + +use crate::rooms::{ + short::{ShortEventId, ShortStateHash, ShortStateKey}, + state_compressor::{compress_state_event, parse_compressed_state_event, CompressedState}, +}; + +/// The user was a joined member at this state (potentially in the past) +#[implement(super::Service)] +#[inline] +pub async fn user_was_joined(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { + self.user_membership(shortstatehash, user_id).await == MembershipState::Join +} + +/// The user was an invited or joined room member at this state (potentially +/// in the past) +#[implement(super::Service)] +#[inline] +pub async fn user_was_invited(&self, shortstatehash: ShortStateHash, user_id: &UserId) -> bool { + let s = self.user_membership(shortstatehash, user_id).await; + s == MembershipState::Join || s == MembershipState::Invite +} + +/// Get membership for given user in state +#[implement(super::Service)] +pub async fn user_membership( + &self, + shortstatehash: ShortStateHash, + user_id: &UserId, +) -> MembershipState { + self.state_get_content(shortstatehash, &StateEventType::RoomMember, user_id.as_str()) + .await + .map_or(MembershipState::Leave, |c: RoomMemberEventContent| c.membership) +} + +/// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). +#[implement(super::Service)] +pub async fn state_get_content( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, +) -> Result +where + T: for<'de> Deserialize<'de>, +{ + self.state_get(shortstatehash, event_type, state_key) + .await + .and_then(|event| event.get_content()) +} + +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub async fn state_contains( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, +) -> bool { + let Ok(shortstatekey) = self + .services + .short + .get_shortstatekey(event_type, state_key) + .await + else { + return false; + }; + + self.state_contains_shortstatekey(shortstatehash, shortstatekey) + .await +} + +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub async fn state_contains_shortstatekey( + &self, + shortstatehash: ShortStateHash, + shortstatekey: ShortStateKey, +) -> bool { + let start = compress_state_event(shortstatekey, 0); + let end = compress_state_event(shortstatekey, u64::MAX); + + self.load_full_state(shortstatehash) + .map_ok(|full_state| full_state.range(start..=end).next().copied()) + .await + .flat_ok() + .is_some() +} + +/// Returns a single PDU from `room_id` with key (`event_type`, +/// `state_key`). +#[implement(super::Service)] +pub async fn state_get( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, +) -> Result { + self.state_get_id(shortstatehash, event_type, state_key) + .and_then(|event_id: OwnedEventId| async move { + self.services.timeline.get_pdu(&event_id).await + }) + .await +} + +/// Returns a single EventId from `room_id` with key (`event_type`, +/// `state_key`). 
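+///
+/// Sketch of the intended call pattern (the snapshot is assumed to come from
+/// something like `pdu_shortstatehash`; the result type is an illustrative
+/// choice):
+///
+/// ```ignore
+/// let member_id: OwnedEventId = self
+///     .state_get_id(shortstatehash, &StateEventType::RoomMember, user_id.as_str())
+///     .await?;
+/// ```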
+#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub async fn state_get_id( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, +) -> Result +where + Id: for<'de> Deserialize<'de> + Sized + ToOwned, + ::Owned: Borrow, +{ + let shorteventid = self + .state_get_shortid(shortstatehash, event_type, state_key) + .await?; + + self.services + .short + .get_eventid_from_short(shorteventid) + .await +} + +/// Returns a single EventId from `room_id` with key (`event_type`, +/// `state_key`). +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub async fn state_get_shortid( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, + state_key: &str, +) -> Result { + let shortstatekey = self + .services + .short + .get_shortstatekey(event_type, state_key) + .await?; + + let start = compress_state_event(shortstatekey, 0); + let end = compress_state_event(shortstatekey, u64::MAX); + self.load_full_state(shortstatehash) + .map_ok(|full_state| { + full_state + .range(start..=end) + .next() + .copied() + .map(parse_compressed_state_event) + .map(at!(1)) + .ok_or(err!(Request(NotFound("Not found in room state")))) + }) + .await? +} + +/// Returns the state events removed between the interval (present in .0 but +/// not in .1) +#[implement(super::Service)] +#[inline] +pub fn state_removed( + &self, + shortstatehash: pair_of!(ShortStateHash), +) -> impl Stream + Send + '_ { + self.state_added((shortstatehash.1, shortstatehash.0)) +} + +/// Returns the state events added between the interval (present in .1 but +/// not in .0) +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn state_added<'a>( + &'a self, + shortstatehash: pair_of!(ShortStateHash), +) -> impl Stream + Send + 'a { + let a = self.load_full_state(shortstatehash.0); + let b = self.load_full_state(shortstatehash.1); + try_join(a, b) + .map_ok(|(a, b)| b.difference(&a).copied().collect::>()) + .map_ok(IterStream::try_stream) + .try_flatten_stream() + .expect_ok() + .map(parse_compressed_state_event) +} + +#[implement(super::Service)] +pub fn state_full( + &self, + shortstatehash: ShortStateHash, +) -> impl Stream + Send + '_ { + self.state_full_pdus(shortstatehash) + .ready_filter_map(|pdu| { + Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu)) + }) +} + +#[implement(super::Service)] +pub fn state_full_pdus( + &self, + shortstatehash: ShortStateHash, +) -> impl Stream + Send + '_ { + let short_ids = self + .state_full_shortids(shortstatehash) + .expect_ok() + .map(at!(1)); + + self.services + .short + .multi_get_eventid_from_short(short_ids) + .ready_filter_map(Result::ok) + .broad_filter_map(move |event_id: OwnedEventId| async move { + self.services.timeline.get_pdu(&event_id).await.ok() + }) +} + +/// Builds a StateMap by iterating over all keys that start +/// with state_hash, this gives the full state for the given state_hash. 
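+///
+/// A minimal collection sketch (the map type is illustrative; any
+/// `Default + Extend` collection over `(ShortStateKey, Id)` pairs should
+/// work with `StreamExt::collect`):
+///
+/// ```ignore
+/// let full_state: HashMap<ShortStateKey, OwnedEventId> =
+///     self.state_full_ids(shortstatehash).collect().await;
+/// ```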
+#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn state_full_ids<'a, Id>( + &'a self, + shortstatehash: ShortStateHash, +) -> impl Stream + Send + 'a +where + Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned + 'a, + ::Owned: Borrow, +{ + let shortids = self + .state_full_shortids(shortstatehash) + .expect_ok() + .unzip() + .shared(); + + let shortstatekeys = shortids + .clone() + .map(at!(0)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + let shorteventids = shortids + .map(at!(1)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + self.services + .short + .multi_get_eventid_from_short(shorteventids) + .zip(shortstatekeys) + .ready_filter_map(|(event_id, shortstatekey)| Some((shortstatekey, event_id.ok()?))) +} + +#[implement(super::Service)] +pub fn state_full_shortids( + &self, + shortstatehash: ShortStateHash, +) -> impl Stream> + Send + '_ { + self.load_full_state(shortstatehash) + .map_ok(|full_state| { + full_state + .deref() + .iter() + .copied() + .map(parse_compressed_state_event) + .collect() + }) + .map_ok(|vec: Vec<_>| vec.into_iter().try_stream()) + .try_flatten_stream() +} + +#[implement(super::Service)] +async fn load_full_state(&self, shortstatehash: ShortStateHash) -> Result> { + self.services + .state_compressor + .load_shortstatehash_info(shortstatehash) + .map_err(|e| err!(Database("Missing state IDs: {e}"))) + .map_ok(|vec| vec.last().expect("at least one layer").full_state.clone()) + .await +} + +/// Returns the state hash for this pdu. +#[implement(super::Service)] +pub async fn pdu_shortstatehash(&self, event_id: &EventId) -> Result { + const BUFSIZE: usize = size_of::(); + + self.services + .short + .get_shorteventid(event_id) + .and_then(|shorteventid| { + self.db + .shorteventid_shortstatehash + .aqry::(&shorteventid) + }) + .await + .deserialized() +} diff --git a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs new file mode 100644 index 00000000..725a4fba --- /dev/null +++ b/src/service/rooms/state_accessor/user_can.rs @@ -0,0 +1,187 @@ +use conduwuit::{error, implement, pdu::PduBuilder, Err, Error, Result}; +use ruma::{ + events::{ + room::{ + history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + member::{MembershipState, RoomMemberEventContent}, + power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, + }, + StateEventType, TimelineEventType, + }, + EventId, RoomId, UserId, +}; + +use crate::rooms::state::RoomMutexGuard; + +/// Checks if a given user can redact a given event +/// +/// If federation is true, it allows redaction events from any user of the +/// same server as the original event sender +#[implement(super::Service)] +pub async fn user_can_redact( + &self, + redacts: &EventId, + sender: &UserId, + room_id: &RoomId, + federation: bool, +) -> Result { + let redacting_event = self.services.timeline.get_pdu(redacts).await; + + if redacting_event + .as_ref() + .is_ok_and(|pdu| pdu.kind == TimelineEventType::RoomCreate) + { + return Err!(Request(Forbidden("Redacting m.room.create is not safe, forbidding."))); + } + + if redacting_event + .as_ref() + .is_ok_and(|pdu| pdu.kind == TimelineEventType::RoomServerAcl) + { + return Err!(Request(Forbidden( + "Redacting m.room.server_acl will result in the room being inaccessible for \ + everyone (empty allow key), forbidding." 
+ ))); + } + + if let Ok(pl_event_content) = self + .room_state_get_content::( + room_id, + &StateEventType::RoomPowerLevels, + "", + ) + .await + { + let pl_event: RoomPowerLevels = pl_event_content.into(); + Ok(pl_event.user_can_redact_event_of_other(sender) + || pl_event.user_can_redact_own_event(sender) + && if let Ok(redacting_event) = redacting_event { + if federation { + redacting_event.sender.server_name() == sender.server_name() + } else { + redacting_event.sender == sender + } + } else { + false + }) + } else { + // Falling back on m.room.create to judge power level + if let Ok(room_create) = self + .room_state_get(room_id, &StateEventType::RoomCreate, "") + .await + { + Ok(room_create.sender == sender + || redacting_event + .as_ref() + .is_ok_and(|redacting_event| redacting_event.sender == sender)) + } else { + Err(Error::bad_database( + "No m.room.power_levels or m.room.create events in database for room", + )) + } + } +} + +/// Whether a user is allowed to see an event, based on +/// the room's history_visibility at that event's state. +#[implement(super::Service)] +#[tracing::instrument(skip_all, level = "trace")] +pub async fn user_can_see_event( + &self, + user_id: &UserId, + room_id: &RoomId, + event_id: &EventId, +) -> bool { + let Ok(shortstatehash) = self.pdu_shortstatehash(event_id).await else { + return true; + }; + + if let Some(visibility) = self + .user_visibility_cache + .lock() + .expect("locked") + .get_mut(&(user_id.to_owned(), shortstatehash)) + { + return *visibility; + } + + let currently_member = self.services.state_cache.is_joined(user_id, room_id).await; + + let history_visibility = self + .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "") + .await + .map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| { + c.history_visibility + }); + + let visibility = match history_visibility { + | HistoryVisibility::WorldReadable => true, + | HistoryVisibility::Shared => currently_member, + | HistoryVisibility::Invited => { + // Allow if any member on requesting server was AT LEAST invited, else deny + self.user_was_invited(shortstatehash, user_id).await + }, + | HistoryVisibility::Joined => { + // Allow if any member on requested server was joined, else deny + self.user_was_joined(shortstatehash, user_id).await + }, + | _ => { + error!("Unknown history visibility {history_visibility}"); + false + }, + }; + + self.user_visibility_cache + .lock() + .expect("locked") + .insert((user_id.to_owned(), shortstatehash), visibility); + + visibility +} + +/// Whether a user is allowed to see an event, based on +/// the room's history_visibility at that event's state. 
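+///
+/// Unlike `user_can_see_event` above, this checks access to the room's
+/// current state as a whole: joined users always pass, invited users pass
+/// when the visibility is `invited`, and anyone passes when the room is
+/// world-readable.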
+#[implement(super::Service)]
+#[tracing::instrument(skip_all, level = "trace")]
+pub async fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> bool {
+	if self.services.state_cache.is_joined(user_id, room_id).await {
+		return true;
+	}
+
+	let history_visibility = self
+		.room_state_get_content(room_id, &StateEventType::RoomHistoryVisibility, "")
+		.await
+		.map_or(HistoryVisibility::Shared, |c: RoomHistoryVisibilityEventContent| {
+			c.history_visibility
+		});
+
+	match history_visibility {
+		| HistoryVisibility::Invited =>
+			self.services.state_cache.is_invited(user_id, room_id).await,
+		| HistoryVisibility::WorldReadable => true,
+		| _ => false,
+	}
+}
+
+#[implement(super::Service)]
+pub async fn user_can_invite(
+	&self,
+	room_id: &RoomId,
+	sender: &UserId,
+	target_user: &UserId,
+	state_lock: &RoomMutexGuard,
+) -> bool {
+	self.services
+		.timeline
+		.create_hash_and_sign_event(
+			PduBuilder::state(
+				target_user.into(),
+				&RoomMemberEventContent::new(MembershipState::Invite),
+			),
+			sender,
+			room_id,
+			state_lock,
+		)
+		.await
+		.is_ok()
+}

From d32534164c0092a30ac351337b7dd34aa8f5d456 Mon Sep 17 00:00:00 2001
From: Jason Volk
Date: Tue, 4 Feb 2025 20:30:33 +0000
Subject: [PATCH 028/310] fix soft-failed redaction regression (ff8bbd4cfa)

Signed-off-by: Jason Volk
---
 src/service/rooms/event_handler/upgrade_outlier_pdu.rs | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
index 132daca7..b33b0388 100644
--- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
+++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
@@ -128,7 +128,8 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu(
 		| (false, _) => true,
 		| (true, None) => false,
 		| (true, Some(redact_id)) =>
-			self.services
+			!self
+				.services
 				.state_accessor
 				.user_can_redact(&redact_id, &incoming_pdu.sender, &incoming_pdu.room_id, true)
 				.await?,

From 80277f6aa2629a8b9dc2b4e96a64d8e508d47270 Mon Sep 17 00:00:00 2001
From: Nineko
Date: Tue, 4 Feb 2025 16:46:00 -0500
Subject: [PATCH 029/310] Adds .gitattributes to the project to prevent LF and CRLF conflicts. (#681)

---
 .gitattributes | 87 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 87 insertions(+)
 create mode 100644 .gitattributes

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 00000000..3dfaca65
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,87 @@
+# taken from https://github.com/gitattributes/gitattributes/blob/46a8961ad73f5bd4d8d193708840fbc9e851d702/Rust.gitattributes
+# Auto detect text files and perform normalization
+* text=auto
+
+*.rs text diff=rust
+*.toml text diff=toml
+Cargo.lock text
+
+# taken from https://github.com/gitattributes/gitattributes/blob/46a8961ad73f5bd4d8d193708840fbc9e851d702/Common.gitattributes
+# Documents
+*.bibtex text diff=bibtex
+*.doc diff=astextplain
+*.DOC diff=astextplain
+*.docx diff=astextplain
+*.DOCX diff=astextplain
+*.dot diff=astextplain
+*.DOT diff=astextplain
+*.pdf diff=astextplain
+*.PDF diff=astextplain
+*.rtf diff=astextplain
+*.RTF diff=astextplain
+*.md text diff=markdown
+*.mdx text diff=markdown
+*.tex text diff=tex
+*.adoc text
+*.textile text
+*.mustache text
+*.csv text eol=crlf
+*.tab text
+*.tsv text
+*.txt text
+*.sql text
+*.epub diff=astextplain
+
+# Graphics
+*.png binary
+*.jpg binary
+*.jpeg binary
+*.gif binary
+*.tif binary
+*.tiff binary
+*.ico binary
+# SVG treated as text by default. 
+*.svg text
+*.eps binary
+
+# Scripts
+*.bash text eol=lf
+*.fish text eol=lf
+*.ksh text eol=lf
+*.sh text eol=lf
+*.zsh text eol=lf
+# These are explicitly windows files and should use crlf
+*.bat text eol=crlf
+*.cmd text eol=crlf
+*.ps1 text eol=crlf
+
+# Serialisation
+*.json text
+*.toml text
+*.xml text
+*.yaml text
+*.yml text
+
+# Archives
+*.7z binary
+*.bz binary
+*.bz2 binary
+*.bzip2 binary
+*.gz binary
+*.lz binary
+*.lzma binary
+*.rar binary
+*.tar binary
+*.taz binary
+*.tbz binary
+*.tbz2 binary
+*.tgz binary
+*.tlz binary
+*.txz binary
+*.xz binary
+*.Z binary
+*.zip binary
+*.zst binary
+
+# Text files where line endings should be preserved
+*.patch -text
\ No newline at end of file

From 62180897c02d9c306b2179f3685e60ffdc615c1f Mon Sep 17 00:00:00 2001
From: Niko
Date: Sat, 1 Feb 2025 18:35:23 -0500
Subject: [PATCH 030/310] Added blurhash.rs to facilitate blurhashing.

Signed-off-by: Niko
---
 Cargo.lock                    | 373 +++++++++++++++++++++++++++++++++-
 Cargo.toml                    |   8 +-
 conduwuit-example.toml        |  18 ++
 src/api/Cargo.toml            |   1 +
 src/api/client/media.rs       |  21 ++
 src/core/Cargo.toml           |   1 +
 src/core/config/mod.rs        |  40 +++-
 src/main/Cargo.toml           |   1 +
 src/service/Cargo.toml        |   3 +
 src/service/media/blurhash.rs | 159 +++++++++++++++
 src/service/media/mod.rs      |   3 +-
 11 files changed, 621 insertions(+), 7 deletions(-)
 create mode 100644 src/service/media/blurhash.rs

diff --git a/Cargo.lock b/Cargo.lock
index e379aebb..b710d6fc 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -26,6 +26,12 @@ dependencies = [
  "memchr",
 ]
 
+[[package]]
+name = "aligned-vec"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4aa90d7ce82d4be67b64039a3d588d38dbcc6736577de4a847025ce5b0c468d1"
+
 [[package]]
 name = "alloc-no-stdlib"
 version = "2.0.4"
@@ -53,12 +59,29 @@ version = "1.0.95"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04"
 
+[[package]]
+name = "arbitrary"
+version = "1.4.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223"
+
 [[package]]
 name = "arc-swap"
 version = "1.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457"
 
+[[package]]
+name = "arg_enum_proc_macro"
+version = "0.3.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ae92a5119aa49cdbcf6b9f893fe4e1d98b04ccbf82ee0584ad948a44a734dea"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.96",
+]
+
 [[package]]
 name = "argon2"
 version = "0.5.3"
@@ -173,6 +196,29 @@ version = "1.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26"
 
+[[package]]
+name = "av1-grain"
+version = "0.2.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6678909d8c5d46a42abcf571271e15fdbc0a225e3646cf23762cd415046c78bf"
+dependencies = [
+ "anyhow",
+ "arrayvec",
+ "log",
+ "nom",
+ "num-rational",
+ "v_frame",
+]
+
+[[package]]
+name = "avif-serialize"
+version = "0.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e335041290c43101ca215eed6f43ec437eb5a42125573f600fc3fa42b9bddd62"
+dependencies = [
+ "arrayvec",
+]
+
 [[package]]
 name = "aws-lc-rs"
 version = "1.12.1"
@@ -385,6 +431,12 @@ dependencies = [
  "which",
 ]
 
+[[package]]
+name = "bit_field"
+version = "0.10.2" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc827186963e592360843fb5ba4b973e145841266c1357f7180c43526f2e5b61" + [[package]] name = "bitflags" version = "1.3.2" @@ -397,6 +449,12 @@ version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" +[[package]] +name = "bitstream-io" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6099cdc01846bc367c4e7dd630dc5966dccf36b652fae7a74e17b640411a91b2" + [[package]] name = "blake2" version = "0.10.6" @@ -415,6 +473,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "blurhash" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e79769241dcd44edf79a732545e8b5cec84c247ac060f5252cd51885d093a8fc" +dependencies = [ + "image", +] + [[package]] name = "brotli" version = "7.0.0" @@ -436,6 +503,12 @@ dependencies = [ "alloc-stdlib", ] +[[package]] +name = "built" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c360505aed52b7ec96a3636c3f039d99103c37d1d9b4f7a8c743d3ea9ffcd03b" + [[package]] name = "bumpalo" version = "3.16.0" @@ -513,6 +586,16 @@ dependencies = [ "nom", ] +[[package]] +name = "cfg-expr" +version = "0.15.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d067ad48b8650848b989a59a86c6c36a995d02d2bf778d45c3c5d57bc2718f02" +dependencies = [ + "smallvec", + "target-lexicon", +] + [[package]] name = "cfg-if" version = "1.0.0" @@ -822,6 +905,7 @@ dependencies = [ "arrayvec", "async-trait", "base64 0.22.1", + "blurhash", "bytes", "conduwuit_core", "conduwuit_database", @@ -1071,6 +1155,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "crunchy" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" + [[package]] name = "crypto-common" version = "0.1.6" @@ -1252,7 +1342,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -1275,6 +1365,21 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "exr" +version = "1.73.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f83197f59927b46c04a183a619b7c29df34e63e63c7869320862268c0ef687e0" +dependencies = [ + "bit_field", + "half", + "lebe", + "miniz_oxide", + "rayon-core", + "smallvec", + "zune-inflate", +] + [[package]] name = "fdeflate" version = "0.3.7" @@ -1519,6 +1624,16 @@ dependencies = [ "tracing", ] +[[package]] +name = "half" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "cfg-if", + "crunchy", +] + [[package]] name = "hardened_malloc-rs" version = "0.1.2+12" @@ -1973,10 +2088,16 @@ dependencies = [ "bytemuck", "byteorder-lite", "color_quant", + "exr", "gif", "image-webp", "num-traits", "png", + "qoi", + "ravif", + "rayon", + "rgb", + "tiff", "zune-core", "zune-jpeg", ] @@ -1991,6 +2112,12 @@ dependencies = [ "quick-error 2.0.1", ] +[[package]] +name = "imgref" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0263a3d970d5c054ed9312c0057b4f3bde9c0b33836d3637361d4a9e6e7a408" 
+ [[package]] name = "indexmap" version = "1.9.3" @@ -2024,6 +2151,17 @@ version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" +[[package]] +name = "interpolate_name" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c34819042dc3d3971c46c2190835914dfbe0c3c13f61449b2997f4e9722dfa60" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "ipaddress" version = "0.1.3" @@ -2089,6 +2227,12 @@ dependencies = [ "libc", ] +[[package]] +name = "jpeg-decoder" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5d4a7da358eff58addd2877a45865158f0d78c911d43a5784ceb7bbf52833b0" + [[package]] name = "js-sys" version = "0.3.77" @@ -2172,12 +2316,28 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" +[[package]] +name = "lebe" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03087c2bad5e1034e8cace5926dec053fb3790248370865f5117a7d0213354c8" + [[package]] name = "libc" version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" +[[package]] +name = "libfuzzer-sys" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf78f52d400cf2d84a3a973a78a592b4adc535739e0a5597a0da6f0c357adc75" +dependencies = [ + "arbitrary", + "cc", +] + [[package]] name = "libloading" version = "0.8.6" @@ -2185,7 +2345,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -2243,6 +2403,15 @@ dependencies = [ "futures-sink", ] +[[package]] +name = "loop9" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fae87c125b03c1d2c0150c90365d7d6bcc53fb73a9acaef207d2d065860f062" +dependencies = [ + "imgref", +] + [[package]] name = "lru-cache" version = "0.1.2" @@ -2321,6 +2490,16 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" +[[package]] +name = "maybe-rayon" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ea1f30cedd69f0a2954655f7188c6a834246d2bcf1e315e2ac40c4b24dc9519" +dependencies = [ + "cfg-if", + "rayon", +] + [[package]] name = "memchr" version = "2.7.4" @@ -2434,6 +2613,12 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9e591e719385e6ebaeb5ce5d3887f7d5676fceca6411d1925ccc95745f3d6f7" +[[package]] +name = "noop_proc_macro" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0676bb32a98c1a483ce53e500a81ad9c3d5b3f7c920c28c24e9cb0980d0b5bc8" + [[package]] name = "nu-ansi-term" version = "0.46.0" @@ -2483,6 +2668,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" +[[package]] +name = "num-derive" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.96", +] + [[package]] name = "num-integer" version = "0.1.46" @@ -2907,6 +3103,25 @@ dependencies = [ "yansi", ] +[[package]] +name = "profiling" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afbdc74edc00b6f6a218ca6a5364d6226a259d4b8ea1af4a0ea063f27e179f4d" +dependencies = [ + "profiling-procmacros", +] + +[[package]] +name = "profiling-procmacros" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a65f2e60fbf1063868558d69c6beacf412dc755f9fc020f514b7955fc914fe30" +dependencies = [ + "quote", + "syn 2.0.96", +] + [[package]] name = "prost" version = "0.13.4" @@ -2957,6 +3172,15 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "007d8adb5ddab6f8e3f491ac63566a7d5002cc7ed73901f72057943fa71ae1ae" +[[package]] +name = "qoi" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f6d64c71eb498fe9eae14ce4ec935c555749aef511cca85b5568910d6e48001" +dependencies = [ + "bytemuck", +] + [[package]] name = "quick-error" version = "1.2.3" @@ -3018,7 +3242,7 @@ dependencies = [ "once_cell", "socket2", "tracing", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -3060,6 +3284,76 @@ dependencies = [ "getrandom", ] +[[package]] +name = "rav1e" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd87ce80a7665b1cce111f8a16c1f3929f6547ce91ade6addf4ec86a8dda5ce9" +dependencies = [ + "arbitrary", + "arg_enum_proc_macro", + "arrayvec", + "av1-grain", + "bitstream-io", + "built", + "cfg-if", + "interpolate_name", + "itertools 0.12.1", + "libc", + "libfuzzer-sys", + "log", + "maybe-rayon", + "new_debug_unreachable", + "noop_proc_macro", + "num-derive", + "num-traits", + "once_cell", + "paste", + "profiling", + "rand", + "rand_chacha", + "simd_helpers", + "system-deps", + "thiserror 1.0.69", + "v_frame", + "wasm-bindgen", +] + +[[package]] +name = "ravif" +version = "0.11.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2413fd96bd0ea5cdeeb37eaf446a22e6ed7b981d792828721e74ded1980a45c6" +dependencies = [ + "avif-serialize", + "imgref", + "loop9", + "quick-error 2.0.1", + "rav1e", + "rayon", + "rgb", +] + +[[package]] +name = "rayon" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" +dependencies = [ + "either", + "rayon-core", +] + +[[package]] +name = "rayon-core" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" +dependencies = [ + "crossbeam-deque", + "crossbeam-utils", +] + [[package]] name = "redox_syscall" version = "0.5.8" @@ -3172,6 +3466,12 @@ dependencies = [ "quick-error 1.2.3", ] +[[package]] +name = "rgb" +version = "0.8.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57397d16646700483b67d2dd6511d79318f9d057fdbd21a4066aeac8b41d310a" + [[package]] name = "ring" version = "0.17.8" @@ -3479,7 +3779,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -3945,6 +4245,15 @@ version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"d66dc143e6b11c1eddc06d5c423cfc97062865baf299914ab64caa38182078fe" +[[package]] +name = "simd_helpers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95890f873bec569a0362c235787f3aca6e1e887302ba4840839bcc6459c42da6" +dependencies = [ + "quote", +] + [[package]] name = "siphasher" version = "0.3.11" @@ -4096,6 +4405,25 @@ dependencies = [ "syn 2.0.96", ] +[[package]] +name = "system-deps" +version = "6.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3e535eb8dded36d55ec13eddacd30dec501792ff23a0b1682c38601b8cf2349" +dependencies = [ + "cfg-expr", + "heck", + "pkg-config", + "toml", + "version-compare", +] + +[[package]] +name = "target-lexicon" +version = "0.12.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" + [[package]] name = "tendril" version = "0.4.3" @@ -4205,6 +4533,17 @@ dependencies = [ "threadpool", ] +[[package]] +name = "tiff" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba1310fcea54c6a9a4fd1aad794ecc02c31682f6bfbecdf460bf19533eed1e3e" +dependencies = [ + "flate2", + "jpeg-decoder", + "weezl", +] + [[package]] name = "tikv-jemalloc-ctl" version = "0.6.0" @@ -4744,6 +5083,17 @@ dependencies = [ "serde", ] +[[package]] +name = "v_frame" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d6f32aaa24bacd11e488aa9ba66369c7cd514885742c9fe08cfe85884db3e92b" +dependencies = [ + "aligned-vec", + "num-traits", + "wasm-bindgen", +] + [[package]] name = "valuable" version = "0.1.1" @@ -4756,6 +5106,12 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" +[[package]] +name = "version-compare" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "852e951cb7832cb45cb1169900d19760cfa39b82bc0ea9c0e5a14ae88411c98b" + [[package]] name = "version_check" version = "0.9.5" @@ -5324,6 +5680,15 @@ version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f423a2c17029964870cfaabb1f13dfab7d092a62a29a89264f4d36990ca414a" +[[package]] +name = "zune-inflate" +version = "0.2.54" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73ab332fe2f6680068f3582b16a24f90ad7096d5d39b974d1c0aff0125116f02" +dependencies = [ + "simd-adler32", +] + [[package]] name = "zune-jpeg" version = "0.4.14" diff --git a/Cargo.toml b/Cargo.toml index 1cf787c6..c580d22d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -179,7 +179,7 @@ version = "0.5.3" features = ["alloc", "rand"] default-features = false -# Used to generate thumbnails for images +# Used to generate thumbnails for images & blurhashes [workspace.dependencies.image] version = "0.25.5" default-features = false @@ -190,6 +190,12 @@ features = [ "webp", ] +[workspace.dependencies.blurhash] +version = "0.2.3" +default-features = false +features = [ + "fast-linear-to-srgb","image" +] # logging [workspace.dependencies.log] version = "0.4.22" diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 3e64522c..f9da856d 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1607,3 +1607,21 @@ # This item is undocumented. Please contribute documentation for it. 
# #support_mxid = + +[global.blurhashing] + +# blurhashing x component, 4 is recommended by https://blurha.sh/ +# +#components_x = 4 + +# blurhashing y component, 3 is recommended by https://blurha.sh/ +# +#components_y = 3 + +# Max raw size that the server will blurhash, this is the size of the +# image after converting it to raw data, it should be higher than the +# upload limit but not too high. The higher it is the higher the +# potential load will be for clients requesting blurhashes. The default +# is 33.55MB. Setting it to 0 disables blurhashing. +# +#blurhash_max_raw_size = 33554432 diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index 385e786f..8a5ef3f0 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -17,6 +17,7 @@ crate-type = [ ] [features] +blurhashing=[] element_hacks = [] release_max_log_level = [ "tracing/max_level_trace", diff --git a/src/api/client/media.rs b/src/api/client/media.rs index afbc218a..115f2581 100644 --- a/src/api/client/media.rs +++ b/src/api/client/media.rs @@ -62,6 +62,27 @@ pub(crate) async fn create_content_route( media_id: &utils::random_string(MXC_LENGTH), }; + #[cfg(feature = "blurhashing")] + { + if body.generate_blurhash { + let (blurhash, create_media_result) = tokio::join!( + services + .media + .create_blurhash(&body.file, content_type, filename), + services.media.create( + &mxc, + Some(user), + Some(&content_disposition), + content_type, + &body.file + ) + ); + return create_media_result.map(|()| create_content::v3::Response { + content_uri: mxc.to_string().into(), + blurhash, + }); + } + } services .media .create(&mxc, Some(user), Some(&content_disposition), content_type, &body.file) diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index ef2df4ff..5d46ec3b 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -54,6 +54,7 @@ sentry_telemetry = [] conduwuit_mods = [ "dep:libloading" ] +blurhashing = [] [dependencies] argon2.workspace = true diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index ff80d1cf..9514f7a0 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -52,7 +52,7 @@ use crate::{err, error::Error, utils::sys, Result}; ### For more information, see: ### https://conduwuit.puppyirl.gay/configuration.html "#, - ignore = "catchall well_known tls" + ignore = "catchall well_known tls blurhashing" )] pub struct Config { /// The server_name is the pretty name of this server. 
It is used as a @@ -1789,6 +1789,9 @@ pub struct Config { #[serde(default = "true_fn")] pub config_reload_signal: bool, + // external structure; separate section + #[serde(default)] + pub blurhashing: BlurhashConfig, #[serde(flatten)] #[allow(clippy::zero_sized_map_values)] // this is a catchall, the map shouldn't be zero at runtime @@ -1839,6 +1842,31 @@ pub struct WellKnownConfig { pub support_mxid: Option, } +#[derive(Clone, Copy, Debug, Deserialize, Default)] +#[allow(rustdoc::broken_intra_doc_links, rustdoc::bare_urls)] +#[config_example_generator(filename = "conduwuit-example.toml", section = "global.blurhashing")] +pub struct BlurhashConfig { + /// blurhashing x component, 4 is recommended by https://blurha.sh/ + /// + /// default: 4 + #[serde(default = "default_blurhash_x_component")] + pub components_x: u32, + /// blurhashing y component, 3 is recommended by https://blurha.sh/ + /// + /// default: 3 + #[serde(default = "default_blurhash_y_component")] + pub components_y: u32, + /// Max raw size that the server will blurhash, this is the size of the + /// image after converting it to raw data, it should be higher than the + /// upload limit but not too high. The higher it is the higher the + /// potential load will be for clients requesting blurhashes. The default + /// is 33.55MB. Setting it to 0 disables blurhashing. + /// + /// default: 33554432 + #[serde(default = "default_blurhash_max_raw_size")] + pub blurhash_max_raw_size: u64, +} + #[derive(Deserialize, Clone, Debug)] #[serde(transparent)] struct ListeningPort { @@ -2210,3 +2238,13 @@ fn default_client_response_timeout() -> u64 { 120 } fn default_client_shutdown_timeout() -> u64 { 15 } fn default_sender_shutdown_timeout() -> u64 { 5 } + +// blurhashing defaults recommended by https://blurha.sh/ +// 2^25 +pub(super) fn default_blurhash_max_raw_size() -> u64 { 33_554_432 } + +pub(super) fn default_blurhash_x_component() -> u32 { 4 } + +pub(super) fn default_blurhash_y_component() -> u32 { 3 } + +// end recommended & blurhashing defaults diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index f774c37a..7e1cb86b 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -101,6 +101,7 @@ perf_measurements = [ "conduwuit-core/perf_measurements", "conduwuit-core/sentry_telemetry", ] +blurhashing =["conduwuit-service/blurhashing","conduwuit-core/blurhashing","conduwuit-api/blurhashing"] # increases performance, reduces build times, and reduces binary size by not compiling or # genreating code for log level filters that users will generally not use (debug and trace) release_max_log_level = [ diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml index c4f75453..30183179 100644 --- a/src/service/Cargo.toml +++ b/src/service/Cargo.toml @@ -44,6 +44,7 @@ url_preview = [ zstd_compression = [ "reqwest/zstd", ] +blurhashing = ["dep:image","dep:blurhash"] [dependencies] arrayvec.workspace = true @@ -82,6 +83,8 @@ tracing.workspace = true url.workspace = true webpage.workspace = true webpage.optional = true +blurhash.workspace = true +blurhash.optional = true [lints] workspace = true diff --git a/src/service/media/blurhash.rs b/src/service/media/blurhash.rs new file mode 100644 index 00000000..c470925c --- /dev/null +++ b/src/service/media/blurhash.rs @@ -0,0 +1,159 @@ +use std::{fmt::Display, io::Cursor, path::Path}; + +use blurhash::encode_image; +use conduwuit::{config::BlurhashConfig as CoreBlurhashConfig, debug_error, implement, trace}; +use image::{DynamicImage, ImageDecoder, ImageError, ImageFormat, ImageReader}; + +use 
super::Service; +#[implement(Service)] +pub async fn create_blurhash( + &self, + file: &[u8], + content_type: Option<&str>, + file_name: Option<&str>, +) -> Option { + let config = BlurhashConfig::from(self.services.server.config.blurhashing); + if config.size_limit == 0 { + trace!("since 0 means disabled blurhashing, skipped blurhashing logic"); + return None; + } + let file_data = file.to_owned(); + let content_type = content_type.map(String::from); + let file_name = file_name.map(String::from); + + let blurhashing_result = tokio::task::spawn_blocking(move || { + get_blurhash_from_request(&file_data, content_type, file_name, config) + }) + .await + .expect("no join error"); + + match blurhashing_result { + | Ok(result) => Some(result), + | Err(e) => { + debug_error!("Error when blurhashing: {e}"); + None + }, + } +} + +/// Returns the blurhash or a blurhash error which implements Display. +fn get_blurhash_from_request( + data: &[u8], + mime: Option, + filename: Option, + config: BlurhashConfig, +) -> Result { + // Get format image is supposed to be in + let format = get_format_from_data_mime_and_filename(data, mime, filename)?; + // Get the image reader for said image format + let decoder = get_image_decoder_with_format_and_data(format, data)?; + // Check image size makes sense before unpacking whole image + if is_image_above_size_limit(&decoder, config) { + return Err(BlurhashingError::ImageTooLarge); + } + // decode the image finally + let image = DynamicImage::from_decoder(decoder)?; + + blurhash_an_image(&image, config) +} + +/// Gets the Image Format value from the data,mime, and filename +/// It first checks if the mime is a valid image format +/// Then it checks if the filename has a format, otherwise just guess based on +/// the binary data Assumes that mime and filename extension won't be for a +/// different file format than file. +fn get_format_from_data_mime_and_filename( + data: &[u8], + mime: Option, + filename: Option, +) -> Result { + let mut image_format = None; + if let Some(mime) = mime { + image_format = ImageFormat::from_mime_type(mime); + } + if let (Some(filename), None) = (filename, image_format) { + if let Some(extension) = Path::new(&filename).extension() { + image_format = ImageFormat::from_mime_type(extension.to_string_lossy()); + } + } + + if let Some(format) = image_format { + Ok(format) + } else { + image::guess_format(data).map_err(Into::into) + } +} + +fn get_image_decoder_with_format_and_data( + image_format: ImageFormat, + data: &[u8], +) -> Result, BlurhashingError> { + let mut image_reader = ImageReader::new(Cursor::new(data)); + image_reader.set_format(image_format); + Ok(Box::new(image_reader.into_decoder()?)) +} + +fn is_image_above_size_limit( + decoder: &T, + blurhash_config: BlurhashConfig, +) -> bool { + decoder.total_bytes() >= blurhash_config.size_limit +} +#[inline] +fn blurhash_an_image( + image: &DynamicImage, + blurhash_config: BlurhashConfig, +) -> Result { + Ok(encode_image( + blurhash_config.components_x, + blurhash_config.components_y, + &image.to_rgba8(), + )?) 
+} +#[derive(Clone, Copy)] +pub struct BlurhashConfig { + components_x: u32, + components_y: u32, + /// size limit in bytes + size_limit: u64, +} + +impl From for BlurhashConfig { + fn from(value: CoreBlurhashConfig) -> Self { + Self { + components_x: value.components_x, + components_y: value.components_y, + size_limit: value.blurhash_max_raw_size, + } + } +} + +#[derive(Debug)] +pub(crate) enum BlurhashingError { + ImageError(Box), + HashingLibError(Box), + ImageTooLarge, +} +impl From for BlurhashingError { + fn from(value: ImageError) -> Self { Self::ImageError(Box::new(value)) } +} + +impl From for BlurhashingError { + fn from(value: blurhash::Error) -> Self { Self::HashingLibError(Box::new(value)) } +} + +impl Display for BlurhashingError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Blurhash Error:")?; + match &self { + | Self::ImageTooLarge => write!(f, "Image was too large to blurhash")?, + | Self::HashingLibError(e) => + write!(f, "There was an error with the blurhashing library => {e}")?, + + | Self::ImageError(e) => + write!(f, "There was an error with the image loading library => {e}")?, + }; + + Ok(()) + } +} diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 0d98853d..7775173a 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -1,10 +1,11 @@ +#[cfg(feature = "blurhashing")] +pub mod blurhash; mod data; pub(super) mod migrations; mod preview; mod remote; mod tests; mod thumbnail; - use std::{path::PathBuf, sync::Arc, time::SystemTime}; use async_trait::async_trait; From 442bb9889c45e5b17cdf5c7fd90e4751f7582400 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 4 Feb 2025 02:24:50 +0000 Subject: [PATCH 031/310] improvements on blurhashing feature Signed-off-by: Jason Volk --- Cargo.toml | 4 +- src/api/Cargo.toml | 1 - src/api/client/media.rs | 44 +++++-------- src/core/Cargo.toml | 1 - src/main/Cargo.toml | 4 +- src/service/media/blurhash.rs | 113 +++++++++++++++++++--------------- src/service/media/mod.rs | 1 - 7 files changed, 87 insertions(+), 81 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index c580d22d..b25d9175 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -194,8 +194,10 @@ features = [ version = "0.2.3" default-features = false features = [ - "fast-linear-to-srgb","image" + "fast-linear-to-srgb", + "image", ] + # logging [workspace.dependencies.log] version = "0.4.22" diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index 8a5ef3f0..385e786f 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -17,7 +17,6 @@ crate-type = [ ] [features] -blurhashing=[] element_hacks = [] release_max_log_level = [ "tracing/max_level_trace", diff --git a/src/api/client/media.rs b/src/api/client/media.rs index 115f2581..0cff8185 100644 --- a/src/api/client/media.rs +++ b/src/api/client/media.rs @@ -57,40 +57,28 @@ pub(crate) async fn create_content_route( let filename = body.filename.as_deref(); let content_type = body.content_type.as_deref(); let content_disposition = make_content_disposition(None, content_type, filename); - let mxc = Mxc { + let ref mxc = Mxc { server_name: services.globals.server_name(), media_id: &utils::random_string(MXC_LENGTH), }; - #[cfg(feature = "blurhashing")] - { - if body.generate_blurhash { - let (blurhash, create_media_result) = tokio::join!( - services - .media - .create_blurhash(&body.file, content_type, filename), - services.media.create( - &mxc, - Some(user), - Some(&content_disposition), - content_type, - &body.file - ) - ); - return 
create_media_result.map(|()| create_content::v3::Response { - content_uri: mxc.to_string().into(), - blurhash, - }); - } - } services .media - .create(&mxc, Some(user), Some(&content_disposition), content_type, &body.file) - .await - .map(|()| create_content::v3::Response { - content_uri: mxc.to_string().into(), - blurhash: None, - }) + .create(mxc, Some(user), Some(&content_disposition), content_type, &body.file) + .await?; + + let blurhash = body.generate_blurhash.then(|| { + services + .media + .create_blurhash(&body.file, content_type, filename) + .ok() + .flatten() + }); + + Ok(create_content::v3::Response { + content_uri: mxc.to_string().into(), + blurhash: blurhash.flatten(), + }) } /// # `GET /_matrix/client/v1/media/thumbnail/{serverName}/{mediaId}` diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index 5d46ec3b..ef2df4ff 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -54,7 +54,6 @@ sentry_telemetry = [] conduwuit_mods = [ "dep:libloading" ] -blurhashing = [] [dependencies] argon2.workspace = true diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index 7e1cb86b..87ca48c8 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -49,6 +49,9 @@ default = [ "zstd_compression", ] +blurhashing = [ + "conduwuit-service/blurhashing", +] brotli_compression = [ "conduwuit-api/brotli_compression", "conduwuit-core/brotli_compression", @@ -101,7 +104,6 @@ perf_measurements = [ "conduwuit-core/perf_measurements", "conduwuit-core/sentry_telemetry", ] -blurhashing =["conduwuit-service/blurhashing","conduwuit-core/blurhashing","conduwuit-api/blurhashing"] # increases performance, reduces build times, and reduces binary size by not compiling or # genreating code for log level filters that users will generally not use (debug and trace) release_max_log_level = [ diff --git a/src/service/media/blurhash.rs b/src/service/media/blurhash.rs index c470925c..aa6685b2 100644 --- a/src/service/media/blurhash.rs +++ b/src/service/media/blurhash.rs @@ -1,56 +1,58 @@ -use std::{fmt::Display, io::Cursor, path::Path}; +use std::{error::Error, ffi::OsStr, fmt::Display, io::Cursor, path::Path}; -use blurhash::encode_image; -use conduwuit::{config::BlurhashConfig as CoreBlurhashConfig, debug_error, implement, trace}; +use conduwuit::{config::BlurhashConfig as CoreBlurhashConfig, err, implement, Result}; use image::{DynamicImage, ImageDecoder, ImageError, ImageFormat, ImageReader}; use super::Service; #[implement(Service)] -pub async fn create_blurhash( +pub fn create_blurhash( &self, file: &[u8], content_type: Option<&str>, file_name: Option<&str>, -) -> Option { +) -> Result> { + if !cfg!(feature = "blurhashing") { + return Ok(None); + } + let config = BlurhashConfig::from(self.services.server.config.blurhashing); + + // since 0 means disabled blurhashing, skipped blurhashing if config.size_limit == 0 { - trace!("since 0 means disabled blurhashing, skipped blurhashing logic"); - return None; + return Ok(None); } - let file_data = file.to_owned(); - let content_type = content_type.map(String::from); - let file_name = file_name.map(String::from); - let blurhashing_result = tokio::task::spawn_blocking(move || { - get_blurhash_from_request(&file_data, content_type, file_name, config) - }) - .await - .expect("no join error"); - - match blurhashing_result { - | Ok(result) => Some(result), - | Err(e) => { - debug_error!("Error when blurhashing: {e}"); - None - }, - } + get_blurhash_from_request(file, content_type, file_name, config) + .map_err(|e| err!(debug_error!("blurhashing error: {e}"))) 
+ .map(Some) } /// Returns the blurhash or a blurhash error which implements Display. +#[tracing::instrument( + name = "blurhash", + level = "debug", + skip(data), + fields( + bytes = data.len(), + ), +)] fn get_blurhash_from_request( data: &[u8], - mime: Option, - filename: Option, + mime: Option<&str>, + filename: Option<&str>, config: BlurhashConfig, ) -> Result { // Get format image is supposed to be in let format = get_format_from_data_mime_and_filename(data, mime, filename)?; + // Get the image reader for said image format let decoder = get_image_decoder_with_format_and_data(format, data)?; + // Check image size makes sense before unpacking whole image if is_image_above_size_limit(&decoder, config) { return Err(BlurhashingError::ImageTooLarge); } + // decode the image finally let image = DynamicImage::from_decoder(decoder)?; @@ -64,24 +66,17 @@ fn get_blurhash_from_request( /// different file format than file. fn get_format_from_data_mime_and_filename( data: &[u8], - mime: Option, - filename: Option, + mime: Option<&str>, + filename: Option<&str>, ) -> Result { - let mut image_format = None; - if let Some(mime) = mime { - image_format = ImageFormat::from_mime_type(mime); - } - if let (Some(filename), None) = (filename, image_format) { - if let Some(extension) = Path::new(&filename).extension() { - image_format = ImageFormat::from_mime_type(extension.to_string_lossy()); - } - } + let extension = filename + .map(Path::new) + .and_then(Path::extension) + .map(OsStr::to_string_lossy); - if let Some(format) = image_format { - Ok(format) - } else { - image::guess_format(data).map_err(Into::into) - } + mime.or(extension.as_deref()) + .and_then(ImageFormat::from_mime_type) + .map_or_else(|| image::guess_format(data).map_err(Into::into), Ok) } fn get_image_decoder_with_format_and_data( @@ -99,23 +94,37 @@ fn is_image_above_size_limit( ) -> bool { decoder.total_bytes() >= blurhash_config.size_limit } + +#[cfg(feature = "blurhashing")] +#[tracing::instrument(name = "encode", level = "debug", skip_all)] #[inline] fn blurhash_an_image( image: &DynamicImage, blurhash_config: BlurhashConfig, ) -> Result { - Ok(encode_image( + Ok(blurhash::encode_image( blurhash_config.components_x, blurhash_config.components_y, &image.to_rgba8(), )?) 
} -#[derive(Clone, Copy)] + +#[cfg(not(feature = "blurhashing"))] +#[inline] +fn blurhash_an_image( + _image: &DynamicImage, + _blurhash_config: BlurhashConfig, +) -> Result { + Err(BlurhashingError::Unavailable) +} + +#[derive(Clone, Copy, Debug)] pub struct BlurhashConfig { - components_x: u32, - components_y: u32, + pub components_x: u32, + pub components_y: u32, + /// size limit in bytes - size_limit: u64, + pub size_limit: u64, } impl From for BlurhashConfig { @@ -129,15 +138,20 @@ impl From for BlurhashConfig { } #[derive(Debug)] -pub(crate) enum BlurhashingError { +pub enum BlurhashingError { + HashingLibError(Box), ImageError(Box), - HashingLibError(Box), ImageTooLarge, + + #[cfg(not(feature = "blurhashing"))] + Unavailable, } + impl From for BlurhashingError { fn from(value: ImageError) -> Self { Self::ImageError(Box::new(value)) } } +#[cfg(feature = "blurhashing")] impl From for BlurhashingError { fn from(value: blurhash::Error) -> Self { Self::HashingLibError(Box::new(value)) } } @@ -152,6 +166,9 @@ impl Display for BlurhashingError { | Self::ImageError(e) => write!(f, "There was an error with the image loading library => {e}")?, + + #[cfg(not(feature = "blurhashing"))] + | Self::Unavailable => write!(f, "Blurhashing is not supported")?, }; Ok(()) diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 7775173a..f5913f43 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -1,4 +1,3 @@ -#[cfg(feature = "blurhashing")] pub mod blurhash; mod data; pub(super) mod migrations; From 04656a78865dfd60176965c5ae531d1939e0dd7d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 5 Feb 2025 03:00:47 +0000 Subject: [PATCH 032/310] fix spaces pagination bug Signed-off-by: Jason Volk --- src/service/rooms/spaces/mod.rs | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 1ee2727c..11794752 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -582,7 +582,7 @@ impl Service { parents.pop_front(); parents.push_back(room); - let short_room_ids: Vec<_> = parents + let next_short_room_ids: Vec<_> = parents .iter() .stream() .filter_map(|room_id| async move { @@ -591,16 +591,18 @@ impl Service { .collect() .await; - Some( - PaginationToken { - short_room_ids, - limit: UInt::new(max_depth) - .expect("When sent in request it must have been valid UInt"), - max_depth: UInt::new(max_depth) - .expect("When sent in request it must have been valid UInt"), - suggested_only, - } - .to_string(), + (next_short_room_ids != short_room_ids && !next_short_room_ids.is_empty()).then( + || { + PaginationToken { + short_room_ids: next_short_room_ids, + limit: UInt::new(max_depth) + .expect("When sent in request it must have been valid UInt"), + max_depth: UInt::new(max_depth) + .expect("When sent in request it must have been valid UInt"), + suggested_only, + } + .to_string() + }, ) } else { None From 9158edfb7c98229af43b2124e972723b1ab4e75a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 5 Feb 2025 05:10:30 +0000 Subject: [PATCH 033/310] fix empty join timeline bug Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 48 ++++++++++++++++++++++++++++----------- 1 file changed, 35 insertions(+), 13 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index a97e4329..1d1a91ba 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -691,7 +691,7 @@ async fn load_joined_room( heroes, 
joined_member_count, invited_member_count, - state_events, + mut state_events, mut device_list_updates, left_encrypted_users, } = calculate_state_changes( @@ -708,6 +708,39 @@ async fn load_joined_room( .boxed() .await?; + let is_sender_membership = |pdu: &PduEvent| { + pdu.kind == StateEventType::RoomMember.into() + && pdu + .state_key + .as_deref() + .is_some_and(is_equal_to!(sender_user.as_str())) + }; + + let joined_sender_member: Option<_> = (joined_since_last_sync && timeline_pdus.is_empty()) + .then(|| { + state_events + .iter() + .position(is_sender_membership) + .map(|pos| state_events.swap_remove(pos)) + }) + .flatten(); + + let prev_batch = timeline_pdus.first().map(at!(0)).or_else(|| { + joined_sender_member + .is_some() + .then_some(since) + .map(Into::into) + }); + + let room_events = timeline_pdus + .into_iter() + .stream() + .wide_filter_map(|item| ignored_filter(services, item, sender_user)) + .map(at!(1)) + .chain(joined_sender_member.into_iter().stream()) + .map(|pdu| pdu.to_sync_room_event()) + .collect::>(); + let account_data_events = services .account_data .changes_since(Some(room_id), sender_user, since, Some(next_batch)) @@ -722,13 +755,6 @@ async fn load_joined_room( .map(ToOwned::to_owned) .collect::>(); - let room_events = timeline_pdus - .iter() - .stream() - .wide_filter_map(|item| ignored_filter(services, item.clone(), sender_user)) - .map(|(_, pdu)| pdu.to_sync_room_event()) - .collect(); - let send_notification_counts = last_notification_read.is_none_or(|count| count > since); let notification_count: OptionFuture<_> = send_notification_counts @@ -830,12 +856,8 @@ async fn load_joined_room( unread_notifications: UnreadNotificationsCount { highlight_count, notification_count }, timeline: Timeline { limited: limited || joined_since_last_sync, + prev_batch: prev_batch.as_ref().map(ToString::to_string), events: room_events, - prev_batch: timeline_pdus - .first() - .map(at!(0)) - .as_ref() - .map(ToString::to_string), }, state: RoomState { events: state_events From f80d85e1076f1f155a7484d5ad80acbb58a9b1ac Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 5 Feb 2025 01:43:27 -0500 Subject: [PATCH 034/310] add SIGUSR1 systemctl reload config support to systemd units Signed-off-by: strawberry --- arch/conduwuit.service | 3 ++- debian/conduwuit.service | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/conduwuit.service b/arch/conduwuit.service index 7c05c259..4b7853e3 100644 --- a/arch/conduwuit.service +++ b/arch/conduwuit.service @@ -7,7 +7,8 @@ RequiresMountsFor=/var/lib/private/conduwuit [Service] DynamicUser=yes -Type=notify +Type=notify-reload +ReloadSignal=SIGUSR1 AmbientCapabilities= CapabilityBoundingSet= diff --git a/debian/conduwuit.service b/debian/conduwuit.service index 3c2ec49d..452544bf 100644 --- a/debian/conduwuit.service +++ b/debian/conduwuit.service @@ -8,7 +8,8 @@ Documentation=https://conduwuit.puppyirl.gay/ DynamicUser=yes User=conduwuit Group=conduwuit -Type=notify +Type=notify-reload +ReloadSignal=SIGUSR1 Environment="CONDUWUIT_CONFIG=/etc/conduwuit/conduwuit.toml" From f6dfc9538f8b625c2ae28a462ecf4b7e3d208f85 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 5 Feb 2025 01:44:49 -0500 Subject: [PATCH 035/310] bump ruwuma to stop erroring on duplicate yaml values on appservice EDUs (we dont implement this atm anyways) Signed-off-by: strawberry --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 
b710d6fc..926099b5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3490,7 +3490,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "assign", "js_int", @@ -3512,7 +3512,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "js_int", "ruma-common", @@ -3524,7 +3524,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "as_variant", "assign", @@ -3547,7 +3547,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "as_variant", "base64 0.22.1", @@ -3578,7 +3578,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "as_variant", "indexmap 2.7.0", @@ -3603,7 +3603,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "bytes", "http", @@ -3621,7 +3621,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3630,7 +3630,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "js_int", "ruma-common", @@ -3640,7 +3640,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3655,7 +3655,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "js_int", "ruma-common", @@ -3667,7 +3667,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "headers", "http", @@ -3680,7 +3680,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3696,7 +3696,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b560338b2a50dbf61ecfe80808b9b095ad4cec00#b560338b2a50dbf61ecfe80808b9b095ad4cec00" +source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index b25d9175..ce483bbc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -342,7 +342,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "b560338b2a50dbf61ecfe80808b9b095ad4cec00" +rev = "517ac4572276a2e0ad587113776c544b51166f08" features = [ "compat", "rand", From fda8b3680986dc8e038d51b93f7d36bf5c991ef6 Mon Sep 17 00:00:00 2001 From: strawberry Date: Wed, 5 Feb 2025 01:45:21 -0500 Subject: [PATCH 036/310] add more systemd notify integration with stopping/reloading/ready states Signed-off-by: strawberry --- src/core/server.rs | 12 ++++++++++-- src/router/run.rs | 4 ---- src/service/config/mod.rs | 8 ++++++++ 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/src/core/server.rs b/src/core/server.rs index 45ba7420..80493c94 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -69,6 +69,10 @@ impl Server { return Err!("Reloading not enabled"); } + #[cfg(all(feature = "systemd", target_os = "linux"))] + sd_notify::notify(true, &[sd_notify::NotifyState::Reloading]) + .expect("failed to notify systemd of reloading state"); + if self.reloading.swap(true, Ordering::AcqRel) { return Err!("Reloading already in progress"); } @@ -83,7 +87,7 @@ impl Server { }) } - pub fn restart(&self) -> Result<()> { + pub fn restart(&self) -> Result { if self.restarting.swap(true, Ordering::AcqRel) { return Err!("Restart already in progress"); } @@ -93,7 +97,11 @@ impl Server { }) } - pub fn shutdown(&self) -> Result<()> { + 
pub fn shutdown(&self) -> Result { + #[cfg(all(feature = "systemd", target_os = "linux"))] + sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]) + .expect("failed to notify systemd of stopping state"); + if self.stopping.swap(true, Ordering::AcqRel) { return Err!("Shutdown already in progress"); } diff --git a/src/router/run.rs b/src/router/run.rs index 26701735..024cb813 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ -100,10 +100,6 @@ pub(crate) async fn stop(services: Arc) -> Result<()> { ); } - #[cfg(all(feature = "systemd", target_os = "linux"))] - sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]) - .expect("failed to notify systemd of stopping state"); - info!("Shutdown complete."); Ok(()) } diff --git a/src/service/config/mod.rs b/src/service/config/mod.rs index 8bd09a52..c9ac37a3 100644 --- a/src/service/config/mod.rs +++ b/src/service/config/mod.rs @@ -43,7 +43,15 @@ impl Deref for Service { #[implement(Service)] fn handle_reload(&self) -> Result { if self.server.config.config_reload_signal { + #[cfg(all(feature = "systemd", target_os = "linux"))] + sd_notify::notify(true, &[sd_notify::NotifyState::Reloading]) + .expect("failed to notify systemd of reloading state"); + self.reload(iter::empty())?; + + #[cfg(all(feature = "systemd", target_os = "linux"))] + sd_notify::notify(true, &[sd_notify::NotifyState::Ready]) + .expect("failed to notify systemd of ready state"); } Ok(()) From 62d80b97e65237539a103ded87f4e650ddafe4b8 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 03:14:37 +0000 Subject: [PATCH 037/310] add systemd unit logging mode Signed-off-by: Jason Volk --- src/core/log/console.rs | 77 +++++++++++++++++++++++++++++++++--- src/core/log/mod.rs | 4 +- src/main/logging.rs | 4 +- src/service/admin/console.rs | 5 ++- 4 files changed, 78 insertions(+), 12 deletions(-) diff --git a/src/core/log/console.rs b/src/core/log/console.rs index 0bc44fa7..1f04ba26 100644 --- a/src/core/log/console.rs +++ b/src/core/log/console.rs @@ -1,3 +1,5 @@ +use std::{env, io, sync::LazyLock}; + use tracing::{ field::{Field, Visit}, Event, Level, Subscriber, @@ -7,12 +9,59 @@ use tracing_subscriber::{ fmt, fmt::{ format::{Compact, DefaultVisitor, Format, Full, Pretty, Writer}, - FmtContext, FormatEvent, FormatFields, + FmtContext, FormatEvent, FormatFields, MakeWriter, }, registry::LookupSpan, }; -use crate::{Config, Result}; +use crate::{apply, Config, Result}; + +static SYSTEMD_MODE: LazyLock = + LazyLock::new(|| env::var("SYSTEMD_EXEC_PID").is_ok() && env::var("JOURNAL_STREAM").is_ok()); + +pub struct ConsoleWriter { + stdout: io::Stdout, + stderr: io::Stderr, + _journal_stream: [u64; 2], + use_stderr: bool, +} + +impl ConsoleWriter { + #[must_use] + pub fn new(_config: &Config) -> Self { + let journal_stream = get_journal_stream(); + Self { + stdout: io::stdout(), + stderr: io::stderr(), + _journal_stream: journal_stream.into(), + use_stderr: journal_stream.0 != 0, + } + } +} + +impl<'a> MakeWriter<'a> for ConsoleWriter { + type Writer = &'a Self; + + fn make_writer(&'a self) -> Self::Writer { self } +} + +impl io::Write for &'_ ConsoleWriter { + fn write(&mut self, buf: &[u8]) -> io::Result { + if self.use_stderr { + self.stderr.lock().write(buf) + } else { + self.stdout.lock().write(buf) + } + } + + fn flush(&mut self) -> io::Result<()> { + if self.use_stderr { + self.stderr.lock().flush() + } else { + self.stdout.lock().flush() + } + } +} pub struct ConsoleFormat { _compact: Format, @@ -20,10 +69,6 @@ pub struct ConsoleFormat { pretty: Format, } 
-struct ConsoleVisitor<'a> { - visitor: DefaultVisitor<'a>, -} - impl ConsoleFormat { #[must_use] pub fn new(config: &Config) -> Self { @@ -68,6 +113,10 @@ where } } +struct ConsoleVisitor<'a> { + visitor: DefaultVisitor<'a>, +} + impl<'writer> FormatFields<'writer> for ConsoleFormat { fn format_fields(&self, writer: Writer<'writer>, fields: R) -> Result<(), std::fmt::Error> where @@ -92,3 +141,19 @@ impl Visit for ConsoleVisitor<'_> { self.visitor.record_debug(field, value); } } + +#[must_use] +fn get_journal_stream() -> (u64, u64) { + is_systemd_mode() + .then(|| env::var("JOURNAL_STREAM").ok()) + .flatten() + .as_deref() + .and_then(|s| s.split_once(':')) + .map(apply!(2, str::parse)) + .map(apply!(2, Result::unwrap_or_default)) + .unwrap_or((0, 0)) +} + +#[inline] +#[must_use] +pub fn is_systemd_mode() -> bool { *SYSTEMD_MODE } diff --git a/src/core/log/mod.rs b/src/core/log/mod.rs index 0c51a383..0c1840d0 100644 --- a/src/core/log/mod.rs +++ b/src/core/log/mod.rs @@ -2,14 +2,14 @@ pub mod capture; pub mod color; -mod console; +pub mod console; pub mod fmt; pub mod fmt_span; mod reload; mod suppress; pub use capture::Capture; -pub use console::ConsoleFormat; +pub use console::{is_systemd_mode, ConsoleFormat, ConsoleWriter}; pub use reload::{LogLevelReloadHandles, ReloadHandle}; pub use suppress::Suppress; pub use tracing::Level; diff --git a/src/main/logging.rs b/src/main/logging.rs index 85945e8a..35e482de 100644 --- a/src/main/logging.rs +++ b/src/main/logging.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use conduwuit::{ config::Config, debug_warn, err, - log::{capture, fmt_span, ConsoleFormat, LogLevelReloadHandles}, + log::{capture, fmt_span, ConsoleFormat, ConsoleWriter, LogLevelReloadHandles}, result::UnwrapOrErr, Result, }; @@ -30,7 +30,7 @@ pub(crate) fn init( .with_span_events(console_span_events) .event_format(ConsoleFormat::new(config)) .fmt_fields(ConsoleFormat::new(config)) - .map_writer(|w| w); + .with_writer(ConsoleWriter::new(config)); let (console_reload_filter, console_reload_handle) = reload::Layer::new(console_filter.clone()); diff --git a/src/service/admin/console.rs b/src/service/admin/console.rs index de201f4b..59b9a31b 100644 --- a/src/service/admin/console.rs +++ b/src/service/admin/console.rs @@ -1,10 +1,11 @@ #![cfg(feature = "console")] + use std::{ collections::VecDeque, sync::{Arc, Mutex}, }; -use conduwuit::{debug, defer, error, log, Server}; +use conduwuit::{debug, defer, error, log, log::is_systemd_mode, Server}; use futures::future::{AbortHandle, Abortable}; use ruma::events::room::message::RoomMessageEventContent; use rustyline_async::{Readline, ReadlineError, ReadlineEvent}; @@ -123,7 +124,7 @@ impl Console { } async fn readline(self: &Arc) -> Result { - let _suppression = log::Suppress::new(&self.server); + let _suppression = (!is_systemd_mode()).then(|| log::Suppress::new(&self.server)); let (mut readline, _writer) = Readline::new(PROMPT.to_owned())?; let self_ = Arc::clone(self); From 16b07ae3ecf6dee591b79dd6198cb3e5a99410be Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 6 Feb 2025 16:47:10 -0500 Subject: [PATCH 038/310] add default systemd support for a TTY to use console mode from Signed-off-by: strawberry --- arch/conduwuit.service | 12 ++++++++++++ debian/conduwuit.service | 12 ++++++++++++ 2 files changed, 24 insertions(+) diff --git a/arch/conduwuit.service b/arch/conduwuit.service index 4b7853e3..fa3616d8 100644 --- a/arch/conduwuit.service +++ b/arch/conduwuit.service @@ -10,6 +10,18 @@ DynamicUser=yes Type=notify-reload 
ReloadSignal=SIGUSR1 +TTYPath=/dev/tty25 +DeviceAllow=char-tty +StandardInput=tty-force +StandardOutput=tty +StandardError=journal+console +TTYReset=yes +# uncomment to allow buffer to be cleared every restart +TTYVTDisallocate=no + +TTYColumns=120 +TTYRows=40 + AmbientCapabilities= CapabilityBoundingSet= diff --git a/debian/conduwuit.service b/debian/conduwuit.service index 452544bf..4d6f4eef 100644 --- a/debian/conduwuit.service +++ b/debian/conduwuit.service @@ -11,6 +11,18 @@ Group=conduwuit Type=notify-reload ReloadSignal=SIGUSR1 +TTYPath=/dev/tty25 +DeviceAllow=char-tty +StandardInput=tty-force +StandardOutput=tty +StandardError=journal+console +TTYReset=yes +# uncomment to allow buffer to be cleared every restart +TTYVTDisallocate=no + +TTYColumns=120 +TTYRows=40 + Environment="CONDUWUIT_CONFIG=/etc/conduwuit/conduwuit.toml" ExecStart=/usr/sbin/conduwuit From f761d4d5c9e347699725bff0437a8df3b1b3db59 Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 6 Feb 2025 16:48:19 -0500 Subject: [PATCH 039/310] bump db version to 17, cleanup, rerun old migrations for users who downgraded Signed-off-by: strawberry --- src/service/globals/data.rs | 3 +-- src/service/migrations.rs | 32 +++++++++++++------------------- 2 files changed, 14 insertions(+), 21 deletions(-) diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 39cb9be1..26a18607 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -69,9 +69,8 @@ impl Data { } #[inline] - pub fn bump_database_version(&self, new_version: u64) -> Result<()> { + pub fn bump_database_version(&self, new_version: u64) { self.global.raw_put(b"version", new_version); - Ok(()) } #[inline] diff --git a/src/service/migrations.rs b/src/service/migrations.rs index 27b4ab5a..9c3ea293 100644 --- a/src/service/migrations.rs +++ b/src/service/migrations.rs @@ -27,15 +27,7 @@ use crate::{media, Services}; /// - If database is opened at lesser version we apply migrations up to this. /// Note that named-feature migrations may also be performed when opening at /// equal or lesser version. These are expected to be backward-compatible. -pub(crate) const DATABASE_VERSION: u64 = 13; - -/// Conduit's database version. -/// -/// Conduit bumped the database version to 16, but did not introduce any -/// breaking changes. Their database migrations are extremely fragile and risky, -/// and also do not really apply to us, so just to retain Conduit -> conduwuit -/// compatibility we'll check for both versions. 
-pub(crate) const CONDUIT_DATABASE_VERSION: u64 = 16; +pub(crate) const DATABASE_VERSION: u64 = 17; pub(crate) async fn migrations(services: &Services) -> Result<()> { let users_count = services.users.count().await; @@ -63,10 +55,7 @@ pub(crate) async fn migrations(services: &Services) -> Result<()> { async fn fresh(services: &Services) -> Result<()> { let db = &services.db; - services - .globals - .db - .bump_database_version(DATABASE_VERSION)?; + services.globals.db.bump_database_version(DATABASE_VERSION); db["global"].insert(b"feat_sha256_media", []); db["global"].insert(b"fix_bad_double_separator_in_state_cache", []); @@ -130,6 +119,7 @@ async fn migrate(services: &Services) -> Result<()> { .get(b"fix_referencedevents_missing_sep") .await .is_not_found() + || services.globals.db.database_version().await < 17 { fix_referencedevents_missing_sep(services).await?; } @@ -138,15 +128,19 @@ async fn migrate(services: &Services) -> Result<()> { .get(b"fix_readreceiptid_readreceipt_duplicates") .await .is_not_found() + || services.globals.db.database_version().await < 17 { fix_readreceiptid_readreceipt_duplicates(services).await?; } - let version_match = services.globals.db.database_version().await == DATABASE_VERSION - || services.globals.db.database_version().await == CONDUIT_DATABASE_VERSION; + if services.globals.db.database_version().await < 17 { + services.globals.db.bump_database_version(17); + info!("Migration: Bumped database version to 17"); + } - assert!( - version_match, + assert_eq!( + services.globals.db.database_version().await, + DATABASE_VERSION, "Failed asserting local database version {} is equal to known latest conduwuit database \ version {}", services.globals.db.database_version().await, @@ -290,7 +284,7 @@ async fn db_lt_12(services: &Services) -> Result<()> { .await?; } - services.globals.db.bump_database_version(12)?; + services.globals.db.bump_database_version(12); info!("Migration: 11 -> 12 finished"); Ok(()) } @@ -335,7 +329,7 @@ async fn db_lt_13(services: &Services) -> Result<()> { .await?; } - services.globals.db.bump_database_version(13)?; + services.globals.db.bump_database_version(13); info!("Migration: 12 -> 13 finished"); Ok(()) } From ef2d307c15dba1731dc6b4d67e758f27590640c6 Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 6 Feb 2025 16:48:48 -0500 Subject: [PATCH 040/310] fix warnings and errors when building with no features Signed-off-by: strawberry --- src/main/runtime.rs | 11 ++--- src/service/media/blurhash.rs | 87 +++++++++++++++++---------------- src/service/media/migrations.rs | 8 +-- 3 files changed, 51 insertions(+), 55 deletions(-) diff --git a/src/main/runtime.rs b/src/main/runtime.rs index 02b9931f..474b373b 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -8,13 +8,11 @@ use std::{ time::Duration, }; +#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] +use conduwuit::result::LogDebugErr; use conduwuit::{ is_true, - result::LogDebugErr, - utils::{ - available_parallelism, - sys::compute::{nth_core_available, set_affinity}, - }, + utils::sys::compute::{nth_core_available, set_affinity}, Result, }; use tokio::runtime::Builder; @@ -25,6 +23,7 @@ const WORKER_NAME: &str = "conduwuit:worker"; const WORKER_MIN: usize = 2; const WORKER_KEEPALIVE: u64 = 36; const MAX_BLOCKING_THREADS: usize = 1024; +#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] const DISABLE_MUZZY_THRESHOLD: usize = 4; static WORKER_AFFINITY: OnceLock = OnceLock::new(); @@ -137,7 +136,7 @@ fn set_worker_mallctl(id: usize) { .get() 
.expect("GC_MUZZY initialized by runtime::new()"); - let muzzy_auto_disable = available_parallelism() >= DISABLE_MUZZY_THRESHOLD; + let muzzy_auto_disable = conduwuit::utils::available_parallelism() >= DISABLE_MUZZY_THRESHOLD; if matches!(muzzy_option, Some(false) | None if muzzy_auto_disable) { set_muzzy_decay(-1).log_debug_err().ok(); } diff --git a/src/service/media/blurhash.rs b/src/service/media/blurhash.rs index aa6685b2..60ade723 100644 --- a/src/service/media/blurhash.rs +++ b/src/service/media/blurhash.rs @@ -1,20 +1,30 @@ -use std::{error::Error, ffi::OsStr, fmt::Display, io::Cursor, path::Path}; - -use conduwuit::{config::BlurhashConfig as CoreBlurhashConfig, err, implement, Result}; -use image::{DynamicImage, ImageDecoder, ImageError, ImageFormat, ImageReader}; +#[cfg(feature = "blurhashing")] +use conduwuit::config::BlurhashConfig as CoreBlurhashConfig; +use conduwuit::{implement, Result}; use super::Service; + #[implement(Service)] +#[cfg(not(feature = "blurhashing"))] +pub fn create_blurhash( + &self, + _file: &[u8], + _content_type: Option<&str>, + _file_name: Option<&str>, +) -> Result> { + conduwuit::debug_warn!("blurhashing on upload support was not compiled"); + + Ok(None) +} + +#[implement(Service)] +#[cfg(feature = "blurhashing")] pub fn create_blurhash( &self, file: &[u8], content_type: Option<&str>, file_name: Option<&str>, ) -> Result> { - if !cfg!(feature = "blurhashing") { - return Ok(None); - } - let config = BlurhashConfig::from(self.services.server.config.blurhashing); // since 0 means disabled blurhashing, skipped blurhashing @@ -23,7 +33,7 @@ pub fn create_blurhash( } get_blurhash_from_request(file, content_type, file_name, config) - .map_err(|e| err!(debug_error!("blurhashing error: {e}"))) + .map_err(|e| conduwuit::err!(debug_error!("blurhashing error: {e}"))) .map(Some) } @@ -36,6 +46,7 @@ pub fn create_blurhash( bytes = data.len(), ), )] +#[cfg(feature = "blurhashing")] fn get_blurhash_from_request( data: &[u8], mime: Option<&str>, @@ -53,8 +64,7 @@ fn get_blurhash_from_request( return Err(BlurhashingError::ImageTooLarge); } - // decode the image finally - let image = DynamicImage::from_decoder(decoder)?; + let image = image::DynamicImage::from_decoder(decoder)?; blurhash_an_image(&image, config) } @@ -64,31 +74,34 @@ fn get_blurhash_from_request( /// Then it checks if the filename has a format, otherwise just guess based on /// the binary data Assumes that mime and filename extension won't be for a /// different file format than file. 
+#[cfg(feature = "blurhashing")] fn get_format_from_data_mime_and_filename( data: &[u8], mime: Option<&str>, filename: Option<&str>, -) -> Result { +) -> Result { let extension = filename - .map(Path::new) - .and_then(Path::extension) - .map(OsStr::to_string_lossy); + .map(std::path::Path::new) + .and_then(std::path::Path::extension) + .map(std::ffi::OsStr::to_string_lossy); mime.or(extension.as_deref()) - .and_then(ImageFormat::from_mime_type) + .and_then(image::ImageFormat::from_mime_type) .map_or_else(|| image::guess_format(data).map_err(Into::into), Ok) } +#[cfg(feature = "blurhashing")] fn get_image_decoder_with_format_and_data( - image_format: ImageFormat, + image_format: image::ImageFormat, data: &[u8], -) -> Result, BlurhashingError> { - let mut image_reader = ImageReader::new(Cursor::new(data)); +) -> Result, BlurhashingError> { + let mut image_reader = image::ImageReader::new(std::io::Cursor::new(data)); image_reader.set_format(image_format); Ok(Box::new(image_reader.into_decoder()?)) } -fn is_image_above_size_limit( +#[cfg(feature = "blurhashing")] +fn is_image_above_size_limit( decoder: &T, blurhash_config: BlurhashConfig, ) -> bool { @@ -99,7 +112,7 @@ fn is_image_above_size_limit( #[tracing::instrument(name = "encode", level = "debug", skip_all)] #[inline] fn blurhash_an_image( - image: &DynamicImage, + image: &image::DynamicImage, blurhash_config: BlurhashConfig, ) -> Result { Ok(blurhash::encode_image( @@ -109,15 +122,6 @@ fn blurhash_an_image( )?) } -#[cfg(not(feature = "blurhashing"))] -#[inline] -fn blurhash_an_image( - _image: &DynamicImage, - _blurhash_config: BlurhashConfig, -) -> Result { - Err(BlurhashingError::Unavailable) -} - #[derive(Clone, Copy, Debug)] pub struct BlurhashConfig { pub components_x: u32, @@ -127,6 +131,7 @@ pub struct BlurhashConfig { pub size_limit: u64, } +#[cfg(feature = "blurhashing")] impl From for BlurhashConfig { fn from(value: CoreBlurhashConfig) -> Self { Self { @@ -138,17 +143,17 @@ impl From for BlurhashConfig { } #[derive(Debug)] +#[cfg(feature = "blurhashing")] pub enum BlurhashingError { - HashingLibError(Box), - ImageError(Box), + HashingLibError(Box), + #[cfg(feature = "blurhashing")] + ImageError(Box), ImageTooLarge, - - #[cfg(not(feature = "blurhashing"))] - Unavailable, } -impl From for BlurhashingError { - fn from(value: ImageError) -> Self { Self::ImageError(Box::new(value)) } +#[cfg(feature = "blurhashing")] +impl From for BlurhashingError { + fn from(value: image::ImageError) -> Self { Self::ImageError(Box::new(value)) } } #[cfg(feature = "blurhashing")] @@ -156,19 +161,17 @@ impl From for BlurhashingError { fn from(value: blurhash::Error) -> Self { Self::HashingLibError(Box::new(value)) } } -impl Display for BlurhashingError { +#[cfg(feature = "blurhashing")] +impl std::fmt::Display for BlurhashingError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "Blurhash Error:")?; match &self { | Self::ImageTooLarge => write!(f, "Image was too large to blurhash")?, | Self::HashingLibError(e) => write!(f, "There was an error with the blurhashing library => {e}")?, - + #[cfg(feature = "blurhashing")] | Self::ImageError(e) => write!(f, "There was an error with the image loading library => {e}")?, - - #[cfg(not(feature = "blurhashing"))] - | Self::Unavailable => write!(f, "Blurhashing is not supported")?, }; Ok(()) diff --git a/src/service/media/migrations.rs b/src/service/media/migrations.rs index 9555edd7..8526ffcd 100644 --- a/src/service/media/migrations.rs +++ b/src/service/media/migrations.rs @@ 
-13,7 +13,7 @@ use conduwuit::{ warn, Config, Result, }; -use crate::{migrations, Services}; +use crate::Services; /// Migrates a media directory from legacy base64 file names to sha2 file names. /// All errors are fatal. Upon success the database is keyed to not perform this @@ -48,12 +48,6 @@ pub(crate) async fn migrate_sha256_media(services: &Services) -> Result<()> { } } - // Apply fix from when sha256_media was backward-incompat and bumped the schema - // version from 13 to 14. For users satisfying these conditions we can go back. - if services.globals.db.database_version().await == 14 && migrations::DATABASE_VERSION == 13 { - services.globals.db.bump_database_version(13)?; - } - db["global"].insert(b"feat_sha256_media", []); info!("Finished applying sha256_media"); Ok(()) From c7c9f0e4a60ffd4b497bb8e426ffc34c5e118913 Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 6 Feb 2025 16:57:30 -0500 Subject: [PATCH 041/310] catch clippy lints for --no-default-features builds Signed-off-by: strawberry --- engage.toml | 50 +++++++++++++++++++++++++++++++------------------- 1 file changed, 31 insertions(+), 19 deletions(-) diff --git a/engage.toml b/engage.toml index 1d6a5475..279e999c 100644 --- a/engage.toml +++ b/engage.toml @@ -101,7 +101,6 @@ direnv exec . \ cargo clippy \ --workspace \ --profile test \ - --all-targets \ --color=always \ -- \ -D warnings @@ -116,13 +115,27 @@ env DIRENV_DEVSHELL=all-features \ cargo clippy \ --workspace \ --profile test \ - --all-targets \ --all-features \ --color=always \ -- \ -D warnings """ +[[task]] +name = "clippy/no-features" +group = "lints" +script = """ +env DIRENV_DEVSHELL=no-features \ + direnv exec . \ + cargo clippy \ + --workspace \ + --profile test \ + --no-default-features \ + --color=always \ + -- \ + -D warnings +""" + [[task]] name = "clippy/jemalloc" group = "lints" @@ -131,26 +144,12 @@ direnv exec . \ cargo clippy \ --workspace \ --profile test \ - --features jemalloc \ - --all-targets \ + --features=jemalloc \ --color=always \ -- \ -D warnings """ -#[[task]] -#name = "clippy/hardened_malloc" -#group = "lints" -#script = """ -#cargo clippy \ -# --workspace \ -# --features hardened_malloc \ -# --all-targets \ -# --color=always \ -# -- \ -# -D warnings -#""" - [[task]] name = "lychee" group = "lints" @@ -170,7 +169,6 @@ env DIRENV_DEVSHELL=all-features \ cargo test \ --workspace \ --profile test \ - --all-targets \ --all-features \ --color=always \ -- \ @@ -186,7 +184,21 @@ env DIRENV_DEVSHELL=default \ cargo test \ --workspace \ --profile test \ - --all-targets \ + --color=always \ + -- \ + --color=always +""" + +[[task]] +name = "cargo/no-features" +group = "tests" +script = """ +env DIRENV_DEVSHELL=no-features \ + direnv exec . 
\ + cargo test \ + --workspace \ + --profile test \ + --no-default-features \ --color=always \ -- \ --color=always From 43e6c27bb772461722409e9c56146a106d6c6343 Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 6 Feb 2025 18:07:49 -0500 Subject: [PATCH 042/310] misc nix tweaks to maybe speedup ci Signed-off-by: strawberry --- bin/complement | 3 +- flake.nix | 14 +------- nix/pkgs/complement/config.toml | 21 +++++++++--- nix/pkgs/complement/default.nix | 6 ---- nix/pkgs/main/default.nix | 58 +++++++++++++++++---------------- src/router/serve/tls.rs | 9 +++-- 6 files changed, 54 insertions(+), 57 deletions(-) diff --git a/bin/complement b/bin/complement index a1db4b32..a4c62856 100755 --- a/bin/complement +++ b/bin/complement @@ -34,7 +34,8 @@ toplevel="$(git rev-parse --show-toplevel)" pushd "$toplevel" > /dev/null -bin/nix-build-and-cache just .#linux-complement +#bin/nix-build-and-cache just .#linux-complement +bin/nix-build-and-cache just .#complement docker load < result popd > /dev/null diff --git a/flake.nix b/flake.nix index 920d3d14..3cef1af5 100644 --- a/flake.nix +++ b/flake.nix @@ -169,21 +169,9 @@ # used for rust caching in CI to speed it up sccache - - # needed so we can get rid of gcc and other unused deps that bloat OCI images - removeReferencesTo ] # liburing is Linux-exclusive - ++ lib.optional stdenv.hostPlatform.isLinux liburing - # needed to build Rust applications on macOS - ++ lib.optionals stdenv.hostPlatform.isDarwin [ - # https://github.com/NixOS/nixpkgs/issues/206242 - # ld: library not found for -liconv - libiconv - # https://stackoverflow.com/questions/69869574/properly-adding-darwin-apple-sdk-to-a-nix-shell - # https://discourse.nixos.org/t/compile-a-rust-binary-on-macos-dbcrossbar/8612 - pkgsBuildHost.darwin.apple_sdk.frameworks.Security - ]) + ++ lib.optional stdenv.hostPlatform.isLinux liburing) ++ scope.main.buildInputs ++ scope.main.propagatedBuildInputs ++ scope.main.nativeBuildInputs; diff --git a/nix/pkgs/complement/config.toml b/nix/pkgs/complement/config.toml index f20abee2..99c151c5 100644 --- a/nix/pkgs/complement/config.toml +++ b/nix/pkgs/complement/config.toml @@ -17,19 +17,32 @@ ip_range_denylist = [] url_preview_domain_contains_allowlist = ["*"] url_preview_domain_explicit_denylist = ["*"] media_compat_file_link = false -media_startup_check = false -prune_missing_media = false +media_startup_check = true +prune_missing_media = true log_colors = false admin_room_notices = false allow_check_for_updates = false -allow_unstable_room_versions = true +intentionally_unknown_config_option_for_testing = true rocksdb_log_level = "debug" rocksdb_max_log_files = 1 rocksdb_recovery_mode = 0 rocksdb_paranoid_file_checks = true log_guest_registrations = false allow_legacy_media = true -startup_netburst = false +startup_netburst = true +startup_netburst_keep = -1 + +# valgrind makes things so slow +dns_timeout = 60 +dns_attempts = 20 +request_conn_timeout = 60 +request_timeout = 120 +well_known_conn_timeout = 60 +well_known_timeout = 60 +federation_idle_timeout = 300 +sender_timeout = 300 +sender_idle_timeout = 300 +sender_retry_backoff_limit = 300 [global.tls] certs = "/certificate.crt" diff --git a/nix/pkgs/complement/default.nix b/nix/pkgs/complement/default.nix index e35cbf04..d9af0779 100644 --- a/nix/pkgs/complement/default.nix +++ b/nix/pkgs/complement/default.nix @@ -18,18 +18,12 @@ let all_features = true; disable_release_max_log_level = true; disable_features = [ - # no reason to use jemalloc for complement, just has compatibility/build issues - 
"jemalloc" - "jemalloc_stats" - "jemalloc_prof" # console/CLI stuff isn't used or relevant for complement "console" "tokio_console" # sentry telemetry isn't useful for complement, disabled by default anyways "sentry_telemetry" "perf_measurements" - # the containers don't use or need systemd signal support - "systemd" # this is non-functional on nix for some reason "hardened_malloc" # dont include experimental features diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix index d7424d11..4150b389 100644 --- a/nix/pkgs/main/default.nix +++ b/nix/pkgs/main/default.nix @@ -82,7 +82,7 @@ rust-jemalloc-sys' = (rust-jemalloc-sys.override { buildDepsOnlyEnv = let rocksdb' = (rocksdb.override { - jemalloc = rust-jemalloc-sys'; + jemalloc = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys'; # rocksdb fails to build with prefixed jemalloc, which is required on # darwin due to [1]. In this case, fall back to building rocksdb with # libc malloc. This should not cause conflicts, because all of the @@ -103,6 +103,12 @@ buildDepsOnlyEnv = ++ [ "-DPORTABLE=haswell" ]) else ([ "-DPORTABLE=1" ]) ) ++ old.cmakeFlags; + + # outputs has "tools" which we dont need or use + outputs = [ "out" ]; + + # preInstall hooks has stuff for messing with ldb/sst_dump which we dont need or use + preInstall = ""; }); in { @@ -156,6 +162,19 @@ commonAttrs = { ]; }; + # This is redundant with CI + doCheck = false; + + cargoTestCommand = "cargo test --locked "; + cargoExtraArgs = "--no-default-features --locked " + + lib.optionalString + (features'' != []) + "--features " + (builtins.concatStringsSep "," features''); + cargoTestExtraArgs = "--no-default-features --locked " + + lib.optionalString + (features'' != []) + "--features " + (builtins.concatStringsSep "," features''); + dontStrip = profile == "dev" || profile == "test"; dontPatchELF = profile == "dev" || profile == "test"; @@ -181,27 +200,7 @@ commonAttrs = { # differing values for `NIX_CFLAGS_COMPILE`, which contributes to spurious # rebuilds of bindgen and its depedents. 
jq - - # needed so we can get rid of gcc and other unused deps that bloat OCI images - removeReferencesTo - ] - # needed to build Rust applications on macOS - ++ lib.optionals stdenv.hostPlatform.isDarwin [ - # https://github.com/NixOS/nixpkgs/issues/206242 - # ld: library not found for -liconv - libiconv - - # https://stackoverflow.com/questions/69869574/properly-adding-darwin-apple-sdk-to-a-nix-shell - # https://discourse.nixos.org/t/compile-a-rust-binary-on-macos-dbcrossbar/8612 - pkgsBuildHost.darwin.apple_sdk.frameworks.Security - ]; - - # for some reason gcc and other weird deps are added to OCI images and bloats it up - # - # - postInstall = with pkgsBuildHost; '' - find "$out" -type f -exec remove-references-to -t ${stdenv.cc} -t ${gcc} -t ${llvm} -t ${rustc.unwrapped} -t ${rustc} '{}' + - ''; + ]; }; in @@ -210,15 +209,18 @@ craneLib.buildPackage ( commonAttrs // { env = buildDepsOnlyEnv; }); - cargoExtraArgs = "--no-default-features " + # This is redundant with CI + doCheck = false; + + cargoTestCommand = "cargo test --locked "; + cargoExtraArgs = "--no-default-features --locked " + + lib.optionalString + (features'' != []) + "--features " + (builtins.concatStringsSep "," features''); + cargoTestExtraArgs = "--no-default-features --locked " + lib.optionalString (features'' != []) "--features " + (builtins.concatStringsSep "," features''); - - # This is redundant with CI - cargoTestCommand = ""; - cargoCheckCommand = ""; - doCheck = false; env = buildPackageEnv; diff --git a/src/router/serve/tls.rs b/src/router/serve/tls.rs index 9d3fbd3b..ab1a9371 100644 --- a/src/router/serve/tls.rs +++ b/src/router/serve/tls.rs @@ -17,14 +17,13 @@ pub(super) async fn serve( addrs: Vec, ) -> Result { let tls = &server.config.tls; - let certs = tls - .certs - .as_ref() - .ok_or(err!(Config("tls.certs", "Missing required value in tls config section")))?; + let certs = tls.certs.as_ref().ok_or_else(|| { + err!(Config("tls.certs", "Missing required value in tls config section")) + })?; let key = tls .key .as_ref() - .ok_or(err!(Config("tls.key", "Missing required value in tls config section")))?; + .ok_or_else(|| err!(Config("tls.key", "Missing required value in tls config section")))?; // we use ring for ruma and hashing state, but aws-lc-rs is the new default. // without this, TLS mode will panic. 
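
A note on the src/router/serve/tls.rs hunk above: replacing `ok_or` with `ok_or_else` changes when the error value is built. `ok_or` evaluates its argument eagerly, so the `err!(Config(..))` value would be constructed even when the Option is `Some`, while `ok_or_else` defers that work to the `None` case. A minimal standalone sketch of the difference; `expensive_error` and `demo` are illustrative names only, not taken from the codebase:

    // Stands in for the formatting work done by err!(Config(..)).
    fn expensive_error() -> String {
        format!("missing required value in {} config section", "tls")
    }

    fn demo(value: Option<&str>) -> Result<&str, String> {
        // Eager: expensive_error() runs on every call, even when value is Some.
        let _eager = value.ok_or(expensive_error());
        // Lazy: expensive_error() runs only when value is None.
        value.ok_or_else(expensive_error)
    }
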
From add2e0e9eefc2cfcc154b1e4877988f15ca682a7 Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 6 Feb 2025 18:20:02 -0500 Subject: [PATCH 043/310] bump rust-rocksdb Signed-off-by: strawberry --- Cargo.lock | 4 ++-- deps/rust-rocksdb/Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 926099b5..82962421 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3711,7 +3711,7 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.32.0+9.10.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1f032427d3a0e7b0f13c04b4e34712bd8610291b#1f032427d3a0e7b0f13c04b4e34712bd8610291b" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=7b0e1bbe395a41ba8a11347a4921da590e3ad0d9#7b0e1bbe395a41ba8a11347a4921da590e3ad0d9" dependencies = [ "bindgen", "bzip2-sys", @@ -3728,7 +3728,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.36.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1f032427d3a0e7b0f13c04b4e34712bd8610291b#1f032427d3a0e7b0f13c04b4e34712bd8610291b" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=7b0e1bbe395a41ba8a11347a4921da590e3ad0d9#7b0e1bbe395a41ba8a11347a4921da590e3ad0d9" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index ba8259a3..c6af428d 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -27,7 +27,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "1f032427d3a0e7b0f13c04b4e34712bd8610291b" +rev = "7b0e1bbe395a41ba8a11347a4921da590e3ad0d9" #branch = "master" default-features = false From 8345ea2cd31d26bcf5c5eb61bbda5cd9958c11c5 Mon Sep 17 00:00:00 2001 From: strawberry Date: Thu, 6 Feb 2025 18:47:54 -0500 Subject: [PATCH 044/310] add --locked and --no-fail-fast to cargo test, add other feature test Signed-off-by: strawberry --- engage.toml | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/engage.toml b/engage.toml index 279e999c..c1a2be1f 100644 --- a/engage.toml +++ b/engage.toml @@ -86,6 +86,7 @@ env DIRENV_DEVSHELL=all-features \ direnv exec . \ cargo doc \ --workspace \ + --locked \ --profile test \ --all-features \ --no-deps \ @@ -100,6 +101,7 @@ script = """ direnv exec . \ cargo clippy \ --workspace \ + --locked \ --profile test \ --color=always \ -- \ @@ -114,6 +116,7 @@ env DIRENV_DEVSHELL=all-features \ direnv exec . \ cargo clippy \ --workspace \ + --locked \ --profile test \ --all-features \ --color=always \ @@ -129,6 +132,7 @@ env DIRENV_DEVSHELL=no-features \ direnv exec . \ cargo clippy \ --workspace \ + --locked \ --profile test \ --no-default-features \ --color=always \ @@ -137,14 +141,16 @@ env DIRENV_DEVSHELL=no-features \ """ [[task]] -name = "clippy/jemalloc" +name = "clippy/other-features" group = "lints" script = """ direnv exec . \ cargo clippy \ --workspace \ + --locked \ --profile test \ - --features=jemalloc \ + --no-default-features \ + --features=console,systemd,element_hacks,direct_tls,perf_measurements,brotli_compression,blurhashing \ --color=always \ -- \ -D warnings @@ -168,7 +174,10 @@ env DIRENV_DEVSHELL=all-features \ direnv exec . \ cargo test \ --workspace \ + --locked \ --profile test \ + --all-targets \ + --no-fail-fast \ --all-features \ --color=always \ -- \ @@ -183,7 +192,10 @@ env DIRENV_DEVSHELL=default \ direnv exec . 
\ cargo test \ --workspace \ + --locked \ --profile test \ + --all-targets \ + --no-fail-fast \ --color=always \ -- \ --color=always @@ -197,7 +209,10 @@ env DIRENV_DEVSHELL=no-features \ direnv exec . \ cargo test \ --workspace \ + --locked \ --profile test \ + --all-targets \ + --no-fail-fast \ --no-default-features \ --color=always \ -- \ From 88e7e50daff94ef8e3fe3d67e72214f002fdb22b Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 7 Feb 2025 11:49:00 -0500 Subject: [PATCH 045/310] add missing source OCI image label metadata Signed-off-by: strawberry --- nix/pkgs/oci-image/default.nix | 1 + 1 file changed, 1 insertion(+) diff --git a/nix/pkgs/oci-image/default.nix b/nix/pkgs/oci-image/default.nix index 5520c920..1650053d 100644 --- a/nix/pkgs/oci-image/default.nix +++ b/nix/pkgs/oci-image/default.nix @@ -36,6 +36,7 @@ dockerTools.buildLayeredImage { "org.opencontainers.image.documentation" = "https://conduwuit.puppyirl.gay/"; "org.opencontainers.image.licenses" = "Apache-2.0"; "org.opencontainers.image.revision" = inputs.self.rev or inputs.self.dirtyRev or ""; + "org.opencontainers.image.source" = "https://github.com/girlbossceo/conduwuit"; "org.opencontainers.image.title" = main.pname; "org.opencontainers.image.url" = "https://conduwuit.puppyirl.gay/"; "org.opencontainers.image.vendor" = "girlbossceo"; From cfcd6eb1a6a117db94e6f9e631a0d881a62d3299 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 7 Feb 2025 18:00:58 -0500 Subject: [PATCH 046/310] bump ruwuma to stop erroring on empty push response body Signed-off-by: strawberry --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 82962421..caef5859 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3490,7 +3490,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "assign", "js_int", @@ -3512,7 +3512,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "js_int", "ruma-common", @@ -3524,7 +3524,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "as_variant", "assign", @@ -3547,7 +3547,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "as_variant", "base64 0.22.1", @@ -3578,7 +3578,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "as_variant", "indexmap 2.7.0", @@ -3603,7 +3603,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "bytes", "http", @@ -3621,7 +3621,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3630,7 +3630,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "js_int", "ruma-common", @@ -3640,7 +3640,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3655,7 +3655,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "js_int", "ruma-common", @@ -3667,7 +3667,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "headers", "http", @@ -3680,7 +3680,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3696,7 +3696,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=517ac4572276a2e0ad587113776c544b51166f08#517ac4572276a2e0ad587113776c544b51166f08" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index ce483bbc..38654be3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -342,7 +342,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "517ac4572276a2e0ad587113776c544b51166f08" +rev = "f5667c6292adb43fbe4725d31d6b5127a0cf60ce" features = [ "compat", "rand", From b6e9dc3d98704c56027219d3775336910a0136c6 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sun, 9 Feb 2025 10:17:28 -0500 Subject: [PATCH 047/310] comment out borked ci thing for now Signed-off-by: strawberry --- .github/workflows/ci.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 35d60aa1..24f2db45 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -128,7 +128,7 @@ jobs: - name: Restore and cache Nix store # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags - if: ${{ !startsWith(github.ref, 'refs/tags/') }} + #if: ${{ !startsWith(github.ref, 'refs/tags/') }} uses: nix-community/cache-nix-action@v5.1.0 with: # restore and save a cache using this key @@ -191,14 +191,14 @@ jobs: - name: Run sccache-cache # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags - if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} + #if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} uses: mozilla-actions/sccache-action@main # use rust-cache - uses: Swatinem/rust-cache@v2 # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags - if: ${{ !startsWith(github.ref, 'refs/tags/') }} + #if: ${{ !startsWith(github.ref, 'refs/tags/') }} with: cache-all-crates: "true" cache-on-failure: "true" @@ -323,7 +323,7 @@ jobs: - name: Restore and cache Nix store # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags - if: ${{ !startsWith(github.ref, 'refs/tags/') }} + #if: ${{ !startsWith(github.ref, 'refs/tags/') }} uses: nix-community/cache-nix-action@v5.1.0 with: # restore and save a cache using this key @@ -379,14 +379,14 @@ jobs: - name: Run sccache-cache # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags - if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} + #if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} uses: mozilla-actions/sccache-action@main # use rust-cache - uses: Swatinem/rust-cache@v2 # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags - if: ${{ !startsWith(github.ref, 'refs/tags/') }} + #if: ${{ !startsWith(github.ref, 'refs/tags/') }} with: cache-all-crates: "true" cache-on-failure: "true" @@ -679,7 +679,7 @@ jobs: - name: Run sccache-cache # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags - if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} + #if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && 
!startsWith(github.ref, 'refs/tags/') }} uses: mozilla-actions/sccache-action@main # use rust-cache From e3b81f7b6488b5c483e8b13e3959fe591bf4cb92 Mon Sep 17 00:00:00 2001 From: Dzming Li Date: Mon, 10 Feb 2025 22:45:57 +0800 Subject: [PATCH 048/310] Fix in caddyfile guide If the reverse_proxy directive is omitted before 127.0.0.1:6167 in your Caddyfile, enabling the service with systemctl enable will result in an error. --- docs/deploying/generic.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md index cc50544e..8ca2f387 100644 --- a/docs/deploying/generic.md +++ b/docs/deploying/generic.md @@ -216,7 +216,7 @@ your server name). ```caddyfile your.server.name, your.server.name:8448 { # TCP reverse_proxy - 127.0.0.1:6167 + reverse_proxy 127.0.0.1:6167 # UNIX socket #reverse_proxy unix//run/conduwuit/conduwuit.sock } From 3ec43be95965488d720403264c4edc6170c67c02 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 04:30:17 +0000 Subject: [PATCH 049/310] join initial fetches in get_relations() skip recursion for max_depth=0 Signed-off-by: Jason Volk --- src/service/rooms/pdu_metadata/mod.rs | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index 4cb14ebc..ba289f9b 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -2,7 +2,7 @@ mod data; use std::sync::Arc; use conduwuit::{PduCount, Result}; -use futures::StreamExt; +use futures::{future::try_join, StreamExt}; use ruma::{api::Direction, EventId, RoomId, UserId}; use self::data::{Data, PdusIterItem}; @@ -54,10 +54,16 @@ impl Service { max_depth: u8, dir: Direction, ) -> Vec { - let room_id = self.services.short.get_or_create_shortroomid(room_id).await; + let room_id = self.services.short.get_shortroomid(room_id); - let target = match self.services.timeline.get_pdu_count(target).await { - | Ok(PduCount::Normal(c)) => c, + let target = self.services.timeline.get_pdu_count(target); + + let Ok((room_id, target)) = try_join(room_id, target).await else { + return Vec::new(); + }; + + let target = match target { + | PduCount::Normal(c) => c, // TODO: Support backfilled relations | _ => 0, // This will result in an empty iterator }; @@ -68,7 +74,11 @@ impl Service { .collect() .await; - let mut stack: Vec<_> = pdus.iter().map(|pdu| (pdu.clone(), 1)).collect(); + let mut stack: Vec<_> = pdus + .iter() + .filter(|_| max_depth > 0) + .map(|pdu| (pdu.clone(), 1)) + .collect(); 'limit: while let Some(stack_pdu) = stack.pop() { let target = match stack_pdu.0 .0 { From 2d71d5590a81cd26f22181131d2e5a6439fe391d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 09:53:53 +0000 Subject: [PATCH 050/310] fix pdu add_relation() helper Signed-off-by: Jason Volk --- src/core/pdu/unsigned.rs | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/src/core/pdu/unsigned.rs b/src/core/pdu/unsigned.rs index fa305d71..fe4d6a1c 100644 --- a/src/core/pdu/unsigned.rs +++ b/src/core/pdu/unsigned.rs @@ -46,23 +46,26 @@ pub fn add_age(&mut self) -> Result { } #[implement(Pdu)] -pub fn add_relation(&mut self, name: &str, pdu: &Pdu) -> Result { - let mut unsigned: BTreeMap = self +pub fn add_relation(&mut self, name: &str, pdu: Option<&Pdu>) -> Result { + use serde_json::Map; + + let mut unsigned: Map = self .unsigned .as_ref() - .map_or_else(|| Ok(BTreeMap::new()), |u| 
serde_json::from_str(u.get())) + .map_or_else(|| Ok(Map::new()), |u| serde_json::from_str(u.get())) .map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; - let relations: &mut JsonValue = unsigned.entry("m.relations".into()).or_default(); - if relations.as_object_mut().is_none() { - let mut object = serde_json::Map::::new(); - _ = relations.as_object_mut().insert(&mut object); - } + let pdu = pdu + .map(serde_json::to_value) + .transpose()? + .unwrap_or_else(|| JsonValue::Object(Map::new())); - relations + unsigned + .entry("m.relations") + .or_insert(JsonValue::Object(Map::new())) .as_object_mut() - .expect("we just created it") - .insert(name.to_owned(), serde_json::to_value(pdu)?); + .unwrap() + .insert(name.to_owned(), pdu); self.unsigned = to_raw_value(&unsigned) .map(Some) From 565837ad753bbd6d346157c5b52a6a0275984e50 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 04:21:39 +0000 Subject: [PATCH 051/310] request auth media first Signed-off-by: Jason Volk --- src/service/media/remote.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs index ca73c3ef..72f1184e 100644 --- a/src/service/media/remote.rs +++ b/src/service/media/remote.rs @@ -32,12 +32,12 @@ pub async fn fetch_remote_thumbnail( self.check_fetch_authorized(mxc)?; let result = self - .fetch_thumbnail_unauthenticated(mxc, user, server, timeout_ms, dim) + .fetch_thumbnail_authenticated(mxc, user, server, timeout_ms, dim) .await; if let Err(Error::Request(NotFound, ..)) = &result { return self - .fetch_thumbnail_authenticated(mxc, user, server, timeout_ms, dim) + .fetch_thumbnail_unauthenticated(mxc, user, server, timeout_ms, dim) .await; } @@ -55,12 +55,12 @@ pub async fn fetch_remote_content( self.check_fetch_authorized(mxc)?; let result = self - .fetch_content_unauthenticated(mxc, user, server, timeout_ms) + .fetch_content_authenticated(mxc, user, server, timeout_ms) .await; if let Err(Error::Request(NotFound, ..)) = &result { return self - .fetch_content_authenticated(mxc, user, server, timeout_ms) + .fetch_content_unauthenticated(mxc, user, server, timeout_ms) .await; } From 31ab84e9284ce7d5b6ec9fb212970b1a9e18fe7f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 10:23:17 +0000 Subject: [PATCH 052/310] simplify client event endpoint Signed-off-by: Jason Volk --- src/api/client/message.rs | 53 ++++++++++++++++++++++++++---------- src/api/client/room/event.rs | 40 +++++++++++---------------- src/core/pdu/unsigned.rs | 31 +++++++++++---------- 3 files changed, 71 insertions(+), 53 deletions(-) diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 321d8013..bb4e72dd 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -1,6 +1,6 @@ use axum::extract::State; use conduwuit::{ - at, is_equal_to, + at, utils::{ result::{FlatOk, LogErr}, stream::{BroadbandExt, TryIgnore, WidebandExt}, @@ -30,7 +30,7 @@ use service::{ use crate::Ruma; /// list of safe and common non-state events to ignore if the user is ignored -const IGNORED_MESSAGE_TYPES: &[TimelineEventType; 17] = &[ +const IGNORED_MESSAGE_TYPES: &[TimelineEventType] = &[ Audio, CallInvite, Emote, @@ -225,34 +225,50 @@ async fn get_member_event( .ok() } +#[inline] pub(crate) async fn ignored_filter( services: &Services, item: PdusIterItem, user_id: &UserId, ) -> Option { - let (_, pdu) = &item; + let (_, ref pdu) = item; + is_ignored_pdu(services, pdu, user_id) + .await + .eq(&false) + .then_some(item) +} + 
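
An aside on the `.eq(&false).then_some(item)` idiom in `ignored_filter` above: it inverts the boolean returned by the `is_ignored_pdu` helper (defined just below) into an `Option`, so the function can still be used in Option-returning filters. A minimal self-contained sketch of the same pattern; `keep_unless_ignored` is an illustrative name, not part of this codebase:

    use std::future::Future;

    // Keep `item` only when the async predicate reports it is not ignored;
    // equivalent to `if ignored.await { None } else { Some(item) }`.
    async fn keep_unless_ignored<T>(item: T, ignored: impl Future<Output = bool>) -> Option<T> {
        ignored.await.eq(&false).then_some(item)
    }
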
+#[inline] +pub(crate) async fn is_ignored_pdu( + services: &Services, + pdu: &PduEvent, + user_id: &UserId, +) -> bool { // exclude Synapse's dummy events from bloating up response bodies. clients // don't need to see this. if pdu.kind.to_cow_str() == "org.matrix.dummy_event" { - return None; + return true; } - if IGNORED_MESSAGE_TYPES.binary_search(&pdu.kind).is_ok() - && (services.users.user_is_ignored(&pdu.sender, user_id).await - || services - .server - .config - .forbidden_remote_server_names - .iter() - .any(is_equal_to!(pdu.sender().server_name()))) + let ignored_type = IGNORED_MESSAGE_TYPES.binary_search(&pdu.kind).is_ok(); + + let ignored_server = services + .server + .config + .forbidden_remote_server_names + .contains(pdu.sender().server_name()); + + if ignored_type + && (ignored_server || services.users.user_is_ignored(&pdu.sender, user_id).await) { - return None; + return true; } - Some(item) + false } +#[inline] pub(crate) async fn visibility_filter( services: &Services, item: PdusIterItem, @@ -268,7 +284,16 @@ pub(crate) async fn visibility_filter( .then_some(item) } +#[inline] pub(crate) fn event_filter(item: PdusIterItem, filter: &RoomEventFilter) -> Option { let (_, pdu) = &item; pdu.matches(filter).then_some(item) } + +#[cfg_attr(debug_assertions, conduwuit::ctor)] +fn _is_sorted() { + debug_assert!( + IGNORED_MESSAGE_TYPES.is_sorted(), + "IGNORED_MESSAGE_TYPES must be sorted by the developer" + ); +} diff --git a/src/api/client/room/event.rs b/src/api/client/room/event.rs index bc5ec0d7..f0ae64dd 100644 --- a/src/api/client/room/event.rs +++ b/src/api/client/room/event.rs @@ -1,52 +1,44 @@ use axum::extract::State; use conduwuit::{err, Err, Event, Result}; -use futures::{try_join, FutureExt, TryFutureExt}; +use futures::{future::try_join, FutureExt, TryFutureExt}; use ruma::api::client::room::get_room_event; -use crate::{client::ignored_filter, Ruma}; +use crate::{client::is_ignored_pdu, Ruma}; /// # `GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}` /// /// Gets a single event. 
pub(crate) async fn get_room_event_route( - State(services): State, + State(ref services): State, ref body: Ruma, ) -> Result { + let event_id = &body.event_id; + let room_id = &body.room_id; + let event = services .rooms .timeline - .get_pdu(&body.event_id) - .map_err(|_| err!(Request(NotFound("Event {} not found.", &body.event_id)))); - - let token = services - .rooms - .timeline - .get_pdu_count(&body.event_id) - .map_err(|_| err!(Request(NotFound("Event not found.")))); + .get_pdu(event_id) + .map_err(|_| err!(Request(NotFound("Event {} not found.", event_id)))); let visible = services .rooms .state_accessor - .user_can_see_event(body.sender_user(), &body.room_id, &body.event_id) + .user_can_see_event(body.sender_user(), room_id, event_id) .map(Ok); - let (token, mut event, visible) = try_join!(token, event, visible)?; + let (mut event, visible) = try_join(event, visible).await?; - if !visible - || ignored_filter(&services, (token, event.clone()), body.sender_user()) - .await - .is_none() - { + if !visible || is_ignored_pdu(services, &event, body.sender_user()).await { return Err!(Request(Forbidden("You don't have permission to view this event."))); } - if event.event_id() != &body.event_id || event.room_id() != body.room_id { - return Err!(Request(NotFound("Event not found"))); - } + debug_assert!( + event.event_id() == event_id && event.room_id() == room_id, + "Fetched PDU must match requested" + ); event.add_age().ok(); - let event = event.to_room_event(); - - Ok(get_room_event::v3::Response { event }) + Ok(get_room_event::v3::Response { event: event.to_room_event() }) } diff --git a/src/core/pdu/unsigned.rs b/src/core/pdu/unsigned.rs index fe4d6a1c..8482a48a 100644 --- a/src/core/pdu/unsigned.rs +++ b/src/core/pdu/unsigned.rs @@ -9,11 +9,13 @@ use crate::{err, implement, is_true, Result}; #[implement(Pdu)] pub fn remove_transaction_id(&mut self) -> Result { + use BTreeMap as Map; + let Some(unsigned) = &self.unsigned else { return Ok(()); }; - let mut unsigned: BTreeMap> = serde_json::from_str(unsigned.get()) + let mut unsigned: Map<&str, Box> = serde_json::from_str(unsigned.get()) .map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; unsigned.remove("transaction_id"); @@ -26,10 +28,13 @@ pub fn remove_transaction_id(&mut self) -> Result { #[implement(Pdu)] pub fn add_age(&mut self) -> Result { - let mut unsigned: BTreeMap> = self + use BTreeMap as Map; + + let mut unsigned: Map<&str, Box> = self .unsigned - .as_ref() - .map_or_else(|| Ok(BTreeMap::new()), |u| serde_json::from_str(u.get())) + .as_deref() + .map(RawJsonValue::get) + .map_or_else(|| Ok(Map::new()), serde_json::from_str) .map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; // deliberately allowing for the possibility of negative age @@ -37,10 +42,8 @@ pub fn add_age(&mut self) -> Result { let then: i128 = self.origin_server_ts.into(); let this_age = now.saturating_sub(then); - unsigned.insert("age".to_owned(), to_raw_value(&this_age).expect("age is valid")); - self.unsigned = to_raw_value(&unsigned) - .map(Some) - .expect("unsigned is valid"); + unsigned.insert("age", to_raw_value(&this_age)?); + self.unsigned = Some(to_raw_value(&unsigned)?); Ok(()) } @@ -51,8 +54,9 @@ pub fn add_relation(&mut self, name: &str, pdu: Option<&Pdu>) -> Result { let mut unsigned: Map = self .unsigned - .as_ref() - .map_or_else(|| Ok(Map::new()), |u| serde_json::from_str(u.get())) + .as_deref() + .map(RawJsonValue::get) + .map_or_else(|| Ok(Map::new()), serde_json::from_str) .map_err(|e| 
err!(Database("Invalid unsigned in pdu event: {e}")))?; let pdu = pdu @@ -64,12 +68,9 @@ pub fn add_relation(&mut self, name: &str, pdu: Option<&Pdu>) -> Result { .entry("m.relations") .or_insert(JsonValue::Object(Map::new())) .as_object_mut() - .unwrap() - .insert(name.to_owned(), pdu); + .map(|object| object.insert(name.to_owned(), pdu)); - self.unsigned = to_raw_value(&unsigned) - .map(Some) - .expect("unsigned is valid"); + self.unsigned = Some(to_raw_value(&unsigned)?); Ok(()) } From d8e94ee965d961fd7c8a042b0ed32d7a38190668 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 20:08:00 +0000 Subject: [PATCH 053/310] split spaces service Signed-off-by: Jason Volk --- src/api/client/space.rs | 183 ++++++++++- src/api/server/hierarchy.rs | 70 ++++- src/service/rooms/spaces/mod.rs | 311 +------------------ src/service/rooms/spaces/pagination_token.rs | 76 +++++ 4 files changed, 318 insertions(+), 322 deletions(-) create mode 100644 src/service/rooms/spaces/pagination_token.rs diff --git a/src/api/client/space.rs b/src/api/client/space.rs index 409c9083..8f54de2a 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -1,9 +1,15 @@ -use std::str::FromStr; +use std::{collections::VecDeque, str::FromStr}; use axum::extract::State; +use conduwuit::{checked, pdu::ShortRoomId, utils::stream::IterStream}; +use futures::{StreamExt, TryFutureExt}; use ruma::{ api::client::{error::ErrorKind, space::get_hierarchy}, - UInt, + OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, +}; +use service::{ + rooms::spaces::{get_parent_children_via, summary_to_chunk, SummaryAccessibility}, + Services, }; use crate::{service::rooms::spaces::PaginationToken, Error, Result, Ruma}; @@ -16,8 +22,6 @@ pub(crate) async fn get_hierarchy_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let limit = body .limit .unwrap_or_else(|| UInt::from(10_u32)) @@ -43,16 +47,163 @@ pub(crate) async fn get_hierarchy_route( } } - services - .rooms - .spaces - .get_client_hierarchy( - sender_user, - &body.room_id, - limit.try_into().unwrap_or(10), - key.map_or(vec![], |token| token.short_room_ids), - max_depth.into(), - body.suggested_only, - ) - .await + get_client_hierarchy( + &services, + body.sender_user(), + &body.room_id, + limit.try_into().unwrap_or(10), + key.map_or(vec![], |token| token.short_room_ids), + max_depth.into(), + body.suggested_only, + ) + .await +} + +async fn get_client_hierarchy( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + limit: usize, + short_room_ids: Vec, + max_depth: u64, + suggested_only: bool, +) -> Result { + let mut parents = VecDeque::new(); + + // Don't start populating the results if we have to start at a specific room. 
+ let mut populate_results = short_room_ids.is_empty(); + + let mut stack = vec![vec![(room_id.to_owned(), match room_id.server_name() { + | Some(server_name) => vec![server_name.into()], + | None => vec![], + })]]; + + let mut results = Vec::with_capacity(limit); + + while let Some((current_room, via)) = { next_room_to_traverse(&mut stack, &mut parents) } { + if results.len() >= limit { + break; + } + + match ( + services + .rooms + .spaces + .get_summary_and_children_client(¤t_room, suggested_only, sender_user, &via) + .await?, + current_room == room_id, + ) { + | (Some(SummaryAccessibility::Accessible(summary)), _) => { + let mut children: Vec<(OwnedRoomId, Vec)> = + get_parent_children_via(&summary, suggested_only) + .into_iter() + .filter(|(room, _)| parents.iter().all(|parent| parent != room)) + .rev() + .collect(); + + if populate_results { + results.push(summary_to_chunk(*summary.clone())); + } else { + children = children + .iter() + .rev() + .stream() + .skip_while(|(room, _)| { + services + .rooms + .short + .get_shortroomid(room) + .map_ok(|short| Some(&short) != short_room_ids.get(parents.len())) + .unwrap_or_else(|_| false) + }) + .map(Clone::clone) + .collect::)>>() + .await + .into_iter() + .rev() + .collect(); + + if children.is_empty() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Room IDs in token were not found.", + )); + } + + // We have reached the room after where we last left off + let parents_len = parents.len(); + if checked!(parents_len + 1)? == short_room_ids.len() { + populate_results = true; + } + } + + let parents_len: u64 = parents.len().try_into()?; + if !children.is_empty() && parents_len < max_depth { + parents.push_back(current_room.clone()); + stack.push(children); + } + // Root room in the space hierarchy, we return an error + // if this one fails. 
+ }, + | (Some(SummaryAccessibility::Inaccessible), true) => { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "The requested room is inaccessible", + )); + }, + | (None, true) => { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "The requested room was not found", + )); + }, + // Just ignore other unavailable rooms + | (None | Some(SummaryAccessibility::Inaccessible), false) => (), + } + } + + Ok(get_hierarchy::v1::Response { + next_batch: if let Some((room, _)) = next_room_to_traverse(&mut stack, &mut parents) { + parents.pop_front(); + parents.push_back(room); + + let next_short_room_ids: Vec<_> = parents + .iter() + .stream() + .filter_map(|room_id| async move { + services.rooms.short.get_shortroomid(room_id).await.ok() + }) + .collect() + .await; + + (next_short_room_ids != short_room_ids && !next_short_room_ids.is_empty()).then( + || { + PaginationToken { + short_room_ids: next_short_room_ids, + limit: UInt::new(max_depth) + .expect("When sent in request it must have been valid UInt"), + max_depth: UInt::new(max_depth) + .expect("When sent in request it must have been valid UInt"), + suggested_only, + } + .to_string() + }, + ) + } else { + None + }, + rooms: results, + }) +} + +fn next_room_to_traverse( + stack: &mut Vec)>>, + parents: &mut VecDeque, +) -> Option<(OwnedRoomId, Vec)> { + while stack.last().is_some_and(Vec::is_empty) { + stack.pop(); + parents.pop_back(); + } + + stack.last_mut().and_then(Vec::pop) } diff --git a/src/api/server/hierarchy.rs b/src/api/server/hierarchy.rs index a10df6ac..bcf2f7bc 100644 --- a/src/api/server/hierarchy.rs +++ b/src/api/server/hierarchy.rs @@ -1,7 +1,12 @@ use axum::extract::State; -use ruma::api::{client::error::ErrorKind, federation::space::get_hierarchy}; +use conduwuit::{Err, Result}; +use ruma::{api::federation::space::get_hierarchy, RoomId, ServerName}; +use service::{ + rooms::spaces::{get_parent_children_via, Identifier, SummaryAccessibility}, + Services, +}; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/hierarchy/{roomId}` /// @@ -11,13 +16,58 @@ pub(crate) async fn get_hierarchy_route( State(services): State, body: Ruma, ) -> Result { - if services.rooms.metadata.exists(&body.room_id).await { - services - .rooms - .spaces - .get_federation_hierarchy(&body.room_id, body.origin(), body.suggested_only) - .await - } else { - Err(Error::BadRequest(ErrorKind::NotFound, "Room does not exist.")) + if !services.rooms.metadata.exists(&body.room_id).await { + return Err!(Request(NotFound("Room does not exist."))); + } + + get_hierarchy(&services, &body.room_id, body.origin(), body.suggested_only).await +} + +/// Gets the response for the space hierarchy over federation request +/// +/// Errors if the room does not exist, so a check if the room exists should +/// be done +async fn get_hierarchy( + services: &Services, + room_id: &RoomId, + server_name: &ServerName, + suggested_only: bool, +) -> Result { + match services + .rooms + .spaces + .get_summary_and_children_local(&room_id.to_owned(), Identifier::ServerName(server_name)) + .await? + { + | Some(SummaryAccessibility::Accessible(room)) => { + let mut children = Vec::new(); + let mut inaccessible_children = Vec::new(); + + for (child, _via) in get_parent_children_via(&room, suggested_only) { + match services + .rooms + .spaces + .get_summary_and_children_local(&child, Identifier::ServerName(server_name)) + .await? 
+ { + | Some(SummaryAccessibility::Accessible(summary)) => { + children.push((*summary).into()); + }, + | Some(SummaryAccessibility::Inaccessible) => { + inaccessible_children.push(child); + }, + | None => (), + } + } + + Ok(get_hierarchy::v1::Response { + room: *room, + children, + inaccessible_children, + }) + }, + | Some(SummaryAccessibility::Inaccessible) => + Err!(Request(NotFound("The requested room is inaccessible"))), + | None => Err!(Request(NotFound("The requested room was not found"))), } } diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 11794752..1e2b0a9f 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -1,22 +1,14 @@ +mod pagination_token; mod tests; -use std::{ - collections::{HashMap, VecDeque}, - fmt::{Display, Formatter}, - str::FromStr, - sync::Arc, -}; +use std::{collections::HashMap, sync::Arc}; -use conduwuit::{ - checked, debug_info, err, - utils::{math::usize_from_f64, IterStream}, - Error, Result, -}; -use futures::{StreamExt, TryFutureExt}; +use conduwuit::{debug_info, err, utils::math::usize_from_f64, Error, Result}; +use futures::StreamExt; use lru_cache::LruCache; use ruma::{ api::{ - client::{self, error::ErrorKind, space::SpaceHierarchyRoomsChunk}, + client::{error::ErrorKind, space::SpaceHierarchyRoomsChunk}, federation::{ self, space::{SpaceHierarchyChildSummary, SpaceHierarchyParentSummary}, @@ -29,11 +21,12 @@ use ruma::{ }, serde::Raw, space::SpaceRoomJoinRule, - OwnedRoomId, OwnedServerName, RoomId, ServerName, UInt, UserId, + OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, }; use tokio::sync::Mutex; -use crate::{rooms, rooms::short::ShortRoomId, sending, Dep}; +pub use self::pagination_token::PaginationToken; +use crate::{rooms, sending, Dep}; pub struct CachedSpaceHierarchySummary { summary: SpaceHierarchyParentSummary, @@ -44,81 +37,10 @@ pub enum SummaryAccessibility { Inaccessible, } -// TODO: perhaps use some better form of token rather than just room count -#[derive(Debug, Eq, PartialEq)] -pub struct PaginationToken { - /// Path down the hierarchy of the room to start the response at, - /// excluding the root space. - pub short_room_ids: Vec, - pub limit: UInt, - pub max_depth: UInt, - pub suggested_only: bool, -} - -impl FromStr for PaginationToken { - type Err = Error; - - fn from_str(value: &str) -> Result { - let mut values = value.split('_'); - - let mut pag_tok = || { - let rooms = values - .next()? - .split(',') - .filter_map(|room_s| u64::from_str(room_s).ok()) - .collect(); - - Some(Self { - short_room_ids: rooms, - limit: UInt::from_str(values.next()?).ok()?, - max_depth: UInt::from_str(values.next()?).ok()?, - suggested_only: { - let slice = values.next()?; - - if values.next().is_none() { - if slice == "true" { - true - } else if slice == "false" { - false - } else { - None? - } - } else { - None? 
- } - }, - }) - }; - - if let Some(token) = pag_tok() { - Ok(token) - } else { - Err(Error::BadRequest(ErrorKind::InvalidParam, "invalid token")) - } - } -} - -impl Display for PaginationToken { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - write!( - f, - "{}_{}_{}_{}", - self.short_room_ids - .iter() - .map(ToString::to_string) - .collect::>() - .join(","), - self.limit, - self.max_depth, - self.suggested_only - ) - } -} - /// Identifier used to check if rooms are accessible /// /// None is used if you want to return the room, no matter if accessible or not -enum Identifier<'a> { +pub enum Identifier<'a> { UserId(&'a UserId), ServerName(&'a ServerName), } @@ -164,60 +86,8 @@ impl crate::Service for Service { } impl Service { - /// Gets the response for the space hierarchy over federation request - /// - /// Errors if the room does not exist, so a check if the room exists should - /// be done - pub async fn get_federation_hierarchy( - &self, - room_id: &RoomId, - server_name: &ServerName, - suggested_only: bool, - ) -> Result { - match self - .get_summary_and_children_local( - &room_id.to_owned(), - Identifier::ServerName(server_name), - ) - .await? - { - | Some(SummaryAccessibility::Accessible(room)) => { - let mut children = Vec::new(); - let mut inaccessible_children = Vec::new(); - - for (child, _via) in get_parent_children_via(&room, suggested_only) { - match self - .get_summary_and_children_local( - &child, - Identifier::ServerName(server_name), - ) - .await? - { - | Some(SummaryAccessibility::Accessible(summary)) => { - children.push((*summary).into()); - }, - | Some(SummaryAccessibility::Inaccessible) => { - inaccessible_children.push(child); - }, - | None => (), - } - } - - Ok(federation::space::get_hierarchy::v1::Response { - room: *room, - children, - inaccessible_children, - }) - }, - | Some(SummaryAccessibility::Inaccessible) => - Err(Error::BadRequest(ErrorKind::NotFound, "The requested room is inaccessible")), - | None => - Err(Error::BadRequest(ErrorKind::NotFound, "The requested room was not found")), - } - } - /// Gets the summary of a space using solely local information - async fn get_summary_and_children_local( + pub async fn get_summary_and_children_local( &self, current_room: &OwnedRoomId, identifier: Identifier<'_>, @@ -366,7 +236,7 @@ impl Service { /// Gets the summary of a space using either local or remote (federation) /// sources - async fn get_summary_and_children_client( + pub async fn get_summary_and_children_client( &self, current_room: &OwnedRoomId, suggested_only: bool, @@ -470,147 +340,6 @@ impl Service { }) } - pub async fn get_client_hierarchy( - &self, - sender_user: &UserId, - room_id: &RoomId, - limit: usize, - short_room_ids: Vec, - max_depth: u64, - suggested_only: bool, - ) -> Result { - let mut parents = VecDeque::new(); - - // Don't start populating the results if we have to start at a specific room. 
- let mut populate_results = short_room_ids.is_empty(); - - let mut stack = vec![vec![(room_id.to_owned(), match room_id.server_name() { - | Some(server_name) => vec![server_name.into()], - | None => vec![], - })]]; - - let mut results = Vec::with_capacity(limit); - - while let Some((current_room, via)) = { next_room_to_traverse(&mut stack, &mut parents) } - { - if results.len() >= limit { - break; - } - - match ( - self.get_summary_and_children_client( - ¤t_room, - suggested_only, - sender_user, - &via, - ) - .await?, - current_room == room_id, - ) { - | (Some(SummaryAccessibility::Accessible(summary)), _) => { - let mut children: Vec<(OwnedRoomId, Vec)> = - get_parent_children_via(&summary, suggested_only) - .into_iter() - .filter(|(room, _)| parents.iter().all(|parent| parent != room)) - .rev() - .collect(); - - if populate_results { - results.push(summary_to_chunk(*summary.clone())); - } else { - children = children - .iter() - .rev() - .stream() - .skip_while(|(room, _)| { - self.services - .short - .get_shortroomid(room) - .map_ok(|short| { - Some(&short) != short_room_ids.get(parents.len()) - }) - .unwrap_or_else(|_| false) - }) - .map(Clone::clone) - .collect::)>>() - .await - .into_iter() - .rev() - .collect(); - - if children.is_empty() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Room IDs in token were not found.", - )); - } - - // We have reached the room after where we last left off - let parents_len = parents.len(); - if checked!(parents_len + 1)? == short_room_ids.len() { - populate_results = true; - } - } - - let parents_len: u64 = parents.len().try_into()?; - if !children.is_empty() && parents_len < max_depth { - parents.push_back(current_room.clone()); - stack.push(children); - } - // Root room in the space hierarchy, we return an error - // if this one fails. 
- }, - | (Some(SummaryAccessibility::Inaccessible), true) => { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "The requested room is inaccessible", - )); - }, - | (None, true) => { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "The requested room was not found", - )); - }, - // Just ignore other unavailable rooms - | (None | Some(SummaryAccessibility::Inaccessible), false) => (), - } - } - - Ok(client::space::get_hierarchy::v1::Response { - next_batch: if let Some((room, _)) = next_room_to_traverse(&mut stack, &mut parents) { - parents.pop_front(); - parents.push_back(room); - - let next_short_room_ids: Vec<_> = parents - .iter() - .stream() - .filter_map(|room_id| async move { - self.services.short.get_shortroomid(room_id).await.ok() - }) - .collect() - .await; - - (next_short_room_ids != short_room_ids && !next_short_room_ids.is_empty()).then( - || { - PaginationToken { - short_room_ids: next_short_room_ids, - limit: UInt::new(max_depth) - .expect("When sent in request it must have been valid UInt"), - max_depth: UInt::new(max_depth) - .expect("When sent in request it must have been valid UInt"), - suggested_only, - } - .to_string() - }, - ) - } else { - None - }, - rooms: results, - }) - } - /// Simply returns the stripped m.space.child events of a room async fn get_stripped_space_child_events( &self, @@ -757,7 +486,8 @@ impl From for SpaceHierarchyRoomsChunk { /// Here because cannot implement `From` across ruma-federation-api and /// ruma-client-api types -fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyRoomsChunk { +#[must_use] +pub fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyRoomsChunk { let SpaceHierarchyParentSummary { canonical_alias, name, @@ -790,7 +520,8 @@ fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyRooms /// Returns the children of a SpaceHierarchyParentSummary, making use of the /// children_state field -fn get_parent_children_via( +#[must_use] +pub fn get_parent_children_via( parent: &SpaceHierarchyParentSummary, suggested_only: bool, ) -> Vec<(OwnedRoomId, Vec)> { @@ -808,15 +539,3 @@ fn get_parent_children_via( }) .collect() } - -fn next_room_to_traverse( - stack: &mut Vec)>>, - parents: &mut VecDeque, -) -> Option<(OwnedRoomId, Vec)> { - while stack.last().is_some_and(Vec::is_empty) { - stack.pop(); - parents.pop_back(); - } - - stack.last_mut().and_then(Vec::pop) -} diff --git a/src/service/rooms/spaces/pagination_token.rs b/src/service/rooms/spaces/pagination_token.rs new file mode 100644 index 00000000..8f019e8d --- /dev/null +++ b/src/service/rooms/spaces/pagination_token.rs @@ -0,0 +1,76 @@ +use std::{ + fmt::{Display, Formatter}, + str::FromStr, +}; + +use conduwuit::{Error, Result}; +use ruma::{api::client::error::ErrorKind, UInt}; + +use crate::rooms::short::ShortRoomId; + +// TODO: perhaps use some better form of token rather than just room count +#[derive(Debug, Eq, PartialEq)] +pub struct PaginationToken { + /// Path down the hierarchy of the room to start the response at, + /// excluding the root space. + pub short_room_ids: Vec, + pub limit: UInt, + pub max_depth: UInt, + pub suggested_only: bool, +} + +impl FromStr for PaginationToken { + type Err = Error; + + fn from_str(value: &str) -> Result { + let mut values = value.split('_'); + let mut pag_tok = || { + let short_room_ids = values + .next()? 
+ .split(',') + .filter_map(|room_s| u64::from_str(room_s).ok()) + .collect(); + + let limit = UInt::from_str(values.next()?).ok()?; + let max_depth = UInt::from_str(values.next()?).ok()?; + let slice = values.next()?; + let suggested_only = if values.next().is_none() { + if slice == "true" { + true + } else if slice == "false" { + false + } else { + None? + } + } else { + None? + }; + + Some(Self { + short_room_ids, + limit, + max_depth, + suggested_only, + }) + }; + + if let Some(token) = pag_tok() { + Ok(token) + } else { + Err(Error::BadRequest(ErrorKind::InvalidParam, "invalid token")) + } + } +} + +impl Display for PaginationToken { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + let short_room_ids = self + .short_room_ids + .iter() + .map(ToString::to_string) + .collect::>() + .join(","); + + write!(f, "{short_room_ids}_{}_{}_{}", self.limit, self.max_depth, self.suggested_only) + } +} From 5428526120cf49efda7b129d48b5a35ea1d87dde Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 23:03:24 +0000 Subject: [PATCH 054/310] add tail-efficient logic extension Signed-off-by: Jason Volk --- src/core/utils/future/bool_ext.rs | 82 +++++++++++++++++++++++++++++++ src/core/utils/future/mod.rs | 2 + 2 files changed, 84 insertions(+) create mode 100644 src/core/utils/future/bool_ext.rs diff --git a/src/core/utils/future/bool_ext.rs b/src/core/utils/future/bool_ext.rs new file mode 100644 index 00000000..6cb2f1fe --- /dev/null +++ b/src/core/utils/future/bool_ext.rs @@ -0,0 +1,82 @@ +//! Extended external extensions to futures::FutureExt + +use std::marker::Unpin; + +use futures::{ + future::{select_ok, try_join, try_join_all, try_select}, + Future, FutureExt, +}; + +pub trait BoolExt +where + Self: Future + Send, +{ + fn and(self, b: B) -> impl Future + Send + where + B: Future + Send, + Self: Sized; + + fn or(self, b: B) -> impl Future + Send + where + B: Future + Send + Unpin, + Self: Sized + Unpin; +} + +pub async fn and(args: I) -> impl Future + Send +where + I: Iterator + Send, + F: Future + Send, +{ + type Result = crate::Result<(), ()>; + + let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(())))); + + try_join_all(args).map(|result| result.is_ok()) +} + +pub async fn or(args: I) -> impl Future + Send +where + I: Iterator + Send, + F: Future + Send + Unpin, +{ + type Result = crate::Result<(), ()>; + + let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(())))); + + select_ok(args).map(|result| result.is_ok()) +} + +impl BoolExt for Fut +where + Fut: Future + Send, +{ + #[inline] + fn and(self, b: B) -> impl Future + Send + where + B: Future + Send, + Self: Sized, + { + type Result = crate::Result<(), ()>; + + let a = self.map(|a| a.then_some(()).ok_or(Result::Err(()))); + + let b = b.map(|b| b.then_some(()).ok_or(Result::Err(()))); + + try_join(a, b).map(|result| result.is_ok()) + } + + #[inline] + fn or(self, b: B) -> impl Future + Send + where + B: Future + Send + Unpin, + Self: Sized + Unpin, + { + type Result = crate::Result<(), ()>; + + let a = self.map(|a| a.then_some(()).ok_or(Result::Err(()))); + + let b = b.map(|b| b.then_some(()).ok_or(Result::Err(()))); + + try_select(a, b).map(|result| result.is_ok()) + } +} diff --git a/src/core/utils/future/mod.rs b/src/core/utils/future/mod.rs index 153dcfe1..2198a84f 100644 --- a/src/core/utils/future/mod.rs +++ b/src/core/utils/future/mod.rs @@ -1,7 +1,9 @@ +mod bool_ext; mod ext_ext; mod option_ext; mod try_ext_ext; +pub use bool_ext::{and, or, BoolExt}; pub use 
ext_ext::ExtExt; pub use option_ext::OptionExt; pub use try_ext_ext::TryExtExt; From 59c073d0d86ca8a6b9606037e2278890b5b84821 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 6 Feb 2025 23:58:45 +0000 Subject: [PATCH 055/310] add unconstrained feature to service worker Signed-off-by: Jason Volk --- src/service/manager.rs | 9 +++++++-- src/service/sending/mod.rs | 13 +++++++++++-- src/service/service.rs | 5 +++++ 3 files changed, 23 insertions(+), 4 deletions(-) diff --git a/src/service/manager.rs b/src/service/manager.rs index ea33d285..e0d885c2 100644 --- a/src/service/manager.rs +++ b/src/service/manager.rs @@ -1,7 +1,7 @@ use std::{panic::AssertUnwindSafe, sync::Arc, time::Duration}; use conduwuit::{debug, debug_warn, error, trace, utils::time, warn, Err, Error, Result, Server}; -use futures::FutureExt; +use futures::{FutureExt, TryFutureExt}; use tokio::{ sync::{Mutex, MutexGuard}, task::{JoinHandle, JoinSet}, @@ -183,9 +183,14 @@ async fn worker(service: Arc) -> WorkerResult { let service_ = Arc::clone(&service); let result = AssertUnwindSafe(service_.worker()) .catch_unwind() - .await .map_err(Error::from_panic); + let result = if service.unconstrained() { + tokio::task::unconstrained(result).await + } else { + result.await + }; + // flattens JoinError for panic into worker's Error (service, result.unwrap_or_else(Err)) } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index b146ad49..86b219f7 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -22,7 +22,7 @@ use ruma::{ RoomId, ServerName, UserId, }; use smallvec::SmallVec; -use tokio::task::JoinSet; +use tokio::{task, task::JoinSet}; use self::data::Data; pub use self::{ @@ -111,8 +111,15 @@ impl crate::Service for Service { .enumerate() .fold(JoinSet::new(), |mut joinset, (id, _)| { let self_ = self.clone(); + let worker = self_.sender(id); + let worker = if self.unconstrained() { + task::unconstrained(worker).boxed() + } else { + worker.boxed() + }; + let runtime = self.server.runtime(); - let _abort = joinset.spawn_on(self_.sender(id).boxed(), runtime); + let _abort = joinset.spawn_on(worker, runtime); joinset }); @@ -139,6 +146,8 @@ impl crate::Service for Service { } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } + + fn unconstrained(&self) -> bool { true } } impl Service { diff --git a/src/service/service.rs b/src/service/service.rs index 7adb189e..cad01437 100644 --- a/src/service/service.rs +++ b/src/service/service.rs @@ -39,6 +39,11 @@ pub(crate) trait Service: Any + Send + Sync { /// Return the name of the service. /// i.e. `crate::service::make_name(std::module_path!())` fn name(&self) -> &str; + + /// Return true if the service worker opts out of the tokio cooperative + /// budgeting. This can reduce tail latency at the risk of event loop + /// starvation. + fn unconstrained(&self) -> bool { false } } /// Args are passed to `Service::build` when a service is constructed. 
This From e123a5b660a21ae444e154ac60812468c878ec58 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 7 Feb 2025 01:16:46 +0000 Subject: [PATCH 056/310] add state accessories for iterating state_keys of a type Signed-off-by: Jason Volk --- src/service/rooms/state_accessor/state.rs | 124 ++++++++++++++++++++-- 1 file changed, 114 insertions(+), 10 deletions(-) diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs index c47a5693..3cf168c1 100644 --- a/src/service/rooms/state_accessor/state.rs +++ b/src/service/rooms/state_accessor/state.rs @@ -9,7 +9,7 @@ use conduwuit::{ PduEvent, Result, }; use database::Deserialized; -use futures::{future::try_join, FutureExt, Stream, StreamExt, TryFutureExt}; +use futures::{future::try_join, pin_mut, FutureExt, Stream, StreamExt, TryFutureExt}; use ruma::{ events::{ room::member::{MembershipState, RoomMemberEventContent}, @@ -69,7 +69,6 @@ where } #[implement(super::Service)] -#[tracing::instrument(skip(self), level = "debug")] pub async fn state_contains( &self, shortstatehash: ShortStateHash, @@ -90,7 +89,18 @@ pub async fn state_contains( } #[implement(super::Service)] -#[tracing::instrument(skip(self), level = "debug")] +pub async fn state_contains_type( + &self, + shortstatehash: ShortStateHash, + event_type: &StateEventType, +) -> bool { + let state_keys = self.state_keys(shortstatehash, event_type); + + pin_mut!(state_keys); + state_keys.next().await.is_some() +} + +#[implement(super::Service)] pub async fn state_contains_shortstatekey( &self, shortstatehash: ShortStateHash, @@ -125,7 +135,6 @@ pub async fn state_get( /// Returns a single EventId from `room_id` with key (`event_type`, /// `state_key`). #[implement(super::Service)] -#[tracing::instrument(skip(self), level = "debug")] pub async fn state_get_id( &self, shortstatehash: ShortStateHash, @@ -149,7 +158,6 @@ where /// Returns a single EventId from `room_id` with key (`event_type`, /// `state_key`). #[implement(super::Service)] -#[tracing::instrument(skip(self), level = "debug")] pub async fn state_get_shortid( &self, shortstatehash: ShortStateHash, @@ -177,6 +185,103 @@ pub async fn state_get_shortid( .await? } +/// Iterates the state_keys for an event_type in the state; current state +/// event_id included. +#[implement(super::Service)] +pub fn state_keys_with_ids<'a, Id>( + &'a self, + shortstatehash: ShortStateHash, + event_type: &'a StateEventType, +) -> impl Stream + Send + 'a +where + Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned + 'a, + ::Owned: Borrow, +{ + let state_keys_with_short_ids = self + .state_keys_with_shortids(shortstatehash, event_type) + .unzip() + .map(|(ssks, sids): (Vec, Vec)| (ssks, sids)) + .shared(); + + let state_keys = state_keys_with_short_ids + .clone() + .map(at!(0)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + let shorteventids = state_keys_with_short_ids + .map(at!(1)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + self.services + .short + .multi_get_eventid_from_short(shorteventids) + .zip(state_keys) + .ready_filter_map(|(eid, sk)| eid.map(move |eid| (sk, eid)).ok()) +} + +/// Iterates the state_keys for an event_type in the state; current state +/// event_id included. 
+#[implement(super::Service)] +pub fn state_keys_with_shortids<'a>( + &'a self, + shortstatehash: ShortStateHash, + event_type: &'a StateEventType, +) -> impl Stream + Send + 'a { + let short_ids = self + .state_full_shortids(shortstatehash) + .expect_ok() + .unzip() + .map(|(ssks, sids): (Vec, Vec)| (ssks, sids)) + .shared(); + + let shortstatekeys = short_ids + .clone() + .map(at!(0)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + let shorteventids = short_ids + .map(at!(1)) + .map(Vec::into_iter) + .map(IterStream::stream) + .flatten_stream(); + + self.services + .short + .multi_get_statekey_from_short(shortstatekeys) + .zip(shorteventids) + .ready_filter_map(|(res, id)| res.map(|res| (res, id)).ok()) + .ready_filter_map(move |((event_type_, state_key), event_id)| { + event_type_.eq(event_type).then_some((state_key, event_id)) + }) +} + +/// Iterates the state_keys for an event_type in the state +#[implement(super::Service)] +pub fn state_keys<'a>( + &'a self, + shortstatehash: ShortStateHash, + event_type: &'a StateEventType, +) -> impl Stream + Send + 'a { + let short_ids = self + .state_full_shortids(shortstatehash) + .expect_ok() + .map(at!(0)); + + self.services + .short + .multi_get_statekey_from_short(short_ids) + .ready_filter_map(Result::ok) + .ready_filter_map(move |(event_type_, state_key)| { + event_type_.eq(event_type).then_some(state_key) + }) +} + /// Returns the state events removed between the interval (present in .0 but /// not in .1) #[implement(super::Service)] @@ -191,11 +296,10 @@ pub fn state_removed( /// Returns the state events added between the interval (present in .1 but /// not in .0) #[implement(super::Service)] -#[tracing::instrument(skip(self), level = "debug")] -pub fn state_added<'a>( - &'a self, +pub fn state_added( + &self, shortstatehash: pair_of!(ShortStateHash), -) -> impl Stream + Send + 'a { +) -> impl Stream + Send + '_ { let a = self.load_full_state(shortstatehash.0); let b = self.load_full_state(shortstatehash.1); try_join(a, b) @@ -239,7 +343,6 @@ pub fn state_full_pdus( /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[implement(super::Service)] -#[tracing::instrument(skip(self), level = "debug")] pub fn state_full_ids<'a, Id>( &'a self, shortstatehash: ShortStateHash, @@ -293,6 +396,7 @@ pub fn state_full_shortids( } #[implement(super::Service)] +#[tracing::instrument(name = "load", level = "debug", skip(self))] async fn load_full_state(&self, shortstatehash: ShortStateHash) -> Result> { self.services .state_compressor From ecc9099127cc6779cd74723ae6169f7a22276ab7 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 7 Feb 2025 23:18:02 +0000 Subject: [PATCH 057/310] add conf item to re-disable atomic flush Signed-off-by: Jason Volk --- conduwuit-example.toml | 7 +++++++ src/core/config/mod.rs | 7 +++++++ src/database/engine/db_opts.rs | 4 ++-- 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index f9da856d..9b6f6ce0 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -925,6 +925,13 @@ # #rocksdb_checksums = true +# Enables the "atomic flush" mode in rocksdb. This option is not intended +# for users. It may be removed or ignored in future versions. Atomic flush +# may be enabled by the paranoid to possibly improve database integrity at +# the cost of performance. +# +#rocksdb_atomic_flush = false + # Database repair mode (for RocksDB SST corruption). 
# # Use this option when the server reports corruption while running or diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 9514f7a0..e66532ee 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1089,6 +1089,13 @@ pub struct Config { #[serde(default = "true_fn")] pub rocksdb_checksums: bool, + /// Enables the "atomic flush" mode in rocksdb. This option is not intended + /// for users. It may be removed or ignored in future versions. Atomic flush + /// may be enabled by the paranoid to possibly improve database integrity at + /// the cost of performance. + #[serde(default)] + pub rocksdb_atomic_flush: bool, + /// Database repair mode (for RocksDB SST corruption). /// /// Use this option when the server reports corruption while running or diff --git a/src/database/engine/db_opts.rs b/src/database/engine/db_opts.rs index 01847257..6abeb4b0 100644 --- a/src/database/engine/db_opts.rs +++ b/src/database/engine/db_opts.rs @@ -29,9 +29,9 @@ pub(crate) fn db_options(config: &Config, env: &Env, row_cache: &Cache) -> Resul opts.set_max_file_opening_threads(0); // IO - opts.set_atomic_flush(true); opts.set_manual_wal_flush(true); - opts.set_enable_pipelined_write(false); + opts.set_atomic_flush(config.rocksdb_atomic_flush); + opts.set_enable_pipelined_write(!config.rocksdb_atomic_flush); if config.rocksdb_direct_io { opts.set_use_direct_reads(true); opts.set_use_direct_io_for_flush_and_compaction(true); From b872f8e593afaee437331edd429a2d801f069aab Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 8 Feb 2025 00:16:37 +0000 Subject: [PATCH 058/310] optimize with SmallString; consolidate related re-exports Signed-off-by: Jason Volk --- Cargo.lock | 16 ++++++++++++---- Cargo.toml | 4 ++++ src/api/client/room/create.rs | 8 ++++---- src/api/client/room/upgrade.rs | 12 ++++++------ src/api/client/state.rs | 2 +- src/api/client/sync/v3.rs | 2 +- src/api/client/sync/v4.rs | 17 +++++++---------- src/api/client/sync/v5.rs | 18 +++++++----------- src/core/Cargo.toml | 2 ++ src/core/mod.rs | 5 ++++- src/core/pdu/builder.rs | 9 ++++++--- src/core/pdu/mod.rs | 8 +++++--- src/core/pdu/state_key.rs | 8 ++++++++ src/database/Cargo.toml | 2 -- src/database/de.rs | 5 +++-- src/database/keyval.rs | 3 +-- src/database/map/contains.rs | 2 +- src/database/map/insert.rs | 3 +-- src/database/map/qry.rs | 3 +-- src/database/map/remove.rs | 3 +-- src/database/pool.rs | 2 +- src/database/tests.rs | 6 ++++-- src/service/Cargo.toml | 2 -- src/service/migrations.rs | 4 +++- src/service/resolver/cache.rs | 2 +- src/service/resolver/fed.rs | 3 +-- src/service/resolver/mod.rs | 3 +-- .../rooms/event_handler/handle_outlier_pdu.rs | 10 ++++------ .../rooms/event_handler/resolve_state.rs | 1 + .../rooms/event_handler/state_at_incoming.rs | 1 + src/service/rooms/pdu_metadata/data.rs | 2 +- src/service/rooms/search/mod.rs | 2 +- src/service/rooms/short/mod.rs | 9 ++++----- src/service/rooms/state_accessor/room_state.rs | 4 ++-- src/service/rooms/state_accessor/state.rs | 12 ++++++------ src/service/rooms/state_accessor/user_can.rs | 2 +- src/service/rooms/state_compressor/mod.rs | 2 +- src/service/rooms/timeline/mod.rs | 8 ++++---- src/service/sending/mod.rs | 2 +- 39 files changed, 113 insertions(+), 96 deletions(-) create mode 100644 src/core/pdu/state_key.rs diff --git a/Cargo.lock b/Cargo.lock index caef5859..5981a2a6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -822,6 +822,8 @@ dependencies = [ "serde_json", "serde_regex", "serde_yaml", + "smallstr", + "smallvec", "thiserror 2.0.11", 
"tikv-jemalloc-ctl", "tikv-jemalloc-sys", @@ -839,7 +841,6 @@ dependencies = [ name = "conduwuit_database" version = "0.5.0" dependencies = [ - "arrayvec", "async-channel", "conduwuit_core", "const-str", @@ -850,7 +851,6 @@ dependencies = [ "rust-rocksdb-uwu", "serde", "serde_json", - "smallvec", "tokio", "tracing", ] @@ -902,7 +902,6 @@ dependencies = [ name = "conduwuit_service" version = "0.5.0" dependencies = [ - "arrayvec", "async-trait", "base64 0.22.1", "blurhash", @@ -929,7 +928,6 @@ dependencies = [ "serde_json", "serde_yaml", "sha2", - "smallvec", "termimad", "tokio", "tracing", @@ -4275,6 +4273,16 @@ dependencies = [ "autocfg", ] +[[package]] +name = "smallstr" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63b1aefdf380735ff8ded0b15f31aab05daf1f70216c01c02a12926badd1df9d" +dependencies = [ + "serde", + "smallvec", +] + [[package]] name = "smallvec" version = "1.13.2" diff --git a/Cargo.toml b/Cargo.toml index 38654be3..b93877bd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -40,6 +40,10 @@ features = [ "write", ] +[workspace.dependencies.smallstr] +version = "0.3" +features = ["ffi", "std", "union"] + [workspace.dependencies.const-str] version = "0.5.7" diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index a401b63d..e362b3b3 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -2,7 +2,7 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ - debug_info, debug_warn, err, error, info, pdu::PduBuilder, warn, Err, Error, Result, + debug_info, debug_warn, err, error, info, pdu::PduBuilder, warn, Err, Error, Result, StateKey, }; use futures::FutureExt; use ruma::{ @@ -198,7 +198,7 @@ pub(crate) async fn create_room_route( event_type: TimelineEventType::RoomCreate, content: to_raw_value(&create_content) .expect("create event content serialization"), - state_key: Some(String::new()), + state_key: Some(StateKey::new()), ..Default::default() }, sender_user, @@ -267,7 +267,7 @@ pub(crate) async fn create_room_route( event_type: TimelineEventType::RoomPowerLevels, content: to_raw_value(&power_levels_content) .expect("serialized power_levels event content"), - state_key: Some(String::new()), + state_key: Some(StateKey::new()), ..Default::default() }, sender_user, @@ -371,7 +371,7 @@ pub(crate) async fn create_room_route( } // Implicit state key defaults to "" - pdu_builder.state_key.get_or_insert_with(String::new); + pdu_builder.state_key.get_or_insert_with(StateKey::new); // Silently skip encryption events if they are not allowed if pdu_builder.event_type == TimelineEventType::RoomEncryption diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs index 2f9706f4..a624f95f 100644 --- a/src/api/client/room/upgrade.rs +++ b/src/api/client/room/upgrade.rs @@ -1,7 +1,7 @@ use std::cmp::max; use axum::extract::State; -use conduwuit::{err, info, pdu::PduBuilder, Error, Result}; +use conduwuit::{err, info, pdu::PduBuilder, Error, Result, StateKey}; use futures::StreamExt; use ruma::{ api::client::{error::ErrorKind, room::upgrade_room}, @@ -77,7 +77,7 @@ pub(crate) async fn upgrade_room_route( .rooms .timeline .build_and_append_pdu( - PduBuilder::state(String::new(), &RoomTombstoneEventContent { + PduBuilder::state(StateKey::new(), &RoomTombstoneEventContent { body: "This room has been replaced".to_owned(), replacement_room: replacement_room.clone(), }), @@ -159,7 +159,7 @@ pub(crate) async fn upgrade_room_route( content: 
to_raw_value(&create_event_content) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some(String::new()), + state_key: Some(StateKey::new()), redacts: None, timestamp: None, }, @@ -188,7 +188,7 @@ pub(crate) async fn upgrade_room_route( }) .expect("event is valid, we just created it"), unsigned: None, - state_key: Some(sender_user.to_string()), + state_key: Some(sender_user.as_str().into()), redacts: None, timestamp: None, }, @@ -217,7 +217,7 @@ pub(crate) async fn upgrade_room_route( PduBuilder { event_type: event_type.to_string().into(), content: event_content, - state_key: Some(String::new()), + state_key: Some(StateKey::new()), ..Default::default() }, sender_user, @@ -272,7 +272,7 @@ pub(crate) async fn upgrade_room_route( .rooms .timeline .build_and_append_pdu( - PduBuilder::state(String::new(), &RoomPowerLevelsEventContent { + PduBuilder::state(StateKey::new(), &RoomPowerLevelsEventContent { events_default: new_level, invite: new_level, ..power_levels_event_content diff --git a/src/api/client/state.rs b/src/api/client/state.rs index 8555f88b..f73ffa46 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -172,7 +172,7 @@ async fn send_state_event_for_key_helper( PduBuilder { event_type: event_type.to_string().into(), content: serde_json::from_str(json.json().get())?, - state_key: Some(String::from(state_key)), + state_key: Some(state_key.into()), timestamp, ..Default::default() }, diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 1d1a91ba..f9dcd5ec 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -441,7 +441,7 @@ async fn handle_left_room( kind: RoomMember, content: serde_json::from_str(r#"{"membership":"leave"}"#) .expect("this is valid JSON"), - state_key: Some(sender_user.to_string()), + state_key: Some(sender_user.as_str().into()), unsigned: None, // The following keys are dropped on conversion room_id: room_id.clone(), diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 66793ba1..4e474ef3 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -29,7 +29,7 @@ use ruma::{ TimelineEventType::*, }, serde::Raw, - uint, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UInt, + uint, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, }; use service::rooms::read_receipt::pack_receipts; @@ -258,12 +258,9 @@ pub(crate) async fn sync_events_v4_route( continue; }; if pdu.kind == RoomMember { - if let Some(state_key) = &pdu.state_key { - let user_id = - OwnedUserId::parse(state_key.clone()).map_err(|_| { - Error::bad_database("Invalid UserId in member PDU.") - })?; - + if let Some(Ok(user_id)) = + pdu.state_key.as_deref().map(UserId::parse) + { if user_id == *sender_user { continue; } @@ -275,18 +272,18 @@ pub(crate) async fn sync_events_v4_route( if !share_encrypted_room( &services, sender_user, - &user_id, + user_id, Some(room_id), ) .await { - device_list_changes.insert(user_id); + device_list_changes.insert(user_id.to_owned()); } }, | MembershipState::Leave => { // Write down users that have left encrypted rooms we // are in - left_encrypted_users.insert(user_id); + left_encrypted_users.insert(user_id.to_owned()); }, | _ => {}, } diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index e7b5fe74..f8ee1047 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -25,7 +25,7 @@ use ruma::{ }, serde::Raw, state_res::TypeStateKey, - uint, DeviceId, OwnedEventId, OwnedRoomId, 
OwnedUserId, RoomId, UInt, UserId, + uint, DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, }; use service::{rooms::read_receipt::pack_receipts, PduCount}; @@ -765,13 +765,9 @@ async fn collect_e2ee<'a>( continue; }; if pdu.kind == TimelineEventType::RoomMember { - if let Some(state_key) = &pdu.state_key { - let user_id = - OwnedUserId::parse(state_key.clone()).map_err(|_| { - Error::bad_database("Invalid UserId in member PDU.") - })?; - - if user_id == *sender_user { + if let Some(Ok(user_id)) = pdu.state_key.as_deref().map(UserId::parse) + { + if user_id == sender_user { continue; } @@ -782,18 +778,18 @@ async fn collect_e2ee<'a>( if !share_encrypted_room( &services, sender_user, - &user_id, + user_id, Some(room_id), ) .await { - device_list_changes.insert(user_id); + device_list_changes.insert(user_id.to_owned()); } }, | MembershipState::Leave => { // Write down users that have left encrypted rooms we // are in - left_encrypted_users.insert(user_id); + left_encrypted_users.insert(user_id.to_owned()); }, | _ => {}, } diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index ef2df4ff..d4b0c83b 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -92,6 +92,8 @@ serde_json.workspace = true serde_regex.workspace = true serde_yaml.workspace = true serde.workspace = true +smallvec.workspace = true +smallstr.workspace = true thiserror.workspace = true tikv-jemallocator.optional = true tikv-jemallocator.workspace = true diff --git a/src/core/mod.rs b/src/core/mod.rs index 1416ed9e..ee128628 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -10,14 +10,17 @@ pub mod pdu; pub mod server; pub mod utils; +pub use ::arrayvec; pub use ::http; pub use ::ruma; +pub use ::smallstr; +pub use ::smallvec; pub use ::toml; pub use ::tracing; pub use config::Config; pub use error::Error; pub use info::{rustc_flags_capture, version, version::version}; -pub use pdu::{Event, PduBuilder, PduCount, PduEvent, PduId, RawPduId}; +pub use pdu::{Event, PduBuilder, PduCount, PduEvent, PduId, RawPduId, StateKey}; pub use server::Server; pub use utils::{ctor, dtor, implement, result, result::Result}; diff --git a/src/core/pdu/builder.rs b/src/core/pdu/builder.rs index b25d4e9e..0efee128 100644 --- a/src/core/pdu/builder.rs +++ b/src/core/pdu/builder.rs @@ -7,6 +7,8 @@ use ruma::{ use serde::Deserialize; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use super::StateKey; + /// Build the start of a PDU in order to add it to the Database. 
#[derive(Debug, Deserialize)] pub struct Builder { @@ -17,7 +19,7 @@ pub struct Builder { pub unsigned: Option, - pub state_key: Option, + pub state_key: Option, pub redacts: Option, @@ -29,15 +31,16 @@ pub struct Builder { type Unsigned = BTreeMap; impl Builder { - pub fn state(state_key: String, content: &T) -> Self + pub fn state(state_key: S, content: &T) -> Self where T: EventContent, + S: Into, { Self { event_type: content.event_type().into(), content: to_raw_value(content) .expect("Builder failed to serialize state event content to RawValue"), - state_key: Some(state_key), + state_key: Some(state_key.into()), ..Self::default() } } diff --git a/src/core/pdu/mod.rs b/src/core/pdu/mod.rs index 1a8f6a70..9cb42239 100644 --- a/src/core/pdu/mod.rs +++ b/src/core/pdu/mod.rs @@ -8,6 +8,7 @@ mod id; mod raw_id; mod redact; mod relation; +mod state_key; mod strip; #[cfg(test)] mod tests; @@ -17,7 +18,7 @@ use std::cmp::Ordering; use ruma::{ events::TimelineEventType, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, - OwnedRoomId, OwnedUserId, UInt, + OwnedRoomId, OwnedServerName, OwnedUserId, UInt, }; use serde::{Deserialize, Serialize}; use serde_json::value::RawValue as RawJsonValue; @@ -29,6 +30,7 @@ pub use self::{ event_id::*, id::*, raw_id::*, + state_key::{ShortStateKey, StateKey}, Count as PduCount, Id as PduId, Pdu as PduEvent, RawId as RawPduId, }; use crate::Result; @@ -40,13 +42,13 @@ pub struct Pdu { pub room_id: OwnedRoomId, pub sender: OwnedUserId, #[serde(skip_serializing_if = "Option::is_none")] - pub origin: Option, + pub origin: Option, pub origin_server_ts: UInt, #[serde(rename = "type")] pub kind: TimelineEventType, pub content: Box, #[serde(skip_serializing_if = "Option::is_none")] - pub state_key: Option, + pub state_key: Option, pub prev_events: Vec, pub depth: UInt, pub auth_events: Vec, diff --git a/src/core/pdu/state_key.rs b/src/core/pdu/state_key.rs new file mode 100644 index 00000000..4af4fcf7 --- /dev/null +++ b/src/core/pdu/state_key.rs @@ -0,0 +1,8 @@ +use smallstr::SmallString; + +use super::ShortId; + +pub type StateKey = SmallString<[u8; INLINE_SIZE]>; +pub type ShortStateKey = ShortId; + +const INLINE_SIZE: usize = 48; diff --git a/src/database/Cargo.toml b/src/database/Cargo.toml index 557c9a3e..067c6f5f 100644 --- a/src/database/Cargo.toml +++ b/src/database/Cargo.toml @@ -34,7 +34,6 @@ zstd_compression = [ ] [dependencies] -arrayvec.workspace = true async-channel.workspace = true conduwuit-core.workspace = true const-str.workspace = true @@ -45,7 +44,6 @@ minicbor-serde.workspace = true rust-rocksdb.workspace = true serde.workspace = true serde_json.workspace = true -smallvec.workspace = true tokio.workspace = true tracing.workspace = true diff --git a/src/database/de.rs b/src/database/de.rs index 8e914fcc..441bb4ec 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -1,5 +1,6 @@ -use arrayvec::ArrayVec; -use conduwuit::{checked, debug::DebugInspect, err, utils::string, Error, Result}; +use conduwuit::{ + arrayvec::ArrayVec, checked, debug::DebugInspect, err, utils::string, Error, Result, +}; use serde::{ de, de::{DeserializeSeed, Visitor}, diff --git a/src/database/keyval.rs b/src/database/keyval.rs index 056e53d1..f572d15f 100644 --- a/src/database/keyval.rs +++ b/src/database/keyval.rs @@ -1,6 +1,5 @@ -use conduwuit::Result; +use conduwuit::{smallvec::SmallVec, Result}; use serde::{Deserialize, Serialize}; -use smallvec::SmallVec; use crate::{de, ser}; diff --git a/src/database/map/contains.rs b/src/database/map/contains.rs 
index 424f8970..7a09b358 100644 --- a/src/database/map/contains.rs +++ b/src/database/map/contains.rs @@ -1,7 +1,7 @@ use std::{convert::AsRef, fmt::Debug, future::Future, io::Write, sync::Arc}; -use arrayvec::ArrayVec; use conduwuit::{ + arrayvec::ArrayVec, err, implement, utils::{future::TryExtExt, result::FlatOk}, Result, diff --git a/src/database/map/insert.rs b/src/database/map/insert.rs index 68c305af..6f010097 100644 --- a/src/database/map/insert.rs +++ b/src/database/map/insert.rs @@ -5,8 +5,7 @@ use std::{convert::AsRef, fmt::Debug, io::Write}; -use arrayvec::ArrayVec; -use conduwuit::implement; +use conduwuit::{arrayvec::ArrayVec, implement}; use rocksdb::WriteBatchWithTransaction; use serde::Serialize; diff --git a/src/database/map/qry.rs b/src/database/map/qry.rs index 401eba43..178f4a61 100644 --- a/src/database/map/qry.rs +++ b/src/database/map/qry.rs @@ -1,7 +1,6 @@ use std::{convert::AsRef, fmt::Debug, io::Write, sync::Arc}; -use arrayvec::ArrayVec; -use conduwuit::{implement, Result}; +use conduwuit::{arrayvec::ArrayVec, implement, Result}; use futures::Future; use serde::Serialize; diff --git a/src/database/map/remove.rs b/src/database/map/remove.rs index ec37bbfe..a7ae9133 100644 --- a/src/database/map/remove.rs +++ b/src/database/map/remove.rs @@ -1,7 +1,6 @@ use std::{convert::AsRef, fmt::Debug, io::Write}; -use arrayvec::ArrayVec; -use conduwuit::implement; +use conduwuit::{arrayvec::ArrayVec, implement}; use serde::Serialize; use crate::{keyval::KeyBuf, ser, util::or_else}; diff --git a/src/database/pool.rs b/src/database/pool.rs index c753855a..7636ff5e 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -14,6 +14,7 @@ use async_channel::{QueueStrategy, Receiver, RecvError, Sender}; use conduwuit::{ debug, debug_warn, err, error, implement, result::DebugInspect, + smallvec::SmallVec, trace, utils::sys::compute::{get_affinity, nth_core_available, set_affinity}, Error, Result, Server, @@ -21,7 +22,6 @@ use conduwuit::{ use futures::{channel::oneshot, TryFutureExt}; use oneshot::Sender as ResultSender; use rocksdb::Direction; -use smallvec::SmallVec; use self::configure::configure; use crate::{keyval::KeyBuf, stream, Handle, Map}; diff --git a/src/database/tests.rs b/src/database/tests.rs index e6c85983..594170e8 100644 --- a/src/database/tests.rs +++ b/src/database/tests.rs @@ -2,8 +2,10 @@ use std::fmt::Debug; -use arrayvec::ArrayVec; -use conduwuit::ruma::{serde::Raw, EventId, RoomId, UserId}; +use conduwuit::{ + arrayvec::ArrayVec, + ruma::{serde::Raw, EventId, RoomId, UserId}, +}; use serde::Serialize; use crate::{ diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml index 30183179..caeea318 100644 --- a/src/service/Cargo.toml +++ b/src/service/Cargo.toml @@ -47,7 +47,6 @@ zstd_compression = [ blurhashing = ["dep:image","dep:blurhash"] [dependencies] -arrayvec.workspace = true async-trait.workspace = true base64.workspace = true bytes.workspace = true @@ -75,7 +74,6 @@ serde_json.workspace = true serde.workspace = true serde_yaml.workspace = true sha2.workspace = true -smallvec.workspace = true termimad.workspace = true termimad.optional = true tokio.workspace = true diff --git a/src/service/migrations.rs b/src/service/migrations.rs index 9c3ea293..69b1be4e 100644 --- a/src/service/migrations.rs +++ b/src/service/migrations.rs @@ -507,8 +507,10 @@ async fn fix_referencedevents_missing_sep(services: &Services) -> Result { } async fn fix_readreceiptid_readreceipt_duplicates(services: &Services) -> Result { + use conduwuit::arrayvec::ArrayString; 
use ruma::identifiers_validation::MAX_BYTES; - type ArrayId = arrayvec::ArrayString; + + type ArrayId = ArrayString; type Key<'a> = (&'a RoomId, u64, &'a UserId); warn!("Fixing undeleted entries in readreceiptid_readreceipt..."); diff --git a/src/service/resolver/cache.rs b/src/service/resolver/cache.rs index 22a92865..7b4f104d 100644 --- a/src/service/resolver/cache.rs +++ b/src/service/resolver/cache.rs @@ -1,7 +1,7 @@ use std::{net::IpAddr, sync::Arc, time::SystemTime}; -use arrayvec::ArrayVec; use conduwuit::{ + arrayvec::ArrayVec, at, err, implement, utils::{math::Expected, rand, stream::TryIgnore}, Result, diff --git a/src/service/resolver/fed.rs b/src/service/resolver/fed.rs index bfe100e7..e5bee9ac 100644 --- a/src/service/resolver/fed.rs +++ b/src/service/resolver/fed.rs @@ -4,8 +4,7 @@ use std::{ net::{IpAddr, SocketAddr}, }; -use arrayvec::ArrayString; -use conduwuit::utils::math::Expected; +use conduwuit::{arrayvec::ArrayString, utils::math::Expected}; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Deserialize, PartialEq, Eq, Serialize)] diff --git a/src/service/resolver/mod.rs b/src/service/resolver/mod.rs index 090e562d..6be9d42d 100644 --- a/src/service/resolver/mod.rs +++ b/src/service/resolver/mod.rs @@ -6,8 +6,7 @@ mod tests; use std::sync::Arc; -use arrayvec::ArrayString; -use conduwuit::{utils::MutexMap, Result, Server}; +use conduwuit::{arrayvec::ArrayString, utils::MutexMap, Result, Server}; use self::{cache::Cache, dns::Resolver}; use crate::{client, Dep}; diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index a35aabe0..b7c38313 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -6,10 +6,8 @@ use std::{ use conduwuit::{debug, debug_info, err, implement, trace, warn, Err, Error, PduEvent, Result}; use futures::{future::ready, TryFutureExt}; use ruma::{ - api::client::error::ErrorKind, - events::StateEventType, - state_res::{self, EventTypeExt}, - CanonicalJsonObject, CanonicalJsonValue, EventId, RoomId, ServerName, + api::client::error::ErrorKind, events::StateEventType, state_res, CanonicalJsonObject, + CanonicalJsonValue, EventId, RoomId, ServerName, }; use super::{check_room_id, get_room_version_id, to_room_version}; @@ -123,7 +121,7 @@ pub(super) async fn handle_outlier_pdu<'a>( // The original create event must be in the auth events if !matches!( auth_events - .get(&(StateEventType::RoomCreate, String::new())) + .get(&(StateEventType::RoomCreate, String::new().into())) .map(AsRef::as_ref), Some(_) | None ) { @@ -134,7 +132,7 @@ pub(super) async fn handle_outlier_pdu<'a>( } let state_fetch = |ty: &'static StateEventType, sk: &str| { - let key = ty.with_state_key(sk); + let key = (ty.to_owned(), sk.into()); ready(auth_events.get(&key)) }; diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 4d99b088..eb9ca01f 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -64,6 +64,7 @@ pub async fn resolve_state( .multi_get_statekey_from_short(shortstatekeys) .zip(event_ids) .ready_filter_map(|(ty_sk, id)| Some((ty_sk.ok()?, id))) + .map(|((ty, sk), id)| ((ty, sk.as_str().to_owned()), id)) .collect() }) .map(Ok::<_, Error>) diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 8ae6354c..7bf3b8f8 100644 --- 
a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -172,6 +172,7 @@ async fn state_at_incoming_fork( .short .get_statekey_from_short(*k) .map_ok(|(ty, sk)| ((ty, sk), id.clone())) + .map_ok(|((ty, sk), id)| ((ty, sk.as_str().to_owned()), id)) }) .ready_filter_map(Result::ok) .collect() diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index 2e6ecbb5..26e11ded 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -1,7 +1,7 @@ use std::{mem::size_of, sync::Arc}; -use arrayvec::ArrayVec; use conduwuit::{ + arrayvec::ArrayVec, result::LogErr, utils::{ stream::{TryIgnore, WidebandExt}, diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index 35cfd444..cc015237 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,7 +1,7 @@ use std::sync::Arc; -use arrayvec::ArrayVec; use conduwuit::{ + arrayvec::ArrayVec, implement, utils::{ set, diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index dd586d02..8728325a 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -1,7 +1,7 @@ use std::{borrow::Borrow, fmt::Debug, mem::size_of_val, sync::Arc}; -pub use conduwuit::pdu::{ShortEventId, ShortId, ShortRoomId}; -use conduwuit::{err, implement, utils, utils::IterStream, Result}; +pub use conduwuit::pdu::{ShortEventId, ShortId, ShortRoomId, ShortStateKey}; +use conduwuit::{err, implement, utils, utils::IterStream, Result, StateKey}; use database::{Deserialized, Get, Map, Qry}; use futures::{Stream, StreamExt}; use ruma::{events::StateEventType, EventId, RoomId}; @@ -28,7 +28,6 @@ struct Services { } pub type ShortStateHash = ShortId; -pub type ShortStateKey = ShortId; impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { @@ -181,7 +180,7 @@ where pub async fn get_statekey_from_short( &self, shortstatekey: ShortStateKey, -) -> Result<(StateEventType, String)> { +) -> Result<(StateEventType, StateKey)> { const BUFSIZE: usize = size_of::(); self.db @@ -200,7 +199,7 @@ pub async fn get_statekey_from_short( pub fn multi_get_statekey_from_short<'a, S>( &'a self, shortstatekey: S, -) -> impl Stream> + Send + 'a +) -> impl Stream> + Send + 'a where S: Stream + Send + 'a, { diff --git a/src/service/rooms/state_accessor/room_state.rs b/src/service/rooms/state_accessor/room_state.rs index 98a82cea..e3ec55fe 100644 --- a/src/service/rooms/state_accessor/room_state.rs +++ b/src/service/rooms/state_accessor/room_state.rs @@ -1,6 +1,6 @@ use std::borrow::Borrow; -use conduwuit::{err, implement, PduEvent, Result}; +use conduwuit::{err, implement, PduEvent, Result, StateKey}; use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{events::StateEventType, EventId, RoomId}; use serde::Deserialize; @@ -27,7 +27,7 @@ where pub fn room_state_full<'a>( &'a self, room_id: &'a RoomId, -) -> impl Stream> + Send + 'a { +) -> impl Stream> + Send + 'a { self.services .state .get_room_shortstatehash(room_id) diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs index 3cf168c1..da1500cb 100644 --- a/src/service/rooms/state_accessor/state.rs +++ b/src/service/rooms/state_accessor/state.rs @@ -6,7 +6,7 @@ use conduwuit::{ result::FlatOk, stream::{BroadbandExt, IterStream, ReadyExt, TryExpect}, }, - PduEvent, Result, + PduEvent, Result, StateKey, }; use database::Deserialized; use 
futures::{future::try_join, pin_mut, FutureExt, Stream, StreamExt, TryFutureExt}; @@ -192,7 +192,7 @@ pub fn state_keys_with_ids<'a, Id>( &'a self, shortstatehash: ShortStateHash, event_type: &'a StateEventType, -) -> impl Stream + Send + 'a +) -> impl Stream + Send + 'a where Id: for<'de> Deserialize<'de> + Send + Sized + ToOwned + 'a, ::Owned: Borrow, @@ -200,7 +200,7 @@ where let state_keys_with_short_ids = self .state_keys_with_shortids(shortstatehash, event_type) .unzip() - .map(|(ssks, sids): (Vec, Vec)| (ssks, sids)) + .map(|(ssks, sids): (Vec, Vec)| (ssks, sids)) .shared(); let state_keys = state_keys_with_short_ids @@ -230,7 +230,7 @@ pub fn state_keys_with_shortids<'a>( &'a self, shortstatehash: ShortStateHash, event_type: &'a StateEventType, -) -> impl Stream + Send + 'a { +) -> impl Stream + Send + 'a { let short_ids = self .state_full_shortids(shortstatehash) .expect_ok() @@ -267,7 +267,7 @@ pub fn state_keys<'a>( &'a self, shortstatehash: ShortStateHash, event_type: &'a StateEventType, -) -> impl Stream + Send + 'a { +) -> impl Stream + Send + 'a { let short_ids = self .state_full_shortids(shortstatehash) .expect_ok() @@ -314,7 +314,7 @@ pub fn state_added( pub fn state_full( &self, shortstatehash: ShortStateHash, -) -> impl Stream + Send + '_ { +) -> impl Stream + Send + '_ { self.state_full_pdus(shortstatehash) .ready_filter_map(|pdu| { Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu)) diff --git a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs index 725a4fba..0332c227 100644 --- a/src/service/rooms/state_accessor/user_can.rs +++ b/src/service/rooms/state_accessor/user_can.rs @@ -175,7 +175,7 @@ pub async fn user_can_invite( .timeline .create_hash_and_sign_event( PduBuilder::state( - target_user.into(), + target_user.as_str(), &RoomMemberEventContent::new(MembershipState::Invite), ), sender, diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 3d68dff6..18731809 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -5,8 +5,8 @@ use std::{ sync::{Arc, Mutex}, }; -use arrayvec::ArrayVec; use conduwuit::{ + arrayvec::ArrayVec, at, checked, err, expected, utils, utils::{bytes, math::usize_from_f64, stream::IterStream}, Result, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index a913034d..a7edd4a4 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -38,7 +38,7 @@ use ruma::{ push::{Action, Ruleset, Tweak}, state_res::{self, Event, RoomVersion}, uint, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, - OwnedServerName, OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, + OwnedServerName, RoomId, RoomVersionId, ServerName, UserId, }; use serde::Deserialize; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; @@ -387,10 +387,10 @@ impl Service { if pdu.kind == TimelineEventType::RoomMember { if let Some(state_key) = &pdu.state_key { - let target_user_id = OwnedUserId::parse(state_key)?; + let target_user_id = UserId::parse(state_key)?; - if self.services.users.is_active_local(&target_user_id).await { - push_target.insert(target_user_id); + if self.services.users.is_active_local(target_user_id).await { + push_target.insert(target_user_id.to_owned()); } } } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 86b219f7..b46ce7a8 100644 --- a/src/service/sending/mod.rs +++ 
b/src/service/sending/mod.rs @@ -13,6 +13,7 @@ use std::{ use async_trait::async_trait; use conduwuit::{ debug, debug_warn, err, error, + smallvec::SmallVec, utils::{available_parallelism, math::usize_from_u64_truncated, ReadyExt, TryReadyExt}, warn, Result, Server, }; @@ -21,7 +22,6 @@ use ruma::{ api::{appservice::Registration, OutgoingRequest}, RoomId, ServerName, UserId, }; -use smallvec::SmallVec; use tokio::{task, task::JoinSet}; use self::data::Data; From 0a9a9b3c92852cae269aaf2cb3894658b5e35a54 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 5 Feb 2025 12:22:22 +0000 Subject: [PATCH 059/310] larcen state-res from ruma --- Cargo.toml | 1 - src/api/client/membership.rs | 6 +- src/api/client/sync/v5.rs | 3 +- src/core/error/mod.rs | 2 +- src/core/mod.rs | 2 + src/core/pdu/event.rs | 2 +- src/core/state_res/LICENSE | 17 + src/core/state_res/error.rs | 23 + src/core/state_res/event_auth.rs | 1418 ++++++++++++++ src/core/state_res/mod.rs | 1644 +++++++++++++++++ src/core/state_res/outcomes.txt | 104 ++ src/core/state_res/power_levels.rs | 256 +++ src/core/state_res/room_version.rs | 149 ++ src/core/state_res/state_event.rs | 102 + src/core/state_res/state_res_bench.rs | 648 +++++++ src/core/state_res/test_utils.rs | 688 +++++++ src/service/rooms/event_handler/fetch_prev.rs | 11 +- .../rooms/event_handler/handle_outlier_pdu.rs | 6 +- src/service/rooms/event_handler/mod.rs | 6 +- .../rooms/event_handler/resolve_state.rs | 9 +- .../rooms/event_handler/state_at_incoming.rs | 4 +- .../event_handler/upgrade_outlier_pdu.rs | 10 +- src/service/rooms/state/mod.rs | 2 +- src/service/rooms/timeline/mod.rs | 2 +- 24 files changed, 5082 insertions(+), 33 deletions(-) create mode 100644 src/core/state_res/LICENSE create mode 100644 src/core/state_res/error.rs create mode 100644 src/core/state_res/event_auth.rs create mode 100644 src/core/state_res/mod.rs create mode 100644 src/core/state_res/outcomes.txt create mode 100644 src/core/state_res/power_levels.rs create mode 100644 src/core/state_res/room_version.rs create mode 100644 src/core/state_res/state_event.rs create mode 100644 src/core/state_res/state_res_bench.rs create mode 100644 src/core/state_res/test_utils.rs diff --git a/Cargo.toml b/Cargo.toml index b93877bd..d8f34544 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -355,7 +355,6 @@ features = [ "federation-api", "markdown", "push-gateway-api-c", - "state-res", "server-util", "unstable-exhaustive-types", "ring-compat", diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 449d44d5..1045b014 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -12,7 +12,7 @@ use conduwuit::{ at, debug, debug_info, debug_warn, err, info, pdu::{gen_event_id_canonical_json, PduBuilder}, result::FlatOk, - trace, + state_res, trace, utils::{self, shuffle, IterStream, ReadyExt}, warn, Err, PduEvent, Result, }; @@ -40,8 +40,8 @@ use ruma::{ }, StateEventType, }, - state_res, CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, - OwnedServerName, OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, + CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, + OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, }; use service::{ appservice::RegistrationInfo, diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index f8ee1047..63731688 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -11,7 +11,7 @@ use conduwuit::{ math::{ruma_from_usize, usize_from_ruma}, BoolExt, IterStream, 
ReadyExt, TryFutureExtExt, }, - warn, Error, Result, + warn, Error, Result, TypeStateKey, }; use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::{ @@ -24,7 +24,6 @@ use ruma::{ AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType, }, serde::Raw, - state_res::TypeStateKey, uint, DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, }; use service::{rooms::read_receipt::pack_receipts, PduCount}; diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index 88ac6d09..16613b7e 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -121,7 +121,7 @@ pub enum Error { #[error(transparent)] Signatures(#[from] ruma::signatures::Error), #[error(transparent)] - StateRes(#[from] ruma::state_res::Error), + StateRes(#[from] crate::state_res::Error), #[error("uiaa")] Uiaa(ruma::api::client::uiaa::UiaaInfo), diff --git a/src/core/mod.rs b/src/core/mod.rs index ee128628..cd56774a 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -8,6 +8,7 @@ pub mod metrics; pub mod mods; pub mod pdu; pub mod server; +pub mod state_res; pub mod utils; pub use ::arrayvec; @@ -22,6 +23,7 @@ pub use error::Error; pub use info::{rustc_flags_capture, version, version::version}; pub use pdu::{Event, PduBuilder, PduCount, PduEvent, PduId, RawPduId, StateKey}; pub use server::Server; +pub use state_res::{EventTypeExt, RoomVersion, StateMap, TypeStateKey}; pub use utils::{ctor, dtor, implement, result, result::Result}; pub use crate as conduwuit_core; diff --git a/src/core/pdu/event.rs b/src/core/pdu/event.rs index 6a92afe8..d5c0561e 100644 --- a/src/core/pdu/event.rs +++ b/src/core/pdu/event.rs @@ -1,8 +1,8 @@ -pub use ruma::state_res::Event; use ruma::{events::TimelineEventType, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId}; use serde_json::value::RawValue as RawJsonValue; use super::Pdu; +pub use crate::state_res::Event; impl Event for Pdu { type Id = OwnedEventId; diff --git a/src/core/state_res/LICENSE b/src/core/state_res/LICENSE new file mode 100644 index 00000000..c103a044 --- /dev/null +++ b/src/core/state_res/LICENSE @@ -0,0 +1,17 @@ +//! Permission is hereby granted, free of charge, to any person obtaining a copy +//! of this software and associated documentation files (the "Software"), to +//! deal in the Software without restriction, including without limitation the +//! rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +//! sell copies of the Software, and to permit persons to whom the Software is +//! furnished to do so, subject to the following conditions: + +//! The above copyright notice and this permission notice shall be included in +//! all copies or substantial portions of the Software. + +//! THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +//! IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +//! FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +//! AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +//! LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +//! FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +//! IN THE SOFTWARE. diff --git a/src/core/state_res/error.rs b/src/core/state_res/error.rs new file mode 100644 index 00000000..7711d878 --- /dev/null +++ b/src/core/state_res/error.rs @@ -0,0 +1,23 @@ +use serde_json::Error as JsonError; +use thiserror::Error; + +/// Represents the various errors that arise when resolving state. 
+#[derive(Error, Debug)] +#[non_exhaustive] +pub enum Error { + /// A deserialization error. + #[error(transparent)] + SerdeJson(#[from] JsonError), + + /// The given option or version is unsupported. + #[error("Unsupported room version: {0}")] + Unsupported(String), + + /// The given event was not found. + #[error("Not found error: {0}")] + NotFound(String), + + /// Invalid fields in the given PDU. + #[error("Invalid PDU: {0}")] + InvalidPdu(String), +} diff --git a/src/core/state_res/event_auth.rs b/src/core/state_res/event_auth.rs new file mode 100644 index 00000000..72a0216c --- /dev/null +++ b/src/core/state_res/event_auth.rs @@ -0,0 +1,1418 @@ +use std::{borrow::Borrow, collections::BTreeSet}; + +use futures::{ + future::{join3, OptionFuture}, + Future, +}; +use ruma::{ + events::room::{ + create::RoomCreateEventContent, + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, ThirdPartyInvite}, + power_levels::RoomPowerLevelsEventContent, + third_party_invite::RoomThirdPartyInviteEventContent, + }, + int, + serde::{Base64, Raw}, + Int, OwnedUserId, RoomVersionId, UserId, +}; +use serde::{ + de::{Error as _, IgnoredAny}, + Deserialize, +}; +use serde_json::{from_str as from_json_str, value::RawValue as RawJsonValue}; +use tracing::{debug, error, instrument, trace, warn}; + +use super::{ + power_levels::{ + deserialize_power_levels, deserialize_power_levels_content_fields, + deserialize_power_levels_content_invite, deserialize_power_levels_content_redact, + }, + room_version::RoomVersion, + Error, Event, Result, StateEventType, TimelineEventType, +}; + +// FIXME: field extracting could be bundled for `content` +#[derive(Deserialize)] +struct GetMembership { + membership: MembershipState, +} + +#[derive(Deserialize)] +struct RoomMemberContentFields { + membership: Option>, + join_authorised_via_users_server: Option>, +} + +/// For the given event `kind` what are the relevant auth events that are needed +/// to authenticate this `content`. +/// +/// # Errors +/// +/// This function will return an error if the supplied `content` is not a JSON +/// object. 
+pub fn auth_types_for_event( + kind: &TimelineEventType, + sender: &UserId, + state_key: Option<&str>, + content: &RawJsonValue, +) -> serde_json::Result> { + if kind == &TimelineEventType::RoomCreate { + return Ok(vec![]); + } + + let mut auth_types = vec![ + (StateEventType::RoomPowerLevels, String::new()), + (StateEventType::RoomMember, sender.to_string()), + (StateEventType::RoomCreate, String::new()), + ]; + + if kind == &TimelineEventType::RoomMember { + #[derive(Deserialize)] + struct RoomMemberContentFields { + membership: Option>, + third_party_invite: Option>, + join_authorised_via_users_server: Option>, + } + + if let Some(state_key) = state_key { + let content: RoomMemberContentFields = from_json_str(content.get())?; + + if let Some(Ok(membership)) = content.membership.map(|m| m.deserialize()) { + if [MembershipState::Join, MembershipState::Invite, MembershipState::Knock] + .contains(&membership) + { + let key = (StateEventType::RoomJoinRules, String::new()); + if !auth_types.contains(&key) { + auth_types.push(key); + } + + if let Some(Ok(u)) = content + .join_authorised_via_users_server + .map(|m| m.deserialize()) + { + let key = (StateEventType::RoomMember, u.to_string()); + if !auth_types.contains(&key) { + auth_types.push(key); + } + } + } + + let key = (StateEventType::RoomMember, state_key.to_owned()); + if !auth_types.contains(&key) { + auth_types.push(key); + } + + if membership == MembershipState::Invite { + if let Some(Ok(t_id)) = content.third_party_invite.map(|t| t.deserialize()) { + let key = (StateEventType::RoomThirdPartyInvite, t_id.signed.token); + if !auth_types.contains(&key) { + auth_types.push(key); + } + } + } + } + } + } + + Ok(auth_types) +} + +/// Authenticate the incoming `event`. +/// +/// The steps of authentication are: +/// +/// * check that the event is being authenticated for the correct room +/// * then there are checks for specific event types +/// +/// The `fetch_state` closure should gather state from a state snapshot. We need +/// to know if the event passes auth against some state not a recursive +/// collection of auth_events fields. +#[instrument(level = "debug", skip_all, fields(event_id = incoming_event.event_id().borrow().as_str()))] +pub async fn auth_check( + room_version: &RoomVersion, + incoming_event: &Incoming, + current_third_party_invite: Option<&Incoming>, + fetch_state: F, +) -> Result +where + F: Fn(&'static StateEventType, &str) -> Fut, + Fut: Future> + Send, + Fetched: Event + Send, + Incoming: Event + Send, +{ + debug!( + "auth_check beginning for {} ({})", + incoming_event.event_id(), + incoming_event.event_type() + ); + + // [synapse] check that all the events are in the same room as `incoming_event` + + // [synapse] do_sig_check check the event has valid signatures for member events + + // TODO do_size_check is false when called by `iterative_auth_check` + // do_size_check is also mostly accomplished by ruma with the exception of + // checking event_type, state_key, and json are below a certain size (255 and + // 65_536 respectively) + + let sender = incoming_event.sender(); + + // Implementation of https://spec.matrix.org/latest/rooms/v1/#authorization-rules + // + // 1. 
If type is m.room.create: + if *incoming_event.event_type() == TimelineEventType::RoomCreate { + #[derive(Deserialize)] + struct RoomCreateContentFields { + room_version: Option>, + creator: Option>, + } + + debug!("start m.room.create check"); + + // If it has any previous events, reject + if incoming_event.prev_events().next().is_some() { + warn!("the room creation event had previous events"); + return Ok(false); + } + + // If the domain of the room_id does not match the domain of the sender, reject + let Some(room_id_server_name) = incoming_event.room_id().server_name() else { + warn!("room ID has no servername"); + return Ok(false); + }; + + if room_id_server_name != sender.server_name() { + warn!("servername of room ID does not match servername of sender"); + return Ok(false); + } + + // If content.room_version is present and is not a recognized version, reject + let content: RoomCreateContentFields = from_json_str(incoming_event.content().get())?; + if content + .room_version + .is_some_and(|v| v.deserialize().is_err()) + { + warn!("invalid room version found in m.room.create event"); + return Ok(false); + } + + if !room_version.use_room_create_sender { + // If content has no creator field, reject + if content.creator.is_none() { + warn!("no creator field found in m.room.create content"); + return Ok(false); + } + } + + debug!("m.room.create event was allowed"); + return Ok(true); + } + + /* + // TODO: In the past this code caused problems federating with synapse, maybe this has been + // resolved already. Needs testing. + // + // 2. Reject if auth_events + // a. auth_events cannot have duplicate keys since it's a BTree + // b. All entries are valid auth events according to spec + let expected_auth = auth_types_for_event( + incoming_event.kind, + sender, + incoming_event.state_key, + incoming_event.content().clone(), + ); + + dbg!(&expected_auth); + + for ev_key in auth_events.keys() { + // (b) + if !expected_auth.contains(ev_key) { + warn!("auth_events contained invalid auth event"); + return Ok(false); + } + } + */ + + let (room_create_event, power_levels_event, sender_member_event) = join3( + fetch_state(&StateEventType::RoomCreate, ""), + fetch_state(&StateEventType::RoomPowerLevels, ""), + fetch_state(&StateEventType::RoomMember, sender.as_str()), + ) + .await; + + let room_create_event = match room_create_event { + | None => { + warn!("no m.room.create event in auth chain"); + return Ok(false); + }, + | Some(e) => e, + }; + + // 3. If event does not have m.room.create in auth_events reject + if !incoming_event + .auth_events() + .any(|id| id.borrow() == room_create_event.event_id().borrow()) + { + warn!("no m.room.create event in auth events"); + return Ok(false); + } + + // If the create event content has the field m.federate set to false and the + // sender domain of the event does not match the sender domain of the create + // event, reject. + #[derive(Deserialize)] + struct RoomCreateContentFederate { + #[serde(rename = "m.federate", default = "ruma::serde::default_true")] + federate: bool, + } + let room_create_content: RoomCreateContentFederate = + from_json_str(room_create_event.content().get())?; + if !room_create_content.federate + && room_create_event.sender().server_name() != incoming_event.sender().server_name() + { + warn!( + "room is not federated and event's sender domain does not match create event's \ + sender domain" + ); + return Ok(false); + } + + // Only in some room versions 6 and below + if room_version.special_case_aliases_auth { + // 4. 
If type is m.room.aliases + if *incoming_event.event_type() == TimelineEventType::RoomAliases { + debug!("starting m.room.aliases check"); + + // If sender's domain doesn't matches state_key, reject + if incoming_event.state_key() != Some(sender.server_name().as_str()) { + warn!("state_key does not match sender"); + return Ok(false); + } + + debug!("m.room.aliases event was allowed"); + return Ok(true); + } + } + + // If type is m.room.member + if *incoming_event.event_type() == TimelineEventType::RoomMember { + debug!("starting m.room.member check"); + let state_key = match incoming_event.state_key() { + | None => { + warn!("no statekey in member event"); + return Ok(false); + }, + | Some(s) => s, + }; + + let content: RoomMemberContentFields = from_json_str(incoming_event.content().get())?; + if content + .membership + .as_ref() + .and_then(|m| m.deserialize().ok()) + .is_none() + { + warn!("no valid membership field found for m.room.member event content"); + return Ok(false); + } + + let target_user = + <&UserId>::try_from(state_key).map_err(|e| Error::InvalidPdu(format!("{e}")))?; + + let user_for_join_auth = content + .join_authorised_via_users_server + .as_ref() + .and_then(|u| u.deserialize().ok()); + + let user_for_join_auth_event: OptionFuture<_> = user_for_join_auth + .as_ref() + .map(|auth_user| fetch_state(&StateEventType::RoomMember, auth_user.as_str())) + .into(); + + let target_user_member_event = + fetch_state(&StateEventType::RoomMember, target_user.as_str()); + + let join_rules_event = fetch_state(&StateEventType::RoomJoinRules, ""); + + let (join_rules_event, target_user_member_event, user_for_join_auth_event) = + join3(join_rules_event, target_user_member_event, user_for_join_auth_event).await; + + let user_for_join_auth_membership = user_for_join_auth_event + .and_then(|mem| from_json_str::(mem?.content().get()).ok()) + .map_or(MembershipState::Leave, |mem| mem.membership); + + if !valid_membership_change( + room_version, + target_user, + target_user_member_event.as_ref(), + sender, + sender_member_event.as_ref(), + incoming_event, + current_third_party_invite, + power_levels_event.as_ref(), + join_rules_event.as_ref(), + user_for_join_auth.as_deref(), + &user_for_join_auth_membership, + room_create_event, + )? 
{ + return Ok(false); + } + + debug!("m.room.member event was allowed"); + return Ok(true); + } + + // If the sender's current membership state is not join, reject + let sender_member_event = match sender_member_event { + | Some(mem) => mem, + | None => { + warn!("sender not found in room"); + return Ok(false); + }, + }; + + let sender_membership_event_content: RoomMemberContentFields = + from_json_str(sender_member_event.content().get())?; + let membership_state = sender_membership_event_content + .membership + .expect("we should test before that this field exists") + .deserialize()?; + + if !matches!(membership_state, MembershipState::Join) { + warn!("sender's membership is not join"); + return Ok(false); + } + + // If type is m.room.third_party_invite + let sender_power_level = if let Some(pl) = &power_levels_event { + let content = deserialize_power_levels_content_fields(pl.content().get(), room_version)?; + if let Some(level) = content.get_user_power(sender) { + *level + } else { + content.users_default + } + } else { + // If no power level event found the creator gets 100 everyone else gets 0 + let is_creator = if room_version.use_room_create_sender { + room_create_event.sender() == sender + } else { + #[allow(deprecated)] + from_json_str::(room_create_event.content().get()) + .is_ok_and(|create| create.creator.unwrap() == *sender) + }; + + if is_creator { + int!(100) + } else { + int!(0) + } + }; + + // Allow if and only if sender's current power level is greater than + // or equal to the invite level + if *incoming_event.event_type() == TimelineEventType::RoomThirdPartyInvite { + let invite_level = match &power_levels_event { + | Some(power_levels) => + deserialize_power_levels_content_invite( + power_levels.content().get(), + room_version, + )? + .invite, + | None => int!(0), + }; + + if sender_power_level < invite_level { + warn!("sender's cannot send invites in this room"); + return Ok(false); + } + + debug!("m.room.third_party_invite event was allowed"); + return Ok(true); + } + + // If the event type's required power level is greater than the sender's power + // level, reject If the event has a state_key that starts with an @ and does + // not match the sender, reject. + if !can_send_event(incoming_event, power_levels_event.as_ref(), sender_power_level) { + warn!("user cannot send event"); + return Ok(false); + } + + // If type is m.room.power_levels + if *incoming_event.event_type() == TimelineEventType::RoomPowerLevels { + debug!("starting m.room.power_levels check"); + + if let Some(required_pwr_lvl) = check_power_levels( + room_version, + incoming_event, + power_levels_event.as_ref(), + sender_power_level, + ) { + if !required_pwr_lvl { + warn!("m.room.power_levels was not allowed"); + return Ok(false); + } + } else { + warn!("m.room.power_levels was not allowed"); + return Ok(false); + } + debug!("m.room.power_levels event allowed"); + } + + // Room version 3: Redaction events are always accepted (provided the event is + // allowed by `events` and `events_default` in the power levels). However, + // servers should not apply or send redaction's to clients until both the + // redaction event and original event have been seen, and are valid. Servers + // should only apply redaction's to events where the sender's domains match, or + // the sender of the redaction has the appropriate permissions per the + // power levels. 
+ + if room_version.extra_redaction_checks + && *incoming_event.event_type() == TimelineEventType::RoomRedaction + { + let redact_level = match power_levels_event { + | Some(pl) => + deserialize_power_levels_content_redact(pl.content().get(), room_version)?.redact, + | None => int!(50), + }; + + if !check_redaction(room_version, incoming_event, sender_power_level, redact_level)? { + return Ok(false); + } + } + + debug!("allowing event passed all checks"); + Ok(true) +} + +// TODO deserializing the member, power, join_rules event contents is done in +// conduit just before this is called. Could they be passed in? +/// Does the user who sent this member event have required power levels to do +/// so. +/// +/// * `user` - Information about the membership event and user making the +/// request. +/// * `auth_events` - The set of auth events that relate to a membership event. +/// +/// This is generated by calling `auth_types_for_event` with the membership +/// event and the current State. +#[allow(clippy::too_many_arguments)] +fn valid_membership_change( + room_version: &RoomVersion, + target_user: &UserId, + target_user_membership_event: Option, + sender: &UserId, + sender_membership_event: Option, + current_event: impl Event, + current_third_party_invite: Option, + power_levels_event: Option, + join_rules_event: Option, + user_for_join_auth: Option<&UserId>, + user_for_join_auth_membership: &MembershipState, + create_room: impl Event, +) -> Result { + #[derive(Deserialize)] + struct GetThirdPartyInvite { + third_party_invite: Option>, + } + let content = current_event.content(); + + let target_membership = from_json_str::(content.get())?.membership; + let third_party_invite = + from_json_str::(content.get())?.third_party_invite; + + let sender_membership = match &sender_membership_event { + | Some(pdu) => from_json_str::(pdu.content().get())?.membership, + | None => MembershipState::Leave, + }; + let sender_is_joined = sender_membership == MembershipState::Join; + + let target_user_current_membership = match &target_user_membership_event { + | Some(pdu) => from_json_str::(pdu.content().get())?.membership, + | None => MembershipState::Leave, + }; + + let power_levels: RoomPowerLevelsEventContent = match &power_levels_event { + | Some(ev) => from_json_str(ev.content().get())?, + | None => RoomPowerLevelsEventContent::default(), + }; + + let sender_power = power_levels + .users + .get(sender) + .or_else(|| sender_is_joined.then_some(&power_levels.users_default)); + + let target_power = power_levels.users.get(target_user).or_else(|| { + (target_membership == MembershipState::Join).then_some(&power_levels.users_default) + }); + + let mut join_rules = JoinRule::Invite; + if let Some(jr) = &join_rules_event { + join_rules = from_json_str::(jr.content().get())?.join_rule; + } + + let power_levels_event_id = power_levels_event.as_ref().map(Event::event_id); + let sender_membership_event_id = sender_membership_event.as_ref().map(Event::event_id); + let target_user_membership_event_id = + target_user_membership_event.as_ref().map(Event::event_id); + + let user_for_join_auth_is_valid = if let Some(user_for_join_auth) = user_for_join_auth { + // Is the authorised user allowed to invite users into this room + let (auth_user_pl, invite_level) = if let Some(pl) = &power_levels_event { + // TODO Refactor all powerlevel parsing + let invite = + deserialize_power_levels_content_invite(pl.content().get(), room_version)?.invite; + + let content = + deserialize_power_levels_content_fields(pl.content().get(), 
room_version)?; + let user_pl = if let Some(level) = content.get_user_power(user_for_join_auth) { + *level + } else { + content.users_default + }; + + (user_pl, invite) + } else { + (int!(0), int!(0)) + }; + (user_for_join_auth_membership == &MembershipState::Join) + && (auth_user_pl >= invite_level) + } else { + // No auth user was given + false + }; + + Ok(match target_membership { + | MembershipState::Join => { + // 1. If the only previous event is an m.room.create and the state_key is the + // creator, + // allow + let mut prev_events = current_event.prev_events(); + + let prev_event_is_create_event = prev_events + .next() + .is_some_and(|event_id| event_id.borrow() == create_room.event_id().borrow()); + let no_more_prev_events = prev_events.next().is_none(); + + if prev_event_is_create_event && no_more_prev_events { + let is_creator = if room_version.use_room_create_sender { + let creator = create_room.sender(); + + creator == sender && creator == target_user + } else { + #[allow(deprecated)] + let creator = from_json_str::(create_room.content().get())? + .creator + .ok_or_else(|| serde_json::Error::missing_field("creator"))?; + + creator == sender && creator == target_user + }; + + if is_creator { + return Ok(true); + } + } + + if sender != target_user { + // If the sender does not match state_key, reject. + warn!("Can't make other user join"); + false + } else if target_user_current_membership == MembershipState::Ban { + // If the sender is banned, reject. + warn!(?target_user_membership_event_id, "Banned user can't join"); + false + } else if (join_rules == JoinRule::Invite + || room_version.allow_knocking && join_rules == JoinRule::Knock) + // If the join_rule is invite then allow if membership state is invite or join + && (target_user_current_membership == MembershipState::Join + || target_user_current_membership == MembershipState::Invite) + { + true + } else if room_version.restricted_join_rules + && matches!(join_rules, JoinRule::Restricted(_)) + || room_version.knock_restricted_join_rule + && matches!(join_rules, JoinRule::KnockRestricted(_)) + { + // If the join_rule is restricted or knock_restricted + if matches!( + target_user_current_membership, + MembershipState::Invite | MembershipState::Join + ) { + // If membership state is join or invite, allow. + true + } else { + // If the join_authorised_via_users_server key in content is not a user with + // sufficient permission to invite other users, reject. + // Otherwise, allow. + user_for_join_auth_is_valid + } + } else { + // If the join_rule is public, allow. + // Otherwise, reject. 
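+					// For instance, a join attempt against an invite-only room by a
+					// user who was never invited falls through to here and is
+					// rejected, since the join rule is not `public`.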
+ join_rules == JoinRule::Public + } + }, + | MembershipState::Invite => { + // If content has third_party_invite key + if let Some(tp_id) = third_party_invite.and_then(|i| i.deserialize().ok()) { + if target_user_current_membership == MembershipState::Ban { + warn!(?target_user_membership_event_id, "Can't invite banned user"); + false + } else { + let allow = verify_third_party_invite( + Some(target_user), + sender, + &tp_id, + current_third_party_invite, + ); + if !allow { + warn!("Third party invite invalid"); + } + allow + } + } else if !sender_is_joined + || target_user_current_membership == MembershipState::Join + || target_user_current_membership == MembershipState::Ban + { + warn!( + ?target_user_membership_event_id, + ?sender_membership_event_id, + "Can't invite user if sender not joined or the user is currently joined or \ + banned", + ); + false + } else { + let allow = sender_power + .filter(|&p| p >= &power_levels.invite) + .is_some(); + if !allow { + warn!( + ?target_user_membership_event_id, + ?power_levels_event_id, + "User does not have enough power to invite", + ); + } + allow + } + }, + | MembershipState::Leave => + if sender == target_user { + let allow = target_user_current_membership == MembershipState::Join + || target_user_current_membership == MembershipState::Invite + || target_user_current_membership == MembershipState::Knock; + if !allow { + warn!( + ?target_user_membership_event_id, + ?target_user_current_membership, + "Can't leave if sender is not already invited, knocked, or joined" + ); + } + allow + } else if !sender_is_joined + || target_user_current_membership == MembershipState::Ban + && sender_power.filter(|&p| p < &power_levels.ban).is_some() + { + warn!( + ?target_user_membership_event_id, + ?sender_membership_event_id, + "Can't kick if sender not joined or user is already banned", + ); + false + } else { + let allow = sender_power.filter(|&p| p >= &power_levels.kick).is_some() + && target_power < sender_power; + if !allow { + warn!( + ?target_user_membership_event_id, + ?power_levels_event_id, + "User does not have enough power to kick", + ); + } + allow + }, + | MembershipState::Ban => + if !sender_is_joined { + warn!(?sender_membership_event_id, "Can't ban user if sender is not joined"); + false + } else { + let allow = sender_power.filter(|&p| p >= &power_levels.ban).is_some() + && target_power < sender_power; + if !allow { + warn!( + ?target_user_membership_event_id, + ?power_levels_event_id, + "User does not have enough power to ban", + ); + } + allow + }, + | MembershipState::Knock if room_version.allow_knocking => { + // 1. If the `join_rule` is anything other than `knock` or `knock_restricted`, + // reject. + if !matches!(join_rules, JoinRule::KnockRestricted(_) | JoinRule::Knock) { + warn!( + "Join rule is not set to knock or knock_restricted, knocking is not allowed" + ); + false + } else if matches!(join_rules, JoinRule::KnockRestricted(_)) + && !room_version.knock_restricted_join_rule + { + // 2. If the `join_rule` is `knock_restricted`, but the room does not support + // `knock_restricted`, reject. + warn!( + "Join rule is set to knock_restricted but room version does not support \ + knock_restricted, knocking is not allowed" + ); + false + } else if sender != target_user { + // 3. If `sender` does not match `state_key`, reject. 
+ warn!( + ?sender, + ?target_user, + "Can't make another user knock, sender did not match target" + ); + false + } else if matches!( + sender_membership, + MembershipState::Ban | MembershipState::Invite | MembershipState::Join + ) { + // 4. If the `sender`'s current membership is not `ban`, `invite`, or `join`, + // allow. + // 5. Otherwise, reject. + warn!( + ?target_user_membership_event_id, + "Knocking with a membership state of ban, invite or join is invalid", + ); + false + } else { + true + } + }, + | _ => { + warn!("Unknown membership transition"); + false + }, + }) +} + +/// Is the user allowed to send a specific event based on the rooms power +/// levels. +/// +/// Does the event have the correct userId as its state_key if it's not the "" +/// state_key. +fn can_send_event(event: impl Event, ple: Option, user_level: Int) -> bool { + let event_type_power_level = get_send_level(event.event_type(), event.state_key(), ple); + + debug!( + required_level = i64::from(event_type_power_level), + user_level = i64::from(user_level), + state_key = ?event.state_key(), + "permissions factors", + ); + + if user_level < event_type_power_level { + return false; + } + + if event.state_key().is_some_and(|k| k.starts_with('@')) + && event.state_key() != Some(event.sender().as_str()) + { + return false; // permission required to post in this room + } + + true +} + +/// Confirm that the event sender has the required power levels. +fn check_power_levels( + room_version: &RoomVersion, + power_event: impl Event, + previous_power_event: Option, + user_level: Int, +) -> Option { + match power_event.state_key() { + | Some("") => {}, + | Some(key) => { + error!(state_key = key, "m.room.power_levels event has non-empty state key"); + return None; + }, + | None => { + error!("check_power_levels requires an m.room.power_levels *state* event argument"); + return None; + }, + } + + // - If any of the keys users_default, events_default, state_default, ban, + // redact, kick, or invite in content are present and not an integer, reject. + // - If either of the keys events or notifications in content are present and + // not a dictionary with values that are integers, reject. + // - If users key in content is not a dictionary with keys that are valid user + // IDs with values that are integers, reject. 
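+	// Note that older room versions tolerated stringified integers in these
+	// fields, which is why deserialization below is room-version aware; newer
+	// versions reject non-integer values outright.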
+ let user_content: RoomPowerLevelsEventContent = + deserialize_power_levels(power_event.content().get(), room_version)?; + + // Validation of users is done in Ruma, synapse for loops validating user_ids + // and integers here + debug!("validation of power event finished"); + + let current_state = match previous_power_event { + | Some(current_state) => current_state, + // If there is no previous m.room.power_levels event in the room, allow + | None => return Some(true), + }; + + let current_content: RoomPowerLevelsEventContent = + deserialize_power_levels(current_state.content().get(), room_version)?; + + let mut user_levels_to_check = BTreeSet::new(); + let old_list = ¤t_content.users; + let user_list = &user_content.users; + for user in old_list.keys().chain(user_list.keys()) { + let user: &UserId = user; + user_levels_to_check.insert(user); + } + + trace!(set = ?user_levels_to_check, "user levels to check"); + + let mut event_levels_to_check = BTreeSet::new(); + let old_list = ¤t_content.events; + let new_list = &user_content.events; + for ev_id in old_list.keys().chain(new_list.keys()) { + event_levels_to_check.insert(ev_id); + } + + trace!(set = ?event_levels_to_check, "event levels to check"); + + let old_state = ¤t_content; + let new_state = &user_content; + + // synapse does not have to split up these checks since we can't combine UserIds + // and EventTypes we do 2 loops + + // UserId loop + for user in user_levels_to_check { + let old_level = old_state.users.get(user); + let new_level = new_state.users.get(user); + if old_level.is_some() && new_level.is_some() && old_level == new_level { + continue; + } + + // If the current value is equal to the sender's current power level, reject + if user != power_event.sender() && old_level == Some(&user_level) { + warn!("m.room.power_level cannot remove ops == to own"); + return Some(false); // cannot remove ops level == to own + } + + // If the current value is higher than the sender's current power level, reject + // If the new value is higher than the sender's current power level, reject + let old_level_too_big = old_level > Some(&user_level); + let new_level_too_big = new_level > Some(&user_level); + if old_level_too_big || new_level_too_big { + warn!("m.room.power_level failed to add ops > than own"); + return Some(false); // cannot add ops greater than own + } + } + + // EventType loop + for ev_type in event_levels_to_check { + let old_level = old_state.events.get(ev_type); + let new_level = new_state.events.get(ev_type); + if old_level.is_some() && new_level.is_some() && old_level == new_level { + continue; + } + + // If the current value is higher than the sender's current power level, reject + // If the new value is higher than the sender's current power level, reject + let old_level_too_big = old_level > Some(&user_level); + let new_level_too_big = new_level > Some(&user_level); + if old_level_too_big || new_level_too_big { + warn!("m.room.power_level failed to add ops > than own"); + return Some(false); // cannot add ops greater than own + } + } + + // Notifications, currently there is only @room + if room_version.limit_notifications_power_levels { + let old_level = old_state.notifications.room; + let new_level = new_state.notifications.room; + if old_level != new_level { + // If the current value is higher than the sender's current power level, reject + // If the new value is higher than the sender's current power level, reject + let old_level_too_big = old_level > user_level; + let new_level_too_big = new_level > user_level; + 
if old_level_too_big || new_level_too_big { + warn!("m.room.power_level failed to add ops > than own"); + return Some(false); // cannot add ops greater than own + } + } + } + + let levels = [ + "users_default", + "events_default", + "state_default", + "ban", + "redact", + "kick", + "invite", + ]; + let old_state = serde_json::to_value(old_state).unwrap(); + let new_state = serde_json::to_value(new_state).unwrap(); + for lvl_name in &levels { + if let Some((old_lvl, new_lvl)) = get_deserialize_levels(&old_state, &new_state, lvl_name) + { + let old_level_too_big = old_lvl > user_level; + let new_level_too_big = new_lvl > user_level; + + if old_level_too_big || new_level_too_big { + warn!("cannot add ops > than own"); + return Some(false); + } + } + } + + Some(true) +} + +fn get_deserialize_levels( + old: &serde_json::Value, + new: &serde_json::Value, + name: &str, +) -> Option<(Int, Int)> { + Some(( + serde_json::from_value(old.get(name)?.clone()).ok()?, + serde_json::from_value(new.get(name)?.clone()).ok()?, + )) +} + +/// Does the event redacting come from a user with enough power to redact the +/// given event. +fn check_redaction( + _room_version: &RoomVersion, + redaction_event: impl Event, + user_level: Int, + redact_level: Int, +) -> Result { + if user_level >= redact_level { + debug!("redaction allowed via power levels"); + return Ok(true); + } + + // If the domain of the event_id of the event being redacted is the same as the + // domain of the event_id of the m.room.redaction, allow + if redaction_event.event_id().borrow().server_name() + == redaction_event + .redacts() + .as_ref() + .and_then(|&id| id.borrow().server_name()) + { + debug!("redaction event allowed via room version 1 rules"); + return Ok(true); + } + + Ok(false) +} + +/// Helper function to fetch the power level needed to send an event of type +/// `e_type` based on the rooms "m.room.power_level" event. +fn get_send_level( + e_type: &TimelineEventType, + state_key: Option<&str>, + power_lvl: Option, +) -> Int { + power_lvl + .and_then(|ple| { + from_json_str::(ple.content().get()) + .map(|content| { + content.events.get(e_type).copied().unwrap_or_else(|| { + if state_key.is_some() { + content.state_default + } else { + content.events_default + } + }) + }) + .ok() + }) + .unwrap_or_else(|| if state_key.is_some() { int!(50) } else { int!(0) }) +} + +fn verify_third_party_invite( + target_user: Option<&UserId>, + sender: &UserId, + tp_id: &ThirdPartyInvite, + current_third_party_invite: Option, +) -> bool { + // 1. Check for user being banned happens before this is called + // checking for mxid and token keys is done by ruma when deserializing + + // The state key must match the invitee + if target_user != Some(&tp_id.signed.mxid) { + return false; + } + + // If there is no m.room.third_party_invite event in the current room state with + // state_key matching token, reject + let current_tpid = match current_third_party_invite { + | Some(id) => id, + | None => return false, + }; + + if current_tpid.state_key() != Some(&tp_id.signed.token) { + return false; + } + + if sender != current_tpid.sender() { + return false; + } + + // If any signature in signed matches any public key in the + // m.room.third_party_invite event, allow + let tpid_ev = + match from_json_str::(current_tpid.content().get()) { + | Ok(ev) => ev, + | Err(_) => return false, + }; + + let decoded_invite_token = match Base64::parse(&tp_id.signed.token) { + | Ok(tok) => tok, + // FIXME: Log a warning? 
+ | Err(_) => return false, + }; + + // A list of public keys in the public_keys field + for key in tpid_ev.public_keys.unwrap_or_default() { + if key.public_key == decoded_invite_token { + return true; + } + } + + // A single public key in the public_key field + tpid_ev.public_key == decoded_invite_token +} + +#[cfg(test)] +mod tests { + use std::sync::Arc; + + use ruma_events::{ + room::{ + join_rules::{ + AllowRule, JoinRule, Restricted, RoomJoinRulesEventContent, RoomMembership, + }, + member::{MembershipState, RoomMemberEventContent}, + }, + StateEventType, TimelineEventType, + }; + use serde_json::value::to_raw_value as to_raw_json_value; + + use crate::{ + event_auth::valid_membership_change, + test_utils::{ + alice, charlie, ella, event_id, member_content_ban, member_content_join, room_id, + to_pdu_event, PduEvent, INITIAL_EVENTS, INITIAL_EVENTS_CREATE_ROOM, + }, + Event, EventTypeExt, RoomVersion, StateMap, + }; + + #[test] + fn test_ban_pass() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let events = INITIAL_EVENTS(); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + alice(), + TimelineEventType::RoomMember, + Some(charlie().as_str()), + member_content_ban(), + &[], + &["IMC"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = charlie(); + let sender = alice(); + + assert!(valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.to_string()), + sender, + fetch_state(StateEventType::RoomMember, sender.to_string()), + &requester, + None::, + fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), + fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None, + &MembershipState::Leave, + fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + ) + .unwrap()); + } + + #[test] + fn test_join_non_creator() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let events = INITIAL_EVENTS_CREATE_ROOM(); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().as_str()), + member_content_join(), + &["CREATE"], + &["CREATE"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = charlie(); + let sender = charlie(); + + assert!(!valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.to_string()), + sender, + fetch_state(StateEventType::RoomMember, sender.to_string()), + &requester, + None::, + fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), + fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None, + &MembershipState::Leave, + fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + ) + .unwrap()); + } + + #[test] + fn test_join_creator() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let events = INITIAL_EVENTS_CREATE_ROOM(); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + alice(), + 
TimelineEventType::RoomMember, + Some(alice().as_str()), + member_content_join(), + &["CREATE"], + &["CREATE"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = alice(); + let sender = alice(); + + assert!(valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.to_string()), + sender, + fetch_state(StateEventType::RoomMember, sender.to_string()), + &requester, + None::, + fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), + fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None, + &MembershipState::Leave, + fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + ) + .unwrap()); + } + + #[test] + fn test_ban_fail() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let events = INITIAL_EVENTS(); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + charlie(), + TimelineEventType::RoomMember, + Some(alice().as_str()), + member_content_ban(), + &[], + &["IMC"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = alice(); + let sender = charlie(); + + assert!(!valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.to_string()), + sender, + fetch_state(StateEventType::RoomMember, sender.to_string()), + &requester, + None::, + fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), + fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None, + &MembershipState::Leave, + fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + ) + .unwrap()); + } + + #[test] + fn test_restricted_join_rule() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let mut events = INITIAL_EVENTS(); + *events.get_mut(&event_id("IJR")).unwrap() = to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Restricted( + Restricted::new(vec![AllowRule::RoomMembership(RoomMembership::new( + room_id().to_owned(), + ))]), + ))) + .unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["IPOWER"], + ); + + let mut member = RoomMemberEventContent::new(MembershipState::Join); + member.join_authorized_via_users_server = Some(alice().to_owned()); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + ella(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Join)).unwrap(), + &["CREATE", "IJR", "IPOWER", "new"], + &["new"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = ella(); + let sender = ella(); + + assert!(valid_membership_change( + &RoomVersion::V9, + target_user, + fetch_state(StateEventType::RoomMember, target_user.to_string()), + sender, + fetch_state(StateEventType::RoomMember, sender.to_string()), + &requester, + None::, + fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), + fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + Some(alice()), + &MembershipState::Join, + fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + ) + .unwrap()); + + 
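+		// The same restricted join is rejected below: the authorising user
+		// (ella) is not joined to the room, so `join_authorised_via_users_server`
+		// cannot satisfy the restricted join rule.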
assert!(!valid_membership_change( + &RoomVersion::V9, + target_user, + fetch_state(StateEventType::RoomMember, target_user.to_string()), + sender, + fetch_state(StateEventType::RoomMember, sender.to_string()), + &requester, + None::, + fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), + fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + Some(ella()), + &MembershipState::Leave, + fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + ) + .unwrap()); + } + + #[test] + fn test_knock() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let mut events = INITIAL_EVENTS(); + *events.get_mut(&event_id("IJR")).unwrap() = to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Knock)).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["IPOWER"], + ); + + let auth_events = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .collect::>(); + + let requester = to_pdu_event( + "HELLO", + ella(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Knock)).unwrap(), + &[], + &["IMC"], + ); + + let fetch_state = |ty, key| auth_events.get(&(ty, key)).cloned(); + let target_user = ella(); + let sender = ella(); + + assert!(valid_membership_change( + &RoomVersion::V7, + target_user, + fetch_state(StateEventType::RoomMember, target_user.to_string()), + sender, + fetch_state(StateEventType::RoomMember, sender.to_string()), + &requester, + None::, + fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), + fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None, + &MembershipState::Leave, + fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + ) + .unwrap()); + } +} diff --git a/src/core/state_res/mod.rs b/src/core/state_res/mod.rs new file mode 100644 index 00000000..e4054377 --- /dev/null +++ b/src/core/state_res/mod.rs @@ -0,0 +1,1644 @@ +pub(crate) mod error; +pub mod event_auth; +mod power_levels; +mod room_version; +mod state_event; + +#[cfg(test)] +mod test_utils; + +use std::{ + borrow::Borrow, + cmp::{Ordering, Reverse}, + collections::{BinaryHeap, HashMap, HashSet}, + fmt::Debug, + hash::Hash, +}; + +use futures::{future, stream, Future, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; +use ruma::{ + events::{ + room::member::{MembershipState, RoomMemberEventContent}, + StateEventType, TimelineEventType, + }, + int, EventId, Int, MilliSecondsSinceUnixEpoch, RoomVersionId, +}; +use serde_json::from_str as from_json_str; + +pub(crate) use self::error::Error; +use self::power_levels::PowerLevelsContentFields; +pub use self::{ + event_auth::{auth_check, auth_types_for_event}, + room_version::RoomVersion, + state_event::Event, +}; +use crate::{debug, trace, warn}; + +/// A mapping of event type and state_key to some value `T`, usually an +/// `EventId`. +pub type StateMap = HashMap; +pub type StateMapItem = (TypeStateKey, T); +pub type TypeStateKey = (StateEventType, String); + +type Result = crate::Result; + +/// Resolve sets of state events as they come in. +/// +/// Internally `StateResolution` builds a graph and an auth chain to allow for +/// state conflict resolution. +/// +/// ## Arguments +/// +/// * `state_sets` - The incoming state to resolve. Each `StateMap` represents a +/// possible fork in the state of a room. 
+/// +/// * `auth_chain_sets` - The full recursive set of `auth_events` for each event +/// in the `state_sets`. +/// +/// * `event_fetch` - Any event not found in the `event_map` will defer to this +/// closure to find the event. +/// +/// * `parallel_fetches` - The number of asynchronous fetch requests in-flight +/// for any given operation. +/// +/// ## Invariants +/// +/// The caller of `resolve` must ensure that all the events are from the same +/// room. Although this function takes a `RoomId` it does not check that each +/// event is part of the same room. +//#[tracing::instrument(level = "debug", skip(state_sets, auth_chain_sets, +//#[tracing::instrument(level event_fetch))] +pub async fn resolve<'a, E, SetIter, Fetch, FetchFut, Exists, ExistsFut>( + room_version: &RoomVersionId, + state_sets: impl IntoIterator + Send, + auth_chain_sets: &'a [HashSet], + event_fetch: &Fetch, + event_exists: &Exists, + parallel_fetches: usize, +) -> Result> +where + Fetch: Fn(E::Id) -> FetchFut + Sync, + FetchFut: Future> + Send, + Exists: Fn(E::Id) -> ExistsFut + Sync, + ExistsFut: Future + Send, + SetIter: Iterator> + Clone + Send, + E: Event + Clone + Send + Sync, + E::Id: Borrow + Send + Sync, + for<'b> &'b E: Send, +{ + debug!("State resolution starting"); + + // Split non-conflicting and conflicting state + let (clean, conflicting) = separate(state_sets.into_iter()); + + debug!(count = clean.len(), "non-conflicting events"); + trace!(map = ?clean, "non-conflicting events"); + + if conflicting.is_empty() { + debug!("no conflicting state found"); + return Ok(clean); + } + + debug!(count = conflicting.len(), "conflicting events"); + trace!(map = ?conflicting, "conflicting events"); + + let auth_chain_diff = + get_auth_chain_diff(auth_chain_sets).chain(conflicting.into_values().flatten()); + + // `all_conflicted` contains unique items + // synapse says `full_set = {eid for eid in full_conflicted_set if eid in + // event_map}` + let all_conflicted: HashSet<_> = stream::iter(auth_chain_diff) + // Don't honor events we cannot "verify" + .map(|id| event_exists(id.clone()).map(move |exists| (id, exists))) + .buffer_unordered(parallel_fetches) + .filter_map(|(id, exists)| future::ready(exists.then_some(id))) + .collect() + .boxed() + .await; + + debug!(count = all_conflicted.len(), "full conflicted set"); + trace!(set = ?all_conflicted, "full conflicted set"); + + // We used to check that all events are events from the correct room + // this is now a check the caller of `resolve` must make. + + // Get only the control events with a state_key: "" or ban/kick event (sender != + // state_key) + let control_events: Vec<_> = stream::iter(all_conflicted.iter()) + .map(|id| is_power_event_id(id, &event_fetch).map(move |is| (id, is))) + .buffer_unordered(parallel_fetches) + .filter_map(|(id, is)| future::ready(is.then_some(id.clone()))) + .collect() + .boxed() + .await; + + // Sort the control events based on power_level/clock/event_id and + // outgoing/incoming edges + let sorted_control_levels = reverse_topological_power_sort( + control_events, + &all_conflicted, + &event_fetch, + parallel_fetches, + ) + .boxed() + .await?; + + debug!(count = sorted_control_levels.len(), "power events"); + trace!(list = ?sorted_control_levels, "sorted power events"); + + let room_version = RoomVersion::new(room_version)?; + // Sequentially auth check each control event. 
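+	// Each control event is checked against the partially resolved state built
+	// up so far, so events accepted earlier in the sorted order feed into the
+	// auth state of the events checked after them.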
+ let resolved_control = iterative_auth_check( + &room_version, + sorted_control_levels.iter(), + clean.clone(), + &event_fetch, + parallel_fetches, + ) + .boxed() + .await?; + + debug!(count = resolved_control.len(), "resolved power events"); + trace!(map = ?resolved_control, "resolved power events"); + + // At this point the control_events have been resolved we now have to + // sort the remaining events using the mainline of the resolved power level. + let deduped_power_ev = sorted_control_levels.into_iter().collect::>(); + + // This removes the control events that passed auth and more importantly those + // that failed auth + let events_to_resolve = all_conflicted + .iter() + .filter(|&id| !deduped_power_ev.contains(id.borrow())) + .cloned() + .collect::>(); + + debug!(count = events_to_resolve.len(), "events left to resolve"); + trace!(list = ?events_to_resolve, "events left to resolve"); + + // This "epochs" power level event + let power_event = resolved_control.get(&(StateEventType::RoomPowerLevels, String::new())); + + debug!(event_id = ?power_event, "power event"); + + let sorted_left_events = + mainline_sort(&events_to_resolve, power_event.cloned(), &event_fetch, parallel_fetches) + .boxed() + .await?; + + trace!(list = ?sorted_left_events, "events left, sorted"); + + let mut resolved_state = iterative_auth_check( + &room_version, + sorted_left_events.iter(), + resolved_control, // The control events are added to the final resolved state + &event_fetch, + parallel_fetches, + ) + .boxed() + .await?; + + // Add unconflicted state to the resolved state + // We priorities the unconflicting state + resolved_state.extend(clean); + + debug!("state resolution finished"); + + Ok(resolved_state) +} + +/// Split the events that have no conflicts from those that are conflicting. +/// +/// The return tuple looks like `(unconflicted, conflicted)`. +/// +/// State is determined to be conflicting if for the given key (StateEventType, +/// StateKey) there is not exactly one event ID. This includes missing events, +/// if one state_set includes an event that none of the other have this is a +/// conflicting event. +fn separate<'a, Id>( + state_sets_iter: impl Iterator>, +) -> (StateMap, StateMap>) +where + Id: Clone + Eq + Hash + 'a, +{ + let mut state_set_count = 0_usize; + let mut occurrences = HashMap::<_, HashMap<_, _>>::new(); + + let state_sets_iter = state_sets_iter.inspect(|_| state_set_count += 1); + for (k, v) in state_sets_iter.flatten() { + occurrences + .entry(k) + .or_default() + .entry(v) + .and_modify(|x| *x += 1) + .or_insert(1); + } + + let mut unconflicted_state = StateMap::new(); + let mut conflicted_state = StateMap::new(); + + for (k, v) in occurrences { + for (id, occurrence_count) in v { + if occurrence_count == state_set_count { + unconflicted_state.insert((k.0.clone(), k.1.clone()), id.clone()); + } else { + conflicted_state + .entry((k.0.clone(), k.1.clone())) + .and_modify(|x: &mut Vec<_>| x.push(id.clone())) + .or_insert(vec![id.clone()]); + } + } + } + + (unconflicted_state, conflicted_state) +} + +/// Returns a Vec of deduped EventIds that appear in some chains but not others. 
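+///
+/// For example, given the auth chains `{A, B, C}` and `{A, B, D}`, the
+/// difference is `{C, D}`: each of those IDs appears in only one of the sets.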
+fn get_auth_chain_diff(auth_chain_sets: &[HashSet]) -> impl Iterator + Send +where + Id: Clone + Eq + Hash + Send, +{ + let num_sets = auth_chain_sets.len(); + let mut id_counts: HashMap = HashMap::new(); + for id in auth_chain_sets.iter().flatten() { + *id_counts.entry(id.clone()).or_default() += 1; + } + + id_counts + .into_iter() + .filter_map(move |(id, count)| (count < num_sets).then_some(id)) +} + +/// Events are sorted from "earliest" to "latest". +/// +/// They are compared using the negative power level (reverse topological +/// ordering), the origin server timestamp and in case of a tie the `EventId`s +/// are compared lexicographically. +/// +/// The power level is negative because a higher power level is equated to an +/// earlier (further back in time) origin server timestamp. +#[tracing::instrument(level = "debug", skip_all)] +async fn reverse_topological_power_sort( + events_to_sort: Vec, + auth_diff: &HashSet, + fetch_event: &F, + parallel_fetches: usize, +) -> Result> +where + F: Fn(E::Id) -> Fut + Sync, + Fut: Future> + Send, + E: Event + Send, + E::Id: Borrow + Send + Sync, +{ + debug!("reverse topological sort of power events"); + + let mut graph = HashMap::new(); + for event_id in events_to_sort { + add_event_and_auth_chain_to_graph(&mut graph, event_id, auth_diff, fetch_event).await; + } + + // This is used in the `key_fn` passed to the lexico_topo_sort fn + let event_to_pl = stream::iter(graph.keys()) + .map(|event_id| { + get_power_level_for_sender(event_id.clone(), fetch_event, parallel_fetches) + .map(move |res| res.map(|pl| (event_id, pl))) + }) + .buffer_unordered(parallel_fetches) + .try_fold(HashMap::new(), |mut event_to_pl, (event_id, pl)| { + debug!( + event_id = event_id.borrow().as_str(), + power_level = i64::from(pl), + "found the power level of an event's sender", + ); + + event_to_pl.insert(event_id.clone(), pl); + future::ok(event_to_pl) + }) + .boxed() + .await?; + + let event_to_pl = &event_to_pl; + let fetcher = |event_id: E::Id| async move { + let pl = *event_to_pl + .get(event_id.borrow()) + .ok_or_else(|| Error::NotFound(String::new()))?; + let ev = fetch_event(event_id) + .await + .ok_or_else(|| Error::NotFound(String::new()))?; + Ok((pl, ev.origin_server_ts())) + }; + + lexicographical_topological_sort(&graph, &fetcher).await +} + +/// Sorts the event graph based on number of outgoing/incoming edges. +/// +/// `key_fn` is used as to obtain the power level and age of an event for +/// breaking ties (together with the event ID). +#[tracing::instrument(level = "debug", skip_all)] +pub async fn lexicographical_topological_sort( + graph: &HashMap>, + key_fn: &F, +) -> Result> +where + F: Fn(Id) -> Fut + Sync, + Fut: Future> + Send, + Id: Borrow + Clone + Eq + Hash + Ord + Send, +{ + #[derive(PartialEq, Eq)] + struct TieBreaker<'a, Id> { + power_level: Int, + origin_server_ts: MilliSecondsSinceUnixEpoch, + event_id: &'a Id, + } + + impl Ord for TieBreaker<'_, Id> + where + Id: Ord, + { + fn cmp(&self, other: &Self) -> Ordering { + // NOTE: the power level comparison is "backwards" intentionally. + // See the "Mainline ordering" section of the Matrix specification + // around where it says the following: + // + // > for events `x` and `y`, `x < y` if [...] 
+ // + // + other + .power_level + .cmp(&self.power_level) + .then(self.origin_server_ts.cmp(&other.origin_server_ts)) + .then(self.event_id.cmp(other.event_id)) + } + } + + impl PartialOrd for TieBreaker<'_, Id> + where + Id: Ord, + { + fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } + } + + debug!("starting lexicographical topological sort"); + + // NOTE: an event that has no incoming edges happened most recently, + // and an event that has no outgoing edges happened least recently. + + // NOTE: this is basically Kahn's algorithm except we look at nodes with no + // outgoing edges, c.f. + // https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm + + // outdegree_map is an event referring to the events before it, the + // more outdegree's the more recent the event. + let mut outdegree_map = graph.clone(); + + // The number of events that depend on the given event (the EventId key) + // How many events reference this event in the DAG as a parent + let mut reverse_graph: HashMap<_, HashSet<_>> = HashMap::new(); + + // Vec of nodes that have zero out degree, least recent events. + let mut zero_outdegree = Vec::new(); + + for (node, edges) in graph { + if edges.is_empty() { + let (power_level, origin_server_ts) = key_fn(node.clone()).await?; + // The `Reverse` is because rusts `BinaryHeap` sorts largest -> smallest we need + // smallest -> largest + zero_outdegree.push(Reverse(TieBreaker { + power_level, + origin_server_ts, + event_id: node, + })); + } + + reverse_graph.entry(node).or_default(); + for edge in edges { + reverse_graph.entry(edge).or_default().insert(node); + } + } + + let mut heap = BinaryHeap::from(zero_outdegree); + + // We remove the oldest node (most incoming edges) and check against all other + let mut sorted = vec![]; + // Destructure the `Reverse` and take the smallest `node` each time + while let Some(Reverse(item)) = heap.pop() { + let node = item.event_id; + + for &parent in reverse_graph + .get(node) + .expect("EventId in heap is also in reverse_graph") + { + // The number of outgoing edges this node has + let out = outdegree_map + .get_mut(parent.borrow()) + .expect("outdegree_map knows of all referenced EventIds"); + + // Only push on the heap once older events have been cleared + out.remove(node.borrow()); + if out.is_empty() { + let (power_level, origin_server_ts) = key_fn(parent.clone()).await?; + heap.push(Reverse(TieBreaker { + power_level, + origin_server_ts, + event_id: parent, + })); + } + } + + // synapse yields we push then return the vec + sorted.push(node.clone()); + } + + Ok(sorted) +} + +/// Find the power level for the sender of `event_id` or return a default value +/// of zero. +/// +/// Do NOT use this any where but topological sort, we find the power level for +/// the eventId at the eventId's generation (we walk backwards to `EventId`s +/// most recent previous power level event). 
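+///
+/// Concretely, the sender's level is read from the `m.room.power_levels` event
+/// found in this event's own `auth_events`; if the sender is not listed there
+/// the content's `users_default` applies, and zero is used when no power-levels
+/// event is found at all.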
+async fn get_power_level_for_sender( + event_id: E::Id, + fetch_event: &F, + parallel_fetches: usize, +) -> serde_json::Result +where + F: Fn(E::Id) -> Fut + Sync, + Fut: Future> + Send, + E: Event + Send, + E::Id: Borrow + Send, +{ + debug!("fetch event ({event_id}) senders power level"); + + let event = fetch_event(event_id.clone()).await; + + let auth_events = event.as_ref().map(Event::auth_events).into_iter().flatten(); + + let pl = stream::iter(auth_events) + .map(|aid| fetch_event(aid.clone())) + .buffer_unordered(parallel_fetches.min(5)) + .filter_map(future::ready) + .collect::>() + .boxed() + .await + .into_iter() + .find(|aev| is_type_and_key(aev, &TimelineEventType::RoomPowerLevels, "")); + + let content: PowerLevelsContentFields = match pl { + | None => return Ok(int!(0)), + | Some(ev) => from_json_str(ev.content().get())?, + }; + + if let Some(ev) = event { + if let Some(&user_level) = content.get_user_power(ev.sender()) { + debug!("found {} at power_level {user_level}", ev.sender()); + return Ok(user_level); + } + } + + Ok(content.users_default) +} + +/// Check the that each event is authenticated based on the events before it. +/// +/// ## Returns +/// +/// The `unconflicted_state` combined with the newly auth'ed events. So any +/// event that fails the `event_auth::auth_check` will be excluded from the +/// returned state map. +/// +/// For each `events_to_check` event we gather the events needed to auth it from +/// the the `fetch_event` closure and verify each event using the +/// `event_auth::auth_check` function. +async fn iterative_auth_check<'a, E, F, Fut, I>( + room_version: &RoomVersion, + events_to_check: I, + unconflicted_state: StateMap, + fetch_event: &F, + parallel_fetches: usize, +) -> Result> +where + F: Fn(E::Id) -> Fut + Sync, + Fut: Future> + Send, + E::Id: Borrow + Clone + Eq + Ord + Send + Sync + 'a, + I: Iterator + Debug + Send + 'a, + E: Event + Clone + Send + Sync, +{ + debug!("starting iterative auth check"); + trace!( + list = ?events_to_check, + "events to check" + ); + + let events_to_check: Vec<_> = stream::iter(events_to_check) + .map(Result::Ok) + .map_ok(|event_id| { + fetch_event(event_id.clone()).map(move |result| { + result.ok_or_else(|| Error::NotFound(format!("Failed to find {event_id}"))) + }) + }) + .try_buffer_unordered(parallel_fetches) + .try_collect() + .boxed() + .await?; + + let auth_event_ids: HashSet = events_to_check + .iter() + .flat_map(|event: &E| event.auth_events().map(Clone::clone)) + .collect(); + + let auth_events: HashMap = stream::iter(auth_event_ids.into_iter()) + .map(fetch_event) + .buffer_unordered(parallel_fetches) + .filter_map(future::ready) + .map(|auth_event| (auth_event.event_id().clone(), auth_event)) + .collect() + .boxed() + .await; + + let auth_events = &auth_events; + let mut resolved_state = unconflicted_state; + for event in &events_to_check { + let event_id = event.event_id(); + let state_key = event + .state_key() + .ok_or_else(|| Error::InvalidPdu("State event had no state key".to_owned()))?; + + let auth_types = auth_types_for_event( + event.event_type(), + event.sender(), + Some(state_key), + event.content(), + )?; + + let mut auth_state = StateMap::new(); + for aid in event.auth_events() { + if let Some(ev) = auth_events.get(aid.borrow()) { + //TODO: synapse checks "rejected_reason" which is most likely related to + // soft-failing + auth_state.insert( + ev.event_type() + .with_state_key(ev.state_key().ok_or_else(|| { + Error::InvalidPdu("State event had no state key".to_owned()) + })?), + 
ev.clone(), + ); + } else { + warn!(event_id = aid.borrow().as_str(), "missing auth event"); + } + } + + stream::iter( + auth_types + .iter() + .filter_map(|key| Some((key, resolved_state.get(key)?))), + ) + .filter_map(|(key, ev_id)| async move { + if let Some(event) = auth_events.get(ev_id.borrow()) { + Some((key, event.clone())) + } else { + Some((key, fetch_event(ev_id.clone()).await?)) + } + }) + .for_each(|(key, event)| { + //TODO: synapse checks "rejected_reason" is None here + auth_state.insert(key.to_owned(), event); + future::ready(()) + }) + .await; + + debug!("event to check {:?}", event.event_id()); + + // The key for this is (eventType + a state_key of the signed token not sender) + // so search for it + let current_third_party = auth_state.iter().find_map(|(_, pdu)| { + (*pdu.event_type() == TimelineEventType::RoomThirdPartyInvite).then_some(pdu) + }); + + let fetch_state = |ty: &StateEventType, key: &str| { + future::ready(auth_state.get(&ty.with_state_key(key))) + }; + + if auth_check(room_version, &event, current_third_party.as_ref(), fetch_state).await? { + // add event to resolved state map + resolved_state.insert(event.event_type().with_state_key(state_key), event_id.clone()); + } else { + // synapse passes here on AuthError. We do not add this event to resolved_state. + warn!("event {event_id} failed the authentication check"); + } + } + + Ok(resolved_state) +} + +/// Returns the sorted `to_sort` list of `EventId`s based on a mainline sort +/// using the depth of `resolved_power_level`, the server timestamp, and the +/// eventId. +/// +/// The depth of the given event is calculated based on the depth of it's +/// closest "parent" power_level event. If there have been two power events the +/// after the most recent are depth 0, the events before (with the first power +/// level as a parent) will be marked as depth 1. depth 1 is "older" than depth +/// 0. +async fn mainline_sort( + to_sort: &[E::Id], + resolved_power_level: Option, + fetch_event: &F, + parallel_fetches: usize, +) -> Result> +where + F: Fn(E::Id) -> Fut + Sync, + Fut: Future> + Send, + E: Event + Clone + Send + Sync, + E::Id: Borrow + Clone + Send + Sync, +{ + debug!("mainline sort of events"); + + // There are no EventId's to sort, bail. 
+ if to_sort.is_empty() { + return Ok(vec![]); + } + + let mut mainline = vec![]; + let mut pl = resolved_power_level; + while let Some(p) = pl { + mainline.push(p.clone()); + + let event = fetch_event(p.clone()) + .await + .ok_or_else(|| Error::NotFound(format!("Failed to find {p}")))?; + pl = None; + for aid in event.auth_events() { + let ev = fetch_event(aid.clone()) + .await + .ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?; + if is_type_and_key(&ev, &TimelineEventType::RoomPowerLevels, "") { + pl = Some(aid.to_owned()); + break; + } + } + } + + let mainline_map = mainline + .iter() + .rev() + .enumerate() + .map(|(idx, eid)| ((*eid).clone(), idx)) + .collect::>(); + + let order_map = stream::iter(to_sort.iter()) + .map(|ev_id| { + fetch_event(ev_id.clone()).map(move |event| event.map(|event| (event, ev_id))) + }) + .buffer_unordered(parallel_fetches) + .filter_map(future::ready) + .map(|(event, ev_id)| { + get_mainline_depth(Some(event.clone()), &mainline_map, fetch_event) + .map_ok(move |depth| (depth, event, ev_id)) + .map(Result::ok) + }) + .buffer_unordered(parallel_fetches) + .filter_map(future::ready) + .fold(HashMap::new(), |mut order_map, (depth, event, ev_id)| { + order_map.insert(ev_id, (depth, event.origin_server_ts(), ev_id)); + future::ready(order_map) + }) + .boxed() + .await; + + // Sort the event_ids by their depth, timestamp and EventId + // unwrap is OK order map and sort_event_ids are from to_sort (the same Vec) + let mut sort_event_ids = order_map.keys().map(|&k| k.clone()).collect::>(); + sort_event_ids.sort_by_key(|sort_id| &order_map[sort_id]); + + Ok(sort_event_ids) +} + +/// Get the mainline depth from the `mainline_map` or finds a power_level event +/// that has an associated mainline depth. +async fn get_mainline_depth( + mut event: Option, + mainline_map: &HashMap, + fetch_event: &F, +) -> Result +where + F: Fn(E::Id) -> Fut + Sync, + Fut: Future> + Send, + E: Event + Send, + E::Id: Borrow + Send, +{ + while let Some(sort_ev) = event { + debug!(event_id = sort_ev.event_id().borrow().as_str(), "mainline"); + let id = sort_ev.event_id(); + if let Some(depth) = mainline_map.get(id.borrow()) { + return Ok(*depth); + } + + event = None; + for aid in sort_ev.auth_events() { + let aev = fetch_event(aid.clone()) + .await + .ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?; + if is_type_and_key(&aev, &TimelineEventType::RoomPowerLevels, "") { + event = Some(aev); + break; + } + } + } + // Did not find a power level event so we default to zero + Ok(0) +} + +async fn add_event_and_auth_chain_to_graph( + graph: &mut HashMap>, + event_id: E::Id, + auth_diff: &HashSet, + fetch_event: &F, +) where + F: Fn(E::Id) -> Fut, + Fut: Future> + Send, + E: Event + Send, + E::Id: Borrow + Clone + Send, +{ + let mut state = vec![event_id]; + while let Some(eid) = state.pop() { + graph.entry(eid.clone()).or_default(); + let event = fetch_event(eid.clone()).await; + let auth_events = event.as_ref().map(Event::auth_events).into_iter().flatten(); + + // Prefer the store to event as the store filters dedups the events + for aid in auth_events { + if auth_diff.contains(aid.borrow()) { + if !graph.contains_key(aid.borrow()) { + state.push(aid.to_owned()); + } + + // We just inserted this at the start of the while loop + graph.get_mut(eid.borrow()).unwrap().insert(aid.to_owned()); + } + } + } +} + +async fn is_power_event_id(event_id: &E::Id, fetch: &F) -> bool +where + F: Fn(E::Id) -> Fut + Sync, + Fut: Future> + Send, + E: Event + Send, + E::Id: Borrow + 
Send, +{ + match fetch(event_id.clone()).await.as_ref() { + | Some(state) => is_power_event(state), + | _ => false, + } +} + +fn is_type_and_key(ev: impl Event, ev_type: &TimelineEventType, state_key: &str) -> bool { + ev.event_type() == ev_type && ev.state_key() == Some(state_key) +} + +fn is_power_event(event: impl Event) -> bool { + match event.event_type() { + | TimelineEventType::RoomPowerLevels + | TimelineEventType::RoomJoinRules + | TimelineEventType::RoomCreate => event.state_key() == Some(""), + | TimelineEventType::RoomMember => { + if let Ok(content) = from_json_str::(event.content().get()) { + if [MembershipState::Leave, MembershipState::Ban].contains(&content.membership) { + return Some(event.sender().as_str()) != event.state_key(); + } + } + + false + }, + | _ => false, + } +} + +/// Convenience trait for adding event type plus state key to state maps. +pub trait EventTypeExt { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String); +} + +impl EventTypeExt for StateEventType { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { + (self, state_key.into()) + } +} + +impl EventTypeExt for TimelineEventType { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { + (self.to_string().into(), state_key.into()) + } +} + +impl EventTypeExt for &T +where + T: EventTypeExt + Clone, +{ + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { + self.to_owned().with_state_key(state_key) + } +} + +#[cfg(test)] +mod tests { + use std::{ + collections::{HashMap, HashSet}, + sync::Arc, + }; + + use maplit::{hashmap, hashset}; + use rand::seq::SliceRandom; + use ruma::{ + events::{ + room::join_rules::{JoinRule, RoomJoinRulesEventContent}, + StateEventType, TimelineEventType, + }, + int, uint, + }; + use ruma_common::{MilliSecondsSinceUnixEpoch, OwnedEventId, RoomVersionId}; + use serde_json::{json, value::to_raw_value as to_raw_json_value}; + use tracing::debug; + + use crate::{ + is_power_event, + room_version::RoomVersion, + test_utils::{ + alice, bob, charlie, do_check, ella, event_id, member_content_ban, + member_content_join, room_id, to_init_pdu_event, to_pdu_event, zara, PduEvent, + TestStore, INITIAL_EVENTS, + }, + Event, EventTypeExt, StateMap, + }; + + async fn test_event_sort() { + use futures::future::ready; + + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let events = INITIAL_EVENTS(); + + let event_map = events + .values() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) + .collect::>(); + + let auth_chain: HashSet = HashSet::new(); + + let power_events = event_map + .values() + .filter(|&pdu| is_power_event(&**pdu)) + .map(|pdu| pdu.event_id.clone()) + .collect::>(); + + let fetcher = |id| ready(events.get(&id).cloned()); + let sorted_power_events = + crate::reverse_topological_power_sort(power_events, &auth_chain, &fetcher, 1) + .await + .unwrap(); + + let resolved_power = crate::iterative_auth_check( + &RoomVersion::V6, + sorted_power_events.iter(), + HashMap::new(), // unconflicted events + &fetcher, + 1, + ) + .await + .expect("iterative auth check failed on resolved events"); + + // don't remove any events so we know it sorts them all correctly + let mut events_to_sort = events.keys().cloned().collect::>(); + + events_to_sort.shuffle(&mut rand::thread_rng()); + + let power_level = resolved_power + .get(&(StateEventType::RoomPowerLevels, "".to_owned())) + .cloned(); + + let sorted_event_ids 
= crate::mainline_sort(&events_to_sort, power_level, &fetcher, 1) + .await + .unwrap(); + + assert_eq!( + vec![ + "$CREATE:foo", + "$IMA:foo", + "$IPOWER:foo", + "$IJR:foo", + "$IMB:foo", + "$IMC:foo", + "$START:foo", + "$END:foo" + ], + sorted_event_ids + .iter() + .map(|id| id.to_string()) + .collect::>() + ); + } + + #[tokio::test] + async fn test_sort() { + for _ in 0..20 { + // since we shuffle the eventIds before we sort them introducing randomness + // seems like we should test this a few times + test_event_sort().await; + } + } + + #[tokio::test] + async fn ban_vs_power_level() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "PA", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "MA", + alice(), + TimelineEventType::RoomMember, + Some(alice().to_string().as_str()), + member_content_join(), + ), + to_init_pdu_event( + "MB", + alice(), + TimelineEventType::RoomMember, + Some(bob().to_string().as_str()), + member_content_ban(), + ), + to_init_pdu_event( + "PB", + bob(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + ]; + + let edges = vec![vec!["END", "MB", "MA", "PA", "START"], vec!["END", "PA", "PB"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["PA", "MA", "MB"] + .into_iter() + .map(event_id) + .collect::>(); + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn topic_basic() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "T1", + alice(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "PA1", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "T2", + alice(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "PA2", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 0 } })).unwrap(), + ), + to_init_pdu_event( + "PB", + bob(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "T3", + bob(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + ]; + + let edges = + vec![vec!["END", "PA2", "T2", "PA1", "T1", "START"], vec!["END", "T3", "PB", "PA1"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["PA2", "T2"] + .into_iter() + .map(event_id) + .collect::>(); + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn topic_reset() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "T1", + alice(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "PA", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ 
"users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "T2", + bob(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "MB", + alice(), + TimelineEventType::RoomMember, + Some(bob().to_string().as_str()), + member_content_ban(), + ), + ]; + + let edges = vec![vec!["END", "MB", "T2", "PA", "T1", "START"], vec!["END", "T1"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["T1", "MB", "PA"] + .into_iter() + .map(event_id) + .collect::>(); + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn join_rule_evasion() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "JR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Private)).unwrap(), + ), + to_init_pdu_event( + "ME", + ella(), + TimelineEventType::RoomMember, + Some(ella().to_string().as_str()), + member_content_join(), + ), + ]; + + let edges = vec![vec!["END", "JR", "START"], vec!["END", "ME", "START"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec![event_id("JR")]; + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn offtopic_power_level() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "PA", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "PB", + bob(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value( + &json!({ "users": { alice(): 100, bob(): 50, charlie(): 50 } }), + ) + .unwrap(), + ), + to_init_pdu_event( + "PC", + charlie(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50, charlie(): 0 } })) + .unwrap(), + ), + ]; + + let edges = vec![vec!["END", "PC", "PB", "PA", "START"], vec!["END", "PA"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["PC"].into_iter().map(event_id).collect::>(); + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn topic_setting() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let events = &[ + to_init_pdu_event( + "T1", + alice(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "PA1", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "T2", + alice(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "PA2", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 0 } })).unwrap(), + ), + to_init_pdu_event( + "PB", + bob(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + ), + to_init_pdu_event( + "T3", + bob(), + TimelineEventType::RoomTopic, + Some(""), + 
to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "MZ1", + zara(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + to_init_pdu_event( + "T4", + alice(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + ), + ]; + + let edges = vec![vec!["END", "T4", "MZ1", "PA2", "T2", "PA1", "T1", "START"], vec![ + "END", "MZ1", "T3", "PB", "PA1", + ]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["T4", "PA2"] + .into_iter() + .map(event_id) + .collect::>(); + + do_check(events, edges, expected_state_ids).await; + } + + #[tokio::test] + async fn test_event_map_none() { + use futures::future::ready; + + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let mut store = TestStore::(hashmap! {}); + + // build up the DAG + let (state_at_bob, state_at_charlie, expected) = store.set_up(); + + let ev_map = store.0.clone(); + let fetcher = |id| ready(ev_map.get(&id).cloned()); + + let exists = |id: ::Id| ready(ev_map.get(&*id).is_some()); + + let state_sets = [state_at_bob, state_at_charlie]; + let auth_chain: Vec<_> = state_sets + .iter() + .map(|map| { + store + .auth_event_ids(room_id(), map.values().cloned().collect()) + .unwrap() + }) + .collect(); + + let resolved = match crate::resolve( + &RoomVersionId::V2, + &state_sets, + &auth_chain, + &fetcher, + &exists, + 1, + ) + .await + { + | Ok(state) => state, + | Err(e) => panic!("{e}"), + }; + + assert_eq!(expected, resolved); + } + + #[tokio::test] + async fn test_lexicographical_sort() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + + let graph = hashmap! 
{ + event_id("l") => hashset![event_id("o")], + event_id("m") => hashset![event_id("n"), event_id("o")], + event_id("n") => hashset![event_id("o")], + event_id("o") => hashset![], // "o" has zero outgoing edges but 4 incoming edges + event_id("p") => hashset![event_id("o")], + }; + + let res = crate::lexicographical_topological_sort(&graph, &|_id| async { + Ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) + }) + .await + .unwrap(); + + assert_eq!( + vec!["o", "l", "n", "m", "p"], + res.iter() + .map(ToString::to_string) + .map(|s| s.replace('$', "").replace(":foo", "")) + .collect::>() + ); + } + + #[tokio::test] + async fn ban_with_auth_chains() { + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let ban = BAN_STATE_SET(); + + let edges = vec![vec!["END", "MB", "PA", "START"], vec!["END", "IME", "MB"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["PA", "MB"] + .into_iter() + .map(event_id) + .collect::>(); + + do_check(&ban.values().cloned().collect::>(), edges, expected_state_ids).await; + } + + #[tokio::test] + async fn ban_with_auth_chains2() { + use futures::future::ready; + + let _ = tracing::subscriber::set_default( + tracing_subscriber::fmt().with_test_writer().finish(), + ); + let init = INITIAL_EVENTS(); + let ban = BAN_STATE_SET(); + + let mut inner = init.clone(); + inner.extend(ban); + let store = TestStore(inner.clone()); + + let state_set_a = [ + inner.get(&event_id("CREATE")).unwrap(), + inner.get(&event_id("IJR")).unwrap(), + inner.get(&event_id("IMA")).unwrap(), + inner.get(&event_id("IMB")).unwrap(), + inner.get(&event_id("IMC")).unwrap(), + inner.get(&event_id("MB")).unwrap(), + inner.get(&event_id("PA")).unwrap(), + ] + .iter() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id.clone())) + .collect::>(); + + let state_set_b = [ + inner.get(&event_id("CREATE")).unwrap(), + inner.get(&event_id("IJR")).unwrap(), + inner.get(&event_id("IMA")).unwrap(), + inner.get(&event_id("IMB")).unwrap(), + inner.get(&event_id("IMC")).unwrap(), + inner.get(&event_id("IME")).unwrap(), + inner.get(&event_id("PA")).unwrap(), + ] + .iter() + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id.clone())) + .collect::>(); + + let ev_map = &store.0; + let state_sets = [state_set_a, state_set_b]; + let auth_chain: Vec<_> = state_sets + .iter() + .map(|map| { + store + .auth_event_ids(room_id(), map.values().cloned().collect()) + .unwrap() + }) + .collect(); + + let fetcher = |id: ::Id| ready(ev_map.get(&id).cloned()); + let exists = |id: ::Id| ready(ev_map.get(&id).is_some()); + let resolved = match crate::resolve( + &RoomVersionId::V6, + &state_sets, + &auth_chain, + &fetcher, + &exists, + 1, + ) + .await + { + | Ok(state) => state, + | Err(e) => panic!("{e}"), + }; + + debug!( + resolved = ?resolved + .iter() + .map(|((ty, key), id)| format!("(({ty}{key:?}), {id})")) + .collect::>(), + "resolved state", + ); + + let expected = [ + "$CREATE:foo", + "$IJR:foo", + "$PA:foo", + "$IMA:foo", + "$IMB:foo", + "$IMC:foo", + "$MB:foo", + ]; + + for id in expected.iter().map(|i| event_id(i)) { + // make sure our resolved events are equal to the expected list + assert!(resolved.values().any(|eid| eid == &id) || init.contains_key(&id), "{id}"); + } + assert_eq!(expected.len(), resolved.len()); + } + + #[tokio::test] + async fn join_rule_with_auth_chain() { + let join_rule = JOIN_RULE(); + + let edges = 
vec![vec!["END", "JR", "START"], vec!["END", "IMZ", "START"]] + .into_iter() + .map(|list| list.into_iter().map(event_id).collect::>()) + .collect::>(); + + let expected_state_ids = vec!["JR"].into_iter().map(event_id).collect::>(); + + do_check(&join_rule.values().cloned().collect::>(), edges, expected_state_ids) + .await; + } + + #[allow(non_snake_case)] + fn BAN_STATE_SET() -> HashMap> { + vec![ + to_pdu_event( + "PA", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + &["CREATE", "IMA", "IPOWER"], // auth_events + &["START"], // prev_events + ), + to_pdu_event( + "PB", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["END"], + ), + to_pdu_event( + "MB", + alice(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + member_content_ban(), + &["CREATE", "IMA", "PB"], + &["PA"], + ), + to_pdu_event( + "IME", + ella(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + member_content_join(), + &["CREATE", "IJR", "PA"], + &["MB"], + ), + ] + .into_iter() + .map(|ev| (ev.event_id.clone(), ev)) + .collect() + } + + #[allow(non_snake_case)] + fn JOIN_RULE() -> HashMap> { + vec![ + to_pdu_event( + "JR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&json!({ "join_rule": "invite" })).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["START"], + ), + to_pdu_event( + "IMZ", + zara(), + TimelineEventType::RoomPowerLevels, + Some(zara().as_str()), + member_content_join(), + &["CREATE", "JR", "IPOWER"], + &["START"], + ), + ] + .into_iter() + .map(|ev| (ev.event_id.clone(), ev)) + .collect() + } + + macro_rules! state_set { + ($($kind:expr => $key:expr => $id:expr),* $(,)?) 
=> {{ + #[allow(unused_mut)] + let mut x = StateMap::new(); + $( + x.insert(($kind, $key.to_owned()), $id); + )* + x + }}; + } + + #[test] + fn separate_unique_conflicted() { + let (unconflicted, conflicted) = super::separate( + [ + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + state_set![StateEventType::RoomMember => "@b:hs1" => 1], + state_set![StateEventType::RoomMember => "@c:hs1" => 2], + ] + .iter(), + ); + + assert_eq!(unconflicted, StateMap::new()); + assert_eq!(conflicted, state_set![ + StateEventType::RoomMember => "@a:hs1" => vec![0], + StateEventType::RoomMember => "@b:hs1" => vec![1], + StateEventType::RoomMember => "@c:hs1" => vec![2], + ],); + } + + #[test] + fn separate_conflicted() { + let (unconflicted, mut conflicted) = super::separate( + [ + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + state_set![StateEventType::RoomMember => "@a:hs1" => 1], + state_set![StateEventType::RoomMember => "@a:hs1" => 2], + ] + .iter(), + ); + + // HashMap iteration order is random, so sort this before asserting on it + for v in conflicted.values_mut() { + v.sort_unstable(); + } + + assert_eq!(unconflicted, StateMap::new()); + assert_eq!(conflicted, state_set![ + StateEventType::RoomMember => "@a:hs1" => vec![0, 1, 2], + ],); + } + + #[test] + fn separate_unconflicted() { + let (unconflicted, conflicted) = super::separate( + [ + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + ] + .iter(), + ); + + assert_eq!(unconflicted, state_set![ + StateEventType::RoomMember => "@a:hs1" => 0, + ],); + assert_eq!(conflicted, StateMap::new()); + } + + #[test] + fn separate_mixed() { + let (unconflicted, conflicted) = super::separate( + [ + state_set![StateEventType::RoomMember => "@a:hs1" => 0], + state_set![ + StateEventType::RoomMember => "@a:hs1" => 0, + StateEventType::RoomMember => "@b:hs1" => 1, + ], + state_set![ + StateEventType::RoomMember => "@a:hs1" => 0, + StateEventType::RoomMember => "@c:hs1" => 2, + ], + ] + .iter(), + ); + + assert_eq!(unconflicted, state_set![ + StateEventType::RoomMember => "@a:hs1" => 0, + ],); + assert_eq!(conflicted, state_set![ + StateEventType::RoomMember => "@b:hs1" => vec![1], + StateEventType::RoomMember => "@c:hs1" => vec![2], + ],); + } +} diff --git a/src/core/state_res/outcomes.txt b/src/core/state_res/outcomes.txt new file mode 100644 index 00000000..0fa1c734 --- /dev/null +++ b/src/core/state_res/outcomes.txt @@ -0,0 +1,104 @@ +11/29/2020 BRANCH: timo-spec-comp REV: d2a85669cc6056679ce6ca0fde4658a879ad2b08 +lexicographical topological sort + time: [1.7123 us 1.7157 us 1.7199 us] + change: [-1.7584% -1.5433% -1.3205%] (p = 0.00 < 0.05) + Performance has improved. +Found 8 outliers among 100 measurements (8.00%) + 2 (2.00%) low mild + 5 (5.00%) high mild + 1 (1.00%) high severe + +resolve state of 5 events one fork + time: [10.981 us 10.998 us 11.020 us] +Found 3 outliers among 100 measurements (3.00%) + 3 (3.00%) high mild + +resolve state of 10 events 3 conflicting + time: [26.858 us 26.946 us 27.037 us] + +11/29/2020 BRANCH: event-trait REV: f0eb1310efd49d722979f57f20bd1ac3592b0479 +lexicographical topological sort + time: [1.7686 us 1.7738 us 1.7810 us] + change: [-3.2752% -2.4634% -1.7635%] (p = 0.00 < 0.05) + Performance has improved. 
+Found 1 outliers among 100 measurements (1.00%) + 1 (1.00%) high severe + +resolve state of 5 events one fork + time: [10.643 us 10.656 us 10.669 us] + change: [-4.9990% -3.8078% -2.8319%] (p = 0.00 < 0.05) + Performance has improved. +Found 1 outliers among 100 measurements (1.00%) + 1 (1.00%) high severe + +resolve state of 10 events 3 conflicting + time: [29.149 us 29.252 us 29.375 us] + change: [-0.8433% -0.3270% +0.2656%] (p = 0.25 > 0.05) + No change in performance detected. +Found 1 outliers among 100 measurements (1.00%) + 1 (1.00%) high mild + +4/26/2020 BRANCH: fix-test-serde REV: +lexicographical topological sort + time: [1.6793 us 1.6823 us 1.6857 us] +Found 9 outliers among 100 measurements (9.00%) + 1 (1.00%) low mild + 4 (4.00%) high mild + 4 (4.00%) high severe + +resolve state of 5 events one fork + time: [9.9993 us 10.062 us 10.159 us] +Found 9 outliers among 100 measurements (9.00%) + 7 (7.00%) high mild + 2 (2.00%) high severe + +resolve state of 10 events 3 conflicting + time: [26.004 us 26.092 us 26.195 us] +Found 16 outliers among 100 measurements (16.00%) + 11 (11.00%) high mild + 5 (5.00%) high severe + +6/30/2021 BRANCH: state-closure REV: 174c3e2a72232ad75b3fb14b3551f5f746f4fe84 +lexicographical topological sort + time: [1.5496 us 1.5536 us 1.5586 us] +Found 9 outliers among 100 measurements (9.00%) + 1 (1.00%) low mild + 1 (1.00%) high mild + 7 (7.00%) high severe + +resolve state of 5 events one fork + time: [10.319 us 10.333 us 10.347 us] +Found 2 outliers among 100 measurements (2.00%) + 2 (2.00%) high severe + +resolve state of 10 events 3 conflicting + time: [25.770 us 25.805 us 25.839 us] +Found 7 outliers among 100 measurements (7.00%) + 5 (5.00%) high mild + 2 (2.00%) high severe + +7/20/2021 BRANCH stateres-result REV: +This marks the switch to HashSet/Map +lexicographical topological sort + time: [1.8122 us 1.8177 us 1.8233 us] + change: [+15.205% +15.919% +16.502%] (p = 0.00 < 0.05) + Performance has regressed. +Found 7 outliers among 100 measurements (7.00%) + 5 (5.00%) high mild + 2 (2.00%) high severe + +resolve state of 5 events one fork + time: [11.966 us 12.010 us 12.059 us] + change: [+16.089% +16.730% +17.469%] (p = 0.00 < 0.05) + Performance has regressed. +Found 7 outliers among 100 measurements (7.00%) + 3 (3.00%) high mild + 4 (4.00%) high severe + +resolve state of 10 events 3 conflicting + time: [29.092 us 29.201 us 29.311 us] + change: [+12.447% +12.847% +13.280%] (p = 0.00 < 0.05) + Performance has regressed. 
+Found 9 outliers among 100 measurements (9.00%) + 6 (6.00%) high mild + 3 (3.00%) high severe diff --git a/src/core/state_res/power_levels.rs b/src/core/state_res/power_levels.rs new file mode 100644 index 00000000..e1768574 --- /dev/null +++ b/src/core/state_res/power_levels.rs @@ -0,0 +1,256 @@ +use std::collections::BTreeMap; + +use ruma::{ + events::{room::power_levels::RoomPowerLevelsEventContent, TimelineEventType}, + power_levels::{default_power_level, NotificationPowerLevels}, + serde::{ + deserialize_v1_powerlevel, vec_deserialize_int_powerlevel_values, + vec_deserialize_v1_powerlevel_values, + }, + Int, OwnedUserId, UserId, +}; +use serde::Deserialize; +use serde_json::{from_str as from_json_str, Error}; +use tracing::error; + +use super::{Result, RoomVersion}; + +#[derive(Deserialize)] +struct IntRoomPowerLevelsEventContent { + #[serde(default = "default_power_level")] + ban: Int, + + #[serde(default)] + events: BTreeMap, + + #[serde(default)] + events_default: Int, + + #[serde(default)] + invite: Int, + + #[serde(default = "default_power_level")] + kick: Int, + + #[serde(default = "default_power_level")] + redact: Int, + + #[serde(default = "default_power_level")] + state_default: Int, + + #[serde(default)] + users: BTreeMap, + + #[serde(default)] + users_default: Int, + + #[serde(default)] + notifications: IntNotificationPowerLevels, +} + +impl From for RoomPowerLevelsEventContent { + fn from(int_pl: IntRoomPowerLevelsEventContent) -> Self { + let IntRoomPowerLevelsEventContent { + ban, + events, + events_default, + invite, + kick, + redact, + state_default, + users, + users_default, + notifications, + } = int_pl; + + let mut pl = Self::new(); + pl.ban = ban; + pl.events = events; + pl.events_default = events_default; + pl.invite = invite; + pl.kick = kick; + pl.redact = redact; + pl.state_default = state_default; + pl.users = users; + pl.users_default = users_default; + pl.notifications = notifications.into(); + + pl + } +} + +#[derive(Deserialize)] +struct IntNotificationPowerLevels { + #[serde(default = "default_power_level")] + room: Int, +} + +impl Default for IntNotificationPowerLevels { + fn default() -> Self { Self { room: default_power_level() } } +} + +impl From for NotificationPowerLevels { + fn from(int_notif: IntNotificationPowerLevels) -> Self { + let mut notif = Self::new(); + notif.room = int_notif.room; + + notif + } +} + +#[inline] +pub(crate) fn deserialize_power_levels( + content: &str, + room_version: &RoomVersion, +) -> Option { + if room_version.integer_power_levels { + deserialize_integer_power_levels(content) + } else { + deserialize_legacy_power_levels(content) + } +} + +fn deserialize_integer_power_levels(content: &str) -> Option { + match from_json_str::(content) { + | Ok(content) => Some(content.into()), + | Err(_) => { + error!("m.room.power_levels event is not valid with integer values"); + None + }, + } +} + +fn deserialize_legacy_power_levels(content: &str) -> Option { + match from_json_str(content) { + | Ok(content) => Some(content), + | Err(_) => { + error!( + "m.room.power_levels event is not valid with integer or string integer values" + ); + None + }, + } +} + +#[derive(Deserialize)] +pub(crate) struct PowerLevelsContentFields { + #[serde(default, deserialize_with = "vec_deserialize_v1_powerlevel_values")] + pub(crate) users: Vec<(OwnedUserId, Int)>, + + #[serde(default, deserialize_with = "deserialize_v1_powerlevel")] + pub(crate) users_default: Int, +} + +impl PowerLevelsContentFields { + pub(crate) fn get_user_power(&self, user_id: 
&UserId) -> Option<&Int> { + let comparator = |item: &(OwnedUserId, Int)| { + let item: &UserId = &item.0; + item.cmp(user_id) + }; + + self.users + .binary_search_by(comparator) + .ok() + .and_then(|idx| self.users.get(idx).map(|item| &item.1)) + } +} + +#[derive(Deserialize)] +struct IntPowerLevelsContentFields { + #[serde(default, deserialize_with = "vec_deserialize_int_powerlevel_values")] + users: Vec<(OwnedUserId, Int)>, + + #[serde(default)] + users_default: Int, +} + +impl From for PowerLevelsContentFields { + fn from(pl: IntPowerLevelsContentFields) -> Self { + let IntPowerLevelsContentFields { users, users_default } = pl; + Self { users, users_default } + } +} + +#[inline] +pub(crate) fn deserialize_power_levels_content_fields( + content: &str, + room_version: &RoomVersion, +) -> Result { + if room_version.integer_power_levels { + deserialize_integer_power_levels_content_fields(content) + } else { + deserialize_legacy_power_levels_content_fields(content) + } +} + +fn deserialize_integer_power_levels_content_fields( + content: &str, +) -> Result { + from_json_str::(content).map(Into::into) +} + +fn deserialize_legacy_power_levels_content_fields( + content: &str, +) -> Result { + from_json_str(content) +} + +#[derive(Deserialize)] +pub(crate) struct PowerLevelsContentInvite { + #[serde(default, deserialize_with = "deserialize_v1_powerlevel")] + pub(crate) invite: Int, +} + +#[derive(Deserialize)] +struct IntPowerLevelsContentInvite { + #[serde(default)] + invite: Int, +} + +impl From for PowerLevelsContentInvite { + fn from(pl: IntPowerLevelsContentInvite) -> Self { + let IntPowerLevelsContentInvite { invite } = pl; + Self { invite } + } +} + +pub(crate) fn deserialize_power_levels_content_invite( + content: &str, + room_version: &RoomVersion, +) -> Result { + if room_version.integer_power_levels { + from_json_str::(content).map(Into::into) + } else { + from_json_str(content) + } +} + +#[derive(Deserialize)] +pub(crate) struct PowerLevelsContentRedact { + #[serde(default = "default_power_level", deserialize_with = "deserialize_v1_powerlevel")] + pub(crate) redact: Int, +} + +#[derive(Deserialize)] +pub(crate) struct IntPowerLevelsContentRedact { + #[serde(default = "default_power_level")] + redact: Int, +} + +impl From for PowerLevelsContentRedact { + fn from(pl: IntPowerLevelsContentRedact) -> Self { + let IntPowerLevelsContentRedact { redact } = pl; + Self { redact } + } +} + +pub(crate) fn deserialize_power_levels_content_redact( + content: &str, + room_version: &RoomVersion, +) -> Result { + if room_version.integer_power_levels { + from_json_str::(content).map(Into::into) + } else { + from_json_str(content) + } +} diff --git a/src/core/state_res/room_version.rs b/src/core/state_res/room_version.rs new file mode 100644 index 00000000..e1b0afe1 --- /dev/null +++ b/src/core/state_res/room_version.rs @@ -0,0 +1,149 @@ +use ruma::RoomVersionId; + +use super::{Error, Result}; + +#[derive(Debug)] +#[allow(clippy::exhaustive_enums)] +pub enum RoomDisposition { + /// A room version that has a stable specification. + Stable, + /// A room version that is not yet fully specified. 
+ Unstable, +} + +#[derive(Debug)] +#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)] +pub enum EventFormatVersion { + /// $id:server event id format + V1, + /// MSC1659-style $hash event id format: introduced for room v3 + V2, + /// MSC1884-style $hash format: introduced for room v4 + V3, +} + +#[derive(Debug)] +#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)] +pub enum StateResolutionVersion { + /// State resolution for rooms at version 1. + V1, + /// State resolution for room at version 2 or later. + V2, +} + +#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)] +pub struct RoomVersion { + /// The stability of this room. + pub disposition: RoomDisposition, + /// The format of the EventId. + pub event_format: EventFormatVersion, + /// Which state resolution algorithm is used. + pub state_res: StateResolutionVersion, + // FIXME: not sure what this one means? + pub enforce_key_validity: bool, + + /// `m.room.aliases` had special auth rules and redaction rules + /// before room version 6. + /// + /// before MSC2261/MSC2432, + pub special_case_aliases_auth: bool, + /// Strictly enforce canonical json, do not allow: + /// * Integers outside the range of [-2 ^ 53 + 1, 2 ^ 53 - 1] + /// * Floats + /// * NaN, Infinity, -Infinity + pub strict_canonicaljson: bool, + /// Verify notifications key while checking m.room.power_levels. + /// + /// bool: MSC2209: Check 'notifications' + pub limit_notifications_power_levels: bool, + /// Extra rules when verifying redaction events. + pub extra_redaction_checks: bool, + /// Allow knocking in event authentication. + /// + /// See [room v7 specification](https://spec.matrix.org/latest/rooms/v7/) for more information. + pub allow_knocking: bool, + /// Adds support for the restricted join rule. + /// + /// See: [MSC3289](https://github.com/matrix-org/matrix-spec-proposals/pull/3289) for more information. + pub restricted_join_rules: bool, + /// Adds support for the knock_restricted join rule. + /// + /// See: [MSC3787](https://github.com/matrix-org/matrix-spec-proposals/pull/3787) for more information. + pub knock_restricted_join_rule: bool, + /// Enforces integer power levels. + /// + /// See: [MSC3667](https://github.com/matrix-org/matrix-spec-proposals/pull/3667) for more information. + pub integer_power_levels: bool, + /// Determine the room creator using the `m.room.create` event's `sender`, + /// instead of the event content's `creator` field. + /// + /// See: [MSC2175](https://github.com/matrix-org/matrix-spec-proposals/pull/2175) for more information. 
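+	// Note: these capability flags are read by plain branching at call sites; for
+	// instance, power-level parsing in power_levels.rs above reduces to the
+	// following minimal sketch (`room_version_id` and `content` are stand-ins,
+	// `content` being the raw JSON of an m.room.power_levels event):
+	//
+	//     let version = RoomVersion::new(&room_version_id)?;
+	//     let parsed = if version.integer_power_levels {
+	//         deserialize_integer_power_levels(content)
+	//     } else {
+	//         deserialize_legacy_power_levels(content)
+	//     };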
+ pub use_room_create_sender: bool, +} + +impl RoomVersion { + pub const V1: Self = Self { + disposition: RoomDisposition::Stable, + event_format: EventFormatVersion::V1, + state_res: StateResolutionVersion::V1, + enforce_key_validity: false, + special_case_aliases_auth: true, + strict_canonicaljson: false, + limit_notifications_power_levels: false, + extra_redaction_checks: true, + allow_knocking: false, + restricted_join_rules: false, + knock_restricted_join_rule: false, + integer_power_levels: false, + use_room_create_sender: false, + }; + pub const V10: Self = Self { + knock_restricted_join_rule: true, + integer_power_levels: true, + ..Self::V9 + }; + pub const V11: Self = Self { + use_room_create_sender: true, + ..Self::V10 + }; + pub const V2: Self = Self { + state_res: StateResolutionVersion::V2, + ..Self::V1 + }; + pub const V3: Self = Self { + event_format: EventFormatVersion::V2, + extra_redaction_checks: false, + ..Self::V2 + }; + pub const V4: Self = Self { + event_format: EventFormatVersion::V3, + ..Self::V3 + }; + pub const V5: Self = Self { enforce_key_validity: true, ..Self::V4 }; + pub const V6: Self = Self { + special_case_aliases_auth: false, + strict_canonicaljson: true, + limit_notifications_power_levels: true, + ..Self::V5 + }; + pub const V7: Self = Self { allow_knocking: true, ..Self::V6 }; + pub const V8: Self = Self { restricted_join_rules: true, ..Self::V7 }; + pub const V9: Self = Self::V8; + + pub fn new(version: &RoomVersionId) -> Result { + Ok(match version { + | RoomVersionId::V1 => Self::V1, + | RoomVersionId::V2 => Self::V2, + | RoomVersionId::V3 => Self::V3, + | RoomVersionId::V4 => Self::V4, + | RoomVersionId::V5 => Self::V5, + | RoomVersionId::V6 => Self::V6, + | RoomVersionId::V7 => Self::V7, + | RoomVersionId::V8 => Self::V8, + | RoomVersionId::V9 => Self::V9, + | RoomVersionId::V10 => Self::V10, + | RoomVersionId::V11 => Self::V11, + | ver => return Err(Error::Unsupported(format!("found version `{ver}`"))), + }) + } +} diff --git a/src/core/state_res/state_event.rs b/src/core/state_res/state_event.rs new file mode 100644 index 00000000..2c038cfe --- /dev/null +++ b/src/core/state_res/state_event.rs @@ -0,0 +1,102 @@ +use std::{ + borrow::Borrow, + fmt::{Debug, Display}, + hash::Hash, + sync::Arc, +}; + +use ruma::{events::TimelineEventType, EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId}; +use serde_json::value::RawValue as RawJsonValue; + +/// Abstraction of a PDU so users can have their own PDU types. +pub trait Event { + type Id: Clone + Debug + Display + Eq + Ord + Hash + Send + Borrow; + + /// The `EventId` of this event. + fn event_id(&self) -> &Self::Id; + + /// The `RoomId` of this event. + fn room_id(&self) -> &RoomId; + + /// The `UserId` of this event. + fn sender(&self) -> &UserId; + + /// The time of creation on the originating server. + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch; + + /// The event type. + fn event_type(&self) -> &TimelineEventType; + + /// The event's content. + fn content(&self) -> &RawJsonValue; + + /// The state key for this event. + fn state_key(&self) -> Option<&str>; + + /// The events before this event. + // Requires GATs to avoid boxing (and TAIT for making it convenient). + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_; + + /// All the authenticating events for this event. + // Requires GATs to avoid boxing (and TAIT for making it convenient). 
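+	// For illustration (a hypothetical helper, not part of this trait): callers
+	// stay generic over the concrete PDU type, e.g.
+	//
+	//     fn count_auth_events<E: Event>(ev: &E) -> usize {
+	//         ev.auth_events().count()
+	//     }
+	//
+	// while the `Borrow<EventId>` bound on `Self::Id` lets ids be compared against
+	// a plain `&EventId` without cloning.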
+ fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_; + + /// If this event is a redaction event this is the event it redacts. + fn redacts(&self) -> Option<&Self::Id>; +} + +impl Event for &T { + type Id = T::Id; + + fn event_id(&self) -> &Self::Id { (*self).event_id() } + + fn room_id(&self) -> &RoomId { (*self).room_id() } + + fn sender(&self) -> &UserId { (*self).sender() } + + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { (*self).origin_server_ts() } + + fn event_type(&self) -> &TimelineEventType { (*self).event_type() } + + fn content(&self) -> &RawJsonValue { (*self).content() } + + fn state_key(&self) -> Option<&str> { (*self).state_key() } + + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { + (*self).prev_events() + } + + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { + (*self).auth_events() + } + + fn redacts(&self) -> Option<&Self::Id> { (*self).redacts() } +} + +impl Event for Arc { + type Id = T::Id; + + fn event_id(&self) -> &Self::Id { (**self).event_id() } + + fn room_id(&self) -> &RoomId { (**self).room_id() } + + fn sender(&self) -> &UserId { (**self).sender() } + + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { (**self).origin_server_ts() } + + fn event_type(&self) -> &TimelineEventType { (**self).event_type() } + + fn content(&self) -> &RawJsonValue { (**self).content() } + + fn state_key(&self) -> Option<&str> { (**self).state_key() } + + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { + (**self).prev_events() + } + + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { + (**self).auth_events() + } + + fn redacts(&self) -> Option<&Self::Id> { (**self).redacts() } +} diff --git a/src/core/state_res/state_res_bench.rs b/src/core/state_res/state_res_bench.rs new file mode 100644 index 00000000..a2bd2c23 --- /dev/null +++ b/src/core/state_res/state_res_bench.rs @@ -0,0 +1,648 @@ +// Because of criterion `cargo bench` works, +// but if you use `cargo bench -- --save-baseline ` +// or pass any other args to it, it fails with the error +// `cargo bench unknown option --save-baseline`. +// To pass args to criterion, use this form +// `cargo bench --bench -- --save-baseline `. + +#![allow(clippy::exhaustive_structs)] + +use std::{ + borrow::Borrow, + collections::{HashMap, HashSet}, + sync::{ + atomic::{AtomicU64, Ordering::SeqCst}, + Arc, + }, +}; + +use criterion::{criterion_group, criterion_main, Criterion}; +use event::PduEvent; +use futures::{future, future::ready}; +use ruma::{int, uint}; +use maplit::{btreemap, hashmap, hashset}; +use ruma::{ + room_id, user_id, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId, + Signatures, UserId, +}; +use ruma::events::{ + pdu::{EventHash, Pdu, RoomV3Pdu}, + room::{ + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + }, + StateEventType, TimelineEventType, +}; +use conduwuit::state_res::{self as state_res, Error, Event, Result, StateMap}; +use serde_json::{ + json, + value::{to_raw_value as to_raw_json_value, RawValue as RawJsonValue}, +}; + +static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); + +fn lexico_topo_sort(c: &mut Criterion) { + c.bench_function("lexicographical topological sort", |b| { + let graph = hashmap! 
{ + event_id("l") => hashset![event_id("o")], + event_id("m") => hashset![event_id("n"), event_id("o")], + event_id("n") => hashset![event_id("o")], + event_id("o") => hashset![], // "o" has zero outgoing edges but 4 incoming edges + event_id("p") => hashset![event_id("o")], + }; + b.iter(|| { + let _ = state_res::lexicographical_topological_sort(&graph, &|_| { + future::ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) + }); + }); + }); +} + +fn resolution_shallow_auth_chain(c: &mut Criterion) { + c.bench_function("resolve state of 5 events one fork", |b| { + let mut store = TestStore(hashmap! {}); + + // build up the DAG + let (state_at_bob, state_at_charlie, _) = store.set_up(); + + b.iter(|| async { + let ev_map = store.0.clone(); + let state_sets = [&state_at_bob, &state_at_charlie]; + let fetch = |id: OwnedEventId| ready(ev_map.get(&id).map(Arc::clone)); + let exists = |id: OwnedEventId| ready(ev_map.get(&id).is_some()); + let auth_chain_sets = state_sets + .iter() + .map(|map| { + store.auth_event_ids(room_id(), map.values().cloned().collect()).unwrap() + }) + .collect(); + + let _ = match state_res::resolve( + &RoomVersionId::V6, + state_sets.into_iter(), + &auth_chain_sets, + &fetch, + &exists, + ) + .await + { + Ok(state) => state, + Err(e) => panic!("{e}"), + }; + }); + }); +} + +fn resolve_deeper_event_set(c: &mut Criterion) { + c.bench_function("resolve state of 10 events 3 conflicting", |b| { + let mut inner = INITIAL_EVENTS(); + let ban = BAN_STATE_SET(); + + inner.extend(ban); + let store = TestStore(inner.clone()); + + let state_set_a = [ + inner.get(&event_id("CREATE")).unwrap(), + inner.get(&event_id("IJR")).unwrap(), + inner.get(&event_id("IMA")).unwrap(), + inner.get(&event_id("IMB")).unwrap(), + inner.get(&event_id("IMC")).unwrap(), + inner.get(&event_id("MB")).unwrap(), + inner.get(&event_id("PA")).unwrap(), + ] + .iter() + .map(|ev| { + (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id().to_owned()) + }) + .collect::>(); + + let state_set_b = [ + inner.get(&event_id("CREATE")).unwrap(), + inner.get(&event_id("IJR")).unwrap(), + inner.get(&event_id("IMA")).unwrap(), + inner.get(&event_id("IMB")).unwrap(), + inner.get(&event_id("IMC")).unwrap(), + inner.get(&event_id("IME")).unwrap(), + inner.get(&event_id("PA")).unwrap(), + ] + .iter() + .map(|ev| { + (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id().to_owned()) + }) + .collect::>(); + + b.iter(|| async { + let state_sets = [&state_set_a, &state_set_b]; + let auth_chain_sets = state_sets + .iter() + .map(|map| { + store.auth_event_ids(room_id(), map.values().cloned().collect()).unwrap() + }) + .collect(); + + let fetch = |id: OwnedEventId| ready(inner.get(&id).map(Arc::clone)); + let exists = |id: OwnedEventId| ready(inner.get(&id).is_some()); + let _ = match state_res::resolve( + &RoomVersionId::V6, + state_sets.into_iter(), + &auth_chain_sets, + &fetch, + &exists, + ) + .await + { + Ok(state) => state, + Err(_) => panic!("resolution failed during benchmarking"), + }; + }); + }); +} + +criterion_group!( + benches, + lexico_topo_sort, + resolution_shallow_auth_chain, + resolve_deeper_event_set +); + +criterion_main!(benches); + +//*///////////////////////////////////////////////////////////////////// +// +// IMPLEMENTATION DETAILS AHEAD +// +/////////////////////////////////////////////////////////////////////*/ +struct TestStore(HashMap>); + +#[allow(unused)] +impl TestStore { + fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> Result> { + self.0 + 
.get(event_id) + .map(Arc::clone) + .ok_or_else(|| Error::NotFound(format!("{} not found", event_id))) + } + + /// Returns the events that correspond to the `event_ids` sorted in the same order. + fn get_events(&self, room_id: &RoomId, event_ids: &[OwnedEventId]) -> Result>> { + let mut events = vec![]; + for id in event_ids { + events.push(self.get_event(room_id, id)?); + } + Ok(events) + } + + /// Returns a Vec of the related auth events to the given `event`. + fn auth_event_ids(&self, room_id: &RoomId, event_ids: Vec) -> Result> { + let mut result = HashSet::new(); + let mut stack = event_ids; + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if result.contains(&ev_id) { + continue; + } + + result.insert(ev_id.clone()); + + let event = self.get_event(room_id, ev_id.borrow())?; + + stack.extend(event.auth_events().map(ToOwned::to_owned)); + } + + Ok(result) + } + + /// Returns a vector representing the difference in auth chains of the given `events`. + fn auth_chain_diff(&self, room_id: &RoomId, event_ids: Vec>) -> Result> { + let mut auth_chain_sets = vec![]; + for ids in event_ids { + // TODO state store `auth_event_ids` returns self in the event ids list + // when an event returns `auth_event_ids` self is not contained + let chain = self.auth_event_ids(room_id, ids)?.into_iter().collect::>(); + auth_chain_sets.push(chain); + } + + if let Some(first) = auth_chain_sets.first().cloned() { + let common = auth_chain_sets + .iter() + .skip(1) + .fold(first, |a, b| a.intersection(b).cloned().collect::>()); + + Ok(auth_chain_sets + .into_iter() + .flatten() + .filter(|id| !common.contains(id.borrow())) + .collect()) + } else { + Ok(vec![]) + } + } +} + +impl TestStore { + #[allow(clippy::type_complexity)] + fn set_up( + &mut self, + ) -> (StateMap, StateMap, StateMap) { + let create_event = to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + ); + let cre = create_event.event_id().to_owned(); + self.0.insert(cre.clone(), Arc::clone(&create_event)); + + let alice_mem = to_pdu_event( + "IMA", + alice(), + TimelineEventType::RoomMember, + Some(alice().to_string().as_str()), + member_content_join(), + &[cre.clone()], + &[cre.clone()], + ); + self.0.insert(alice_mem.event_id().to_owned(), Arc::clone(&alice_mem)); + + let join_rules = to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), + &[cre.clone(), alice_mem.event_id().to_owned()], + &[alice_mem.event_id().to_owned()], + ); + self.0.insert(join_rules.event_id().to_owned(), join_rules.clone()); + + // Bob and Charlie join at the same time, so there is a fork + // this will be represented in the state_sets when we resolve + let bob_mem = to_pdu_event( + "IMB", + bob(), + TimelineEventType::RoomMember, + Some(bob().to_string().as_str()), + member_content_join(), + &[cre.clone(), join_rules.event_id().to_owned()], + &[join_rules.event_id().to_owned()], + ); + self.0.insert(bob_mem.event_id().to_owned(), bob_mem.clone()); + + let charlie_mem = to_pdu_event( + "IMC", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().to_string().as_str()), + member_content_join(), + &[cre, join_rules.event_id().to_owned()], + &[join_rules.event_id().to_owned()], + ); + self.0.insert(charlie_mem.event_id().to_owned(), charlie_mem.clone()); + + let state_at_bob = [&create_event, 
&alice_mem, &join_rules, &bob_mem] + .iter() + .map(|e| { + (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) + }) + .collect::>(); + + let state_at_charlie = [&create_event, &alice_mem, &join_rules, &charlie_mem] + .iter() + .map(|e| { + (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) + }) + .collect::>(); + + let expected = [&create_event, &alice_mem, &join_rules, &bob_mem, &charlie_mem] + .iter() + .map(|e| { + (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) + }) + .collect::>(); + + (state_at_bob, state_at_charlie, expected) + } +} + +fn event_id(id: &str) -> OwnedEventId { + if id.contains('$') { + return id.try_into().unwrap(); + } + format!("${}:foo", id).try_into().unwrap() +} + +fn alice() -> &'static UserId { + user_id!("@alice:foo") +} + +fn bob() -> &'static UserId { + user_id!("@bob:foo") +} + +fn charlie() -> &'static UserId { + user_id!("@charlie:foo") +} + +fn ella() -> &'static UserId { + user_id!("@ella:foo") +} + +fn room_id() -> &'static RoomId { + room_id!("!test:foo") +} + +fn member_content_ban() -> Box { + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Ban)).unwrap() +} + +fn member_content_join() -> Box { + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Join)).unwrap() +} + +fn to_pdu_event( + id: &str, + sender: &UserId, + ev_type: TimelineEventType, + state_key: Option<&str>, + content: Box, + auth_events: &[S], + prev_events: &[S], +) -> Arc +where + S: AsRef, +{ + // We don't care if the addition happens in order just that it is atomic + // (each event has its own value) + let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); + let id = if id.contains('$') { id.to_owned() } else { format!("${}:foo", id) }; + let auth_events = auth_events.iter().map(AsRef::as_ref).map(event_id).collect::>(); + let prev_events = prev_events.iter().map(AsRef::as_ref).map(event_id).collect::>(); + + let state_key = state_key.map(ToOwned::to_owned); + Arc::new(PduEvent { + event_id: id.try_into().unwrap(), + rest: Pdu::RoomV3Pdu(RoomV3Pdu { + room_id: room_id().to_owned(), + sender: sender.to_owned(), + origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), + state_key, + kind: ev_type, + content, + redacts: None, + unsigned: btreemap! 
{}, + auth_events, + prev_events, + depth: uint!(0), + hashes: EventHash::new(String::new()), + signatures: Signatures::new(), + }), + }) +} + +// all graphs start with these input events +#[allow(non_snake_case)] +fn INITIAL_EVENTS() -> HashMap> { + vec![ + to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + ), + to_pdu_event( + "IMA", + alice(), + TimelineEventType::RoomMember, + Some(alice().as_str()), + member_content_join(), + &["CREATE"], + &["CREATE"], + ), + to_pdu_event( + "IPOWER", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100 } })).unwrap(), + &["CREATE", "IMA"], + &["IMA"], + ), + to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["IPOWER"], + ), + to_pdu_event( + "IMB", + bob(), + TimelineEventType::RoomMember, + Some(bob().to_string().as_str()), + member_content_join(), + &["CREATE", "IJR", "IPOWER"], + &["IJR"], + ), + to_pdu_event( + "IMC", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().to_string().as_str()), + member_content_join(), + &["CREATE", "IJR", "IPOWER"], + &["IMB"], + ), + to_pdu_event::<&EventId>( + "START", + charlie(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + &[], + &[], + ), + to_pdu_event::<&EventId>( + "END", + charlie(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + &[], + &[], + ), + ] + .into_iter() + .map(|ev| (ev.event_id().to_owned(), ev)) + .collect() +} + +// all graphs start with these input events +#[allow(non_snake_case)] +fn BAN_STATE_SET() -> HashMap> { + vec![ + to_pdu_event( + "PA", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + &["CREATE", "IMA", "IPOWER"], // auth_events + &["START"], // prev_events + ), + to_pdu_event( + "PB", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["END"], + ), + to_pdu_event( + "MB", + alice(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + member_content_ban(), + &["CREATE", "IMA", "PB"], + &["PA"], + ), + to_pdu_event( + "IME", + ella(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + member_content_join(), + &["CREATE", "IJR", "PA"], + &["MB"], + ), + ] + .into_iter() + .map(|ev| (ev.event_id().to_owned(), ev)) + .collect() +} + +/// Convenience trait for adding event type plus state key to state maps. 
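+// For illustration, mirroring the impl below:
+//
+//     let key = (&TimelineEventType::RoomMember).with_state_key("@alice:foo");
+//     assert_eq!(key, (StateEventType::RoomMember, "@alice:foo".to_owned()));
+//
+// i.e. the (StateEventType, String) tuple used as a StateMap key throughout.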
+trait EventTypeExt { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String); +} + +impl EventTypeExt for &TimelineEventType { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { + (self.to_string().into(), state_key.into()) + } +} + +mod event { + use ruma_common::{MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId}; + use ruma_events::{pdu::Pdu, TimelineEventType}; + use ruma_state_res::Event; + use serde::{Deserialize, Serialize}; + use serde_json::value::RawValue as RawJsonValue; + + impl Event for PduEvent { + type Id = OwnedEventId; + + fn event_id(&self) -> &Self::Id { + &self.event_id + } + + fn room_id(&self) -> &RoomId { + match &self.rest { + Pdu::RoomV1Pdu(ev) => &ev.room_id, + Pdu::RoomV3Pdu(ev) => &ev.room_id, + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn sender(&self) -> &UserId { + match &self.rest { + Pdu::RoomV1Pdu(ev) => &ev.sender, + Pdu::RoomV3Pdu(ev) => &ev.sender, + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn event_type(&self) -> &TimelineEventType { + match &self.rest { + Pdu::RoomV1Pdu(ev) => &ev.kind, + Pdu::RoomV3Pdu(ev) => &ev.kind, + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn content(&self) -> &RawJsonValue { + match &self.rest { + Pdu::RoomV1Pdu(ev) => &ev.content, + Pdu::RoomV3Pdu(ev) => &ev.content, + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { + match &self.rest { + Pdu::RoomV1Pdu(ev) => ev.origin_server_ts, + Pdu::RoomV3Pdu(ev) => ev.origin_server_ts, + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn state_key(&self) -> Option<&str> { + match &self.rest { + Pdu::RoomV1Pdu(ev) => ev.state_key.as_deref(), + Pdu::RoomV3Pdu(ev) => ev.state_key.as_deref(), + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn prev_events(&self) -> Box + Send + '_> { + match &self.rest { + Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)), + Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()), + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn auth_events(&self) -> Box + Send + '_> { + match &self.rest { + Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)), + Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()), + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + + fn redacts(&self) -> Option<&Self::Id> { + match &self.rest { + Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(), + Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(), + #[cfg(not(feature = "unstable-exhaustive-types"))] + _ => unreachable!("new PDU version"), + } + } + } + + #[derive(Clone, Debug, Deserialize, Serialize)] + pub(crate) struct PduEvent { + pub(crate) event_id: OwnedEventId, + #[serde(flatten)] + pub(crate) rest: Pdu, + } +} diff --git a/src/core/state_res/test_utils.rs b/src/core/state_res/test_utils.rs new file mode 100644 index 00000000..7954b28d --- /dev/null +++ b/src/core/state_res/test_utils.rs @@ -0,0 +1,688 @@ +use std::{ + borrow::Borrow, + collections::{BTreeMap, HashMap, HashSet}, + sync::{ + atomic::{AtomicU64, Ordering::SeqCst}, + Arc, + }, +}; + +use futures_util::future::ready; +use 
js_int::{int, uint}; +use ruma_common::{ + event_id, room_id, user_id, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, + RoomVersionId, ServerSignatures, UserId, +}; +use ruma_events::{ + pdu::{EventHash, Pdu, RoomV3Pdu}, + room::{ + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + }, + TimelineEventType, +}; +use serde_json::{ + json, + value::{to_raw_value as to_raw_json_value, RawValue as RawJsonValue}, +}; +use tracing::info; + +pub(crate) use self::event::PduEvent; +use crate::{auth_types_for_event, Error, Event, EventTypeExt, Result, StateMap}; + +static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); + +pub(crate) async fn do_check( + events: &[Arc], + edges: Vec>, + expected_state_ids: Vec, +) { + // To activate logging use `RUST_LOG=debug cargo t` + + let init_events = INITIAL_EVENTS(); + + let mut store = TestStore( + init_events + .values() + .chain(events) + .map(|ev| (ev.event_id().to_owned(), ev.clone())) + .collect(), + ); + + // This will be lexi_topo_sorted for resolution + let mut graph = HashMap::new(); + // This is the same as in `resolve` event_id -> OriginalStateEvent + let mut fake_event_map = HashMap::new(); + + // Create the DB of events that led up to this point + // TODO maybe clean up some of these clones it is just tests but... + for ev in init_events.values().chain(events) { + graph.insert(ev.event_id().to_owned(), HashSet::new()); + fake_event_map.insert(ev.event_id().to_owned(), ev.clone()); + } + + for pair in INITIAL_EDGES().windows(2) { + if let [a, b] = &pair { + graph + .entry(a.to_owned()) + .or_insert_with(HashSet::new) + .insert(b.clone()); + } + } + + for edge_list in edges { + for pair in edge_list.windows(2) { + if let [a, b] = &pair { + graph + .entry(a.to_owned()) + .or_insert_with(HashSet::new) + .insert(b.clone()); + } + } + } + + // event_id -> PduEvent + let mut event_map: HashMap> = HashMap::new(); + // event_id -> StateMap + let mut state_at_event: HashMap> = HashMap::new(); + + // Resolve the current state and add it to the state_at_event map then continue + // on in "time" + for node in crate::lexicographical_topological_sort(&graph, &|_id| async { + Ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) + }) + .await + .unwrap() + { + let fake_event = fake_event_map.get(&node).unwrap(); + let event_id = fake_event.event_id().to_owned(); + + let prev_events = graph.get(&node).unwrap(); + + let state_before: StateMap = if prev_events.is_empty() { + HashMap::new() + } else if prev_events.len() == 1 { + state_at_event + .get(prev_events.iter().next().unwrap()) + .unwrap() + .clone() + } else { + let state_sets = prev_events + .iter() + .filter_map(|k| state_at_event.get(k)) + .collect::>(); + + info!( + "{:#?}", + state_sets + .iter() + .map(|map| map + .iter() + .map(|((ty, key), id)| format!("(({ty}{key:?}), {id})")) + .collect::>()) + .collect::>() + ); + + let auth_chain_sets: Vec<_> = state_sets + .iter() + .map(|map| { + store + .auth_event_ids(room_id(), map.values().cloned().collect()) + .unwrap() + }) + .collect(); + + let event_map = &event_map; + let fetch = |id: ::Id| ready(event_map.get(&id).cloned()); + let exists = |id: ::Id| ready(event_map.get(&id).is_some()); + let resolved = crate::resolve( + &RoomVersionId::V6, + state_sets, + &auth_chain_sets, + &fetch, + &exists, + 1, + ) + .await; + + match resolved { + | Ok(state) => state, + | Err(e) => panic!("resolution for {node} failed: {e}"), + } + }; + + let mut state_after = state_before.clone(); + + let ty = 
fake_event.event_type(); + let key = fake_event.state_key().unwrap(); + state_after.insert(ty.with_state_key(key), event_id.to_owned()); + + let auth_types = auth_types_for_event( + fake_event.event_type(), + fake_event.sender(), + fake_event.state_key(), + fake_event.content(), + ) + .unwrap(); + + let mut auth_events = vec![]; + for key in auth_types { + if state_before.contains_key(&key) { + auth_events.push(state_before[&key].clone()); + } + } + + // TODO The event is just remade, adding the auth_events and prev_events here + // the `to_pdu_event` was split into `init` and the fn below, could be better + let e = fake_event; + let ev_id = e.event_id(); + let event = to_pdu_event( + e.event_id().as_str(), + e.sender(), + e.event_type().clone(), + e.state_key(), + e.content().to_owned(), + &auth_events, + &prev_events.iter().cloned().collect::>(), + ); + + // We have to update our store, an actual user of this lib would + // be giving us state from a DB. + store.0.insert(ev_id.to_owned(), event.clone()); + + state_at_event.insert(node, state_after); + event_map.insert(event_id.to_owned(), Arc::clone(store.0.get(ev_id).unwrap())); + } + + let mut expected_state = StateMap::new(); + for node in expected_state_ids { + let ev = event_map.get(&node).unwrap_or_else(|| { + panic!( + "{node} not found in {:?}", + event_map + .keys() + .map(ToString::to_string) + .collect::>() + ) + }); + + let key = ev.event_type().with_state_key(ev.state_key().unwrap()); + + expected_state.insert(key, node); + } + + let start_state = state_at_event.get(event_id!("$START:foo")).unwrap(); + + let end_state = state_at_event + .get(event_id!("$END:foo")) + .unwrap() + .iter() + .filter(|(k, v)| { + expected_state.contains_key(k) + || start_state.get(k) != Some(*v) + // Filter out the dummy messages events. + // These act as points in time where there should be a known state to + // test against. + && **k != ("m.room.message".into(), "dummy".to_owned()) + }) + .map(|(k, v)| (k.clone(), v.clone())) + .collect::>(); + + assert_eq!(expected_state, end_state); +} + +#[allow(clippy::exhaustive_structs)] +pub(crate) struct TestStore(pub(crate) HashMap>); + +impl TestStore { + pub(crate) fn get_event(&self, _: &RoomId, event_id: &EventId) -> Result> { + self.0 + .get(event_id) + .cloned() + .ok_or_else(|| Error::NotFound(format!("{event_id} not found"))) + } + + /// Returns a Vec of the related auth events to the given `event`. 
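+	// (Despite the wording above, the return type is a `HashSet`, not a `Vec`.)
+	// This is an iterative DFS over `auth_events`: pop an id, skip it if already
+	// seen, otherwise record it and push its own auth events, yielding the full
+	// auth chain of the given events. Roughly:
+	//
+	//     while let Some(id) = stack.pop() {
+	//         if result.insert(id.clone()) {
+	//             stack.extend(auth_events_of(&id));
+	//         }
+	//     }
+	//
+	// (`auth_events_of` is shorthand here for fetching the event and reading its
+	// `auth_events()`.)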
+ pub(crate) fn auth_event_ids( + &self, + room_id: &RoomId, + event_ids: Vec, + ) -> Result> { + let mut result = HashSet::new(); + let mut stack = event_ids; + + // DFS for auth event chain + while let Some(ev_id) = stack.pop() { + if result.contains(&ev_id) { + continue; + } + + result.insert(ev_id.clone()); + + let event = self.get_event(room_id, ev_id.borrow())?; + + stack.extend(event.auth_events().map(ToOwned::to_owned)); + } + + Ok(result) + } +} + +// A StateStore implementation for testing +#[allow(clippy::type_complexity)] +impl TestStore { + pub(crate) fn set_up( + &mut self, + ) -> (StateMap, StateMap, StateMap) { + let create_event = to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + ); + let cre = create_event.event_id().to_owned(); + self.0.insert(cre.clone(), Arc::clone(&create_event)); + + let alice_mem = to_pdu_event( + "IMA", + alice(), + TimelineEventType::RoomMember, + Some(alice().as_str()), + member_content_join(), + &[cre.clone()], + &[cre.clone()], + ); + self.0 + .insert(alice_mem.event_id().to_owned(), Arc::clone(&alice_mem)); + + let join_rules = to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), + &[cre.clone(), alice_mem.event_id().to_owned()], + &[alice_mem.event_id().to_owned()], + ); + self.0 + .insert(join_rules.event_id().to_owned(), join_rules.clone()); + + // Bob and Charlie join at the same time, so there is a fork + // this will be represented in the state_sets when we resolve + let bob_mem = to_pdu_event( + "IMB", + bob(), + TimelineEventType::RoomMember, + Some(bob().as_str()), + member_content_join(), + &[cre.clone(), join_rules.event_id().to_owned()], + &[join_rules.event_id().to_owned()], + ); + self.0 + .insert(bob_mem.event_id().to_owned(), bob_mem.clone()); + + let charlie_mem = to_pdu_event( + "IMC", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().as_str()), + member_content_join(), + &[cre, join_rules.event_id().to_owned()], + &[join_rules.event_id().to_owned()], + ); + self.0 + .insert(charlie_mem.event_id().to_owned(), charlie_mem.clone()); + + let state_at_bob = [&create_event, &alice_mem, &join_rules, &bob_mem] + .iter() + .map(|e| { + (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) + }) + .collect::>(); + + let state_at_charlie = [&create_event, &alice_mem, &join_rules, &charlie_mem] + .iter() + .map(|e| { + (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) + }) + .collect::>(); + + let expected = [&create_event, &alice_mem, &join_rules, &bob_mem, &charlie_mem] + .iter() + .map(|e| { + (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) + }) + .collect::>(); + + (state_at_bob, state_at_charlie, expected) + } +} + +pub(crate) fn event_id(id: &str) -> OwnedEventId { + if id.contains('$') { + return id.try_into().unwrap(); + } + + format!("${id}:foo").try_into().unwrap() +} + +pub(crate) fn alice() -> &'static UserId { user_id!("@alice:foo") } + +pub(crate) fn bob() -> &'static UserId { user_id!("@bob:foo") } + +pub(crate) fn charlie() -> &'static UserId { user_id!("@charlie:foo") } + +pub(crate) fn ella() -> &'static UserId { user_id!("@ella:foo") } + +pub(crate) fn zara() -> &'static UserId { user_id!("@zara:foo") } + +pub(crate) fn room_id() -> &'static RoomId { room_id!("!test:foo") } + +pub(crate) 
fn member_content_ban() -> Box { + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Ban)).unwrap() +} + +pub(crate) fn member_content_join() -> Box { + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Join)).unwrap() +} + +pub(crate) fn to_init_pdu_event( + id: &str, + sender: &UserId, + ev_type: TimelineEventType, + state_key: Option<&str>, + content: Box, +) -> Arc { + let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); + let id = if id.contains('$') { + id.to_owned() + } else { + format!("${id}:foo") + }; + + let state_key = state_key.map(ToOwned::to_owned); + Arc::new(PduEvent { + event_id: id.try_into().unwrap(), + rest: Pdu::RoomV3Pdu(RoomV3Pdu { + room_id: room_id().to_owned(), + sender: sender.to_owned(), + origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), + state_key, + kind: ev_type, + content, + redacts: None, + unsigned: BTreeMap::new(), + auth_events: vec![], + prev_events: vec![], + depth: uint!(0), + hashes: EventHash::new("".to_owned()), + signatures: ServerSignatures::default(), + }), + }) +} + +pub(crate) fn to_pdu_event( + id: &str, + sender: &UserId, + ev_type: TimelineEventType, + state_key: Option<&str>, + content: Box, + auth_events: &[S], + prev_events: &[S], +) -> Arc +where + S: AsRef, +{ + let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); + let id = if id.contains('$') { + id.to_owned() + } else { + format!("${id}:foo") + }; + let auth_events = auth_events + .iter() + .map(AsRef::as_ref) + .map(event_id) + .collect::>(); + let prev_events = prev_events + .iter() + .map(AsRef::as_ref) + .map(event_id) + .collect::>(); + + let state_key = state_key.map(ToOwned::to_owned); + Arc::new(PduEvent { + event_id: id.try_into().unwrap(), + rest: Pdu::RoomV3Pdu(RoomV3Pdu { + room_id: room_id().to_owned(), + sender: sender.to_owned(), + origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), + state_key, + kind: ev_type, + content, + redacts: None, + unsigned: BTreeMap::new(), + auth_events, + prev_events, + depth: uint!(0), + hashes: EventHash::new("".to_owned()), + signatures: ServerSignatures::default(), + }), + }) +} + +// all graphs start with these input events +#[allow(non_snake_case)] +pub(crate) fn INITIAL_EVENTS() -> HashMap> { + vec![ + to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + ), + to_pdu_event( + "IMA", + alice(), + TimelineEventType::RoomMember, + Some(alice().as_str()), + member_content_join(), + &["CREATE"], + &["CREATE"], + ), + to_pdu_event( + "IPOWER", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100 } })).unwrap(), + &["CREATE", "IMA"], + &["IMA"], + ), + to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["IPOWER"], + ), + to_pdu_event( + "IMB", + bob(), + TimelineEventType::RoomMember, + Some(bob().as_str()), + member_content_join(), + &["CREATE", "IJR", "IPOWER"], + &["IJR"], + ), + to_pdu_event( + "IMC", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().as_str()), + member_content_join(), + &["CREATE", "IJR", "IPOWER"], + &["IMB"], + ), + to_pdu_event::<&EventId>( + "START", + charlie(), + TimelineEventType::RoomMessage, + Some("dummy"), + to_raw_json_value(&json!({})).unwrap(), + &[], + &[], + ), + to_pdu_event::<&EventId>( + "END", + charlie(), 
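+			// "START" and "END" are synthetic m.room.message events with state_key
+			// "dummy": they only mark points in time with a known state to compare
+			// against, and do_check() filters them out of the final state map.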
+ TimelineEventType::RoomMessage, + Some("dummy"), + to_raw_json_value(&json!({})).unwrap(), + &[], + &[], + ), + ] + .into_iter() + .map(|ev| (ev.event_id().to_owned(), ev)) + .collect() +} + +// all graphs start with these input events +#[allow(non_snake_case)] +pub(crate) fn INITIAL_EVENTS_CREATE_ROOM() -> HashMap> { + vec![to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + )] + .into_iter() + .map(|ev| (ev.event_id().to_owned(), ev)) + .collect() +} + +#[allow(non_snake_case)] +pub(crate) fn INITIAL_EDGES() -> Vec { + vec!["START", "IMC", "IMB", "IJR", "IPOWER", "IMA", "CREATE"] + .into_iter() + .map(event_id) + .collect::>() +} + +pub(crate) mod event { + use ruma_common::{MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId}; + use ruma_events::{pdu::Pdu, TimelineEventType}; + use serde::{Deserialize, Serialize}; + use serde_json::value::RawValue as RawJsonValue; + + use crate::Event; + + impl Event for PduEvent { + type Id = OwnedEventId; + + fn event_id(&self) -> &Self::Id { &self.event_id } + + fn room_id(&self) -> &RoomId { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.room_id, + | Pdu::RoomV3Pdu(ev) => &ev.room_id, + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn sender(&self) -> &UserId { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.sender, + | Pdu::RoomV3Pdu(ev) => &ev.sender, + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn event_type(&self) -> &TimelineEventType { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.kind, + | Pdu::RoomV3Pdu(ev) => &ev.kind, + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn content(&self) -> &RawJsonValue { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.content, + | Pdu::RoomV3Pdu(ev) => &ev.content, + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.origin_server_ts, + | Pdu::RoomV3Pdu(ev) => ev.origin_server_ts, + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn state_key(&self) -> Option<&str> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.state_key.as_deref(), + | Pdu::RoomV3Pdu(ev) => ev.state_key.as_deref(), + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + #[allow(refining_impl_trait)] + fn prev_events(&self) -> Box + Send + '_> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()), + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + #[allow(refining_impl_trait)] + fn auth_events(&self) -> Box + Send + '_> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()), + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + + fn redacts(&self) -> Option<&Self::Id> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(), + | Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(), + #[allow(unreachable_patterns)] + | _ => unreachable!("new PDU version"), + } + } + } + + #[derive(Clone, Debug, Deserialize, Serialize)] + #[allow(clippy::exhaustive_structs)] + pub(crate) struct PduEvent { + pub(crate) 
event_id: OwnedEventId, + #[serde(flatten)] + pub(crate) rest: Pdu, + } +} diff --git a/src/service/rooms/event_handler/fetch_prev.rs b/src/service/rooms/event_handler/fetch_prev.rs index aea70739..5a38f7fe 100644 --- a/src/service/rooms/event_handler/fetch_prev.rs +++ b/src/service/rooms/event_handler/fetch_prev.rs @@ -3,12 +3,15 @@ use std::{ sync::Arc, }; -use conduwuit::{debug_warn, err, implement, PduEvent, Result}; +use conduwuit::{ + debug_warn, err, implement, + state_res::{self}, + PduEvent, Result, +}; use futures::{future, FutureExt}; use ruma::{ - int, - state_res::{self}, - uint, CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, ServerName, UInt, + int, uint, CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, ServerName, + UInt, }; use super::check_room_id; diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index b7c38313..3cc15fc4 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -3,10 +3,12 @@ use std::{ sync::Arc, }; -use conduwuit::{debug, debug_info, err, implement, trace, warn, Err, Error, PduEvent, Result}; +use conduwuit::{ + debug, debug_info, err, implement, state_res, trace, warn, Err, Error, PduEvent, Result, +}; use futures::{future::ready, TryFutureExt}; use ruma::{ - api::client::error::ErrorKind, events::StateEventType, state_res, CanonicalJsonObject, + api::client::error::ErrorKind, events::StateEventType, CanonicalJsonObject, CanonicalJsonValue, EventId, RoomId, ServerName, }; diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 8bcbc48b..5960c734 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -19,12 +19,12 @@ use std::{ use conduwuit::{ utils::{MutexMap, TryFutureExtExt}, - Err, PduEvent, Result, Server, + Err, PduEvent, Result, RoomVersion, Server, }; use futures::TryFutureExt; use ruma::{ - events::room::create::RoomCreateEventContent, state_res::RoomVersion, OwnedEventId, - OwnedRoomId, RoomId, RoomVersionId, + events::room::create::RoomCreateEventContent, OwnedEventId, OwnedRoomId, RoomId, + RoomVersionId, }; use crate::{globals, rooms, sending, server_keys, Dep}; diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index eb9ca01f..28011a1b 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -5,15 +5,14 @@ use std::{ }; use conduwuit::{ - err, implement, trace, + err, implement, + state_res::{self, StateMap}, + trace, utils::stream::{automatic_width, IterStream, ReadyExt, TryWidebandExt, WidebandExt}, Error, Result, }; use futures::{future::try_join, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; -use ruma::{ - state_res::{self, StateMap}, - OwnedEventId, RoomId, RoomVersionId, -}; +use ruma::{OwnedEventId, RoomId, RoomVersionId}; use crate::rooms::state_compressor::CompressedState; diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 7bf3b8f8..843b2af9 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -8,10 +8,10 @@ use std::{ use conduwuit::{ debug, err, implement, trace, utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryWidebandExt}, - PduEvent, Result, + PduEvent, 
Result, StateMap, }; use futures::{future::try_join, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; -use ruma::{state_res::StateMap, OwnedEventId, RoomId, RoomVersionId}; +use ruma::{OwnedEventId, RoomId, RoomVersionId}; use crate::rooms::short::ShortStateHash; diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index b33b0388..f319ba48 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -1,16 +1,12 @@ use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::Instant}; use conduwuit::{ - debug, debug_info, err, implement, trace, + debug, debug_info, err, implement, state_res, trace, utils::stream::{BroadbandExt, ReadyExt}, - warn, Err, PduEvent, Result, + warn, Err, EventTypeExt, PduEvent, Result, }; use futures::{future::ready, FutureExt, StreamExt}; -use ruma::{ - events::StateEventType, - state_res::{self, EventTypeExt}, - CanonicalJsonValue, RoomId, ServerName, -}; +use ruma::{events::StateEventType, CanonicalJsonValue, RoomId, ServerName}; use super::{get_room_version_id, to_room_version}; use crate::rooms::{ diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index de90a89c..d538de3c 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -3,6 +3,7 @@ use std::{collections::HashMap, fmt::Write, iter::once, sync::Arc}; use conduwuit::{ err, result::FlatOk, + state_res::{self, StateMap}, utils::{ calculate_hash, stream::{BroadbandExt, TryIgnore}, @@ -20,7 +21,6 @@ use ruma::{ AnyStrippedStateEvent, StateEventType, TimelineEventType, }, serde::Raw, - state_res::{self, StateMap}, EventId, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, UserId, }; diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index a7edd4a4..d6154121 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -12,6 +12,7 @@ use std::{ use conduwuit::{ at, debug, debug_warn, err, error, implement, info, pdu::{gen_event_id, EventHash, PduBuilder, PduCount, PduEvent}, + state_res::{self, Event, RoomVersion}, utils::{ self, future::TryExtExt, stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt, }, @@ -36,7 +37,6 @@ use ruma::{ GlobalAccountDataEventType, StateEventType, TimelineEventType, }, push::{Action, Ruleset, Tweak}, - state_res::{self, Event, RoomVersion}, uint, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, RoomVersionId, ServerName, UserId, }; From f2ca670c3b0858675312be60dcfb971384ce1244 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 8 Feb 2025 01:58:13 +0000 Subject: [PATCH 060/310] optimize further into state-res with SmallString triage and de-lints for state-res. 
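The hunks below mostly show call sites, so the shape of the change is easy to miss: state-resolution state keys move from String to the crate's `StateKey` type (`pdu::StateKey`, re-exported at the crate root), a SmallString-style key, so `StateMap` and `TypeStateKey` entries are built with `.into()` or `StateKey::new()` and the short keys that dominate real rooms (the empty state key, member state keys holding user IDs) avoid a heap allocation per entry. The exact inline capacity and backing type are not part of this patch; the sketch below is a self-contained, hand-rolled illustration of the small-string idea and is not the crate's actual `StateKey`.

use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use std::ops::Deref;

/// Inline capacity for the illustration; a real type picks a size that fits
/// typical state keys (the empty key, user IDs) without spilling to the heap.
const INLINE: usize = 48;

/// Stand-in for a SmallString-backed state key, for illustration only.
#[derive(Clone, Debug)]
enum InlineKey {
    Inline { len: u8, buf: [u8; INLINE] },
    Heap(String),
}

impl From<&str> for InlineKey {
    fn from(s: &str) -> Self {
        if s.len() <= INLINE {
            // The length always fits in a u8 because INLINE is small.
            let mut buf = [0u8; INLINE];
            buf[..s.len()].copy_from_slice(s.as_bytes());
            Self::Inline { len: s.len() as u8, buf }
        } else {
            Self::Heap(s.to_owned())
        }
    }
}

impl Deref for InlineKey {
    type Target = str;

    fn deref(&self) -> &str {
        match self {
            // Only bytes copied from a valid &str are ever stored inline.
            Self::Inline { len, buf } =>
                std::str::from_utf8(&buf[..usize::from(*len)]).expect("valid utf8"),
            Self::Heap(s) => s,
        }
    }
}

// Equality and hashing go through the string view, so an inline key and a
// spilled key with the same content behave identically as map keys.
impl PartialEq for InlineKey {
    fn eq(&self, other: &Self) -> bool { **self == **other }
}

impl Eq for InlineKey {}

impl Hash for InlineKey {
    fn hash<H: Hasher>(&self, state: &mut H) { (**self).hash(state) }
}

fn main() {
    // Mirrors the call sites in this patch: keys are produced with `.into()`
    // from &str; &'static str stands in for StateEventType here.
    let mut map: HashMap<(&'static str, InlineKey), u64> = HashMap::new();
    map.insert(("m.room.power_levels", "".into()), 1);
    map.insert(("m.room.member", "@alice:example.com".into()), 2);

    let key: (&'static str, InlineKey) = ("m.room.member", "@alice:example.com".into());
    assert_eq!(map.get(&key), Some(&2));
}

With a `From<&str>` impl in place, conversions such as `s.into()` and `sender.as_str().into()` in the hunks below read the same whether the key ends up inline or on the heap.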
Signed-off-by: Jason Volk --- Cargo.lock | 1 + Cargo.toml | 4 + src/api/client/membership.rs | 8 +- src/api/client/sync/v4.rs | 15 +- src/api/client/sync/v5.rs | 15 +- src/core/Cargo.toml | 3 + src/core/state_res/event_auth.rs | 142 ++++++++++-------- src/core/state_res/mod.rs | 89 ++++++----- src/core/state_res/room_version.rs | 1 + src/core/state_res/test_utils.rs | 43 +++--- .../rooms/event_handler/handle_outlier_pdu.rs | 2 +- .../rooms/event_handler/resolve_state.rs | 1 - .../rooms/event_handler/state_at_incoming.rs | 1 - .../event_handler/upgrade_outlier_pdu.rs | 10 +- src/service/rooms/timeline/mod.rs | 2 +- 15 files changed, 192 insertions(+), 145 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5981a2a6..4441779e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -810,6 +810,7 @@ dependencies = [ "libc", "libloading", "log", + "maplit", "nix", "num-traits", "rand", diff --git a/Cargo.toml b/Cargo.toml index d8f34544..a17aa4d6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -379,6 +379,7 @@ features = [ "unstable-msc4203", # sending to-device events to appservices "unstable-msc4210", # remove legacy mentions "unstable-extensible-events", + "unstable-pdu", ] [workspace.dependencies.rust-rocksdb] @@ -527,6 +528,9 @@ features = ["std"] version = "0.3.2" features = ["std"] +[workspace.dependencies.maplit] +version = "1.0.2" + # # Patches # diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 1045b014..6c970665 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -14,7 +14,7 @@ use conduwuit::{ result::FlatOk, state_res, trace, utils::{self, shuffle, IterStream, ReadyExt}, - warn, Err, PduEvent, Result, + warn, Err, PduEvent, Result, StateKey, }; use futures::{join, FutureExt, StreamExt, TryFutureExt}; use ruma::{ @@ -1151,8 +1151,8 @@ async fn join_room_by_id_helper_remote( debug!("Running send_join auth check"); let fetch_state = &state; - let state_fetch = |k: &'static StateEventType, s: String| async move { - let shortstatekey = services.rooms.short.get_shortstatekey(k, &s).await.ok()?; + let state_fetch = |k: StateEventType, s: StateKey| async move { + let shortstatekey = services.rooms.short.get_shortstatekey(&k, &s).await.ok()?; let event_id = fetch_state.get(&shortstatekey)?; services.rooms.timeline.get_pdu(event_id).await.ok() @@ -1162,7 +1162,7 @@ async fn join_room_by_id_helper_remote( &state_res::RoomVersion::new(&room_version_id)?, &parsed_join_pdu, None, // TODO: third party invite - |k, s| state_fetch(k, s.to_owned()), + |k, s| state_fetch(k.clone(), s.into()), ) .await .map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?; diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 4e474ef3..13f832b2 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -395,9 +395,12 @@ pub(crate) async fn sync_events_v4_route( .map_or(10, usize_from_u64_truncated) .min(100); - todo_room - .0 - .extend(list.room_details.required_state.iter().cloned()); + todo_room.0.extend( + list.room_details + .required_state + .iter() + .map(|(ty, sk)| (ty.clone(), sk.as_str().into())), + ); todo_room.1 = todo_room.1.max(limit); // 0 means unknown because it got out of date @@ -449,7 +452,11 @@ pub(crate) async fn sync_events_v4_route( .map_or(10, usize_from_u64_truncated) .min(100); - todo_room.0.extend(room.required_state.iter().cloned()); + todo_room.0.extend( + room.required_state + .iter() + .map(|(ty, sk)| (ty.clone(), sk.as_str().into())), + ); todo_room.1 = todo_room.1.max(limit); // 0 means 
unknown because it got out of date todo_room.2 = todo_room.2.min( diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index 63731688..cda6c041 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -223,7 +223,11 @@ async fn fetch_subscriptions( let limit: UInt = room.timeline_limit; - todo_room.0.extend(room.required_state.iter().cloned()); + todo_room.0.extend( + room.required_state + .iter() + .map(|(ty, sk)| (ty.clone(), sk.as_str().into())), + ); todo_room.1 = todo_room.1.max(usize_from_ruma(limit)); // 0 means unknown because it got out of date todo_room.2 = todo_room.2.min( @@ -303,9 +307,12 @@ async fn handle_lists<'a>( let limit: usize = usize_from_ruma(list.room_details.timeline_limit).min(100); - todo_room - .0 - .extend(list.room_details.required_state.iter().cloned()); + todo_room.0.extend( + list.room_details + .required_state + .iter() + .map(|(ty, sk)| (ty.clone(), sk.as_str().into())), + ); todo_room.1 = todo_room.1.max(limit); // 0 means unknown because it got out of date diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index d4b0c83b..b40dd3ad 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -116,5 +116,8 @@ nix.workspace = true hardened_malloc-rs.workspace = true hardened_malloc-rs.optional = true +[dev-dependencies] +maplit.workspace = true + [lints] workspace = true diff --git a/src/core/state_res/event_auth.rs b/src/core/state_res/event_auth.rs index 72a0216c..df2f8b36 100644 --- a/src/core/state_res/event_auth.rs +++ b/src/core/state_res/event_auth.rs @@ -21,7 +21,6 @@ use serde::{ Deserialize, }; use serde_json::{from_str as from_json_str, value::RawValue as RawJsonValue}; -use tracing::{debug, error, instrument, trace, warn}; use super::{ power_levels::{ @@ -29,8 +28,9 @@ use super::{ deserialize_power_levels_content_invite, deserialize_power_levels_content_redact, }, room_version::RoomVersion, - Error, Event, Result, StateEventType, TimelineEventType, + Error, Event, Result, StateEventType, StateKey, TimelineEventType, }; +use crate::{debug, error, trace, warn}; // FIXME: field extracting could be bundled for `content` #[derive(Deserialize)] @@ -56,15 +56,15 @@ pub fn auth_types_for_event( sender: &UserId, state_key: Option<&str>, content: &RawJsonValue, -) -> serde_json::Result> { +) -> serde_json::Result> { if kind == &TimelineEventType::RoomCreate { return Ok(vec![]); } let mut auth_types = vec![ - (StateEventType::RoomPowerLevels, String::new()), - (StateEventType::RoomMember, sender.to_string()), - (StateEventType::RoomCreate, String::new()), + (StateEventType::RoomPowerLevels, StateKey::new()), + (StateEventType::RoomMember, sender.as_str().into()), + (StateEventType::RoomCreate, StateKey::new()), ]; if kind == &TimelineEventType::RoomMember { @@ -82,7 +82,7 @@ pub fn auth_types_for_event( if [MembershipState::Join, MembershipState::Invite, MembershipState::Knock] .contains(&membership) { - let key = (StateEventType::RoomJoinRules, String::new()); + let key = (StateEventType::RoomJoinRules, StateKey::new()); if !auth_types.contains(&key) { auth_types.push(key); } @@ -91,21 +91,22 @@ pub fn auth_types_for_event( .join_authorised_via_users_server .map(|m| m.deserialize()) { - let key = (StateEventType::RoomMember, u.to_string()); + let key = (StateEventType::RoomMember, u.as_str().into()); if !auth_types.contains(&key) { auth_types.push(key); } } } - let key = (StateEventType::RoomMember, state_key.to_owned()); + let key = (StateEventType::RoomMember, state_key.into()); if !auth_types.contains(&key) { 
auth_types.push(key); } if membership == MembershipState::Invite { if let Some(Ok(t_id)) = content.third_party_invite.map(|t| t.deserialize()) { - let key = (StateEventType::RoomThirdPartyInvite, t_id.signed.token); + let key = + (StateEventType::RoomThirdPartyInvite, t_id.signed.token.into()); if !auth_types.contains(&key) { auth_types.push(key); } @@ -128,7 +129,13 @@ pub fn auth_types_for_event( /// The `fetch_state` closure should gather state from a state snapshot. We need /// to know if the event passes auth against some state not a recursive /// collection of auth_events fields. -#[instrument(level = "debug", skip_all, fields(event_id = incoming_event.event_id().borrow().as_str()))] +#[tracing::instrument( + level = "debug", + skip_all, + fields( + event_id = incoming_event.event_id().borrow().as_str() + ) +)] pub async fn auth_check( room_version: &RoomVersion, incoming_event: &Incoming, @@ -136,10 +143,10 @@ pub async fn auth_check( fetch_state: F, ) -> Result where - F: Fn(&'static StateEventType, &str) -> Fut, + F: Fn(&StateEventType, &str) -> Fut + Send, Fut: Future> + Send, Fetched: Event + Send, - Incoming: Event + Send, + Incoming: Event + Send + Sync, { debug!( "auth_check beginning for {} ({})", @@ -262,6 +269,7 @@ where // sender domain of the event does not match the sender domain of the create // event, reject. #[derive(Deserialize)] + #[allow(clippy::items_after_statements)] struct RoomCreateContentFederate { #[serde(rename = "m.federate", default = "ruma::serde::default_true")] federate: bool, @@ -354,7 +362,7 @@ where join_rules_event.as_ref(), user_for_join_auth.as_deref(), &user_for_join_auth_membership, - room_create_event, + &room_create_event, )? { return Ok(false); } @@ -364,6 +372,7 @@ where } // If the sender's current membership state is not join, reject + #[allow(clippy::manual_let_else)] let sender_member_event = match sender_member_event { | Some(mem) => mem, | None => { @@ -498,19 +507,20 @@ where /// This is generated by calling `auth_types_for_event` with the membership /// event and the current State. 
#[allow(clippy::too_many_arguments)] +#[allow(clippy::cognitive_complexity)] fn valid_membership_change( room_version: &RoomVersion, target_user: &UserId, - target_user_membership_event: Option, + target_user_membership_event: Option<&impl Event>, sender: &UserId, - sender_membership_event: Option, + sender_membership_event: Option<&impl Event>, current_event: impl Event, - current_third_party_invite: Option, - power_levels_event: Option, - join_rules_event: Option, + current_third_party_invite: Option<&impl Event>, + power_levels_event: Option<&impl Event>, + join_rules_event: Option<&impl Event>, user_for_join_auth: Option<&UserId>, user_for_join_auth_membership: &MembershipState, - create_room: impl Event, + create_room: &impl Event, ) -> Result { #[derive(Deserialize)] struct GetThirdPartyInvite { @@ -856,6 +866,7 @@ fn check_power_levels( // and integers here debug!("validation of power event finished"); + #[allow(clippy::manual_let_else)] let current_state = match previous_power_event { | Some(current_state) => current_state, // If there is no previous m.room.power_levels event in the room, allow @@ -1054,6 +1065,7 @@ fn verify_third_party_invite( // If there is no m.room.third_party_invite event in the current room state with // state_key matching token, reject + #[allow(clippy::manual_let_else)] let current_tpid = match current_third_party_invite { | Some(id) => id, | None => return false, @@ -1069,12 +1081,14 @@ fn verify_third_party_invite( // If any signature in signed matches any public key in the // m.room.third_party_invite event, allow + #[allow(clippy::manual_let_else)] let tpid_ev = match from_json_str::(current_tpid.content().get()) { | Ok(ev) => ev, | Err(_) => return false, }; + #[allow(clippy::manual_let_else)] let decoded_invite_token = match Base64::parse(&tp_id.signed.token) { | Ok(tok) => tok, // FIXME: Log a warning? 
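The hunk above changes `valid_membership_change` to borrow its optional state events (`Option<&impl Event>`, and `&impl Event` for the create event) instead of taking them by value, and the test hunks that follow adapt the call sites with `.as_ref()` and `None::<&PduEvent>`. The sketch below is a minimal, self-contained illustration of that calling pattern; the `Event` trait and `Pdu` struct here are cut-down stand-ins, not the crate's real types.

/// Hypothetical, trimmed stand-ins for the crate's Event trait and PDU type.
trait Event {
    fn event_id(&self) -> &str;
    fn sender(&self) -> &str;
}

#[derive(Clone, Debug)]
struct Pdu {
    event_id: String,
    sender: String,
}

impl Event for Pdu {
    fn event_id(&self) -> &str { &self.event_id }
    fn sender(&self) -> &str { &self.sender }
}

/// Borrows the optional membership event, as the patched
/// `valid_membership_change` now does, so the caller keeps ownership.
fn sender_matches(membership_event: Option<&impl Event>, sender: &str) -> bool {
    membership_event.is_some_and(|ev| ev.sender() == sender)
}

fn main() {
    // A caller fetches an owned Option<Pdu> from its state snapshot once...
    let fetched: Option<Pdu> = Some(Pdu {
        event_id: "$member:example.com".to_owned(),
        sender: "@alice:example.com".to_owned(),
    });

    // ...and lends it out with `.as_ref()` as often as needed.
    assert!(sender_matches(fetched.as_ref(), "@alice:example.com"));
    assert!(!sender_matches(fetched.as_ref(), "@bob:example.com"));
    assert_eq!(fetched.as_ref().map(Event::event_id), Some("$member:example.com"));

    // Absent state still needs a concrete type, hence None::<&Pdu>.
    assert!(!sender_matches(None::<&Pdu>, "@alice:example.com"));
}

Borrowing lets a fetched event be reused across several checks instead of being handed over or cloned for each call, which is the pattern behind the repeated `fetch_state(...).as_ref()` arguments in the tests below.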
@@ -1096,7 +1110,7 @@ fn verify_third_party_invite( mod tests { use std::sync::Arc; - use ruma_events::{ + use ruma::events::{ room::{ join_rules::{ AllowRule, JoinRule, Restricted, RoomJoinRulesEventContent, RoomMembership, @@ -1107,7 +1121,7 @@ mod tests { }; use serde_json::value::to_raw_value as to_raw_json_value; - use crate::{ + use crate::state_res::{ event_auth::valid_membership_change, test_utils::{ alice, charlie, ella, event_id, member_content_ban, member_content_join, room_id, @@ -1145,16 +1159,16 @@ mod tests { assert!(valid_membership_change( &RoomVersion::V6, target_user, - fetch_state(StateEventType::RoomMember, target_user.to_string()), + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), sender, - fetch_state(StateEventType::RoomMember, sender.to_string()), + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), &requester, - None::, - fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), - fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), None, &MembershipState::Leave, - fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), ) .unwrap()); } @@ -1188,16 +1202,16 @@ mod tests { assert!(!valid_membership_change( &RoomVersion::V6, target_user, - fetch_state(StateEventType::RoomMember, target_user.to_string()), + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), sender, - fetch_state(StateEventType::RoomMember, sender.to_string()), + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), &requester, - None::, - fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), - fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), None, &MembershipState::Leave, - fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), ) .unwrap()); } @@ -1231,16 +1245,16 @@ mod tests { assert!(valid_membership_change( &RoomVersion::V6, target_user, - fetch_state(StateEventType::RoomMember, target_user.to_string()), + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), sender, - fetch_state(StateEventType::RoomMember, sender.to_string()), + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), &requester, - None::, - fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), - fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), None, &MembershipState::Leave, - fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), ) .unwrap()); } @@ -1274,16 +1288,16 @@ mod tests { assert!(!valid_membership_change( &RoomVersion::V6, target_user, - fetch_state(StateEventType::RoomMember, target_user.to_string()), + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), sender, - fetch_state(StateEventType::RoomMember, sender.to_string()), + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), &requester, - None::, - 
fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), - fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), None, &MembershipState::Leave, - fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), ) .unwrap()); } @@ -1334,32 +1348,32 @@ mod tests { assert!(valid_membership_change( &RoomVersion::V9, target_user, - fetch_state(StateEventType::RoomMember, target_user.to_string()), + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), sender, - fetch_state(StateEventType::RoomMember, sender.to_string()), + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), &requester, - None::, - fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), - fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), Some(alice()), &MembershipState::Join, - fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), ) .unwrap()); assert!(!valid_membership_change( &RoomVersion::V9, target_user, - fetch_state(StateEventType::RoomMember, target_user.to_string()), + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), sender, - fetch_state(StateEventType::RoomMember, sender.to_string()), + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), &requester, - None::, - fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), - fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), Some(ella()), &MembershipState::Leave, - fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), ) .unwrap()); } @@ -1402,16 +1416,16 @@ mod tests { assert!(valid_membership_change( &RoomVersion::V7, target_user, - fetch_state(StateEventType::RoomMember, target_user.to_string()), + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), sender, - fetch_state(StateEventType::RoomMember, sender.to_string()), + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), &requester, - None::, - fetch_state(StateEventType::RoomPowerLevels, "".to_owned()), - fetch_state(StateEventType::RoomJoinRules, "".to_owned()), + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), None, &MembershipState::Leave, - fetch_state(StateEventType::RoomCreate, "".to_owned()).unwrap(), + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), ) .unwrap()); } diff --git a/src/core/state_res/mod.rs b/src/core/state_res/mod.rs index e4054377..19ea3cc0 100644 --- a/src/core/state_res/mod.rs +++ b/src/core/state_res/mod.rs @@ -1,3 +1,5 @@ +#![cfg_attr(test, allow(warnings))] + pub(crate) mod error; pub mod event_auth; mod power_levels; @@ -12,7 +14,7 @@ use std::{ cmp::{Ordering, Reverse}, collections::{BinaryHeap, HashMap, HashSet}, fmt::Debug, - hash::Hash, + hash::{BuildHasher, Hash}, }; use futures::{future, stream, Future, FutureExt, StreamExt, 
TryFutureExt, TryStreamExt}; @@ -32,13 +34,13 @@ pub use self::{ room_version::RoomVersion, state_event::Event, }; -use crate::{debug, trace, warn}; +use crate::{debug, pdu::StateKey, trace, warn}; /// A mapping of event type and state_key to some value `T`, usually an /// `EventId`. pub type StateMap = HashMap; pub type StateMapItem = (TypeStateKey, T); -pub type TypeStateKey = (StateEventType, String); +pub type TypeStateKey = (StateEventType, StateKey); type Result = crate::Result; @@ -68,10 +70,10 @@ type Result = crate::Result; /// event is part of the same room. //#[tracing::instrument(level = "debug", skip(state_sets, auth_chain_sets, //#[tracing::instrument(level event_fetch))] -pub async fn resolve<'a, E, SetIter, Fetch, FetchFut, Exists, ExistsFut>( +pub async fn resolve<'a, E, Sets, SetIter, Hasher, Fetch, FetchFut, Exists, ExistsFut>( room_version: &RoomVersionId, - state_sets: impl IntoIterator + Send, - auth_chain_sets: &'a [HashSet], + state_sets: Sets, + auth_chain_sets: &'a [HashSet], event_fetch: &Fetch, event_exists: &Exists, parallel_fetches: usize, @@ -81,7 +83,9 @@ where FetchFut: Future> + Send, Exists: Fn(E::Id) -> ExistsFut + Sync, ExistsFut: Future + Send, + Sets: IntoIterator + Send, SetIter: Iterator> + Clone + Send, + Hasher: BuildHasher + Send + Sync, E: Event + Clone + Send + Sync, E::Id: Borrow + Send + Sync, for<'b> &'b E: Send, @@ -178,7 +182,7 @@ where trace!(list = ?events_to_resolve, "events left to resolve"); // This "epochs" power level event - let power_event = resolved_control.get(&(StateEventType::RoomPowerLevels, String::new())); + let power_event = resolved_control.get(&(StateEventType::RoomPowerLevels, StateKey::new())); debug!(event_id = ?power_event, "power event"); @@ -222,16 +226,17 @@ fn separate<'a, Id>( where Id: Clone + Eq + Hash + 'a, { - let mut state_set_count = 0_usize; + let mut state_set_count: usize = 0; let mut occurrences = HashMap::<_, HashMap<_, _>>::new(); - let state_sets_iter = state_sets_iter.inspect(|_| state_set_count += 1); + let state_sets_iter = + state_sets_iter.inspect(|_| state_set_count = state_set_count.saturating_add(1)); for (k, v) in state_sets_iter.flatten() { occurrences .entry(k) .or_default() .entry(v) - .and_modify(|x| *x += 1) + .and_modify(|x: &mut usize| *x = x.saturating_add(1)) .or_insert(1); } @@ -246,7 +251,7 @@ where conflicted_state .entry((k.0.clone(), k.1.clone())) .and_modify(|x: &mut Vec<_>| x.push(id.clone())) - .or_insert(vec![id.clone()]); + .or_insert_with(|| vec![id.clone()]); } } } @@ -255,9 +260,13 @@ where } /// Returns a Vec of deduped EventIds that appear in some chains but not others. -fn get_auth_chain_diff(auth_chain_sets: &[HashSet]) -> impl Iterator + Send +#[allow(clippy::arithmetic_side_effects)] +fn get_auth_chain_diff( + auth_chain_sets: &[HashSet], +) -> impl Iterator + Send where Id: Clone + Eq + Hash + Send, + Hasher: BuildHasher + Send + Sync, { let num_sets = auth_chain_sets.len(); let mut id_counts: HashMap = HashMap::new(); @@ -288,7 +297,7 @@ async fn reverse_topological_power_sort( where F: Fn(E::Id) -> Fut + Sync, Fut: Future> + Send, - E: Event + Send, + E: Event + Send + Sync, E::Id: Borrow + Send + Sync, { debug!("reverse topological sort of power events"); @@ -337,14 +346,15 @@ where /// `key_fn` is used as to obtain the power level and age of an event for /// breaking ties (together with the event ID). 
#[tracing::instrument(level = "debug", skip_all)] -pub async fn lexicographical_topological_sort( - graph: &HashMap>, +pub async fn lexicographical_topological_sort( + graph: &HashMap>, key_fn: &F, ) -> Result> where F: Fn(Id) -> Fut + Sync, Fut: Future> + Send, - Id: Borrow + Clone + Eq + Hash + Ord + Send, + Id: Borrow + Clone + Eq + Hash + Ord + Send + Sync, + Hasher: BuildHasher + Default + Clone + Send + Sync, { #[derive(PartialEq, Eq)] struct TieBreaker<'a, Id> { @@ -395,7 +405,7 @@ where // The number of events that depend on the given event (the EventId key) // How many events reference this event in the DAG as a parent - let mut reverse_graph: HashMap<_, HashSet<_>> = HashMap::new(); + let mut reverse_graph: HashMap<_, HashSet<_, Hasher>> = HashMap::new(); // Vec of nodes that have zero out degree, least recent events. let mut zero_outdegree = Vec::new(); @@ -727,8 +737,8 @@ async fn get_mainline_depth( where F: Fn(E::Id) -> Fut + Sync, Fut: Future> + Send, - E: Event + Send, - E::Id: Borrow + Send, + E: Event + Send + Sync, + E::Id: Borrow + Send + Sync, { while let Some(sort_ev) = event { debug!(event_id = sort_ev.event_id().borrow().as_str(), "mainline"); @@ -758,10 +768,10 @@ async fn add_event_and_auth_chain_to_graph( auth_diff: &HashSet, fetch_event: &F, ) where - F: Fn(E::Id) -> Fut, + F: Fn(E::Id) -> Fut + Sync, Fut: Future> + Send, - E: Event + Send, - E::Id: Borrow + Clone + Send, + E: Event + Send + Sync, + E::Id: Borrow + Clone + Send + Sync, { let mut state = vec![event_id]; while let Some(eid) = state.pop() { @@ -788,7 +798,7 @@ where F: Fn(E::Id) -> Fut + Sync, Fut: Future> + Send, E: Event + Send, - E::Id: Borrow + Send, + E::Id: Borrow + Send + Sync, { match fetch(event_id.clone()).await.as_ref() { | Some(state) => is_power_event(state), @@ -820,18 +830,18 @@ fn is_power_event(event: impl Event) -> bool { /// Convenience trait for adding event type plus state key to state maps. 
pub trait EventTypeExt { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, String); + fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey); } impl EventTypeExt for StateEventType { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey) { (self, state_key.into()) } } impl EventTypeExt for TimelineEventType { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { - (self.to_string().into(), state_key.into()) + fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey) { + (self.into(), state_key.into()) } } @@ -839,7 +849,7 @@ impl EventTypeExt for &T where T: EventTypeExt + Clone, { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey) { self.to_owned().with_state_key(state_key) } } @@ -858,13 +868,11 @@ mod tests { room::join_rules::{JoinRule, RoomJoinRulesEventContent}, StateEventType, TimelineEventType, }, - int, uint, + int, uint, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomVersionId, }; - use ruma_common::{MilliSecondsSinceUnixEpoch, OwnedEventId, RoomVersionId}; use serde_json::{json, value::to_raw_value as to_raw_json_value}; - use tracing::debug; - use crate::{ + use super::{ is_power_event, room_version::RoomVersion, test_utils::{ @@ -874,6 +882,7 @@ mod tests { }, Event, EventTypeExt, StateMap, }; + use crate::debug; async fn test_event_sort() { use futures::future::ready; @@ -898,11 +907,11 @@ mod tests { let fetcher = |id| ready(events.get(&id).cloned()); let sorted_power_events = - crate::reverse_topological_power_sort(power_events, &auth_chain, &fetcher, 1) + super::reverse_topological_power_sort(power_events, &auth_chain, &fetcher, 1) .await .unwrap(); - let resolved_power = crate::iterative_auth_check( + let resolved_power = super::iterative_auth_check( &RoomVersion::V6, sorted_power_events.iter(), HashMap::new(), // unconflicted events @@ -918,10 +927,10 @@ mod tests { events_to_sort.shuffle(&mut rand::thread_rng()); let power_level = resolved_power - .get(&(StateEventType::RoomPowerLevels, "".to_owned())) + .get(&(StateEventType::RoomPowerLevels, "".into())) .cloned(); - let sorted_event_ids = crate::mainline_sort(&events_to_sort, power_level, &fetcher, 1) + let sorted_event_ids = super::mainline_sort(&events_to_sort, power_level, &fetcher, 1) .await .unwrap(); @@ -1302,7 +1311,7 @@ mod tests { }) .collect(); - let resolved = match crate::resolve( + let resolved = match super::resolve( &RoomVersionId::V2, &state_sets, &auth_chain, @@ -1333,7 +1342,7 @@ mod tests { event_id("p") => hashset![event_id("o")], }; - let res = crate::lexicographical_topological_sort(&graph, &|_id| async { + let res = super::lexicographical_topological_sort(&graph, &|_id| async { Ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) }) .await @@ -1421,7 +1430,7 @@ mod tests { let fetcher = |id: ::Id| ready(ev_map.get(&id).cloned()); let exists = |id: ::Id| ready(ev_map.get(&id).is_some()); - let resolved = match crate::resolve( + let resolved = match super::resolve( &RoomVersionId::V6, &state_sets, &auth_chain, @@ -1552,7 +1561,7 @@ mod tests { #[allow(unused_mut)] let mut x = StateMap::new(); $( - x.insert(($kind, $key.to_owned()), $id); + x.insert(($kind, $key.into()), $id); )* x }}; diff --git a/src/core/state_res/room_version.rs b/src/core/state_res/room_version.rs index e1b0afe1..8dfd6cde 100644 --- 
a/src/core/state_res/room_version.rs +++ b/src/core/state_res/room_version.rs @@ -32,6 +32,7 @@ pub enum StateResolutionVersion { } #[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)] +#[allow(clippy::struct_excessive_bools)] pub struct RoomVersion { /// The stability of this room. pub disposition: RoomDisposition, diff --git a/src/core/state_res/test_utils.rs b/src/core/state_res/test_utils.rs index 7954b28d..9c2b151f 100644 --- a/src/core/state_res/test_utils.rs +++ b/src/core/state_res/test_utils.rs @@ -7,28 +7,28 @@ use std::{ }, }; -use futures_util::future::ready; -use js_int::{int, uint}; -use ruma_common::{ - event_id, room_id, user_id, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, - RoomVersionId, ServerSignatures, UserId, -}; -use ruma_events::{ - pdu::{EventHash, Pdu, RoomV3Pdu}, - room::{ - join_rules::{JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, RoomMemberEventContent}, +use futures::future::ready; +use ruma::{ + event_id, + events::{ + pdu::{EventHash, Pdu, RoomV3Pdu}, + room::{ + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + }, + TimelineEventType, }, - TimelineEventType, + int, room_id, uint, user_id, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, + RoomVersionId, ServerSignatures, UserId, }; use serde_json::{ json, value::{to_raw_value as to_raw_json_value, RawValue as RawJsonValue}, }; -use tracing::info; pub(crate) use self::event::PduEvent; -use crate::{auth_types_for_event, Error, Event, EventTypeExt, Result, StateMap}; +use super::auth_types_for_event; +use crate::{info, Event, EventTypeExt, Result, StateMap}; static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); @@ -88,7 +88,7 @@ pub(crate) async fn do_check( // Resolve the current state and add it to the state_at_event map then continue // on in "time" - for node in crate::lexicographical_topological_sort(&graph, &|_id| async { + for node in super::lexicographical_topological_sort(&graph, &|_id| async { Ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) }) .await @@ -135,7 +135,7 @@ pub(crate) async fn do_check( let event_map = &event_map; let fetch = |id: ::Id| ready(event_map.get(&id).cloned()); let exists = |id: ::Id| ready(event_map.get(&id).is_some()); - let resolved = crate::resolve( + let resolved = super::resolve( &RoomVersionId::V6, state_sets, &auth_chain_sets, @@ -223,7 +223,7 @@ pub(crate) async fn do_check( // Filter out the dummy messages events. // These act as points in time where there should be a known state to // test against. - && **k != ("m.room.message".into(), "dummy".to_owned()) + && **k != ("m.room.message".into(), "dummy".into()) }) .map(|(k, v)| (k.clone(), v.clone())) .collect::>(); @@ -239,7 +239,8 @@ impl TestStore { self.0 .get(event_id) .cloned() - .ok_or_else(|| Error::NotFound(format!("{event_id} not found"))) + .ok_or_else(|| super::Error::NotFound(format!("{event_id} not found"))) + .map_err(Into::into) } /// Returns a Vec of the related auth events to the given `event`. 
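The `do_check` harness above replays the test DAG in the order produced by `lexicographical_topological_sort`, passing a key function that returns a constant so ordering decisions effectively fall back to the event IDs, and each event is processed only after the events it references in `graph`. The sketch below is a self-contained illustration of that kind of deterministic, dependencies-first ordering using only std types; it is not the crate's implementation, which additionally weighs the power level and origin_server_ts supplied by `key_fn` when breaking ties.

use std::cmp::Reverse;
use std::collections::{BTreeMap, BTreeSet, BinaryHeap};

/// Deterministic topological sort: `graph[v]` lists the nodes `v` depends on,
/// every node is emitted only after all of its dependencies, and ready nodes
/// are drained smallest-first so the output is identical on every run.
fn deterministic_topological_sort(
    graph: &BTreeMap<String, BTreeSet<String>>,
) -> Option<Vec<String>> {
    // Count unsatisfied dependencies and index the reverse edges.
    let mut pending: BTreeMap<&str, usize> = BTreeMap::new();
    let mut dependents: BTreeMap<&str, Vec<&str>> = BTreeMap::new();
    for (node, deps) in graph {
        pending.entry(node.as_str()).or_insert(0);
        for dep in deps {
            pending.entry(dep.as_str()).or_insert(0);
            *pending.entry(node.as_str()).or_insert(0) += 1;
            dependents.entry(dep.as_str()).or_default().push(node.as_str());
        }
    }

    // Min-heap of nodes whose dependencies have all been emitted already.
    let mut ready: BinaryHeap<Reverse<&str>> = pending
        .iter()
        .filter(|(_, n)| **n == 0)
        .map(|(node, _)| Reverse(*node))
        .collect();

    let mut sorted = Vec::with_capacity(pending.len());
    while let Some(Reverse(node)) = ready.pop() {
        sorted.push(node.to_owned());
        for &dependent in dependents.get(node).into_iter().flatten() {
            let n = pending.get_mut(dependent).expect("indexed above");
            *n -= 1;
            if *n == 0 {
                ready.push(Reverse(dependent));
            }
        }
    }

    // A cycle leaves some nodes with unsatisfied dependencies.
    (sorted.len() == pending.len()).then_some(sorted)
}

fn main() {
    // A small graph in the spirit of the sort test earlier in this patch:
    // "o" is the common ancestor, so it must come out first.
    let mut graph = BTreeMap::new();
    for (node, deps) in [
        ("l", vec!["o"]),
        ("m", vec!["n", "o"]),
        ("n", vec!["o"]),
        ("o", vec![]),
        ("p", vec!["o"]),
    ] {
        graph.insert(node.to_owned(), deps.into_iter().map(|dep| dep.to_owned()).collect());
    }

    let sorted = deterministic_topological_sort(&graph).expect("graph is acyclic");
    assert_eq!(sorted.first().map(String::as_str), Some("o"));
}

The crate's sort drives the same idea through the `TieBreaker` entries shown in the hunks above, which is why these tests can pin the order simply by returning a constant key.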
@@ -582,8 +583,10 @@ pub(crate) fn INITIAL_EDGES() -> Vec { } pub(crate) mod event { - use ruma_common::{MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId}; - use ruma_events::{pdu::Pdu, TimelineEventType}; + use ruma::{ + events::{pdu::Pdu, TimelineEventType}, + MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, + }; use serde::{Deserialize, Serialize}; use serde_json::value::RawValue as RawJsonValue; diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index 3cc15fc4..e628c77a 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -133,7 +133,7 @@ pub(super) async fn handle_outlier_pdu<'a>( )); } - let state_fetch = |ty: &'static StateEventType, sk: &str| { + let state_fetch = |ty: &StateEventType, sk: &str| { let key = (ty.to_owned(), sk.into()); ready(auth_events.get(&key)) }; diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 28011a1b..37d47d47 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -63,7 +63,6 @@ pub async fn resolve_state( .multi_get_statekey_from_short(shortstatekeys) .zip(event_ids) .ready_filter_map(|(ty_sk, id)| Some((ty_sk.ok()?, id))) - .map(|((ty, sk), id)| ((ty, sk.as_str().to_owned()), id)) .collect() }) .map(Ok::<_, Error>) diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 843b2af9..2eb6013a 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -172,7 +172,6 @@ async fn state_at_incoming_fork( .short .get_statekey_from_short(*k) .map_ok(|(ty, sk)| ((ty, sk), id.clone())) - .map_ok(|((ty, sk), id)| ((ty, sk.as_str().to_owned()), id)) }) .ready_filter_map(Result::ok) .collect() diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index f319ba48..385d2142 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -3,7 +3,7 @@ use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::In use conduwuit::{ debug, debug_info, err, implement, state_res, trace, utils::stream::{BroadbandExt, ReadyExt}, - warn, Err, EventTypeExt, PduEvent, Result, + warn, Err, EventTypeExt, PduEvent, Result, StateKey, }; use futures::{future::ready, FutureExt, StreamExt}; use ruma::{events::StateEventType, CanonicalJsonValue, RoomId, ServerName}; @@ -71,8 +71,8 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( debug!("Performing auth check"); // 11. 
Check the auth of the event passes based on the state of the event let state_fetch_state = &state_at_incoming_event; - let state_fetch = |k: &'static StateEventType, s: String| async move { - let shortstatekey = self.services.short.get_shortstatekey(k, &s).await.ok()?; + let state_fetch = |k: StateEventType, s: StateKey| async move { + let shortstatekey = self.services.short.get_shortstatekey(&k, &s).await.ok()?; let event_id = state_fetch_state.get(&shortstatekey)?; self.services.timeline.get_pdu(event_id).await.ok() @@ -82,7 +82,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( &room_version, &incoming_pdu, None, // TODO: third party invite - |k, s| state_fetch(k, s.to_owned()), + |ty, sk| state_fetch(ty.clone(), sk.into()), ) .await .map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?; @@ -104,7 +104,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( ) .await?; - let state_fetch = |k: &'static StateEventType, s: &str| { + let state_fetch = |k: &StateEventType, s: &str| { let key = k.with_state_key(s); ready(auth_events.get(&key).cloned()) }; diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index d6154121..9d6ee982 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -747,7 +747,7 @@ impl Service { }; let auth_fetch = |k: &StateEventType, s: &str| { - let key = (k.clone(), s.to_owned()); + let key = (k.clone(), s.into()); ready(auth_events.get(&key)) }; From 4de0dafdf11acb71d28e6891c9b740b66d448934 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 8 Feb 2025 06:24:25 +0000 Subject: [PATCH 061/310] bump ruma Signed-off-by: Jason Volk --- Cargo.lock | 360 +++++++++++++++++++++++++++-------------------------- Cargo.toml | 2 +- 2 files changed, 183 insertions(+), 179 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4441779e..efba2e07 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -79,7 +79,7 @@ checksum = "0ae92a5119aa49cdbcf6b9f893fe4e1d98b04ccbf82ee0584ad948a44a734dea" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -161,18 +161,18 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] name = "async-trait" -version = "0.1.85" +version = "0.1.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" +checksum = "644dd749086bf3771a2fbc5f256fdb982d53f011c7d5d560304eafeecebce79d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -221,9 +221,9 @@ dependencies = [ [[package]] name = "aws-lc-rs" -version = "1.12.1" +version = "1.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ea835662a0af02443aa1396d39be523bbf8f11ee6fad20329607c480bea48c3" +checksum = "4c2b7ddaa2c56a367ad27a094ad8ef4faacf8a617c2575acb2ba88949df999ca" dependencies = [ "aws-lc-sys", "paste", @@ -232,9 +232,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.25.0" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71b2ddd3ada61a305e1d8bb6c005d1eaa7d14d903681edfc400406d523a9b491" +checksum = "54ac4f13dad353b209b34cbec082338202cbc01c8f00336b55c750c13ac91f8f" dependencies = [ "bindgen", "cc", @@ -427,7 +427,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.96", + "syn 2.0.98", "which", ] @@ -495,9 +495,9 @@ dependencies = [ 
[[package]] name = "brotli-decompressor" -version = "4.0.1" +version = "4.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a45bd2e4095a8b518033b128020dd4a55aab1c0a381ba4404a472630f4bc362" +checksum = "74fa05ad7d803d413eb8380983b092cbbaf9a85f151b871360e7b00cd7060b37" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -511,9 +511,9 @@ checksum = "c360505aed52b7ec96a3636c3f039d99103c37d1d9b4f7a8c743d3ea9ffcd03b" [[package]] name = "bumpalo" -version = "3.16.0" +version = "3.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" +checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" [[package]] name = "bytemuck" @@ -535,9 +535,9 @@ checksum = "8f1fe948ff07f4bd06c30984e69f5b4899c516a3ef74f34df92a2df2ab535495" [[package]] name = "bytes" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" +checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" [[package]] name = "bytesize" @@ -568,9 +568,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.10" +version = "1.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13208fcbb66eaeffe09b99fffbe1af420f00a7b35aa99ad683dfc1aa76145229" +checksum = "755717a7de9ec452bf7f3f1a3099085deabd7f2962b861dae91ecd7a365903d2" dependencies = [ "jobserver", "libc", @@ -639,9 +639,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.26" +version = "4.5.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8eb5e908ef3a6efbe1ed62520fb7287959888c88485abe072543190ecc66783" +checksum = "3e77c3243bd94243c03672cb5154667347c457ca271254724f9f393aee1c05ff" dependencies = [ "clap_builder", "clap_derive", @@ -649,9 +649,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.26" +version = "4.5.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b01801b5fc6a0a232407abc821660c9c6d25a1cafc0d4f85f29fb8d9afc121" +checksum = "1b26884eb4b57140e4d2d93652abfa49498b938b3c9179f9fc487b0acc3edad7" dependencies = [ "anstyle", "clap_lex", @@ -659,14 +659,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.24" +version = "4.5.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54b755194d6389280185988721fffba69495eed5ee9feeee9a599b53db80318c" +checksum = "bf4ced95c6f4a675af3da73304b9ac4ed991640c36374e4b46795c49e17cf1ed" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -677,9 +677,9 @@ checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "cmake" -version = "0.1.52" +version = "0.1.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c682c223677e0e5b6b7f63a64b9351844c3f1b1678a68b7ee617e30fb082620e" +checksum = "e24a03c8b52922d68a1589ad61032f2c1aa5a8158d2aa0d93c6e9534944bbad6" dependencies = [ "cc", ] @@ -863,7 +863,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1030,9 +1030,9 @@ dependencies = [ [[package]] name = "cpufeatures" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3" +checksum = 
"59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" dependencies = [ "libc", ] @@ -1177,7 +1177,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501" dependencies = [ "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1204,7 +1204,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1273,7 +1273,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1325,7 +1325,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1526,7 +1526,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1578,10 +1578,22 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "wasm-bindgen", ] +[[package]] +name = "getrandom" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +dependencies = [ + "cfg-if", + "libc", + "wasi 0.13.3+wasi-0.2.2", + "windows-targets 0.52.6", +] + [[package]] name = "gif" version = "0.13.1" @@ -1616,7 +1628,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.7.0", + "indexmap 2.7.1", "slab", "tokio", "tokio-util", @@ -1708,9 +1720,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hickory-proto" -version = "0.24.2" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "447afdcdb8afb9d0a852af6dc65d9b285ce720ed7a59e42a8bf2e931c67bc1b5" +checksum = "2ad3d6d98c648ed628df039541a5577bee1a7c83e9e16fe3dbedeea4cdfeb971" dependencies = [ "async-trait", "cfg-if", @@ -1732,9 +1744,9 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.24.2" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a2e2aba9c389ce5267d31cf1e4dace82390ae276b0b364ea55630b1fa1b44b4" +checksum = "dcf287bde7b776e85d7188e6e5db7cf410a2f9531fe82817eb87feed034c8d14" dependencies = [ "cfg-if", "futures-util", @@ -1802,7 +1814,7 @@ dependencies = [ "markup5ever", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -1850,9 +1862,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.5" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" +checksum = "f2d708df4e7140240a16cd6ab0ab65c972d7433ab77819ea693fde9c43811e2a" [[package]] name = "httpdate" @@ -1868,9 +1880,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "1.5.2" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "256fb8d4bd6413123cc9d91832d78325c48ff41677595be797d90f42969beae0" +checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" dependencies = [ "bytes", "futures-channel", @@ -2054,7 +2066,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] 
[[package]] @@ -2129,9 +2141,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.7.0" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f822373a4fe84d4bb149bf54e584a7f4abec90e072ed49cda0edea5b95471f" +checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" dependencies = [ "equivalent", "hashbrown 0.15.2", @@ -2158,7 +2170,7 @@ checksum = "c34819042dc3d3971c46c2190835914dfbe0c3c13f61449b2997f4e9722dfa60" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2189,9 +2201,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.10.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "itertools" @@ -2300,7 +2312,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2528,7 +2540,7 @@ checksum = "bd2209fff77f705b00c737016a48e73733d7fbccb8b007194db148f03561fb70" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2574,7 +2586,7 @@ checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" dependencies = [ "libc", "log", - "wasi", + "wasi 0.11.0+wasi-snapshot-preview1", "windows-sys 0.52.0", ] @@ -2675,7 +2687,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2739,15 +2751,15 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.20.2" +version = "1.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" +checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e" [[package]] name = "openssl-probe" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" [[package]] name = "opentelemetry" @@ -2757,7 +2769,7 @@ checksum = "1e32339a5dc40459130b3bd269e9892439f55b33e772d2a9d402a789baaf4e8a" dependencies = [ "futures-core", "futures-sink", - "indexmap 2.7.0", + "indexmap 2.7.1", "js-sys", "once_cell", "pin-project-lite", @@ -2913,7 +2925,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -2928,7 +2940,7 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" dependencies = [ - "phf_shared 0.11.3", + "phf_shared", ] [[package]] @@ -2937,18 +2949,8 @@ version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aef8048c789fa5e851558d709946d6d79a8ff88c0440c587967f8e94bfb1216a" dependencies = [ - "phf_generator 0.11.3", - "phf_shared 0.11.3", -] - -[[package]] -name = "phf_generator" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d5285893bb5eb82e6aaf5d59ee909a06a16737a8970984dd7746ba9283498d6" -dependencies = [ - "phf_shared 0.10.0", - "rand", + "phf_generator", + "phf_shared", ] [[package]] @@ -2957,46 +2959,37 @@ version = "0.11.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ - "phf_shared 0.11.3", + "phf_shared", "rand", ] -[[package]] -name = "phf_shared" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096" -dependencies = [ - "siphasher 0.3.11", -] - [[package]] name = "phf_shared" version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" dependencies = [ - "siphasher 1.0.1", + "siphasher", ] [[package]] name = "pin-project" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" +checksum = "dfe2e71e1471fe07709406bf725f710b02927c9c54b2b5b2ec0e8087d97c327d" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" +checksum = "f6e859e6e5bd50440ab63c47e3ebabc90f26251f7c73c3d3e837b74a1cc3fa67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -3068,7 +3061,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6924ced06e1f7dfe3fa48d57b9f74f55d8915f5036121bef647ef4b204895fac" dependencies = [ "proc-macro2", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -3097,7 +3090,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "version_check", "yansi", ] @@ -3118,7 +3111,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a65f2e60fbf1063868558d69c6beacf412dc755f9fc020f514b7955fc914fe30" dependencies = [ "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -3141,7 +3134,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -3202,7 +3195,7 @@ dependencies = [ "pin-project-lite", "quinn-proto", "quinn-udp", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "rustls", "socket2", "thiserror 2.0.11", @@ -3217,10 +3210,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" dependencies = [ "bytes", - "getrandom", + "getrandom 0.2.15", "rand", "ring", - "rustc-hash 2.1.0", + "rustc-hash 2.1.1", "rustls", "rustls-pki-types", "slab", @@ -3280,7 +3273,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.15", ] [[package]] @@ -3479,7 +3472,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if", - "getrandom", + "getrandom 0.2.15", "libc", "spin", "untrusted", @@ -3489,7 +3482,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" 
dependencies = [ "assign", "js_int", @@ -3511,7 +3504,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "js_int", "ruma-common", @@ -3523,7 +3516,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "as_variant", "assign", @@ -3546,14 +3539,14 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "as_variant", "base64 0.22.1", "bytes", "form_urlencoded", "http", - "indexmap 2.7.0", + "indexmap 2.7.1", "js_int", "konst", "percent-encoding", @@ -3577,10 +3570,10 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "as_variant", - "indexmap 2.7.0", + "indexmap 2.7.1", "js_int", "js_option", "percent-encoding", @@ -3602,7 +3595,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "bytes", "http", @@ -3620,7 +3613,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3629,7 +3622,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "js_int", "ruma-common", @@ -3639,7 +3632,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = 
[ "cfg-if", "proc-macro-crate", @@ -3647,14 +3640,14 @@ dependencies = [ "quote", "ruma-identifiers-validation", "serde", - "syn 2.0.96", + "syn 2.0.98", "toml", ] [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "js_int", "ruma-common", @@ -3666,7 +3659,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "headers", "http", @@ -3679,7 +3672,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3695,7 +3688,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5667c6292adb43fbe4725d31d6b5127a0cf60ce#f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" dependencies = [ "futures-util", "js_int", @@ -3755,9 +3748,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustc-hash" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" [[package]] name = "rustc_version" @@ -3770,9 +3763,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.43" +version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6" +checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ "bitflags 2.8.0", "errno", @@ -3783,9 +3776,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.21" +version = "0.23.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f287924602bf649d949c63dc8ac8b235fa5387d394020705b80c4eb597ce5b8" +checksum = "9fb9263ab4eb695e42321db096e3b8fbd715a59b154d5c88d82db2175b681ba7" dependencies = [ "aws-lc-rs", "log", @@ -3820,9 +3813,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.10.1" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2bf47e6ff922db3825eb750c4e2ff784c6ff8fb9e13046ef6a1d1c5401b0b37" +checksum = "917ce264624a4b4db1c364dcc35bfca9ded014d0a958cd47ad3e960e988ea51c" dependencies = [ "web-time 1.1.0", ] @@ -3862,9 +3855,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" +checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd" [[package]] name = "sanitize-filename" @@ -3892,9 +3885,9 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "sd-notify" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "561e6b346a5e59e0b8a07894004897d7160567e3352d2ebd6c3741d4e086b6f5" +checksum = "b943eadf71d8b69e661330cb0e2656e31040acf21ee7708e2c238a0ec6af2bf4" dependencies = [ "libc", ] @@ -3924,9 +3917,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.24" +version = "1.0.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba" +checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" [[package]] name = "sentry" @@ -4080,7 +4073,7 @@ checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -4090,7 +4083,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d2de91cf02bbc07cde38891769ccd5d4f073d22a40683aa4bc7a95781aaa2c4" dependencies = [ "form_urlencoded", - "indexmap 2.7.0", + "indexmap 2.7.1", "itoa", "ryu", "serde", @@ -4098,9 +4091,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.135" +version = "1.0.138" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b0d7ba2887406110130a978386c4e1befb98c674b4fba677954e4db976630d9" +checksum = "d434192e7da787e94a6ea7e9670b26a036d0ca41e0b7efb2676dd32bae872949" dependencies = [ "itoa", "memchr", @@ -4155,7 +4148,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.7.0", + "indexmap 2.7.1", "itoa", "ryu", "serde", @@ -4253,12 +4246,6 @@ dependencies = [ "quote", ] -[[package]] -name = "siphasher" -version = "0.3.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" - [[package]] name = "siphasher" version = "1.0.1" @@ -4333,26 +4320,25 @@ checksum = "f42444fea5b87a39db4218d9422087e66a85d0e7a0963a439b07bcdf91804006" [[package]] name = "string_cache" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f91138e76242f575eb1d3b38b4f1362f10d3a43f47d182a5b359af488a02293b" +checksum = "938d512196766101d333398efde81bc1f37b00cb42c2f8350e5df639f040bbbe" dependencies = [ "new_debug_unreachable", - "once_cell", "parking_lot", - "phf_shared 0.10.0", + "phf_shared", "precomputed-hash", "serde", ] [[package]] name = "string_cache_codegen" -version = "0.5.2" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bb30289b722be4ff74a408c3cc27edeaad656e06cb1fe8fa9231fa59c728988" +checksum = "244292f3441c89febe5b5bdfbb6863aeaf4f64da810ea3050fd927b27b8d92ce" dependencies = [ - "phf_generator 0.10.0", - "phf_shared 0.10.0", + "phf_generator", + "phf_shared", "proc-macro2", "quote", ] @@ -4385,9 +4371,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.96" +version = "2.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80" +checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" dependencies = [ "proc-macro2", "quote", @@ -4411,7 +4397,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -4446,9 +4432,9 @@ dependencies = [ [[package]] name = "termimad" -version = "0.31.1" +version = "0.31.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a5d4cf55d9f1cb04fcda48f725772d0733ae34e030dfc4dd36e738a5965f4" +checksum = "a8e19c6dbf107bec01d0e216bb8219485795b7d75328e4fa5ef2756c1be4f8dc" dependencies = [ "coolor", "crokey", @@ -4496,7 +4482,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -4507,7 +4493,7 @@ checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -4663,7 +4649,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -4726,9 +4712,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" +checksum = "cd87a5cdd6ffab733b2f74bc4fd7ee5fff6634124999ac278c35fc78c6120148" dependencies = [ "serde", "serde_spanned", @@ -4747,11 +4733,11 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.22" +version = "0.22.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" +checksum = "02a8b472d1a3d7c18e2d61a489aee3453fd9031c33e4f55bd533f4a7adca1bee" dependencies = [ - "indexmap 2.7.0", + "indexmap 2.7.1", "serde", "serde_spanned", "toml_datetime", @@ -4876,7 +4862,7 @@ source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -4997,9 +4983,9 @@ checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" -version = "1.0.14" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" +checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034" [[package]] name = "unicode-segmentation" @@ -5084,11 +5070,11 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "uuid" -version = "1.12.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744018581f9a3454a9e15beb8a33b017183f1e7c0cd170232a2d1453b23a51c4" +checksum = "ced87ca4be083373936a67f8de945faa23b6b42384bd5b64434850802c6dccd0" dependencies = [ - "getrandom", + "getrandom 0.3.1", "serde", ] @@ -5142,6 +5128,15 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasi" +version = "0.13.3+wasi-0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +dependencies = [ + "wit-bindgen-rt", +] + [[package]] name = "wasm-bindgen" version = "0.2.100" @@ -5164,7 +5159,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "wasm-bindgen-shared", ] @@ -5199,7 +5194,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5257,9 +5252,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.7" +version = "0.26.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d642ff16b7e79272ae451b7322067cdc17cadf68c23264be9d94a32319efe7e" +checksum = "2210b291f7ea53617fbafcc4939f10914214ec15aace5ba62293a668f322c5c9" dependencies = [ "rustls-pki-types", ] @@ -5515,9 +5510,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.6.24" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8d71a593cc5c42ad7876e2c1fda56f314f3754c084128833e64f1345ff8a03a" +checksum = "86e376c75f4f43f44db463cf729e0d3acbf954d13e22c51e26e4c264b4ab545f" dependencies = [ "memchr", ] @@ -5532,6 +5527,15 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "wit-bindgen-rt" +version = "0.33.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +dependencies = [ + "bitflags 2.8.0", +] + [[package]] name = "write16" version = "1.0.0" @@ -5581,7 +5585,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "synstructure", ] @@ -5603,7 +5607,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] @@ -5623,7 +5627,7 @@ checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", "synstructure", ] @@ -5652,7 +5656,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.96", + "syn 2.0.98", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index a17aa4d6..12556e00 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "f5667c6292adb43fbe4725d31d6b5127a0cf60ce" +rev = "e7a793b720e58bbe6858fecb86db97191dbfe7aa" features = [ "compat", "rand", From 6113803038f15a9f0206b31fc0216ebc315d7761 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 8 Feb 2025 10:09:57 +0000 Subject: [PATCH 062/310] better error logging on send_join response failure Signed-off-by: Jason Volk --- src/api/client/membership.rs | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 6c970665..26736fb5 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -9,7 +9,7 @@ use std::{ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - at, debug, debug_info, debug_warn, err, info, + at, debug, debug_info, debug_warn, err, error, info, pdu::{gen_event_id_canonical_json, 
PduBuilder}, result::FlatOk, state_res, trace, @@ -1011,10 +1011,17 @@ async fn join_room_by_id_helper_remote( .await, }; - let send_join_response = services + let send_join_response = match services .sending .send_synapse_request(&remote_server, send_join_request) - .await?; + .await + { + | Ok(response) => response, + | Err(e) => { + error!("send_join failed: {e}"); + return Err(e); + }, + }; info!("send_join finished"); From f47677c995e2847b6ad39c877c3ab5b9bd5b1152 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 7 Feb 2025 07:09:45 +0000 Subject: [PATCH 063/310] refactor spaces Signed-off-by: Jason Volk --- src/api/client/space.rs | 228 +++++---- src/api/server/hierarchy.rs | 90 ++-- src/service/rooms/spaces/mod.rs | 774 +++++++++++++++--------------- src/service/rooms/spaces/tests.rs | 27 +- 4 files changed, 568 insertions(+), 551 deletions(-) diff --git a/src/api/client/space.rs b/src/api/client/space.rs index 8f54de2a..7efd7817 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -1,18 +1,25 @@ -use std::{collections::VecDeque, str::FromStr}; +use std::{ + collections::{BTreeSet, VecDeque}, + str::FromStr, +}; use axum::extract::State; -use conduwuit::{checked, pdu::ShortRoomId, utils::stream::IterStream}; -use futures::{StreamExt, TryFutureExt}; +use conduwuit::{ + utils::{future::TryExtExt, stream::IterStream}, + Err, Result, +}; +use futures::{future::OptionFuture, StreamExt, TryFutureExt}; use ruma::{ - api::client::{error::ErrorKind, space::get_hierarchy}, - OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, + api::client::space::get_hierarchy, OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, }; use service::{ - rooms::spaces::{get_parent_children_via, summary_to_chunk, SummaryAccessibility}, + rooms::spaces::{ + get_parent_children_via, summary_to_chunk, PaginationToken, SummaryAccessibility, + }, Services, }; -use crate::{service::rooms::spaces::PaginationToken, Error, Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy` /// @@ -40,10 +47,9 @@ pub(crate) async fn get_hierarchy_route( // Should prevent unexpeded behaviour in (bad) clients if let Some(ref token) = key { if token.suggested_only != body.suggested_only || token.max_depth != max_depth { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "suggested_only and max_depth cannot change on paginated requests", - )); + return Err!(Request(InvalidParam( + "suggested_only and max_depth cannot change on paginated requests" + ))); } } @@ -52,58 +58,70 @@ pub(crate) async fn get_hierarchy_route( body.sender_user(), &body.room_id, limit.try_into().unwrap_or(10), - key.map_or(vec![], |token| token.short_room_ids), - max_depth.into(), + max_depth.try_into().unwrap_or(usize::MAX), body.suggested_only, + key.as_ref() + .into_iter() + .flat_map(|t| t.short_room_ids.iter()), ) .await } -async fn get_client_hierarchy( +async fn get_client_hierarchy<'a, ShortRoomIds>( services: &Services, sender_user: &UserId, room_id: &RoomId, limit: usize, - short_room_ids: Vec, - max_depth: u64, + max_depth: usize, suggested_only: bool, -) -> Result { - let mut parents = VecDeque::new(); + short_room_ids: ShortRoomIds, +) -> Result +where + ShortRoomIds: Iterator + Clone + Send + Sync + 'a, +{ + type Via = Vec; + type Entry = (OwnedRoomId, Via); + type Rooms = VecDeque; - // Don't start populating the results if we have to start at a specific room. 
- let mut populate_results = short_room_ids.is_empty(); + let mut queue: Rooms = [( + room_id.to_owned(), + room_id + .server_name() + .map(ToOwned::to_owned) + .into_iter() + .collect(), + )] + .into(); - let mut stack = vec![vec![(room_id.to_owned(), match room_id.server_name() { - | Some(server_name) => vec![server_name.into()], - | None => vec![], - })]]; + let mut rooms = Vec::with_capacity(limit); + let mut parents = BTreeSet::new(); + while let Some((current_room, via)) = queue.pop_front() { + let summary = services + .rooms + .spaces + .get_summary_and_children_client(¤t_room, suggested_only, sender_user, &via) + .await?; - let mut results = Vec::with_capacity(limit); - - while let Some((current_room, via)) = { next_room_to_traverse(&mut stack, &mut parents) } { - if results.len() >= limit { - break; - } - - match ( - services - .rooms - .spaces - .get_summary_and_children_client(¤t_room, suggested_only, sender_user, &via) - .await?, - current_room == room_id, - ) { + match (summary, current_room == room_id) { + | (None | Some(SummaryAccessibility::Inaccessible), false) => { + // Just ignore other unavailable rooms + }, + | (None, true) => { + return Err!(Request(Forbidden("The requested room was not found"))); + }, + | (Some(SummaryAccessibility::Inaccessible), true) => { + return Err!(Request(Forbidden("The requested room is inaccessible"))); + }, | (Some(SummaryAccessibility::Accessible(summary)), _) => { - let mut children: Vec<(OwnedRoomId, Vec)> = - get_parent_children_via(&summary, suggested_only) - .into_iter() - .filter(|(room, _)| parents.iter().all(|parent| parent != room)) - .rev() - .collect(); + let populate = parents.len() >= short_room_ids.clone().count(); - if populate_results { - results.push(summary_to_chunk(*summary.clone())); - } else { + let mut children: Vec = get_parent_children_via(&summary, suggested_only) + .filter(|(room, _)| !parents.contains(room)) + .rev() + .map(|(key, val)| (key, val.collect())) + .collect(); + + if !populate { children = children .iter() .rev() @@ -113,97 +131,69 @@ async fn get_client_hierarchy( .rooms .short .get_shortroomid(room) - .map_ok(|short| Some(&short) != short_room_ids.get(parents.len())) + .map_ok(|short| { + Some(&short) != short_room_ids.clone().nth(parents.len()) + }) .unwrap_or_else(|_| false) }) .map(Clone::clone) - .collect::)>>() + .collect::>() .await .into_iter() .rev() .collect(); - - if children.is_empty() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Room IDs in token were not found.", - )); - } - - // We have reached the room after where we last left off - let parents_len = parents.len(); - if checked!(parents_len + 1)? == short_room_ids.len() { - populate_results = true; - } } - let parents_len: u64 = parents.len().try_into()?; - if !children.is_empty() && parents_len < max_depth { - parents.push_back(current_room.clone()); - stack.push(children); + if populate { + rooms.push(summary_to_chunk(summary.clone())); + } else if queue.is_empty() && children.is_empty() { + return Err!(Request(InvalidParam("Room IDs in token were not found."))); } - // Root room in the space hierarchy, we return an error - // if this one fails. 
+ + parents.insert(current_room.clone()); + if rooms.len() >= limit { + break; + } + + if children.is_empty() { + break; + } + + if parents.len() >= max_depth { + continue; + } + + queue.extend(children); }, - | (Some(SummaryAccessibility::Inaccessible), true) => { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "The requested room is inaccessible", - )); - }, - | (None, true) => { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "The requested room was not found", - )); - }, - // Just ignore other unavailable rooms - | (None | Some(SummaryAccessibility::Inaccessible), false) => (), } } - Ok(get_hierarchy::v1::Response { - next_batch: if let Some((room, _)) = next_room_to_traverse(&mut stack, &mut parents) { - parents.pop_front(); - parents.push_back(room); + let next_batch: OptionFuture<_> = queue + .pop_front() + .map(|(room, _)| async move { + parents.insert(room); let next_short_room_ids: Vec<_> = parents .iter() .stream() - .filter_map(|room_id| async move { - services.rooms.short.get_shortroomid(room_id).await.ok() - }) + .filter_map(|room_id| services.rooms.short.get_shortroomid(room_id).ok()) .collect() .await; - (next_short_room_ids != short_room_ids && !next_short_room_ids.is_empty()).then( - || { - PaginationToken { - short_room_ids: next_short_room_ids, - limit: UInt::new(max_depth) - .expect("When sent in request it must have been valid UInt"), - max_depth: UInt::new(max_depth) - .expect("When sent in request it must have been valid UInt"), - suggested_only, - } - .to_string() - }, - ) - } else { - None - }, - rooms: results, + (next_short_room_ids.iter().ne(short_room_ids) && !next_short_room_ids.is_empty()) + .then_some(PaginationToken { + short_room_ids: next_short_room_ids, + limit: max_depth.try_into().ok()?, + max_depth: max_depth.try_into().ok()?, + suggested_only, + }) + .as_ref() + .map(PaginationToken::to_string) + }) + .into(); + + Ok(get_hierarchy::v1::Response { + next_batch: next_batch.await.flatten(), + rooms, }) } - -fn next_room_to_traverse( - stack: &mut Vec)>>, - parents: &mut VecDeque, -) -> Option<(OwnedRoomId, Vec)> { - while stack.last().is_some_and(Vec::is_empty) { - stack.pop(); - parents.pop_back(); - } - - stack.last_mut().and_then(Vec::pop) -} diff --git a/src/api/server/hierarchy.rs b/src/api/server/hierarchy.rs index bcf2f7bc..f7bc43ab 100644 --- a/src/api/server/hierarchy.rs +++ b/src/api/server/hierarchy.rs @@ -1,10 +1,11 @@ use axum::extract::State; -use conduwuit::{Err, Result}; -use ruma::{api::federation::space::get_hierarchy, RoomId, ServerName}; -use service::{ - rooms::spaces::{get_parent_children_via, Identifier, SummaryAccessibility}, - Services, +use conduwuit::{ + utils::stream::{BroadbandExt, IterStream}, + Err, Result, }; +use futures::{FutureExt, StreamExt}; +use ruma::api::federation::space::get_hierarchy; +use service::rooms::spaces::{get_parent_children_via, Identifier, SummaryAccessibility}; use crate::Ruma; @@ -20,54 +21,51 @@ pub(crate) async fn get_hierarchy_route( return Err!(Request(NotFound("Room does not exist."))); } - get_hierarchy(&services, &body.room_id, body.origin(), body.suggested_only).await -} - -/// Gets the response for the space hierarchy over federation request -/// -/// Errors if the room does not exist, so a check if the room exists should -/// be done -async fn get_hierarchy( - services: &Services, - room_id: &RoomId, - server_name: &ServerName, - suggested_only: bool, -) -> Result { + let room_id = &body.room_id; + let suggested_only = body.suggested_only; + let ref identifier = 
Identifier::ServerName(body.origin()); match services .rooms .spaces - .get_summary_and_children_local(&room_id.to_owned(), Identifier::ServerName(server_name)) + .get_summary_and_children_local(room_id, identifier) .await? { - | Some(SummaryAccessibility::Accessible(room)) => { - let mut children = Vec::new(); - let mut inaccessible_children = Vec::new(); + | None => Err!(Request(NotFound("The requested room was not found"))), - for (child, _via) in get_parent_children_via(&room, suggested_only) { - match services - .rooms - .spaces - .get_summary_and_children_local(&child, Identifier::ServerName(server_name)) - .await? - { - | Some(SummaryAccessibility::Accessible(summary)) => { - children.push((*summary).into()); - }, - | Some(SummaryAccessibility::Inaccessible) => { - inaccessible_children.push(child); - }, - | None => (), - } - } - - Ok(get_hierarchy::v1::Response { - room: *room, - children, - inaccessible_children, - }) - }, | Some(SummaryAccessibility::Inaccessible) => Err!(Request(NotFound("The requested room is inaccessible"))), - | None => Err!(Request(NotFound("The requested room was not found"))), + + | Some(SummaryAccessibility::Accessible(room)) => { + let (children, inaccessible_children) = + get_parent_children_via(&room, suggested_only) + .stream() + .broad_filter_map(|(child, _via)| async move { + match services + .rooms + .spaces + .get_summary_and_children_local(&child, identifier) + .await + .ok()? + { + | None => None, + + | Some(SummaryAccessibility::Inaccessible) => + Some((None, Some(child))), + + | Some(SummaryAccessibility::Accessible(summary)) => + Some((Some(summary), None)), + } + }) + .unzip() + .map(|(children, inaccessible_children): (Vec<_>, Vec<_>)| { + ( + children.into_iter().flatten().map(Into::into).collect(), + inaccessible_children.into_iter().flatten().collect(), + ) + }) + .await; + + Ok(get_hierarchy::v1::Response { room, children, inaccessible_children }) + }, } } diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 1e2b0a9f..268d6dfe 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -1,14 +1,24 @@ mod pagination_token; +#[cfg(test)] mod tests; -use std::{collections::HashMap, sync::Arc}; +use std::sync::Arc; -use conduwuit::{debug_info, err, utils::math::usize_from_f64, Error, Result}; -use futures::StreamExt; +use conduwuit::{ + implement, + utils::{ + future::BoolExt, + math::usize_from_f64, + stream::{BroadbandExt, ReadyExt}, + IterStream, + }, + Err, Error, Result, +}; +use futures::{pin_mut, stream::FuturesUnordered, FutureExt, Stream, StreamExt, TryFutureExt}; use lru_cache::LruCache; use ruma::{ api::{ - client::{error::ErrorKind, space::SpaceHierarchyRoomsChunk}, + client::space::SpaceHierarchyRoomsChunk, federation::{ self, space::{SpaceHierarchyChildSummary, SpaceHierarchyParentSummary}, @@ -21,46 +31,46 @@ use ruma::{ }, serde::Raw, space::SpaceRoomJoinRule, - OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, + OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, }; -use tokio::sync::Mutex; +use tokio::sync::{Mutex, MutexGuard}; pub use self::pagination_token::PaginationToken; -use crate::{rooms, sending, Dep}; - -pub struct CachedSpaceHierarchySummary { - summary: SpaceHierarchyParentSummary, -} - -pub enum SummaryAccessibility { - Accessible(Box), - Inaccessible, -} - -/// Identifier used to check if rooms are accessible -/// -/// None is used if you want to return the room, no matter if accessible or not -pub enum Identifier<'a> { - 
UserId(&'a UserId), - ServerName(&'a ServerName), -} +use crate::{conduwuit::utils::TryFutureExtExt, rooms, sending, Dep}; pub struct Service { services: Services, - pub roomid_spacehierarchy_cache: - Mutex>>, + pub roomid_spacehierarchy_cache: Mutex, } struct Services { state_accessor: Dep, state_cache: Dep, state: Dep, - short: Dep, event_handler: Dep, timeline: Dep, sending: Dep, } +pub struct CachedSpaceHierarchySummary { + summary: SpaceHierarchyParentSummary, +} + +#[allow(clippy::large_enum_variant)] +pub enum SummaryAccessibility { + Accessible(SpaceHierarchyParentSummary), + Inaccessible, +} + +/// Identifier used to check if rooms are accessible. None is used if you want +/// to return the room, no matter if accessible or not +pub enum Identifier<'a> { + UserId(&'a UserId), + ServerName(&'a ServerName), +} + +type Cache = LruCache>; + impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let config = &args.server.config; @@ -72,7 +82,6 @@ impl crate::Service for Service { .depend::("rooms::state_accessor"), state_cache: args.depend::("rooms::state_cache"), state: args.depend::("rooms::state"), - short: args.depend::("rooms::short"), event_handler: args .depend::("rooms::event_handler"), timeline: args.depend::("rooms::timeline"), @@ -85,370 +94,407 @@ impl crate::Service for Service { fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } -impl Service { - /// Gets the summary of a space using solely local information - pub async fn get_summary_and_children_local( - &self, - current_room: &OwnedRoomId, - identifier: Identifier<'_>, - ) -> Result> { - if let Some(cached) = self - .roomid_spacehierarchy_cache - .lock() - .await - .get_mut(¤t_room.to_owned()) - .as_ref() - { - return Ok(if let Some(cached) = cached { +/// Gets the summary of a space using solely local information +#[implement(Service)] +pub async fn get_summary_and_children_local( + &self, + current_room: &RoomId, + identifier: &Identifier<'_>, +) -> Result> { + match self + .roomid_spacehierarchy_cache + .lock() + .await + .get_mut(current_room) + .as_ref() + { + | None => (), // cache miss + | Some(None) => return Ok(None), + | Some(Some(cached)) => + return Ok(Some( if self .is_accessible_child( current_room, &cached.summary.join_rule, - &identifier, + identifier, &cached.summary.allowed_room_ids, ) .await { - Some(SummaryAccessibility::Accessible(Box::new(cached.summary.clone()))) + SummaryAccessibility::Accessible(cached.summary.clone()) } else { - Some(SummaryAccessibility::Inaccessible) - } - } else { - None - }); - } + SummaryAccessibility::Inaccessible + }, + )), + }; - if let Some(children_pdus) = self.get_stripped_space_child_events(current_room).await? 
{ - let summary = self - .get_room_summary(current_room, children_pdus, &identifier) - .await; - if let Ok(summary) = summary { - self.roomid_spacehierarchy_cache.lock().await.insert( - current_room.clone(), - Some(CachedSpaceHierarchySummary { summary: summary.clone() }), - ); + let children_pdus: Vec<_> = self + .get_stripped_space_child_events(current_room) + .collect() + .await; - Ok(Some(SummaryAccessibility::Accessible(Box::new(summary)))) - } else { - Ok(None) - } - } else { - Ok(None) - } - } + let summary = self + .get_room_summary(current_room, children_pdus, identifier) + .boxed() + .await; - /// Gets the summary of a space using solely federation - #[tracing::instrument(level = "debug", skip(self))] - async fn get_summary_and_children_federation( - &self, - current_room: &OwnedRoomId, - suggested_only: bool, - user_id: &UserId, - via: &[OwnedServerName], - ) -> Result> { - for server in via { - debug_info!("Asking {server} for /hierarchy"); - let Ok(response) = self - .services + let Ok(summary) = summary else { + return Ok(None); + }; + + self.roomid_spacehierarchy_cache.lock().await.insert( + current_room.to_owned(), + Some(CachedSpaceHierarchySummary { summary: summary.clone() }), + ); + + Ok(Some(SummaryAccessibility::Accessible(summary))) +} + +/// Gets the summary of a space using solely federation +#[implement(Service)] +#[tracing::instrument(level = "debug", skip(self))] +async fn get_summary_and_children_federation( + &self, + current_room: &RoomId, + suggested_only: bool, + user_id: &UserId, + via: &[OwnedServerName], +) -> Result> { + let request = federation::space::get_hierarchy::v1::Request { + room_id: current_room.to_owned(), + suggested_only, + }; + + let mut requests: FuturesUnordered<_> = via + .iter() + .map(|server| { + self.services .sending - .send_federation_request(server, federation::space::get_hierarchy::v1::Request { - room_id: current_room.to_owned(), - suggested_only, - }) - .await - else { - continue; - }; - - debug_info!("Got response from {server} for /hierarchy\n{response:?}"); - let summary = response.room.clone(); - - self.roomid_spacehierarchy_cache.lock().await.insert( - current_room.clone(), - Some(CachedSpaceHierarchySummary { summary: summary.clone() }), - ); - - for child in response.children { - let mut guard = self.roomid_spacehierarchy_cache.lock().await; - if !guard.contains_key(current_room) { - guard.insert( - current_room.clone(), - Some(CachedSpaceHierarchySummary { - summary: { - let SpaceHierarchyChildSummary { - canonical_alias, - name, - num_joined_members, - room_id, - topic, - world_readable, - guest_can_join, - avatar_url, - join_rule, - room_type, - allowed_room_ids, - } = child; - - SpaceHierarchyParentSummary { - canonical_alias, - name, - num_joined_members, - room_id: room_id.clone(), - topic, - world_readable, - guest_can_join, - avatar_url, - join_rule, - room_type, - children_state: self - .get_stripped_space_child_events(&room_id) - .await? 
- .unwrap(), - allowed_room_ids, - } - }, - }), - ); - } - } - if self - .is_accessible_child( - current_room, - &response.room.join_rule, - &Identifier::UserId(user_id), - &response.room.allowed_room_ids, - ) - .await - { - return Ok(Some(SummaryAccessibility::Accessible(Box::new(summary.clone())))); - } - - return Ok(Some(SummaryAccessibility::Inaccessible)); - } + .send_federation_request(server, request.clone()) + }) + .collect(); + let Some(Ok(response)) = requests.next().await else { self.roomid_spacehierarchy_cache .lock() .await - .insert(current_room.clone(), None); + .insert(current_room.to_owned(), None); - Ok(None) - } + return Ok(None); + }; - /// Gets the summary of a space using either local or remote (federation) - /// sources - pub async fn get_summary_and_children_client( - &self, - current_room: &OwnedRoomId, - suggested_only: bool, - user_id: &UserId, - via: &[OwnedServerName], - ) -> Result> { - if let Ok(Some(response)) = self - .get_summary_and_children_local(current_room, Identifier::UserId(user_id)) - .await - { - Ok(Some(response)) - } else { - self.get_summary_and_children_federation(current_room, suggested_only, user_id, via) - .await - } - } + let summary = response.room; + self.roomid_spacehierarchy_cache.lock().await.insert( + current_room.to_owned(), + Some(CachedSpaceHierarchySummary { summary: summary.clone() }), + ); - async fn get_room_summary( - &self, - current_room: &OwnedRoomId, - children_state: Vec>, - identifier: &Identifier<'_>, - ) -> Result { - let room_id: &RoomId = current_room; - - let join_rule = self - .services - .state_accessor - .room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") - .await - .map_or(JoinRule::Invite, |c: RoomJoinRulesEventContent| c.join_rule); - - let allowed_room_ids = self - .services - .state_accessor - .allowed_room_ids(join_rule.clone()); - - if !self - .is_accessible_child( - current_room, - &join_rule.clone().into(), - identifier, - &allowed_room_ids, - ) - .await - { - debug_info!("User is not allowed to see room {room_id}"); - // This error will be caught later - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "User is not allowed to see the room", - )); - } - - Ok(SpaceHierarchyParentSummary { - canonical_alias: self - .services - .state_accessor - .get_canonical_alias(room_id) - .await - .ok(), - name: self.services.state_accessor.get_name(room_id).await.ok(), - num_joined_members: self - .services - .state_cache - .room_joined_count(room_id) - .await - .unwrap_or(0) - .try_into() - .expect("user count should not be that big"), - room_id: room_id.to_owned(), - topic: self - .services - .state_accessor - .get_room_topic(room_id) - .await - .ok(), - world_readable: self - .services - .state_accessor - .is_world_readable(room_id) - .await, - guest_can_join: self.services.state_accessor.guest_can_join(room_id).await, - avatar_url: self - .services - .state_accessor - .get_avatar(room_id) - .await - .into_option() - .unwrap_or_default() - .url, - join_rule: join_rule.into(), - room_type: self - .services - .state_accessor - .get_room_type(room_id) - .await - .ok(), - children_state, - allowed_room_ids, + response + .children + .into_iter() + .stream() + .then(|child| { + self.roomid_spacehierarchy_cache + .lock() + .map(|lock| (child, lock)) }) + .ready_filter_map(|(child, mut cache)| { + (!cache.contains_key(current_room)).then_some((child, cache)) + }) + .for_each(|(child, cache)| self.cache_insert(cache, current_room, child)) + .await; + + let identifier = Identifier::UserId(user_id); 
+ let is_accessible_child = self + .is_accessible_child( + current_room, + &summary.join_rule, + &identifier, + &summary.allowed_room_ids, + ) + .await; + + if is_accessible_child { + return Ok(Some(SummaryAccessibility::Accessible(summary))); } - /// Simply returns the stripped m.space.child events of a room - async fn get_stripped_space_child_events( - &self, - room_id: &RoomId, - ) -> Result>>, Error> { - let Ok(current_shortstatehash) = - self.services.state.get_room_shortstatehash(room_id).await - else { - return Ok(None); - }; - - let state: HashMap<_, Arc<_>> = self - .services - .state_accessor - .state_full_ids(current_shortstatehash) - .collect() - .await; - - let mut children_pdus = Vec::with_capacity(state.len()); - for (key, id) in state { - let (event_type, state_key) = - self.services.short.get_statekey_from_short(key).await?; - - if event_type != StateEventType::SpaceChild { - continue; - } - - let pdu = - self.services.timeline.get_pdu(&id).await.map_err(|e| { - err!(Database("Event {id:?} in space state not found: {e:?}")) - })?; + Ok(Some(SummaryAccessibility::Inaccessible)) +} +/// Simply returns the stripped m.space.child events of a room +#[implement(Service)] +fn get_stripped_space_child_events<'a>( + &'a self, + room_id: &'a RoomId, +) -> impl Stream> + 'a { + self.services + .state + .get_room_shortstatehash(room_id) + .map_ok(|current_shortstatehash| { + self.services + .state_accessor + .state_keys_with_ids(current_shortstatehash, &StateEventType::SpaceChild) + }) + .map(Result::into_iter) + .map(IterStream::stream) + .map(StreamExt::flatten) + .flatten_stream() + .broad_filter_map(move |(state_key, event_id): (_, OwnedEventId)| async move { + self.services + .timeline + .get_pdu(&event_id) + .map_ok(move |pdu| (state_key, pdu)) + .await + .ok() + }) + .ready_filter_map(move |(state_key, pdu)| { if let Ok(content) = pdu.get_content::() { if content.via.is_empty() { - continue; + return None; } } - if OwnedRoomId::try_from(state_key).is_ok() { - children_pdus.push(pdu.to_stripped_spacechild_state_event()); + if RoomId::parse(&state_key).is_ok() { + return Some(pdu.to_stripped_spacechild_state_event()); } - } - Ok(Some(children_pdus)) + None + }) +} + +/// Gets the summary of a space using either local or remote (federation) +/// sources +#[implement(Service)] +pub async fn get_summary_and_children_client( + &self, + current_room: &OwnedRoomId, + suggested_only: bool, + user_id: &UserId, + via: &[OwnedServerName], +) -> Result> { + let identifier = Identifier::UserId(user_id); + + if let Ok(Some(response)) = self + .get_summary_and_children_local(current_room, &identifier) + .await + { + return Ok(Some(response)); } - /// With the given identifier, checks if a room is accessable - async fn is_accessible_child( - &self, - current_room: &OwnedRoomId, - join_rule: &SpaceRoomJoinRule, - identifier: &Identifier<'_>, - allowed_room_ids: &Vec, - ) -> bool { - match identifier { - | Identifier::ServerName(server_name) => { - // Checks if ACLs allow for the server to participate - if self - .services - .event_handler - .acl_check(server_name, current_room) - .await - .is_err() - { - return false; - } - }, - | Identifier::UserId(user_id) => { - if self - .services - .state_cache - .is_joined(user_id, current_room) - .await || self - .services - .state_cache - .is_invited(user_id, current_room) - .await - { - return true; - } - }, + self.get_summary_and_children_federation(current_room, suggested_only, user_id, via) + .await +} + +#[implement(Service)] +async fn 
get_room_summary( + &self, + room_id: &RoomId, + children_state: Vec>, + identifier: &Identifier<'_>, +) -> Result { + let join_rule = self + .services + .state_accessor + .room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") + .await + .map_or(JoinRule::Invite, |c: RoomJoinRulesEventContent| c.join_rule); + + let allowed_room_ids = self + .services + .state_accessor + .allowed_room_ids(join_rule.clone()); + + let join_rule = join_rule.clone().into(); + let is_accessible_child = self + .is_accessible_child(room_id, &join_rule, identifier, &allowed_room_ids) + .await; + + if !is_accessible_child { + return Err!(Request(Forbidden("User is not allowed to see the room",))); + } + + let name = self.services.state_accessor.get_name(room_id).ok(); + + let topic = self.services.state_accessor.get_room_topic(room_id).ok(); + + let room_type = self.services.state_accessor.get_room_type(room_id).ok(); + + let world_readable = self.services.state_accessor.is_world_readable(room_id); + + let guest_can_join = self.services.state_accessor.guest_can_join(room_id); + + let num_joined_members = self + .services + .state_cache + .room_joined_count(room_id) + .unwrap_or(0); + + let canonical_alias = self + .services + .state_accessor + .get_canonical_alias(room_id) + .ok(); + + let avatar_url = self + .services + .state_accessor + .get_avatar(room_id) + .map(|res| res.into_option().unwrap_or_default().url); + + let ( + canonical_alias, + name, + num_joined_members, + topic, + world_readable, + guest_can_join, + avatar_url, + room_type, + ) = futures::join!( + canonical_alias, + name, + num_joined_members, + topic, + world_readable, + guest_can_join, + avatar_url, + room_type + ); + + Ok(SpaceHierarchyParentSummary { + canonical_alias, + name, + topic, + world_readable, + guest_can_join, + avatar_url, + room_type, + children_state, + allowed_room_ids, + join_rule, + room_id: room_id.to_owned(), + num_joined_members: num_joined_members + .try_into() + .expect("user count should not be that big"), + }) +} + +/// With the given identifier, checks if a room is accessable +#[implement(Service)] +async fn is_accessible_child( + &self, + current_room: &RoomId, + join_rule: &SpaceRoomJoinRule, + identifier: &Identifier<'_>, + allowed_room_ids: &[OwnedRoomId], +) -> bool { + if let Identifier::ServerName(server_name) = identifier { + // Checks if ACLs allow for the server to participate + if self + .services + .event_handler + .acl_check(server_name, current_room) + .await + .is_err() + { + return false; } - match &join_rule { - | SpaceRoomJoinRule::Public - | SpaceRoomJoinRule::Knock - | SpaceRoomJoinRule::KnockRestricted => true, - | SpaceRoomJoinRule::Restricted => { - for room in allowed_room_ids { + } + + if let Identifier::UserId(user_id) = identifier { + let is_joined = self.services.state_cache.is_joined(user_id, current_room); + + let is_invited = self.services.state_cache.is_invited(user_id, current_room); + + pin_mut!(is_joined, is_invited); + if is_joined.or(is_invited).await { + return true; + } + } + + match join_rule { + | SpaceRoomJoinRule::Public + | SpaceRoomJoinRule::Knock + | SpaceRoomJoinRule::KnockRestricted => true, + | SpaceRoomJoinRule::Restricted => + allowed_room_ids + .iter() + .stream() + .any(|room| async { match identifier { - | Identifier::UserId(user) => { - if self.services.state_cache.is_joined(user, room).await { - return true; - } - }, - | Identifier::ServerName(server) => { - if self.services.state_cache.server_in_room(server, room).await { - return true; - } - }, + 
| Identifier::UserId(user) => + self.services.state_cache.is_joined(user, room).await, + | Identifier::ServerName(server) => + self.services.state_cache.server_in_room(server, room).await, } - } - false - }, - // Invite only, Private, or Custom join rule - | _ => false, - } + }) + .await, + + // Invite only, Private, or Custom join rule + | _ => false, } } +/// Returns the children of a SpaceHierarchyParentSummary, making use of the +/// children_state field +pub fn get_parent_children_via( + parent: &SpaceHierarchyParentSummary, + suggested_only: bool, +) -> impl DoubleEndedIterator)> + Send + '_ +{ + parent + .children_state + .iter() + .map(Raw::deserialize) + .filter_map(Result::ok) + .filter_map(move |ce| { + (!suggested_only || ce.content.suggested) + .then_some((ce.state_key, ce.content.via.into_iter())) + }) +} + +#[implement(Service)] +async fn cache_insert( + &self, + mut cache: MutexGuard<'_, Cache>, + current_room: &RoomId, + child: SpaceHierarchyChildSummary, +) { + let SpaceHierarchyChildSummary { + canonical_alias, + name, + num_joined_members, + room_id, + topic, + world_readable, + guest_can_join, + avatar_url, + join_rule, + room_type, + allowed_room_ids, + } = child; + + let summary = SpaceHierarchyParentSummary { + canonical_alias, + name, + num_joined_members, + topic, + world_readable, + guest_can_join, + avatar_url, + join_rule, + room_type, + allowed_room_ids, + room_id: room_id.clone(), + children_state: self + .get_stripped_space_child_events(&room_id) + .collect() + .await, + }; + + cache.insert(current_room.to_owned(), Some(CachedSpaceHierarchySummary { summary })); +} + // Here because cannot implement `From` across ruma-federation-api and // ruma-client-api types impl From for SpaceHierarchyRoomsChunk { @@ -517,25 +563,3 @@ pub fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyR children_state, } } - -/// Returns the children of a SpaceHierarchyParentSummary, making use of the -/// children_state field -#[must_use] -pub fn get_parent_children_via( - parent: &SpaceHierarchyParentSummary, - suggested_only: bool, -) -> Vec<(OwnedRoomId, Vec)> { - parent - .children_state - .iter() - .filter_map(|raw_ce| { - raw_ce.deserialize().map_or(None, |ce| { - if suggested_only && !ce.content.suggested { - None - } else { - Some((ce.state_key, ce.content.via)) - } - }) - }) - .collect() -} diff --git a/src/service/rooms/spaces/tests.rs b/src/service/rooms/spaces/tests.rs index b4c387d7..dd6c2f35 100644 --- a/src/service/rooms/spaces/tests.rs +++ b/src/service/rooms/spaces/tests.rs @@ -1,5 +1,3 @@ -#![cfg(test)] - use std::str::FromStr; use ruma::{ @@ -69,15 +67,22 @@ fn get_summary_children() { } .into(); - assert_eq!(get_parent_children_via(&summary, false), vec![ - (owned_room_id!("!foo:example.org"), vec![owned_server_name!("example.org")]), - (owned_room_id!("!bar:example.org"), vec![owned_server_name!("example.org")]), - (owned_room_id!("!baz:example.org"), vec![owned_server_name!("example.org")]) - ]); - assert_eq!(get_parent_children_via(&summary, true), vec![( - owned_room_id!("!bar:example.org"), - vec![owned_server_name!("example.org")] - )]); + assert_eq!( + get_parent_children_via(&summary, false) + .map(|(k, v)| (k, v.collect::>())) + .collect::>(), + vec![ + (owned_room_id!("!foo:example.org"), vec![owned_server_name!("example.org")]), + (owned_room_id!("!bar:example.org"), vec![owned_server_name!("example.org")]), + (owned_room_id!("!baz:example.org"), vec![owned_server_name!("example.org")]) + ] + ); + assert_eq!( + 
get_parent_children_via(&summary, true) + .map(|(k, v)| (k, v.collect::>())) + .collect::>(), + vec![(owned_room_id!("!bar:example.org"), vec![owned_server_name!("example.org")])] + ); } #[test] From c614d5bf44b477a39a6b819ab4f31fa9f2c626f1 Mon Sep 17 00:00:00 2001 From: strawberry Date: Mon, 17 Feb 2025 17:35:03 -0500 Subject: [PATCH 064/310] bump ruwuma Signed-off-by: strawberry --- Cargo.lock | 27 ++++++++++++------------- Cargo.toml | 2 +- flake.lock | 6 +++--- src/api/client/unstable.rs | 40 +++++++++++++++++++++++--------------- 4 files changed, 42 insertions(+), 33 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index efba2e07..be2c6720 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3482,7 +3482,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "assign", "js_int", @@ -3504,7 +3504,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "js_int", "ruma-common", @@ -3516,7 +3516,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "as_variant", "assign", @@ -3539,12 +3539,13 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "as_variant", "base64 0.22.1", "bytes", "form_urlencoded", + "getrandom 0.2.15", "http", "indexmap 2.7.1", "js_int", @@ -3570,7 +3571,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3595,7 +3596,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "bytes", "http", @@ -3613,7 +3614,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3622,7 +3623,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "js_int", "ruma-common", @@ -3632,7 +3633,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3647,7 +3648,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "js_int", "ruma-common", @@ -3659,7 +3660,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "headers", "http", @@ -3672,7 +3673,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3688,7 +3689,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=e7a793b720e58bbe6858fecb86db97191dbfe7aa#e7a793b720e58bbe6858fecb86db97191dbfe7aa" +source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 12556e00..bea306f6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "e7a793b720e58bbe6858fecb86db97191dbfe7aa" +rev = "4b3a92568310bef42078783e0172b188c5a92b3d" features = [ "compat", "rand", diff --git a/flake.lock b/flake.lock index 5af6ec43..15040a42 100644 --- a/flake.lock +++ b/flake.lock @@ -567,11 +567,11 @@ "rocksdb": { "flake": false, "locked": { - "lastModified": 1737828695, - "narHash": "sha256-8Ev6zzhNPU798JNvU27a7gj5X+6SDG3jBweUkQ59DbA=", + "lastModified": 1739735789, + "narHash": "sha256-BIzuZS0TV4gRnciP4ieW5J3Hql986iedM5dHQfK6z68=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "a4d9230dcc9d03be428b9a728133f8f646c0065c", + "rev": 
"34e401fd4392dd3268e042f1e40dffd064b9a7ff", "type": "github" }, "original": { diff --git a/src/api/client/unstable.rs b/src/api/client/unstable.rs index 66cb31d5..67c7df75 100644 --- a/src/api/client/unstable.rs +++ b/src/api/client/unstable.rs @@ -272,7 +272,7 @@ pub(crate) async fn set_profile_key_route( ))); } - let Some(profile_key_value) = body.kv_pair.get(&body.key) else { + let Some(profile_key_value) = body.kv_pair.get(&body.key_name) else { return Err!(Request(BadJson( "The key does not match the URL field key, or JSON body is empty (use DELETE)" ))); @@ -290,7 +290,7 @@ pub(crate) async fn set_profile_key_route( return Err!(Request(BadJson("Key names cannot be longer than 128 bytes"))); } - if body.key == "displayname" { + if body.key_name == "displayname" { let all_joined_rooms: Vec = services .rooms .state_cache @@ -306,7 +306,7 @@ pub(crate) async fn set_profile_key_route( &all_joined_rooms, ) .await; - } else if body.key == "avatar_url" { + } else if body.key_name == "avatar_url" { let mxc = ruma::OwnedMxcUri::from(profile_key_value.to_string()); let all_joined_rooms: Vec = services @@ -319,9 +319,11 @@ pub(crate) async fn set_profile_key_route( update_avatar_url(&services, &body.user_id, Some(mxc), None, &all_joined_rooms).await; } else { - services - .users - .set_profile_key(&body.user_id, &body.key, Some(profile_key_value.clone())); + services.users.set_profile_key( + &body.user_id, + &body.key_name, + Some(profile_key_value.clone()), + ); } if services.globals.allow_local_presence() { @@ -357,7 +359,7 @@ pub(crate) async fn delete_profile_key_route( ))); } - if body.key == "displayname" { + if body.key_name == "displayname" { let all_joined_rooms: Vec = services .rooms .state_cache @@ -367,7 +369,7 @@ pub(crate) async fn delete_profile_key_route( .await; update_displayname(&services, &body.user_id, None, &all_joined_rooms).await; - } else if body.key == "avatar_url" { + } else if body.key_name == "avatar_url" { let all_joined_rooms: Vec = services .rooms .state_cache @@ -380,7 +382,7 @@ pub(crate) async fn delete_profile_key_route( } else { services .users - .set_profile_key(&body.user_id, &body.key, None); + .set_profile_key(&body.user_id, &body.key_name, None); } if services.globals.allow_local_presence() { @@ -497,11 +499,13 @@ pub(crate) async fn get_profile_key_route( .users .set_timezone(&body.user_id, response.tz.clone()); - if let Some(value) = response.custom_profile_fields.get(&body.key) { - profile_key_value.insert(body.key.clone(), value.clone()); - services - .users - .set_profile_key(&body.user_id, &body.key, Some(value.clone())); + if let Some(value) = response.custom_profile_fields.get(&body.key_name) { + profile_key_value.insert(body.key_name.clone(), value.clone()); + services.users.set_profile_key( + &body.user_id, + &body.key_name, + Some(value.clone()), + ); } else { return Err!(Request(NotFound("The requested profile key does not exist."))); } @@ -520,8 +524,12 @@ pub(crate) async fn get_profile_key_route( return Err!(Request(NotFound("Profile was not found."))); } - if let Ok(value) = services.users.profile_key(&body.user_id, &body.key).await { - profile_key_value.insert(body.key.clone(), value); + if let Ok(value) = services + .users + .profile_key(&body.user_id, &body.key_name) + .await + { + profile_key_value.insert(body.key_name.clone(), value); } else { return Err!(Request(NotFound("The requested profile key does not exist."))); } From 01155fa649ef401d3ca9653439c0a7adf8a83f71 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 21 Feb 2025 
17:47:44 +0000 Subject: [PATCH 065/310] fix unsafe precondition violation Signed-off-by: Jason Volk --- src/api/client/sync/mod.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index 1967f4a2..46540881 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -76,11 +76,13 @@ async fn share_encrypted_room( .state_cache .get_shared_rooms(sender_user, user_id) .ready_filter(|&room_id| Some(room_id) != ignore_room) - .broad_any(|other_room_id| { + .map(ToOwned::to_owned) + .broad_any(|other_room_id| async move { services .rooms .state_accessor - .is_encrypted_room(other_room_id) + .is_encrypted_room(&other_room_id) + .await }) .await } From 1061f68f0e14ee166a14d631540d322492988627 Mon Sep 17 00:00:00 2001 From: morguldir Date: Fri, 21 Feb 2025 21:13:06 +0100 Subject: [PATCH 066/310] bump ruwuma --- Cargo.lock | 26 +++++++++++++------------- Cargo.toml | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index be2c6720..7e84437c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3482,7 +3482,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "assign", "js_int", @@ -3504,7 +3504,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "js_int", "ruma-common", @@ -3516,7 +3516,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "as_variant", "assign", @@ -3539,7 +3539,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "as_variant", "base64 0.22.1", @@ -3571,7 +3571,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3596,7 +3596,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "bytes", "http", @@ -3614,7 +3614,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3623,7 +3623,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "js_int", "ruma-common", @@ -3633,7 +3633,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3648,7 +3648,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "js_int", "ruma-common", @@ -3660,7 +3660,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "headers", "http", @@ -3673,7 +3673,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3689,7 +3689,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=4b3a92568310bef42078783e0172b188c5a92b3d#4b3a92568310bef42078783e0172b188c5a92b3d" +source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" dependencies = [ "futures-util", "js_int", diff --git a/Cargo.toml b/Cargo.toml index bea306f6..ed7e6ac3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "4b3a92568310bef42078783e0172b188c5a92b3d" +rev = "5dc3e0f81d614ed9dc96b50f646b2e4385291c55" features = [ "compat", "rand", From 8085a1c064afeb61d8136963a671e6bbc15a8f98 Mon Sep 17 
00:00:00 2001 From: morguldir Date: Sat, 22 Feb 2025 16:46:06 +0100 Subject: [PATCH 067/310] Implement MSC3967, also fixes error when uploading keys in element Co-authored-by: Aiden McClelland Signed-off-by: morguldir --- bin/complement | 2 +- src/api/client/keys.rs | 120 +++++++++++++++++++++++++++++++++------ src/api/server/send.rs | 14 ++--- src/service/users/mod.rs | 60 +++++++++++--------- 4 files changed, 143 insertions(+), 53 deletions(-) diff --git a/bin/complement b/bin/complement index a4c62856..ffd7a938 100755 --- a/bin/complement +++ b/bin/complement @@ -45,7 +45,7 @@ set +o pipefail env \ -C "$COMPLEMENT_SRC" \ COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \ - go test -tags="conduwuit_blacklist" "$SKIPPED_COMPLEMENT_TESTS" -v -timeout 1h -json ./tests | tee "$LOG_FILE" + go test -tags="conduwuit_blacklist" "$SKIPPED_COMPLEMENT_TESTS" -v -timeout 1h -json ./tests ./tests/msc3967 | tee "$LOG_FILE" set -o pipefail # Post-process the results into an easy-to-compare format, sorted by Test name for reproducible results diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 7bf0a5da..801ae32b 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -1,7 +1,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use axum::extract::State; -use conduwuit::{err, utils, Error, Result}; +use conduwuit::{debug, err, info, result::NotFound, utils, Err, Error, Result}; use futures::{stream::FuturesUnordered, StreamExt}; use ruma::{ api::{ @@ -15,6 +15,7 @@ use ruma::{ }, federation, }, + encryption::CrossSigningKey, serde::Raw, OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, }; @@ -125,7 +126,24 @@ pub(crate) async fn upload_signing_keys_route( auth_error: None, }; - if let Some(auth) = &body.auth { + if let Ok(exists) = check_for_new_keys( + services, + sender_user, + body.self_signing_key.as_ref(), + body.user_signing_key.as_ref(), + body.master_key.as_ref(), + ) + .await + .inspect_err(|e| info!(?e)) + { + if let Some(result) = exists { + // No-op, they tried to reupload the same set of keys + // (lost connection for example) + return Ok(result); + } + debug!("Skipping UIA in accordance with MSC3967, the user didn't have any existing keys"); + // Some of the keys weren't found, so we let them upload + } else if let Some(auth) = &body.auth { let (worked, uiaainfo) = services .uiaa .try_auth(sender_user, sender_device, auth, &uiaainfo) @@ -134,7 +152,7 @@ pub(crate) async fn upload_signing_keys_route( if !worked { return Err(Error::Uiaa(uiaainfo)); } - // Success! + // Success! 
} else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services @@ -146,22 +164,90 @@ pub(crate) async fn upload_signing_keys_route( return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } - if let Some(master_key) = &body.master_key { - services - .users - .add_cross_signing_keys( - sender_user, - master_key, - &body.self_signing_key, - &body.user_signing_key, - true, // notify so that other users see the new keys - ) - .await?; - } + services + .users + .add_cross_signing_keys( + sender_user, + &body.master_key, + &body.self_signing_key, + &body.user_signing_key, + true, // notify so that other users see the new keys + ) + .await?; Ok(upload_signing_keys::v3::Response {}) } +async fn check_for_new_keys( + services: crate::State, + user_id: &UserId, + self_signing_key: Option<&Raw>, + user_signing_key: Option<&Raw>, + master_signing_key: Option<&Raw>, +) -> Result> { + debug!("checking for existing keys"); + let mut empty = false; + if let Some(master_signing_key) = master_signing_key { + let (key, value) = parse_master_key(user_id, master_signing_key)?; + let result = services + .users + .get_master_key(None, user_id, &|_| true) + .await; + if result.is_not_found() { + empty = true; + } else { + let existing_master_key = result?; + let (existing_key, existing_value) = parse_master_key(user_id, &existing_master_key)?; + if existing_key != key || existing_value != value { + return Err!(Request(Forbidden( + "Tried to change an existing master key, UIA required" + ))); + } + } + } + if let Some(user_signing_key) = user_signing_key { + let key = services.users.get_user_signing_key(user_id).await; + if key.is_not_found() && !empty { + return Err!(Request(Forbidden( + "Tried to update an existing user signing key, UIA required" + ))); + } + if !key.is_not_found() { + let existing_signing_key = key?.deserialize()?; + if existing_signing_key != user_signing_key.deserialize()? { + return Err!(Request(Forbidden( + "Tried to change an existing user signing key, UIA required" + ))); + } + } + } + if let Some(self_signing_key) = self_signing_key { + let key = services + .users + .get_self_signing_key(None, user_id, &|_| true) + .await; + if key.is_not_found() && !empty { + debug!(?key); + return Err!(Request(Forbidden( + "Tried to add a new signing key independently from the master key" + ))); + } + if !key.is_not_found() { + let existing_signing_key = key?.deserialize()?; + if existing_signing_key != self_signing_key.deserialize()? { + return Err!(Request(Forbidden( + "Tried to update an existing self signing key, UIA required" + ))); + } + } + } + if empty { + return Ok(None); + } + + Ok(Some(upload_signing_keys::v3::Response {})) +} + /// # `POST /_matrix/client/r0/keys/signatures/upload` /// /// Uploads end-to-end key signatures from the sender user. 
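To make the MSC3967 gate that `check_for_new_keys` implements easier to follow in isolation, here is a minimal, self-contained Rust sketch of the same decision, not part of the patch itself. `Keys`, `Decision`, and `msc3967_decision` are invented names for illustration only; the real handler above compares `Raw<CrossSigningKey>` values key by key and interacts with the UIA session rather than whole opaque key sets.

// Illustrative sketch only (assumed simplification, not conduwuit's handler):
// the MSC3967 rule, reduced to plain owned data so it compiles on its own.

/// A user's cross-signing key set, flattened to opaque strings for the sketch.
#[derive(Clone, PartialEq, Eq, Debug)]
struct Keys {
    master: Option<String>,
    self_signing: Option<String>,
    user_signing: Option<String>,
}

/// What the upload endpoint should do with an incoming key set.
#[derive(Debug)]
enum Decision {
    /// MSC3967: the user has no cross-signing keys yet, so UIA is skipped.
    SkipUia,
    /// The upload matches what is already stored (e.g. a retried request);
    /// answer with success without prompting for UIA again.
    NoOp,
    /// An existing key would be replaced; fall back to user-interactive auth.
    RequireUia,
}

fn msc3967_decision(existing: Option<&Keys>, upload: &Keys) -> Decision {
    match existing {
        // First upload of cross-signing keys: allowed without UIA.
        None => Decision::SkipUia,
        // Identical re-upload (lost connection, retry): treat as a no-op success.
        Some(current) if current == upload => Decision::NoOp,
        // Anything that changes an existing key still requires UIA.
        Some(_) => Decision::RequireUia,
    }
}

fn main() {
    let stored = Keys {
        master: Some("master-key".into()),
        self_signing: Some("self-signing-key".into()),
        user_signing: None,
    };
    let rotated = Keys { master: Some("rotated-master-key".into()), ..stored.clone() };

    assert!(matches!(msc3967_decision(None, &stored), Decision::SkipUia));
    assert!(matches!(msc3967_decision(Some(&stored), &stored.clone()), Decision::NoOp));
    assert!(matches!(msc3967_decision(Some(&stored), &rotated), Decision::RequireUia));
}

The real `check_for_new_keys` above is finer-grained than this sketch: it also rejects a self-signing or user-signing key uploaded independently of a master key, and returns the forbidden/UIA error per key rather than for the set as a whole.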
@@ -407,7 +493,9 @@ where * resulting in an endless loop */ ) .await?; - master_keys.insert(user.clone(), raw); + if let Some(raw) = raw { + master_keys.insert(user.clone(), raw); + } } self_signing_keys.extend(response.self_signing_keys); diff --git a/src/api/server/send.rs b/src/api/server/send.rs index 2e615a0c..bc18377e 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -585,12 +585,10 @@ async fn handle_edu_signing_key_update( return; } - if let Some(master_key) = master_key { - services - .users - .add_cross_signing_keys(&user_id, &master_key, &self_signing_key, &None, true) - .await - .log_err() - .ok(); - } + services + .users + .add_cross_signing_keys(&user_id, &master_key, &self_signing_key, &None, true) + .await + .log_err() + .ok(); } diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 68b87541..f0389a4a 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -514,7 +514,7 @@ impl Service { pub async fn add_cross_signing_keys( &self, user_id: &UserId, - master_key: &Raw, + master_key: &Option>, self_signing_key: &Option>, user_signing_key: &Option>, notify: bool, @@ -523,15 +523,17 @@ impl Service { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xFF); - let (master_key_key, _) = parse_master_key(user_id, master_key)?; + if let Some(master_key) = master_key { + let (master_key_key, _) = parse_master_key(user_id, master_key)?; - self.db - .keyid_key - .insert(&master_key_key, master_key.json().get().as_bytes()); + self.db + .keyid_key + .insert(&master_key_key, master_key.json().get().as_bytes()); - self.db - .userid_masterkeyid - .insert(user_id.as_bytes(), &master_key_key); + self.db + .userid_masterkeyid + .insert(user_id.as_bytes(), &master_key_key); + } // Self-signing key if let Some(self_signing_key) = self_signing_key { @@ -567,32 +569,16 @@ impl Service { // User-signing key if let Some(user_signing_key) = user_signing_key { - let mut user_signing_key_ids = user_signing_key - .deserialize() - .map_err(|_| err!(Request(InvalidParam("Invalid user signing key"))))? - .keys - .into_values(); - - let user_signing_key_id = user_signing_key_ids - .next() - .ok_or(err!(Request(InvalidParam("User signing key contained no key."))))?; - - if user_signing_key_ids.next().is_some() { - return Err!(Request(InvalidParam( - "User signing key contained more than one key." - ))); - } - - let mut user_signing_key_key = prefix; - user_signing_key_key.extend_from_slice(user_signing_key_id.as_bytes()); + let user_signing_key_id = parse_user_signing_key(user_signing_key)?; + let user_signing_key_key = (user_id, &user_signing_key_id); self.db .keyid_key - .insert(&user_signing_key_key, user_signing_key.json().get().as_bytes()); + .put_raw(user_signing_key_key, user_signing_key.json().get().as_bytes()); self.db .userid_usersigningkeyid - .insert(user_id.as_bytes(), &user_signing_key_key); + .put(user_id, user_signing_key_key); } if notify { @@ -1079,6 +1065,24 @@ pub fn parse_master_key( Ok((master_key_key, master_key)) } +pub fn parse_user_signing_key(user_signing_key: &Raw) -> Result { + let mut user_signing_key_ids = user_signing_key + .deserialize() + .map_err(|_| err!(Request(InvalidParam("Invalid user signing key"))))? 
+ .keys + .into_values(); + + let user_signing_key_id = user_signing_key_ids + .next() + .ok_or(err!(Request(InvalidParam("User signing key contained no key."))))?; + + if user_signing_key_ids.next().is_some() { + return Err!(Request(InvalidParam("User signing key contained more than one key."))); + } + + Ok(user_signing_key_id) +} + /// Ensure that a user only sees signatures from themselves and the target user fn clean_signatures( mut cross_signing_key: serde_json::Value, From bec19df275f100f15fa58dc8654a2ec41958eacc Mon Sep 17 00:00:00 2001 From: morguldir Date: Sat, 22 Feb 2025 17:12:31 +0100 Subject: [PATCH 068/310] increase compression levels for some column families again --- src/database/engine/descriptor.rs | 4 ++-- src/database/maps.rs | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index 934ef831..816555d2 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -83,7 +83,7 @@ pub(crate) static RANDOM: Descriptor = Descriptor { write_size: 1024 * 1024 * 32, cache_shards: 128, compression_level: -3, - bottommost_level: Some(-1), + bottommost_level: Some(2), compressed_index: true, ..BASE }; @@ -95,7 +95,7 @@ pub(crate) static SEQUENTIAL: Descriptor = Descriptor { file_size: 1024 * 1024 * 2, cache_shards: 128, compression_level: -2, - bottommost_level: Some(-1), + bottommost_level: Some(2), compression_shape: [0, 0, 1, 1, 1, 1, 1], compressed_index: false, ..BASE diff --git a/src/database/maps.rs b/src/database/maps.rs index 9ae5ab44..fc216ee0 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -171,6 +171,8 @@ pub(super) static MAPS: &[Descriptor] = &[ name: "roomsynctoken_shortstatehash", val_size_hint: Some(8), block_size: 512, + compression_level: 3, + bottommost_level: Some(6), ..descriptor::SEQUENTIAL }, Descriptor { From e97952b7f6d310d5954a0d9e6b8979d25b090387 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 23 Feb 2025 01:17:30 -0500 Subject: [PATCH 069/310] bump nix lock, update to rust 2024 and 1.85.0 toolchain Signed-off-by: June Clementine Strawberry --- Cargo.toml | 7 +++++-- flake.lock | 36 ++++++++++++++++++------------------ flake.nix | 2 +- rust-toolchain.toml | 2 +- rustfmt.toml | 2 +- 5 files changed, 26 insertions(+), 23 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index ed7e6ac3..76de2212 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,14 +13,14 @@ authors = [ ] categories = ["network-programming"] description = "a very cool Matrix chat homeserver written in Rust" -edition = "2021" +edition = "2024" homepage = "https://conduwuit.puppyirl.gay/" keywords = ["chat", "matrix", "networking", "server", "uwu"] license = "Apache-2.0" # See also `rust-toolchain.toml` readme = "README.md" repository = "https://github.com/girlbossceo/conduwuit" -rust-version = "1.84.0" +rust-version = "1.85.0" version = "0.5.0" [workspace.metadata.crane] @@ -975,3 +975,6 @@ suspicious = { level = "warn", priority = -1 } ## some sadness let_underscore_future = { level = "allow", priority = 1 } + +# rust doesnt understand conduwuit's custom log macros +literal_string_with_formatting_args = { level = "allow", priority = 1 } diff --git a/flake.lock b/flake.lock index 15040a42..9bf6ac55 100644 --- a/flake.lock +++ b/flake.lock @@ -10,11 +10,11 @@ "nixpkgs-stable": "nixpkgs-stable" }, "locked": { - "lastModified": 1731270564, - "narHash": "sha256-6KMC/NH/VWP5Eb+hA56hz0urel3jP6Y6cF2PX6xaTkk=", + "lastModified": 1738524606, + "narHash": 
"sha256-hPYEJ4juK3ph7kbjbvv7PlU1D9pAkkhl+pwx8fZY53U=", "owner": "zhaofengli", "repo": "attic", - "rev": "47752427561f1c34debb16728a210d378f0ece36", + "rev": "ff8a897d1f4408ebbf4d45fa9049c06b3e1e3f4e", "type": "github" }, "original": { @@ -117,11 +117,11 @@ }, "crane_2": { "locked": { - "lastModified": 1737689766, - "narHash": "sha256-ivVXYaYlShxYoKfSo5+y5930qMKKJ8CLcAoIBPQfJ6s=", + "lastModified": 1739936662, + "narHash": "sha256-x4syUjNUuRblR07nDPeLDP7DpphaBVbUaSoeZkFbGSk=", "owner": "ipetkov", "repo": "crane", - "rev": "6fe74265bbb6d016d663b1091f015e2976c4a527", + "rev": "19de14aaeb869287647d9461cbd389187d8ecdb7", "type": "github" }, "original": { @@ -170,11 +170,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1737786656, - "narHash": "sha256-ubCW9Jy7ZUOF354bWxTgLDpVnTvIpNr6qR4H/j7I0oo=", + "lastModified": 1740206139, + "narHash": "sha256-wWSv4KYhPKggKuJLzghfBs99pS3Kli9UBlyXVBzuIzc=", "owner": "nix-community", "repo": "fenix", - "rev": "2f721f527886f801403f389a9cabafda8f1e3b7f", + "rev": "133a9eb59fb4ddac443ebe5ab2449d3940396533", "type": "github" }, "original": { @@ -364,11 +364,11 @@ "liburing": { "flake": false, "locked": { - "lastModified": 1737600516, - "narHash": "sha256-EKyLQ3pbcjoU5jH5atge59F4fzuhTsb6yalUj6Ve2t8=", + "lastModified": 1740063075, + "narHash": "sha256-AfrCMPiXwgB0yxociq4no4NjCqGf/nRVhC3CLRoKqhA=", "owner": "axboe", "repo": "liburing", - "rev": "6c509e2b0c881a13b83b259a221bf15fc9b3f681", + "rev": "5c788d514b9ed6d1a3624150de8aa6db403c1c65", "type": "github" }, "original": { @@ -550,11 +550,11 @@ }, "nixpkgs_5": { "locked": { - "lastModified": 1737717945, - "narHash": "sha256-ET91TMkab3PmOZnqiJQYOtSGvSTvGeHoegAv4zcTefM=", + "lastModified": 1740019556, + "narHash": "sha256-vn285HxnnlHLWnv59Og7muqECNMS33mWLM14soFIv2g=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "ecd26a469ac56357fd333946a99086e992452b6a", + "rev": "dad564433178067be1fbdfcce23b546254b6d641", "type": "github" }, "original": { @@ -599,11 +599,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1737728869, - "narHash": "sha256-U4pl3Hi0lT6GP4ecN3q9wdD2sdaKMbmD/5NJ1NdJ9AM=", + "lastModified": 1740077634, + "narHash": "sha256-KlYdDhon/hy91NutuBeN8e3qTKf3FXgsudWsjnHud68=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "6e4c29f7ce18cea7d3d31237a4661ab932eab636", + "rev": "88fbdcd510e79ef3bcd81d6d9d4f07bdce84be8c", "type": "github" }, "original": { diff --git a/flake.nix b/flake.nix index 3cef1af5..04dee681 100644 --- a/flake.nix +++ b/flake.nix @@ -26,7 +26,7 @@ file = ./rust-toolchain.toml; # See also `rust-toolchain.toml` - sha256 = "sha256-lMLAupxng4Fd9F1oDw8gx+qA0RuF7ou7xhNU8wgs0PU="; + sha256 = "sha256-AJ6LX/Q/Er9kS15bn9iflkUwcgYqRQxiOIL2ToVAXaU="; }; mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: { diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 97e33c91..00fb6cee 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -9,7 +9,7 @@ # If you're having trouble making the relevant changes, bug a maintainer. 
[toolchain] -channel = "1.84.0" +channel = "1.85.0" profile = "minimal" components = [ # For rust-analyzer diff --git a/rustfmt.toml b/rustfmt.toml index 635ec8f8..89041b04 100644 --- a/rustfmt.toml +++ b/rustfmt.toml @@ -2,7 +2,7 @@ array_width = 80 chain_width = 60 comment_width = 80 condense_wildcard_suffixes = true -edition = "2024" +style_edition = "2024" fn_call_width = 80 fn_single_line = true format_code_in_doc_comments = true From a1e1f40deda8f974d61b0095fc41356cc3fda43f Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 23 Feb 2025 01:17:45 -0500 Subject: [PATCH 070/310] run cargo fix for rust 2024 changes and rustfmt Signed-off-by: June Clementine Strawberry --- src/admin/appservice/commands.rs | 2 +- src/admin/command.rs | 4 +- src/admin/debug/commands.rs | 37 +- src/admin/debug/tester.rs | 2 +- src/admin/federation/commands.rs | 2 +- src/admin/media/commands.rs | 157 +++---- src/admin/processor.rs | 14 +- src/admin/query/account_data.rs | 2 +- src/admin/query/raw.rs | 5 +- src/admin/query/resolver.rs | 4 +- src/admin/query/room_state_cache.rs | 2 +- src/admin/query/room_timeline.rs | 4 +- src/admin/query/sending.rs | 2 +- src/admin/query/short.rs | 2 +- src/admin/query/users.rs | 2 +- src/admin/room/alias.rs | 4 +- src/admin/room/commands.rs | 4 +- src/admin/room/directory.rs | 4 +- src/admin/room/info.rs | 4 +- src/admin/room/moderation.rs | 198 ++++----- src/admin/server/commands.rs | 2 +- src/admin/user/commands.rs | 8 +- src/admin/utils.rs | 2 +- src/api/client/account.rs | 201 +++++---- src/api/client/account_data.rs | 6 +- src/api/client/alias.rs | 30 +- src/api/client/appservice.rs | 2 +- src/api/client/backup.rs | 4 +- src/api/client/capabilities.rs | 2 +- src/api/client/context.rs | 11 +- src/api/client/device.rs | 86 ++-- src/api/client/directory.rs | 41 +- src/api/client/keys.rs | 130 +++--- src/api/client/media.rs | 7 +- src/api/client/media_legacy.rs | 252 ++++++------ src/api/client/membership.rs | 157 +++---- src/api/client/message.rs | 13 +- src/api/client/presence.rs | 57 +-- src/api/client/profile.rs | 9 +- src/api/client/push.rs | 6 +- src/api/client/read_marker.rs | 6 +- src/api/client/redact.rs | 2 +- src/api/client/relations.rs | 13 +- src/api/client/report.rs | 10 +- src/api/client/room/create.rs | 25 +- src/api/client/room/event.rs | 6 +- src/api/client/room/initial_sync.rs | 5 +- src/api/client/room/upgrade.rs | 7 +- src/api/client/search.rs | 11 +- src/api/client/send.rs | 4 +- src/api/client/session.rs | 68 ++-- src/api/client/space.rs | 12 +- src/api/client/state.rs | 6 +- src/api/client/sync/mod.rs | 14 +- src/api/client/sync/v3.rs | 65 +-- src/api/client/sync/v4.rs | 25 +- src/api/client/sync/v5.rs | 24 +- src/api/client/tag.rs | 2 +- src/api/client/threads.rs | 2 +- src/api/client/typing.rs | 65 +-- src/api/client/unstable.rs | 34 +- src/api/client/unversioned.rs | 2 +- src/api/client/user_directory.rs | 4 +- src/api/client/voip.rs | 6 +- src/api/client/well_known.rs | 2 +- src/api/mod.rs | 2 +- src/api/router.rs | 6 +- src/api/router/args.rs | 8 +- src/api/router/auth.rs | 24 +- src/api/router/handler.rs | 4 +- src/api/router/request.rs | 4 +- src/api/router/response.rs | 4 +- src/api/server/backfill.rs | 4 +- src/api/server/event.rs | 4 +- src/api/server/event_auth.rs | 4 +- src/api/server/get_missing_events.rs | 2 +- src/api/server/hierarchy.rs | 4 +- src/api/server/invite.rs | 6 +- src/api/server/key.rs | 8 +- src/api/server/make_join.rs | 8 +- src/api/server/make_knock.rs | 8 +- src/api/server/make_leave.rs | 2 +- 
src/api/server/media.rs | 8 +- src/api/server/query.rs | 4 +- src/api/server/send.rs | 12 +- src/api/server/send_join.rs | 16 +- src/api/server/send_knock.rs | 14 +- src/api/server/send_leave.rs | 8 +- src/api/server/state.rs | 4 +- src/api/server/state_ids.rs | 4 +- src/api/server/user.rs | 2 +- src/api/server/utils.rs | 4 +- src/core/alloc/je.rs | 25 +- src/core/config/check.rs | 2 +- src/core/config/manager.rs | 4 +- src/core/config/mod.rs | 10 +- src/core/debug.rs | 2 +- src/core/error/err.rs | 4 +- src/core/error/mod.rs | 4 +- src/core/error/panic.rs | 2 +- src/core/error/response.rs | 2 +- src/core/info/room_version.rs | 2 +- src/core/log/capture/data.rs | 4 +- src/core/log/capture/util.rs | 2 +- src/core/log/console.rs | 6 +- src/core/log/fmt.rs | 2 +- src/core/log/mod.rs | 4 +- src/core/log/reload.rs | 4 +- src/core/mods/module.rs | 4 +- src/core/mods/new.rs | 2 +- src/core/pdu/builder.rs | 4 +- src/core/pdu/content.rs | 2 +- src/core/pdu/count.rs | 2 +- src/core/pdu/event.rs | 2 +- src/core/pdu/event_id.rs | 2 +- src/core/pdu/mod.rs | 6 +- src/core/pdu/raw_id.rs | 4 +- src/core/pdu/redact.rs | 31 +- src/core/pdu/strip.rs | 2 +- src/core/pdu/unsigned.rs | 4 +- src/core/server.rs | 4 +- src/core/state_res/event_auth.rs | 383 +++++++++--------- src/core/state_res/mod.rs | 25 +- src/core/state_res/power_levels.rs | 8 +- src/core/state_res/state_event.rs | 2 +- src/core/state_res/test_utils.rs | 16 +- src/core/utils/bytes.rs | 2 +- src/core/utils/defer.rs | 4 +- src/core/utils/future/bool_ext.rs | 2 +- src/core/utils/future/ext_ext.rs | 2 +- src/core/utils/future/mod.rs | 2 +- src/core/utils/future/option_ext.rs | 2 +- src/core/utils/future/try_ext_ext.rs | 3 +- src/core/utils/hash/argon.rs | 6 +- src/core/utils/json.rs | 2 +- src/core/utils/math.rs | 2 +- src/core/utils/math/tried.rs | 2 +- src/core/utils/mod.rs | 8 +- src/core/utils/mutex_map.rs | 2 +- src/core/utils/rand.rs | 2 +- src/core/utils/stream/broadband.rs | 4 +- src/core/utils/stream/cloned.rs | 2 +- src/core/utils/stream/ignore.rs | 2 +- src/core/utils/stream/iter_stream.rs | 3 +- src/core/utils/stream/mod.rs | 4 +- src/core/utils/stream/ready.rs | 2 +- src/core/utils/stream/try_parallel.rs | 4 +- src/core/utils/stream/try_ready.rs | 2 +- src/core/utils/stream/try_tools.rs | 2 +- src/core/utils/stream/wideband.rs | 4 +- src/core/utils/string.rs | 2 +- src/core/utils/string/unquoted.rs | 4 +- src/core/utils/sys.rs | 4 +- src/core/utils/sys/compute.rs | 6 +- src/core/utils/sys/storage.rs | 4 +- src/core/utils/tests.rs | 2 +- src/core/utils/time.rs | 2 +- src/database/de.rs | 5 +- src/database/engine.rs | 6 +- src/database/engine/backup.rs | 2 +- src/database/engine/cf_opts.rs | 4 +- src/database/engine/context.rs | 2 +- src/database/engine/db_opts.rs | 4 +- src/database/engine/files.rs | 4 +- src/database/engine/memory_usage.rs | 2 +- src/database/engine/open.rs | 8 +- src/database/engine/repair.rs | 2 +- src/database/handle.rs | 2 +- src/database/keyval.rs | 2 +- src/database/map.rs | 2 +- src/database/map/compact.rs | 2 +- src/database/map/contains.rs | 16 +- src/database/map/count.rs | 7 +- src/database/map/get.rs | 11 +- src/database/map/get_batch.rs | 11 +- src/database/map/keys.rs | 2 +- src/database/map/keys_from.rs | 18 +- src/database/map/keys_prefix.rs | 10 +- src/database/map/qry.rs | 13 +- src/database/map/qry_batch.rs | 7 +- src/database/map/rev_keys.rs | 2 +- src/database/map/rev_keys_from.rs | 12 +- src/database/map/rev_keys_prefix.rs | 10 +- src/database/map/rev_stream.rs | 2 +- 
src/database/map/rev_stream_from.rs | 12 +- src/database/map/rev_stream_prefix.rs | 10 +- src/database/map/stream.rs | 2 +- src/database/map/stream_from.rs | 12 +- src/database/map/stream_prefix.rs | 10 +- src/database/maps.rs | 2 +- src/database/mod.rs | 10 +- src/database/pool.rs | 9 +- src/database/pool/configure.rs | 3 +- src/database/ser.rs | 4 +- src/database/stream.rs | 4 +- src/database/stream/items.rs | 4 +- src/database/stream/items_rev.rs | 4 +- src/database/stream/keys.rs | 4 +- src/database/stream/keys_rev.rs | 4 +- src/database/tests.rs | 7 +- src/database/watchers.rs | 2 +- src/macros/admin.rs | 6 +- src/macros/cargo.rs | 2 +- src/macros/config.rs | 8 +- src/macros/implement.rs | 2 +- src/macros/mod.rs | 3 +- src/macros/refutable.rs | 4 +- src/macros/utils.rs | 2 +- src/main/clap.rs | 2 +- src/main/logging.rs | 6 +- src/main/main.rs | 4 +- src/main/mods.rs | 4 +- src/main/runtime.rs | 5 +- src/main/sentry.rs | 8 +- src/main/server.rs | 2 +- src/router/layers.rs | 18 +- src/router/request.rs | 4 +- src/router/router.rs | 2 +- src/router/run.rs | 4 +- src/router/serve/mod.rs | 2 +- src/router/serve/plain.rs | 6 +- src/router/serve/tls.rs | 4 +- src/router/serve/unix.rs | 10 +- src/service/account_data/mod.rs | 9 +- src/service/admin/console.rs | 8 +- src/service/admin/create.rs | 4 +- src/service/admin/execute.rs | 4 +- src/service/admin/grant.rs | 6 +- src/service/admin/mod.rs | 6 +- src/service/appservice/mod.rs | 6 +- src/service/appservice/registration_info.rs | 2 +- src/service/client/mod.rs | 9 +- src/service/config/mod.rs | 5 +- src/service/emergency/mod.rs | 6 +- src/service/federation/execute.rs | 12 +- src/service/federation/mod.rs | 2 +- src/service/globals/data.rs | 2 +- src/service/globals/mod.rs | 2 +- src/service/key_backups/mod.rs | 7 +- src/service/manager.rs | 4 +- src/service/media/blurhash.rs | 2 +- src/service/media/data.rs | 7 +- src/service/media/migrations.rs | 6 +- src/service/media/mod.rs | 70 ++-- src/service/media/preview.rs | 2 +- src/service/media/remote.rs | 10 +- src/service/media/tests.rs | 2 +- src/service/media/thumbnail.rs | 18 +- src/service/migrations.rs | 12 +- src/service/mod.rs | 2 +- src/service/presence/data.rs | 9 +- src/service/presence/mod.rs | 8 +- src/service/presence/presence.rs | 4 +- src/service/pusher/mod.rs | 17 +- src/service/resolver/actual.rs | 26 +- src/service/resolver/cache.rs | 6 +- src/service/resolver/dns.rs | 4 +- src/service/resolver/mod.rs | 4 +- src/service/resolver/tests.rs | 2 +- src/service/rooms/alias/mod.rs | 15 +- src/service/rooms/alias/remote.rs | 4 +- src/service/rooms/auth_chain/data.rs | 2 +- src/service/rooms/auth_chain/mod.rs | 8 +- src/service/rooms/directory/mod.rs | 4 +- src/service/rooms/event_handler/acl_check.rs | 4 +- .../fetch_and_handle_outliers.rs | 23 +- src/service/rooms/event_handler/fetch_prev.rs | 90 ++-- .../rooms/event_handler/fetch_state.rs | 8 +- .../event_handler/handle_incoming_pdu.rs | 8 +- .../rooms/event_handler/handle_outlier_pdu.rs | 10 +- .../rooms/event_handler/handle_prev_pdu.rs | 4 +- src/service/rooms/event_handler/mod.rs | 8 +- .../rooms/event_handler/parse_incoming_pdu.rs | 2 +- .../rooms/event_handler/resolve_state.rs | 13 +- .../rooms/event_handler/state_at_incoming.rs | 5 +- .../event_handler/upgrade_outlier_pdu.rs | 9 +- src/service/rooms/lazy_loading/mod.rs | 9 +- src/service/rooms/metadata/mod.rs | 4 +- src/service/rooms/outlier/mod.rs | 2 +- src/service/rooms/pdu_metadata/data.rs | 10 +- src/service/rooms/pdu_metadata/mod.rs | 8 +- 
src/service/rooms/read_receipt/data.rs | 8 +- src/service/rooms/read_receipt/mod.rs | 24 +- src/service/rooms/search/mod.rs | 16 +- src/service/rooms/short/mod.rs | 6 +- src/service/rooms/spaces/mod.rs | 18 +- src/service/rooms/spaces/pagination_token.rs | 2 +- src/service/rooms/spaces/tests.rs | 4 +- src/service/rooms/state/mod.rs | 132 +++--- src/service/rooms/state_accessor/mod.rs | 13 +- .../rooms/state_accessor/room_state.rs | 4 +- .../rooms/state_accessor/server_can.rs | 8 +- src/service/rooms/state_accessor/state.rs | 15 +- src/service/rooms/state_accessor/user_can.rs | 64 +-- src/service/rooms/state_cache/mod.rs | 18 +- src/service/rooms/state_compressor/mod.rs | 5 +- src/service/rooms/threads/mod.rs | 20 +- src/service/rooms/timeline/data.rs | 9 +- src/service/rooms/timeline/mod.rs | 23 +- src/service/rooms/typing/mod.rs | 9 +- src/service/rooms/user/mod.rs | 4 +- src/service/sending/appservice.rs | 4 +- src/service/sending/data.rs | 11 +- src/service/sending/mod.rs | 12 +- src/service/sending/sender.rs | 30 +- src/service/server_keys/acquire.rs | 8 +- src/service/server_keys/get.rs | 8 +- src/service/server_keys/keypair.rs | 2 +- src/service/server_keys/mod.rs | 11 +- src/service/server_keys/request.rs | 14 +- src/service/server_keys/sign.rs | 2 +- src/service/server_keys/verify.rs | 4 +- src/service/service.rs | 2 +- src/service/services.rs | 2 +- src/service/sync/mod.rs | 4 +- src/service/sync/watch.rs | 4 +- src/service/transaction_ids/mod.rs | 2 +- src/service/uiaa/mod.rs | 10 +- src/service/updates/mod.rs | 6 +- src/service/users/mod.rs | 24 +- 320 files changed, 2212 insertions(+), 2039 deletions(-) diff --git a/src/admin/appservice/commands.rs b/src/admin/appservice/commands.rs index 4f02531a..88f28431 100644 --- a/src/admin/appservice/commands.rs +++ b/src/admin/appservice/commands.rs @@ -1,6 +1,6 @@ use ruma::{api::appservice::Registration, events::room::message::RoomMessageEventContent}; -use crate::{admin_command, Result}; +use crate::{Result, admin_command}; #[admin_command] pub(super) async fn register(&self) -> Result { diff --git a/src/admin/command.rs b/src/admin/command.rs index 5ad9e581..5df980d6 100644 --- a/src/admin/command.rs +++ b/src/admin/command.rs @@ -3,9 +3,9 @@ use std::{fmt, time::SystemTime}; use conduwuit::Result; use conduwuit_service::Services; use futures::{ + Future, FutureExt, io::{AsyncWriteExt, BufWriter}, lock::Mutex, - Future, FutureExt, }; use ruma::EventId; @@ -21,7 +21,7 @@ impl Command<'_> { pub(crate) fn write_fmt( &self, arguments: fmt::Arguments<'_>, - ) -> impl Future + Send + '_ { + ) -> impl Future + Send + '_ + use<'_> { let buf = format!("{arguments}"); self.output.lock().then(|mut output| async move { output.write_all(buf.as_bytes()).await.map_err(Into::into) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index dcf9879c..c6f6a170 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -6,19 +6,19 @@ use std::{ }; use conduwuit::{ - debug_error, err, info, trace, utils, + Error, PduEvent, PduId, RawPduId, Result, debug_error, err, info, trace, utils, utils::{ stream::{IterStream, ReadyExt}, string::EMPTY, }, - warn, Error, PduEvent, PduId, RawPduId, Result, + warn, }; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ - api::{client::error::ErrorKind, federation::event::get_room_state}, - events::room::message::RoomMessageEventContent, CanonicalJsonObject, EventId, OwnedEventId, OwnedRoomOrAliasId, RoomId, RoomVersionId, ServerName, + api::{client::error::ErrorKind, 
federation::event::get_room_state}, + events::room::message::RoomMessageEventContent, }; use service::rooms::{ short::{ShortEventId, ShortRoomId}, @@ -209,18 +209,21 @@ pub(super) async fn get_remote_pdu_list( for pdu in list { if force { - if let Err(e) = self.get_remote_pdu(Box::from(pdu), server.clone()).await { - failed_count = failed_count.saturating_add(1); - self.services - .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "Failed to get remote PDU, ignoring error: {e}" - ))) - .await - .ok(); - warn!("Failed to get remote PDU, ignoring error: {e}"); - } else { - success_count = success_count.saturating_add(1); + match self.get_remote_pdu(Box::from(pdu), server.clone()).await { + | Err(e) => { + failed_count = failed_count.saturating_add(1); + self.services + .admin + .send_message(RoomMessageEventContent::text_plain(format!( + "Failed to get remote PDU, ignoring error: {e}" + ))) + .await + .ok(); + warn!("Failed to get remote PDU, ignoring error: {e}"); + }, + | _ => { + success_count = success_count.saturating_add(1); + }, } } else { self.get_remote_pdu(Box::from(pdu), server.clone()).await?; @@ -957,7 +960,7 @@ pub(super) async fn database_stats( self.services .db .iter() - .filter(|(&name, _)| map_name.is_empty() || map_name == name) + .filter(|&(&name, _)| map_name.is_empty() || map_name == name) .try_stream() .try_for_each(|(&name, map)| { let res = map.property(&property).expect("invalid property"); diff --git a/src/admin/debug/tester.rs b/src/admin/debug/tester.rs index 5200fa0d..005ee775 100644 --- a/src/admin/debug/tester.rs +++ b/src/admin/debug/tester.rs @@ -1,7 +1,7 @@ use conduwuit::Err; use ruma::events::room::message::RoomMessageEventContent; -use crate::{admin_command, admin_command_dispatch, Result}; +use crate::{Result, admin_command, admin_command_dispatch}; #[admin_command_dispatch] #[derive(Debug, clap::Subcommand)] diff --git a/src/admin/federation/commands.rs b/src/admin/federation/commands.rs index 13bc8da4..240ffa6a 100644 --- a/src/admin/federation/commands.rs +++ b/src/admin/federation/commands.rs @@ -3,7 +3,7 @@ use std::fmt::Write; use conduwuit::Result; use futures::StreamExt; use ruma::{ - events::room::message::RoomMessageEventContent, OwnedRoomId, RoomId, ServerName, UserId, + OwnedRoomId, RoomId, ServerName, UserId, events::room::message::RoomMessageEventContent, }; use crate::{admin_command, get_room_info}; diff --git a/src/admin/media/commands.rs b/src/admin/media/commands.rs index 3d0a9473..aeefa9f2 100644 --- a/src/admin/media/commands.rs +++ b/src/admin/media/commands.rs @@ -1,12 +1,12 @@ use std::time::Duration; use conduwuit::{ - debug, debug_info, debug_warn, error, info, trace, utils::time::parse_timepoint_ago, Result, + Result, debug, debug_info, debug_warn, error, info, trace, utils::time::parse_timepoint_ago, }; use conduwuit_service::media::Dim; use ruma::{ - events::room::message::RoomMessageEventContent, EventId, Mxc, MxcUri, OwnedMxcUri, - OwnedServerName, ServerName, + EventId, Mxc, MxcUri, OwnedMxcUri, OwnedServerName, ServerName, + events::room::message::RoomMessageEventContent, }; use crate::{admin_command, utils::parse_local_user_id}; @@ -41,103 +41,106 @@ pub(super) async fn delete( let mut mxc_urls = Vec::with_capacity(4); // parsing the PDU for any MXC URLs begins here - if let Ok(event_json) = self.services.rooms.timeline.get_pdu_json(&event_id).await { - if let Some(content_key) = event_json.get("content") { - debug!("Event ID has \"content\"."); - let content_obj = content_key.as_object(); + match 
self.services.rooms.timeline.get_pdu_json(&event_id).await { + | Ok(event_json) => { + if let Some(content_key) = event_json.get("content") { + debug!("Event ID has \"content\"."); + let content_obj = content_key.as_object(); - if let Some(content) = content_obj { - // 1. attempts to parse the "url" key - debug!("Attempting to go into \"url\" key for main media file"); - if let Some(url) = content.get("url") { - debug!("Got a URL in the event ID {event_id}: {url}"); + if let Some(content) = content_obj { + // 1. attempts to parse the "url" key + debug!("Attempting to go into \"url\" key for main media file"); + if let Some(url) = content.get("url") { + debug!("Got a URL in the event ID {event_id}: {url}"); - if url.to_string().starts_with("\"mxc://") { - debug!("Pushing URL {url} to list of MXCs to delete"); - let final_url = url.to_string().replace('"', ""); - mxc_urls.push(final_url); - } else { - info!( - "Found a URL in the event ID {event_id} but did not start with \ - mxc://, ignoring" - ); - } - } - - // 2. attempts to parse the "info" key - debug!("Attempting to go into \"info\" key for thumbnails"); - if let Some(info_key) = content.get("info") { - debug!("Event ID has \"info\"."); - let info_obj = info_key.as_object(); - - if let Some(info) = info_obj { - if let Some(thumbnail_url) = info.get("thumbnail_url") { - debug!("Found a thumbnail_url in info key: {thumbnail_url}"); - - if thumbnail_url.to_string().starts_with("\"mxc://") { - debug!( - "Pushing thumbnail URL {thumbnail_url} to list of MXCs \ - to delete" - ); - let final_thumbnail_url = - thumbnail_url.to_string().replace('"', ""); - mxc_urls.push(final_thumbnail_url); - } else { - info!( - "Found a thumbnail URL in the event ID {event_id} but \ - did not start with mxc://, ignoring" - ); - } + if url.to_string().starts_with("\"mxc://") { + debug!("Pushing URL {url} to list of MXCs to delete"); + let final_url = url.to_string().replace('"', ""); + mxc_urls.push(final_url); } else { info!( - "No \"thumbnail_url\" key in \"info\" key, assuming no \ - thumbnails." + "Found a URL in the event ID {event_id} but did not start \ + with mxc://, ignoring" ); } } - } - // 3. attempts to parse the "file" key - debug!("Attempting to go into \"file\" key"); - if let Some(file_key) = content.get("file") { - debug!("Event ID has \"file\"."); - let file_obj = file_key.as_object(); + // 2. 
attempts to parse the "info" key + debug!("Attempting to go into \"info\" key for thumbnails"); + if let Some(info_key) = content.get("info") { + debug!("Event ID has \"info\"."); + let info_obj = info_key.as_object(); - if let Some(file) = file_obj { - if let Some(url) = file.get("url") { - debug!("Found url in file key: {url}"); + if let Some(info) = info_obj { + if let Some(thumbnail_url) = info.get("thumbnail_url") { + debug!("Found a thumbnail_url in info key: {thumbnail_url}"); - if url.to_string().starts_with("\"mxc://") { - debug!("Pushing URL {url} to list of MXCs to delete"); - let final_url = url.to_string().replace('"', ""); - mxc_urls.push(final_url); + if thumbnail_url.to_string().starts_with("\"mxc://") { + debug!( + "Pushing thumbnail URL {thumbnail_url} to list of \ + MXCs to delete" + ); + let final_thumbnail_url = + thumbnail_url.to_string().replace('"', ""); + mxc_urls.push(final_thumbnail_url); + } else { + info!( + "Found a thumbnail URL in the event ID {event_id} \ + but did not start with mxc://, ignoring" + ); + } } else { info!( - "Found a URL in the event ID {event_id} but did not \ - start with mxc://, ignoring" + "No \"thumbnail_url\" key in \"info\" key, assuming no \ + thumbnails." ); } - } else { - info!("No \"url\" key in \"file\" key."); } } + + // 3. attempts to parse the "file" key + debug!("Attempting to go into \"file\" key"); + if let Some(file_key) = content.get("file") { + debug!("Event ID has \"file\"."); + let file_obj = file_key.as_object(); + + if let Some(file) = file_obj { + if let Some(url) = file.get("url") { + debug!("Found url in file key: {url}"); + + if url.to_string().starts_with("\"mxc://") { + debug!("Pushing URL {url} to list of MXCs to delete"); + let final_url = url.to_string().replace('"', ""); + mxc_urls.push(final_url); + } else { + info!( + "Found a URL in the event ID {event_id} but did not \ + start with mxc://, ignoring" + ); + } + } else { + info!("No \"url\" key in \"file\" key."); + } + } + } + } else { + return Ok(RoomMessageEventContent::text_plain( + "Event ID does not have a \"content\" key or failed parsing the \ + event ID JSON.", + )); } } else { return Ok(RoomMessageEventContent::text_plain( - "Event ID does not have a \"content\" key or failed parsing the event \ - ID JSON.", + "Event ID does not have a \"content\" key, this is not a message or an \ + event type that contains media.", )); } - } else { + }, + | _ => { return Ok(RoomMessageEventContent::text_plain( - "Event ID does not have a \"content\" key, this is not a message or an \ - event type that contains media.", + "Event ID does not exist or is not known to us.", )); - } - } else { - return Ok(RoomMessageEventContent::text_plain( - "Event ID does not exist or is not known to us.", - )); + }, } if mxc_urls.is_empty() { diff --git a/src/admin/processor.rs b/src/admin/processor.rs index eefcdcd6..77a60959 100644 --- a/src/admin/processor.rs +++ b/src/admin/processor.rs @@ -8,7 +8,7 @@ use std::{ use clap::{CommandFactory, Parser}; use conduwuit::{ - debug, error, + Error, Result, debug, error, log::{ capture, capture::Capture, @@ -16,24 +16,24 @@ use conduwuit::{ }, trace, utils::string::{collect_stream, common_prefix}, - warn, Error, Result, + warn, }; -use futures::{future::FutureExt, io::BufWriter, AsyncWriteExt}; +use futures::{AsyncWriteExt, future::FutureExt, io::BufWriter}; use ruma::{ + EventId, events::{ relation::InReplyTo, room::message::{Relation::Reply, RoomMessageEventContent}, }, - EventId, }; use service::{ - admin::{CommandInput, 
CommandOutput, ProcessorFuture, ProcessorResult}, Services, + admin::{CommandInput, CommandOutput, ProcessorFuture, ProcessorResult}, }; use tracing::Level; -use tracing_subscriber::{filter::LevelFilter, EnvFilter}; +use tracing_subscriber::{EnvFilter, filter::LevelFilter}; -use crate::{admin, admin::AdminCommand, Command}; +use crate::{Command, admin, admin::AdminCommand}; #[must_use] pub(super) fn complete(line: &str) -> String { complete_command(AdminCommand::command(), line) } diff --git a/src/admin/query/account_data.rs b/src/admin/query/account_data.rs index bb8ddeff..b2bf5e6d 100644 --- a/src/admin/query/account_data.rs +++ b/src/admin/query/account_data.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, RoomId, UserId}; +use ruma::{RoomId, UserId, events::room::message::RoomMessageEventContent}; use crate::{admin_command, admin_command_dispatch}; diff --git a/src/admin/query/raw.rs b/src/admin/query/raw.rs index 5a6006ec..23f11cc8 100644 --- a/src/admin/query/raw.rs +++ b/src/admin/query/raw.rs @@ -2,13 +2,12 @@ use std::{borrow::Cow, collections::BTreeMap, ops::Deref}; use clap::Subcommand; use conduwuit::{ - apply, at, is_zero, + Err, Result, apply, at, is_zero, utils::{ + IterStream, stream::{ReadyExt, TryIgnore, TryParallelExt}, string::EMPTY, - IterStream, }, - Err, Result, }; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::events::room::message::RoomMessageEventContent; diff --git a/src/admin/query/resolver.rs b/src/admin/query/resolver.rs index 08b5d171..10748d88 100644 --- a/src/admin/query/resolver.rs +++ b/src/admin/query/resolver.rs @@ -1,7 +1,7 @@ use clap::Subcommand; -use conduwuit::{utils::time, Result}; +use conduwuit::{Result, utils::time}; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, OwnedServerName}; +use ruma::{OwnedServerName, events::room::message::RoomMessageEventContent}; use crate::{admin_command, admin_command_dispatch}; diff --git a/src/admin/query/room_state_cache.rs b/src/admin/query/room_state_cache.rs index 71dadc99..1de5c02d 100644 --- a/src/admin/query/room_state_cache.rs +++ b/src/admin/query/room_state_cache.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::{Error, Result}; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, RoomId, ServerName, UserId}; +use ruma::{RoomId, ServerName, UserId, events::room::message::RoomMessageEventContent}; use crate::Command; diff --git a/src/admin/query/room_timeline.rs b/src/admin/query/room_timeline.rs index 3fe653e3..6f08aee9 100644 --- a/src/admin/query/room_timeline.rs +++ b/src/admin/query/room_timeline.rs @@ -1,7 +1,7 @@ use clap::Subcommand; -use conduwuit::{utils::stream::TryTools, PduCount, Result}; +use conduwuit::{PduCount, Result, utils::stream::TryTools}; use futures::TryStreamExt; -use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomOrAliasId}; +use ruma::{OwnedRoomOrAliasId, events::room::message::RoomMessageEventContent}; use crate::{admin_command, admin_command_dispatch}; diff --git a/src/admin/query/sending.rs b/src/admin/query/sending.rs index 8c6fb25f..a148f718 100644 --- a/src/admin/query/sending.rs +++ b/src/admin/query/sending.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, ServerName, UserId}; +use ruma::{ServerName, UserId, events::room::message::RoomMessageEventContent}; use 
service::sending::Destination; use crate::Command; diff --git a/src/admin/query/short.rs b/src/admin/query/short.rs index 7f0f3449..0957c15e 100644 --- a/src/admin/query/short.rs +++ b/src/admin/query/short.rs @@ -1,6 +1,6 @@ use clap::Subcommand; use conduwuit::Result; -use ruma::{events::room::message::RoomMessageEventContent, OwnedEventId, OwnedRoomOrAliasId}; +use ruma::{OwnedEventId, OwnedRoomOrAliasId, events::room::message::RoomMessageEventContent}; use crate::{admin_command, admin_command_dispatch}; diff --git a/src/admin/query/users.rs b/src/admin/query/users.rs index c517d9dd..5995bc62 100644 --- a/src/admin/query/users.rs +++ b/src/admin/query/users.rs @@ -2,7 +2,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::stream::StreamExt; use ruma::{ - events::room::message::RoomMessageEventContent, OwnedDeviceId, OwnedRoomId, OwnedUserId, + OwnedDeviceId, OwnedRoomId, OwnedUserId, events::room::message::RoomMessageEventContent, }; use crate::{admin_command, admin_command_dispatch}; diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs index d3b956e1..6262f33e 100644 --- a/src/admin/room/alias.rs +++ b/src/admin/room/alias.rs @@ -4,10 +4,10 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; use ruma::{ - events::room::message::RoomMessageEventContent, OwnedRoomAliasId, OwnedRoomId, RoomId, + OwnedRoomAliasId, OwnedRoomId, RoomId, events::room::message::RoomMessageEventContent, }; -use crate::{escape_html, Command}; +use crate::{Command, escape_html}; #[derive(Debug, Subcommand)] pub(crate) enum RoomAliasCommand { diff --git a/src/admin/room/commands.rs b/src/admin/room/commands.rs index b58d04c5..b5c303c8 100644 --- a/src/admin/room/commands.rs +++ b/src/admin/room/commands.rs @@ -1,8 +1,8 @@ use conduwuit::Result; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, OwnedRoomId}; +use ruma::{OwnedRoomId, events::room::message::RoomMessageEventContent}; -use crate::{admin_command, get_room_info, PAGE_SIZE}; +use crate::{PAGE_SIZE, admin_command, get_room_info}; #[admin_command] pub(super) async fn list_rooms( diff --git a/src/admin/room/directory.rs b/src/admin/room/directory.rs index 791b9204..e9c23a1d 100644 --- a/src/admin/room/directory.rs +++ b/src/admin/room/directory.rs @@ -1,9 +1,9 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, RoomId}; +use ruma::{RoomId, events::room::message::RoomMessageEventContent}; -use crate::{get_room_info, Command, PAGE_SIZE}; +use crate::{Command, PAGE_SIZE, get_room_info}; #[derive(Debug, Subcommand)] pub(crate) enum RoomDirectoryCommand { diff --git a/src/admin/room/info.rs b/src/admin/room/info.rs index 34abf8a9..a39728fe 100644 --- a/src/admin/room/info.rs +++ b/src/admin/room/info.rs @@ -1,7 +1,7 @@ use clap::Subcommand; -use conduwuit::{utils::ReadyExt, Result}; +use conduwuit::{Result, utils::ReadyExt}; use futures::StreamExt; -use ruma::{events::room::message::RoomMessageEventContent, RoomId}; +use ruma::{RoomId, events::room::message::RoomMessageEventContent}; use crate::{admin_command, admin_command_dispatch}; diff --git a/src/admin/room/moderation.rs b/src/admin/room/moderation.rs index bf54505e..ee132590 100644 --- a/src/admin/room/moderation.rs +++ b/src/admin/room/moderation.rs @@ -1,14 +1,14 @@ use api::client::leave_room; use clap::Subcommand; use conduwuit::{ - debug, error, info, + Result, debug, error, info, utils::{IterStream, ReadyExt}, - warn, Result, + warn, }; use 
futures::StreamExt; use ruma::{ - events::room::message::RoomMessageEventContent, OwnedRoomId, RoomAliasId, RoomId, - RoomOrAliasId, + OwnedRoomId, RoomAliasId, RoomId, RoomOrAliasId, + events::room::message::RoomMessageEventContent, }; use crate::{admin_command, admin_command_dispatch, get_room_info}; @@ -124,41 +124,42 @@ async fn ban_room( locally, if not using get_alias_helper to fetch room ID remotely" ); - let room_id = if let Ok(room_id) = self + let room_id = match self .services .rooms .alias .resolve_local_alias(room_alias) .await { - room_id - } else { - debug!( - "We don't have this room alias to a room ID locally, attempting to fetch room \ - ID over federation" - ); + | Ok(room_id) => room_id, + | _ => { + debug!( + "We don't have this room alias to a room ID locally, attempting to fetch \ + room ID over federation" + ); - match self - .services - .rooms - .alias - .resolve_alias(room_alias, None) - .await - { - | Ok((room_id, servers)) => { - debug!( - ?room_id, - ?servers, - "Got federation response fetching room ID for {room_id}" - ); - room_id - }, - | Err(e) => { - return Ok(RoomMessageEventContent::notice_plain(format!( - "Failed to resolve room alias {room_alias} to a room ID: {e}" - ))); - }, - } + match self + .services + .rooms + .alias + .resolve_alias(room_alias, None) + .await + { + | Ok((room_id, servers)) => { + debug!( + ?room_id, + ?servers, + "Got federation response fetching room ID for {room_id}" + ); + room_id + }, + | Err(e) => { + return Ok(RoomMessageEventContent::notice_plain(format!( + "Failed to resolve room alias {room_alias} to a room ID: {e}" + ))); + }, + } + }, }; self.services.rooms.metadata.ban_room(&room_id, true); @@ -321,51 +322,55 @@ async fn ban_list_of_rooms( if room_alias_or_id.is_room_alias_id() { match RoomAliasId::parse(room_alias_or_id) { | Ok(room_alias) => { - let room_id = if let Ok(room_id) = self + let room_id = match self .services .rooms .alias .resolve_local_alias(room_alias) .await { - room_id - } else { - debug!( - "We don't have this room alias to a room ID locally, \ - attempting to fetch room ID over federation" - ); + | Ok(room_id) => room_id, + | _ => { + debug!( + "We don't have this room alias to a room ID locally, \ + attempting to fetch room ID over federation" + ); - match self - .services - .rooms - .alias - .resolve_alias(room_alias, None) - .await - { - | Ok((room_id, servers)) => { - debug!( - ?room_id, - ?servers, - "Got federation response fetching room ID for {room}", - ); - room_id - }, - | Err(e) => { - // don't fail if force blocking - if force { - warn!( - "Failed to resolve room alias {room} to a room \ - ID: {e}" + match self + .services + .rooms + .alias + .resolve_alias(room_alias, None) + .await + { + | Ok((room_id, servers)) => { + debug!( + ?room_id, + ?servers, + "Got federation response fetching room ID for \ + {room}", ); - continue; - } + room_id + }, + | Err(e) => { + // don't fail if force blocking + if force { + warn!( + "Failed to resolve room alias {room} to a \ + room ID: {e}" + ); + continue; + } - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to resolve room alias {room} to a room ID: \ - {e}" - ))); - }, - } + return Ok(RoomMessageEventContent::text_plain( + format!( + "Failed to resolve room alias {room} to a \ + room ID: {e}" + ), + )); + }, + } + }, }; room_ids.push(room_id); @@ -537,41 +542,42 @@ async fn unban_room( locally, if not using get_alias_helper to fetch room ID remotely" ); - let room_id = if let Ok(room_id) = self + let room_id = match self 
.services .rooms .alias .resolve_local_alias(room_alias) .await { - room_id - } else { - debug!( - "We don't have this room alias to a room ID locally, attempting to fetch room \ - ID over federation" - ); + | Ok(room_id) => room_id, + | _ => { + debug!( + "We don't have this room alias to a room ID locally, attempting to fetch \ + room ID over federation" + ); - match self - .services - .rooms - .alias - .resolve_alias(room_alias, None) - .await - { - | Ok((room_id, servers)) => { - debug!( - ?room_id, - ?servers, - "Got federation response fetching room ID for room {room}" - ); - room_id - }, - | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to resolve room alias {room} to a room ID: {e}" - ))); - }, - } + match self + .services + .rooms + .alias + .resolve_alias(room_alias, None) + .await + { + | Ok((room_id, servers)) => { + debug!( + ?room_id, + ?servers, + "Got federation response fetching room ID for room {room}" + ); + room_id + }, + | Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "Failed to resolve room alias {room} to a room ID: {e}" + ))); + }, + } + }, }; self.services.rooms.metadata.ban_room(&room_id, false); diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index d4cfa7d5..17bf9ec0 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -1,6 +1,6 @@ use std::{fmt::Write, path::PathBuf, sync::Arc}; -use conduwuit::{info, utils::time, warn, Err, Result}; +use conduwuit::{Err, Result, info, utils::time, warn}; use ruma::events::room::message::RoomMessageEventContent; use crate::admin_command; diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 64767a36..8cb8edc3 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -2,23 +2,23 @@ use std::{collections::BTreeMap, fmt::Write as _}; use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room}; use conduwuit::{ - debug_warn, error, info, is_equal_to, + PduBuilder, Result, debug_warn, error, info, is_equal_to, utils::{self, ReadyExt}, - warn, PduBuilder, Result, + warn, }; use conduwuit_api::client::{leave_all_rooms, update_avatar_url, update_displayname}; use futures::StreamExt; use ruma::{ + EventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, RoomId, UserId, events::{ + RoomAccountDataEventType, StateEventType, room::{ message::RoomMessageEventContent, power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, redaction::RoomRedactionEventContent, }, tag::{TagEvent, TagEventContent, TagInfo}, - RoomAccountDataEventType, StateEventType, }, - EventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, RoomId, UserId, }; use crate::{ diff --git a/src/admin/utils.rs b/src/admin/utils.rs index eba33fba..a2696c50 100644 --- a/src/admin/utils.rs +++ b/src/admin/utils.rs @@ -1,4 +1,4 @@ -use conduwuit_core::{err, Err, Result}; +use conduwuit_core::{Err, Result, err}; use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; use service::Services; diff --git a/src/api/client/account.rs b/src/api/client/account.rs index cb25b276..cb49a6db 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -3,34 +3,35 @@ use std::fmt::Write; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - debug_info, error, info, is_equal_to, utils, utils::ReadyExt, warn, Error, PduBuilder, Result, + Error, PduBuilder, Result, debug_info, error, info, is_equal_to, utils, utils::ReadyExt, warn, }; use futures::{FutureExt, StreamExt}; use 
register::RegistrationKind; use ruma::{ + OwnedRoomId, UserId, api::client::{ account::{ - change_password, check_registration_token_validity, deactivate, get_3pids, - get_username_availability, + ThirdPartyIdRemovalStatus, change_password, check_registration_token_validity, + deactivate, get_3pids, get_username_availability, register::{self, LoginType}, request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn, - whoami, ThirdPartyIdRemovalStatus, + whoami, }, error::ErrorKind, uiaa::{AuthFlow, AuthType, UiaaInfo}, }, events::{ + GlobalAccountDataEventType, StateEventType, room::{ message::RoomMessageEventContent, power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, }, - GlobalAccountDataEventType, StateEventType, }, - push, OwnedRoomId, UserId, + push, }; use service::Services; -use super::{join_room_by_id_helper, DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; +use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH, join_room_by_id_helper}; use crate::Ruma; const RANDOM_USER_ID_LENGTH: usize = 10; @@ -218,12 +219,20 @@ pub(crate) async fn register_route( }; if body.body.login_type == Some(LoginType::ApplicationService) { - if let Some(ref info) = body.appservice_info { - if !info.is_user_match(&user_id) { - return Err(Error::BadRequest(ErrorKind::Exclusive, "User is not in namespace.")); - } - } else { - return Err(Error::BadRequest(ErrorKind::MissingToken, "Missing appservice token.")); + match body.appservice_info { + | Some(ref info) => + if !info.is_user_match(&user_id) { + return Err(Error::BadRequest( + ErrorKind::Exclusive, + "User is not in namespace.", + )); + }, + | _ => { + return Err(Error::BadRequest( + ErrorKind::MissingToken, + "Missing appservice token.", + )); + }, } } else if services.appservice.is_exclusive_user_id(&user_id).await { return Err(Error::BadRequest(ErrorKind::Exclusive, "User ID reserved by appservice.")); @@ -256,33 +265,39 @@ pub(crate) async fn register_route( }; if !skip_auth { - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth( - &UserId::parse_with_server_name("", services.globals.server_name()) - .expect("we know this is valid"), - "".into(), - auth, - &uiaainfo, - ) - .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services.uiaa.create( - &UserId::parse_with_server_name("", services.globals.server_name()) - .expect("we know this is valid"), - "".into(), - &uiaainfo, - &json, - ); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth( + &UserId::parse_with_server_name("", services.globals.server_name()) + .expect("we know this is valid"), + "".into(), + auth, + &uiaainfo, + ) + .await?; + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! 
+ }, + | _ => match body.json_body { + | Some(json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services.uiaa.create( + &UserId::parse_with_server_name("", services.globals.server_name()) + .expect("we know this is valid"), + "".into(), + &uiaainfo, + &json, + ); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + }, + }, } } @@ -463,7 +478,7 @@ pub(crate) async fn register_route( } if let Some(room_server_name) = room.server_name() { - if let Err(e) = join_room_by_id_helper( + match join_room_by_id_helper( &services, &user_id, &room_id, @@ -475,10 +490,15 @@ pub(crate) async fn register_route( .boxed() .await { - // don't return this error so we don't fail registrations - error!("Failed to automatically join room {room} for user {user_id}: {e}"); - } else { - info!("Automatically joined room {room} for user {user_id}"); + | Err(e) => { + // don't return this error so we don't fail registrations + error!( + "Failed to automatically join room {room} for user {user_id}: {e}" + ); + }, + | _ => { + info!("Automatically joined room {room} for user {user_id}"); + }, }; } } @@ -532,26 +552,32 @@ pub(crate) async fn change_password_route( auth_error: None, }; - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + // Success! + }, + | _ => match body.json_body { + | Some(json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + }, + }, } services @@ -636,25 +662,31 @@ pub(crate) async fn deactivate_route( auth_error: None, }; - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! 
+ }, + | _ => match body.json_body { + | Some(json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + }, + }, } // Remove profile pictures and display name @@ -809,7 +841,7 @@ pub async fn full_user_deactivate( power_levels_content.users.remove(user_id); // ignore errors so deactivation doesn't fail - if let Err(e) = services + match services .rooms .timeline .build_and_append_pdu( @@ -820,9 +852,12 @@ pub async fn full_user_deactivate( ) .await { - warn!(%room_id, %user_id, "Failed to demote user's own power level: {e}"); - } else { - info!("Demoted {user_id} in {room_id} as part of account deactivation"); + | Err(e) => { + warn!(%room_id, %user_id, "Failed to demote user's own power level: {e}"); + }, + | _ => { + info!("Demoted {user_id} in {room_id} as part of account deactivation"); + }, } } } diff --git a/src/api/client/account_data.rs b/src/api/client/account_data.rs index 9f84f227..60c18b37 100644 --- a/src/api/client/account_data.rs +++ b/src/api/client/account_data.rs @@ -1,6 +1,7 @@ use axum::extract::State; -use conduwuit::{err, Err}; +use conduwuit::{Err, err}; use ruma::{ + RoomId, UserId, api::client::config::{ get_global_account_data, get_room_account_data, set_global_account_data, set_room_account_data, @@ -10,12 +11,11 @@ use ruma::{ GlobalAccountDataEventType, RoomAccountDataEventType, }, serde::Raw, - RoomId, UserId, }; use serde::Deserialize; use serde_json::{json, value::RawValue as RawJsonValue}; -use crate::{service::Services, Result, Ruma}; +use crate::{Result, Ruma, service::Services}; /// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}` /// diff --git a/src/api/client/alias.rs b/src/api/client/alias.rs index e1af416e..319e5141 100644 --- a/src/api/client/alias.rs +++ b/src/api/client/alias.rs @@ -1,10 +1,10 @@ use axum::extract::State; -use conduwuit::{debug, Err, Result}; +use conduwuit::{Err, Result, debug}; use futures::StreamExt; use rand::seq::SliceRandom; use ruma::{ - api::client::alias::{create_alias, delete_alias, get_alias}, OwnedServerName, RoomAliasId, RoomId, + api::client::alias::{create_alias, delete_alias, get_alias}, }; use service::Services; @@ -128,18 +128,26 @@ async fn room_available_servers( // insert our server as the very first choice if in list, else check if we can // prefer the room alias server first - if let Some(server_index) = servers + match servers .iter() .position(|server_name| services.globals.server_is_ours(server_name)) { - servers.swap_remove(server_index); - servers.insert(0, services.globals.server_name().to_owned()); - } else if let Some(alias_server_index) = servers - .iter() - .position(|server| server == room_alias.server_name()) - { - servers.swap_remove(alias_server_index); - servers.insert(0, room_alias.server_name().into()); + | Some(server_index) => { + servers.swap_remove(server_index); + servers.insert(0, services.globals.server_name().to_owned()); + }, + | _ => { + match servers + .iter() + .position(|server| server == room_alias.server_name()) + { + | Some(alias_server_index) => { + servers.swap_remove(alias_server_index); + servers.insert(0, room_alias.server_name().into()); + }, + | _ => {}, + } + }, } servers diff --git a/src/api/client/appservice.rs 
b/src/api/client/appservice.rs index e4071ab0..84955309 100644 --- a/src/api/client/appservice.rs +++ b/src/api/client/appservice.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduwuit::{err, Err, Result}; +use conduwuit::{Err, Result, err}; use ruma::api::{appservice::ping, client::appservice::request_ping}; use crate::Ruma; diff --git a/src/api/client/backup.rs b/src/api/client/backup.rs index d330952d..714e3f86 100644 --- a/src/api/client/backup.rs +++ b/src/api/client/backup.rs @@ -1,6 +1,7 @@ use axum::extract::State; -use conduwuit::{err, Err}; +use conduwuit::{Err, err}; use ruma::{ + UInt, api::client::backup::{ add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, create_backup_version, delete_backup_keys, delete_backup_keys_for_room, @@ -8,7 +9,6 @@ use ruma::{ get_backup_keys_for_room, get_backup_keys_for_session, get_latest_backup_info, update_backup_version, }, - UInt, }; use crate::{Result, Ruma}; diff --git a/src/api/client/capabilities.rs b/src/api/client/capabilities.rs index 7188aa23..e20af21b 100644 --- a/src/api/client/capabilities.rs +++ b/src/api/client/capabilities.rs @@ -3,11 +3,11 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{Result, Server}; use ruma::{ + RoomVersionId, api::client::discovery::get_capabilities::{ self, Capabilities, GetLoginTokenCapability, RoomVersionStability, RoomVersionsCapability, ThirdPartyIdChangesCapability, }, - RoomVersionId, }; use serde_json::json; diff --git a/src/api/client/context.rs b/src/api/client/context.rs index 7256683f..3f16c850 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -1,23 +1,22 @@ use axum::extract::State; use conduwuit::{ - at, err, ref_at, + Err, PduEvent, Result, at, err, ref_at, utils::{ + IterStream, future::TryExtExt, stream::{BroadbandExt, ReadyExt, TryIgnore, WidebandExt}, - IterStream, }, - Err, PduEvent, Result, }; use futures::{ - future::{join, join3, try_join3, OptionFuture}, FutureExt, StreamExt, TryFutureExt, TryStreamExt, + future::{OptionFuture, join, join3, try_join3}, }; -use ruma::{api::client::context::get_context, events::StateEventType, OwnedEventId, UserId}; +use ruma::{OwnedEventId, UserId, api::client::context::get_context, events::StateEventType}; use service::rooms::{lazy_loading, lazy_loading::Options, short::ShortStateKey}; use crate::{ - client::message::{event_filter, ignored_filter, lazy_loading_witness, visibility_filter}, Ruma, + client::message::{event_filter, ignored_filter, lazy_loading_witness, visibility_filter}, }; const LIMIT_MAX: usize = 100; diff --git a/src/api/client/device.rs b/src/api/client/device.rs index bb0773dd..6a845aed 100644 --- a/src/api/client/device.rs +++ b/src/api/client/device.rs @@ -1,18 +1,18 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{err, Err}; +use conduwuit::{Err, err}; use futures::StreamExt; use ruma::{ + MilliSecondsSinceUnixEpoch, api::client::{ device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, error::ErrorKind, uiaa::{AuthFlow, AuthType, UiaaInfo}, }, - MilliSecondsSinceUnixEpoch, }; use super::SESSION_ID_LENGTH; -use crate::{utils, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, utils}; /// # `GET /_matrix/client/r0/devices` /// @@ -107,25 +107,31 @@ pub(crate) async fn delete_device_route( auth_error: None, }; - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + match &body.auth { + | 
Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err!(Uiaa(uiaainfo)); - } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + if !worked { + return Err!(Uiaa(uiaainfo)); + } + // Success! + }, + | _ => match body.json_body { + | Some(json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json); - return Err!(Uiaa(uiaainfo)); - } else { - return Err!(Request(NotJson("Not json."))); + return Err!(Uiaa(uiaainfo)); + }, + | _ => { + return Err!(Request(NotJson("Not json."))); + }, + }, } services @@ -164,25 +170,31 @@ pub(crate) async fn delete_devices_route( auth_error: None, }; - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! + }, + | _ => match body.json_body { + | Some(json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + }, + }, } for device_id in &body.devices { diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 9166eed9..136c5961 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -1,8 +1,9 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{info, warn, Err, Error, Result}; +use conduwuit::{Err, Error, Result, info, warn}; use futures::{StreamExt, TryFutureExt}; use ruma::{ + OwnedRoomId, RoomId, ServerName, UInt, UserId, api::{ client::{ directory::{ @@ -16,13 +17,13 @@ use ruma::{ }, directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork}, events::{ + StateEventType, room::{ join_rules::{JoinRule, RoomJoinRulesEventContent}, power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, }, - StateEventType, }, - uint, OwnedRoomId, RoomId, ServerName, UInt, UserId, + uint, }; use service::Services; @@ -365,30 +366,34 @@ async fn user_can_publish_room( user_id: &UserId, room_id: &RoomId, ) -> Result { - if let Ok(event) = services + match services .rooms .state_accessor .room_state_get(room_id, &StateEventType::RoomPowerLevels, "") .await { - serde_json::from_str(event.content.get()) + | Ok(event) => serde_json::from_str(event.content.get()) .map_err(|_| Error::bad_database("Invalid event content for m.room.power_levels")) .map(|content: RoomPowerLevelsEventContent| { RoomPowerLevels::from(content) .user_can_send_state(user_id, StateEventType::RoomHistoryVisibility) - }) - } else if let Ok(event) = services - .rooms - .state_accessor - 
.room_state_get(room_id, &StateEventType::RoomCreate, "") - .await - { - Ok(event.sender == user_id) - } else { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "User is not allowed to publish this room", - )); + }), + | _ => { + match services + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomCreate, "") + .await + { + | Ok(event) => Ok(event.sender == user_id), + | _ => { + return Err(Error::BadRequest( + ErrorKind::forbidden(), + "User is not allowed to publish this room", + )); + }, + } + }, } } diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 801ae32b..6f20153b 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -1,9 +1,10 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use axum::extract::State; -use conduwuit::{debug, err, info, result::NotFound, utils, Err, Error, Result}; -use futures::{stream::FuturesUnordered, StreamExt}; +use conduwuit::{Err, Error, Result, debug, err, info, result::NotFound, utils}; +use futures::{StreamExt, stream::FuturesUnordered}; use ruma::{ + OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, api::{ client::{ error::ErrorKind, @@ -17,14 +18,13 @@ use ruma::{ }, encryption::CrossSigningKey, serde::Raw, - OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, }; use serde_json::json; use super::SESSION_ID_LENGTH; use crate::{ - service::{users::parse_master_key, Services}, Ruma, + service::{Services, users::parse_master_key}, }; /// # `POST /_matrix/client/r0/keys/upload` @@ -126,7 +126,7 @@ pub(crate) async fn upload_signing_keys_route( auth_error: None, }; - if let Ok(exists) = check_for_new_keys( + match check_for_new_keys( services, sender_user, body.self_signing_key.as_ref(), @@ -136,32 +136,45 @@ pub(crate) async fn upload_signing_keys_route( .await .inspect_err(|e| info!(?e)) { - if let Some(result) = exists { - // No-op, they tried to reupload the same set of keys - // (lost connection for example) - return Ok(result); - } - debug!("Skipping UIA in accordance with MSC3967, the user didn't have any existing keys"); - // Some of the keys weren't found, so we let them upload - } else if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + | Ok(exists) => { + if let Some(result) = exists { + // No-op, they tried to reupload the same set of keys + // (lost connection for example) + return Ok(result); + } + debug!( + "Skipping UIA in accordance with MSC3967, the user didn't have any existing keys" + ); + // Some of the keys weren't found, so we let them upload + }, + | _ => { + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! 
+ }, + | _ => match body.json_body { + | Some(json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + }, + }, + } + }, } services @@ -471,37 +484,40 @@ where .collect(); while let Some((server, response)) = futures.next().await { - if let Ok(response) = response { - for (user, master_key) in response.master_keys { - let (master_key_id, mut master_key) = parse_master_key(&user, &master_key)?; + match response { + | Ok(response) => { + for (user, master_key) in response.master_keys { + let (master_key_id, mut master_key) = parse_master_key(&user, &master_key)?; - if let Ok(our_master_key) = services - .users - .get_key(&master_key_id, sender_user, &user, &allowed_signatures) - .await - { - let (_, mut our_master_key) = parse_master_key(&user, &our_master_key)?; - master_key.signatures.append(&mut our_master_key.signatures); + if let Ok(our_master_key) = services + .users + .get_key(&master_key_id, sender_user, &user, &allowed_signatures) + .await + { + let (_, mut our_master_key) = parse_master_key(&user, &our_master_key)?; + master_key.signatures.append(&mut our_master_key.signatures); + } + let json = serde_json::to_value(master_key).expect("to_value always works"); + let raw = serde_json::from_value(json).expect("Raw::from_value always works"); + services + .users + .add_cross_signing_keys( + &user, &raw, &None, &None, + false, /* Dont notify. A notification would trigger another key + * request resulting in an endless loop */ + ) + .await?; + if let Some(raw) = raw { + master_keys.insert(user.clone(), raw); + } } - let json = serde_json::to_value(master_key).expect("to_value always works"); - let raw = serde_json::from_value(json).expect("Raw::from_value always works"); - services - .users - .add_cross_signing_keys( - &user, &raw, &None, &None, - false, /* Dont notify. 
A notification would trigger another key request - * resulting in an endless loop */ - ) - .await?; - if let Some(raw) = raw { - master_keys.insert(user.clone(), raw); - } - } - self_signing_keys.extend(response.self_signing_keys); - device_keys.extend(response.device_keys); - } else { - failures.insert(server.to_string(), json!({})); + self_signing_keys.extend(response.self_signing_keys); + device_keys.extend(response.device_keys); + }, + | _ => { + failures.insert(server.to_string(), json!({})); + }, } } diff --git a/src/api/client/media.rs b/src/api/client/media.rs index 0cff8185..94572413 100644 --- a/src/api/client/media.rs +++ b/src/api/client/media.rs @@ -3,16 +3,16 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - err, + Err, Result, err, utils::{self, content_disposition::make_content_disposition, math::ruma_from_usize}, - Err, Result, }; use conduwuit_service::{ - media::{Dim, FileMeta, CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, MXC_LENGTH}, Services, + media::{CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, Dim, FileMeta, MXC_LENGTH}, }; use reqwest::Url; use ruma::{ + Mxc, UserId, api::client::{ authenticated_media::{ get_content, get_content_as_filename, get_content_thumbnail, get_media_config, @@ -20,7 +20,6 @@ use ruma::{ }, media::create_content, }, - Mxc, UserId, }; use crate::Ruma; diff --git a/src/api/client/media_legacy.rs b/src/api/client/media_legacy.rs index 4fa0b52e..d9f24f77 100644 --- a/src/api/client/media_legacy.rs +++ b/src/api/client/media_legacy.rs @@ -3,21 +3,20 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - err, + Err, Result, err, utils::{content_disposition::make_content_disposition, math::ruma_from_usize}, - Err, Result, }; -use conduwuit_service::media::{Dim, FileMeta, CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN}; +use conduwuit_service::media::{CACHE_CONTROL_IMMUTABLE, CORP_CROSS_ORIGIN, Dim, FileMeta}; use reqwest::Url; use ruma::{ + Mxc, api::client::media::{ create_content, get_content, get_content_as_filename, get_content_thumbnail, get_media_config, get_media_preview, }, - Mxc, }; -use crate::{client::create_content_route, Ruma, RumaResponse}; +use crate::{Ruma, RumaResponse, client::create_content_route}; /// # `GET /_matrix/media/v3/config` /// @@ -142,46 +141,52 @@ pub(crate) async fn get_content_legacy_route( media_id: &body.media_id, }; - if let Some(FileMeta { - content, - content_type, - content_disposition, - }) = services.media.get(&mxc).await? - { - let content_disposition = - make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None); + match services.media.get(&mxc).await? 
{ + | Some(FileMeta { + content, + content_type, + content_disposition, + }) => { + let content_disposition = make_content_disposition( + content_disposition.as_ref(), + content_type.as_deref(), + None, + ); - Ok(get_content::v3::Response { - file: content.expect("entire file contents"), - content_type: content_type.map(Into::into), - content_disposition: Some(content_disposition), - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - }) - } else if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { - let response = services - .media - .fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms) - .await - .map_err(|e| { - err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) - })?; + Ok(get_content::v3::Response { + file: content.expect("entire file contents"), + content_type: content_type.map(Into::into), + content_disposition: Some(content_disposition), + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + }) + }, + | _ => + if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { + let response = services + .media + .fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms) + .await + .map_err(|e| { + err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) + })?; - let content_disposition = make_content_disposition( - response.content_disposition.as_ref(), - response.content_type.as_deref(), - None, - ); + let content_disposition = make_content_disposition( + response.content_disposition.as_ref(), + response.content_type.as_deref(), + None, + ); - Ok(get_content::v3::Response { - file: response.file, - content_type: response.content_type, - content_disposition: Some(content_disposition), - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - }) - } else { - Err!(Request(NotFound("Media not found."))) + Ok(get_content::v3::Response { + file: response.file, + content_type: response.content_type, + content_disposition: Some(content_disposition), + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + }) + } else { + Err!(Request(NotFound("Media not found."))) + }, } } @@ -227,49 +232,52 @@ pub(crate) async fn get_content_as_filename_legacy_route( media_id: &body.media_id, }; - if let Some(FileMeta { - content, - content_type, - content_disposition, - }) = services.media.get(&mxc).await? - { - let content_disposition = make_content_disposition( - content_disposition.as_ref(), - content_type.as_deref(), - Some(&body.filename), - ); + match services.media.get(&mxc).await? 
{ + | Some(FileMeta { + content, + content_type, + content_disposition, + }) => { + let content_disposition = make_content_disposition( + content_disposition.as_ref(), + content_type.as_deref(), + Some(&body.filename), + ); - Ok(get_content_as_filename::v3::Response { - file: content.expect("entire file contents"), - content_type: content_type.map(Into::into), - content_disposition: Some(content_disposition), - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - }) - } else if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { - let response = services - .media - .fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms) - .await - .map_err(|e| { - err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) - })?; + Ok(get_content_as_filename::v3::Response { + file: content.expect("entire file contents"), + content_type: content_type.map(Into::into), + content_disposition: Some(content_disposition), + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + }) + }, + | _ => + if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { + let response = services + .media + .fetch_remote_content_legacy(&mxc, body.allow_redirect, body.timeout_ms) + .await + .map_err(|e| { + err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) + })?; - let content_disposition = make_content_disposition( - response.content_disposition.as_ref(), - response.content_type.as_deref(), - None, - ); + let content_disposition = make_content_disposition( + response.content_disposition.as_ref(), + response.content_type.as_deref(), + None, + ); - Ok(get_content_as_filename::v3::Response { - content_disposition: Some(content_disposition), - content_type: response.content_type, - file: response.file, - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - }) - } else { - Err!(Request(NotFound("Media not found."))) + Ok(get_content_as_filename::v3::Response { + content_disposition: Some(content_disposition), + content_type: response.content_type, + file: response.file, + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + }) + } else { + Err!(Request(NotFound("Media not found."))) + }, } } @@ -315,46 +323,52 @@ pub(crate) async fn get_content_thumbnail_legacy_route( }; let dim = Dim::from_ruma(body.width, body.height, body.method.clone())?; - if let Some(FileMeta { - content, - content_type, - content_disposition, - }) = services.media.get_thumbnail(&mxc, &dim).await? - { - let content_disposition = - make_content_disposition(content_disposition.as_ref(), content_type.as_deref(), None); + match services.media.get_thumbnail(&mxc, &dim).await? 
{ + | Some(FileMeta { + content, + content_type, + content_disposition, + }) => { + let content_disposition = make_content_disposition( + content_disposition.as_ref(), + content_type.as_deref(), + None, + ); - Ok(get_content_thumbnail::v3::Response { - file: content.expect("entire file contents"), - content_type: content_type.map(Into::into), - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - content_disposition: Some(content_disposition), - }) - } else if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { - let response = services - .media - .fetch_remote_thumbnail_legacy(&body) - .await - .map_err(|e| { - err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) - })?; + Ok(get_content_thumbnail::v3::Response { + file: content.expect("entire file contents"), + content_type: content_type.map(Into::into), + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + content_disposition: Some(content_disposition), + }) + }, + | _ => + if !services.globals.server_is_ours(&body.server_name) && body.allow_remote { + let response = services + .media + .fetch_remote_thumbnail_legacy(&body) + .await + .map_err(|e| { + err!(Request(NotFound(debug_warn!(%mxc, "Fetching media failed: {e:?}")))) + })?; - let content_disposition = make_content_disposition( - response.content_disposition.as_ref(), - response.content_type.as_deref(), - None, - ); + let content_disposition = make_content_disposition( + response.content_disposition.as_ref(), + response.content_type.as_deref(), + None, + ); - Ok(get_content_thumbnail::v3::Response { - file: response.file, - content_type: response.content_type, - cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), - cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), - content_disposition: Some(content_disposition), - }) - } else { - Err!(Request(NotFound("Media not found."))) + Ok(get_content_thumbnail::v3::Response { + file: response.file, + content_type: response.content_type, + cross_origin_resource_policy: Some(CORP_CROSS_ORIGIN.into()), + cache_control: Some(CACHE_CONTROL_IMMUTABLE.into()), + content_disposition: Some(content_disposition), + }) + } else { + Err!(Request(NotFound("Media not found."))) + }, } } diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 26736fb5..9c2693dc 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -9,51 +9,51 @@ use std::{ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - at, debug, debug_info, debug_warn, err, error, info, - pdu::{gen_event_id_canonical_json, PduBuilder}, + Err, PduEvent, Result, StateKey, at, debug, debug_info, debug_warn, err, error, info, + pdu::{PduBuilder, gen_event_id_canonical_json}, result::FlatOk, state_res, trace, - utils::{self, shuffle, IterStream, ReadyExt}, - warn, Err, PduEvent, Result, StateKey, + utils::{self, IterStream, ReadyExt, shuffle}, + warn, }; -use futures::{join, FutureExt, StreamExt, TryFutureExt}; +use futures::{FutureExt, StreamExt, TryFutureExt, join}; use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, + OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, api::{ client::{ error::ErrorKind, knock::knock_room, membership::{ - ban_user, forget_room, get_member_events, invite_user, join_room_by_id, - join_room_by_id_or_alias, + ThirdPartySigned, ban_user, forget_room, 
get_member_events, invite_user, + join_room_by_id, join_room_by_id_or_alias, joined_members::{self, v3::RoomMember}, - joined_rooms, kick_user, leave_room, unban_user, ThirdPartySigned, + joined_rooms, kick_user, leave_room, unban_user, }, }, federation::{self, membership::create_invite}, }, canonical_json::to_canonical_value, events::{ + StateEventType, room::{ join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, message::RoomMessageEventContent, }, - StateEventType, }, - CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, - OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, }; use service::{ + Services, appservice::RegistrationInfo, pdu::gen_event_id, rooms::{ state::RoomMutexGuard, state_compressor::{CompressedState, HashSetCompressStateEvent}, }, - Services, }; -use crate::{client::full_user_deactivate, Ruma}; +use crate::{Ruma, client::full_user_deactivate}; /// Checks if the room is banned in any way possible and the sender user is not /// an admin. @@ -507,43 +507,54 @@ pub(crate) async fn invite_user_route( ) .await?; - if let invite_user::v3::InvitationRecipient::UserId { user_id } = &body.recipient { - let sender_ignored_recipient = services.users.user_is_ignored(sender_user, user_id); - let recipient_ignored_by_sender = services.users.user_is_ignored(user_id, sender_user); + match &body.recipient { + | invite_user::v3::InvitationRecipient::UserId { user_id } => { + let sender_ignored_recipient = services.users.user_is_ignored(sender_user, user_id); + let recipient_ignored_by_sender = + services.users.user_is_ignored(user_id, sender_user); - let (sender_ignored_recipient, recipient_ignored_by_sender) = - join!(sender_ignored_recipient, recipient_ignored_by_sender); + let (sender_ignored_recipient, recipient_ignored_by_sender) = + join!(sender_ignored_recipient, recipient_ignored_by_sender); - if sender_ignored_recipient { - return Err!(Request(Forbidden( - "You cannot invite users you have ignored to rooms." - ))); - } - - if let Ok(target_user_membership) = services - .rooms - .state_accessor - .get_member(&body.room_id, user_id) - .await - { - if target_user_membership.membership == MembershipState::Ban { - return Err!(Request(Forbidden("User is banned from this room."))); + if sender_ignored_recipient { + return Err!(Request(Forbidden( + "You cannot invite users you have ignored to rooms." 
+ ))); } - } - if recipient_ignored_by_sender { - // silently drop the invite to the recipient if they've been ignored by the - // sender, pretend it worked - return Ok(invite_user::v3::Response {}); - } + if let Ok(target_user_membership) = services + .rooms + .state_accessor + .get_member(&body.room_id, user_id) + .await + { + if target_user_membership.membership == MembershipState::Ban { + return Err!(Request(Forbidden("User is banned from this room."))); + } + } - invite_helper(&services, sender_user, user_id, &body.room_id, body.reason.clone(), false) + if recipient_ignored_by_sender { + // silently drop the invite to the recipient if they've been ignored by the + // sender, pretend it worked + return Ok(invite_user::v3::Response {}); + } + + invite_helper( + &services, + sender_user, + user_id, + &body.room_id, + body.reason.clone(), + false, + ) .boxed() .await?; - Ok(invite_user::v3::Response {}) - } else { - Err!(Request(NotFound("User not found."))) + Ok(invite_user::v3::Response {}) + }, + | _ => { + Err!(Request(NotFound("User not found."))) + }, } } @@ -1830,38 +1841,46 @@ async fn remote_leave_room( .collect() .await; - if let Ok(invite_state) = services + match services .rooms .state_cache .invite_state(user_id, room_id) .await { - servers.extend( - invite_state - .iter() - .filter_map(|event| event.get_field("sender").ok().flatten()) - .filter_map(|sender: &str| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()), - ); - } else if let Ok(knock_state) = services - .rooms - .state_cache - .knock_state(user_id, room_id) - .await - { - servers.extend( - knock_state - .iter() - .filter_map(|event| event.get_field("sender").ok().flatten()) - .filter_map(|sender: &str| UserId::parse(sender).ok()) - .filter_map(|sender| { - if !services.globals.user_is_local(sender) { - Some(sender.server_name().to_owned()) - } else { - None - } - }), - ); + | Ok(invite_state) => { + servers.extend( + invite_state + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()), + ); + }, + | _ => { + match services + .rooms + .state_cache + .knock_state(user_id, room_id) + .await + { + | Ok(knock_state) => { + servers.extend( + knock_state + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + .filter_map(|sender| { + if !services.globals.user_is_local(sender) { + Some(sender.server_name().to_owned()) + } else { + None + } + }), + ); + }, + | _ => {}, + } + }, } if let Some(room_id_server_name) = room_id.server_name() { diff --git a/src/api/client/message.rs b/src/api/client/message.rs index bb4e72dd..571a238a 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -1,30 +1,29 @@ use axum::extract::State; use conduwuit::{ - at, + Event, PduCount, PduEvent, Result, at, utils::{ + IterStream, ReadyExt, result::{FlatOk, LogErr}, stream::{BroadbandExt, TryIgnore, WidebandExt}, - IterStream, ReadyExt, }, - Event, PduCount, PduEvent, Result, }; -use futures::{future::OptionFuture, pin_mut, FutureExt, StreamExt, TryFutureExt}; +use futures::{FutureExt, StreamExt, TryFutureExt, future::OptionFuture, pin_mut}; use ruma::{ + RoomId, UserId, api::{ - client::{filter::RoomEventFilter, message::get_message_events}, Direction, + client::{filter::RoomEventFilter, message::get_message_events}, }, events::{AnyStateEvent, StateEventType, TimelineEventType, TimelineEventType::*}, serde::Raw, 
- RoomId, UserId, }; use service::{ + Services, rooms::{ lazy_loading, lazy_loading::{Options, Witness}, timeline::PdusIterItem, }, - Services, }; use crate::Ruma; diff --git a/src/api/client/presence.rs b/src/api/client/presence.rs index d19e6ae1..9b41a721 100644 --- a/src/api/client/presence.rs +++ b/src/api/client/presence.rs @@ -70,37 +70,38 @@ pub(crate) async fn get_presence_route( } } - if let Some(presence) = presence_event { - let status_msg = if presence - .content - .status_msg - .as_ref() - .is_some_and(String::is_empty) - { - None - } else { - presence.content.status_msg - }; - - let last_active_ago = match presence.content.currently_active { - | Some(true) => None, - | _ => presence + match presence_event { + | Some(presence) => { + let status_msg = if presence .content - .last_active_ago - .map(|millis| Duration::from_millis(millis.into())), - }; + .status_msg + .as_ref() + .is_some_and(String::is_empty) + { + None + } else { + presence.content.status_msg + }; - Ok(get_presence::v3::Response { - // TODO: Should ruma just use the presenceeventcontent type here? - status_msg, - currently_active: presence.content.currently_active, - last_active_ago, - presence: presence.content.presence, - }) - } else { - Err(Error::BadRequest( + let last_active_ago = match presence.content.currently_active { + | Some(true) => None, + | _ => presence + .content + .last_active_ago + .map(|millis| Duration::from_millis(millis.into())), + }; + + Ok(get_presence::v3::Response { + // TODO: Should ruma just use the presenceeventcontent type here? + status_msg, + currently_active: presence.content.currently_active, + last_active_ago, + presence: presence.content.presence, + }) + }, + | _ => Err(Error::BadRequest( ErrorKind::NotFound, "Presence state for this user was not found", - )) + )), } } diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index 584adfc1..12e5ebcc 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -2,12 +2,14 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ + Err, Error, Result, pdu::PduBuilder, - utils::{stream::TryIgnore, IterStream}, - warn, Err, Error, Result, + utils::{IterStream, stream::TryIgnore}, + warn, }; -use futures::{future::join3, StreamExt, TryStreamExt}; +use futures::{StreamExt, TryStreamExt, future::join3}; use ruma::{ + OwnedMxcUri, OwnedRoomId, UserId, api::{ client::{ error::ErrorKind, @@ -19,7 +21,6 @@ use ruma::{ }, events::room::member::{MembershipState, RoomMemberEventContent}, presence::PresenceState, - OwnedMxcUri, OwnedRoomId, UserId, }; use service::Services; diff --git a/src/api/client/push.rs b/src/api/client/push.rs index ed7371e4..384b9dbc 100644 --- a/src/api/client/push.rs +++ b/src/api/client/push.rs @@ -1,6 +1,7 @@ use axum::extract::State; -use conduwuit::{err, Err}; +use conduwuit::{Err, err}; use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, api::client::{ error::ErrorKind, push::{ @@ -10,14 +11,13 @@ use ruma::{ }, }, events::{ - push_rules::{PushRulesEvent, PushRulesEventContent}, GlobalAccountDataEventType, + push_rules::{PushRulesEvent, PushRulesEventContent}, }, push::{ InsertPushRuleError, PredefinedContentRuleId, PredefinedOverrideRuleId, RemovePushRuleError, Ruleset, }, - CanonicalJsonObject, CanonicalJsonValue, }; use service::Services; diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs index 89fe003a..d01327f6 100644 --- a/src/api/client/read_marker.rs +++ b/src/api/client/read_marker.rs @@ -1,14 +1,14 @@ use 
std::collections::BTreeMap;
 
 use axum::extract::State;
-use conduwuit::{err, Err, PduCount};
+use conduwuit::{Err, PduCount, err};
 use ruma::{
+	MilliSecondsSinceUnixEpoch,
 	api::client::{read_marker::set_read_marker, receipt::create_receipt},
 	events::{
-		receipt::{ReceiptThread, ReceiptType},
 		RoomAccountDataEventType,
+		receipt::{ReceiptThread, ReceiptType},
 	},
-	MilliSecondsSinceUnixEpoch,
 };
 
 use crate::{Result, Ruma};
diff --git a/src/api/client/redact.rs b/src/api/client/redact.rs
index ba59a010..7b512d06 100644
--- a/src/api/client/redact.rs
+++ b/src/api/client/redact.rs
@@ -3,7 +3,7 @@ use ruma::{
 	api::client::redact::redact_event, events::room::redaction::RoomRedactionEventContent,
 };
 
-use crate::{service::pdu::PduBuilder, Result, Ruma};
+use crate::{Result, Ruma, service::pdu::PduBuilder};
 
 /// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}`
 ///
diff --git a/src/api/client/relations.rs b/src/api/client/relations.rs
index 87fb1eac..7ed40f14 100644
--- a/src/api/client/relations.rs
+++ b/src/api/client/relations.rs
@@ -1,22 +1,21 @@
 use axum::extract::State;
 use conduwuit::{
-	at,
-	utils::{result::FlatOk, stream::WidebandExt, IterStream, ReadyExt},
-	PduCount, Result,
+	PduCount, Result, at,
+	utils::{IterStream, ReadyExt, result::FlatOk, stream::WidebandExt},
 };
 use futures::StreamExt;
 use ruma::{
+	EventId, RoomId, UInt, UserId,
 	api::{
+		Direction,
 		client::relations::{
 			get_relating_events, get_relating_events_with_rel_type,
 			get_relating_events_with_rel_type_and_event_type,
 		},
-		Direction,
 	},
-	events::{relation::RelationType, TimelineEventType},
-	EventId, RoomId, UInt, UserId,
+	events::{TimelineEventType, relation::RelationType},
 };
-use service::{rooms::timeline::PdusIterItem, Services};
+use service::{Services, rooms::timeline::PdusIterItem};
 
 use crate::Ruma;
 
diff --git a/src/api/client/report.rs b/src/api/client/report.rs
index 57de3f12..db085721 100644
--- a/src/api/client/report.rs
+++ b/src/api/client/report.rs
@@ -2,22 +2,22 @@ use std::time::Duration;
 
 use axum::extract::State;
 use axum_client_ip::InsecureClientIp;
-use conduwuit::{info, utils::ReadyExt, Err};
+use conduwuit::{Err, info, utils::ReadyExt};
 use rand::Rng;
 use ruma::{
+	EventId, RoomId, UserId,
 	api::client::{
 		error::ErrorKind,
 		room::{report_content, report_room},
 	},
 	events::room::message,
-	int, EventId, RoomId, UserId,
+	int,
 };
 use tokio::time::sleep;
 
 use crate::{
-	debug_info,
-	service::{pdu::PduEvent, Services},
-	Error, Result, Ruma,
+	Error, Result, Ruma, debug_info,
+	service::{Services, pdu::PduEvent},
 };
 
 /// # `POST /_matrix/client/v3/rooms/{roomId}/report`
diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs
index e362b3b3..1b8294a5 100644
--- a/src/api/client/room/create.rs
+++ b/src/api/client/room/create.rs
@@ -2,15 +2,17 @@ use std::collections::BTreeMap;
 
 use axum::extract::State;
 use conduwuit::{
-	debug_info, debug_warn, err, error, info, pdu::PduBuilder, warn, Err, Error, Result, StateKey,
+	Err, Error, Result, StateKey, debug_info, debug_warn, err, error, info, pdu::PduBuilder, warn,
 };
 use futures::FutureExt;
 use ruma::{
+	CanonicalJsonObject, Int, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId,
 	api::client::{
 		error::ErrorKind,
 		room::{self, create_room},
 	},
 	events::{
+		TimelineEventType,
 		room::{
 			canonical_alias::RoomCanonicalAliasEventContent,
 			create::RoomCreateEventContent,
@@ -22,16 +24,14 @@ use ruma::{
 			power_levels::RoomPowerLevelsEventContent,
 			topic::RoomTopicEventContent,
 		},
-		TimelineEventType,
 	},
 	int,
 	serde::{JsonObject,
Raw}, - CanonicalJsonObject, Int, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId, }; use serde_json::{json, value::to_raw_value}; -use service::{appservice::RegistrationInfo, Services}; +use service::{Services, appservice::RegistrationInfo}; -use crate::{client::invite_helper, Ruma}; +use crate::{Ruma, client::invite_helper}; /// # `POST /_matrix/client/v3/createRoom` /// @@ -68,10 +68,9 @@ pub(crate) async fn create_room_route( )); } - let room_id: OwnedRoomId = if let Some(custom_room_id) = &body.room_id { - custom_room_id_check(&services, custom_room_id)? - } else { - RoomId::new(&services.server.name) + let room_id: OwnedRoomId = match &body.room_id { + | Some(custom_room_id) => custom_room_id_check(&services, custom_room_id)?, + | _ => RoomId::new(&services.server.name), }; // check if room ID doesn't already exist instead of erroring on auth check @@ -114,10 +113,10 @@ pub(crate) async fn create_room_route( .await; let state_lock = services.rooms.state.mutex.lock(&room_id).await; - let alias: Option = if let Some(alias) = body.room_alias_name.as_ref() { - Some(room_alias_check(&services, alias, body.appservice_info.as_ref()).await?) - } else { - None + let alias: Option = match body.room_alias_name.as_ref() { + | Some(alias) => + Some(room_alias_check(&services, alias, body.appservice_info.as_ref()).await?), + | _ => None, }; let room_version = match body.room_version.clone() { diff --git a/src/api/client/room/event.rs b/src/api/client/room/event.rs index f0ae64dd..84b591cd 100644 --- a/src/api/client/room/event.rs +++ b/src/api/client/room/event.rs @@ -1,9 +1,9 @@ use axum::extract::State; -use conduwuit::{err, Err, Event, Result}; -use futures::{future::try_join, FutureExt, TryFutureExt}; +use conduwuit::{Err, Event, Result, err}; +use futures::{FutureExt, TryFutureExt, future::try_join}; use ruma::api::client::room::get_room_event; -use crate::{client::is_ignored_pdu, Ruma}; +use crate::{Ruma, client::is_ignored_pdu}; /// # `GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}` /// diff --git a/src/api/client/room/initial_sync.rs b/src/api/client/room/initial_sync.rs index 233d180f..e4c76ae0 100644 --- a/src/api/client/room/initial_sync.rs +++ b/src/api/client/room/initial_sync.rs @@ -1,8 +1,7 @@ use axum::extract::State; use conduwuit::{ - at, - utils::{stream::TryTools, BoolExt}, - Err, PduEvent, Result, + Err, PduEvent, Result, at, + utils::{BoolExt, stream::TryTools}, }; use futures::TryStreamExt; use ruma::api::client::room::initial_sync::v3::{PaginationChunk, Request, Response}; diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs index a624f95f..4ac341a9 100644 --- a/src/api/client/room/upgrade.rs +++ b/src/api/client/room/upgrade.rs @@ -1,19 +1,20 @@ use std::cmp::max; use axum::extract::State; -use conduwuit::{err, info, pdu::PduBuilder, Error, Result, StateKey}; +use conduwuit::{Error, Result, StateKey, err, info, pdu::PduBuilder}; use futures::StreamExt; use ruma::{ + CanonicalJsonObject, RoomId, RoomVersionId, api::client::{error::ErrorKind, room::upgrade_room}, events::{ + StateEventType, TimelineEventType, room::{ member::{MembershipState, RoomMemberEventContent}, power_levels::RoomPowerLevelsEventContent, tombstone::RoomTombstoneEventContent, }, - StateEventType, TimelineEventType, }, - int, CanonicalJsonObject, RoomId, RoomVersionId, + int, }; use serde_json::{json, value::to_raw_value}; diff --git a/src/api/client/search.rs b/src/api/client/search.rs index 898dfc7f..f3366843 100644 --- a/src/api/client/search.rs +++ 
b/src/api/client/search.rs @@ -2,23 +2,22 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ - at, is_true, + Err, PduEvent, Result, at, is_true, result::FlatOk, - utils::{stream::ReadyExt, IterStream}, - Err, PduEvent, Result, + utils::{IterStream, stream::ReadyExt}, }; -use futures::{future::OptionFuture, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; +use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::OptionFuture}; use ruma::{ + OwnedRoomId, RoomId, UInt, UserId, api::client::search::search_events::{ self, v3::{Criteria, EventContextResult, ResultCategories, ResultRoomEvents, SearchResult}, }, events::AnyStateEvent, serde::Raw, - OwnedRoomId, RoomId, UInt, UserId, }; use search_events::v3::{Request, Response}; -use service::{rooms::search::RoomQuery, Services}; +use service::{Services, rooms::search::RoomQuery}; use crate::Ruma; diff --git a/src/api/client/send.rs b/src/api/client/send.rs index 39340070..b01d1ed6 100644 --- a/src/api/client/send.rs +++ b/src/api/client/send.rs @@ -1,11 +1,11 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduwuit::{err, Err}; +use conduwuit::{Err, err}; use ruma::{api::client::message::send_message_event, events::MessageLikeEventType}; use serde_json::from_str; -use crate::{service::pdu::PduBuilder, utils, Result, Ruma}; +use crate::{Result, Ruma, service::pdu::PduBuilder, utils}; /// # `PUT /_matrix/client/v3/rooms/{roomId}/send/{eventType}/{txnId}` /// diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 7155351c..5c0ab47d 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -2,9 +2,10 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{debug, err, info, utils::ReadyExt, warn, Err}; +use conduwuit::{Err, debug, err, info, utils::ReadyExt, warn}; use futures::StreamExt; use ruma::{ + OwnedUserId, UserId, api::client::{ error::ErrorKind, session::{ @@ -21,12 +22,11 @@ use ruma::{ }, uiaa, }, - OwnedUserId, UserId, }; use service::uiaa::SESSION_ID_LENGTH; use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; -use crate::{utils, utils::hash, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, utils, utils::hash}; /// # `GET /_matrix/client/v3/login` /// @@ -139,18 +139,20 @@ pub(crate) async fn login_route( Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") })?; - if let Some(ref info) = body.appservice_info { - if !info.is_user_match(&user_id) { + match body.appservice_info { + | Some(ref info) => + if !info.is_user_match(&user_id) { + return Err(Error::BadRequest( + ErrorKind::Exclusive, + "User is not in namespace.", + )); + }, + | _ => { return Err(Error::BadRequest( - ErrorKind::Exclusive, - "User is not in namespace.", + ErrorKind::MissingToken, + "Missing appservice token.", )); - } - } else { - return Err(Error::BadRequest( - ErrorKind::MissingToken, - "Missing appservice token.", - )); + }, } user_id @@ -259,26 +261,32 @@ pub(crate) async fn login_token_route( auth_error: None, }; - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services - .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) - .await?; + match &body.auth { + | Some(auth) => { + let (worked, uiaainfo) = services + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo) + .await?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } - // Success! 
- } else if let Some(json) = body.json_body.as_ref() { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services - .uiaa - .create(sender_user, sender_device, &uiaainfo, json); + // Success! + }, + | _ => match body.json_body.as_ref() { + | Some(json) => { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services + .uiaa + .create(sender_user, sender_device, &uiaainfo, json); - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err!(Request(NotJson("No JSON body was sent when required."))); + return Err(Error::Uiaa(uiaainfo)); + }, + | _ => { + return Err!(Request(NotJson("No JSON body was sent when required."))); + }, + }, } let login_token = utils::random_string(TOKEN_LENGTH); diff --git a/src/api/client/space.rs b/src/api/client/space.rs index 7efd7817..a667f852 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -5,18 +5,18 @@ use std::{ use axum::extract::State; use conduwuit::{ - utils::{future::TryExtExt, stream::IterStream}, Err, Result, + utils::{future::TryExtExt, stream::IterStream}, }; -use futures::{future::OptionFuture, StreamExt, TryFutureExt}; +use futures::{StreamExt, TryFutureExt, future::OptionFuture}; use ruma::{ - api::client::space::get_hierarchy, OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, + OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, api::client::space::get_hierarchy, }; use service::{ - rooms::spaces::{ - get_parent_children_via, summary_to_chunk, PaginationToken, SummaryAccessibility, - }, Services, + rooms::spaces::{ + PaginationToken, SummaryAccessibility, get_parent_children_via, summary_to_chunk, + }, }; use crate::Ruma; diff --git a/src/api/client/state.rs b/src/api/client/state.rs index f73ffa46..6353fe1c 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -1,19 +1,19 @@ use axum::extract::State; -use conduwuit::{err, pdu::PduBuilder, utils::BoolExt, Err, PduEvent, Result}; +use conduwuit::{Err, PduEvent, Result, err, pdu::PduBuilder, utils::BoolExt}; use futures::TryStreamExt; use ruma::{ + OwnedEventId, RoomId, UserId, api::client::state::{get_state_events, get_state_events_for_key, send_state_event}, events::{ + AnyStateEventContent, StateEventType, room::{ canonical_alias::RoomCanonicalAliasEventContent, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, join_rules::{JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, }, - AnyStateEventContent, StateEventType, }, serde::Raw, - OwnedEventId, RoomId, UserId, }; use service::Services; diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index 46540881..3eab76cc 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -3,25 +3,25 @@ mod v4; mod v5; use conduwuit::{ - utils::{ - stream::{BroadbandExt, ReadyExt, TryIgnore}, - IterStream, - }, PduCount, + utils::{ + IterStream, + stream::{BroadbandExt, ReadyExt, TryIgnore}, + }, }; -use futures::{pin_mut, StreamExt}; +use futures::{StreamExt, pin_mut}; use ruma::{ + RoomId, UserId, directory::RoomTypeFilter, events::TimelineEventType::{ self, Beacon, CallInvite, PollStart, RoomEncrypted, RoomMessage, Sticker, }, - RoomId, UserId, }; pub(crate) use self::{ v3::sync_events_route, v4::sync_events_v4_route, v5::sync_events_v5_route, }; -use crate::{service::Services, Error, PduEvent, Result}; +use crate::{Error, PduEvent, Result, service::Services}; pub(crate) const DEFAULT_BUMP_TYPES: &[TimelineEventType; 6] = &[CallInvite, PollStart, Beacon, RoomEncrypted, RoomMessage, 
Sticker]; diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index f9dcd5ec..fb59837b 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -6,57 +6,55 @@ use std::{ use axum::extract::State; use conduwuit::{ - at, err, error, extract_variant, is_equal_to, pair_of, + PduCount, PduEvent, Result, at, err, error, extract_variant, is_equal_to, pair_of, pdu::{Event, EventHash}, ref_at, result::FlatOk, utils::{ - self, + self, BoolExt, IterStream, ReadyExt, TryFutureExtExt, math::ruma_from_u64, stream::{BroadbandExt, Tools, TryExpect, WidebandExt}, - BoolExt, IterStream, ReadyExt, TryFutureExtExt, }, - PduCount, PduEvent, Result, }; use conduwuit_service::{ + Services, rooms::{ lazy_loading, lazy_loading::{Options, Witness}, short::ShortStateHash, }, - Services, }; use futures::{ - future::{join, join3, join4, join5, try_join, try_join4, OptionFuture}, FutureExt, StreamExt, TryFutureExt, TryStreamExt, + future::{OptionFuture, join, join3, join4, join5, try_join, try_join4}, }; use ruma::{ + DeviceId, EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId, api::client::{ filter::FilterDefinition, sync::sync_events::{ - self, + self, DeviceLists, UnreadNotificationsCount, v3::{ Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom, KnockState, KnockedRoom, LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State as RoomState, Timeline, ToDevice, }, - DeviceLists, UnreadNotificationsCount, }, uiaa::UiaaResponse, }, events::{ - presence::{PresenceEvent, PresenceEventContent}, - room::member::{MembershipState, RoomMemberEventContent}, AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType::*, + presence::{PresenceEvent, PresenceEventContent}, + room::member::{MembershipState, RoomMemberEventContent}, }, serde::Raw, - uint, DeviceId, EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId, + uint, }; use service::rooms::short::{ShortEventId, ShortStateKey}; use super::{load_timeline, share_encrypted_room}; -use crate::{client::ignored_filter, Ruma, RumaResponse}; +use crate::{Ruma, RumaResponse, client::ignored_filter}; #[derive(Default)] struct StateChanges { @@ -168,8 +166,8 @@ pub(crate) async fn build_sync_events( let full_state = body.body.full_state; let filter = match body.body.filter.as_ref() { | None => FilterDefinition::default(), - | Some(Filter::FilterDefinition(ref filter)) => filter.clone(), - | Some(Filter::FilterId(ref filter_id)) => services + | Some(Filter::FilterDefinition(filter)) => filter.clone(), + | Some(Filter::FilterId(filter_id)) => services .users .get_filter(sender_user, filter_id) .await @@ -1016,34 +1014,37 @@ async fn calculate_state_incremental<'a>( let lazy_state_ids: OptionFuture<_> = witness .filter(|_| !full_state && !encrypted_room) .map(|witness| { - witness - .iter() - .stream() - .broad_filter_map(|user_id| state_get_shorteventid(user_id)) - .into_future() + StreamExt::into_future( + witness + .iter() + .stream() + .broad_filter_map(|user_id| state_get_shorteventid(user_id)), + ) }) .into(); let state_diff: OptionFuture<_> = (!full_state && state_changed) .then(|| { - services - .rooms - .state_accessor - .state_added((since_shortstatehash, current_shortstatehash)) - .boxed() - .into_future() + StreamExt::into_future( + services + .rooms + .state_accessor + .state_added((since_shortstatehash, current_shortstatehash)) + .boxed(), + ) }) .into(); let current_state_ids: OptionFuture<_> = full_state .then(|| { - services - .rooms - .state_accessor - 
.state_full_shortids(current_shortstatehash) - .expect_ok() - .boxed() - .into_future() + StreamExt::into_future( + services + .rooms + .state_accessor + .state_full_shortids(current_shortstatehash) + .expect_ok() + .boxed(), + ) }) .into(); diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 13f832b2..5fdcbab8 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -6,37 +6,37 @@ use std::{ use axum::extract::State; use conduwuit::{ - debug, error, extract_variant, + Error, PduCount, Result, debug, error, extract_variant, utils::{ - math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, BoolExt, IterStream, ReadyExt, TryFutureExtExt, + math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, }, - warn, Error, PduCount, Result, + warn, }; use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::{ + MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, api::client::{ error::ErrorKind, sync::sync_events::{ - self, + self, DeviceLists, UnreadNotificationsCount, v4::{SlidingOp, SlidingSyncRoomHero}, - DeviceLists, UnreadNotificationsCount, }, }, events::{ - room::member::{MembershipState, RoomMemberEventContent}, AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType::*, + room::member::{MembershipState, RoomMemberEventContent}, }, serde::Raw, - uint, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, + uint, }; use service::rooms::read_receipt::pack_receipts; use super::{load_timeline, share_encrypted_room}; use crate::{ - client::{filter_rooms, ignored_filter, sync::v5::TodoRooms, DEFAULT_BUMP_TYPES}, Ruma, + client::{DEFAULT_BUMP_TYPES, filter_rooms, ignored_filter, sync::v5::TodoRooms}, }; pub(crate) const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync"; @@ -700,14 +700,13 @@ pub(crate) async fn sync_events_v4_route( .await .ok() .or(name), - avatar: if let Some(heroes_avatar) = heroes_avatar { - ruma::JsOption::Some(heroes_avatar) - } else { - match services.rooms.state_accessor.get_avatar(room_id).await { + avatar: match heroes_avatar { + | Some(heroes_avatar) => ruma::JsOption::Some(heroes_avatar), + | _ => match services.rooms.state_accessor.get_avatar(room_id).await { | ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url), | ruma::JsOption::Null => ruma::JsOption::Null, | ruma::JsOption::Undefined => ruma::JsOption::Undefined, - } + }, }, initial: Some(roomsince == &0), is_dm: None, diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index cda6c041..b4c1b815 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -6,32 +6,33 @@ use std::{ use axum::extract::State; use conduwuit::{ - debug, error, extract_variant, trace, + Error, Result, TypeStateKey, debug, error, extract_variant, trace, utils::{ - math::{ruma_from_usize, usize_from_ruma}, BoolExt, IterStream, ReadyExt, TryFutureExtExt, + math::{ruma_from_usize, usize_from_ruma}, }, - warn, Error, Result, TypeStateKey, + warn, }; use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::{ + DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, api::client::{ error::ErrorKind, sync::sync_events::{self, DeviceLists, UnreadNotificationsCount}, }, events::{ - room::member::{MembershipState, RoomMemberEventContent}, AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType, + room::member::{MembershipState, RoomMemberEventContent}, }, serde::Raw, - uint, DeviceId, OwnedEventId, OwnedRoomId, RoomId, 
UInt, UserId, + uint, }; -use service::{rooms::read_receipt::pack_receipts, PduCount}; +use service::{PduCount, rooms::read_receipt::pack_receipts}; use super::{filter_rooms, share_encrypted_room}; use crate::{ - client::{ignored_filter, sync::load_timeline, DEFAULT_BUMP_TYPES}, Ruma, + client::{DEFAULT_BUMP_TYPES, ignored_filter, sync::load_timeline}, }; type SyncInfo<'a> = (&'a UserId, &'a DeviceId, u64, &'a sync_events::v5::Request); @@ -572,14 +573,13 @@ async fn process_rooms( .await .ok() .or(name), - avatar: if let Some(heroes_avatar) = heroes_avatar { - ruma::JsOption::Some(heroes_avatar) - } else { - match services.rooms.state_accessor.get_avatar(room_id).await { + avatar: match heroes_avatar { + | Some(heroes_avatar) => ruma::JsOption::Some(heroes_avatar), + | _ => match services.rooms.state_accessor.get_avatar(room_id).await { | ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url), | ruma::JsOption::Null => ruma::JsOption::Null, | ruma::JsOption::Undefined => ruma::JsOption::Undefined, - } + }, }, initial: Some(roomsince == &0), is_dm: None, diff --git a/src/api/client/tag.rs b/src/api/client/tag.rs index 820ee4a1..3b3b40d4 100644 --- a/src/api/client/tag.rs +++ b/src/api/client/tag.rs @@ -4,8 +4,8 @@ use axum::extract::State; use ruma::{ api::client::tag::{create_tag, delete_tag, get_tags}, events::{ - tag::{TagEvent, TagEventContent}, RoomAccountDataEventType, + tag::{TagEvent, TagEventContent}, }, }; diff --git a/src/api/client/threads.rs b/src/api/client/threads.rs index f0cbf467..d25e52c0 100644 --- a/src/api/client/threads.rs +++ b/src/api/client/threads.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduwuit::{at, PduCount, PduEvent}; +use conduwuit::{PduCount, PduEvent, at}; use futures::StreamExt; use ruma::{api::client::threads::get_threads, uint}; diff --git a/src/api/client/typing.rs b/src/api/client/typing.rs index b311295b..ccfa7340 100644 --- a/src/api/client/typing.rs +++ b/src/api/client/typing.rs @@ -1,8 +1,8 @@ use axum::extract::State; -use conduwuit::{utils::math::Tried, Err}; +use conduwuit::{Err, utils::math::Tried}; use ruma::api::client::typing::create_typing_event; -use crate::{utils, Result, Ruma}; +use crate::{Result, Ruma, utils}; /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` /// @@ -27,37 +27,40 @@ pub(crate) async fn create_typing_event_route( return Err!(Request(Forbidden("You are not in this room."))); } - if let Typing::Yes(duration) = body.state { - let duration = utils::clamp( - duration.as_millis().try_into().unwrap_or(u64::MAX), + match body.state { + | Typing::Yes(duration) => { + let duration = utils::clamp( + duration.as_millis().try_into().unwrap_or(u64::MAX), + services + .server + .config + .typing_client_timeout_min_s + .try_mul(1000)?, + services + .server + .config + .typing_client_timeout_max_s + .try_mul(1000)?, + ); services - .server - .config - .typing_client_timeout_min_s - .try_mul(1000)?, + .rooms + .typing + .typing_add( + sender_user, + &body.room_id, + utils::millis_since_unix_epoch() + .checked_add(duration) + .expect("user typing timeout should not get this high"), + ) + .await?; + }, + | _ => { services - .server - .config - .typing_client_timeout_max_s - .try_mul(1000)?, - ); - services - .rooms - .typing - .typing_add( - sender_user, - &body.room_id, - utils::millis_since_unix_epoch() - .checked_add(duration) - .expect("user typing timeout should not get this high"), - ) - .await?; - } else { - services - .rooms - .typing - .typing_remove(sender_user, &body.room_id) - .await?; + 
.rooms + .typing + .typing_remove(sender_user, &body.room_id) + .await?; + }, } // ping presence diff --git a/src/api/client/unstable.rs b/src/api/client/unstable.rs index 67c7df75..08da5a37 100644 --- a/src/api/client/unstable.rs +++ b/src/api/client/unstable.rs @@ -5,6 +5,7 @@ use axum_client_ip::InsecureClientIp; use conduwuit::Err; use futures::StreamExt; use ruma::{ + OwnedRoomId, api::{ client::{ error::ErrorKind, @@ -19,7 +20,6 @@ use ruma::{ }, events::room::member::MembershipState, presence::PresenceState, - OwnedRoomId, }; use super::{update_avatar_url, update_displayname}; @@ -499,15 +499,18 @@ pub(crate) async fn get_profile_key_route( .users .set_timezone(&body.user_id, response.tz.clone()); - if let Some(value) = response.custom_profile_fields.get(&body.key_name) { - profile_key_value.insert(body.key_name.clone(), value.clone()); - services.users.set_profile_key( - &body.user_id, - &body.key_name, - Some(value.clone()), - ); - } else { - return Err!(Request(NotFound("The requested profile key does not exist."))); + match response.custom_profile_fields.get(&body.key_name) { + | Some(value) => { + profile_key_value.insert(body.key_name.clone(), value.clone()); + services.users.set_profile_key( + &body.user_id, + &body.key_name, + Some(value.clone()), + ); + }, + | _ => { + return Err!(Request(NotFound("The requested profile key does not exist."))); + }, } if profile_key_value.is_empty() { @@ -524,14 +527,17 @@ pub(crate) async fn get_profile_key_route( return Err!(Request(NotFound("Profile was not found."))); } - if let Ok(value) = services + match services .users .profile_key(&body.user_id, &body.key_name) .await { - profile_key_value.insert(body.key_name.clone(), value); - } else { - return Err!(Request(NotFound("The requested profile key does not exist."))); + | Ok(value) => { + profile_key_value.insert(body.key_name.clone(), value); + }, + | _ => { + return Err!(Request(NotFound("The requested profile key does not exist."))); + }, } if profile_key_value.is_empty() { diff --git a/src/api/client/unversioned.rs b/src/api/client/unversioned.rs index 904f1d2f..4e2b7d9d 100644 --- a/src/api/client/unversioned.rs +++ b/src/api/client/unversioned.rs @@ -1,6 +1,6 @@ use std::collections::BTreeMap; -use axum::{extract::State, response::IntoResponse, Json}; +use axum::{Json, extract::State, response::IntoResponse}; use futures::StreamExt; use ruma::api::client::discovery::get_supported_versions; diff --git a/src/api/client/user_directory.rs b/src/api/client/user_directory.rs index 182e30db..c5d79a56 100644 --- a/src/api/client/user_directory.rs +++ b/src/api/client/user_directory.rs @@ -1,11 +1,11 @@ use axum::extract::State; use conduwuit::utils::TryFutureExtExt; -use futures::{pin_mut, StreamExt}; +use futures::{StreamExt, pin_mut}; use ruma::{ api::client::user_directory::search_users, events::{ - room::join_rules::{JoinRule, RoomJoinRulesEventContent}, StateEventType, + room::join_rules::{JoinRule, RoomJoinRulesEventContent}, }, }; diff --git a/src/api/client/voip.rs b/src/api/client/voip.rs index 70ad4913..37e67984 100644 --- a/src/api/client/voip.rs +++ b/src/api/client/voip.rs @@ -1,10 +1,10 @@ use std::time::{Duration, SystemTime}; use axum::extract::State; -use base64::{engine::general_purpose, Engine as _}; -use conduwuit::{utils, Err}; +use base64::{Engine as _, engine::general_purpose}; +use conduwuit::{Err, utils}; use hmac::{Hmac, Mac}; -use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch, UserId}; +use ruma::{SecondsSinceUnixEpoch, UserId, 
api::client::voip::get_turn_server_info}; use sha1::Sha1; use crate::{Result, Ruma}; diff --git a/src/api/client/well_known.rs b/src/api/client/well_known.rs index 5c53d013..abda61b0 100644 --- a/src/api/client/well_known.rs +++ b/src/api/client/well_known.rs @@ -1,4 +1,4 @@ -use axum::{extract::State, response::IntoResponse, Json}; +use axum::{Json, extract::State, response::IntoResponse}; use ruma::api::client::{ discovery::{ discover_homeserver::{self, HomeserverInfo, SlidingSyncProxyInfo}, diff --git a/src/api/mod.rs b/src/api/mod.rs index 80e34f10..8df17a59 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -7,7 +7,7 @@ pub mod server; extern crate conduwuit_core as conduwuit; extern crate conduwuit_service as service; -pub(crate) use conduwuit::{debug_info, pdu::PduEvent, utils, Error, Result}; +pub(crate) use conduwuit::{Error, Result, debug_info, pdu::PduEvent, utils}; pub(crate) use self::router::{Ruma, RumaResponse, State}; diff --git a/src/api/router.rs b/src/api/router.rs index 7855ddfa..3fbef275 100644 --- a/src/api/router.rs +++ b/src/api/router.rs @@ -8,12 +8,12 @@ pub mod state; use std::str::FromStr; use axum::{ + Router, response::{IntoResponse, Redirect}, routing::{any, get, post}, - Router, }; -use conduwuit::{err, Server}; -use http::{uri, Uri}; +use conduwuit::{Server, err}; +use http::{Uri, uri}; use self::handler::RouterExt; pub(super) use self::{args::Args as Ruma, response::RumaResponse, state::State}; diff --git a/src/api/router/args.rs b/src/api/router/args.rs index 582f0c56..65a68fa4 100644 --- a/src/api/router/args.rs +++ b/src/api/router/args.rs @@ -2,15 +2,15 @@ use std::{mem, ops::Deref}; use axum::{async_trait, body::Body, extract::FromRequest}; use bytes::{BufMut, Bytes, BytesMut}; -use conduwuit::{debug, debug_warn, err, trace, utils::string::EMPTY, Error, Result}; +use conduwuit::{Error, Result, debug, debug_warn, err, trace, utils::string::EMPTY}; use ruma::{ - api::IncomingRequest, CanonicalJsonObject, CanonicalJsonValue, DeviceId, OwnedDeviceId, - OwnedServerName, OwnedUserId, ServerName, UserId, + CanonicalJsonObject, CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedServerName, + OwnedUserId, ServerName, UserId, api::IncomingRequest, }; use service::Services; use super::{auth, auth::Auth, request, request::Request}; -use crate::{service::appservice::RegistrationInfo, State}; +use crate::{State, service::appservice::RegistrationInfo}; /// Extractor for Ruma request structs pub(crate) struct Args { diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index ecea305b..56256683 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -1,12 +1,14 @@ use axum::RequestPartsExt; use axum_extra::{ - headers::{authorization::Bearer, Authorization}, - typed_header::TypedHeaderRejectionReason, TypedHeader, + headers::{Authorization, authorization::Bearer}, + typed_header::TypedHeaderRejectionReason, }; -use conduwuit::{debug_error, err, warn, Err, Error, Result}; +use conduwuit::{Err, Error, Result, debug_error, err, warn}; use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId, api::{ + AuthScheme, IncomingRequest, Metadata, client::{ directory::get_public_rooms, error::ErrorKind, @@ -16,14 +18,12 @@ use ruma::{ voip::get_turn_server_info, }, federation::openid::get_openid_userinfo, - AuthScheme, IncomingRequest, Metadata, }, server_util::authorization::XMatrix, - CanonicalJsonObject, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId, }; use service::{ - 
server_keys::{PubKeyMap, PubKeys}, Services, + server_keys::{PubKeyMap, PubKeys}, }; use super::request::Request; @@ -56,12 +56,12 @@ pub(super) async fn auth( }; let token = if let Some(token) = token { - if let Some(reg_info) = services.appservice.find_from_token(token).await { - Token::Appservice(Box::new(reg_info)) - } else if let Ok((user_id, device_id)) = services.users.find_from_token(token).await { - Token::User((user_id, device_id)) - } else { - Token::Invalid + match services.appservice.find_from_token(token).await { + | Some(reg_info) => Token::Appservice(Box::new(reg_info)), + | _ => match services.users.find_from_token(token).await { + | Ok((user_id, device_id)) => Token::User((user_id, device_id)), + | _ => Token::Invalid, + }, } } else { Token::None diff --git a/src/api/router/handler.rs b/src/api/router/handler.rs index cfb8fb6e..ab013945 100644 --- a/src/api/router/handler.rs +++ b/src/api/router/handler.rs @@ -1,8 +1,8 @@ use axum::{ + Router, extract::FromRequestParts, response::IntoResponse, - routing::{on, MethodFilter}, - Router, + routing::{MethodFilter, on}, }; use conduwuit::Result; use futures::{Future, TryFutureExt}; diff --git a/src/api/router/request.rs b/src/api/router/request.rs index 615a8bff..3cdc452b 100644 --- a/src/api/router/request.rs +++ b/src/api/router/request.rs @@ -1,8 +1,8 @@ use std::str; -use axum::{extract::Path, RequestExt, RequestPartsExt}; +use axum::{RequestExt, RequestPartsExt, extract::Path}; use bytes::Bytes; -use conduwuit::{err, Result}; +use conduwuit::{Result, err}; use http::request::Parts; use serde::Deserialize; use service::Services; diff --git a/src/api/router/response.rs b/src/api/router/response.rs index a10560f1..03c9060e 100644 --- a/src/api/router/response.rs +++ b/src/api/router/response.rs @@ -1,9 +1,9 @@ use axum::response::{IntoResponse, Response}; use bytes::BytesMut; -use conduwuit::{error, Error}; +use conduwuit::{Error, error}; use http::StatusCode; use http_body_util::Full; -use ruma::api::{client::uiaa::UiaaResponse, OutgoingResponse}; +use ruma::api::{OutgoingResponse, client::uiaa::UiaaResponse}; pub(crate) struct RumaResponse(pub(crate) T) where diff --git a/src/api/server/backfill.rs b/src/api/server/backfill.rs index b44db67c..5c875807 100644 --- a/src/api/server/backfill.rs +++ b/src/api/server/backfill.rs @@ -2,11 +2,11 @@ use std::cmp; use axum::extract::State; use conduwuit::{ - utils::{stream::TryTools, IterStream, ReadyExt}, PduCount, Result, + utils::{IterStream, ReadyExt, stream::TryTools}, }; use futures::{FutureExt, StreamExt, TryStreamExt}; -use ruma::{api::federation::backfill::get_backfill, uint, MilliSecondsSinceUnixEpoch}; +use ruma::{MilliSecondsSinceUnixEpoch, api::federation::backfill::get_backfill, uint}; use super::AccessCheck; use crate::Ruma; diff --git a/src/api/server/event.rs b/src/api/server/event.rs index 629dd6a2..5846c6d7 100644 --- a/src/api/server/event.rs +++ b/src/api/server/event.rs @@ -1,6 +1,6 @@ use axum::extract::State; -use conduwuit::{err, Result}; -use ruma::{api::federation::event::get_event, MilliSecondsSinceUnixEpoch, RoomId}; +use conduwuit::{Result, err}; +use ruma::{MilliSecondsSinceUnixEpoch, RoomId, api::federation::event::get_event}; use super::AccessCheck; use crate::Ruma; diff --git a/src/api/server/event_auth.rs b/src/api/server/event_auth.rs index 49dcd718..c9e210f5 100644 --- a/src/api/server/event_auth.rs +++ b/src/api/server/event_auth.rs @@ -1,11 +1,11 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; -use 
conduwuit::{utils::stream::ReadyExt, Error, Result}; +use conduwuit::{Error, Result, utils::stream::ReadyExt}; use futures::StreamExt; use ruma::{ - api::{client::error::ErrorKind, federation::authorization::get_event_authorization}, RoomId, + api::{client::error::ErrorKind, federation::authorization::get_event_authorization}, }; use super::AccessCheck; diff --git a/src/api/server/get_missing_events.rs b/src/api/server/get_missing_events.rs index ea06015a..3d0bbb07 100644 --- a/src/api/server/get_missing_events.rs +++ b/src/api/server/get_missing_events.rs @@ -1,8 +1,8 @@ use axum::extract::State; use conduwuit::{Error, Result}; use ruma::{ - api::{client::error::ErrorKind, federation::event::get_missing_events}, CanonicalJsonValue, EventId, RoomId, + api::{client::error::ErrorKind, federation::event::get_missing_events}, }; use super::AccessCheck; diff --git a/src/api/server/hierarchy.rs b/src/api/server/hierarchy.rs index f7bc43ab..41eaedd0 100644 --- a/src/api/server/hierarchy.rs +++ b/src/api/server/hierarchy.rs @@ -1,11 +1,11 @@ use axum::extract::State; use conduwuit::{ - utils::stream::{BroadbandExt, IterStream}, Err, Result, + utils::stream::{BroadbandExt, IterStream}, }; use futures::{FutureExt, StreamExt}; use ruma::api::federation::space::get_hierarchy; -use service::rooms::spaces::{get_parent_children_via, Identifier, SummaryAccessibility}; +use service::rooms::spaces::{Identifier, SummaryAccessibility, get_parent_children_via}; use crate::Ruma; diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index 27a4485c..463cb9ab 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -1,12 +1,12 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use base64::{engine::general_purpose, Engine as _}; -use conduwuit::{err, utils, utils::hash::sha256, warn, Err, Error, PduEvent, Result}; +use base64::{Engine as _, engine::general_purpose}; +use conduwuit::{Err, Error, PduEvent, Result, err, utils, utils::hash::sha256, warn}; use ruma::{ + CanonicalJsonValue, OwnedUserId, UserId, api::{client::error::ErrorKind, federation::membership::create_invite}, events::room::member::{MembershipState, RoomMemberEventContent}, serde::JsonObject, - CanonicalJsonValue, OwnedUserId, UserId, }; use service::pdu::gen_event_id; diff --git a/src/api/server/key.rs b/src/api/server/key.rs index 75801a7a..f9bd0926 100644 --- a/src/api/server/key.rs +++ b/src/api/server/key.rs @@ -3,15 +3,15 @@ use std::{ time::{Duration, SystemTime}, }; -use axum::{extract::State, response::IntoResponse, Json}; -use conduwuit::{utils::timepoint_from_now, Result}; +use axum::{Json, extract::State, response::IntoResponse}; +use conduwuit::{Result, utils::timepoint_from_now}; use ruma::{ + MilliSecondsSinceUnixEpoch, Signatures, api::{ - federation::discovery::{get_server_keys, OldVerifyKey, ServerSigningKeys}, OutgoingResponse, + federation::discovery::{OldVerifyKey, ServerSigningKeys, get_server_keys}, }, serde::Raw, - MilliSecondsSinceUnixEpoch, Signatures, }; /// # `GET /_matrix/key/v2/server` diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs index b753346c..f18d1304 100644 --- a/src/api/server/make_join.rs +++ b/src/api/server/make_join.rs @@ -1,22 +1,22 @@ use axum::extract::State; -use conduwuit::{debug_info, utils::IterStream, warn, Err}; +use conduwuit::{Err, debug_info, utils::IterStream, warn}; use futures::StreamExt; use ruma::{ + CanonicalJsonObject, OwnedUserId, RoomId, RoomVersionId, UserId, api::{client::error::ErrorKind, 
federation::membership::prepare_join_event}, events::{ + StateEventType, room::{ join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, }, - StateEventType, }, - CanonicalJsonObject, OwnedUserId, RoomId, RoomVersionId, UserId, }; use serde_json::value::to_raw_value; use crate::{ - service::{pdu::PduBuilder, Services}, Error, Result, Ruma, + service::{Services, pdu::PduBuilder}, }; /// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` diff --git a/src/api/server/make_knock.rs b/src/api/server/make_knock.rs index 423e202d..71536439 100644 --- a/src/api/server/make_knock.rs +++ b/src/api/server/make_knock.rs @@ -1,15 +1,15 @@ +use RoomVersionId::*; use axum::extract::State; -use conduwuit::{debug_warn, Err}; +use conduwuit::{Err, debug_warn}; use ruma::{ + RoomVersionId, api::{client::error::ErrorKind, federation::knock::create_knock_event_template}, events::room::member::{MembershipState, RoomMemberEventContent}, - RoomVersionId, }; use serde_json::value::to_raw_value; use tracing::warn; -use RoomVersionId::*; -use crate::{service::pdu::PduBuilder, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, service::pdu::PduBuilder}; /// # `GET /_matrix/federation/v1/make_knock/{roomId}/{userId}` /// diff --git a/src/api/server/make_leave.rs b/src/api/server/make_leave.rs index 936e0fbb..1ed02785 100644 --- a/src/api/server/make_leave.rs +++ b/src/api/server/make_leave.rs @@ -7,7 +7,7 @@ use ruma::{ use serde_json::value::to_raw_value; use super::make_join::maybe_strip_event_id; -use crate::{service::pdu::PduBuilder, Ruma}; +use crate::{Ruma, service::pdu::PduBuilder}; /// # `GET /_matrix/federation/v1/make_leave/{roomId}/{eventId}` /// diff --git a/src/api/server/media.rs b/src/api/server/media.rs index e56f5b9d..cbe8595b 100644 --- a/src/api/server/media.rs +++ b/src/api/server/media.rs @@ -1,12 +1,12 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{utils::content_disposition::make_content_disposition, Err, Result}; +use conduwuit::{Err, Result, utils::content_disposition::make_content_disposition}; use conduwuit_service::media::{Dim, FileMeta}; use ruma::{ - api::federation::authenticated_media::{ - get_content, get_content_thumbnail, Content, ContentMetadata, FileOrLocation, - }, Mxc, + api::federation::authenticated_media::{ + Content, ContentMetadata, FileOrLocation, get_content, get_content_thumbnail, + }, }; use crate::Ruma; diff --git a/src/api/server/query.rs b/src/api/server/query.rs index 69f62e94..9d4fcf73 100644 --- a/src/api/server/query.rs +++ b/src/api/server/query.rs @@ -1,16 +1,16 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduwuit::{err, Error, Result}; +use conduwuit::{Error, Result, err}; use futures::StreamExt; use get_profile_information::v1::ProfileField; use rand::seq::SliceRandom; use ruma::{ + OwnedServerName, api::{ client::error::ErrorKind, federation::query::{get_profile_information, get_room_information}, }, - OwnedServerName, }; use crate::Ruma; diff --git a/src/api/server/send.rs b/src/api/server/send.rs index bc18377e..1f467dac 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -3,20 +3,21 @@ use std::{collections::BTreeMap, net::IpAddr, time::Instant}; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - debug, + Err, Error, Result, debug, debug::INFO_SPAN_LEVEL, debug_warn, err, error, result::LogErr, trace, utils::{ - stream::{automatic_width, BroadbandExt, TryBroadbandExt}, IterStream, 
ReadyExt, + stream::{BroadbandExt, TryBroadbandExt, automatic_width}, }, - warn, Err, Error, Result, + warn, }; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use itertools::Itertools; use ruma::{ + CanonicalJsonObject, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, ServerName, UserId, api::{ client::error::ErrorKind, federation::transactions::{ @@ -31,17 +32,16 @@ use ruma::{ events::receipt::{ReceiptEvent, ReceiptEventContent, ReceiptType}, serde::Raw, to_device::DeviceIdOrAllDevices, - CanonicalJsonObject, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, ServerName, UserId, }; use service::{ - sending::{EDU_LIMIT, PDU_LIMIT}, Services, + sending::{EDU_LIMIT, PDU_LIMIT}, }; use utils::millis_since_unix_epoch; use crate::{ - utils::{self}, Ruma, + utils::{self}, }; type ResolvedMap = BTreeMap; diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index e81d7672..08fa3835 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -4,22 +4,22 @@ use std::borrow::Borrow; use axum::extract::State; use conduwuit::{ - at, err, + Err, Result, at, err, pdu::gen_event_id_canonical_json, utils::stream::{IterStream, TryBroadbandExt}, - warn, Err, Result, + warn, }; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ - api::federation::membership::create_join_event, - events::{ - room::member::{MembershipState, RoomMemberEventContent}, - StateEventType, - }, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, + api::federation::membership::create_join_event, + events::{ + StateEventType, + room::member::{MembershipState, RoomMemberEventContent}, + }, }; -use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; use service::Services; use crate::Ruma; diff --git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs index b07620af..1d4c2a6c 100644 --- a/src/api/server/send_knock.rs +++ b/src/api/server/send_knock.rs @@ -1,15 +1,15 @@ use axum::extract::State; -use conduwuit::{err, pdu::gen_event_id_canonical_json, warn, Err, PduEvent, Result}; +use conduwuit::{Err, PduEvent, Result, err, pdu::gen_event_id_canonical_json, warn}; use futures::FutureExt; use ruma::{ - api::federation::knock::send_knock, - events::{ - room::member::{MembershipState, RoomMemberEventContent}, - StateEventType, - }, - serde::JsonObject, OwnedServerName, OwnedUserId, RoomVersionId::*, + api::federation::knock::send_knock, + events::{ + StateEventType, + room::member::{MembershipState, RoomMemberEventContent}, + }, + serde::JsonObject, }; use crate::Ruma; diff --git a/src/api/server/send_leave.rs b/src/api/server/send_leave.rs index e955a267..71516553 100644 --- a/src/api/server/send_leave.rs +++ b/src/api/server/send_leave.rs @@ -1,21 +1,21 @@ #![allow(deprecated)] use axum::extract::State; -use conduwuit::{err, Err, Result}; +use conduwuit::{Err, Result, err}; use futures::FutureExt; use ruma::{ + OwnedRoomId, OwnedUserId, RoomId, ServerName, api::federation::membership::create_leave_event, events::{ - room::member::{MembershipState, RoomMemberEventContent}, StateEventType, + room::member::{MembershipState, RoomMemberEventContent}, }, - OwnedRoomId, OwnedUserId, RoomId, ServerName, }; use serde_json::value::RawValue as RawJsonValue; use crate::{ - service::{pdu::gen_event_id_canonical_json, Services}, Ruma, + service::{Services, pdu::gen_event_id_canonical_json}, }; /// # `PUT /_matrix/federation/v1/send_leave/{roomId}/{eventId}` 
diff --git a/src/api/server/state.rs b/src/api/server/state.rs index b16e61a0..8c786815 100644 --- a/src/api/server/state.rs +++ b/src/api/server/state.rs @@ -1,9 +1,9 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; -use conduwuit::{at, err, utils::IterStream, Result}; +use conduwuit::{Result, at, err, utils::IterStream}; use futures::{FutureExt, StreamExt, TryStreamExt}; -use ruma::{api::federation::event::get_room_state, OwnedEventId}; +use ruma::{OwnedEventId, api::federation::event::get_room_state}; use super::AccessCheck; use crate::Ruma; diff --git a/src/api/server/state_ids.rs b/src/api/server/state_ids.rs index 7d0440bf..648d4575 100644 --- a/src/api/server/state_ids.rs +++ b/src/api/server/state_ids.rs @@ -1,9 +1,9 @@ use std::{borrow::Borrow, iter::once}; use axum::extract::State; -use conduwuit::{at, err, Result}; +use conduwuit::{Result, at, err}; use futures::{StreamExt, TryStreamExt}; -use ruma::{api::federation::event::get_room_state_ids, OwnedEventId}; +use ruma::{OwnedEventId, api::federation::event::get_room_state_ids}; use super::AccessCheck; use crate::Ruma; diff --git a/src/api/server/user.rs b/src/api/server/user.rs index 321d0b66..80c353ab 100644 --- a/src/api/server/user.rs +++ b/src/api/server/user.rs @@ -10,8 +10,8 @@ use ruma::api::{ }; use crate::{ - client::{claim_keys_helper, get_keys_helper}, Ruma, + client::{claim_keys_helper, get_keys_helper}, }; /// # `GET /_matrix/federation/v1/user/devices/{userId}` diff --git a/src/api/server/utils.rs b/src/api/server/utils.rs index 4f3fa245..5696e44b 100644 --- a/src/api/server/utils.rs +++ b/src/api/server/utils.rs @@ -1,6 +1,6 @@ -use conduwuit::{implement, is_false, Err, Result}; +use conduwuit::{Err, Result, implement, is_false}; use conduwuit_service::Services; -use futures::{future::OptionFuture, join, FutureExt, StreamExt}; +use futures::{FutureExt, StreamExt, future::OptionFuture, join}; use ruma::{EventId, RoomId, ServerName}; pub(super) struct AccessCheck<'a> { diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index 57143e85..6870c1c0 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -2,7 +2,7 @@ use std::{ cell::OnceCell, - ffi::{c_char, c_void, CStr}, + ffi::{CStr, c_char, c_void}, fmt::Debug, sync::RwLock, }; @@ -14,9 +14,8 @@ use tikv_jemalloc_sys as ffi; use tikv_jemallocator as jemalloc; use crate::{ - err, is_equal_to, is_nonzero, + Result, err, is_equal_to, is_nonzero, utils::{math, math::Tried}, - Result, }; #[cfg(feature = "jemalloc_conf")] @@ -128,7 +127,7 @@ unsafe extern "C" fn malloc_stats_cb(opaque: *mut c_void, msg: *const c_char) { } macro_rules! mallctl { - ($name:expr) => {{ + ($name:expr_2021) => {{ thread_local! { static KEY: OnceCell<Key> = OnceCell::default(); }; @@ -141,7 +140,7 @@ macro_rules! mallctl { } pub mod this_thread { - use super::{is_nonzero, key, math, Debug, Key, OnceCell, Result}; + use super::{Debug, Key, OnceCell, Result, is_nonzero, key, math}; thread_local! 
{ static ALLOCATED_BYTES: OnceCell<&'static u64> = const { OnceCell::new() }; @@ -261,18 +260,18 @@ pub fn decay<I: Into<Option<usize>>>(arena: I) -> Result { } pub fn set_muzzy_decay<I: Into<Option<usize>>>(arena: I, decay_ms: isize) -> Result { - if let Some(arena) = arena.into() { - set_by_arena(Some(arena), mallctl!("arena.4096.muzzy_decay_ms"), decay_ms) - } else { - set(&mallctl!("arenas.muzzy_decay_ms"), decay_ms) + match arena.into() { + | Some(arena) => + set_by_arena(Some(arena), mallctl!("arena.4096.muzzy_decay_ms"), decay_ms), + | _ => set(&mallctl!("arenas.muzzy_decay_ms"), decay_ms), } } pub fn set_dirty_decay<I: Into<Option<usize>>>(arena: I, decay_ms: isize) -> Result { - if let Some(arena) = arena.into() { - set_by_arena(Some(arena), mallctl!("arena.4096.dirty_decay_ms"), decay_ms) - } else { - set(&mallctl!("arenas.dirty_decay_ms"), decay_ms) + match arena.into() { + | Some(arena) => + set_by_arena(Some(arena), mallctl!("arena.4096.dirty_decay_ms"), decay_ms), + | _ => set(&mallctl!("arenas.dirty_decay_ms"), decay_ms), } } diff --git a/src/core/config/check.rs b/src/core/config/check.rs index 5532c5a2..488f7f94 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -4,7 +4,7 @@ use either::Either; use figment::Figment; use super::DEPRECATED_KEYS; -use crate::{debug, debug_info, debug_warn, error, warn, Config, Err, Result, Server}; +use crate::{Config, Err, Result, Server, debug, debug_info, debug_warn, error, warn}; /// Performs check() with additional checks specific to reloading old config /// with new config. diff --git a/src/core/config/manager.rs b/src/core/config/manager.rs index 0c95ca15..e55916ba 100644 --- a/src/core/config/manager.rs +++ b/src/core/config/manager.rs @@ -4,13 +4,13 @@ use std::{ ptr, ptr::null_mut, sync::{ - atomic::{AtomicPtr, Ordering}, Arc, + Arc, + atomic::{AtomicPtr, Ordering}, }, }; use super::Config; -use crate::{implement, Result}; +use crate::{Result, implement}; /// The configuration manager is an indirection to reload the configuration for /// the server while it is running. In order to not burden or clutter the many diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index e66532ee..67c3b95c 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -14,18 +14,18 @@ use either::{ Either::{Left, Right}, }; use figment::providers::{Env, Format, Toml}; -pub use figment::{value::Value as FigmentValue, Figment}; +pub use figment::{Figment, value::Value as FigmentValue}; use regex::RegexSet; use ruma::{ - api::client::discovery::discover_support::ContactRole, OwnedRoomOrAliasId, OwnedServerName, - OwnedUserId, RoomVersionId, + OwnedRoomOrAliasId, OwnedServerName, OwnedUserId, RoomVersionId, + api::client::discovery::discover_support::ContactRole, }; -use serde::{de::IgnoredAny, Deserialize}; +use serde::{Deserialize, de::IgnoredAny}; use url::Url; use self::proxy::ProxyConfig; pub use self::{check::check, manager::Manager}; -use crate::{err, error::Error, utils::sys, Result}; +use crate::{Result, err, error::Error, utils::sys}; /// All the config options for conduwuit. #[allow(clippy::struct_excessive_bools)] diff --git a/src/core/debug.rs b/src/core/debug.rs index 8a5eccfd..b9a53038 100644 --- a/src/core/debug.rs +++ b/src/core/debug.rs @@ -13,7 +13,7 @@ pub use crate::{result::DebugInspect, utils::debug::*}; /// In release-mode it becomes DEBUG level, and possibly subject to elision. #[macro_export] macro_rules! 
debug_event { - ( $level:expr, $($x:tt)+ ) => { + ( $level:expr_2021, $($x:tt)+ ) => { if $crate::debug::logging() { ::tracing::event!( $level, _debug = true, $($x)+ ) } else { diff --git a/src/core/error/err.rs b/src/core/error/err.rs index 60fa5bff..0962c4ee 100644 --- a/src/core/error/err.rs +++ b/src/core/error/err.rs @@ -165,10 +165,10 @@ macro_rules! err_lev { use std::{fmt, fmt::Write}; use tracing::{ - level_enabled, Callsite, Event, __macro_support, __tracing_log, + __macro_support, __tracing_log, Callsite, Event, Level, callsite::DefaultCallsite, field::{Field, ValueSet, Visit}, - Level, + level_enabled, }; struct Visitor<'a>(&'a mut String); diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index 16613b7e..02ab6fa3 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -152,8 +152,8 @@ impl Error { /// Generate the error message string. pub fn message(&self) -> String { match self { - | Self::Federation(ref origin, ref error) => format!("Answer from {origin}: {error}"), - | Self::Ruma(ref error) => response::ruma_error_message(error), + | Self::Federation(origin, error) => format!("Answer from {origin}: {error}"), + | Self::Ruma(error) => response::ruma_error_message(error), | _ => format!("{self}"), } } diff --git a/src/core/error/panic.rs b/src/core/error/panic.rs index c6a83ae0..2e63105b 100644 --- a/src/core/error/panic.rs +++ b/src/core/error/panic.rs @@ -1,6 +1,6 @@ use std::{ any::Any, - panic::{panic_any, RefUnwindSafe, UnwindSafe}, + panic::{RefUnwindSafe, UnwindSafe, panic_any}, }; use super::Error; diff --git a/src/core/error/response.rs b/src/core/error/response.rs index 75e4050d..00ade5ae 100644 --- a/src/core/error/response.rs +++ b/src/core/error/response.rs @@ -2,11 +2,11 @@ use bytes::BytesMut; use http::StatusCode; use http_body_util::Full; use ruma::api::{ + OutgoingResponse, client::{ error::{ErrorBody, ErrorKind}, uiaa::UiaaResponse, }, - OutgoingResponse, }; use super::Error; diff --git a/src/core/info/room_version.rs b/src/core/info/room_version.rs index b33a8562..51d5d3c6 100644 --- a/src/core/info/room_version.rs +++ b/src/core/info/room_version.rs @@ -2,7 +2,7 @@ use std::iter::once; -use ruma::{api::client::discovery::get_capabilities::RoomVersionStability, RoomVersionId}; +use ruma::{RoomVersionId, api::client::discovery::get_capabilities::RoomVersionStability}; use crate::{at, is_equal_to}; diff --git a/src/core/log/capture/data.rs b/src/core/log/capture/data.rs index 0ad7a6c2..a4a1225b 100644 --- a/src/core/log/capture/data.rs +++ b/src/core/log/capture/data.rs @@ -1,7 +1,7 @@ use tracing::Level; -use tracing_core::{span::Current, Event}; +use tracing_core::{Event, span::Current}; -use super::{layer::Value, Layer}; +use super::{Layer, layer::Value}; use crate::{info, utils::string::EMPTY}; pub struct Data<'a> { diff --git a/src/core/log/capture/util.rs b/src/core/log/capture/util.rs index 8bad4ba0..65524be5 100644 --- a/src/core/log/capture/util.rs +++ b/src/core/log/capture/util.rs @@ -1,7 +1,7 @@ use std::sync::{Arc, Mutex}; use super::{ - super::{fmt, Level}, + super::{Level, fmt}, Closure, Data, }; use crate::Result; diff --git a/src/core/log/console.rs b/src/core/log/console.rs index 1f04ba26..d91239ac 100644 --- a/src/core/log/console.rs +++ b/src/core/log/console.rs @@ -1,20 +1,20 @@ use std::{env, io, sync::LazyLock}; use tracing::{ - field::{Field, Visit}, Event, Level, Subscriber, + field::{Field, Visit}, }; use tracing_subscriber::{ field::RecordFields, fmt, fmt::{ - format::{Compact, DefaultVisitor, Format, Full, 
Pretty, Writer}, FmtContext, FormatEvent, FormatFields, MakeWriter, + format::{Compact, DefaultVisitor, Format, Full, Pretty, Writer}, }, registry::LookupSpan, }; -use crate::{apply, Config, Result}; +use crate::{Config, Result, apply}; static SYSTEMD_MODE: LazyLock<bool> = LazyLock::new(|| env::var("SYSTEMD_EXEC_PID").is_ok() && env::var("JOURNAL_STREAM").is_ok()); diff --git a/src/core/log/fmt.rs b/src/core/log/fmt.rs index 353d4442..b73d0c9b 100644 --- a/src/core/log/fmt.rs +++ b/src/core/log/fmt.rs @@ -1,6 +1,6 @@ use std::fmt::Write; -use super::{color, Level}; +use super::{Level, color}; use crate::Result; pub fn html<S>(out: &mut S, level: &Level, span: &str, msg: &str) -> Result<()> diff --git a/src/core/log/mod.rs b/src/core/log/mod.rs index 0c1840d0..5ac374e8 100644 --- a/src/core/log/mod.rs +++ b/src/core/log/mod.rs @@ -9,7 +9,7 @@ mod reload; mod suppress; pub use capture::Capture; -pub use console::{is_systemd_mode, ConsoleFormat, ConsoleWriter}; +pub use console::{ConsoleFormat, ConsoleWriter, is_systemd_mode}; pub use reload::{LogLevelReloadHandles, ReloadHandle}; pub use suppress::Suppress; pub use tracing::Level; @@ -34,7 +34,7 @@ pub struct Log { #[macro_export] macro_rules! event { - ( $level:expr, $($x:tt)+ ) => { ::tracing::event!( $level, $($x)+ ) } + ( $level:expr_2021, $($x:tt)+ ) => { ::tracing::event!( $level, $($x)+ ) } } #[macro_export] diff --git a/src/core/log/reload.rs b/src/core/log/reload.rs index 12d14f48..e6a16c9f 100644 --- a/src/core/log/reload.rs +++ b/src/core/log/reload.rs @@ -3,9 +3,9 @@ use std::{ sync::{Arc, Mutex}, }; -use tracing_subscriber::{reload, EnvFilter}; +use tracing_subscriber::{EnvFilter, reload}; -use crate::{error, Result}; +use crate::{Result, error}; /// We need to store a reload::Handle value, but can't name it's type explicitly /// because the S type parameter depends on the subscriber's previous layers. 
In diff --git a/src/core/mods/module.rs b/src/core/mods/module.rs index ff181e4f..b65bbca2 100644 --- a/src/core/mods/module.rs +++ b/src/core/mods/module.rs @@ -3,8 +3,8 @@ use std::{ time::SystemTime, }; -use super::{canary, new, path, Library, Symbol}; -use crate::{error, Result}; +use super::{Library, Symbol, canary, new, path}; +use crate::{Result, error}; pub struct Module { handle: Option, diff --git a/src/core/mods/new.rs b/src/core/mods/new.rs index 77d89af4..258fdedc 100644 --- a/src/core/mods/new.rs +++ b/src/core/mods/new.rs @@ -1,6 +1,6 @@ use std::ffi::OsStr; -use super::{path, Library}; +use super::{Library, path}; use crate::{Err, Result}; const OPEN_FLAGS: i32 = libloading::os::unix::RTLD_LAZY | libloading::os::unix::RTLD_GLOBAL; diff --git a/src/core/pdu/builder.rs b/src/core/pdu/builder.rs index 0efee128..5aa0c9ca 100644 --- a/src/core/pdu/builder.rs +++ b/src/core/pdu/builder.rs @@ -1,11 +1,11 @@ use std::collections::BTreeMap; use ruma::{ - events::{EventContent, MessageLikeEventType, StateEventType, TimelineEventType}, MilliSecondsSinceUnixEpoch, OwnedEventId, + events::{EventContent, MessageLikeEventType, StateEventType, TimelineEventType}, }; use serde::Deserialize; -use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; use super::StateKey; diff --git a/src/core/pdu/content.rs b/src/core/pdu/content.rs index fa724cb2..4e60ce6e 100644 --- a/src/core/pdu/content.rs +++ b/src/core/pdu/content.rs @@ -1,7 +1,7 @@ use serde::Deserialize; use serde_json::value::Value as JsonValue; -use crate::{err, implement, Result}; +use crate::{Result, err, implement}; #[must_use] #[implement(super::Pdu)] diff --git a/src/core/pdu/count.rs b/src/core/pdu/count.rs index 0135cf28..b880278f 100644 --- a/src/core/pdu/count.rs +++ b/src/core/pdu/count.rs @@ -4,7 +4,7 @@ use std::{cmp::Ordering, fmt, fmt::Display, str::FromStr}; use ruma::api::Direction; -use crate::{err, Error, Result}; +use crate::{Error, Result, err}; #[derive(Hash, PartialEq, Eq, Clone, Copy, Debug)] pub enum Count { diff --git a/src/core/pdu/event.rs b/src/core/pdu/event.rs index d5c0561e..09ad1666 100644 --- a/src/core/pdu/event.rs +++ b/src/core/pdu/event.rs @@ -1,4 +1,4 @@ -use ruma::{events::TimelineEventType, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId}; +use ruma::{MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, events::TimelineEventType}; use serde_json::value::RawValue as RawJsonValue; use super::Pdu; diff --git a/src/core/pdu/event_id.rs b/src/core/pdu/event_id.rs index 09b33edc..e9d868b1 100644 --- a/src/core/pdu/event_id.rs +++ b/src/core/pdu/event_id.rs @@ -1,7 +1,7 @@ use ruma::{CanonicalJsonObject, OwnedEventId, RoomVersionId}; use serde_json::value::RawValue as RawJsonValue; -use crate::{err, Result}; +use crate::{Result, err}; /// Generates a correct eventId for the incoming pdu. 
/// diff --git a/src/core/pdu/mod.rs b/src/core/pdu/mod.rs index 9cb42239..9fb2a3da 100644 --- a/src/core/pdu/mod.rs +++ b/src/core/pdu/mod.rs @@ -17,13 +17,14 @@ mod unsigned; use std::cmp::Ordering; use ruma::{ - events::TimelineEventType, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, - OwnedRoomId, OwnedServerName, OwnedUserId, UInt, + CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, + OwnedUserId, UInt, events::TimelineEventType, }; use serde::{Deserialize, Serialize}; use serde_json::value::RawValue as RawJsonValue; pub use self::{ + Count as PduCount, Id as PduId, Pdu as PduEvent, RawId as RawPduId, builder::{Builder, Builder as PduBuilder}, count::Count, event::Event, @@ -31,7 +32,6 @@ pub use self::{ id::*, raw_id::*, state_key::{ShortStateKey, StateKey}, - Count as PduCount, Id as PduId, Pdu as PduEvent, RawId as RawPduId, }; use crate::Result; diff --git a/src/core/pdu/raw_id.rs b/src/core/pdu/raw_id.rs index e1fd2381..318a0cd7 100644 --- a/src/core/pdu/raw_id.rs +++ b/src/core/pdu/raw_id.rs @@ -55,8 +55,8 @@ impl RawId { #[must_use] pub fn as_bytes(&self) -> &[u8] { match self { - | Self::Normal(ref raw) => raw, - | Self::Backfilled(ref raw) => raw, + | Self::Normal(raw) => raw, + | Self::Backfilled(raw) => raw, } } } diff --git a/src/core/pdu/redact.rs b/src/core/pdu/redact.rs index 7c332719..409debfe 100644 --- a/src/core/pdu/redact.rs +++ b/src/core/pdu/redact.rs @@ -1,15 +1,15 @@ use ruma::{ - canonical_json::redact_content_in_place, - events::{room::redaction::RoomRedactionEventContent, TimelineEventType}, OwnedEventId, RoomVersionId, + canonical_json::redact_content_in_place, + events::{TimelineEventType, room::redaction::RoomRedactionEventContent}, }; use serde::Deserialize; use serde_json::{ json, - value::{to_raw_value, RawValue as RawJsonValue}, + value::{RawValue as RawJsonValue, to_raw_value}, }; -use crate::{implement, Error, Result}; +use crate::{Error, Result, implement}; #[derive(Deserialize)] struct ExtractRedactedBecause { @@ -76,14 +76,21 @@ pub fn copy_redacts(&self) -> (Option<OwnedEventId>, Box<RawJsonValue>) { if let Ok(mut content) = serde_json::from_str::<RoomRedactionEventContent>(self.content.get()) { - if let Some(redacts) = content.redacts { - return (Some(redacts), self.content.clone()); - } else if let Some(redacts) = self.redacts.clone() { - content.redacts = Some(redacts); - return ( - self.redacts.clone(), - to_raw_value(&content).expect("Must be valid, we only added redacts field"), - ); + match content.redacts { + | Some(redacts) => { + return (Some(redacts), self.content.clone()); + }, + | _ => match self.redacts.clone() { + | Some(redacts) => { + content.redacts = Some(redacts); + return ( + self.redacts.clone(), + to_raw_value(&content) + .expect("Must be valid, we only added redacts field"), + ); + }, + | _ => {}, + }, } } } diff --git a/src/core/pdu/strip.rs b/src/core/pdu/strip.rs index 7d2fb1d6..4e7c5b83 100644 --- a/src/core/pdu/strip.rs +++ b/src/core/pdu/strip.rs @@ -1,8 +1,8 @@ use ruma::{ events::{ - room::member::RoomMemberEventContent, space::child::HierarchySpaceChildEvent, AnyEphemeralRoomEvent, AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent, AnySyncTimelineEvent, AnyTimelineEvent, StateEvent, + room::member::RoomMemberEventContent, space::child::HierarchySpaceChildEvent, }, serde::Raw, }; diff --git a/src/core/pdu/unsigned.rs b/src/core/pdu/unsigned.rs index 8482a48a..23897519 100644 --- a/src/core/pdu/unsigned.rs +++ b/src/core/pdu/unsigned.rs @@ -2,10 +2,10 @@ use 
std::collections::BTreeMap; use ruma::MilliSecondsSinceUnixEpoch; use serde::Deserialize; -use serde_json::value::{to_raw_value, RawValue as RawJsonValue, Value as JsonValue}; +use serde_json::value::{RawValue as RawJsonValue, Value as JsonValue, to_raw_value}; use super::Pdu; -use crate::{err, implement, is_true, Result}; +use crate::{Result, err, implement, is_true}; #[implement(Pdu)] pub fn remove_transaction_id(&mut self) -> Result { diff --git a/src/core/server.rs b/src/core/server.rs index 80493c94..b67759d6 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -1,7 +1,7 @@ use std::{ sync::{ - atomic::{AtomicBool, Ordering}, Arc, + atomic::{AtomicBool, Ordering}, }, time::SystemTime, }; @@ -9,7 +9,7 @@ use std::{ use ruma::OwnedServerName; use tokio::{runtime, sync::broadcast}; -use crate::{config, config::Config, log::Log, metrics::Metrics, Err, Result}; +use crate::{Err, Result, config, config::Config, log::Log, metrics::Metrics}; /// Server runtime state; public portion pub struct Server { diff --git a/src/core/state_res/event_auth.rs b/src/core/state_res/event_auth.rs index df2f8b36..4b8e55f3 100644 --- a/src/core/state_res/event_auth.rs +++ b/src/core/state_res/event_auth.rs @@ -1,10 +1,11 @@ use std::{borrow::Borrow, collections::BTreeSet}; use futures::{ - future::{join3, OptionFuture}, Future, + future::{OptionFuture, join3}, }; use ruma::{ + Int, OwnedUserId, RoomVersionId, UserId, events::room::{ create::RoomCreateEventContent, join_rules::{JoinRule, RoomJoinRulesEventContent}, @@ -14,21 +15,20 @@ use ruma::{ }, int, serde::{Base64, Raw}, - Int, OwnedUserId, RoomVersionId, UserId, }; use serde::{ - de::{Error as _, IgnoredAny}, Deserialize, + de::{Error as _, IgnoredAny}, }; use serde_json::{from_str as from_json_str, value::RawValue as RawJsonValue}; use super::{ + Error, Event, Result, StateEventType, StateKey, TimelineEventType, power_levels::{ deserialize_power_levels, deserialize_power_levels_content_fields, deserialize_power_levels_content_invite, deserialize_power_levels_content_redact, }, room_version::RoomVersion, - Error, Event, Result, StateEventType, StateKey, TimelineEventType, }; use crate::{debug, error, trace, warn}; @@ -394,28 +394,27 @@ where } // If type is m.room.third_party_invite - let sender_power_level = if let Some(pl) = &power_levels_event { - let content = deserialize_power_levels_content_fields(pl.content().get(), room_version)?; - if let Some(level) = content.get_user_power(sender) { - *level - } else { - content.users_default - } - } else { - // If no power level event found the creator gets 100 everyone else gets 0 - let is_creator = if room_version.use_room_create_sender { - room_create_event.sender() == sender - } else { - #[allow(deprecated)] - from_json_str::(room_create_event.content().get()) - .is_ok_and(|create| create.creator.unwrap() == *sender) - }; + let sender_power_level = match &power_levels_event { + | Some(pl) => { + let content = + deserialize_power_levels_content_fields(pl.content().get(), room_version)?; + match content.get_user_power(sender) { + | Some(level) => *level, + | _ => content.users_default, + } + }, + | _ => { + // If no power level event found the creator gets 100 everyone else gets 0 + let is_creator = if room_version.use_room_create_sender { + room_create_event.sender() == sender + } else { + #[allow(deprecated)] + from_json_str::(room_create_event.content().get()) + .is_ok_and(|create| create.creator.unwrap() == *sender) + }; - if is_creator { - int!(100) - } else { - int!(0) - } + if is_creator { 
int!(100) } else { int!(0) } + }, }; // Allow if and only if sender's current power level is greater than @@ -452,19 +451,21 @@ where if *incoming_event.event_type() == TimelineEventType::RoomPowerLevels { debug!("starting m.room.power_levels check"); - if let Some(required_pwr_lvl) = check_power_levels( + match check_power_levels( room_version, incoming_event, power_levels_event.as_ref(), sender_power_level, ) { - if !required_pwr_lvl { + | Some(required_pwr_lvl) => + if !required_pwr_lvl { + warn!("m.room.power_levels was not allowed"); + return Ok(false); + }, + | _ => { warn!("m.room.power_levels was not allowed"); return Ok(false); - } - } else { - warn!("m.room.power_levels was not allowed"); - return Ok(false); + }, } debug!("m.room.power_levels event allowed"); } @@ -576,10 +577,9 @@ fn valid_membership_change( let content = deserialize_power_levels_content_fields(pl.content().get(), room_version)?; - let user_pl = if let Some(level) = content.get_user_power(user_for_join_auth) { - *level - } else { - content.users_default + let user_pl = match content.get_user_power(user_for_join_auth) { + | Some(level) => *level, + | _ => content.users_default, }; (user_pl, invite) @@ -665,45 +665,48 @@ fn valid_membership_change( }, | MembershipState::Invite => { // If content has third_party_invite key - if let Some(tp_id) = third_party_invite.and_then(|i| i.deserialize().ok()) { - if target_user_current_membership == MembershipState::Ban { - warn!(?target_user_membership_event_id, "Can't invite banned user"); - false - } else { - let allow = verify_third_party_invite( - Some(target_user), - sender, - &tp_id, - current_third_party_invite, - ); - if !allow { - warn!("Third party invite invalid"); - } - allow - } - } else if !sender_is_joined - || target_user_current_membership == MembershipState::Join - || target_user_current_membership == MembershipState::Ban - { - warn!( - ?target_user_membership_event_id, - ?sender_membership_event_id, - "Can't invite user if sender not joined or the user is currently joined or \ - banned", - ); - false - } else { - let allow = sender_power - .filter(|&p| p >= &power_levels.invite) - .is_some(); - if !allow { - warn!( - ?target_user_membership_event_id, - ?power_levels_event_id, - "User does not have enough power to invite", - ); - } - allow + match third_party_invite.and_then(|i| i.deserialize().ok()) { + | Some(tp_id) => + if target_user_current_membership == MembershipState::Ban { + warn!(?target_user_membership_event_id, "Can't invite banned user"); + false + } else { + let allow = verify_third_party_invite( + Some(target_user), + sender, + &tp_id, + current_third_party_invite, + ); + if !allow { + warn!("Third party invite invalid"); + } + allow + }, + | _ => + if !sender_is_joined + || target_user_current_membership == MembershipState::Join + || target_user_current_membership == MembershipState::Ban + { + warn!( + ?target_user_membership_event_id, + ?sender_membership_event_id, + "Can't invite user if sender not joined or the user is currently \ + joined or banned", + ); + false + } else { + let allow = sender_power + .filter(|&p| p >= &power_levels.invite) + .is_some(); + if !allow { + warn!( + ?target_user_membership_event_id, + ?power_levels_event_id, + "User does not have enough power to invite", + ); + } + allow + }, } }, | MembershipState::Leave => @@ -1111,23 +1114,23 @@ mod tests { use std::sync::Arc; use ruma::events::{ + StateEventType, TimelineEventType, room::{ join_rules::{ AllowRule, JoinRule, Restricted, RoomJoinRulesEventContent, 
RoomMembership, }, member::{MembershipState, RoomMemberEventContent}, }, - StateEventType, TimelineEventType, }; use serde_json::value::to_raw_value as to_raw_json_value; use crate::state_res::{ + Event, EventTypeExt, RoomVersion, StateMap, event_auth::valid_membership_change, test_utils::{ - alice, charlie, ella, event_id, member_content_ban, member_content_join, room_id, - to_pdu_event, PduEvent, INITIAL_EVENTS, INITIAL_EVENTS_CREATE_ROOM, + INITIAL_EVENTS, INITIAL_EVENTS_CREATE_ROOM, PduEvent, alice, charlie, ella, event_id, + member_content_ban, member_content_join, room_id, to_pdu_event, }, - Event, EventTypeExt, RoomVersion, StateMap, }; #[test] @@ -1156,21 +1159,23 @@ mod tests { let target_user = charlie(); let sender = alice(); - assert!(valid_membership_change( - &RoomVersion::V6, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - None, - &MembershipState::Leave, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap()); + assert!( + valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + None, + &MembershipState::Leave, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); } #[test] @@ -1199,21 +1204,23 @@ mod tests { let target_user = charlie(); let sender = charlie(); - assert!(!valid_membership_change( - &RoomVersion::V6, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - None, - &MembershipState::Leave, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap()); + assert!( + !valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + None, + &MembershipState::Leave, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); } #[test] @@ -1242,21 +1249,23 @@ mod tests { let target_user = alice(); let sender = alice(); - assert!(valid_membership_change( - &RoomVersion::V6, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - None, - &MembershipState::Leave, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - 
) - .unwrap()); + assert!( + valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + None, + &MembershipState::Leave, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); } #[test] @@ -1285,21 +1294,23 @@ mod tests { let target_user = alice(); let sender = charlie(); - assert!(!valid_membership_change( - &RoomVersion::V6, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - None, - &MembershipState::Leave, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap()); + assert!( + !valid_membership_change( + &RoomVersion::V6, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + None, + &MembershipState::Leave, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); } #[test] @@ -1345,37 +1356,41 @@ mod tests { let target_user = ella(); let sender = ella(); - assert!(valid_membership_change( - &RoomVersion::V9, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - Some(alice()), - &MembershipState::Join, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap()); + assert!( + valid_membership_change( + &RoomVersion::V9, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + Some(alice()), + &MembershipState::Join, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); - assert!(!valid_membership_change( - &RoomVersion::V9, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - Some(ella()), - &MembershipState::Leave, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap()); + assert!( + !valid_membership_change( + &RoomVersion::V9, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, 
+ fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + Some(ella()), + &MembershipState::Leave, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); } #[test] @@ -1413,20 +1428,22 @@ mod tests { let target_user = ella(); let sender = ella(); - assert!(valid_membership_change( - &RoomVersion::V7, - target_user, - fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), - sender, - fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), - &requester, - None::<&PduEvent>, - fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), - fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), - None, - &MembershipState::Leave, - &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), - ) - .unwrap()); + assert!( + valid_membership_change( + &RoomVersion::V7, + target_user, + fetch_state(StateEventType::RoomMember, target_user.as_str().into()).as_ref(), + sender, + fetch_state(StateEventType::RoomMember, sender.as_str().into()).as_ref(), + &requester, + None::<&PduEvent>, + fetch_state(StateEventType::RoomPowerLevels, "".into()).as_ref(), + fetch_state(StateEventType::RoomJoinRules, "".into()).as_ref(), + None, + &MembershipState::Leave, + &fetch_state(StateEventType::RoomCreate, "".into()).unwrap(), + ) + .unwrap() + ); } } diff --git a/src/core/state_res/mod.rs b/src/core/state_res/mod.rs index 19ea3cc0..6bff0cf8 100644 --- a/src/core/state_res/mod.rs +++ b/src/core/state_res/mod.rs @@ -17,13 +17,14 @@ use std::{ hash::{BuildHasher, Hash}, }; -use futures::{future, stream, Future, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; +use futures::{Future, FutureExt, StreamExt, TryFutureExt, TryStreamExt, future, stream}; use ruma::{ + EventId, Int, MilliSecondsSinceUnixEpoch, RoomVersionId, events::{ - room::member::{MembershipState, RoomMemberEventContent}, StateEventType, TimelineEventType, + room::member::{MembershipState, RoomMemberEventContent}, }, - int, EventId, Int, MilliSecondsSinceUnixEpoch, RoomVersionId, + int, }; use serde_json::from_str as from_json_str; @@ -263,7 +264,7 @@ where #[allow(clippy::arithmetic_side_effects)] fn get_auth_chain_diff( auth_chain_sets: &[HashSet], -) -> impl Iterator + Send +) -> impl Iterator + Send + use where Id: Clone + Eq + Hash + Send, Hasher: BuildHasher + Send + Sync, @@ -864,23 +865,23 @@ mod tests { use maplit::{hashmap, hashset}; use rand::seq::SliceRandom; use ruma::{ + MilliSecondsSinceUnixEpoch, OwnedEventId, RoomVersionId, events::{ - room::join_rules::{JoinRule, RoomJoinRulesEventContent}, StateEventType, TimelineEventType, + room::join_rules::{JoinRule, RoomJoinRulesEventContent}, }, - int, uint, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomVersionId, + int, uint, }; use serde_json::{json, value::to_raw_value as to_raw_json_value}; use super::{ - is_power_event, + Event, EventTypeExt, StateMap, is_power_event, room_version::RoomVersion, test_utils::{ - alice, bob, charlie, do_check, ella, event_id, member_content_ban, - member_content_join, room_id, to_init_pdu_event, to_pdu_event, zara, PduEvent, - TestStore, INITIAL_EVENTS, + INITIAL_EVENTS, PduEvent, TestStore, alice, bob, charlie, do_check, ella, event_id, + member_content_ban, member_content_join, room_id, to_init_pdu_event, to_pdu_event, + zara, }, - Event, EventTypeExt, StateMap, }; use crate::debug; @@ 
-1557,7 +1558,7 @@ mod tests { } macro_rules! state_set { - ($($kind:expr => $key:expr => $id:expr),* $(,)?) => {{ + ($($kind:expr_2021 => $key:expr_2021 => $id:expr_2021),* $(,)?) => {{ #[allow(unused_mut)] let mut x = StateMap::new(); $( diff --git a/src/core/state_res/power_levels.rs b/src/core/state_res/power_levels.rs index e1768574..045b1666 100644 --- a/src/core/state_res/power_levels.rs +++ b/src/core/state_res/power_levels.rs @@ -1,16 +1,16 @@ use std::collections::BTreeMap; use ruma::{ - events::{room::power_levels::RoomPowerLevelsEventContent, TimelineEventType}, - power_levels::{default_power_level, NotificationPowerLevels}, + Int, OwnedUserId, UserId, + events::{TimelineEventType, room::power_levels::RoomPowerLevelsEventContent}, + power_levels::{NotificationPowerLevels, default_power_level}, serde::{ deserialize_v1_powerlevel, vec_deserialize_int_powerlevel_values, vec_deserialize_v1_powerlevel_values, }, - Int, OwnedUserId, UserId, }; use serde::Deserialize; -use serde_json::{from_str as from_json_str, Error}; +use serde_json::{Error, from_str as from_json_str}; use tracing::error; use super::{Result, RoomVersion}; diff --git a/src/core/state_res/state_event.rs b/src/core/state_res/state_event.rs index 2c038cfe..ac9e29d6 100644 --- a/src/core/state_res/state_event.rs +++ b/src/core/state_res/state_event.rs @@ -5,7 +5,7 @@ use std::{ sync::Arc, }; -use ruma::{events::TimelineEventType, EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId}; +use ruma::{EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId, events::TimelineEventType}; use serde_json::value::RawValue as RawJsonValue; /// Abstraction of a PDU so users can have their own PDU types. diff --git a/src/core/state_res/test_utils.rs b/src/core/state_res/test_utils.rs index 9c2b151f..d96ee927 100644 --- a/src/core/state_res/test_utils.rs +++ b/src/core/state_res/test_utils.rs @@ -2,33 +2,33 @@ use std::{ borrow::Borrow, collections::{BTreeMap, HashMap, HashSet}, sync::{ - atomic::{AtomicU64, Ordering::SeqCst}, Arc, + atomic::{AtomicU64, Ordering::SeqCst}, }, }; use futures::future::ready; use ruma::{ - event_id, + EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId, ServerSignatures, + UserId, event_id, events::{ + TimelineEventType, pdu::{EventHash, Pdu, RoomV3Pdu}, room::{ join_rules::{JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, }, - TimelineEventType, }, - int, room_id, uint, user_id, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, - RoomVersionId, ServerSignatures, UserId, + int, room_id, uint, user_id, }; use serde_json::{ json, - value::{to_raw_value as to_raw_json_value, RawValue as RawJsonValue}, + value::{RawValue as RawJsonValue, to_raw_value as to_raw_json_value}, }; pub(crate) use self::event::PduEvent; use super::auth_types_for_event; -use crate::{info, Event, EventTypeExt, Result, StateMap}; +use crate::{Event, EventTypeExt, Result, StateMap, info}; static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); @@ -584,8 +584,8 @@ pub(crate) fn INITIAL_EDGES() -> Vec { pub(crate) mod event { use ruma::{ - events::{pdu::Pdu, TimelineEventType}, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, + events::{TimelineEventType, pdu::Pdu}, }; use serde::{Deserialize, Serialize}; use serde_json::value::RawValue as RawJsonValue; diff --git a/src/core/utils/bytes.rs b/src/core/utils/bytes.rs index 40316440..04101be4 100644 --- a/src/core/utils/bytes.rs +++ b/src/core/utils/bytes.rs @@ -1,6 +1,6 @@ use bytesize::ByteSize; -use crate::{err, 
Result}; +use crate::{Result, err}; /// Parse a human-writable size string w/ si-unit suffix into integer #[inline] diff --git a/src/core/utils/defer.rs b/src/core/utils/defer.rs index 60243e97..4887d164 100644 --- a/src/core/utils/defer.rs +++ b/src/core/utils/defer.rs @@ -12,14 +12,14 @@ macro_rules! defer { let _defer_ = _Defer_ { closure: || $body }; }; - ($body:expr) => { + ($body:expr_2021) => { $crate::defer! {{ $body }} }; } #[macro_export] macro_rules! scope_restore { - ($val:ident, $ours:expr) => { + ($val:ident, $ours:expr_2021) => { let theirs = $crate::utils::exchange($val, $ours); $crate::defer! {{ *$val = theirs; }}; }; diff --git a/src/core/utils/future/bool_ext.rs b/src/core/utils/future/bool_ext.rs index 6cb2f1fe..c93c7dbc 100644 --- a/src/core/utils/future/bool_ext.rs +++ b/src/core/utils/future/bool_ext.rs @@ -3,8 +3,8 @@ use std::marker::Unpin; use futures::{ - future::{select_ok, try_join, try_join_all, try_select}, Future, FutureExt, + future::{select_ok, try_join, try_join_all, try_select}, }; pub trait BoolExt diff --git a/src/core/utils/future/ext_ext.rs b/src/core/utils/future/ext_ext.rs index 38decaae..219bb664 100644 --- a/src/core/utils/future/ext_ext.rs +++ b/src/core/utils/future/ext_ext.rs @@ -2,7 +2,7 @@ use std::marker::Unpin; -use futures::{future, future::Select, Future}; +use futures::{Future, future, future::Select}; /// This interface is not necessarily complete; feel free to add as-needed. pub trait ExtExt diff --git a/src/core/utils/future/mod.rs b/src/core/utils/future/mod.rs index 2198a84f..e1d96941 100644 --- a/src/core/utils/future/mod.rs +++ b/src/core/utils/future/mod.rs @@ -3,7 +3,7 @@ mod ext_ext; mod option_ext; mod try_ext_ext; -pub use bool_ext::{and, or, BoolExt}; +pub use bool_ext::{BoolExt, and, or}; pub use ext_ext::ExtExt; pub use option_ext::OptionExt; pub use try_ext_ext::TryExtExt; diff --git a/src/core/utils/future/option_ext.rs b/src/core/utils/future/option_ext.rs index ed61de56..d553e5dc 100644 --- a/src/core/utils/future/option_ext.rs +++ b/src/core/utils/future/option_ext.rs @@ -1,6 +1,6 @@ #![allow(clippy::wrong_self_convention)] -use futures::{future::OptionFuture, Future, FutureExt}; +use futures::{Future, FutureExt, future::OptionFuture}; pub trait OptionExt { fn is_none_or(self, f: impl FnOnce(&T) -> bool + Send) -> impl Future + Send; diff --git a/src/core/utils/future/try_ext_ext.rs b/src/core/utils/future/try_ext_ext.rs index aa3d72e4..b2114e56 100644 --- a/src/core/utils/future/try_ext_ext.rs +++ b/src/core/utils/future/try_ext_ext.rs @@ -7,9 +7,8 @@ use std::marker::Unpin; use futures::{ - future, + TryFuture, TryFutureExt, future, future::{MapOkOrElse, TrySelect, UnwrapOrElse}, - TryFuture, TryFutureExt, }; /// This interface is not necessarily complete; feel free to add as-needed. 
diff --git a/src/core/utils/hash/argon.rs b/src/core/utils/hash/argon.rs index 18146b47..66dfab75 100644 --- a/src/core/utils/hash/argon.rs +++ b/src/core/utils/hash/argon.rs @@ -1,11 +1,11 @@ use std::sync::OnceLock; use argon2::{ - password_hash, password_hash::SaltString, Algorithm, Argon2, Params, PasswordHash, - PasswordHasher, PasswordVerifier, Version, + Algorithm, Argon2, Params, PasswordHash, PasswordHasher, PasswordVerifier, Version, + password_hash, password_hash::SaltString, }; -use crate::{err, Error, Result}; +use crate::{Error, Result, err}; const M_COST: u32 = Params::DEFAULT_M_COST; // memory size in 1 KiB blocks const T_COST: u32 = Params::DEFAULT_T_COST; // nr of iterations diff --git a/src/core/utils/json.rs b/src/core/utils/json.rs index 4a3fec8f..3f2f225e 100644 --- a/src/core/utils/json.rs +++ b/src/core/utils/json.rs @@ -1,6 +1,6 @@ use std::{fmt, str::FromStr}; -use ruma::{canonical_json::try_from_json_map, CanonicalJsonError, CanonicalJsonObject}; +use ruma::{CanonicalJsonError, CanonicalJsonObject, canonical_json::try_from_json_map}; use crate::Result; diff --git a/src/core/utils/math.rs b/src/core/utils/math.rs index ed157daf..488f2a13 100644 --- a/src/core/utils/math.rs +++ b/src/core/utils/math.rs @@ -6,7 +6,7 @@ use std::{cmp, convert::TryFrom}; pub use checked_ops::checked_ops; pub use self::{expected::Expected, tried::Tried}; -use crate::{debug::type_name, err, Err, Error, Result}; +use crate::{Err, Error, Result, debug::type_name, err}; /// Checked arithmetic expression. Returns a Result #[macro_export] diff --git a/src/core/utils/math/tried.rs b/src/core/utils/math/tried.rs index 2006d2d5..09de731f 100644 --- a/src/core/utils/math/tried.rs +++ b/src/core/utils/math/tried.rs @@ -1,6 +1,6 @@ use num_traits::ops::checked::{CheckedAdd, CheckedDiv, CheckedMul, CheckedRem, CheckedSub}; -use crate::{checked, Result}; +use crate::{Result, checked}; pub trait Tried { #[inline] diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index c2d8ed45..53460c59 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -49,7 +49,7 @@ pub fn exchange(state: &mut T, source: T) -> T { std::mem::replace(state, sou #[macro_export] macro_rules! extract_variant { - ($e:expr, $variant:path) => { + ($e:expr_2021, $variant:path) => { match $e { | $variant(value) => Some(value), | _ => None, @@ -90,7 +90,7 @@ macro_rules! pair_of { ($decl, $decl) }; - ($init:expr) => { + ($init:expr_2021) => { ($init, $init) }; } @@ -134,7 +134,7 @@ macro_rules! is_equal_to { |x| x == $val }; - ($val:expr) => { + ($val:expr_2021) => { |x| x == $val }; } @@ -146,7 +146,7 @@ macro_rules! 
is_less_than { |x| x < $val }; - ($val:expr) => { + ($val:expr_2021) => { |x| x < $val }; } diff --git a/src/core/utils/mutex_map.rs b/src/core/utils/mutex_map.rs index 03a4adf1..01504ce6 100644 --- a/src/core/utils/mutex_map.rs +++ b/src/core/utils/mutex_map.rs @@ -6,7 +6,7 @@ use std::{ use tokio::sync::OwnedMutexGuard as Omg; -use crate::{err, Result}; +use crate::{Result, err}; /// Map of Mutexes pub struct MutexMap { diff --git a/src/core/utils/rand.rs b/src/core/utils/rand.rs index 1d289c6e..72487633 100644 --- a/src/core/utils/rand.rs +++ b/src/core/utils/rand.rs @@ -4,7 +4,7 @@ use std::{ }; use arrayvec::ArrayString; -use rand::{seq::SliceRandom, thread_rng, Rng}; +use rand::{Rng, seq::SliceRandom, thread_rng}; pub fn shuffle(vec: &mut [T]) { let mut rng = thread_rng(); diff --git a/src/core/utils/stream/broadband.rs b/src/core/utils/stream/broadband.rs index 282008e7..832f2638 100644 --- a/src/core/utils/stream/broadband.rs +++ b/src/core/utils/stream/broadband.rs @@ -3,11 +3,11 @@ use std::convert::identity; use futures::{ - stream::{Stream, StreamExt}, Future, + stream::{Stream, StreamExt}, }; -use super::{automatic_width, ReadyExt}; +use super::{ReadyExt, automatic_width}; /// Concurrency extensions to augment futures::StreamExt. broad_ combinators /// produce out-of-order diff --git a/src/core/utils/stream/cloned.rs b/src/core/utils/stream/cloned.rs index d6a0e647..b89e4695 100644 --- a/src/core/utils/stream/cloned.rs +++ b/src/core/utils/stream/cloned.rs @@ -1,6 +1,6 @@ use std::clone::Clone; -use futures::{stream::Map, Stream, StreamExt}; +use futures::{Stream, StreamExt, stream::Map}; pub trait Cloned<'a, T, S> where diff --git a/src/core/utils/stream/ignore.rs b/src/core/utils/stream/ignore.rs index 9baa00f3..37c89d9a 100644 --- a/src/core/utils/stream/ignore.rs +++ b/src/core/utils/stream/ignore.rs @@ -1,4 +1,4 @@ -use futures::{future::ready, Stream, StreamExt, TryStream}; +use futures::{Stream, StreamExt, TryStream, future::ready}; use crate::{Error, Result}; diff --git a/src/core/utils/stream/iter_stream.rs b/src/core/utils/stream/iter_stream.rs index 9077deac..e9a91b1c 100644 --- a/src/core/utils/stream/iter_stream.rs +++ b/src/core/utils/stream/iter_stream.rs @@ -1,7 +1,6 @@ use futures::{ - stream, + StreamExt, stream, stream::{Stream, TryStream}, - StreamExt, }; use crate::{Error, Result}; diff --git a/src/core/utils/stream/mod.rs b/src/core/utils/stream/mod.rs index 23455322..a356f05f 100644 --- a/src/core/utils/stream/mod.rs +++ b/src/core/utils/stream/mod.rs @@ -14,8 +14,8 @@ mod try_wideband; mod wideband; pub use band::{ - automatic_amplification, automatic_width, set_amplification, set_width, AMPLIFICATION_LIMIT, - WIDTH_LIMIT, + AMPLIFICATION_LIMIT, WIDTH_LIMIT, automatic_amplification, automatic_width, + set_amplification, set_width, }; pub use broadband::BroadbandExt; pub use cloned::Cloned; diff --git a/src/core/utils/stream/ready.rs b/src/core/utils/stream/ready.rs index d93187e9..dce7d378 100644 --- a/src/core/utils/stream/ready.rs +++ b/src/core/utils/stream/ready.rs @@ -2,7 +2,7 @@ #![allow(clippy::type_complexity)] use futures::{ - future::{ready, Ready}, + future::{Ready, ready}, stream::{ All, Any, Filter, FilterMap, Fold, ForEach, Scan, SkipWhile, Stream, StreamExt, TakeWhile, }, diff --git a/src/core/utils/stream/try_parallel.rs b/src/core/utils/stream/try_parallel.rs index 7f8a63b1..60fef0ae 100644 --- a/src/core/utils/stream/try_parallel.rs +++ b/src/core/utils/stream/try_parallel.rs @@ -1,10 +1,10 @@ //! 
Parallelism stream combinator extensions to futures::Stream -use futures::{stream::TryStream, TryFutureExt}; +use futures::{TryFutureExt, stream::TryStream}; use tokio::{runtime, task::JoinError}; use super::TryBroadbandExt; -use crate::{utils::sys::available_parallelism, Error, Result}; +use crate::{Error, Result, utils::sys::available_parallelism}; /// Parallelism extensions to augment futures::StreamExt. These combinators are /// for computation-oriented workloads, unlike -band combinators for I/O diff --git a/src/core/utils/stream/try_ready.rs b/src/core/utils/stream/try_ready.rs index 3261acb6..611c177f 100644 --- a/src/core/utils/stream/try_ready.rs +++ b/src/core/utils/stream/try_ready.rs @@ -2,7 +2,7 @@ #![allow(clippy::type_complexity)] use futures::{ - future::{ready, Ready}, + future::{Ready, ready}, stream::{AndThen, TryFilterMap, TryFold, TryForEach, TryStream, TryStreamExt, TryTakeWhile}, }; diff --git a/src/core/utils/stream/try_tools.rs b/src/core/utils/stream/try_tools.rs index 3ddce6ad..ea3b50fc 100644 --- a/src/core/utils/stream/try_tools.rs +++ b/src/core/utils/stream/try_tools.rs @@ -1,7 +1,7 @@ //! TryStreamTools for futures::TryStream #![allow(clippy::type_complexity)] -use futures::{future, future::Ready, stream::TryTakeWhile, TryStream, TryStreamExt}; +use futures::{TryStream, TryStreamExt, future, future::Ready, stream::TryTakeWhile}; use crate::Result; diff --git a/src/core/utils/stream/wideband.rs b/src/core/utils/stream/wideband.rs index a8560bb4..cbebf610 100644 --- a/src/core/utils/stream/wideband.rs +++ b/src/core/utils/stream/wideband.rs @@ -3,11 +3,11 @@ use std::convert::identity; use futures::{ - stream::{Stream, StreamExt}, Future, + stream::{Stream, StreamExt}, }; -use super::{automatic_width, ReadyExt}; +use super::{ReadyExt, automatic_width}; /// Concurrency extensions to augment futures::StreamExt. wideband_ combinators /// produce in-order. diff --git a/src/core/utils/string.rs b/src/core/utils/string.rs index cc692c14..9340d009 100644 --- a/src/core/utils/string.rs +++ b/src/core/utils/string.rs @@ -5,7 +5,7 @@ mod unquote; mod unquoted; pub use self::{between::Between, split::SplitInfallible, unquote::Unquote, unquoted::Unquoted}; -use crate::{utils::exchange, Result}; +use crate::{Result, utils::exchange}; pub const EMPTY: &str = ""; diff --git a/src/core/utils/string/unquoted.rs b/src/core/utils/string/unquoted.rs index 5b002d99..88fa011f 100644 --- a/src/core/utils/string/unquoted.rs +++ b/src/core/utils/string/unquoted.rs @@ -1,9 +1,9 @@ use std::ops::Deref; -use serde::{de, Deserialize, Deserializer}; +use serde::{Deserialize, Deserializer, de}; use super::Unquote; -use crate::{err, Result}; +use crate::{Result, err}; /// Unquoted string which deserialized from a quoted string. Construction from a /// &str is infallible such that the input can already be unquoted. 
Construction diff --git a/src/core/utils/sys.rs b/src/core/utils/sys.rs index a0d5be52..f795ccb8 100644 --- a/src/core/utils/sys.rs +++ b/src/core/utils/sys.rs @@ -5,7 +5,7 @@ use std::path::PathBuf; pub use compute::available_parallelism; -use crate::{debug, Result}; +use crate::{Result, debug}; /// This is needed for opening lots of file descriptors, which tends to /// happen more often when using RocksDB and making lots of federation @@ -16,7 +16,7 @@ use crate::{debug, Result}; /// * #[cfg(unix)] pub fn maximize_fd_limit() -> Result<(), nix::errno::Errno> { - use nix::sys::resource::{getrlimit, setrlimit, Resource::RLIMIT_NOFILE as NOFILE}; + use nix::sys::resource::{Resource::RLIMIT_NOFILE as NOFILE, getrlimit, setrlimit}; let (soft_limit, hard_limit) = getrlimit(NOFILE)?; if soft_limit < hard_limit { diff --git a/src/core/utils/sys/compute.rs b/src/core/utils/sys/compute.rs index ce2aa504..5274cd66 100644 --- a/src/core/utils/sys/compute.rs +++ b/src/core/utils/sys/compute.rs @@ -2,7 +2,7 @@ use std::{cell::Cell, fmt::Debug, path::PathBuf, sync::LazyLock}; -use crate::{is_equal_to, Result}; +use crate::{Result, is_equal_to}; type Id = usize; @@ -45,7 +45,7 @@ pub fn set_affinity(mut ids: I) where I: Iterator + Clone + Debug, { - use core_affinity::{set_each_for_current, set_for_current, CoreId}; + use core_affinity::{CoreId, set_each_for_current, set_for_current}; let n = ids.clone().count(); let mask: Mask = ids.clone().fold(0, |mask, id| { @@ -118,7 +118,7 @@ pub fn cores_available() -> impl Iterator { from_mask(*CORES_AVAILABL #[cfg(target_os = "linux")] #[inline] pub fn getcpu() -> Result { - use crate::{utils::math, Error}; + use crate::{Error, utils::math}; // SAFETY: This is part of an interface with many low-level calls taking many // raw params, but it's unclear why this specific call is unsafe. 
Nevertheless diff --git a/src/core/utils/sys/storage.rs b/src/core/utils/sys/storage.rs index 25b17904..b11df7bb 100644 --- a/src/core/utils/sys/storage.rs +++ b/src/core/utils/sys/storage.rs @@ -3,7 +3,7 @@ use std::{ ffi::OsStr, fs, - fs::{read_to_string, FileType}, + fs::{FileType, read_to_string}, iter::IntoIterator, path::{Path, PathBuf}, }; @@ -11,9 +11,9 @@ use std::{ use libc::dev_t; use crate::{ + Result, result::FlatOk, utils::{result::LogDebugErr, string::SplitInfallible}, - Result, }; /// Device characteristics useful for random access throughput diff --git a/src/core/utils/tests.rs b/src/core/utils/tests.rs index 1bcb92b8..05a0655b 100644 --- a/src/core/utils/tests.rs +++ b/src/core/utils/tests.rs @@ -241,7 +241,7 @@ fn set_intersection_sorted_all() { #[tokio::test] async fn set_intersection_sorted_stream2() { use futures::StreamExt; - use utils::{set::intersection_sorted_stream2, IterStream}; + use utils::{IterStream, set::intersection_sorted_stream2}; let a = ["bar"]; let b = ["bar", "foo"]; diff --git a/src/core/utils/time.rs b/src/core/utils/time.rs index 81fdda2a..73f73971 100644 --- a/src/core/utils/time.rs +++ b/src/core/utils/time.rs @@ -2,7 +2,7 @@ pub mod exponential_backoff; use std::time::{Duration, SystemTime, UNIX_EPOCH}; -use crate::{err, Result}; +use crate::{Result, err}; #[inline] #[must_use] diff --git a/src/database/de.rs b/src/database/de.rs index 441bb4ec..9c0997ff 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -1,10 +1,9 @@ use conduwuit::{ - arrayvec::ArrayVec, checked, debug::DebugInspect, err, utils::string, Error, Result, + Error, Result, arrayvec::ArrayVec, checked, debug::DebugInspect, err, utils::string, }; use serde::{ - de, + Deserialize, de, de::{DeserializeSeed, Visitor}, - Deserialize, }; use crate::util::unhandled; diff --git a/src/database/engine.rs b/src/database/engine.rs index 22e2b9c8..38dd7512 100644 --- a/src/database/engine.rs +++ b/src/database/engine.rs @@ -12,21 +12,21 @@ mod repair; use std::{ ffi::CStr, sync::{ - atomic::{AtomicU32, Ordering}, Arc, + atomic::{AtomicU32, Ordering}, }, }; -use conduwuit::{debug, info, warn, Err, Result}; +use conduwuit::{Err, Result, debug, info, warn}; use rocksdb::{ AsColumnFamilyRef, BoundColumnFamily, DBCommon, DBWithThreadMode, MultiThreaded, WaitForCompactOptions, }; use crate::{ + Context, pool::Pool, util::{map_err, result}, - Context, }; pub struct Engine { diff --git a/src/database/engine/backup.rs b/src/database/engine/backup.rs index db718c2c..bb110630 100644 --- a/src/database/engine/backup.rs +++ b/src/database/engine/backup.rs @@ -1,6 +1,6 @@ use std::fmt::Write; -use conduwuit::{error, implement, info, utils::time::rfc2822_from_seconds, warn, Result}; +use conduwuit::{Result, error, implement, info, utils::time::rfc2822_from_seconds, warn}; use rocksdb::backup::{BackupEngine, BackupEngineOptions}; use super::Engine; diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index 83bce08c..5ddb9473 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -1,4 +1,4 @@ -use conduwuit::{err, utils::math::Expected, Config, Result}; +use conduwuit::{Config, Result, err, utils::math::Expected}; use rocksdb::{ BlockBasedIndexType, BlockBasedOptions, BlockBasedPinningTier, Cache, DBCompressionType as CompressionType, DataBlockIndexType, LruCacheOptions, Options, @@ -6,7 +6,7 @@ use rocksdb::{ }; use super::descriptor::{CacheDisp, Descriptor}; -use crate::{util::map_err, Context}; +use crate::{Context, util::map_err}; pub(super) 
const SENTINEL_COMPRESSION_LEVEL: i32 = 32767; diff --git a/src/database/engine/context.rs b/src/database/engine/context.rs index 04e08854..380e37af 100644 --- a/src/database/engine/context.rs +++ b/src/database/engine/context.rs @@ -3,7 +3,7 @@ use std::{ sync::{Arc, Mutex}, }; -use conduwuit::{debug, utils::math::usize_from_f64, Result, Server}; +use conduwuit::{Result, Server, debug, utils::math::usize_from_f64}; use rocksdb::{Cache, Env, LruCacheOptions}; use crate::{or_else, pool::Pool}; diff --git a/src/database/engine/db_opts.rs b/src/database/engine/db_opts.rs index 6abeb4b0..18cec742 100644 --- a/src/database/engine/db_opts.rs +++ b/src/database/engine/db_opts.rs @@ -1,7 +1,7 @@ use std::{cmp, convert::TryFrom}; -use conduwuit::{utils, Config, Result}; -use rocksdb::{statistics::StatsLevel, Cache, DBRecoveryMode, Env, LogLevel, Options}; +use conduwuit::{Config, Result, utils}; +use rocksdb::{Cache, DBRecoveryMode, Env, LogLevel, Options, statistics::StatsLevel}; use super::{cf_opts::cache_size_f64, logger::handle as handle_log}; diff --git a/src/database/engine/files.rs b/src/database/engine/files.rs index 33d6fdc4..1f38a63c 100644 --- a/src/database/engine/files.rs +++ b/src/database/engine/files.rs @@ -1,11 +1,11 @@ -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use rocksdb::LiveFile as SstFile; use super::Engine; use crate::util::map_err; #[implement(Engine)] -pub fn file_list(&self) -> impl Iterator> + Send { +pub fn file_list(&self) -> impl Iterator> + Send + use<> { self.db .live_files() .map_err(map_err) diff --git a/src/database/engine/memory_usage.rs b/src/database/engine/memory_usage.rs index 01859815..9bb5c535 100644 --- a/src/database/engine/memory_usage.rs +++ b/src/database/engine/memory_usage.rs @@ -1,6 +1,6 @@ use std::fmt::Write; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use rocksdb::perf::get_memory_usage_stats; use super::Engine; diff --git a/src/database/engine/open.rs b/src/database/engine/open.rs index 59dabce1..24010c3a 100644 --- a/src/database/engine/open.rs +++ b/src/database/engine/open.rs @@ -1,20 +1,20 @@ use std::{ collections::BTreeSet, path::Path, - sync::{atomic::AtomicU32, Arc}, + sync::{Arc, atomic::AtomicU32}, }; -use conduwuit::{debug, implement, info, warn, Result}; +use conduwuit::{Result, debug, implement, info, warn}; use rocksdb::{ColumnFamilyDescriptor, Options}; use super::{ + Db, Engine, cf_opts::cf_options, db_opts::db_options, descriptor::{self, Descriptor}, repair::repair, - Db, Engine, }; -use crate::{or_else, Context}; +use crate::{Context, or_else}; #[implement(Engine)] #[tracing::instrument(skip_all)] diff --git a/src/database/engine/repair.rs b/src/database/engine/repair.rs index 61283904..aeec0caf 100644 --- a/src/database/engine/repair.rs +++ b/src/database/engine/repair.rs @@ -1,6 +1,6 @@ use std::path::PathBuf; -use conduwuit::{info, warn, Err, Result}; +use conduwuit::{Err, Result, info, warn}; use rocksdb::Options; use super::Db; diff --git a/src/database/handle.rs b/src/database/handle.rs index 43b57839..484e5618 100644 --- a/src/database/handle.rs +++ b/src/database/handle.rs @@ -4,7 +4,7 @@ use conduwuit::Result; use rocksdb::DBPinnableSlice; use serde::{Deserialize, Serialize, Serializer}; -use crate::{keyval::deserialize_val, Deserialized, Slice}; +use crate::{Deserialized, Slice, keyval::deserialize_val}; pub struct Handle<'a> { val: DBPinnableSlice<'a>, diff --git a/src/database/keyval.rs b/src/database/keyval.rs index f572d15f..6059cd53 100644 --- 
a/src/database/keyval.rs +++ b/src/database/keyval.rs @@ -1,4 +1,4 @@ -use conduwuit::{smallvec::SmallVec, Result}; +use conduwuit::{Result, smallvec::SmallVec}; use serde::{Deserialize, Serialize}; use crate::{de, ser}; diff --git a/src/database/map.rs b/src/database/map.rs index 37425ecf..c5a908ba 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -40,7 +40,7 @@ pub(crate) use self::options::{ read_options_default, write_options_default, }; pub use self::{get_batch::Get, qry_batch::Qry}; -use crate::{watchers::Watchers, Engine}; +use crate::{Engine, watchers::Watchers}; pub struct Map { name: &'static str, diff --git a/src/database/map/compact.rs b/src/database/map/compact.rs index c0381eb4..84476de6 100644 --- a/src/database/map/compact.rs +++ b/src/database/map/compact.rs @@ -1,4 +1,4 @@ -use conduwuit::{implement, Err, Result}; +use conduwuit::{Err, Result, implement}; use rocksdb::{BottommostLevelCompaction, CompactOptions}; use crate::keyval::KeyBuf; diff --git a/src/database/map/contains.rs b/src/database/map/contains.rs index 7a09b358..474818e8 100644 --- a/src/database/map/contains.rs +++ b/src/database/map/contains.rs @@ -1,10 +1,10 @@ use std::{convert::AsRef, fmt::Debug, future::Future, io::Write, sync::Arc}; use conduwuit::{ + Result, arrayvec::ArrayVec, err, implement, utils::{future::TryExtExt, result::FlatOk}, - Result, }; use futures::FutureExt; use serde::Serialize; @@ -16,7 +16,10 @@ use crate::{keyval::KeyBuf, ser}; /// - harder errors may not be reported #[inline] #[implement(super::Map)] -pub fn contains(self: &Arc, key: &K) -> impl Future + Send + '_ +pub fn contains( + self: &Arc, + key: &K, +) -> impl Future + Send + '_ + use<'_, K> where K: Serialize + ?Sized + Debug, { @@ -32,7 +35,7 @@ where pub fn acontains( self: &Arc, key: &K, -) -> impl Future + Send + '_ +) -> impl Future + Send + '_ + use<'_, MAX, K> where K: Serialize + ?Sized + Debug, { @@ -49,7 +52,7 @@ pub fn bcontains( self: &Arc, key: &K, buf: &mut B, -) -> impl Future + Send + '_ +) -> impl Future + Send + '_ + use<'_, K, B> where K: Serialize + ?Sized + Debug, B: Write + AsRef<[u8]>, @@ -62,7 +65,10 @@ where /// - key is raw #[inline] #[implement(super::Map)] -pub fn exists<'a, K>(self: &'a Arc, key: &K) -> impl Future + Send + 'a +pub fn exists<'a, K>( + self: &'a Arc, + key: &K, +) -> impl Future + Send + 'a + use<'a, K> where K: AsRef<[u8]> + ?Sized + Debug + 'a, { diff --git a/src/database/map/count.rs b/src/database/map/count.rs index 22b298b9..78f9e2e3 100644 --- a/src/database/map/count.rs +++ b/src/database/map/count.rs @@ -16,7 +16,10 @@ pub fn count(self: &Arc) -> impl Future + Send + '_ { /// - From is a structured key #[implement(super::Map)] #[inline] -pub fn count_from<'a, P>(self: &'a Arc, from: &P) -> impl Future + Send + 'a +pub fn count_from<'a, P>( + self: &'a Arc, + from: &P, +) -> impl Future + Send + 'a + use<'a, P> where P: Serialize + ?Sized + Debug + 'a, { @@ -46,7 +49,7 @@ where pub fn count_prefix<'a, P>( self: &'a Arc, prefix: &P, -) -> impl Future + Send + 'a +) -> impl Future + Send + 'a + use<'a, P> where P: Serialize + ?Sized + Debug + 'a, { diff --git a/src/database/map/get.rs b/src/database/map/get.rs index d6c65be2..0971fb17 100644 --- a/src/database/map/get.rs +++ b/src/database/map/get.rs @@ -1,20 +1,23 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{err, implement, utils::result::MapExpect, Err, Result}; -use futures::{future::ready, Future, FutureExt, TryFutureExt}; +use conduwuit::{Err, Result, err, implement, 
utils::result::MapExpect}; +use futures::{Future, FutureExt, TryFutureExt, future::ready}; use rocksdb::{DBPinnableSlice, ReadOptions}; use tokio::task; use crate::{ - util::{is_incomplete, map_err, or_else}, Handle, + util::{is_incomplete, map_err, or_else}, }; /// Fetch a value from the database into cache, returning a reference-handle /// asynchronously. The key is referenced directly to perform the query. #[implement(super::Map)] #[tracing::instrument(skip(self, key), fields(%self), level = "trace")] -pub fn get(self: &Arc, key: &K) -> impl Future>> + Send +pub fn get( + self: &Arc, + key: &K, +) -> impl Future>> + Send + use<'_, K> where K: AsRef<[u8]> + Debug + ?Sized, { diff --git a/src/database/map/get_batch.rs b/src/database/map/get_batch.rs index ab9c1dc8..e23a8848 100644 --- a/src/database/map/get_batch.rs +++ b/src/database/map/get_batch.rs @@ -1,12 +1,11 @@ use std::{convert::AsRef, sync::Arc}; use conduwuit::{ - implement, + Result, implement, utils::{ - stream::{automatic_amplification, automatic_width, WidebandExt}, IterStream, + stream::{WidebandExt, automatic_amplification, automatic_width}, }, - Result, }; use futures::{Stream, StreamExt, TryStreamExt}; use rocksdb::{DBPinnableSlice, ReadOptions}; @@ -64,7 +63,7 @@ where pub(crate) fn get_batch_cached<'a, I, K>( &self, keys: I, -) -> impl Iterator>>> + Send +) -> impl Iterator>>> + Send + use<'_, I, K> where I: Iterator + ExactSizeIterator + Send, K: AsRef<[u8]> + Send + ?Sized + Sync + 'a, @@ -78,7 +77,7 @@ where pub(crate) fn get_batch_blocking<'a, I, K>( &self, keys: I, -) -> impl Iterator>> + Send +) -> impl Iterator>> + Send + use<'_, I, K> where I: Iterator + ExactSizeIterator + Send, K: AsRef<[u8]> + Send + ?Sized + Sync + 'a, @@ -92,7 +91,7 @@ fn get_batch_blocking_opts<'a, I, K>( &self, keys: I, read_options: &ReadOptions, -) -> impl Iterator>, rocksdb::Error>> + Send +) -> impl Iterator>, rocksdb::Error>> + Send + use<'_, I, K> where I: Iterator + ExactSizeIterator + Send, K: AsRef<[u8]> + Send + ?Sized + Sync + 'a, diff --git a/src/database/map/keys.rs b/src/database/map/keys.rs index 2fe70f15..7ca932a5 100644 --- a/src/database/map/keys.rs +++ b/src/database/map/keys.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::Deserialize; diff --git a/src/database/map/keys_from.rs b/src/database/map/keys_from.rs index 76c76325..c9b1717a 100644 --- a/src/database/map/keys_from.rs +++ b/src/database/map/keys_from.rs @@ -1,13 +1,13 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::{Deserialize, Serialize}; use super::stream_from::is_cached; use crate::{ - keyval::{result_deserialize_key, serialize_key, Key}, + keyval::{Key, result_deserialize_key, serialize_key}, stream, }; @@ -15,7 +15,7 @@ use crate::{ pub fn keys_from<'a, K, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -25,7 +25,10 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self), level = "trace")] -pub fn keys_from_raw
<P>(self: &Arc, from: &P) -> impl Stream>> + Send +pub fn keys_from_raw<P>
( + self: &Arc, + from: &P, +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { @@ -37,7 +40,7 @@ where pub fn keys_raw_from<'a, K, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, @@ -47,7 +50,10 @@ where #[implement(super::Map)] #[tracing::instrument(skip(self, from), fields(%self), level = "trace")] -pub fn raw_keys_from
<P>(self: &Arc, from: &P) -> impl Stream>> + Send +pub fn raw_keys_from<P>
( + self: &Arc, + from: &P, +) -> impl Stream>> + Send + use<'_, P> where P: AsRef<[u8]> + ?Sized + Debug, { diff --git a/src/database/map/keys_prefix.rs b/src/database/map/keys_prefix.rs index 28bc7ccd..09dd79ac 100644 --- a/src/database/map/keys_prefix.rs +++ b/src/database/map/keys_prefix.rs @@ -1,16 +1,16 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; -use futures::{future, Stream, StreamExt, TryStreamExt}; +use conduwuit::{Result, implement}; +use futures::{Stream, StreamExt, TryStreamExt, future}; use serde::{Deserialize, Serialize}; -use crate::keyval::{result_deserialize_key, serialize_key, Key}; +use crate::keyval::{Key, result_deserialize_key, serialize_key}; #[implement(super::Map)] pub fn keys_prefix<'a, K, P>( self: &'a Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -24,7 +24,7 @@ where pub fn keys_prefix_raw
<P>
( self: &Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { diff --git a/src/database/map/qry.rs b/src/database/map/qry.rs index 178f4a61..c6f13c0b 100644 --- a/src/database/map/qry.rs +++ b/src/database/map/qry.rs @@ -1,17 +1,20 @@ use std::{convert::AsRef, fmt::Debug, io::Write, sync::Arc}; -use conduwuit::{arrayvec::ArrayVec, implement, Result}; +use conduwuit::{Result, arrayvec::ArrayVec, implement}; use futures::Future; use serde::Serialize; -use crate::{keyval::KeyBuf, ser, Handle}; +use crate::{Handle, keyval::KeyBuf, ser}; /// Fetch a value from the database into cache, returning a reference-handle /// asynchronously. The key is serialized into an allocated buffer to perform /// the query. #[implement(super::Map)] #[inline] -pub fn qry(self: &Arc, key: &K) -> impl Future>> + Send +pub fn qry( + self: &Arc, + key: &K, +) -> impl Future>> + Send + use<'_, K> where K: Serialize + ?Sized + Debug, { @@ -27,7 +30,7 @@ where pub fn aqry( self: &Arc, key: &K, -) -> impl Future>> + Send +) -> impl Future>> + Send + use<'_, MAX, K> where K: Serialize + ?Sized + Debug, { @@ -43,7 +46,7 @@ pub fn bqry( self: &Arc, key: &K, buf: &mut B, -) -> impl Future>> + Send +) -> impl Future>> + Send + use<'_, K, B> where K: Serialize + ?Sized + Debug, B: Write + AsRef<[u8]>, diff --git a/src/database/map/qry_batch.rs b/src/database/map/qry_batch.rs index 31817c48..f44d1c86 100644 --- a/src/database/map/qry_batch.rs +++ b/src/database/map/qry_batch.rs @@ -1,17 +1,16 @@ use std::{fmt::Debug, sync::Arc}; use conduwuit::{ - implement, + Result, implement, utils::{ - stream::{automatic_amplification, automatic_width, WidebandExt}, IterStream, + stream::{WidebandExt, automatic_amplification, automatic_width}, }, - Result, }; use futures::{Stream, StreamExt, TryStreamExt}; use serde::Serialize; -use crate::{keyval::KeyBuf, ser, Handle}; +use crate::{Handle, keyval::KeyBuf, ser}; pub trait Qry<'a, K, S> where diff --git a/src/database/map/rev_keys.rs b/src/database/map/rev_keys.rs index 21558a17..c00f3e55 100644 --- a/src/database/map/rev_keys.rs +++ b/src/database/map/rev_keys.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::Deserialize; diff --git a/src/database/map/rev_keys_from.rs b/src/database/map/rev_keys_from.rs index 65072337..04e457dc 100644 --- a/src/database/map/rev_keys_from.rs +++ b/src/database/map/rev_keys_from.rs @@ -1,13 +1,13 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::{Deserialize, Serialize}; use super::rev_stream_from::is_cached; use crate::{ - keyval::{result_deserialize_key, serialize_key, Key}, + keyval::{Key, result_deserialize_key, serialize_key}, stream, }; @@ -15,7 +15,7 @@ use crate::{ pub fn rev_keys_from<'a, K, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -29,7 +29,7 @@ where pub fn rev_keys_from_raw
<P>
( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { @@ -41,7 +41,7 @@ where pub fn rev_keys_raw_from<'a, K, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, @@ -55,7 +55,7 @@ where pub fn rev_raw_keys_from
<P>
( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: AsRef<[u8]> + ?Sized + Debug, { diff --git a/src/database/map/rev_keys_prefix.rs b/src/database/map/rev_keys_prefix.rs index fb29acaf..fbe9f9ca 100644 --- a/src/database/map/rev_keys_prefix.rs +++ b/src/database/map/rev_keys_prefix.rs @@ -1,16 +1,16 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; -use futures::{future, Stream, StreamExt, TryStreamExt}; +use conduwuit::{Result, implement}; +use futures::{Stream, StreamExt, TryStreamExt, future}; use serde::{Deserialize, Serialize}; -use crate::keyval::{result_deserialize_key, serialize_key, Key}; +use crate::keyval::{Key, result_deserialize_key, serialize_key}; #[implement(super::Map)] pub fn rev_keys_prefix<'a, K, P>( self: &'a Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -24,7 +24,7 @@ where pub fn rev_keys_prefix_raw
<P>
( self: &Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { diff --git a/src/database/map/rev_stream.rs b/src/database/map/rev_stream.rs index f55053be..fc2d1116 100644 --- a/src/database/map/rev_stream.rs +++ b/src/database/map/rev_stream.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::Deserialize; diff --git a/src/database/map/rev_stream_from.rs b/src/database/map/rev_stream_from.rs index ddc98607..d67986e7 100644 --- a/src/database/map/rev_stream_from.rs +++ b/src/database/map/rev_stream_from.rs @@ -1,13 +1,13 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::{Deserialize, Serialize}; use tokio::task; use crate::{ - keyval::{result_deserialize, serialize_key, KeyVal}, + keyval::{KeyVal, result_deserialize, serialize_key}, stream, util::is_incomplete, }; @@ -20,7 +20,7 @@ use crate::{ pub fn rev_stream_from<'a, K, V, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -39,7 +39,7 @@ where pub fn rev_stream_from_raw
<P>
( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { @@ -55,7 +55,7 @@ where pub fn rev_stream_raw_from<'a, K, V, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, @@ -74,7 +74,7 @@ where pub fn rev_raw_stream_from
<P>
( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: AsRef<[u8]> + ?Sized + Debug, { diff --git a/src/database/map/rev_stream_prefix.rs b/src/database/map/rev_stream_prefix.rs index 22a2ce53..46dc9247 100644 --- a/src/database/map/rev_stream_prefix.rs +++ b/src/database/map/rev_stream_prefix.rs @@ -1,10 +1,10 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; -use futures::{future, Stream, StreamExt, TryStreamExt}; +use conduwuit::{Result, implement}; +use futures::{Stream, StreamExt, TryStreamExt, future}; use serde::{Deserialize, Serialize}; -use crate::keyval::{result_deserialize, serialize_key, KeyVal}; +use crate::keyval::{KeyVal, result_deserialize, serialize_key}; /// Iterate key-value entries in the map where the key matches a prefix. /// @@ -14,7 +14,7 @@ use crate::keyval::{result_deserialize, serialize_key, KeyVal}; pub fn rev_stream_prefix<'a, K, V, P>( self: &'a Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -33,7 +33,7 @@ where pub fn rev_stream_prefix_raw
<P>
( self: &Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { diff --git a/src/database/map/stream.rs b/src/database/map/stream.rs index bfc8ba04..f1450b6f 100644 --- a/src/database/map/stream.rs +++ b/src/database/map/stream.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::Deserialize; diff --git a/src/database/map/stream_from.rs b/src/database/map/stream_from.rs index 74140a65..00c3a051 100644 --- a/src/database/map/stream_from.rs +++ b/src/database/map/stream_from.rs @@ -1,13 +1,13 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use rocksdb::Direction; use serde::{Deserialize, Serialize}; use tokio::task; use crate::{ - keyval::{result_deserialize, serialize_key, KeyVal}, + keyval::{KeyVal, result_deserialize, serialize_key}, stream, }; @@ -19,7 +19,7 @@ use crate::{ pub fn stream_from<'a, K, V, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -37,7 +37,7 @@ where pub fn stream_from_raw
<P>
( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { @@ -53,7 +53,7 @@ where pub fn stream_raw_from<'a, K, V, P>( self: &'a Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: AsRef<[u8]> + ?Sized + Debug + Sync, K: Deserialize<'a> + Send, @@ -71,7 +71,7 @@ where pub fn raw_stream_from
<P>
( self: &Arc, from: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: AsRef<[u8]> + ?Sized + Debug, { diff --git a/src/database/map/stream_prefix.rs b/src/database/map/stream_prefix.rs index adacfc81..a26478aa 100644 --- a/src/database/map/stream_prefix.rs +++ b/src/database/map/stream_prefix.rs @@ -1,10 +1,10 @@ use std::{convert::AsRef, fmt::Debug, sync::Arc}; -use conduwuit::{implement, Result}; -use futures::{future, Stream, StreamExt, TryStreamExt}; +use conduwuit::{Result, implement}; +use futures::{Stream, StreamExt, TryStreamExt, future}; use serde::{Deserialize, Serialize}; -use crate::keyval::{result_deserialize, serialize_key, KeyVal}; +use crate::keyval::{KeyVal, result_deserialize, serialize_key}; /// Iterate key-value entries in the map where the key matches a prefix. /// @@ -14,7 +14,7 @@ use crate::keyval::{result_deserialize, serialize_key, KeyVal}; pub fn stream_prefix<'a, K, V, P>( self: &'a Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'a, K, V, P> where P: Serialize + ?Sized + Debug, K: Deserialize<'a> + Send, @@ -33,7 +33,7 @@ where pub fn stream_prefix_raw
<P>
( self: &Arc, prefix: &P, -) -> impl Stream>> + Send +) -> impl Stream>> + Send + use<'_, P> where P: Serialize + ?Sized + Debug, { diff --git a/src/database/maps.rs b/src/database/maps.rs index fc216ee0..b060ab8d 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -3,8 +3,8 @@ use std::{collections::BTreeMap, sync::Arc}; use conduwuit::Result; use crate::{ - engine::descriptor::{self, CacheDisp, Descriptor}, Engine, Map, + engine::descriptor::{self, CacheDisp, Descriptor}, }; pub(super) type Maps = BTreeMap; diff --git a/src/database/mod.rs b/src/database/mod.rs index 4f8e2ad9..0481d1bd 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -23,18 +23,18 @@ mod watchers; use std::{ops::Index, sync::Arc}; -use conduwuit::{err, Result, Server}; +use conduwuit::{Result, Server, err}; pub use self::{ de::{Ignore, IgnoreAll}, deserialized::Deserialized, handle::Handle, - keyval::{serialize_key, serialize_val, KeyVal, Slice}, - map::{compact, Get, Map, Qry}, - ser::{serialize, serialize_to, serialize_to_vec, Cbor, Interfix, Json, Separator, SEP}, + keyval::{KeyVal, Slice, serialize_key, serialize_val}, + map::{Get, Map, Qry, compact}, + ser::{Cbor, Interfix, Json, SEP, Separator, serialize, serialize_to, serialize_to_vec}, }; pub(crate) use self::{ - engine::{context::Context, Engine}, + engine::{Engine, context::Context}, util::or_else, }; use crate::maps::{Maps, MapsKey, MapsVal}; diff --git a/src/database/pool.rs b/src/database/pool.rs index 7636ff5e..e6ed59ac 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -3,8 +3,8 @@ mod configure; use std::{ mem::take, sync::{ - atomic::{AtomicUsize, Ordering}, Arc, Mutex, + atomic::{AtomicUsize, Ordering}, }, thread, thread::JoinHandle, @@ -12,19 +12,18 @@ use std::{ use async_channel::{QueueStrategy, Receiver, RecvError, Sender}; use conduwuit::{ - debug, debug_warn, err, error, implement, + Error, Result, Server, debug, debug_warn, err, error, implement, result::DebugInspect, smallvec::SmallVec, trace, utils::sys::compute::{get_affinity, nth_core_available, set_affinity}, - Error, Result, Server, }; -use futures::{channel::oneshot, TryFutureExt}; +use futures::{TryFutureExt, channel::oneshot}; use oneshot::Sender as ResultSender; use rocksdb::Direction; use self::configure::configure; -use crate::{keyval::KeyBuf, stream, Handle, Map}; +use crate::{Handle, Map, keyval::KeyBuf, stream}; /// Frontend thread-pool. Operating system threads are used to make database /// requests which are not cached. 
These thread-blocking requests are offloaded diff --git a/src/database/pool/configure.rs b/src/database/pool/configure.rs index ff42ef51..92dda56e 100644 --- a/src/database/pool/configure.rs +++ b/src/database/pool/configure.rs @@ -1,7 +1,7 @@ use std::{path::PathBuf, sync::Arc}; use conduwuit::{ - debug, debug_info, expected, is_equal_to, + Server, debug, debug_info, expected, is_equal_to, utils::{ math::usize_from_f64, result::LogDebugErr, @@ -9,7 +9,6 @@ use conduwuit::{ stream::{AMPLIFICATION_LIMIT, WIDTH_LIMIT}, sys::{compute::is_core_available, storage}, }, - Server, }; use super::{QUEUE_LIMIT, WORKER_LIMIT}; diff --git a/src/database/ser.rs b/src/database/ser.rs index 372b7522..6dd2043d 100644 --- a/src/database/ser.rs +++ b/src/database/ser.rs @@ -1,7 +1,7 @@ use std::io::Write; -use conduwuit::{debug::type_name, err, result::DebugInspect, utils::exchange, Error, Result}; -use serde::{ser, Deserialize, Serialize}; +use conduwuit::{Error, Result, debug::type_name, err, result::DebugInspect, utils::exchange}; +use serde::{Deserialize, Serialize, ser}; use crate::util::unhandled; diff --git a/src/database/stream.rs b/src/database/stream.rs index f3063bb3..eb856b3f 100644 --- a/src/database/stream.rs +++ b/src/database/stream.rs @@ -5,15 +5,15 @@ mod keys_rev; use std::sync::Arc; -use conduwuit::{utils::exchange, Result}; +use conduwuit::{Result, utils::exchange}; use rocksdb::{DBRawIteratorWithThreadMode, ReadOptions}; pub(crate) use self::{items::Items, items_rev::ItemsRev, keys::Keys, keys_rev::KeysRev}; use crate::{ + Map, Slice, engine::Db, keyval::{Key, KeyVal, Val}, util::{is_incomplete, map_err}, - Map, Slice, }; pub(crate) struct State<'a> { diff --git a/src/database/stream/items.rs b/src/database/stream/items.rs index 8814419e..ede2b822 100644 --- a/src/database/stream/items.rs +++ b/src/database/stream/items.rs @@ -2,12 +2,12 @@ use std::pin::Pin; use conduwuit::Result; use futures::{ + Stream, stream::FusedStream, task::{Context, Poll}, - Stream, }; -use super::{keyval_longevity, Cursor, State}; +use super::{Cursor, State, keyval_longevity}; use crate::keyval::KeyVal; pub(crate) struct Items<'a> { diff --git a/src/database/stream/items_rev.rs b/src/database/stream/items_rev.rs index f6fcb0e5..dba8d16c 100644 --- a/src/database/stream/items_rev.rs +++ b/src/database/stream/items_rev.rs @@ -2,12 +2,12 @@ use std::pin::Pin; use conduwuit::Result; use futures::{ + Stream, stream::FusedStream, task::{Context, Poll}, - Stream, }; -use super::{keyval_longevity, Cursor, State}; +use super::{Cursor, State, keyval_longevity}; use crate::keyval::KeyVal; pub(crate) struct ItemsRev<'a> { diff --git a/src/database/stream/keys.rs b/src/database/stream/keys.rs index b953f51c..7c89869b 100644 --- a/src/database/stream/keys.rs +++ b/src/database/stream/keys.rs @@ -2,12 +2,12 @@ use std::pin::Pin; use conduwuit::Result; use futures::{ + Stream, stream::FusedStream, task::{Context, Poll}, - Stream, }; -use super::{slice_longevity, Cursor, State}; +use super::{Cursor, State, slice_longevity}; use crate::keyval::Key; pub(crate) struct Keys<'a> { diff --git a/src/database/stream/keys_rev.rs b/src/database/stream/keys_rev.rs index acf78d88..51561e5c 100644 --- a/src/database/stream/keys_rev.rs +++ b/src/database/stream/keys_rev.rs @@ -2,12 +2,12 @@ use std::pin::Pin; use conduwuit::Result; use futures::{ + Stream, stream::FusedStream, task::{Context, Poll}, - Stream, }; -use super::{slice_longevity, Cursor, State}; +use super::{Cursor, State, slice_longevity}; use crate::keyval::Key; pub(crate) 
struct KeysRev<'a> { diff --git a/src/database/tests.rs b/src/database/tests.rs index 594170e8..140bc56d 100644 --- a/src/database/tests.rs +++ b/src/database/tests.rs @@ -4,14 +4,13 @@ use std::fmt::Debug; use conduwuit::{ arrayvec::ArrayVec, - ruma::{serde::Raw, EventId, RoomId, UserId}, + ruma::{EventId, RoomId, UserId, serde::Raw}, }; use serde::Serialize; use crate::{ - de, ser, - ser::{serialize_to_vec, Json}, - Ignore, Interfix, + Ignore, Interfix, de, ser, + ser::{Json, serialize_to_vec}, }; #[test] diff --git a/src/database/watchers.rs b/src/database/watchers.rs index 9ce6f74c..be814f8c 100644 --- a/src/database/watchers.rs +++ b/src/database/watchers.rs @@ -1,5 +1,5 @@ use std::{ - collections::{hash_map, HashMap}, + collections::{HashMap, hash_map}, future::Future, pin::Pin, sync::RwLock, diff --git a/src/macros/admin.rs b/src/macros/admin.rs index e35bd586..bf1586a0 100644 --- a/src/macros/admin.rs +++ b/src/macros/admin.rs @@ -1,10 +1,10 @@ use itertools::Itertools; use proc_macro::{Span, TokenStream}; use proc_macro2::TokenStream as TokenStream2; -use quote::{quote, ToTokens}; -use syn::{parse_quote, Attribute, Error, Fields, Ident, ItemEnum, ItemFn, Meta, Variant}; +use quote::{ToTokens, quote}; +use syn::{Attribute, Error, Fields, Ident, ItemEnum, ItemFn, Meta, Variant, parse_quote}; -use crate::{utils::camel_to_snake_string, Result}; +use crate::{Result, utils::camel_to_snake_string}; pub(super) fn command(mut item: ItemFn, _args: &[Meta]) -> Result { let attr: Attribute = parse_quote! { diff --git a/src/macros/cargo.rs b/src/macros/cargo.rs index cd36658e..a452c672 100644 --- a/src/macros/cargo.rs +++ b/src/macros/cargo.rs @@ -4,7 +4,7 @@ use proc_macro::{Span, TokenStream}; use quote::quote; use syn::{Error, ItemConst, Meta}; -use crate::{utils, Result}; +use crate::{Result, utils}; pub(super) fn manifest(item: ItemConst, args: &[Meta]) -> Result { let member = utils::get_named_string(args, "crate"); diff --git a/src/macros/config.rs b/src/macros/config.rs index 50feefa8..07ac1c0a 100644 --- a/src/macros/config.rs +++ b/src/macros/config.rs @@ -2,15 +2,15 @@ use std::{collections::HashSet, fmt::Write as _, fs::OpenOptions, io::Write as _ use proc_macro::TokenStream; use proc_macro2::{Span, TokenStream as TokenStream2}; -use quote::{quote, ToTokens}; +use quote::{ToTokens, quote}; use syn::{ - parse::Parser, punctuated::Punctuated, spanned::Spanned, Error, Expr, ExprLit, Field, Fields, - FieldsNamed, ItemStruct, Lit, Meta, MetaList, MetaNameValue, Type, TypePath, + Error, Expr, ExprLit, Field, Fields, FieldsNamed, ItemStruct, Lit, Meta, MetaList, + MetaNameValue, Type, TypePath, parse::Parser, punctuated::Punctuated, spanned::Spanned, }; use crate::{ - utils::{get_simple_settings, is_cargo_build, is_cargo_test}, Result, + utils::{get_simple_settings, is_cargo_build, is_cargo_test}, }; const UNDOCUMENTED: &str = "# This item is undocumented. 
Please contribute documentation for it."; diff --git a/src/macros/implement.rs b/src/macros/implement.rs index 8d18f243..7acc12d2 100644 --- a/src/macros/implement.rs +++ b/src/macros/implement.rs @@ -3,7 +3,7 @@ use quote::quote; use syn::{Error, ItemFn, Meta, Path}; use utils::get_named_generics; -use crate::{utils, Result}; +use crate::{Result, utils}; pub(super) fn implement(item: ItemFn, args: &[Meta]) -> Result { let generics = get_named_generics(args, "generics")?; diff --git a/src/macros/mod.rs b/src/macros/mod.rs index 1aa1e24f..31a797fe 100644 --- a/src/macros/mod.rs +++ b/src/macros/mod.rs @@ -9,8 +9,9 @@ mod utils; use proc_macro::TokenStream; use syn::{ + Error, Item, ItemConst, ItemEnum, ItemFn, ItemStruct, Meta, parse::{Parse, Parser}, - parse_macro_input, Error, Item, ItemConst, ItemEnum, ItemFn, ItemStruct, Meta, + parse_macro_input, }; pub(crate) type Result = std::result::Result; diff --git a/src/macros/refutable.rs b/src/macros/refutable.rs index 66e0ebc3..acfc4cd5 100644 --- a/src/macros/refutable.rs +++ b/src/macros/refutable.rs @@ -1,5 +1,5 @@ use proc_macro::{Span, TokenStream}; -use quote::{quote, ToTokens}; +use quote::{ToTokens, quote}; use syn::{FnArg::Typed, Ident, ItemFn, Meta, Pat, PatIdent, PatType, Stmt}; use crate::Result; @@ -20,7 +20,7 @@ pub(super) fn refutable(mut item: ItemFn, _args: &[Meta]) -> Result let variant = &pat.path; let fields = &pat.fields; - let Some(Typed(PatType { ref mut pat, .. })) = sig.inputs.get_mut(i) else { + let Some(Typed(PatType { pat, .. })) = sig.inputs.get_mut(i) else { continue; }; diff --git a/src/macros/utils.rs b/src/macros/utils.rs index af2519a7..a45e5ecc 100644 --- a/src/macros/utils.rs +++ b/src/macros/utils.rs @@ -1,6 +1,6 @@ use std::collections::HashMap; -use syn::{parse_str, Expr, ExprLit, Generics, Lit, Meta, MetaNameValue}; +use syn::{Expr, ExprLit, Generics, Lit, Meta, MetaNameValue, parse_str}; use crate::Result; diff --git a/src/main/clap.rs b/src/main/clap.rs index 2bb6f3f2..c7f33bfe 100644 --- a/src/main/clap.rs +++ b/src/main/clap.rs @@ -4,10 +4,10 @@ use std::path::PathBuf; use clap::{ArgAction, Parser}; use conduwuit::{ + Err, Result, config::{Figment, FigmentValue}, err, toml, utils::available_parallelism, - Err, Result, }; /// Commandline arguments diff --git a/src/main/logging.rs b/src/main/logging.rs index 35e482de..7ce86d56 100644 --- a/src/main/logging.rs +++ b/src/main/logging.rs @@ -1,13 +1,13 @@ use std::sync::Arc; use conduwuit::{ + Result, config::Config, debug_warn, err, - log::{capture, fmt_span, ConsoleFormat, ConsoleWriter, LogLevelReloadHandles}, + log::{ConsoleFormat, ConsoleWriter, LogLevelReloadHandles, capture, fmt_span}, result::UnwrapOrErr, - Result, }; -use tracing_subscriber::{fmt, layer::SubscriberExt, reload, EnvFilter, Layer, Registry}; +use tracing_subscriber::{EnvFilter, Layer, Registry, fmt, layer::SubscriberExt, reload}; #[cfg(feature = "perf_measurements")] pub(crate) type TracingFlameGuard = diff --git a/src/main/main.rs b/src/main/main.rs index dacc2a2e..2bfc3c06 100644 --- a/src/main/main.rs +++ b/src/main/main.rs @@ -9,9 +9,9 @@ mod signal; extern crate conduwuit_core as conduwuit; -use std::sync::{atomic::Ordering, Arc}; +use std::sync::{Arc, atomic::Ordering}; -use conduwuit::{debug_info, error, rustc_flags_capture, Error, Result}; +use conduwuit::{Error, Result, debug_info, error, rustc_flags_capture}; use server::Server; rustc_flags_capture! 
{} diff --git a/src/main/mods.rs b/src/main/mods.rs index 9ab36e6c..6dc79b2f 100644 --- a/src/main/mods.rs +++ b/src/main/mods.rs @@ -6,10 +6,10 @@ extern crate conduwuit_service; use std::{ future::Future, pin::Pin, - sync::{atomic::Ordering, Arc}, + sync::{Arc, atomic::Ordering}, }; -use conduwuit::{debug, error, mods, Error, Result}; +use conduwuit::{Error, Result, debug, error, mods}; use conduwuit_service::Services; use crate::Server; diff --git a/src/main/runtime.rs b/src/main/runtime.rs index 474b373b..b3174e9c 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -1,8 +1,8 @@ use std::{ iter::once, sync::{ - atomic::{AtomicUsize, Ordering}, OnceLock, + atomic::{AtomicUsize, Ordering}, }, thread, time::Duration, @@ -11,9 +11,8 @@ use std::{ #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] use conduwuit::result::LogDebugErr; use conduwuit::{ - is_true, + Result, is_true, utils::sys::compute::{nth_core_available, set_affinity}, - Result, }; use tokio::runtime::Builder; diff --git a/src/main/sentry.rs b/src/main/sentry.rs index 02835ec8..1ea1f3ae 100644 --- a/src/main/sentry.rs +++ b/src/main/sentry.rs @@ -7,11 +7,11 @@ use std::{ use conduwuit::{config::Config, debug, trace}; use sentry::{ - types::{ - protocol::v7::{Context, Event}, - Dsn, - }, Breadcrumb, ClientOptions, Level, + types::{ + Dsn, + protocol::v7::{Context, Event}, + }, }; static SEND_PANIC: OnceLock = OnceLock::new(); diff --git a/src/main/server.rs b/src/main/server.rs index 7376b2fc..44ca69b0 100644 --- a/src/main/server.rs +++ b/src/main/server.rs @@ -1,11 +1,11 @@ use std::{path::PathBuf, sync::Arc}; use conduwuit::{ + Error, Result, config::Config, info, log::Log, utils::{stream, sys}, - Error, Result, }; use tokio::{runtime, sync::Mutex}; diff --git a/src/router/layers.rs b/src/router/layers.rs index 7ebec16e..88e6a8d5 100644 --- a/src/router/layers.rs +++ b/src/router/layers.rs @@ -1,16 +1,16 @@ use std::{any::Any, sync::Arc, time::Duration}; use axum::{ - extract::{DefaultBodyLimit, MatchedPath}, Router, + extract::{DefaultBodyLimit, MatchedPath}, }; use axum_client_ip::SecureClientIpSource; -use conduwuit::{debug, error, Result, Server}; +use conduwuit::{Result, Server, debug, error}; use conduwuit_api::router::state::Guard; use conduwuit_service::Services; use http::{ - header::{self, HeaderName}, HeaderValue, Method, StatusCode, + header::{self, HeaderName}, }; use tower::ServiceBuilder; use tower_http::{ @@ -176,12 +176,12 @@ fn catch_panic( .requests_panic .fetch_add(1, std::sync::atomic::Ordering::Release); - let details = if let Some(s) = err.downcast_ref::() { - s.clone() - } else if let Some(s) = err.downcast_ref::<&str>() { - (*s).to_owned() - } else { - "Unknown internal server error occurred.".to_owned() + let details = match err.downcast_ref::() { + | Some(s) => s.clone(), + | _ => match err.downcast_ref::<&str>() { + | Some(s) => (*s).to_owned(), + | _ => "Unknown internal server error occurred.".to_owned(), + }, }; error!("{details:#}"); diff --git a/src/router/request.rs b/src/router/request.rs index b6c22d45..00769b3f 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -1,6 +1,6 @@ use std::{ fmt::Debug, - sync::{atomic::Ordering, Arc}, + sync::{Arc, atomic::Ordering}, time::Duration, }; @@ -8,7 +8,7 @@ use axum::{ extract::State, response::{IntoResponse, Response}, }; -use conduwuit::{debug, debug_error, debug_warn, err, error, trace, Result}; +use conduwuit::{Result, debug, debug_error, debug_warn, err, error, trace}; use conduwuit_service::Services; use 
futures::FutureExt; use http::{Method, StatusCode, Uri}; diff --git a/src/router/router.rs b/src/router/router.rs index b3531418..0f95b924 100644 --- a/src/router/router.rs +++ b/src/router/router.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use axum::{response::IntoResponse, routing::get, Router}; +use axum::{Router, response::IntoResponse, routing::get}; use conduwuit::Error; use conduwuit_api::router::{state, state::Guard}; use conduwuit_service::Services; diff --git a/src/router/run.rs b/src/router/run.rs index 024cb813..31789626 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ -3,12 +3,12 @@ extern crate conduwuit_core as conduwuit; extern crate conduwuit_service as service; use std::{ - sync::{atomic::Ordering, Arc, Weak}, + sync::{Arc, Weak, atomic::Ordering}, time::Duration, }; use axum_server::Handle as ServerHandle; -use conduwuit::{debug, debug_error, debug_info, error, info, Error, Result, Server}; +use conduwuit::{Error, Result, Server, debug, debug_error, debug_info, error, info}; use futures::FutureExt; use service::Services; use tokio::{ diff --git a/src/router/serve/mod.rs b/src/router/serve/mod.rs index 5c822f2b..2399edf0 100644 --- a/src/router/serve/mod.rs +++ b/src/router/serve/mod.rs @@ -6,7 +6,7 @@ mod unix; use std::sync::Arc; use axum_server::Handle as ServerHandle; -use conduwuit::{err, Result}; +use conduwuit::{Result, err}; use conduwuit_service::Services; use tokio::sync::broadcast; diff --git a/src/router/serve/plain.rs b/src/router/serve/plain.rs index 535282b9..6db7e138 100644 --- a/src/router/serve/plain.rs +++ b/src/router/serve/plain.rs @@ -1,11 +1,11 @@ use std::{ net::SocketAddr, - sync::{atomic::Ordering, Arc}, + sync::{Arc, atomic::Ordering}, }; use axum::Router; -use axum_server::{bind, Handle as ServerHandle}; -use conduwuit::{debug_info, info, Result, Server}; +use axum_server::{Handle as ServerHandle, bind}; +use conduwuit::{Result, Server, debug_info, info}; use tokio::task::JoinSet; pub(super) async fn serve( diff --git a/src/router/serve/tls.rs b/src/router/serve/tls.rs index ab1a9371..dd46ab53 100644 --- a/src/router/serve/tls.rs +++ b/src/router/serve/tls.rs @@ -3,10 +3,10 @@ use std::{net::SocketAddr, sync::Arc}; use axum::Router; use axum_server::Handle as ServerHandle; use axum_server_dual_protocol::{ - axum_server::{bind_rustls, tls_rustls::RustlsConfig}, ServerExt, + axum_server::{bind_rustls, tls_rustls::RustlsConfig}, }; -use conduwuit::{err, Result, Server}; +use conduwuit::{Result, Server, err}; use tokio::task::JoinSet; use tracing::{debug, info, warn}; diff --git a/src/router/serve/unix.rs b/src/router/serve/unix.rs index 6a030c30..2af17274 100644 --- a/src/router/serve/unix.rs +++ b/src/router/serve/unix.rs @@ -4,15 +4,15 @@ use std::{ net::{self, IpAddr, Ipv4Addr}, os::fd::AsRawFd, path::Path, - sync::{atomic::Ordering, Arc}, + sync::{Arc, atomic::Ordering}, }; use axum::{ - extract::{connect_info::IntoMakeServiceWithConnectInfo, Request}, Router, + extract::{Request, connect_info::IntoMakeServiceWithConnectInfo}, }; use conduwuit::{ - debug, debug_error, info, result::UnwrapInfallible, trace, warn, Err, Result, Server, + Err, Result, Server, debug, debug_error, info, result::UnwrapInfallible, trace, warn, }; use hyper::{body::Incoming, service::service_fn}; use hyper_util::{ @@ -21,10 +21,10 @@ use hyper_util::{ }; use tokio::{ fs, - net::{unix::SocketAddr, UnixListener, UnixStream}, + net::{UnixListener, UnixStream, unix::SocketAddr}, sync::broadcast::{self}, task::JoinSet, - time::{sleep, Duration}, + time::{Duration, 
sleep}, }; use tower::{Service, ServiceExt}; diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 5a943f88..453051be 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -1,23 +1,22 @@ use std::sync::Arc; use conduwuit::{ - err, implement, - utils::{result::LogErr, stream::TryIgnore, ReadyExt}, - Err, Result, + Err, Result, err, implement, + utils::{ReadyExt, result::LogErr, stream::TryIgnore}, }; use database::{Deserialized, Handle, Ignore, Json, Map}; use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{ + RoomId, UserId, events::{ AnyGlobalAccountDataEvent, AnyRawAccountDataEvent, AnyRoomAccountDataEvent, GlobalAccountDataEventType, RoomAccountDataEventType, }, serde::Raw, - RoomId, UserId, }; use serde::Deserialize; -use crate::{globals, Dep}; +use crate::{Dep, globals}; pub struct Service { services: Services, diff --git a/src/service/admin/console.rs b/src/service/admin/console.rs index 59b9a31b..02f41303 100644 --- a/src/service/admin/console.rs +++ b/src/service/admin/console.rs @@ -5,14 +5,14 @@ use std::{ sync::{Arc, Mutex}, }; -use conduwuit::{debug, defer, error, log, log::is_systemd_mode, Server}; +use conduwuit::{Server, debug, defer, error, log, log::is_systemd_mode}; use futures::future::{AbortHandle, Abortable}; use ruma::events::room::message::RoomMessageEventContent; use rustyline_async::{Readline, ReadlineError, ReadlineEvent}; use termimad::MadSkin; use tokio::task::JoinHandle; -use crate::{admin, Dep}; +use crate::{Dep, admin}; pub struct Console { server: Arc, @@ -221,7 +221,7 @@ pub fn print(markdown: &str) { } fn configure_output_err(mut output: MadSkin) -> MadSkin { - use termimad::{crossterm::style::Color, Alignment, CompoundStyle, LineStyle}; + use termimad::{Alignment, CompoundStyle, LineStyle, crossterm::style::Color}; let code_style = CompoundStyle::with_fgbg(Color::AnsiValue(196), Color::AnsiValue(234)); output.inline_code = code_style.clone(); @@ -236,7 +236,7 @@ fn configure_output_err(mut output: MadSkin) -> MadSkin { } fn configure_output(mut output: MadSkin) -> MadSkin { - use termimad::{crossterm::style::Color, Alignment, CompoundStyle, LineStyle}; + use termimad::{Alignment, CompoundStyle, LineStyle, crossterm::style::Color}; let code_style = CompoundStyle::with_fgbg(Color::AnsiValue(40), Color::AnsiValue(234)); output.inline_code = code_style.clone(); diff --git a/src/service/admin/create.rs b/src/service/admin/create.rs index 7b691fb1..7f71665a 100644 --- a/src/service/admin/create.rs +++ b/src/service/admin/create.rs @@ -1,7 +1,8 @@ use std::collections::BTreeMap; -use conduwuit::{pdu::PduBuilder, Result}; +use conduwuit::{Result, pdu::PduBuilder}; use ruma::{ + RoomId, RoomVersionId, events::room::{ canonical_alias::RoomCanonicalAliasEventContent, create::RoomCreateEventContent, @@ -14,7 +15,6 @@ use ruma::{ preview_url::RoomPreviewUrlsEventContent, topic::RoomTopicEventContent, }, - RoomId, RoomVersionId, }; use crate::Services; diff --git a/src/service/admin/execute.rs b/src/service/admin/execute.rs index 462681da..174b28ed 100644 --- a/src/service/admin/execute.rs +++ b/src/service/admin/execute.rs @@ -1,6 +1,6 @@ -use conduwuit::{debug, debug_info, error, implement, info, Err, Result}; +use conduwuit::{Err, Result, debug, debug_info, error, implement, info}; use ruma::events::room::message::RoomMessageEventContent; -use tokio::time::{sleep, Duration}; +use tokio::time::{Duration, sleep}; pub(super) const SIGNAL: &str = "SIGUSR2"; diff --git a/src/service/admin/grant.rs 
b/src/service/admin/grant.rs index 3ad9283f..358ea267 100644 --- a/src/service/admin/grant.rs +++ b/src/service/admin/grant.rs @@ -1,17 +1,17 @@ use std::collections::BTreeMap; -use conduwuit::{error, implement, Result}; +use conduwuit::{Result, error, implement}; use ruma::{ + RoomId, UserId, events::{ + RoomAccountDataEventType, room::{ member::{MembershipState, RoomMemberEventContent}, message::RoomMessageEventContent, power_levels::RoomPowerLevelsEventContent, }, tag::{TagEvent, TagEventContent, TagInfo}, - RoomAccountDataEventType, }, - RoomId, UserId, }; use crate::pdu::PduBuilder; diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 31b046b7..4622f10e 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -11,18 +11,18 @@ use std::{ use async_trait::async_trait; use conduwuit::{ - debug, err, error, error::default_log, pdu::PduBuilder, Error, PduEvent, Result, Server, + Error, PduEvent, Result, Server, debug, err, error, error::default_log, pdu::PduBuilder, }; pub use create::create_admin_room; use futures::{FutureExt, TryFutureExt}; use loole::{Receiver, Sender}; use ruma::{ - events::room::message::{Relation, RoomMessageEventContent}, OwnedEventId, OwnedRoomId, RoomId, UserId, + events::room::message::{Relation, RoomMessageEventContent}, }; use tokio::sync::RwLock; -use crate::{account_data, globals, rooms, rooms::state::RoomMutexGuard, Dep}; +use crate::{Dep, account_data, globals, rooms, rooms::state::RoomMutexGuard}; pub struct Service { services: Services, diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 2a54ee09..5aba0018 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -4,14 +4,14 @@ mod registration_info; use std::{collections::BTreeMap, sync::Arc}; use async_trait::async_trait; -use conduwuit::{err, utils::stream::TryIgnore, Result}; +use conduwuit::{Result, err, utils::stream::TryIgnore}; use database::Map; use futures::{Future, StreamExt, TryStreamExt}; -use ruma::{api::appservice::Registration, RoomAliasId, RoomId, UserId}; +use ruma::{RoomAliasId, RoomId, UserId, api::appservice::Registration}; use tokio::sync::RwLock; pub use self::{namespace_regex::NamespaceRegex, registration_info::RegistrationInfo}; -use crate::{sending, Dep}; +use crate::{Dep, sending}; pub struct Service { registration_info: RwLock>, diff --git a/src/service/appservice/registration_info.rs b/src/service/appservice/registration_info.rs index 9758e186..a511f58d 100644 --- a/src/service/appservice/registration_info.rs +++ b/src/service/appservice/registration_info.rs @@ -1,5 +1,5 @@ use conduwuit::Result; -use ruma::{api::appservice::Registration, UserId}; +use ruma::{UserId, api::appservice::Registration}; use super::NamespaceRegex; diff --git a/src/service/client/mod.rs b/src/service/client/mod.rs index f63d78b8..d5008491 100644 --- a/src/service/client/mod.rs +++ b/src/service/client/mod.rs @@ -1,6 +1,6 @@ use std::{sync::Arc, time::Duration}; -use conduwuit::{err, implement, trace, Config, Result}; +use conduwuit::{Config, Result, err, implement, trace}; use either::Either; use ipaddress::IPAddress; use reqwest::redirect; @@ -172,10 +172,9 @@ fn base(config: &Config) -> Result { builder = builder.no_zstd(); }; - if let Some(proxy) = config.proxy.to_proxy()? { - Ok(builder.proxy(proxy)) - } else { - Ok(builder) + match config.proxy.to_proxy()? 
{ + | Some(proxy) => Ok(builder.proxy(proxy)), + | _ => Ok(builder), } } diff --git a/src/service/config/mod.rs b/src/service/config/mod.rs index c9ac37a3..fd0d8764 100644 --- a/src/service/config/mod.rs +++ b/src/service/config/mod.rs @@ -2,8 +2,9 @@ use std::{iter, ops::Deref, path::Path, sync::Arc}; use async_trait::async_trait; use conduwuit::{ - config::{check, Config}, - error, implement, Result, Server, + Result, Server, + config::{Config, check}, + error, implement, }; pub struct Service { diff --git a/src/service/emergency/mod.rs b/src/service/emergency/mod.rs index 9b2e4025..47a309a5 100644 --- a/src/service/emergency/mod.rs +++ b/src/service/emergency/mod.rs @@ -1,15 +1,15 @@ use std::sync::Arc; use async_trait::async_trait; -use conduwuit::{error, warn, Result}; +use conduwuit::{Result, error, warn}; use ruma::{ events::{ - push_rules::PushRulesEventContent, GlobalAccountDataEvent, GlobalAccountDataEventType, + GlobalAccountDataEvent, GlobalAccountDataEventType, push_rules::PushRulesEventContent, }, push::Ruleset, }; -use crate::{account_data, globals, users, Dep}; +use crate::{Dep, account_data, globals, users}; pub struct Service { services: Services, diff --git a/src/service/federation/execute.rs b/src/service/federation/execute.rs index 3146bb8a..d254486f 100644 --- a/src/service/federation/execute.rs +++ b/src/service/federation/execute.rs @@ -2,20 +2,20 @@ use std::{fmt::Debug, mem}; use bytes::Bytes; use conduwuit::{ - debug, debug::INFO_SPAN_LEVEL, debug_error, debug_warn, err, error::inspect_debug_log, - implement, trace, utils::string::EMPTY, Err, Error, Result, + Err, Error, Result, debug, debug::INFO_SPAN_LEVEL, debug_error, debug_warn, err, + error::inspect_debug_log, implement, trace, utils::string::EMPTY, }; -use http::{header::AUTHORIZATION, HeaderValue}; +use http::{HeaderValue, header::AUTHORIZATION}; use ipaddress::IPAddress; use reqwest::{Client, Method, Request, Response, Url}; use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, ServerName, ServerSigningKeyId, api::{ - client::error::Error as RumaError, EndpointError, IncomingResponse, MatrixVersion, - OutgoingRequest, SendAccessToken, + EndpointError, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, + client::error::Error as RumaError, }, serde::Base64, server_util::authorization::XMatrix, - CanonicalJsonObject, CanonicalJsonValue, ServerName, ServerSigningKeyId, }; use crate::resolver::actual::ActualDest; diff --git a/src/service/federation/mod.rs b/src/service/federation/mod.rs index dacdb20e..ce7765ee 100644 --- a/src/service/federation/mod.rs +++ b/src/service/federation/mod.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use conduwuit::{Result, Server}; -use crate::{client, resolver, server_keys, Dep}; +use crate::{Dep, client, resolver, server_keys}; pub struct Service { services: Services, diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 26a18607..b43b7c5f 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -1,6 +1,6 @@ use std::sync::{Arc, RwLock}; -use conduwuit::{utils, Result}; +use conduwuit::{Result, utils}; use database::{Database, Deserialized, Map}; pub struct Data { diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 485d5020..16b3ef3c 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -7,7 +7,7 @@ use std::{ time::Instant, }; -use conduwuit::{error, utils::bytes::pretty, Result, Server}; +use conduwuit::{Result, Server, error, utils::bytes::pretty}; use data::Data; use 
regex::RegexSet; use ruma::{OwnedEventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, ServerName, UserId}; diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index 1165c3ed..1bf048ef 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -1,19 +1,18 @@ use std::{collections::BTreeMap, sync::Arc}; use conduwuit::{ - err, implement, + Err, Result, err, implement, utils::stream::{ReadyExt, TryIgnore}, - Err, Result, }; use database::{Deserialized, Ignore, Interfix, Json, Map}; use futures::StreamExt; use ruma::{ + OwnedRoomId, RoomId, UserId, api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, serde::Raw, - OwnedRoomId, RoomId, UserId, }; -use crate::{globals, Dep}; +use crate::{Dep, globals}; pub struct Service { db: Data, diff --git a/src/service/manager.rs b/src/service/manager.rs index e0d885c2..3cdf5945 100644 --- a/src/service/manager.rs +++ b/src/service/manager.rs @@ -1,6 +1,6 @@ use std::{panic::AssertUnwindSafe, sync::Arc, time::Duration}; -use conduwuit::{debug, debug_warn, error, trace, utils::time, warn, Err, Error, Result, Server}; +use conduwuit::{Err, Error, Result, Server, debug, debug_warn, error, trace, utils::time, warn}; use futures::{FutureExt, TryFutureExt}; use tokio::{ sync::{Mutex, MutexGuard}, @@ -8,7 +8,7 @@ use tokio::{ time::sleep, }; -use crate::{service, service::Service, Services}; +use crate::{Services, service, service::Service}; pub(crate) struct Manager { manager: Mutex>>>, diff --git a/src/service/media/blurhash.rs b/src/service/media/blurhash.rs index 60ade723..9d73f5dc 100644 --- a/src/service/media/blurhash.rs +++ b/src/service/media/blurhash.rs @@ -1,6 +1,6 @@ #[cfg(feature = "blurhashing")] use conduwuit::config::BlurhashConfig as CoreBlurhashConfig; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use super::Service; diff --git a/src/service/media/data.rs b/src/service/media/data.rs index f48482ea..0ccd844f 100644 --- a/src/service/media/data.rs +++ b/src/service/media/data.rs @@ -1,13 +1,12 @@ use std::{sync::Arc, time::Duration}; use conduwuit::{ - debug, debug_info, err, - utils::{str_from_bytes, stream::TryIgnore, string_from_bytes, ReadyExt}, - Err, Result, + Err, Result, debug, debug_info, err, + utils::{ReadyExt, str_from_bytes, stream::TryIgnore, string_from_bytes}, }; use database::{Database, Interfix, Map}; use futures::StreamExt; -use ruma::{http_headers::ContentDisposition, Mxc, OwnedMxcUri, UserId}; +use ruma::{Mxc, OwnedMxcUri, UserId, http_headers::ContentDisposition}; use super::{preview::UrlPreviewData, thumbnail::Dim}; diff --git a/src/service/media/migrations.rs b/src/service/media/migrations.rs index 8526ffcd..5fd628cd 100644 --- a/src/service/media/migrations.rs +++ b/src/service/media/migrations.rs @@ -8,9 +8,9 @@ use std::{ }; use conduwuit::{ - debug, debug_info, debug_warn, error, info, - utils::{stream::TryIgnore, ReadyExt}, - warn, Config, Result, + Config, Result, debug, debug_info, debug_warn, error, info, + utils::{ReadyExt, stream::TryIgnore}, + warn, }; use crate::Services; diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index f5913f43..5c26efe8 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -8,13 +8,13 @@ mod thumbnail; use std::{path::PathBuf, sync::Arc, time::SystemTime}; use async_trait::async_trait; -use base64::{engine::general_purpose, Engine as _}; +use base64::{Engine as _, engine::general_purpose}; use conduwuit::{ - debug, debug_error, debug_info, debug_warn, err, 
error, trace, + Err, Result, Server, debug, debug_error, debug_info, debug_warn, err, error, trace, utils::{self, MutexMap}, - warn, Err, Result, Server, + warn, }; -use ruma::{http_headers::ContentDisposition, Mxc, OwnedMxcUri, UserId}; +use ruma::{Mxc, OwnedMxcUri, UserId, http_headers::ContentDisposition}; use tokio::{ fs, io::{AsyncReadExt, AsyncWriteExt, BufReader}, @@ -22,7 +22,7 @@ use tokio::{ use self::data::{Data, Metadata}; pub use self::thumbnail::Dim; -use crate::{client, globals, sending, Dep}; +use crate::{Dep, client, globals, sending}; #[derive(Debug)] pub struct FileMeta { @@ -105,22 +105,27 @@ impl Service { /// Deletes a file in the database and from the media directory via an MXC pub async fn delete(&self, mxc: &Mxc<'_>) -> Result<()> { - if let Ok(keys) = self.db.search_mxc_metadata_prefix(mxc).await { - for key in keys { - trace!(?mxc, "MXC Key: {key:?}"); - debug_info!(?mxc, "Deleting from filesystem"); + match self.db.search_mxc_metadata_prefix(mxc).await { + | Ok(keys) => { + for key in keys { + trace!(?mxc, "MXC Key: {key:?}"); + debug_info!(?mxc, "Deleting from filesystem"); - if let Err(e) = self.remove_media_file(&key).await { - debug_error!(?mxc, "Failed to remove media file: {e}"); + if let Err(e) = self.remove_media_file(&key).await { + debug_error!(?mxc, "Failed to remove media file: {e}"); + } + + debug_info!(?mxc, "Deleting from database"); + self.db.delete_file_mxc(mxc).await; } - debug_info!(?mxc, "Deleting from database"); - self.db.delete_file_mxc(mxc).await; - } - - Ok(()) - } else { - Err!(Database(error!("Failed to find any media keys for MXC {mxc} in our database."))) + Ok(()) + }, + | _ => { + Err!(Database(error!( + "Failed to find any media keys for MXC {mxc} in our database." + ))) + }, } } @@ -154,22 +159,21 @@ impl Service { /// Downloads a file. pub async fn get(&self, mxc: &Mxc<'_>) -> Result> { - if let Ok(Metadata { content_disposition, content_type, key }) = - self.db.search_file_metadata(mxc, &Dim::default()).await - { - let mut content = Vec::with_capacity(8192); - let path = self.get_media_file(&key); - BufReader::new(fs::File::open(path).await?) - .read_to_end(&mut content) - .await?; + match self.db.search_file_metadata(mxc, &Dim::default()).await { + | Ok(Metadata { content_disposition, content_type, key }) => { + let mut content = Vec::with_capacity(8192); + let path = self.get_media_file(&key); + BufReader::new(fs::File::open(path).await?) 
+ .read_to_end(&mut content) + .await?; - Ok(Some(FileMeta { - content: Some(content), - content_type, - content_disposition, - })) - } else { - Ok(None) + Ok(Some(FileMeta { + content: Some(content), + content_type, + content_disposition, + })) + }, + | _ => Ok(None), } } diff --git a/src/service/media/preview.rs b/src/service/media/preview.rs index e7f76bab..17216869 100644 --- a/src/service/media/preview.rs +++ b/src/service/media/preview.rs @@ -7,7 +7,7 @@ use std::time::SystemTime; -use conduwuit::{debug, Err, Result}; +use conduwuit::{Err, Result, debug}; use conduwuit_core::implement; use ipaddress::IPAddress; use serde::Serialize; diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs index 72f1184e..61635011 100644 --- a/src/service/media/remote.rs +++ b/src/service/media/remote.rs @@ -1,21 +1,21 @@ use std::{fmt::Debug, time::Duration}; use conduwuit::{ - debug_warn, err, implement, utils::content_disposition::make_content_disposition, Err, Error, - Result, + Err, Error, Result, debug_warn, err, implement, + utils::content_disposition::make_content_disposition, }; -use http::header::{HeaderValue, CONTENT_DISPOSITION, CONTENT_TYPE}; +use http::header::{CONTENT_DISPOSITION, CONTENT_TYPE, HeaderValue}; use ruma::{ + Mxc, ServerName, UserId, api::{ + OutgoingRequest, client::{ error::ErrorKind::{NotFound, Unrecognized}, media, }, federation, federation::authenticated_media::{Content, FileOrLocation}, - OutgoingRequest, }, - Mxc, ServerName, UserId, }; use super::{Dim, FileMeta}; diff --git a/src/service/media/tests.rs b/src/service/media/tests.rs index 1d6dce30..651e0ade 100644 --- a/src/service/media/tests.rs +++ b/src/service/media/tests.rs @@ -5,7 +5,7 @@ async fn long_file_names_works() { use std::path::PathBuf; - use base64::{engine::general_purpose, Engine as _}; + use base64::{Engine as _, engine::general_purpose}; use super::*; diff --git a/src/service/media/thumbnail.rs b/src/service/media/thumbnail.rs index 7350b3a1..e5a98774 100644 --- a/src/service/media/thumbnail.rs +++ b/src/service/media/thumbnail.rs @@ -7,14 +7,14 @@ use std::{cmp, num::Saturating as Sat}; -use conduwuit::{checked, err, implement, Result}; -use ruma::{http_headers::ContentDisposition, media::Method, Mxc, UInt, UserId}; +use conduwuit::{Result, checked, err, implement}; +use ruma::{Mxc, UInt, UserId, http_headers::ContentDisposition, media::Method}; use tokio::{ fs, io::{AsyncReadExt, AsyncWriteExt}, }; -use super::{data::Metadata, FileMeta}; +use super::{FileMeta, data::Metadata}; /// Dimension specification for a thumbnail. 
#[derive(Debug)] @@ -65,12 +65,12 @@ impl super::Service { // 0, 0 because that's the original file let dim = dim.normalized(); - if let Ok(metadata) = self.db.search_file_metadata(mxc, &dim).await { - self.get_thumbnail_saved(metadata).await - } else if let Ok(metadata) = self.db.search_file_metadata(mxc, &Dim::default()).await { - self.get_thumbnail_generate(mxc, &dim, metadata).await - } else { - Ok(None) + match self.db.search_file_metadata(mxc, &dim).await { + | Ok(metadata) => self.get_thumbnail_saved(metadata).await, + | _ => match self.db.search_file_metadata(mxc, &Dim::default()).await { + | Ok(metadata) => self.get_thumbnail_generate(mxc, &dim, metadata).await, + | _ => Ok(None), + }, } } } diff --git a/src/service/migrations.rs b/src/service/migrations.rs index 69b1be4e..512a7867 100644 --- a/src/service/migrations.rs +++ b/src/service/migrations.rs @@ -1,25 +1,25 @@ use std::cmp; use conduwuit::{ - debug, debug_info, debug_warn, error, info, + Err, Result, debug, debug_info, debug_warn, error, info, result::NotFound, utils::{ - stream::{TryExpect, TryIgnore}, IterStream, ReadyExt, + stream::{TryExpect, TryIgnore}, }, - warn, Err, Result, + warn, }; use futures::{FutureExt, StreamExt}; use itertools::Itertools; use ruma::{ + OwnedUserId, RoomId, UserId, events::{ - push_rules::PushRulesEvent, room::member::MembershipState, GlobalAccountDataEventType, + GlobalAccountDataEventType, push_rules::PushRulesEvent, room::member::MembershipState, }, push::Ruleset, - OwnedUserId, RoomId, UserId, }; -use crate::{media, Services}; +use crate::{Services, media}; /// The current schema version. /// - If database is opened at greater version we reject with error. The diff --git a/src/service/mod.rs b/src/service/mod.rs index 71bd0eb4..0bde0255 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -30,7 +30,7 @@ pub mod users; extern crate conduwuit_core as conduwuit; extern crate conduwuit_database as database; -pub use conduwuit::{pdu, PduBuilder, PduCount, PduEvent}; +pub use conduwuit::{PduBuilder, PduCount, PduEvent, pdu}; pub(crate) use service::{Args, Dep, Service}; pub use crate::services::Services; diff --git a/src/service/presence/data.rs b/src/service/presence/data.rs index 4ec0a7ee..d7ef5175 100644 --- a/src/service/presence/data.rs +++ b/src/service/presence/data.rs @@ -1,16 +1,15 @@ use std::sync::Arc; use conduwuit::{ - debug_warn, utils, - utils::{stream::TryIgnore, ReadyExt}, - Result, + Result, debug_warn, utils, + utils::{ReadyExt, stream::TryIgnore}, }; use database::{Deserialized, Json, Map}; use futures::Stream; -use ruma::{events::presence::PresenceEvent, presence::PresenceState, UInt, UserId}; +use ruma::{UInt, UserId, events::presence::PresenceEvent, presence::PresenceState}; use super::Presence; -use crate::{globals, users, Dep}; +use crate::{Dep, globals, users}; pub(crate) struct Data { presenceid_presence: Arc, diff --git a/src/service/presence/mod.rs b/src/service/presence/mod.rs index eb4105e5..8f646be6 100644 --- a/src/service/presence/mod.rs +++ b/src/service/presence/mod.rs @@ -5,16 +5,16 @@ use std::{sync::Arc, time::Duration}; use async_trait::async_trait; use conduwuit::{ - checked, debug, debug_warn, error, result::LogErr, trace, Error, Result, Server, + Error, Result, Server, checked, debug, debug_warn, error, result::LogErr, trace, }; use database::Database; -use futures::{stream::FuturesUnordered, Stream, StreamExt, TryFutureExt}; +use futures::{Stream, StreamExt, TryFutureExt, stream::FuturesUnordered}; use loole::{Receiver, Sender}; -use 
ruma::{events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, UInt, UserId}; +use ruma::{OwnedUserId, UInt, UserId, events::presence::PresenceEvent, presence::PresenceState}; use tokio::time::sleep; use self::{data::Data, presence::Presence}; -use crate::{globals, users, Dep}; +use crate::{Dep, globals, users}; pub struct Service { timer_channel: (Sender, Receiver), diff --git a/src/service/presence/presence.rs b/src/service/presence/presence.rs index b322dfb4..3357bd61 100644 --- a/src/service/presence/presence.rs +++ b/src/service/presence/presence.rs @@ -1,8 +1,8 @@ -use conduwuit::{utils, Error, Result}; +use conduwuit::{Error, Result, utils}; use ruma::{ + UInt, UserId, events::presence::{PresenceEvent, PresenceEventContent}, presence::PresenceState, - UInt, UserId, }; use serde::{Deserialize, Serialize}; diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 43d60c08..2b269b3d 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -2,34 +2,35 @@ use std::{fmt::Debug, mem, sync::Arc}; use bytes::BytesMut; use conduwuit::{ - debug_warn, err, trace, + Err, PduEvent, Result, debug_warn, err, trace, utils::{stream::TryIgnore, string_from_bytes}, - warn, Err, PduEvent, Result, + warn, }; use database::{Deserialized, Ignore, Interfix, Json, Map}; use futures::{Stream, StreamExt}; use ipaddress::IPAddress; use ruma::{ + RoomId, UInt, UserId, api::{ - client::push::{set_pusher, Pusher, PusherKind}, + IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, + client::push::{Pusher, PusherKind, set_pusher}, push_gateway::send_event_notification::{ self, v1::{Device, Notification, NotificationCounts, NotificationPriority}, }, - IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, }, events::{ - room::power_levels::RoomPowerLevelsEventContent, AnySyncTimelineEvent, StateEventType, - TimelineEventType, + AnySyncTimelineEvent, StateEventType, TimelineEventType, + room::power_levels::RoomPowerLevelsEventContent, }, push::{ Action, PushConditionPowerLevelsCtx, PushConditionRoomCtx, PushFormat, Ruleset, Tweak, }, serde::Raw, - uint, RoomId, UInt, UserId, + uint, }; -use crate::{client, globals, rooms, sending, users, Dep}; +use crate::{Dep, client, globals, rooms, sending, users}; pub struct Service { db: Data, diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 66854764..8860d0a0 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -3,7 +3,7 @@ use std::{ net::{IpAddr, SocketAddr}, }; -use conduwuit::{debug, debug_error, debug_info, debug_warn, err, error, trace, Err, Result}; +use conduwuit::{Err, Result, debug, debug_error, debug_info, debug_warn, err, error, trace}; use futures::{FutureExt, TryFutureExt}; use hickory_resolver::error::ResolveError; use ipaddress::IPAddress; @@ -11,7 +11,7 @@ use ruma::ServerName; use super::{ cache::{CachedDest, CachedOverride, MAX_IPS}, - fed::{add_port_to_hostname, get_ip_with_port, FedDest, PortString}, + fed::{FedDest, PortString, add_port_to_hostname, get_ip_with_port}, }; #[derive(Clone, Debug)] @@ -71,12 +71,16 @@ impl super::Service { | None => if let Some(pos) = dest.as_str().find(':') { self.actual_dest_2(dest, cache, pos).await? - } else if let Some(delegated) = self.request_well_known(dest.as_str()).await? { - self.actual_dest_3(&mut host, cache, delegated).await? - } else if let Some(overrider) = self.query_srv_record(dest.as_str()).await? { - self.actual_dest_4(&host, cache, overrider).await? 
} else { - self.actual_dest_5(dest, cache).await? + match self.request_well_known(dest.as_str()).await? { + | Some(delegated) => + self.actual_dest_3(&mut host, cache, delegated).await?, + | _ => match self.query_srv_record(dest.as_str()).await? { + | Some(overrider) => + self.actual_dest_4(&host, cache, overrider).await?, + | _ => self.actual_dest_5(dest, cache).await?, + }, + } }, }; @@ -136,10 +140,10 @@ impl super::Service { self.actual_dest_3_2(cache, delegated, pos).await } else { trace!("Delegated hostname has no port in this branch"); - if let Some(overrider) = self.query_srv_record(&delegated).await? { - self.actual_dest_3_3(cache, delegated, overrider).await - } else { - self.actual_dest_3_4(cache, delegated).await + match self.query_srv_record(&delegated).await? { + | Some(overrider) => + self.actual_dest_3_3(cache, delegated, overrider).await, + | _ => self.actual_dest_3_4(cache, delegated).await, } }, } diff --git a/src/service/resolver/cache.rs b/src/service/resolver/cache.rs index 7b4f104d..6b05c00c 100644 --- a/src/service/resolver/cache.rs +++ b/src/service/resolver/cache.rs @@ -1,10 +1,10 @@ use std::{net::IpAddr, sync::Arc, time::SystemTime}; use conduwuit::{ + Result, arrayvec::ArrayVec, at, err, implement, utils::{math::Expected, rand, stream::TryIgnore}, - Result, }; use database::{Cbor, Deserialized, Map}; use futures::{Stream, StreamExt}; @@ -96,7 +96,7 @@ pub fn destinations(&self) -> impl Stream + Se self.destinations .stream() .ignore_err() - .map(|item: (&ServerName, Cbor<_>)| (item.0, item.1 .0)) + .map(|item: (&ServerName, Cbor<_>)| (item.0, item.1.0)) } #[implement(Cache)] @@ -104,7 +104,7 @@ pub fn overrides(&self) -> impl Stream + S self.overrides .stream() .ignore_err() - .map(|item: (&ServerName, Cbor<_>)| (item.0, item.1 .0)) + .map(|item: (&ServerName, Cbor<_>)| (item.0, item.1.0)) } impl CachedDest { diff --git a/src/service/resolver/dns.rs b/src/service/resolver/dns.rs index ca6106e2..98ad7e60 100644 --- a/src/service/resolver/dns.rs +++ b/src/service/resolver/dns.rs @@ -1,8 +1,8 @@ use std::{net::SocketAddr, sync::Arc, time::Duration}; -use conduwuit::{err, Result, Server}; +use conduwuit::{Result, Server, err}; use futures::FutureExt; -use hickory_resolver::{lookup_ip::LookupIp, TokioAsyncResolver}; +use hickory_resolver::{TokioAsyncResolver, lookup_ip::LookupIp}; use reqwest::dns::{Addrs, Name, Resolve, Resolving}; use super::cache::{Cache, CachedOverride}; diff --git a/src/service/resolver/mod.rs b/src/service/resolver/mod.rs index 6be9d42d..2ec9c0ef 100644 --- a/src/service/resolver/mod.rs +++ b/src/service/resolver/mod.rs @@ -6,10 +6,10 @@ mod tests; use std::sync::Arc; -use conduwuit::{arrayvec::ArrayString, utils::MutexMap, Result, Server}; +use conduwuit::{Result, Server, arrayvec::ArrayString, utils::MutexMap}; use self::{cache::Cache, dns::Resolver}; -use crate::{client, Dep}; +use crate::{Dep, client}; pub struct Service { pub cache: Arc, diff --git a/src/service/resolver/tests.rs b/src/service/resolver/tests.rs index 870f5eab..6e9d0e71 100644 --- a/src/service/resolver/tests.rs +++ b/src/service/resolver/tests.rs @@ -1,6 +1,6 @@ #![cfg(test)] -use super::fed::{add_port_to_hostname, get_ip_with_port, FedDest}; +use super::fed::{FedDest, add_port_to_hostname, get_ip_with_port}; #[test] fn ips_get_default_ports() { diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 17ed5e13..866e45a9 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -3,21 +3,20 @@ mod remote; use 
std::sync::Arc; use conduwuit::{ - err, - utils::{stream::TryIgnore, ReadyExt}, - Err, Result, Server, + Err, Result, Server, err, + utils::{ReadyExt, stream::TryIgnore}, }; use database::{Deserialized, Ignore, Interfix, Map}; use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{ - events::{ - room::power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, - StateEventType, - }, OwnedRoomId, OwnedServerName, OwnedUserId, RoomAliasId, RoomId, RoomOrAliasId, UserId, + events::{ + StateEventType, + room::power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, + }, }; -use crate::{admin, appservice, appservice::RegistrationInfo, globals, rooms, sending, Dep}; +use crate::{Dep, admin, appservice, appservice::RegistrationInfo, globals, rooms, sending}; pub struct Service { db: Data, diff --git a/src/service/rooms/alias/remote.rs b/src/service/rooms/alias/remote.rs index 7744bee2..60aed76d 100644 --- a/src/service/rooms/alias/remote.rs +++ b/src/service/rooms/alias/remote.rs @@ -1,8 +1,8 @@ use std::iter::once; -use conduwuit::{debug, debug_error, err, implement, Result}; +use conduwuit::{Result, debug, debug_error, err, implement}; use federation::query::get_room_information::v1::Response; -use ruma::{api::federation, OwnedRoomId, OwnedServerName, RoomAliasId, ServerName}; +use ruma::{OwnedRoomId, OwnedServerName, RoomAliasId, ServerName, api::federation}; #[implement(super::Service)] pub(super) async fn remote_resolve( diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs index af8ae364..8c3588cc 100644 --- a/src/service/rooms/auth_chain/data.rs +++ b/src/service/rooms/auth_chain/data.rs @@ -3,7 +3,7 @@ use std::{ sync::{Arc, Mutex}, }; -use conduwuit::{err, utils, utils::math::usize_from_f64, Err, Result}; +use conduwuit::{Err, Result, err, utils, utils::math::usize_from_f64}; use database::Map; use lru_cache::LruCache; diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 0ff96846..0903ea75 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -8,18 +8,18 @@ use std::{ }; use conduwuit::{ - at, debug, debug_error, implement, trace, + Err, Result, at, debug, debug_error, implement, trace, utils::{ - stream::{ReadyExt, TryBroadbandExt}, IterStream, + stream::{ReadyExt, TryBroadbandExt}, }, - validated, warn, Err, Result, + validated, warn, }; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{EventId, OwnedEventId, RoomId}; use self::data::Data; -use crate::{rooms, rooms::short::ShortEventId, Dep}; +use crate::{Dep, rooms, rooms::short::ShortEventId}; pub struct Service { services: Services, diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index 039efca7..4ea10641 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -1,9 +1,9 @@ use std::sync::Arc; -use conduwuit::{implement, utils::stream::TryIgnore, Result}; +use conduwuit::{Result, implement, utils::stream::TryIgnore}; use database::Map; use futures::Stream; -use ruma::{api::client::room::Visibility, RoomId}; +use ruma::{RoomId, api::client::room::Visibility}; pub struct Service { db: Data, diff --git a/src/service/rooms/event_handler/acl_check.rs b/src/service/rooms/event_handler/acl_check.rs index 714b6fc1..6b432a4b 100644 --- a/src/service/rooms/event_handler/acl_check.rs +++ b/src/service/rooms/event_handler/acl_check.rs @@ -1,7 +1,7 @@ -use conduwuit::{debug, implement, trace, warn, Err, Result}; +use 
conduwuit::{Err, Result, debug, implement, trace, warn}; use ruma::{ - events::{room::server_acl::RoomServerAclEventContent, StateEventType}, RoomId, ServerName, + events::{StateEventType, room::server_acl::RoomServerAclEventContent}, }; /// Returns Ok if the acl allows the server diff --git a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs index 540ebb64..80e91eff 100644 --- a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs +++ b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs @@ -1,16 +1,16 @@ use std::{ - collections::{hash_map, BTreeMap, HashSet, VecDeque}, + collections::{BTreeMap, HashSet, VecDeque, hash_map}, sync::Arc, time::Instant, }; use conduwuit::{ - debug, debug_error, debug_warn, implement, pdu, trace, - utils::continue_exponential_backoff_secs, warn, PduEvent, + PduEvent, debug, debug_error, debug_warn, implement, pdu, trace, + utils::continue_exponential_backoff_secs, warn, }; use futures::TryFutureExt; use ruma::{ - api::federation::event::get_event, CanonicalJsonValue, OwnedEventId, RoomId, ServerName, + CanonicalJsonValue, OwnedEventId, RoomId, ServerName, api::federation::event::get_event, }; use super::get_room_version_id; @@ -138,12 +138,15 @@ pub(super) async fn fetch_and_handle_outliers<'a>( .and_then(CanonicalJsonValue::as_array) { for auth_event in auth_events { - if let Ok(auth_event) = - serde_json::from_value::(auth_event.clone().into()) - { - todo_auth_events.push_back(auth_event); - } else { - warn!("Auth event id is not valid"); + match serde_json::from_value::( + auth_event.clone().into(), + ) { + | Ok(auth_event) => { + todo_auth_events.push_back(auth_event); + }, + | _ => { + warn!("Auth event id is not valid"); + }, } } } else { diff --git a/src/service/rooms/event_handler/fetch_prev.rs b/src/service/rooms/event_handler/fetch_prev.rs index 5a38f7fe..e817430b 100644 --- a/src/service/rooms/event_handler/fetch_prev.rs +++ b/src/service/rooms/event_handler/fetch_prev.rs @@ -4,14 +4,13 @@ use std::{ }; use conduwuit::{ - debug_warn, err, implement, + PduEvent, Result, debug_warn, err, implement, state_res::{self}, - PduEvent, Result, }; -use futures::{future, FutureExt}; +use futures::{FutureExt, future}; use ruma::{ - int, uint, CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, ServerName, - UInt, + CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, ServerName, UInt, int, + uint, }; use super::check_room_id; @@ -43,54 +42,59 @@ pub(super) async fn fetch_prev( while let Some(prev_event_id) = todo_outlier_stack.pop_front() { self.services.server.check_running()?; - if let Some((pdu, mut json_opt)) = self + match self .fetch_and_handle_outliers(origin, &[prev_event_id.clone()], create_event, room_id) .boxed() .await .pop() { - check_room_id(room_id, &pdu)?; + | Some((pdu, mut json_opt)) => { + check_room_id(room_id, &pdu)?; - let limit = self.services.server.config.max_fetch_prev_events; - if amount > limit { - debug_warn!("Max prev event limit reached! 
Limit: {limit}"); - graph.insert(prev_event_id.clone(), HashSet::new()); - continue; - } - - if json_opt.is_none() { - json_opt = self - .services - .outlier - .get_outlier_pdu_json(&prev_event_id) - .await - .ok(); - } - - if let Some(json) = json_opt { - if pdu.origin_server_ts > first_ts_in_room { - amount = amount.saturating_add(1); - for prev_prev in &pdu.prev_events { - if !graph.contains_key(prev_prev) { - todo_outlier_stack.push_back(prev_prev.clone()); - } - } - - graph - .insert(prev_event_id.clone(), pdu.prev_events.iter().cloned().collect()); - } else { - // Time based check failed + let limit = self.services.server.config.max_fetch_prev_events; + if amount > limit { + debug_warn!("Max prev event limit reached! Limit: {limit}"); graph.insert(prev_event_id.clone(), HashSet::new()); + continue; } - eventid_info.insert(prev_event_id.clone(), (pdu, json)); - } else { - // Get json failed, so this was not fetched over federation + if json_opt.is_none() { + json_opt = self + .services + .outlier + .get_outlier_pdu_json(&prev_event_id) + .await + .ok(); + } + + if let Some(json) = json_opt { + if pdu.origin_server_ts > first_ts_in_room { + amount = amount.saturating_add(1); + for prev_prev in &pdu.prev_events { + if !graph.contains_key(prev_prev) { + todo_outlier_stack.push_back(prev_prev.clone()); + } + } + + graph.insert( + prev_event_id.clone(), + pdu.prev_events.iter().cloned().collect(), + ); + } else { + // Time based check failed + graph.insert(prev_event_id.clone(), HashSet::new()); + } + + eventid_info.insert(prev_event_id.clone(), (pdu, json)); + } else { + // Get json failed, so this was not fetched over federation + graph.insert(prev_event_id.clone(), HashSet::new()); + } + }, + | _ => { + // Fetch and handle failed graph.insert(prev_event_id.clone(), HashSet::new()); - } - } else { - // Fetch and handle failed - graph.insert(prev_event_id.clone(), HashSet::new()); + }, } } diff --git a/src/service/rooms/event_handler/fetch_state.rs b/src/service/rooms/event_handler/fetch_state.rs index 4f2580db..b1a4a38b 100644 --- a/src/service/rooms/event_handler/fetch_state.rs +++ b/src/service/rooms/event_handler/fetch_state.rs @@ -1,10 +1,10 @@ -use std::collections::{hash_map, HashMap}; +use std::collections::{HashMap, hash_map}; -use conduwuit::{debug, debug_warn, implement, Err, Error, PduEvent, Result}; +use conduwuit::{Err, Error, PduEvent, Result, debug, debug_warn, implement}; use futures::FutureExt; use ruma::{ - api::federation::event::get_room_state_ids, events::StateEventType, EventId, OwnedEventId, - RoomId, ServerName, + EventId, OwnedEventId, RoomId, ServerName, api::federation::event::get_room_state_ids, + events::StateEventType, }; use crate::rooms::short::ShortStateKey; diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index 31c7762d..b6d3e21e 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -1,14 +1,14 @@ use std::{ - collections::{hash_map, BTreeMap}, + collections::{BTreeMap, hash_map}, time::Instant, }; -use conduwuit::{debug, debug::INFO_SPAN_LEVEL, err, implement, warn, Err, Result}; +use conduwuit::{Err, Result, debug, debug::INFO_SPAN_LEVEL, err, implement, warn}; use futures::{ - future::{try_join5, OptionFuture}, FutureExt, + future::{OptionFuture, try_join5}, }; -use ruma::{events::StateEventType, CanonicalJsonValue, EventId, RoomId, ServerName, UserId}; +use ruma::{CanonicalJsonValue, EventId, 
RoomId, ServerName, UserId, events::StateEventType}; use crate::rooms::timeline::RawPduId; diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index e628c77a..974eb300 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -1,15 +1,15 @@ use std::{ - collections::{hash_map, BTreeMap, HashMap}, + collections::{BTreeMap, HashMap, hash_map}, sync::Arc, }; use conduwuit::{ - debug, debug_info, err, implement, state_res, trace, warn, Err, Error, PduEvent, Result, + Err, Error, PduEvent, Result, debug, debug_info, err, implement, state_res, trace, warn, }; -use futures::{future::ready, TryFutureExt}; +use futures::{TryFutureExt, future::ready}; use ruma::{ - api::client::error::ErrorKind, events::StateEventType, CanonicalJsonObject, - CanonicalJsonValue, EventId, RoomId, ServerName, + CanonicalJsonObject, CanonicalJsonValue, EventId, RoomId, ServerName, + api::client::error::ErrorKind, events::StateEventType, }; use super::{check_room_id, get_room_version_id, to_room_version}; diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index f911f1fd..cf69a515 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -5,8 +5,8 @@ use std::{ }; use conduwuit::{ - debug, debug::INFO_SPAN_LEVEL, implement, utils::continue_exponential_backoff_secs, Err, - PduEvent, Result, + Err, PduEvent, Result, debug, debug::INFO_SPAN_LEVEL, implement, + utils::continue_exponential_backoff_secs, }; use ruma::{CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName, UInt}; diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 5960c734..e9e79ce4 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -18,16 +18,16 @@ use std::{ }; use conduwuit::{ - utils::{MutexMap, TryFutureExtExt}, Err, PduEvent, Result, RoomVersion, Server, + utils::{MutexMap, TryFutureExtExt}, }; use futures::TryFutureExt; use ruma::{ - events::room::create::RoomCreateEventContent, OwnedEventId, OwnedRoomId, RoomId, - RoomVersionId, + OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, + events::room::create::RoomCreateEventContent, }; -use crate::{globals, rooms, sending, server_keys, Dep}; +use crate::{Dep, globals, rooms, sending, server_keys}; pub struct Service { pub mutex_federation: RoomMutexMap, diff --git a/src/service/rooms/event_handler/parse_incoming_pdu.rs b/src/service/rooms/event_handler/parse_incoming_pdu.rs index 9b130763..a49fc541 100644 --- a/src/service/rooms/event_handler/parse_incoming_pdu.rs +++ b/src/service/rooms/event_handler/parse_incoming_pdu.rs @@ -1,4 +1,4 @@ -use conduwuit::{err, implement, pdu::gen_event_id_canonical_json, result::FlatOk, Result}; +use conduwuit::{Result, err, implement, pdu::gen_event_id_canonical_json, result::FlatOk}; use ruma::{CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId}; use serde_json::value::RawValue as RawJsonValue; diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 37d47d47..9033c3a8 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -5,13 +5,12 @@ use std::{ }; use conduwuit::{ - err, implement, + Error, Result, err, implement, state_res::{self, StateMap}, trace, - 
utils::stream::{automatic_width, IterStream, ReadyExt, TryWidebandExt, WidebandExt}, - Error, Result, + utils::stream::{IterStream, ReadyExt, TryWidebandExt, WidebandExt, automatic_width}, }; -use futures::{future::try_join, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; +use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::try_join}; use ruma::{OwnedEventId, RoomId, RoomVersionId}; use crate::rooms::state_compressor::CompressedState; @@ -93,11 +92,7 @@ pub async fn resolve_state( let new_room_state: CompressedState = self .services .state_compressor - .compress_state_events( - state_events - .iter() - .map(|(ref ssk, eid)| (ssk, (*eid).borrow())), - ) + .compress_state_events(state_events.iter().map(|(ssk, eid)| (ssk, (*eid).borrow()))) .collect() .await; diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 2eb6013a..8326f9da 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -6,11 +6,10 @@ use std::{ }; use conduwuit::{ - debug, err, implement, trace, + PduEvent, Result, StateMap, debug, err, implement, trace, utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryWidebandExt}, - PduEvent, Result, StateMap, }; -use futures::{future::try_join, FutureExt, StreamExt, TryFutureExt, TryStreamExt}; +use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::try_join}; use ruma::{OwnedEventId, RoomId, RoomVersionId}; use crate::rooms::short::ShortStateHash; diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index 385d2142..c1a1c3eb 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -1,12 +1,13 @@ use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::Instant}; use conduwuit::{ - debug, debug_info, err, implement, state_res, trace, + Err, EventTypeExt, PduEvent, Result, StateKey, debug, debug_info, err, implement, state_res, + trace, utils::stream::{BroadbandExt, ReadyExt}, - warn, Err, EventTypeExt, PduEvent, Result, StateKey, + warn, }; -use futures::{future::ready, FutureExt, StreamExt}; -use ruma::{events::StateEventType, CanonicalJsonValue, RoomId, ServerName}; +use futures::{FutureExt, StreamExt, future::ready}; +use ruma::{CanonicalJsonValue, RoomId, ServerName, events::StateEventType}; use super::{get_room_version_id, to_room_version}; use crate::rooms::{ diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index a6e00271..346314d1 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -3,13 +3,12 @@ use std::{collections::HashSet, sync::Arc}; use conduwuit::{ - implement, - utils::{stream::TryIgnore, IterStream, ReadyExt}, - Result, + Result, implement, + utils::{IterStream, ReadyExt, stream::TryIgnore}, }; use database::{Database, Deserialized, Handle, Interfix, Map, Qry}; -use futures::{pin_mut, Stream, StreamExt}; -use ruma::{api::client::filter::LazyLoadOptions, DeviceId, OwnedUserId, RoomId, UserId}; +use futures::{Stream, StreamExt, pin_mut}; +use ruma::{DeviceId, OwnedUserId, RoomId, UserId, api::client::filter::LazyLoadOptions}; pub struct Service { db: Data, diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index 6d5a85a0..54eef47d 100644 --- a/src/service/rooms/metadata/mod.rs +++ 
b/src/service/rooms/metadata/mod.rs @@ -1,11 +1,11 @@ use std::sync::Arc; -use conduwuit::{implement, utils::stream::TryIgnore, Result}; +use conduwuit::{Result, implement, utils::stream::TryIgnore}; use database::Map; use futures::{Stream, StreamExt}; use ruma::RoomId; -use crate::{rooms, Dep}; +use crate::{Dep, rooms}; pub struct Service { db: Data, diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index 9cd3d805..a1b0263a 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use database::{Deserialized, Json, Map}; use ruma::{CanonicalJsonObject, EventId}; diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index 26e11ded..f0beab5a 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -1,25 +1,25 @@ use std::{mem::size_of, sync::Arc}; use conduwuit::{ + PduCount, PduEvent, arrayvec::ArrayVec, result::LogErr, utils::{ + ReadyExt, stream::{TryIgnore, WidebandExt}, - u64_from_u8, ReadyExt, + u64_from_u8, }, - PduCount, PduEvent, }; use database::Map; use futures::{Stream, StreamExt}; -use ruma::{api::Direction, EventId, RoomId, UserId}; +use ruma::{EventId, RoomId, UserId, api::Direction}; use crate::{ - rooms, + Dep, rooms, rooms::{ short::{ShortEventId, ShortRoomId}, timeline::{PduId, RawPduId}, }, - Dep, }; pub(super) struct Data { diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index ba289f9b..18221c2d 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -2,11 +2,11 @@ mod data; use std::sync::Arc; use conduwuit::{PduCount, Result}; -use futures::{future::try_join, StreamExt}; -use ruma::{api::Direction, EventId, RoomId, UserId}; +use futures::{StreamExt, future::try_join}; +use ruma::{EventId, RoomId, UserId, api::Direction}; use self::data::{Data, PdusIterItem}; -use crate::{rooms, Dep}; +use crate::{Dep, rooms}; pub struct Service { services: Services, @@ -81,7 +81,7 @@ impl Service { .collect(); 'limit: while let Some(stack_pdu) = stack.pop() { - let target = match stack_pdu.0 .0 { + let target = match stack_pdu.0.0 { | PduCount::Normal(c) => c, // TODO: Support backfilled relations | PduCount::Backfilled(_) => 0, // This will result in an empty iterator diff --git a/src/service/rooms/read_receipt/data.rs b/src/service/rooms/read_receipt/data.rs index c21ad36c..62f87948 100644 --- a/src/service/rooms/read_receipt/data.rs +++ b/src/service/rooms/read_receipt/data.rs @@ -1,18 +1,18 @@ use std::sync::Arc; use conduwuit::{ - utils::{stream::TryIgnore, ReadyExt}, Result, + utils::{ReadyExt, stream::TryIgnore}, }; use database::{Deserialized, Json, Map}; use futures::{Stream, StreamExt}; use ruma::{ - events::{receipt::ReceiptEvent, AnySyncEphemeralRoomEvent}, - serde::Raw, CanonicalJsonObject, RoomId, UserId, + events::{AnySyncEphemeralRoomEvent, receipt::ReceiptEvent}, + serde::Raw, }; -use crate::{globals, Dep}; +use crate::{Dep, globals}; pub(super) struct Data { roomuserid_privateread: Arc, diff --git a/src/service/rooms/read_receipt/mod.rs b/src/service/rooms/read_receipt/mod.rs index 2bc21355..d6239aee 100644 --- a/src/service/rooms/read_receipt/mod.rs +++ b/src/service/rooms/read_receipt/mod.rs @@ -2,19 +2,19 @@ mod data; use std::{collections::BTreeMap, sync::Arc}; -use conduwuit::{debug, err, warn, PduCount, PduId, RawPduId, Result}; 
-use futures::{try_join, Stream, TryFutureExt}; +use conduwuit::{PduCount, PduId, RawPduId, Result, debug, err, warn}; +use futures::{Stream, TryFutureExt, try_join}; use ruma::{ + OwnedEventId, OwnedUserId, RoomId, UserId, events::{ - receipt::{ReceiptEvent, ReceiptEventContent, Receipts}, AnySyncEphemeralRoomEvent, SyncEphemeralRoomEvent, + receipt::{ReceiptEvent, ReceiptEventContent, Receipts}, }, serde::Raw, - OwnedEventId, OwnedUserId, RoomId, UserId, }; use self::data::{Data, ReceiptItem}; -use crate::{rooms, sending, Dep}; +use crate::{Dep, rooms, sending}; pub struct Service { services: Services, @@ -145,12 +145,14 @@ where let receipt = serde_json::from_str::>( value.json().get(), ); - if let Ok(value) = receipt { - for (event, receipt) in value.content { - json.insert(event, receipt); - } - } else { - debug!("failed to parse receipt: {:?}", receipt); + match receipt { + | Ok(value) => + for (event, receipt) in value.content { + json.insert(event, receipt); + }, + | _ => { + debug!("failed to parse receipt: {:?}", receipt); + }, } } let content = ReceiptEventContent::from_iter(json); diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index cc015237..4100dd75 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,26 +1,24 @@ use std::sync::Arc; use conduwuit::{ + PduCount, PduEvent, Result, arrayvec::ArrayVec, implement, utils::{ - set, + ArrayVecExt, IterStream, ReadyExt, set, stream::{TryIgnore, WidebandExt}, - ArrayVecExt, IterStream, ReadyExt, }, - PduCount, PduEvent, Result, }; -use database::{keyval::Val, Map}; +use database::{Map, keyval::Val}; use futures::{Stream, StreamExt}; -use ruma::{api::client::search::search_events::v3::Criteria, RoomId, UserId}; +use ruma::{RoomId, UserId, api::client::search::search_events::v3::Criteria}; use crate::{ - rooms, + Dep, rooms, rooms::{ short::ShortRoomId, timeline::{PduId, RawPduId}, }, - Dep, }; pub struct Service { @@ -140,7 +138,7 @@ pub async fn search_pdus<'a>( pub async fn search_pdu_ids( &self, query: &RoomQuery<'_>, -) -> Result + Send + '_> { +) -> Result + Send + '_ + use<'_>> { let shortroomid = self.services.short.get_shortroomid(query.room_id).await?; let pdu_ids = self.search_pdu_ids_query_room(query, shortroomid).await; @@ -187,7 +185,7 @@ fn search_pdu_ids_query_word( &self, shortroomid: ShortRoomId, word: &str, -) -> impl Stream> + Send + '_ { +) -> impl Stream> + Send + '_ + use<'_> { // rustc says const'ing this not yet stable let end_id: RawPduId = PduId { shortroomid, diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 8728325a..3980617e 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -1,13 +1,13 @@ use std::{borrow::Borrow, fmt::Debug, mem::size_of_val, sync::Arc}; pub use conduwuit::pdu::{ShortEventId, ShortId, ShortRoomId, ShortStateKey}; -use conduwuit::{err, implement, utils, utils::IterStream, Result, StateKey}; +use conduwuit::{Result, StateKey, err, implement, utils, utils::IterStream}; use database::{Deserialized, Get, Map, Qry}; use futures::{Stream, StreamExt}; -use ruma::{events::StateEventType, EventId, RoomId}; +use ruma::{EventId, RoomId, events::StateEventType}; use serde::Deserialize; -use crate::{globals, Dep}; +use crate::{Dep, globals}; pub struct Service { db: Data, diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 268d6dfe..52e7d2be 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -5,18 +5,18 
@@ mod tests; use std::sync::Arc; use conduwuit::{ - implement, + Err, Error, Result, implement, utils::{ + IterStream, future::BoolExt, math::usize_from_f64, stream::{BroadbandExt, ReadyExt}, - IterStream, }, - Err, Error, Result, }; -use futures::{pin_mut, stream::FuturesUnordered, FutureExt, Stream, StreamExt, TryFutureExt}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt, pin_mut, stream::FuturesUnordered}; use lru_cache::LruCache; use ruma::{ + OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, api::{ client::space::SpaceHierarchyRoomsChunk, federation::{ @@ -25,18 +25,17 @@ use ruma::{ }, }, events::{ + StateEventType, room::join_rules::{JoinRule, RoomJoinRulesEventContent}, space::child::{HierarchySpaceChildEvent, SpaceChildEventContent}, - StateEventType, }, serde::Raw, space::SpaceRoomJoinRule, - OwnedEventId, OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, }; use tokio::sync::{Mutex, MutexGuard}; pub use self::pagination_token::PaginationToken; -use crate::{conduwuit::utils::TryFutureExtExt, rooms, sending, Dep}; +use crate::{Dep, conduwuit::utils::TryFutureExtExt, rooms, sending}; pub struct Service { services: Services, @@ -440,8 +439,9 @@ async fn is_accessible_child( pub fn get_parent_children_via( parent: &SpaceHierarchyParentSummary, suggested_only: bool, -) -> impl DoubleEndedIterator)> + Send + '_ -{ +) -> impl DoubleEndedIterator + use<>)> ++ Send ++ '_ { parent .children_state .iter() diff --git a/src/service/rooms/spaces/pagination_token.rs b/src/service/rooms/spaces/pagination_token.rs index 8f019e8d..d97b7a2f 100644 --- a/src/service/rooms/spaces/pagination_token.rs +++ b/src/service/rooms/spaces/pagination_token.rs @@ -4,7 +4,7 @@ use std::{ }; use conduwuit::{Error, Result}; -use ruma::{api::client::error::ErrorKind, UInt}; +use ruma::{UInt, api::client::error::ErrorKind}; use crate::rooms::short::ShortRoomId; diff --git a/src/service/rooms/spaces/tests.rs b/src/service/rooms/spaces/tests.rs index dd6c2f35..d0395fdd 100644 --- a/src/service/rooms/spaces/tests.rs +++ b/src/service/rooms/spaces/tests.rs @@ -1,13 +1,13 @@ use std::str::FromStr; use ruma::{ + UInt, api::federation::space::{SpaceHierarchyParentSummary, SpaceHierarchyParentSummaryInit}, owned_room_id, owned_server_name, space::SpaceRoomJoinRule, - UInt, }; -use crate::rooms::spaces::{get_parent_children_via, PaginationToken}; +use crate::rooms::spaces::{PaginationToken, get_parent_children_via}; #[test] fn get_summary_children() { diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index d538de3c..8683a3be 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,36 +1,34 @@ use std::{collections::HashMap, fmt::Write, iter::once, sync::Arc}; use conduwuit::{ - err, + PduEvent, Result, err, result::FlatOk, state_res::{self, StateMap}, utils::{ - calculate_hash, + IterStream, MutexMap, MutexMapGuard, ReadyExt, calculate_hash, stream::{BroadbandExt, TryIgnore}, - IterStream, MutexMap, MutexMapGuard, ReadyExt, }, - warn, PduEvent, Result, + warn, }; use database::{Deserialized, Ignore, Interfix, Map}; use futures::{ - future::join_all, pin_mut, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, + FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, future::join_all, pin_mut, }; use ruma::{ + EventId, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, UserId, events::{ - room::{create::RoomCreateEventContent, member::RoomMemberEventContent}, AnyStrippedStateEvent, StateEventType, TimelineEventType, + 
room::{create::RoomCreateEventContent, member::RoomMemberEventContent}, }, serde::Raw, - EventId, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, UserId, }; use crate::{ - globals, rooms, + Dep, globals, rooms, rooms::{ short::{ShortEventId, ShortStateHash}, - state_compressor::{parse_compressed_state_event, CompressedState}, + state_compressor::{CompressedState, parse_compressed_state_event}, }, - Dep, }; pub struct Service { @@ -192,13 +190,13 @@ impl Service { .await; if !already_existed { - let states_parents = if let Ok(p) = previous_shortstatehash { - self.services - .state_compressor - .load_shortstatehash_info(p) - .await? - } else { - Vec::new() + let states_parents = match previous_shortstatehash { + | Ok(p) => + self.services + .state_compressor + .load_shortstatehash_info(p) + .await?, + | _ => Vec::new(), }; let (statediffnew, statediffremoved) = @@ -256,63 +254,65 @@ impl Service { .aput::(shorteventid, p); } - if let Some(state_key) = &new_pdu.state_key { - let states_parents = if let Ok(p) = previous_shortstatehash { - self.services + match &new_pdu.state_key { + | Some(state_key) => { + let states_parents = match previous_shortstatehash { + | Ok(p) => + self.services + .state_compressor + .load_shortstatehash_info(p) + .await?, + | _ => Vec::new(), + }; + + let shortstatekey = self + .services + .short + .get_or_create_shortstatekey(&new_pdu.kind.to_string().into(), state_key) + .await; + + let new = self + .services .state_compressor - .load_shortstatehash_info(p) - .await? - } else { - Vec::new() - }; + .compress_state_event(shortstatekey, &new_pdu.event_id) + .await; - let shortstatekey = self - .services - .short - .get_or_create_shortstatekey(&new_pdu.kind.to_string().into(), state_key) - .await; + let replaces = states_parents + .last() + .map(|info| { + info.full_state + .iter() + .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) + }) + .unwrap_or_default(); - let new = self - .services - .state_compressor - .compress_state_event(shortstatekey, &new_pdu.event_id) - .await; + if Some(&new) == replaces { + return Ok(previous_shortstatehash.expect("must exist")); + } - let replaces = states_parents - .last() - .map(|info| { - info.full_state - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); + // TODO: statehash with deterministic inputs + let shortstatehash = self.services.globals.next_count()?; - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } + let mut statediffnew = CompressedState::new(); + statediffnew.insert(new); - // TODO: statehash with deterministic inputs - let shortstatehash = self.services.globals.next_count()?; + let mut statediffremoved = CompressedState::new(); + if let Some(replaces) = replaces { + statediffremoved.insert(*replaces); + } - let mut statediffnew = CompressedState::new(); - statediffnew.insert(new); + self.services.state_compressor.save_state_from_diff( + shortstatehash, + Arc::new(statediffnew), + Arc::new(statediffremoved), + 2, + states_parents, + )?; - let mut statediffremoved = CompressedState::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.services.state_compressor.save_state_from_diff( - shortstatehash, - Arc::new(statediffnew), - Arc::new(statediffremoved), - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) + Ok(shortstatehash) + }, + | _ => + 
Ok(previous_shortstatehash.expect("first event in room must be a state event")), } } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index b7952ce6..7004e35a 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -9,14 +9,16 @@ use std::{ }; use conduwuit::{ - err, utils, - utils::math::{usize_from_f64, Expected}, - Result, + Result, err, utils, + utils::math::{Expected, usize_from_f64}, }; use database::Map; use lru_cache::LruCache; use ruma::{ + EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, OwnedRoomId, OwnedServerName, + OwnedUserId, RoomId, UserId, events::{ + StateEventType, room::{ avatar::RoomAvatarEventContent, canonical_alias::RoomCanonicalAliasEventContent, @@ -29,15 +31,12 @@ use ruma::{ name::RoomNameEventContent, topic::RoomTopicEventContent, }, - StateEventType, }, room::RoomType, space::SpaceRoomJoinRule, - EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, OwnedRoomId, OwnedServerName, - OwnedUserId, RoomId, UserId, }; -use crate::{rooms, rooms::short::ShortStateHash, Dep}; +use crate::{Dep, rooms, rooms::short::ShortStateHash}; pub struct Service { pub server_visibility_cache: Mutex>, diff --git a/src/service/rooms/state_accessor/room_state.rs b/src/service/rooms/state_accessor/room_state.rs index e3ec55fe..ff26b33a 100644 --- a/src/service/rooms/state_accessor/room_state.rs +++ b/src/service/rooms/state_accessor/room_state.rs @@ -1,8 +1,8 @@ use std::borrow::Borrow; -use conduwuit::{err, implement, PduEvent, Result, StateKey}; +use conduwuit::{PduEvent, Result, StateKey, err, implement}; use futures::{Stream, StreamExt, TryFutureExt}; -use ruma::{events::StateEventType, EventId, RoomId}; +use ruma::{EventId, RoomId, events::StateEventType}; use serde::Deserialize; /// Returns a single PDU from `room_id` with key (`event_type`,`state_key`). 
diff --git a/src/service/rooms/state_accessor/server_can.rs b/src/service/rooms/state_accessor/server_can.rs index 4d834227..2e8f3325 100644 --- a/src/service/rooms/state_accessor/server_can.rs +++ b/src/service/rooms/state_accessor/server_can.rs @@ -1,11 +1,11 @@ use conduwuit::{error, implement, utils::stream::ReadyExt}; use futures::StreamExt; use ruma::{ - events::{ - room::history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - StateEventType, - }, EventId, RoomId, ServerName, + events::{ + StateEventType, + room::history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + }, }; /// Whether a server is allowed to see an event through federation, based on diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs index da1500cb..625defe6 100644 --- a/src/service/rooms/state_accessor/state.rs +++ b/src/service/rooms/state_accessor/state.rs @@ -1,27 +1,26 @@ use std::{borrow::Borrow, ops::Deref, sync::Arc}; use conduwuit::{ - at, err, implement, pair_of, + PduEvent, Result, StateKey, at, err, implement, pair_of, utils::{ result::FlatOk, stream::{BroadbandExt, IterStream, ReadyExt, TryExpect}, }, - PduEvent, Result, StateKey, }; use database::Deserialized; -use futures::{future::try_join, pin_mut, FutureExt, Stream, StreamExt, TryFutureExt}; +use futures::{FutureExt, Stream, StreamExt, TryFutureExt, future::try_join, pin_mut}; use ruma::{ - events::{ - room::member::{MembershipState, RoomMemberEventContent}, - StateEventType, - }, EventId, OwnedEventId, UserId, + events::{ + StateEventType, + room::member::{MembershipState, RoomMemberEventContent}, + }, }; use serde::Deserialize; use crate::rooms::{ short::{ShortEventId, ShortStateHash, ShortStateKey}, - state_compressor::{compress_state_event, parse_compressed_state_event, CompressedState}, + state_compressor::{CompressedState, compress_state_event, parse_compressed_state_event}, }; /// The user was a joined member at this state (potentially in the past) diff --git a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs index 0332c227..c30e1da8 100644 --- a/src/service/rooms/state_accessor/user_can.rs +++ b/src/service/rooms/state_accessor/user_can.rs @@ -1,14 +1,14 @@ -use conduwuit::{error, implement, pdu::PduBuilder, Err, Error, Result}; +use conduwuit::{Err, Error, Result, error, implement, pdu::PduBuilder}; use ruma::{ + EventId, RoomId, UserId, events::{ + StateEventType, TimelineEventType, room::{ history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, member::{MembershipState, RoomMemberEventContent}, power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, }, - StateEventType, TimelineEventType, }, - EventId, RoomId, UserId, }; use crate::rooms::state::RoomMutexGuard; @@ -44,7 +44,7 @@ pub async fn user_can_redact( ))); } - if let Ok(pl_event_content) = self + match self .room_state_get_content::( room_id, &StateEventType::RoomPowerLevels, @@ -52,33 +52,35 @@ pub async fn user_can_redact( ) .await { - let pl_event: RoomPowerLevels = pl_event_content.into(); - Ok(pl_event.user_can_redact_event_of_other(sender) - || pl_event.user_can_redact_own_event(sender) - && if let Ok(redacting_event) = redacting_event { - if federation { - redacting_event.sender.server_name() == sender.server_name() - } else { - redacting_event.sender == sender - } - } else { - false - }) - } else { - // Falling back on m.room.create to judge power level - if let Ok(room_create) = self - .room_state_get(room_id, 
&StateEventType::RoomCreate, "") - .await - { - Ok(room_create.sender == sender - || redacting_event - .as_ref() - .is_ok_and(|redacting_event| redacting_event.sender == sender)) - } else { - Err(Error::bad_database( - "No m.room.power_levels or m.room.create events in database for room", - )) - } + | Ok(pl_event_content) => { + let pl_event: RoomPowerLevels = pl_event_content.into(); + Ok(pl_event.user_can_redact_event_of_other(sender) + || pl_event.user_can_redact_own_event(sender) + && match redacting_event { + | Ok(redacting_event) => + if federation { + redacting_event.sender.server_name() == sender.server_name() + } else { + redacting_event.sender == sender + }, + | _ => false, + }) + }, + | _ => { + // Falling back on m.room.create to judge power level + match self + .room_state_get(room_id, &StateEventType::RoomCreate, "") + .await + { + | Ok(room_create) => Ok(room_create.sender == sender + || redacting_event + .as_ref() + .is_ok_and(|redacting_event| redacting_event.sender == sender)), + | _ => Err(Error::bad_database( + "No m.room.power_levels or m.room.create events in database for room", + )), + } + }, } } diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 0d25142d..4403468b 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -4,31 +4,31 @@ use std::{ }; use conduwuit::{ - is_not_empty, + Result, is_not_empty, result::LogErr, - utils::{stream::TryIgnore, ReadyExt, StreamTools}, - warn, Result, + utils::{ReadyExt, StreamTools, stream::TryIgnore}, + warn, }; -use database::{serialize_key, Deserialized, Ignore, Interfix, Json, Map}; -use futures::{future::join5, pin_mut, stream::iter, Stream, StreamExt}; +use database::{Deserialized, Ignore, Interfix, Json, Map, serialize_key}; +use futures::{Stream, StreamExt, future::join5, pin_mut, stream::iter}; use itertools::Itertools; use ruma::{ + OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, events::{ + AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, + RoomAccountDataEventType, StateEventType, direct::DirectEvent, room::{ create::RoomCreateEventContent, member::{MembershipState, RoomMemberEventContent}, power_levels::RoomPowerLevelsEventContent, }, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, StateEventType, }, int, serde::Raw, - OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, }; -use crate::{account_data, appservice::RegistrationInfo, globals, rooms, users, Dep}; +use crate::{Dep, account_data, appservice::RegistrationInfo, globals, rooms, users}; pub struct Service { appservice_in_room_cache: AppServiceInRoomCache, diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 18731809..c566eb1c 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -6,10 +6,10 @@ use std::{ }; use conduwuit::{ + Result, arrayvec::ArrayVec, at, checked, err, expected, utils, utils::{bytes, math::usize_from_f64, stream::IterStream}, - Result, }; use database::Map; use futures::{Stream, StreamExt}; @@ -17,9 +17,8 @@ use lru_cache::LruCache; use ruma::{EventId, RoomId}; use crate::{ - rooms, + Dep, rooms, rooms::short::{ShortEventId, ShortId, ShortStateHash, ShortStateKey}, - Dep, }; pub struct Service { diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs index bc995e27..7f9a7515 100644 --- a/src/service/rooms/threads/mod.rs +++ 
b/src/service/rooms/threads/mod.rs @@ -1,22 +1,21 @@ use std::{collections::BTreeMap, sync::Arc}; use conduwuit::{ - err, + PduCount, PduEvent, PduId, RawPduId, Result, err, utils::{ - stream::{TryIgnore, WidebandExt}, ReadyExt, + stream::{TryIgnore, WidebandExt}, }, - PduCount, PduEvent, PduId, RawPduId, Result, }; use database::{Deserialized, Map}; use futures::{Stream, StreamExt}; use ruma::{ - api::client::threads::get_threads::v1::IncludeThreads, events::relation::BundledThread, uint, CanonicalJsonValue, EventId, OwnedUserId, RoomId, UserId, + api::client::threads::get_threads::v1::IncludeThreads, events::relation::BundledThread, uint, }; use serde_json::json; -use crate::{rooms, rooms::short::ShortRoomId, Dep}; +use crate::{Dep, rooms, rooms::short::ShortRoomId}; pub struct Service { db: Data, @@ -121,10 +120,13 @@ impl Service { } let mut users = Vec::new(); - if let Ok(userids) = self.get_participants(&root_id).await { - users.extend_from_slice(&userids); - } else { - users.push(root_pdu.sender); + match self.get_participants(&root_id).await { + | Ok(userids) => { + users.extend_from_slice(&userids); + }, + | _ => { + users.push(root_pdu.sender); + }, } users.push(pdu.sender.clone()); diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 457c1e8d..94c78bb0 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -1,18 +1,17 @@ use std::{borrow::Borrow, sync::Arc}; use conduwuit::{ - at, err, + Err, PduCount, PduEvent, Result, at, err, result::{LogErr, NotFound}, utils, utils::stream::TryReadyExt, - Err, PduCount, PduEvent, Result, }; use database::{Database, Deserialized, Json, KeyVal, Map}; -use futures::{future::select_ok, pin_mut, FutureExt, Stream, TryFutureExt, TryStreamExt}; -use ruma::{api::Direction, CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId}; +use futures::{FutureExt, Stream, TryFutureExt, TryStreamExt, future::select_ok, pin_mut}; +use ruma::{CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId, api::Direction}; use super::{PduId, RawPduId}; -use crate::{rooms, rooms::short::ShortRoomId, Dep}; +use crate::{Dep, rooms, rooms::short::ShortRoomId}; pub(super) struct Data { eventid_outlierpdu: Arc, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 9d6ee982..4be97fb2 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -10,22 +10,25 @@ use std::{ }; use conduwuit::{ - at, debug, debug_warn, err, error, implement, info, - pdu::{gen_event_id, EventHash, PduBuilder, PduCount, PduEvent}, + Err, Error, Result, Server, at, debug, debug_warn, err, error, implement, info, + pdu::{EventHash, PduBuilder, PduCount, PduEvent, gen_event_id}, state_res::{self, Event, RoomVersion}, utils::{ - self, future::TryExtExt, stream::TryIgnore, IterStream, MutexMap, MutexMapGuard, ReadyExt, + self, IterStream, MutexMap, MutexMapGuard, ReadyExt, future::TryExtExt, stream::TryIgnore, }, - validated, warn, Err, Error, Result, Server, + validated, warn, }; pub use conduwuit::{PduId, RawPduId}; use futures::{ - future, future::ready, pin_mut, Future, FutureExt, Stream, StreamExt, TryStreamExt, + Future, FutureExt, Stream, StreamExt, TryStreamExt, future, future::ready, pin_mut, }; use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, + RoomId, RoomVersionId, ServerName, UserId, api::federation, canonical_json::to_canonical_value, events::{ + GlobalAccountDataEventType, StateEventType, 
TimelineEventType, push_rules::PushRulesEvent, room::{ create::RoomCreateEventContent, @@ -34,23 +37,21 @@ use ruma::{ power_levels::RoomPowerLevelsEventContent, redaction::RoomRedactionEventContent, }, - GlobalAccountDataEventType, StateEventType, TimelineEventType, }, push::{Action, Ruleset, Tweak}, - uint, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, - OwnedServerName, RoomId, RoomVersionId, ServerName, UserId, + uint, }; use serde::Deserialize; -use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; use self::data::Data; pub use self::data::PdusIterItem; use crate::{ - account_data, admin, appservice, + Dep, account_data, admin, appservice, appservice::NamespaceRegex, globals, pusher, rooms, rooms::{short::ShortRoomId, state_compressor::CompressedState}, - sending, server_keys, users, Dep, + sending, server_keys, users, }; // Update Relationships diff --git a/src/service/rooms/typing/mod.rs b/src/service/rooms/typing/mod.rs index c710b33a..a81ee95c 100644 --- a/src/service/rooms/typing/mod.rs +++ b/src/service/rooms/typing/mod.rs @@ -1,19 +1,18 @@ use std::{collections::BTreeMap, sync::Arc}; use conduwuit::{ - debug_info, trace, + Result, Server, debug_info, trace, utils::{self, IterStream}, - Result, Server, }; use futures::StreamExt; use ruma::{ + OwnedRoomId, OwnedUserId, RoomId, UserId, api::federation::transactions::edu::{Edu, TypingContent}, events::SyncEphemeralRoomEvent, - OwnedRoomId, OwnedUserId, RoomId, UserId, }; -use tokio::sync::{broadcast, RwLock}; +use tokio::sync::{RwLock, broadcast}; -use crate::{globals, sending, sending::EduBuf, users, Dep}; +use crate::{Dep, globals, sending, sending::EduBuf, users}; pub struct Service { server: Arc, diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 6a0c6aa1..bd76f1f4 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -1,10 +1,10 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use database::{Database, Deserialized, Map}; use ruma::{RoomId, UserId}; -use crate::{globals, rooms, rooms::short::ShortStateHash, Dep}; +use crate::{Dep, globals, rooms, rooms::short::ShortStateHash}; pub struct Service { db: Data, diff --git a/src/service/sending/appservice.rs b/src/service/sending/appservice.rs index 6b58d964..7fa0be9a 100644 --- a/src/service/sending/appservice.rs +++ b/src/service/sending/appservice.rs @@ -1,10 +1,10 @@ use std::{fmt::Debug, mem}; use bytes::BytesMut; -use conduwuit::{debug_error, err, trace, utils, warn, Err, Result}; +use conduwuit::{Err, Result, debug_error, err, trace, utils, warn}; use reqwest::Client; use ruma::api::{ - appservice::Registration, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, + IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, appservice::Registration, }; /// Sends a request to an appservice diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs index 4dd2d5aa..a6bcc2b2 100644 --- a/src/service/sending/data.rs +++ b/src/service/sending/data.rs @@ -1,16 +1,15 @@ use std::{fmt::Debug, sync::Arc}; use conduwuit::{ - at, utils, - utils::{stream::TryIgnore, ReadyExt}, - Error, Result, + Error, Result, at, utils, + utils::{ReadyExt, stream::TryIgnore}, }; use database::{Database, Deserialized, Map}; use futures::{Stream, StreamExt}; use ruma::{OwnedServerName, ServerName, UserId}; use super::{Destination, SendingEvent}; -use 
crate::{globals, Dep}; +use crate::{Dep, globals}; pub(super) type OutgoingItem = (Key, SendingEvent, Destination); pub(super) type SendingItem = (Key, SendingEvent); @@ -102,7 +101,7 @@ impl Data { pub fn active_requests_for( &self, destination: &Destination, - ) -> impl Stream + Send + '_ { + ) -> impl Stream + Send + '_ + use<'_> { let prefix = destination.get_prefix(); self.servercurrentevent_data .raw_stream_from(&prefix) @@ -156,7 +155,7 @@ impl Data { pub fn queued_requests( &self, destination: &Destination, - ) -> impl Stream + Send + '_ { + ) -> impl Stream + Send + '_ + use<'_> { let prefix = destination.get_prefix(); self.servernameevent_data .raw_stream_from(&prefix) diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index b46ce7a8..379829b4 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -12,15 +12,15 @@ use std::{ use async_trait::async_trait; use conduwuit::{ - debug, debug_warn, err, error, + Result, Server, debug, debug_warn, err, error, smallvec::SmallVec, - utils::{available_parallelism, math::usize_from_u64_truncated, ReadyExt, TryReadyExt}, - warn, Result, Server, + utils::{ReadyExt, TryReadyExt, available_parallelism, math::usize_from_u64_truncated}, + warn, }; use futures::{FutureExt, Stream, StreamExt}; use ruma::{ - api::{appservice::Registration, OutgoingRequest}, RoomId, ServerName, UserId, + api::{OutgoingRequest, appservice::Registration}, }; use tokio::{task, task::JoinSet}; @@ -30,8 +30,8 @@ pub use self::{ sender::{EDU_LIMIT, PDU_LIMIT}, }; use crate::{ - account_data, client, federation, globals, presence, pusher, rooms, - rooms::timeline::RawPduId, users, Dep, + Dep, account_data, client, federation, globals, presence, pusher, rooms, + rooms::timeline::RawPduId, users, }; pub struct Service { diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 3e86de2d..c4f34177 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -2,32 +2,33 @@ use std::{ collections::{BTreeMap, HashMap, HashSet}, fmt::Debug, sync::{ - atomic::{AtomicU64, AtomicUsize, Ordering}, Arc, + atomic::{AtomicU64, AtomicUsize, Ordering}, }, time::{Duration, Instant}, }; -use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine as _}; +use base64::{Engine as _, engine::general_purpose::URL_SAFE_NO_PAD}; use conduwuit::{ - debug, err, error, + Error, Result, debug, err, error, result::LogErr, trace, utils::{ - calculate_hash, continue_exponential_backoff_secs, + ReadyExt, calculate_hash, continue_exponential_backoff_secs, future::TryExtExt, stream::{BroadbandExt, IterStream, WidebandExt}, - ReadyExt, }, - warn, Error, Result, + warn, }; use futures::{ + FutureExt, StreamExt, future::{BoxFuture, OptionFuture}, join, pin_mut, stream::FuturesUnordered, - FutureExt, StreamExt, }; use ruma::{ + CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedRoomId, OwnedServerName, OwnedUserId, + RoomId, RoomVersionId, ServerName, UInt, api::{ appservice::event::push_events::v1::EphemeralData, federation::transactions::{ @@ -40,18 +41,17 @@ use ruma::{ }, device_id, events::{ - push_rules::PushRulesEvent, receipt::ReceiptType, AnySyncEphemeralRoomEvent, - GlobalAccountDataEventType, + AnySyncEphemeralRoomEvent, GlobalAccountDataEventType, push_rules::PushRulesEvent, + receipt::ReceiptType, }, push, serde::Raw, - uint, CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedRoomId, OwnedServerName, - OwnedUserId, RoomId, RoomVersionId, ServerName, UInt, + uint, }; -use serde_json::value::{to_raw_value, 
RawValue as RawJsonValue}; +use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; use super::{ - appservice, data::QueueItem, Destination, EduBuf, EduVec, Msg, SendingEvent, Service, + Destination, EduBuf, EduVec, Msg, SendingEvent, Service, appservice, data::QueueItem, }; #[derive(Debug)] @@ -146,7 +146,7 @@ impl Service { statuses.entry(dest).and_modify(|e| { *e = match e { | TransactionStatus::Running => TransactionStatus::Failed(1, Instant::now()), - | TransactionStatus::Retrying(ref n) => + | &mut TransactionStatus::Retrying(ref n) => TransactionStatus::Failed(n.saturating_add(1), Instant::now()), | TransactionStatus::Failed(..) => { panic!("Request that was not even running failed?!") @@ -211,7 +211,7 @@ impl Service { async fn finish_responses<'a>(&'a self, futures: &mut SendingFutures<'a>) { use tokio::{ select, - time::{sleep_until, Instant}, + time::{Instant, sleep_until}, }; let timeout = self.server.config.sender_shutdown_timeout; diff --git a/src/service/server_keys/acquire.rs b/src/service/server_keys/acquire.rs index 305cbfef..64b936b6 100644 --- a/src/service/server_keys/acquire.rs +++ b/src/service/server_keys/acquire.rs @@ -7,13 +7,13 @@ use std::{ use conduwuit::{ debug, debug_error, debug_warn, error, implement, info, result::FlatOk, trace, warn, }; -use futures::{stream::FuturesUnordered, StreamExt}; +use futures::{StreamExt, stream::FuturesUnordered}; use ruma::{ - api::federation::discovery::ServerSigningKeys, serde::Raw, CanonicalJsonObject, - OwnedServerName, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, + CanonicalJsonObject, OwnedServerName, OwnedServerSigningKeyId, ServerName, + ServerSigningKeyId, api::federation::discovery::ServerSigningKeys, serde::Raw, }; use serde_json::value::RawValue as RawJsonValue; -use tokio::time::{timeout_at, Instant}; +use tokio::time::{Instant, timeout_at}; use super::key_exists; diff --git a/src/service/server_keys/get.rs b/src/service/server_keys/get.rs index 5a027d64..00aeae1e 100644 --- a/src/service/server_keys/get.rs +++ b/src/service/server_keys/get.rs @@ -1,12 +1,12 @@ use std::borrow::Borrow; -use conduwuit::{implement, Err, Result}; +use conduwuit::{Err, Result, implement}; use ruma::{ - api::federation::discovery::VerifyKey, CanonicalJsonObject, RoomVersionId, ServerName, - ServerSigningKeyId, + CanonicalJsonObject, RoomVersionId, ServerName, ServerSigningKeyId, + api::federation::discovery::VerifyKey, }; -use super::{extract_key, PubKeyMap, PubKeys}; +use super::{PubKeyMap, PubKeys, extract_key}; #[implement(super::Service)] pub async fn get_event_keys( diff --git a/src/service/server_keys/keypair.rs b/src/service/server_keys/keypair.rs index 6f983c26..259c37fb 100644 --- a/src/service/server_keys/keypair.rs +++ b/src/service/server_keys/keypair.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{debug, debug_info, err, error, utils, utils::string_from_bytes, Result}; +use conduwuit::{Result, debug, debug_info, err, error, utils, utils::string_from_bytes}; use database::Database; use ruma::{api::federation::discovery::VerifyKey, serde::Base64, signatures::Ed25519KeyPair}; diff --git a/src/service/server_keys/mod.rs b/src/service/server_keys/mod.rs index 3f6a3039..bf6799ba 100644 --- a/src/service/server_keys/mod.rs +++ b/src/service/server_keys/mod.rs @@ -8,22 +8,21 @@ mod verify; use std::{collections::BTreeMap, sync::Arc, time::Duration}; use conduwuit::{ - implement, - utils::{timepoint_from_now, IterStream}, - Result, Server, + Result, Server, implement, + utils::{IterStream, 
timepoint_from_now}, }; use database::{Deserialized, Json, Map}; use futures::StreamExt; use ruma::{ + CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, RoomVersionId, + ServerName, ServerSigningKeyId, api::federation::discovery::{ServerSigningKeys, VerifyKey}, serde::Raw, signatures::{Ed25519KeyPair, PublicKeyMap, PublicKeySet}, - CanonicalJsonObject, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, RoomVersionId, - ServerName, ServerSigningKeyId, }; use serde_json::value::RawValue as RawJsonValue; -use crate::{globals, sending, Dep}; +use crate::{Dep, globals, sending}; pub struct Service { keypair: Box, diff --git a/src/service/server_keys/request.rs b/src/service/server_keys/request.rs index afe8958b..171b755b 100644 --- a/src/service/server_keys/request.rs +++ b/src/service/server_keys/request.rs @@ -1,13 +1,13 @@ use std::{collections::BTreeMap, fmt::Debug}; -use conduwuit::{debug, implement, Err, Result}; +use conduwuit::{Err, Result, debug, implement}; use ruma::{ - api::federation::discovery::{ - get_remote_server_keys, - get_remote_server_keys_batch::{self, v2::QueryCriteria}, - get_server_keys, ServerSigningKeys, - }, OwnedServerName, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, + api::federation::discovery::{ + ServerSigningKeys, get_remote_server_keys, + get_remote_server_keys_batch::{self, v2::QueryCriteria}, + get_server_keys, + }, }; #[implement(super::Service)] @@ -79,7 +79,7 @@ pub async fn notary_request( &self, notary: &ServerName, target: &ServerName, -) -> Result + Clone + Debug + Send> { +) -> Result + Clone + Debug + Send + use<>> { use get_remote_server_keys::v2::Request; let request = Request { diff --git a/src/service/server_keys/sign.rs b/src/service/server_keys/sign.rs index 8d6f108c..e8cc485d 100644 --- a/src/service/server_keys/sign.rs +++ b/src/service/server_keys/sign.rs @@ -1,4 +1,4 @@ -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use ruma::{CanonicalJsonObject, RoomVersionId}; #[implement(super::Service)] diff --git a/src/service/server_keys/verify.rs b/src/service/server_keys/verify.rs index 0f03e59e..84433628 100644 --- a/src/service/server_keys/verify.rs +++ b/src/service/server_keys/verify.rs @@ -1,6 +1,6 @@ -use conduwuit::{implement, pdu::gen_event_id_canonical_json, Err, Result}; +use conduwuit::{Err, Result, implement, pdu::gen_event_id_canonical_json}; use ruma::{ - signatures::Verified, CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomVersionId, + CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomVersionId, signatures::Verified, }; use serde_json::value::RawValue as RawJsonValue; diff --git a/src/service/service.rs b/src/service/service.rs index cad01437..2907a562 100644 --- a/src/service/service.rs +++ b/src/service/service.rs @@ -7,7 +7,7 @@ use std::{ }; use async_trait::async_trait; -use conduwuit::{err, error::inspect_log, utils::string::SplitInfallible, Err, Result, Server}; +use conduwuit::{Err, Result, Server, err, error::inspect_log, utils::string::SplitInfallible}; use database::Database; /// Abstract interface for a Service diff --git a/src/service/services.rs b/src/service/services.rs index fb334b96..269a1f87 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -5,7 +5,7 @@ use std::{ sync::{Arc, RwLock}, }; -use conduwuit::{debug, debug_info, info, trace, Result, Server}; +use conduwuit::{Result, Server, debug, debug_info, info, trace}; use database::Database; use tokio::sync::Mutex; diff --git a/src/service/sync/mod.rs 
b/src/service/sync/mod.rs index 0b86377a..bf2bc142 100644 --- a/src/service/sync/mod.rs +++ b/src/service/sync/mod.rs @@ -8,15 +8,15 @@ use std::{ use conduwuit::{Result, Server}; use database::Map; use ruma::{ + DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, UserId, api::client::sync::sync_events::{ self, v4::{ExtensionsConfig, SyncRequestList}, v5, }, - DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, UserId, }; -use crate::{rooms, Dep}; +use crate::{Dep, rooms}; pub struct Service { db: Data, diff --git a/src/service/sync/watch.rs b/src/service/sync/watch.rs index 0a9c5d15..96981472 100644 --- a/src/service/sync/watch.rs +++ b/src/service/sync/watch.rs @@ -1,5 +1,5 @@ -use conduwuit::{implement, trace, Result}; -use futures::{pin_mut, stream::FuturesUnordered, FutureExt, StreamExt}; +use conduwuit::{Result, implement, trace}; +use futures::{FutureExt, StreamExt, pin_mut, stream::FuturesUnordered}; use ruma::{DeviceId, UserId}; #[implement(super::Service)] diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index 912c0b49..9c284b70 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{implement, Result}; +use conduwuit::{Result, implement}; use database::{Handle, Map}; use ruma::{DeviceId, TransactionId, UserId}; diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 7084f32a..51f5fb11 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -4,20 +4,19 @@ use std::{ }; use conduwuit::{ - err, error, implement, utils, + Error, Result, err, error, implement, utils, utils::{hash, string::EMPTY}, - Error, Result, }; use database::{Deserialized, Json, Map}; use ruma::{ + CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedUserId, UserId, api::client::{ error::ErrorKind, uiaa::{AuthData, AuthType, Password, UiaaInfo, UserIdentifier}, }, - CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedUserId, UserId, }; -use crate::{config, globals, users, Dep}; +use crate::{Dep, config, globals, users}; pub struct Service { userdevicesessionid_uiaarequest: RwLock, @@ -144,8 +143,7 @@ pub async fn try_auth( }; #[cfg(not(feature = "element_hacks"))] - let Some(UserIdentifier::UserIdOrLocalpart(username)) = identifier - else { + let Some(UserIdentifier::UserIdOrLocalpart(username)) = identifier else { return Err(Error::BadRequest( ErrorKind::Unrecognized, "Identifier type not recognized.", diff --git a/src/service/updates/mod.rs b/src/service/updates/mod.rs index 7fd93b6c..28bee65a 100644 --- a/src/service/updates/mod.rs +++ b/src/service/updates/mod.rs @@ -1,16 +1,16 @@ use std::{sync::Arc, time::Duration}; use async_trait::async_trait; -use conduwuit::{debug, info, warn, Result, Server}; +use conduwuit::{Result, Server, debug, info, warn}; use database::{Deserialized, Map}; use ruma::events::room::message::RoomMessageEventContent; use serde::Deserialize; use tokio::{ sync::Notify, - time::{interval, MissedTickBehavior}, + time::{MissedTickBehavior, interval}, }; -use crate::{admin, client, globals, Dep}; +use crate::{Dep, admin, client, globals}; pub struct Service { interval: Duration, diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index f0389a4a..b3f5db88 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,25 +1,24 @@ use std::{collections::BTreeMap, mem, sync::Arc}; use conduwuit::{ - at, debug_warn, err, trace, - utils::{self, stream::TryIgnore, string::Unquoted, ReadyExt}, - Err, Error, Result, Server, + Err, 
Error, Result, Server, at, debug_warn, err, trace, + utils::{self, ReadyExt, stream::TryIgnore, string::Unquoted}, }; use database::{Deserialized, Ignore, Interfix, Json, Map}; use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{ + DeviceId, KeyId, MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, OneTimeKeyId, + OneTimeKeyName, OwnedDeviceId, OwnedKeyId, OwnedMxcUri, OwnedUserId, RoomId, UInt, UserId, api::client::{device::Device, error::ErrorKind, filter::FilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::{ - ignored_user_list::IgnoredUserListEvent, AnyToDeviceEvent, GlobalAccountDataEventType, + AnyToDeviceEvent, GlobalAccountDataEventType, ignored_user_list::IgnoredUserListEvent, }, serde::Raw, - DeviceId, KeyId, MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, OneTimeKeyId, - OneTimeKeyName, OwnedDeviceId, OwnedKeyId, OwnedMxcUri, OwnedUserId, RoomId, UInt, UserId, }; use serde_json::json; -use crate::{account_data, admin, globals, rooms, Dep}; +use crate::{Dep, account_data, admin, globals, rooms}; pub struct Service { services: Services, @@ -246,10 +245,13 @@ impl Service { /// Sets a new avatar_url or removes it if avatar_url is None. pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) { - if let Some(avatar_url) = avatar_url { - self.db.userid_avatarurl.insert(user_id, &avatar_url); - } else { - self.db.userid_avatarurl.remove(user_id); + match avatar_url { + | Some(avatar_url) => { + self.db.userid_avatarurl.insert(user_id, &avatar_url); + }, + | _ => { + self.db.userid_avatarurl.remove(user_id); + }, } } From 045e8a293740ba1ee94d93d09d27d07a6c0d67d0 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 23 Feb 2025 02:51:55 -0500 Subject: [PATCH 071/310] stop building mac binaries for now because of linker issues Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 126 --------------------------------------- 1 file changed, 126 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 24f2db45..8e1cf6c6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -623,132 +623,6 @@ jobs: scp oci-image-${{ matrix.target }}-debug.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-${{ matrix.target }}-debug.tar.gz fi - build_mac_binaries: - name: Build MacOS Binaries - strategy: - matrix: - os: [macos-latest, macos-13] - runs-on: ${{ matrix.os }} - steps: - - name: Sync repository - uses: actions/checkout@v4 - with: - persist-credentials: false - - - name: Setup SSH web publish - env: - web_upload_ssh_private_key: ${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }} - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' - run: | - mkdir -p -v ~/.ssh - - echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts - echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519 - - chmod 600 ~/.ssh/id_ed25519 - - cat >>~/.ssh/config <> "$GITHUB_ENV" - - - name: Tag comparison check - if: ${{ startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') }} - run: | - # Tag mismatch with latest repo tag check to prevent potential downgrades - LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`) - - if [ ${LATEST_TAG} != ${GH_REF_NAME} ]; then - echo '# WARNING: Attempting to run this workflow for a tag that is not the 
latest repo tag. Aborting.' - echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.' >> $GITHUB_STEP_SUMMARY - exit 1 - fi - - # use sccache for Rust - - name: Run sccache-cache - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - #if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} - uses: mozilla-actions/sccache-action@main - - # use rust-cache - - uses: Swatinem/rust-cache@v2 - with: - cache-all-crates: "true" - cache-on-failure: "true" - cache-targets: "true" - - # Nix can't do portable macOS builds yet - - name: Build macOS x86_64 binary - if: ${{ matrix.os == 'macos-13' }} - run: | - CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short ${{ github.sha }})" cargo build --release --locked --features=perf_measurements,sentry_telemetry,direct_tls - cp -v -f target/release/conduwuit conduwuit-macos-x86_64 - otool -L conduwuit-macos-x86_64 - - # quick smoke test of the x86_64 macOS binary - - name: Run x86_64 macOS release binary - if: ${{ matrix.os == 'macos-13' }} - run: | - ./conduwuit-macos-x86_64 --help - ./conduwuit-macos-x86_64 --version - - - name: Build macOS arm64 binary - if: ${{ matrix.os == 'macos-latest' }} - run: | - CONDUWUIT_VERSION_EXTRA="$(git rev-parse --short ${{ github.sha }})" cargo build --release --locked --features=perf_measurements,sentry_telemetry,direct_tls - cp -v -f target/release/conduwuit conduwuit-macos-arm64 - otool -L conduwuit-macos-arm64 - - # quick smoke test of the arm64 macOS binary - - name: Run arm64 macOS release binary - if: ${{ matrix.os == 'macos-latest' }} - run: | - ./conduwuit-macos-arm64 --help - ./conduwuit-macos-arm64 --version - - - name: Upload macOS x86_64 binary to webserver - if: ${{ matrix.os == 'macos-13' }} - run: | - if [ ! -z $SSH_WEBSITE ]; then - chmod +x conduwuit-macos-x86_64 - scp conduwuit-macos-x86_64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/conduwuit-macos-x86_64 - fi - - - name: Upload macOS arm64 binary to webserver - if: ${{ matrix.os == 'macos-latest' }} - run: | - if [ ! 
-z $SSH_WEBSITE ]; then - chmod +x conduwuit-macos-arm64 - scp conduwuit-macos-arm64 website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/conduwuit-macos-arm64 - fi - - - name: Upload macOS x86_64 binary - if: ${{ matrix.os == 'macos-13' }} - uses: actions/upload-artifact@v4 - with: - name: conduwuit-macos-x86_64 - path: conduwuit-macos-x86_64 - if-no-files-found: error - - - name: Upload macOS arm64 binary - if: ${{ matrix.os == 'macos-latest' }} - uses: actions/upload-artifact@v4 - with: - name: conduwuit-macos-arm64 - path: conduwuit-macos-arm64 - if-no-files-found: error variables: outputs: github_repository: ${{ steps.var.outputs.github_repository }} From 4bdd0d77db9b4eaa7864431da6c5b19218e18c79 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 23 Feb 2025 02:52:19 -0500 Subject: [PATCH 072/310] bump complement, actually run all tests Signed-off-by: June Clementine Strawberry --- bin/complement | 2 +- flake.lock | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/bin/complement b/bin/complement index ffd7a938..4356f2e7 100755 --- a/bin/complement +++ b/bin/complement @@ -45,7 +45,7 @@ set +o pipefail env \ -C "$COMPLEMENT_SRC" \ COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \ - go test -tags="conduwuit_blacklist" "$SKIPPED_COMPLEMENT_TESTS" -v -timeout 1h -json ./tests ./tests/msc3967 | tee "$LOG_FILE" + go test -tags="conduwuit_blacklist" "$SKIPPED_COMPLEMENT_TESTS" -v -timeout 1h -json ./tests/... | tee "$LOG_FILE" set -o pipefail # Post-process the results into an easy-to-compare format, sorted by Test name for reproducible results diff --git a/flake.lock b/flake.lock index 9bf6ac55..a7d80508 100644 --- a/flake.lock +++ b/flake.lock @@ -80,11 +80,11 @@ "complement": { "flake": false, "locked": { - "lastModified": 1734303596, - "narHash": "sha256-HjDRyLR4MBqQ3IjfMM6eE+8ayztXlbz3gXdyDmFla68=", + "lastModified": 1740291865, + "narHash": "sha256-wl1+yCTEtvIH8vgXygnxPkaSgg4MYNKs+c9tzVytr20=", "owner": "girlbossceo", "repo": "complement", - "rev": "14cc5be797b774f1a2b9f826f38181066d4952b8", + "rev": "35ad9d9051498fbac8ea4abff8ab7d8b1844f87b", "type": "github" }, "original": { From cbf207bd1f1587418be0de2a1a5cbd745baec9e2 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 23 Feb 2025 03:11:34 -0500 Subject: [PATCH 073/310] try adding back some skipped complement tests Signed-off-by: June Clementine Strawberry --- bin/complement | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/complement b/bin/complement index 4356f2e7..9960299c 100755 --- a/bin/complement +++ b/bin/complement @@ -18,7 +18,7 @@ RESULTS_FILE="$3" OCI_IMAGE="complement-conduwuit:main" # Complement tests that are skipped due to flakiness/reliability issues -SKIPPED_COMPLEMENT_TESTS='-skip=TestClientSpacesSummary.*|TestJoinFederatedRoomFromApplicationServiceBridgeUser.*|TestJumpToDateEndpoint.*|TestUnbanViaInvite.*' +SKIPPED_COMPLEMENT_TESTS='-skip=TestPartialStateJoin.*' # $COMPLEMENT_SRC needs to be a directory to Complement source code if [ -f "$COMPLEMENT_SRC" ]; then From a67ab754179d0bbaa09aa19d974035c521643fe9 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 25 Feb 2025 18:38:12 +0000 Subject: [PATCH 074/310] fix edition 2024 lints Signed-off-by: Jason Volk --- Cargo.toml | 2 ++ src/admin/admin.rs | 2 +- src/admin/room/commands.rs | 2 +- src/admin/room/directory.rs | 2 +- src/admin/user/commands.rs | 6 +++--- src/api/client/account.rs | 2 +- src/api/client/directory.rs | 14 +++----------- src/api/client/membership.rs | 6 
+++--- src/api/client/report.rs | 6 +++--- src/api/router/auth.rs | 2 +- src/api/server/send_join.rs | 2 +- src/api/server/send_knock.rs | 2 +- src/database/de.rs | 2 +- src/database/engine/logger.rs | 2 +- src/database/map/compact.rs | 2 +- src/database/map/qry_batch.rs | 1 - src/database/map/rev_stream.rs | 2 +- src/database/map/rev_stream_from.rs | 2 +- src/database/map/stream.rs | 2 +- src/database/map/stream_from.rs | 2 +- src/database/pool.rs | 14 ++++++-------- src/database/ser.rs | 2 +- src/database/stream.rs | 6 +++--- src/database/watchers.rs | 2 +- src/macros/config.rs | 2 +- src/service/media/blurhash.rs | 2 +- src/service/media/remote.rs | 2 +- .../rooms/event_handler/handle_incoming_pdu.rs | 2 +- src/service/rooms/spaces/mod.rs | 2 +- src/service/rooms/state_cache/mod.rs | 4 ++-- src/service/rooms/state_compressor/mod.rs | 4 ++-- src/service/rooms/timeline/mod.rs | 14 +++++++------- src/service/sending/mod.rs | 2 +- src/service/sending/sender.rs | 7 ++----- src/service/server_keys/request.rs | 2 +- src/service/uiaa/mod.rs | 2 +- 36 files changed, 60 insertions(+), 72 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 76de2212..52695d89 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -905,6 +905,7 @@ missing_docs_in_private_items = { level = "allow", priority = 1 } missing_errors_doc = { level = "allow", priority = 1 } missing_panics_doc = { level = "allow", priority = 1 } module_name_repetitions = { level = "allow", priority = 1 } +needless_continue = { level = "allow", priority = 1 } no_effect_underscore_binding = { level = "allow", priority = 1 } similar_names = { level = "allow", priority = 1 } single_match_else = { level = "allow", priority = 1 } @@ -969,6 +970,7 @@ style = { level = "warn", priority = -1 } # trivial assertions are quite alright assertions_on_constants = { level = "allow", priority = 1 } module_inception = { level = "allow", priority = 1 } +obfuscated_if_else = { level = "allow", priority = 1 } ################### suspicious = { level = "warn", priority = -1 } diff --git a/src/admin/admin.rs b/src/admin/admin.rs index b6de1ec6..9e010a59 100644 --- a/src/admin/admin.rs +++ b/src/admin/admin.rs @@ -62,7 +62,7 @@ pub(super) async fn process(command: AdminCommand, context: &Command<'_>) -> Res | Debug(command) => debug::process(command, context).await?, | Query(command) => query::process(command, context).await?, | Check(command) => check::process(command, context).await?, - }; + } Ok(()) } diff --git a/src/admin/room/commands.rs b/src/admin/room/commands.rs index b5c303c8..6dd31b48 100644 --- a/src/admin/room/commands.rs +++ b/src/admin/room/commands.rs @@ -42,7 +42,7 @@ pub(super) async fn list_rooms( if rooms.is_empty() { return Ok(RoomMessageEventContent::text_plain("No more rooms.")); - }; + } let output_plain = format!( "Rooms ({}):\n```\n{}\n```", diff --git a/src/admin/room/directory.rs b/src/admin/room/directory.rs index e9c23a1d..ca036825 100644 --- a/src/admin/room/directory.rs +++ b/src/admin/room/directory.rs @@ -67,7 +67,7 @@ pub(super) async fn reprocess( if rooms.is_empty() { return Ok(RoomMessageEventContent::text_plain("No more rooms.")); - }; + } let output = format!( "Rooms (page {page}):\n```\n{}\n```", diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 8cb8edc3..8565f04a 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -166,7 +166,7 @@ pub(super) async fn create_user( "Failed to automatically join room {room} for user {user_id}: {e}" ); }, - }; + } } } } @@ -550,7 +550,7 @@ pub(super) 
async fn force_join_list_of_local_users( debug_warn!("Failed force joining {user_id} to {room_id} during bulk join: {e}"); failed_joins = failed_joins.saturating_add(1); }, - }; + } } Ok(RoomMessageEventContent::notice_markdown(format!( @@ -646,7 +646,7 @@ pub(super) async fn force_join_all_local_users( debug_warn!("Failed force joining {user_id} to {room_id} during bulk join: {e}"); failed_joins = failed_joins.saturating_add(1); }, - }; + } } Ok(RoomMessageEventContent::notice_markdown(format!( diff --git a/src/api/client/account.rs b/src/api/client/account.rs index cb49a6db..b42f51f7 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -499,7 +499,7 @@ pub(crate) async fn register_route( | _ => { info!("Automatically joined room {room} for user {user_id}"); }, - }; + } } } } diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 136c5961..6af9b533 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -131,7 +131,7 @@ pub(crate) async fn set_room_visibility_route( if !services.rooms.metadata.exists(&body.room_id).await { // Return 404 if the room doesn't exist - return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found")); + return Err!(Request(NotFound("Room not found"))); } if services @@ -145,10 +145,7 @@ pub(crate) async fn set_room_visibility_route( } if !user_can_publish_room(&services, sender_user, &body.room_id).await? { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "User is not allowed to publish this room", - )); + return Err!(Request(Forbidden("User is not allowed to publish this room"))); } match &body.visibility { @@ -386,12 +383,7 @@ async fn user_can_publish_room( .await { | Ok(event) => Ok(event.sender == user_id), - | _ => { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "User is not allowed to publish this room", - )); - }, + | _ => Err!(Request(Forbidden("User is not allowed to publish this room"))), } }, } diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 9c2693dc..0b9c0c69 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -993,7 +993,7 @@ async fn join_room_by_id_helper_remote( | _ => { join_event_stub.remove("event_id"); }, - }; + } // In order to create a compatible ref hash (EventID) the `hashes` field needs // to be present @@ -1420,7 +1420,7 @@ async fn join_room_by_id_helper_local( | _ => { join_event_stub.remove("event_id"); }, - }; + } // In order to create a compatible ref hash (EventID) the `hashes` field needs // to be present @@ -1947,7 +1947,7 @@ async fn remote_leave_room( | _ => { leave_event_stub.remove("event_id"); }, - }; + } // In order to create a compatible ref hash (EventID) the `hashes` field needs // to be present diff --git a/src/api/client/report.rs b/src/api/client/report.rs index db085721..7922caca 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -43,7 +43,7 @@ pub(crate) async fn report_room_route( ErrorKind::InvalidParam, "Reason too long, should be 750 characters or fewer", )); - }; + } delay_response().await; @@ -164,14 +164,14 @@ async fn is_event_report_valid( ErrorKind::InvalidParam, "Invalid score, must be within 0 to -100", )); - }; + } if reason.as_ref().is_some_and(|s| s.len() > 750) { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Reason too long, should be 750 characters or fewer", )); - }; + } if !services .rooms diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index 56256683..92b75cfa 100644 --- a/src/api/router/auth.rs +++ 
b/src/api/router/auth.rs @@ -110,7 +110,7 @@ pub(super) async fn auth( } }, | _ => {}, - }; + } } match (metadata.authentication, token) { diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index 08fa3835..c1749835 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -135,7 +135,7 @@ async fn create_join_event( if state_key != sender { return Err!(Request(BadJson("State key does not match sender user."))); - }; + } if let Some(authorising_user) = content.join_authorized_via_users_server { use ruma::RoomVersionId::*; diff --git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs index 1d4c2a6c..f7bb0735 100644 --- a/src/api/server/send_knock.rs +++ b/src/api/server/send_knock.rs @@ -137,7 +137,7 @@ pub(crate) async fn create_knock_event_v1_route( if state_key != sender { return Err!(Request(InvalidParam("state_key does not match sender user of event."))); - }; + } let origin: OwnedServerName = serde_json::from_value( value diff --git a/src/database/de.rs b/src/database/de.rs index 9c0997ff..849b3b2e 100644 --- a/src/database/de.rs +++ b/src/database/de.rs @@ -241,7 +241,7 @@ impl<'a, 'de: 'a> de::Deserializer<'de> for &'a mut Deserializer<'de> { | "Ignore" => self.record_ignore(), | "IgnoreAll" => self.record_ignore_all(), | _ => unhandled!("Unrecognized deserialization Directive {name:?}"), - }; + } visitor.visit_unit() } diff --git a/src/database/engine/logger.rs b/src/database/engine/logger.rs index a1898e30..23e23fc7 100644 --- a/src/database/engine/logger.rs +++ b/src/database/engine/logger.rs @@ -18,5 +18,5 @@ pub(crate) fn handle(level: LogLevel, msg: &str) { | LogLevel::Error | LogLevel::Fatal => error!("{msg}"), | LogLevel::Info => debug!("{msg}"), | LogLevel::Warn => warn!("{msg}"), - }; + } } diff --git a/src/database/map/compact.rs b/src/database/map/compact.rs index 84476de6..b49bf30b 100644 --- a/src/database/map/compact.rs +++ b/src/database/map/compact.rs @@ -52,7 +52,7 @@ pub fn compact_blocking(&self, opts: Options) -> Result { co.set_target_level(level.try_into()?); }, | (Some(_), Some(_)) => return Err!("compacting between specific levels not supported"), - }; + } self.db .db diff --git a/src/database/map/qry_batch.rs b/src/database/map/qry_batch.rs index f44d1c86..e42d3e63 100644 --- a/src/database/map/qry_batch.rs +++ b/src/database/map/qry_batch.rs @@ -50,7 +50,6 @@ where .iter() .map(ser::serialize_to::) .map(|result| result.expect("failed to serialize query key")) - .map(Into::into) .collect(); self.db diff --git a/src/database/map/rev_stream.rs b/src/database/map/rev_stream.rs index fc2d1116..789a52e8 100644 --- a/src/database/map/rev_stream.rs +++ b/src/database/map/rev_stream.rs @@ -40,7 +40,7 @@ pub fn rev_raw_stream(self: &Arc) -> impl Stream> .into_stream() .flatten() .boxed(); - }; + } let seek = Seek { map: self.clone(), diff --git a/src/database/map/rev_stream_from.rs b/src/database/map/rev_stream_from.rs index d67986e7..a612d2a2 100644 --- a/src/database/map/rev_stream_from.rs +++ b/src/database/map/rev_stream_from.rs @@ -89,7 +89,7 @@ where .into_stream() .flatten() .boxed(); - }; + } let seek = Seek { map: self.clone(), diff --git a/src/database/map/stream.rs b/src/database/map/stream.rs index f1450b6f..f7371b6c 100644 --- a/src/database/map/stream.rs +++ b/src/database/map/stream.rs @@ -39,7 +39,7 @@ pub fn raw_stream(self: &Arc) -> impl Stream>> + .into_stream() .flatten() .boxed(); - }; + } let seek = Seek { map: self.clone(), diff --git a/src/database/map/stream_from.rs 
b/src/database/map/stream_from.rs index 00c3a051..ccf48db6 100644 --- a/src/database/map/stream_from.rs +++ b/src/database/map/stream_from.rs @@ -86,7 +86,7 @@ where .into_stream() .flatten() .boxed(); - }; + } let seek = Seek { map: self.clone(), diff --git a/src/database/pool.rs b/src/database/pool.rs index e6ed59ac..47e61c30 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -146,11 +146,9 @@ pub(crate) fn close(&self) { .map(JoinHandle::join) .map(|result| result.map_err(Error::from_panic)) .enumerate() - .for_each(|(id, result)| { - match result { - | Ok(()) => trace!(?id, "worker joined"), - | Err(error) => error!(?id, "worker joined with error: {error}"), - }; + .for_each(|(id, result)| match result { + | Ok(()) => trace!(?id, "worker joined"), + | Err(error) => error!(?id, "worker joined with error: {error}"), }); } @@ -345,7 +343,7 @@ fn worker_handle(self: &Arc, cmd: Cmd) { | Cmd::Get(cmd) if cmd.key.len() == 1 => self.handle_get(cmd), | Cmd::Get(cmd) => self.handle_batch(cmd), | Cmd::Iter(cmd) => self.handle_iter(cmd), - }; + } } #[implement(Pool)] @@ -362,7 +360,7 @@ fn handle_iter(&self, mut cmd: Seek) { return; } - let from = cmd.key.as_deref().map(Into::into); + let from = cmd.key.as_deref(); let result = match cmd.dir { | Direction::Forward => cmd.state.init_fwd(from), @@ -394,7 +392,7 @@ fn handle_batch(self: &Arc, mut cmd: Get) { return; } - let keys = cmd.key.iter().map(Into::into); + let keys = cmd.key.iter(); let result: SmallVec<_> = cmd.map.get_batch_blocking(keys).collect(); diff --git a/src/database/ser.rs b/src/database/ser.rs index 6dd2043d..2e1a2cb0 100644 --- a/src/database/ser.rs +++ b/src/database/ser.rs @@ -224,7 +224,7 @@ impl ser::Serializer for &mut Serializer<'_, W> { self.separator()?; }, | _ => unhandled!("Unrecognized serialization directive: {name:?}"), - }; + } Ok(()) } diff --git a/src/database/stream.rs b/src/database/stream.rs index eb856b3f..eb264ccd 100644 --- a/src/database/stream.rs +++ b/src/database/stream.rs @@ -113,13 +113,13 @@ impl<'a> State<'a> { } #[inline] - fn fetch_key(&self) -> Option> { self.inner.key().map(Key::from) } + fn fetch_key(&self) -> Option> { self.inner.key() } #[inline] - fn _fetch_val(&self) -> Option> { self.inner.value().map(Val::from) } + fn _fetch_val(&self) -> Option> { self.inner.value() } #[inline] - fn fetch(&self) -> Option> { self.inner.item().map(KeyVal::from) } + fn fetch(&self) -> Option> { self.inner.item() } #[inline] pub(super) fn status(&self) -> Option { self.inner.status().err() } diff --git a/src/database/watchers.rs b/src/database/watchers.rs index be814f8c..b3907833 100644 --- a/src/database/watchers.rs +++ b/src/database/watchers.rs @@ -53,6 +53,6 @@ impl Watchers { tx.0.send(()).expect("channel should still be open"); } } - }; + } } } diff --git a/src/macros/config.rs b/src/macros/config.rs index 07ac1c0a..7b424325 100644 --- a/src/macros/config.rs +++ b/src/macros/config.rs @@ -205,7 +205,7 @@ fn get_default(field: &Field) -> Option { }, | Meta::Path { .. 
} => return Some("false".to_owned()), | _ => return None, - }; + } } None diff --git a/src/service/media/blurhash.rs b/src/service/media/blurhash.rs index 9d73f5dc..91e00228 100644 --- a/src/service/media/blurhash.rs +++ b/src/service/media/blurhash.rs @@ -172,7 +172,7 @@ impl std::fmt::Display for BlurhashingError { #[cfg(feature = "blurhashing")] | Self::ImageError(e) => write!(f, "There was an error with the image loading library => {e}")?, - }; + } Ok(()) } diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs index 61635011..b6c853d2 100644 --- a/src/service/media/remote.rs +++ b/src/service/media/remote.rs @@ -283,7 +283,7 @@ async fn location_request(&self, location: &str) -> Result { .map_err(Into::into) .map(|content| FileMeta { content: Some(content), - content_type: content_type.clone().map(Into::into), + content_type: content_type.clone(), content_disposition: Some(make_content_disposition( content_disposition.as_ref(), content_type.as_deref(), diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index b6d3e21e..b437bf2e 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -170,7 +170,7 @@ pub async fn handle_incoming_pdu<'a>( | Entry::Occupied(mut e) => { *e.get_mut() = (now, e.get().1.saturating_add(1)); }, - }; + } } } diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 52e7d2be..910da914 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -125,7 +125,7 @@ pub async fn get_summary_and_children_local( SummaryAccessibility::Inaccessible }, )), - }; + } let children_pdus: Vec<_> = self .get_stripped_space_child_events(current_room) diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 4403468b..02ffa0d1 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -218,7 +218,7 @@ impl Service { ) .await .ok(); - }; + } // Copy direct chat flag if let Ok(mut direct_event) = self @@ -250,7 +250,7 @@ impl Service { ) .await?; } - }; + } } } diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index c566eb1c..305d3187 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -303,7 +303,7 @@ impl Service { }); return Ok(()); - }; + } // Else we have two options. // 1. We add the current diff on top of the parent layer. 
@@ -419,7 +419,7 @@ impl Service { 2, // every state change is 2 event changes on average states_parents, )?; - }; + } Ok(HashSetCompressStateEvent { shortstatehash: new_shortstatehash, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 4be97fb2..35c972fa 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -422,7 +422,7 @@ impl Service { highlight = true; }, | _ => {}, - }; + } // Break early if both conditions are true if notify && highlight { @@ -484,7 +484,7 @@ impl Service { } } }, - }; + } }, | TimelineEventType::SpaceChild => if let Some(_state_key) = &pdu.state_key { @@ -776,7 +776,7 @@ impl Service { | _ => { pdu_json.remove("event_id"); }, - }; + } // Add origin because synapse likes that (and it's required in the spec) pdu_json.insert( @@ -847,7 +847,7 @@ impl Service { { return Err!(Request(Forbidden("User cannot redact this event."))); } - }; + } }, | _ => { let content: RoomRedactionEventContent = pdu.get_content()?; @@ -863,7 +863,7 @@ impl Service { } }, } - }; + } if pdu.kind == TimelineEventType::RoomMember { let content: RoomMemberEventContent = pdu.get_content()?; @@ -1293,10 +1293,10 @@ async fn check_pdu_for_admin_room(&self, pdu: &PduEvent, sender: &UserId) -> Res } }, | _ => {}, - }; + } }, | _ => {}, - }; + } Ok(()) } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 379829b4..08ca7010 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -131,7 +131,7 @@ impl crate::Service for Service { | Err(error) => { error!(id = ?error.id(), ?error, "sender worker finished"); }, - }; + } } Ok(()) diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index c4f34177..616f0846 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -138,7 +138,7 @@ impl Service { match response { | Ok(dest) => self.handle_response_ok(&dest, futures, statuses).await, | Err((dest, e)) => Self::handle_response_err(dest, statuses, &e), - }; + } } fn handle_response_err(dest: Destination, statuses: &mut CurTransactionStatus, e: &Error) { @@ -319,10 +319,7 @@ impl Service { if let Destination::Federation(server_name) = dest { if let Ok((select_edus, last_count)) = self.select_edus(server_name).await { debug_assert!(select_edus.len() <= EDU_LIMIT, "exceeded edus limit"); - let select_edus = select_edus - .into_iter() - .map(Into::into) - .map(SendingEvent::Edu); + let select_edus = select_edus.into_iter().map(SendingEvent::Edu); events.extend(select_edus); self.db.set_latest_educount(server_name, last_count); diff --git a/src/service/server_keys/request.rs b/src/service/server_keys/request.rs index 171b755b..d9907616 100644 --- a/src/service/server_keys/request.rs +++ b/src/service/server_keys/request.rs @@ -43,7 +43,7 @@ where .keys() .rev() .take(self.services.server.config.trusted_server_batch_size) - .last() + .next_back() .cloned() { let request = Request { diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 51f5fb11..39dd2b41 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -69,7 +69,7 @@ pub async fn read_tokens(&self) -> Result> { }, | Err(e) => error!("Failed to read the registration token file: {e}"), } - }; + } if let Some(token) = &self.services.config.registration_token { tokens.insert(token.to_owned()); } From dca7bf9635ecd1fef3cd4bca56a25054d346692d Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 27 Feb 2025 10:39:06 -0500 Subject: [PATCH 075/310] try bumping 
cache-nix-action to v6 Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 8 ++------ .github/workflows/documentation.yml | 4 +--- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8e1cf6c6..82ffc6b6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -129,7 +129,7 @@ jobs: # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags #if: ${{ !startsWith(github.ref, 'refs/tags/') }} - uses: nix-community/cache-nix-action@v5.1.0 + uses: nix-community/cache-nix-action@v6 with: # restore and save a cache using this key primary-key: nix-${{ runner.os }}-${{ hashFiles('**/*.nix', '**/.lock') }} @@ -146,8 +146,6 @@ jobs: purge-last-accessed: 86400 # except the version with the `primary-key`, if it exists purge-primary-key: never - # always save the cache - save-always: true - name: Enable Cachix binary cache run: | @@ -324,7 +322,7 @@ jobs: # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags #if: ${{ !startsWith(github.ref, 'refs/tags/') }} - uses: nix-community/cache-nix-action@v5.1.0 + uses: nix-community/cache-nix-action@v6 with: # restore and save a cache using this key primary-key: nix-${{ runner.os }}-${{ matrix.target }}-${{ hashFiles('**/*.nix', '**/.lock') }} @@ -341,8 +339,6 @@ jobs: purge-last-accessed: 86400 # except the version with the `primary-key`, if it exists purge-primary-key: never - # always save the cache - save-always: true - name: Enable Cachix binary cache run: | diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 0eefe0a4..fadc7b3f 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -76,7 +76,7 @@ jobs: # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting # releases and tags if: ${{ !startsWith(github.ref, 'refs/tags/') }} - uses: nix-community/cache-nix-action@v5.1.0 + uses: nix-community/cache-nix-action@v6 with: # restore and save a cache using this key primary-key: nix-${{ runner.os }}-${{ hashFiles('**/*.nix', '**/.lock') }} @@ -93,8 +93,6 @@ jobs: purge-last-accessed: 86400 # except the version with the `primary-key`, if it exists purge-primary-key: never - # always save the cache - save-always: true - name: Enable Cachix binary cache run: | From 17e0384eeb91bfbb77576359252db25e3248cc40 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 2 Mar 2025 23:11:43 -0500 Subject: [PATCH 076/310] ignore errors instead of expecting for state gathering Signed-off-by: June Clementine Strawberry --- src/service/rooms/state_accessor/state.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs index 625defe6..02a6194e 100644 --- a/src/service/rooms/state_accessor/state.rs +++ b/src/service/rooms/state_accessor/state.rs @@ -4,7 +4,7 @@ use conduwuit::{ PduEvent, Result, StateKey, at, err, implement, pair_of, utils::{ result::FlatOk, - stream::{BroadbandExt, IterStream, ReadyExt, TryExpect}, + stream::{BroadbandExt, IterStream, ReadyExt, TryIgnore}, }, }; use database::Deserialized; @@ -232,7 +232,7 @@ pub fn state_keys_with_shortids<'a>( ) -> impl Stream + Send + 'a { let short_ids = self .state_full_shortids(shortstatehash) - .expect_ok() + .ignore_err() .unzip() .map(|(ssks, sids): 
(Vec, Vec)| (ssks, sids)) .shared(); @@ -269,7 +269,7 @@ pub fn state_keys<'a>( ) -> impl Stream + Send + 'a { let short_ids = self .state_full_shortids(shortstatehash) - .expect_ok() + .ignore_err() .map(at!(0)); self.services @@ -305,7 +305,7 @@ pub fn state_added( .map_ok(|(a, b)| b.difference(&a).copied().collect::>()) .map_ok(IterStream::try_stream) .try_flatten_stream() - .expect_ok() + .ignore_err() .map(parse_compressed_state_event) } @@ -327,7 +327,7 @@ pub fn state_full_pdus( ) -> impl Stream + Send + '_ { let short_ids = self .state_full_shortids(shortstatehash) - .expect_ok() + .ignore_err() .map(at!(1)); self.services @@ -352,7 +352,7 @@ where { let shortids = self .state_full_shortids(shortstatehash) - .expect_ok() + .ignore_err() .unzip() .shared(); From de53ad83b2ec49170075cc5176e0ec7a604aad94 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 2 Mar 2025 23:15:05 -0500 Subject: [PATCH 077/310] run nightly cargo fmt again Signed-off-by: June Clementine Strawberry --- src/admin/room/alias.rs | 5 +++-- src/admin/room/moderation.rs | 20 +++++++++++-------- src/api/client/directory.rs | 5 +++-- src/api/client/read_marker.rs | 5 +++-- src/api/server/hierarchy.rs | 5 +++-- src/core/state_res/event_auth.rs | 5 +++-- src/router/layers.rs | 8 ++++++-- .../rooms/event_handler/fetch_state.rs | 5 +++-- .../rooms/event_handler/handle_outlier_pdu.rs | 5 +++-- src/service/rooms/spaces/mod.rs | 5 +++-- src/service/server_keys/get.rs | 5 +++-- 11 files changed, 45 insertions(+), 28 deletions(-) diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs index 6262f33e..ab21170c 100644 --- a/src/admin/room/alias.rs +++ b/src/admin/room/alias.rs @@ -66,10 +66,11 @@ pub(super) async fn reprocess( format!("#{}:{}", room_alias_localpart, services.globals.server_name()); let room_alias = match OwnedRoomAliasId::parse(room_alias_str) { | Ok(alias) => alias, - | Err(err) => + | Err(err) => { return Ok(RoomMessageEventContent::text_plain(format!( "Failed to parse alias: {err}" - ))), + ))); + }, }; match command { | RoomAliasCommand::Set { force, room_id, .. } => { diff --git a/src/admin/room/moderation.rs b/src/admin/room/moderation.rs index ee132590..444dfa2f 100644 --- a/src/admin/room/moderation.rs +++ b/src/admin/room/moderation.rs @@ -96,12 +96,13 @@ async fn ban_room( let room_id = if room.is_room_id() { let room_id = match RoomId::parse(&room) { | Ok(room_id) => room_id, - | Err(e) => + | Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( "Failed to parse room ID {room}. Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))), + ))); + }, }; debug!("Room specified is a room ID, banning room ID"); @@ -111,12 +112,13 @@ async fn ban_room( } else if room.is_room_alias_id() { let room_alias = match RoomAliasId::parse(&room) { | Ok(room_alias) => room_alias, - | Err(e) => + | Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( "Failed to parse room ID {room}. Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))), + ))); + }, }; debug!( @@ -514,12 +516,13 @@ async fn unban_room( let room_id = if room.is_room_id() { let room_id = match RoomId::parse(&room) { | Ok(room_id) => room_id, - | Err(e) => + | Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( "Failed to parse room ID {room}. 
Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))), + ))); + }, }; debug!("Room specified is a room ID, unbanning room ID"); @@ -529,12 +532,13 @@ async fn unban_room( } else if room.is_room_alias_id() { let room_alias = match RoomAliasId::parse(&room) { | Ok(room_alias) => room_alias, - | Err(e) => + | Err(e) => { return Ok(RoomMessageEventContent::text_plain(format!( "Failed to parse room ID {room}. Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))), + ))); + }, }; debug!( diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 6af9b533..88f0e668 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -267,8 +267,9 @@ pub(crate) async fn get_public_rooms_filtered_helper( let backwards = match characters.next() { | Some('n') => false, | Some('p') => true, - | _ => - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token")), + | _ => { + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token")); + }, }; num_since = characters diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs index d01327f6..187616b4 100644 --- a/src/api/client/read_marker.rs +++ b/src/api/client/read_marker.rs @@ -197,11 +197,12 @@ pub(crate) async fn create_receipt_route( .read_receipt .private_read_set(&body.room_id, sender_user, count); }, - | _ => + | _ => { return Err!(Request(InvalidParam(warn!( "Received unknown read receipt type: {}", &body.receipt_type - )))), + )))); + }, } Ok(create_receipt::v3::Response {}) diff --git a/src/api/server/hierarchy.rs b/src/api/server/hierarchy.rs index 41eaedd0..c759c8ea 100644 --- a/src/api/server/hierarchy.rs +++ b/src/api/server/hierarchy.rs @@ -32,8 +32,9 @@ pub(crate) async fn get_hierarchy_route( { | None => Err!(Request(NotFound("The requested room was not found"))), - | Some(SummaryAccessibility::Inaccessible) => - Err!(Request(NotFound("The requested room is inaccessible"))), + | Some(SummaryAccessibility::Inaccessible) => { + Err!(Request(NotFound("The requested room is inaccessible"))) + }, | Some(SummaryAccessibility::Accessible(room)) => { let (children, inaccessible_children) = diff --git a/src/core/state_res/event_auth.rs b/src/core/state_res/event_auth.rs index 4b8e55f3..65bec802 100644 --- a/src/core/state_res/event_auth.rs +++ b/src/core/state_res/event_auth.rs @@ -682,7 +682,7 @@ fn valid_membership_change( } allow }, - | _ => + | _ => { if !sender_is_joined || target_user_current_membership == MembershipState::Join || target_user_current_membership == MembershipState::Ban @@ -706,7 +706,8 @@ fn valid_membership_change( ); } allow - }, + } + }, } }, | MembershipState::Leave => diff --git a/src/router/layers.rs b/src/router/layers.rs index 88e6a8d5..6920555d 100644 --- a/src/router/layers.rs +++ b/src/router/layers.rs @@ -61,8 +61,12 @@ pub(crate) fn build(services: &Arc) -> Result<(Router, Guard)> { ) .layer(axum::middleware::from_fn_with_state(Arc::clone(services), request::handle)) .layer(SecureClientIpSource::ConnectInfo.into_extension()) - .layer(ResponseBodyTimeoutLayer::new(Duration::from_secs(server.config.client_response_timeout))) - .layer(RequestBodyTimeoutLayer::new(Duration::from_secs(server.config.client_receive_timeout))) + .layer(ResponseBodyTimeoutLayer::new(Duration::from_secs( + server.config.client_response_timeout, + ))) + 
.layer(RequestBodyTimeoutLayer::new(Duration::from_secs( + server.config.client_receive_timeout, + ))) .layer(TimeoutLayer::new(Duration::from_secs(server.config.client_request_timeout))) .layer(SetResponseHeaderLayer::if_not_present( HeaderName::from_static("origin-agent-cluster"), // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Origin-Agent-Cluster diff --git a/src/service/rooms/event_handler/fetch_state.rs b/src/service/rooms/event_handler/fetch_state.rs index b1a4a38b..0f9e093b 100644 --- a/src/service/rooms/event_handler/fetch_state.rs +++ b/src/service/rooms/event_handler/fetch_state.rs @@ -58,10 +58,11 @@ pub(super) async fn fetch_state( | hash_map::Entry::Vacant(v) => { v.insert(pdu.event_id.clone()); }, - | hash_map::Entry::Occupied(_) => + | hash_map::Entry::Occupied(_) => { return Err!(Database( "State event's type and state_key combination exists multiple times.", - )), + )); + }, } } diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index 974eb300..99e90a50 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -56,10 +56,11 @@ pub(super) async fn handle_outlier_pdu<'a>( obj }, - | Err(e) => + | Err(e) => { return Err!(Request(InvalidParam(debug_error!( "Signature verification failed for {event_id}: {e}" - )))), + )))); + }, }; // Now that we have checked the signature and hashes we can add the eventID and diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 910da914..1da38234 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -109,7 +109,7 @@ pub async fn get_summary_and_children_local( { | None => (), // cache miss | Some(None) => return Ok(None), - | Some(Some(cached)) => + | Some(Some(cached)) => { return Ok(Some( if self .is_accessible_child( @@ -124,7 +124,8 @@ pub async fn get_summary_and_children_local( } else { SummaryAccessibility::Inaccessible }, - )), + )); + }, } let children_pdus: Vec<_> = self diff --git a/src/service/server_keys/get.rs b/src/service/server_keys/get.rs index 00aeae1e..f9c5bdaf 100644 --- a/src/service/server_keys/get.rs +++ b/src/service/server_keys/get.rs @@ -18,8 +18,9 @@ pub async fn get_event_keys( let required = match required_keys(object, version) { | Ok(required) => required, - | Err(e) => - return Err!(BadServerResponse("Failed to determine keys required to verify: {e}")), + | Err(e) => { + return Err!(BadServerResponse("Failed to determine keys required to verify: {e}")); + }, }; let batch = required From 00cc23b6496533b9cfb77145966e2e7355f1f886 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 2 Mar 2025 23:15:30 -0500 Subject: [PATCH 078/310] bump nix lockfile, bump cargo.lock, bump ruwuma Signed-off-by: June Clementine Strawberry --- Cargo.lock | 263 +++++++++++++++++++++++++++-------------------------- Cargo.toml | 4 +- flake.lock | 24 ++--- 3 files changed, 146 insertions(+), 145 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7e84437c..e632b504 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -55,9 +55,9 @@ checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anyhow" -version = "1.0.95" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" +checksum = "6b964d184e89d9b6b67dd2715bc8e74cf3107fb2b529990c90cf517326150bf4" [[package]] name 
= "arbitrary" @@ -128,9 +128,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.18" +version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df895a515f70646414f4b45c0b79082783b80552b373a68283012928df56f522" +checksum = "310c9bcae737a48ef5cdee3174184e6d548b292739ede61a1f955ef76a738861" dependencies = [ "brotli", "flate2", @@ -212,18 +212,18 @@ dependencies = [ [[package]] name = "avif-serialize" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e335041290c43101ca215eed6f43ec437eb5a42125573f600fc3fa42b9bddd62" +checksum = "98922d6a4cfbcb08820c69d8eeccc05bb1f29bfa06b4f5b1dbfe9a868bd7608e" dependencies = [ "arrayvec", ] [[package]] name = "aws-lc-rs" -version = "1.12.2" +version = "1.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c2b7ddaa2c56a367ad27a094ad8ef4faacf8a617c2575acb2ba88949df999ca" +checksum = "5e4e8200b9a4a5801a769d50eeabc05670fec7e959a8cb7a63a93e4e519942ae" dependencies = [ "aws-lc-sys", "paste", @@ -232,9 +232,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.25.1" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54ac4f13dad353b209b34cbec082338202cbc01c8f00336b55c750c13ac91f8f" +checksum = "0f9dd2e03ee80ca2822dd6ea431163d2ef259f2066a4d6ccaca6d9dcb386aa43" dependencies = [ "bindgen", "cc", @@ -414,7 +414,7 @@ version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "cexpr", "clang-sys", "itertools 0.12.1", @@ -445,9 +445,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.8.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" +checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" [[package]] name = "bitstream-io" @@ -505,9 +505,9 @@ dependencies = [ [[package]] name = "built" -version = "0.7.5" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c360505aed52b7ec96a3636c3f039d99103c37d1d9b4f7a8c743d3ea9ffcd03b" +checksum = "56ed6191a7e78c36abdb16ab65341eefd73d64d303fffccdbb00d51e4205967b" [[package]] name = "bumpalo" @@ -541,18 +541,17 @@ checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" [[package]] name = "bytesize" -version = "1.3.0" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3e368af43e418a04d52505cf3dbc23dda4e3407ae2fa99fd0e4f308ce546acc" +checksum = "2d2c12f985c78475a6b8d629afd0c360260ef34cfef52efccdcfd31972f81c2e" [[package]] name = "bzip2-sys" -version = "0.1.11+1.0.8" +version = "0.1.13+1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" +checksum = "225bff33b2141874fe80d71e07d6eec4f85c5c216453dd96388240f96e1acc14" dependencies = [ "cc", - "libc", "pkg-config", ] @@ -568,9 +567,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.12" +version = "1.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "755717a7de9ec452bf7f3f1a3099085deabd7f2962b861dae91ecd7a365903d2" +checksum = 
"be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c" dependencies = [ "jobserver", "libc", @@ -619,9 +618,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.39" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e36cc9d416881d2e24f9a963be5fb1cd90966419ac844274161d10488b3e825" +checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" dependencies = [ "num-traits", ] @@ -639,9 +638,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.28" +version = "4.5.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e77c3243bd94243c03672cb5154667347c457ca271254724f9f393aee1c05ff" +checksum = "027bb0d98429ae334a8698531da7077bdf906419543a35a55c2cb1b66437d767" dependencies = [ "clap_builder", "clap_derive", @@ -649,9 +648,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.27" +version = "4.5.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b26884eb4b57140e4d2d93652abfa49498b938b3c9179f9fc487b0acc3edad7" +checksum = "5589e0cba072e0f3d23791efac0fd8627b49c829c196a492e88168e6a669d863" dependencies = [ "anstyle", "clap_lex", @@ -677,9 +676,9 @@ checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" [[package]] name = "cmake" -version = "0.1.53" +version = "0.1.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e24a03c8b52922d68a1589ad61032f2c1aa5a8158d2aa0d93c6e9534944bbad6" +checksum = "e7caa3f9de89ddbe2c607f4101924c5abec803763ae9534e4f4d7d8f84aa81f0" dependencies = [ "cc", ] @@ -1134,7 +1133,7 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "crossterm_winapi", "futures-core", "mio", @@ -1215,9 +1214,9 @@ checksum = "817fa642fb0ee7fe42e95783e00e0969927b96091bdd4b9b1af082acd943913b" [[package]] name = "data-encoding" -version = "2.7.0" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e60eed09d8c01d3cee5b7d30acb059b76614c918fa0f992e0dd6eeb10daad6f" +checksum = "575f75dfd25738df5b91b8e43e14d44bda14637a58fae779fd2b064f8bf3e010" [[package]] name = "date_header" @@ -1309,9 +1308,9 @@ dependencies = [ [[package]] name = "either" -version = "1.13.0" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" +checksum = "b7914353092ddf589ad78f25c5c1c21b7f80b0ff8621e7c814c3485b5306da9d" dependencies = [ "serde", ] @@ -1330,9 +1329,9 @@ dependencies = [ [[package]] name = "equivalent" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" @@ -1422,9 +1421,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.35" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" +checksum = "11faaf5a5236997af9848be0bef4db95824b1d534ebc64d0f0c6cf3e67bd38dc" dependencies = [ "crc32fast", "miniz_oxide", @@ -1618,9 +1617,9 @@ checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" [[package]] name = "h2" 
-version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccae279728d634d083c00f6099cb58f01cc99c145b84b8be2f6c74618d79922e" +checksum = "5017294ff4bb30944501348f6f8e42e6ad28f42c8bbef7a74029aff064a4e3c2" dependencies = [ "atomic-waker", "bytes", @@ -1720,9 +1719,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hickory-proto" -version = "0.24.3" +version = "0.24.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ad3d6d98c648ed628df039541a5577bee1a7c83e9e16fe3dbedeea4cdfeb971" +checksum = "92652067c9ce6f66ce53cc38d1169daa36e6e7eb7dd3b63b5103bd9d97117248" dependencies = [ "async-trait", "cfg-if", @@ -1744,9 +1743,9 @@ dependencies = [ [[package]] name = "hickory-resolver" -version = "0.24.3" +version = "0.24.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcf287bde7b776e85d7188e6e5db7cf410a2f9531fe82817eb87feed034c8d14" +checksum = "cbb117a1ca520e111743ab2f6688eddee69db4e0ea242545a604dce8a66fd22e" dependencies = [ "cfg-if", "futures-util", @@ -2223,6 +2222,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.14" @@ -2335,9 +2343,9 @@ checksum = "03087c2bad5e1034e8cace5926dec053fb3790248370865f5117a7d0213354c8" [[package]] name = "libc" -version = "0.2.169" +version = "0.2.170" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" +checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828" [[package]] name = "libfuzzer-sys" @@ -2384,9 +2392,9 @@ checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" [[package]] name = "litemap" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ee93343901ab17bd981295f2cf0026d4ad018c7c31ba84549a4ddbb47a45104" +checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" [[package]] name = "lock_api" @@ -2400,9 +2408,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.25" +version = "0.4.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" +checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e" [[package]] name = "loole" @@ -2570,9 +2578,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.3" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8402cab7aefae129c6977bb0ff1b8fd9a04eb5b51efc50a70bea51cda0c7924" +checksum = "8e3e04debbb59698c15bacbb6d93584a8c0ca9cc3213cb423d31f760d8843ce5" dependencies = [ "adler2", "simd-adler32", @@ -2602,7 +2610,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "cfg-if", "cfg_aliases", "libc", @@ -2844,9 +2852,9 @@ dependencies = [ [[package]] name = "os_info" -version = "3.9.2" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6e6520c8cc998c5741ee68ec1dc369fc47e5f0ea5320018ecf2a1ccd6328f48b" +checksum = "2a604e53c24761286860eba4e2c8b23a0161526476b1de520139d69cdb85a6b5" dependencies = [ "log", "serde", @@ -3116,9 +3124,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.13.4" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c0fef6c4230e4ccf618a35c59d7ede15dea37de8427500f50aff708806e42ec" +checksum = "2796faa41db3ec313a31f7624d9286acf277b52de526150b7e69f3debf891ee5" dependencies = [ "bytes", "prost-derive", @@ -3126,12 +3134,12 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.13.4" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "157c5a9d7ea5c2ed2d9fb8f495b64759f7816c7eaea54ba3978f0d63000162e3" +checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" dependencies = [ "anyhow", - "itertools 0.13.0", + "itertools 0.14.0", "proc-macro2", "quote", "syn 2.0.98", @@ -3139,20 +3147,20 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.13.4" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2f1e56baa61e93533aebc21af4d2134b70f66275e0fcdf3cbe43d77ff7e8fc" +checksum = "52c2c1bf36ddb1a1c396b3601a3cec27c2462e45f07c386894ec3ccf5332bd16" dependencies = [ "prost", ] [[package]] name = "pulldown-cmark" -version = "0.12.2" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f86ba2052aebccc42cbbb3ed234b8b13ce76f75c3551a303cb2bcffcff12bb14" +checksum = "1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "memchr", "pulldown-cmark-escape", "unicase", @@ -3225,9 +3233,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.9" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c40286217b4ba3a71d644d752e6a0b71f13f1b6a2c5311acfcbe0c2418ed904" +checksum = "e46f3055866785f6b92bc6164b76be02ca8f2eb4b002c0354b28cf4c119e5944" dependencies = [ "cfg_aliases", "libc", @@ -3348,11 +3356,11 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" +checksum = "82b568323e98e49e2a0899dcee453dd679fae22d69adf9b11dd508d1549b7e2f" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", ] [[package]] @@ -3466,15 +3474,14 @@ checksum = "57397d16646700483b67d2dd6511d79318f9d057fdbd21a4066aeac8b41d310a" [[package]] name = "ring" -version = "0.17.8" +version = "0.17.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +checksum = "da5349ae27d3887ca812fb375b45a4fbb36d8d12d2df394968cd86e35683fe73" dependencies = [ "cc", "cfg-if", "getrandom 0.2.15", "libc", - "spin", "untrusted", "windows-sys 0.52.0", ] @@ -3482,7 +3489,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "assign", "js_int", @@ -3504,7 +3511,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "js_int", "ruma-common", @@ -3516,7 +3523,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "as_variant", "assign", @@ -3539,7 +3546,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "as_variant", "base64 0.22.1", @@ -3571,7 +3578,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3596,7 +3603,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "bytes", "http", @@ -3614,7 +3621,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3623,7 +3630,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "js_int", "ruma-common", @@ -3633,7 +3640,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3648,7 +3655,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "js_int", "ruma-common", @@ -3660,7 +3667,7 @@ dependencies = [ [[package]] name = "ruma-server-util" version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "headers", "http", @@ -3673,7 +3680,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3689,7 +3696,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=5dc3e0f81d614ed9dc96b50f646b2e4385291c55#5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" dependencies = [ "futures-util", "js_int", @@ -3768,7 +3775,7 @@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "errno", "libc", "linux-raw-sys", @@ -3777,9 +3784,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.22" +version = "0.23.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb9263ab4eb695e42321db096e3b8fbd715a59b154d5c88d82db2175b681ba7" +checksum = "47796c98c480fce5406ef69d1c76378375492c3b0a0de587be0c1d9feb12f395" dependencies = [ "aws-lc-rs", "log", @@ -3899,7 +3906,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", "core-foundation", "core-foundation-sys", "libc", @@ -4059,18 +4066,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.217" +version = "1.0.218" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" +checksum = "e8dfc9d19bdbf6d17e22319da49161d5d0108e4188e8b680aef6299eed22df60" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.217" +version = "1.0.218" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" +checksum = "f09503e191f4e797cb8aac08e9a4a4695c5edf6a2e70e376d961ddd5c969f82b" dependencies = [ "proc-macro2", "quote", @@ -4092,9 +4099,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.138" +version = "1.0.139" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d434192e7da787e94a6ea7e9670b26a036d0ca41e0b7efb2676dd32bae872949" +checksum = "44f86c3acccc9c65b153fe1b85a3be07fe5515274ec9f0653b4a0875731c72a6" dependencies = [ "itoa", "memchr", @@ -4274,9 +4281,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.2" +version = "1.14.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +checksum = "7fcf8323ef1faaee30a44a340193b1ac6814fd9b7b4e88e9d4519a3e4abe1cfd" dependencies = [ "serde", ] @@ -4291,12 +4298,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "spin" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" - [[package]] name = "spki" version = "0.7.3" @@ -4334,9 +4335,9 @@ dependencies = [ [[package]] name = "string_cache_codegen" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "244292f3441c89febe5b5bdfbb6863aeaf4f64da810ea3050fd927b27b8d92ce" +checksum = "c711928715f1fe0fe509c53b43e993a9a557babc2d0a3567d0a3006f1ac931a0" dependencies = [ "phf_generator", "phf_shared", @@ -4667,9 +4668,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f6d0975eaace0cf0fcadee4e4aaa5da15b5c079146f2cffb67c113be122bf37" +checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" dependencies = [ "rustls", "tokio", @@ -4734,9 +4735,9 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.23" +version = "0.22.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02a8b472d1a3d7c18e2d61a489aee3453fd9031c33e4f55bd533f4a7adca1bee" +checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" dependencies = [ "indexmap 2.7.1", "serde", @@ -4817,7 +4818,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "403fa3b783d4b626a8ad51d766ab03cb6d2dbfc46b1c5d4448395e6628dc9697" dependencies = [ "async-compression", - "bitflags 2.8.0", + "bitflags 2.9.0", "bytes", "futures-core", "futures-util", @@ -4939,9 +4940,9 @@ checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "typenum" -version = "1.17.0" +version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" [[package]] name = "typewit" @@ -4984,9 +4985,9 @@ checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a210d160f08b701c8721ba1c726c11662f877ea6b7094007e1ca9a1041945034" +checksum = "00e2473a93778eb0bad35909dff6a10d28e63f792f16ed15e404fca9d5eeedbe" [[package]] name = "unicode-segmentation" @@ -5071,9 +5072,9 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "uuid" -version = "1.13.1" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced87ca4be083373936a67f8de945faa23b6b42384bd5b64434850802c6dccd0" +checksum = "e0f540e3240398cce6128b64ba83fdbdd86129c16a3aa1a3a252efd66eb3d587" dependencies = [ "getrandom 0.3.1", "serde", @@ -5511,9 +5512,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.7.1" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"86e376c75f4f43f44db463cf729e0d3acbf954d13e22c51e26e4c264b4ab545f" +checksum = "0e7f4ea97f6f78012141bcdb6a216b2609f0979ada50b20ca5b52dde2eac2bb1" dependencies = [ "memchr", ] @@ -5534,7 +5535,7 @@ version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.9.0", ] [[package]] @@ -5613,18 +5614,18 @@ dependencies = [ [[package]] name = "zerofrom" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff3ee08c995dee1859d998dea82f7374f2826091dd9cd47def953cae446cd2e" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" dependencies = [ "zerofrom-derive", ] [[package]] name = "zerofrom-derive" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "595eed982f7d355beb85837f651fa22e90b3c044842dc7f2c2842c086f295808" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", @@ -5662,27 +5663,27 @@ dependencies = [ [[package]] name = "zstd" -version = "0.13.2" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" +checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "7.2.1" +version = "7.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" +checksum = "f3051792fbdc2e1e143244dc28c60f73d8470e93f3f9cbd0ead44da5ed802722" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.13+zstd.1.5.6" +version = "2.0.14+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" +checksum = "8fb060d4926e4ac3a3ad15d864e99ceb5f343c6b34f5bd6d81ae6ed417311be5" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index 52695d89..e2fe7021 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "5dc3e0f81d614ed9dc96b50f646b2e4385291c55" +rev = "b40e76528660f6a389eacd19a83ef9060644ee8f" features = [ "compat", "rand", @@ -509,7 +509,7 @@ version = "1.0.37" version = "1.0.89" [workspace.dependencies.bytesize] -version = "1.3.0" +version = "1.3.2" [workspace.dependencies.core_affinity] version = "0.8.1" diff --git a/flake.lock b/flake.lock index a7d80508..59fcbd8d 100644 --- a/flake.lock +++ b/flake.lock @@ -170,11 +170,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1740206139, - "narHash": "sha256-wWSv4KYhPKggKuJLzghfBs99pS3Kli9UBlyXVBzuIzc=", + "lastModified": 1740724364, + "narHash": "sha256-D1jLIueJx1dPrP09ZZwTrPf4cubV+TsFMYbpYYTVj6A=", "owner": "nix-community", "repo": "fenix", - "rev": "133a9eb59fb4ddac443ebe5ab2449d3940396533", + "rev": "edf7d9e431cda8782e729253835f178a356d3aab", "type": "github" }, "original": { @@ -364,11 +364,11 @@ "liburing": { "flake": false, "locked": { - "lastModified": 1740063075, - "narHash": "sha256-AfrCMPiXwgB0yxociq4no4NjCqGf/nRVhC3CLRoKqhA=", + "lastModified": 1740613216, + "narHash": 
"sha256-NpPOBqNND3Qe9IwqYs0mJLGTmIx7e6FgUEBAnJ+1ZLA=", "owner": "axboe", "repo": "liburing", - "rev": "5c788d514b9ed6d1a3624150de8aa6db403c1c65", + "rev": "e1003e496e66f9b0ae06674869795edf772d5500", "type": "github" }, "original": { @@ -550,11 +550,11 @@ }, "nixpkgs_5": { "locked": { - "lastModified": 1740019556, - "narHash": "sha256-vn285HxnnlHLWnv59Og7muqECNMS33mWLM14soFIv2g=", + "lastModified": 1740547748, + "narHash": "sha256-Ly2fBL1LscV+KyCqPRufUBuiw+zmWrlJzpWOWbahplg=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "dad564433178067be1fbdfcce23b546254b6d641", + "rev": "3a05eebede89661660945da1f151959900903b6a", "type": "github" }, "original": { @@ -599,11 +599,11 @@ "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1740077634, - "narHash": "sha256-KlYdDhon/hy91NutuBeN8e3qTKf3FXgsudWsjnHud68=", + "lastModified": 1740691488, + "narHash": "sha256-Fs6vBrByuiOf2WO77qeMDMTXcTGzrIMqLBv+lNeywwM=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "88fbdcd510e79ef3bcd81d6d9d4f07bdce84be8c", + "rev": "fe3eda77d3a7ce212388bda7b6cec8bffcc077e5", "type": "github" }, "original": { From af714d5778bf8b5ba4356821941e48bff55aefea Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 2 Mar 2025 23:16:30 -0500 Subject: [PATCH 079/310] refactor+fix various issues with regs/logins and admin user commands Signed-off-by: June Clementine Strawberry --- src/admin/user/commands.rs | 35 ++++-- src/api/client/account.rs | 224 ++++++++++++++++++--------------- src/api/client/session.rs | 88 ++++++------- src/core/config/check.rs | 8 ++ src/service/admin/create.rs | 20 +-- src/service/admin/grant.rs | 129 ++++++++++++------- src/service/admin/mod.rs | 7 +- src/service/appservice/mod.rs | 2 +- src/service/emergency/mod.rs | 9 +- src/service/globals/mod.rs | 2 - src/service/resolver/actual.rs | 5 +- 11 files changed, 309 insertions(+), 220 deletions(-) diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 8565f04a..35067304 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -2,7 +2,7 @@ use std::{collections::BTreeMap, fmt::Write as _}; use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room}; use conduwuit::{ - PduBuilder, Result, debug_warn, error, info, is_equal_to, + PduBuilder, Result, debug, debug_warn, error, info, is_equal_to, utils::{self, ReadyExt}, warn, }; @@ -57,16 +57,16 @@ pub(super) async fn create_user( // Validate user id let user_id = parse_local_user_id(self.services, &username)?; - if self.services.users.exists(&user_id).await { - return Ok(RoomMessageEventContent::text_plain(format!( - "Userid {user_id} already exists" - ))); + if let Err(e) = user_id.validate_strict() { + if self.services.config.emergency_password.is_none() { + return Ok(RoomMessageEventContent::text_plain(format!( + "Username {user_id} contains disallowed characters or spaces: {e}" + ))); + } } - if user_id.is_historical() { - return Ok(RoomMessageEventContent::text_plain(format!( - "User ID {user_id} does not conform to new Matrix identifier spec" - ))); + if self.services.users.exists(&user_id).await { + return Ok(RoomMessageEventContent::text_plain(format!("User {user_id} already exists"))); } let password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); @@ -185,12 +185,12 @@ pub(super) async fn create_user( .is_ok_and(is_equal_to!(1)) { self.services.admin.make_user_admin(&user_id).await?; - warn!("Granting {user_id} admin privileges as the first user"); } + } else { + debug!("create_user 
admin command called without an admin room being available"); } - // Inhibit login does not work for guests Ok(RoomMessageEventContent::text_plain(format!( "Created user with user_id: {user_id} and password: `{password}`" ))) @@ -694,6 +694,19 @@ pub(super) async fn force_leave_room( self.services.globals.user_is_local(&user_id), "Parsed user_id must be a local user" ); + + if !self + .services + .rooms + .state_cache + .is_joined(&user_id, &room_id) + .await + { + return Ok(RoomMessageEventContent::notice_markdown(format!( + "{user_id} is not joined in the room" + ))); + } + leave_room(self.services, &user_id, &room_id, None).await?; Ok(RoomMessageEventContent::notice_markdown(format!( diff --git a/src/api/client/account.rs b/src/api/client/account.rs index b42f51f7..2b8209d4 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -3,7 +3,8 @@ use std::fmt::Write; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - Error, PduBuilder, Result, debug_info, error, info, is_equal_to, utils, utils::ReadyExt, warn, + Err, Error, PduBuilder, Result, debug_info, err, error, info, is_equal_to, utils, + utils::ReadyExt, warn, }; use futures::{FutureExt, StreamExt}; use register::RegistrationKind; @@ -17,7 +18,6 @@ use ruma::{ request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn, whoami, }, - error::ErrorKind, uiaa::{AuthFlow, AuthType, UiaaInfo}, }, events::{ @@ -60,6 +60,14 @@ pub(crate) async fn get_register_available_route( || appservice.registration.id.contains("matrix_appservice_irc") }); + if services + .globals + .forbidden_usernames() + .is_match(&body.username) + { + return Err!(Request(Forbidden("Username is forbidden"))); + } + // don't force the username lowercase if it's from matrix-appservice-irc let body_username = if is_matrix_appservice_irc { body.username.clone() @@ -68,30 +76,45 @@ pub(crate) async fn get_register_available_route( }; // Validate user id - let user_id = UserId::parse_with_server_name(body_username, services.globals.server_name()) - .ok() - .filter(|user_id| { - (!user_id.is_historical() || is_matrix_appservice_irc) - && services.globals.user_is_local(user_id) - }) - .ok_or(Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; + let user_id = + match UserId::parse_with_server_name(&body_username, services.globals.server_name()) { + | Ok(user_id) => { + if let Err(e) = user_id.validate_strict() { + // unless the username is from the broken matrix appservice IRC bridge, we + // should follow synapse's behaviour on not allowing things like spaces + // and UTF-8 characters in usernames + if !is_matrix_appservice_irc { + return Err!(Request(InvalidUsername(debug_warn!( + "Username {body_username} contains disallowed characters or spaces: \ + {e}" + )))); + } + } + + user_id + }, + | Err(e) => { + return Err!(Request(InvalidUsername(debug_warn!( + "Username {body_username} is not valid: {e}" + )))); + }, + }; // Check if username is creative enough if services.users.exists(&user_id).await { - return Err(Error::BadRequest(ErrorKind::UserInUse, "Desired user ID is already taken.")); + return Err!(Request(UserInUse("User ID is not available."))); } - if services - .globals - .forbidden_usernames() - .is_match(user_id.localpart()) - { - return Err(Error::BadRequest(ErrorKind::Unknown, "Username is forbidden.")); + if let Some(ref info) = body.appservice_info { + if !info.is_user_match(&user_id) { + return Err!(Request(Exclusive("Username is not in an appservice namespace."))); + } 
+ }; + + if services.appservice.is_exclusive_user_id(&user_id).await { + return Err!(Request(Exclusive("Username is reserved by an appservice."))); } - // TODO add check for appservice namespaces - - // If no if check is true we have an username that's available to be used. Ok(get_username_availability::v3::Response { available: true }) } @@ -119,16 +142,27 @@ pub(crate) async fn register_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - if !services.globals.allow_registration() && body.appservice_info.is_none() { - info!( - "Registration disabled and request not from known appservice, rejecting \ - registration attempt for username \"{}\"", - body.username.as_deref().unwrap_or("") - ); - return Err(Error::BadRequest(ErrorKind::forbidden(), "Registration has been disabled.")); - } - let is_guest = body.kind == RegistrationKind::Guest; + let emergency_mode_enabled = services.config.emergency_password.is_some(); + + if !services.globals.allow_registration() && body.appservice_info.is_none() { + match (body.username.as_ref(), body.initial_device_display_name.as_ref()) { + | (Some(username), Some(device_display_name)) => { + info!(%is_guest, user = %username, device_name = %device_display_name, "Rejecting registration attempt as registration is disabled"); + }, + | (Some(username), _) => { + info!(%is_guest, user = %username, "Rejecting registration attempt as registration is disabled"); + }, + | (_, Some(device_display_name)) => { + info!(%is_guest, device_name = %device_display_name, "Rejecting registration attempt as registration is disabled"); + }, + | (None, _) => { + info!(%is_guest, "Rejecting registration attempt as registration is disabled"); + }, + }; + + return Err!(Request(Forbidden("Registration has been disabled."))); + } if is_guest && (!services.globals.allow_guest_registration() @@ -140,10 +174,7 @@ pub(crate) async fn register_route( rejecting guest registration attempt, initial device name: \"{}\"", body.initial_device_display_name.as_deref().unwrap_or("") ); - return Err(Error::BadRequest( - ErrorKind::GuestAccessForbidden, - "Guest registration is disabled.", - )); + return Err!(Request(GuestAccessForbidden("Guest registration is disabled."))); } // forbid guests from registering if there is not a real admin user yet. give @@ -154,13 +185,10 @@ pub(crate) async fn register_route( rejecting registration. 
Guest's initial device name: \"{}\"", body.initial_device_display_name.as_deref().unwrap_or("") ); - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Registration temporarily disabled.", - )); + return Err!(Request(Forbidden("Registration is temporarily disabled."))); } - let user_id = match (&body.username, is_guest) { + let user_id = match (body.username.as_ref(), is_guest) { | (Some(username), false) => { // workaround for https://github.com/matrix-org/matrix-appservice-irc/issues/1780 due to inactivity of fixing the issue let is_matrix_appservice_irc = @@ -170,6 +198,12 @@ pub(crate) async fn register_route( || appservice.registration.id.contains("matrix_appservice_irc") }); + if services.globals.forbidden_usernames().is_match(username) + && !emergency_mode_enabled + { + return Err!(Request(Forbidden("Username is forbidden"))); + } + // don't force the username lowercase if it's from matrix-appservice-irc let body_username = if is_matrix_appservice_irc { username.clone() @@ -177,31 +211,34 @@ pub(crate) async fn register_route( username.to_lowercase() }; - let proposed_user_id = - UserId::parse_with_server_name(body_username, services.globals.server_name()) - .ok() - .filter(|user_id| { - (!user_id.is_historical() || is_matrix_appservice_irc) - && services.globals.user_is_local(user_id) - }) - .ok_or(Error::BadRequest( - ErrorKind::InvalidUsername, - "Username is invalid.", - ))?; + let proposed_user_id = match UserId::parse_with_server_name( + &body_username, + services.globals.server_name(), + ) { + | Ok(user_id) => { + if let Err(e) = user_id.validate_strict() { + // unless the username is from the broken matrix appservice IRC bridge, or + // we are in emergency mode, we should follow synapse's behaviour on + // not allowing things like spaces and UTF-8 characters in usernames + if !is_matrix_appservice_irc && !emergency_mode_enabled { + return Err!(Request(InvalidUsername(debug_warn!( + "Username {body_username} contains disallowed characters or \ + spaces: {e}" + )))); + } + } + + user_id + }, + | Err(e) => { + return Err!(Request(InvalidUsername(debug_warn!( + "Username {body_username} is not valid: {e}" + )))); + }, + }; if services.users.exists(&proposed_user_id).await { - return Err(Error::BadRequest( - ErrorKind::UserInUse, - "Desired user ID is already taken.", - )); - } - - if services - .globals - .forbidden_usernames() - .is_match(proposed_user_id.localpart()) - { - return Err(Error::BadRequest(ErrorKind::Unknown, "Username is forbidden.")); + return Err!(Request(UserInUse("User ID is not available."))); } proposed_user_id @@ -221,21 +258,18 @@ pub(crate) async fn register_route( if body.body.login_type == Some(LoginType::ApplicationService) { match body.appservice_info { | Some(ref info) => - if !info.is_user_match(&user_id) { - return Err(Error::BadRequest( - ErrorKind::Exclusive, - "User is not in namespace.", - )); + if !info.is_user_match(&user_id) && !emergency_mode_enabled { + return Err!(Request(Exclusive( + "Username is not in an appservice namespace." 
+ ))); }, | _ => { - return Err(Error::BadRequest( - ErrorKind::MissingToken, - "Missing appservice token.", - )); + return Err!(Request(MissingToken("Missing appservice token."))); }, } - } else if services.appservice.is_exclusive_user_id(&user_id).await { - return Err(Error::BadRequest(ErrorKind::Exclusive, "User ID reserved by appservice.")); + } else if services.appservice.is_exclusive_user_id(&user_id).await && !emergency_mode_enabled + { + return Err!(Request(Exclusive("Username is reserved by an appservice."))); } // UIAA @@ -271,7 +305,7 @@ pub(crate) async fn register_route( .uiaa .try_auth( &UserId::parse_with_server_name("", services.globals.server_name()) - .expect("we know this is valid"), + .unwrap(), "".into(), auth, &uiaainfo, @@ -287,7 +321,7 @@ pub(crate) async fn register_route( uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services.uiaa.create( &UserId::parse_with_server_name("", services.globals.server_name()) - .expect("we know this is valid"), + .unwrap(), "".into(), &uiaainfo, &json, @@ -295,7 +329,7 @@ pub(crate) async fn register_route( return Err(Error::Uiaa(uiaainfo)); }, | _ => { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err!(Request(NotJson("JSON body is not valid"))); }, }, } @@ -407,7 +441,7 @@ pub(crate) async fn register_route( // log in conduit admin channel if a guest registered if body.appservice_info.is_none() && is_guest && services.globals.log_guest_registrations() { - info!("New guest user \"{user_id}\" registered on this server."); + debug_info!("New guest user \"{user_id}\" registered on this server."); if !device_display_name.is_empty() { if services.server.config.admin_room_notices { @@ -436,7 +470,8 @@ pub(crate) async fn register_route( } // If this is the first real user, grant them admin privileges except for guest - // users Note: the server user, @conduit:servername, is generated first + // users + // Note: the server user is generated first if !is_guest { if let Ok(admin_room) = services.admin.get_admin_room().await { if services @@ -541,8 +576,8 @@ pub(crate) async fn change_password_route( let sender_user = body .sender_user .as_ref() - .ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?; - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + .ok_or_else(|| err!(Request(MissingToken("Missing access token."))))?; + let sender_device = body.sender_device(); let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { stages: vec![AuthType::Password] }], @@ -566,16 +601,16 @@ pub(crate) async fn change_password_route( // Success! 
}, | _ => match body.json_body { - | Some(json) => { + | Some(ref json) => { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + .create(sender_user, sender_device, &uiaainfo, json); return Err(Error::Uiaa(uiaainfo)); }, | _ => { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err!(Request(NotJson("JSON body is not valid"))); }, }, } @@ -589,7 +624,7 @@ pub(crate) async fn change_password_route( services .users .all_device_ids(sender_user) - .ready_filter(|id| id != sender_device) + .ready_filter(|id| *id != sender_device) .for_each(|id| services.users.remove_device(sender_user, id)) .await; } @@ -651,8 +686,8 @@ pub(crate) async fn deactivate_route( let sender_user = body .sender_user .as_ref() - .ok_or_else(|| Error::BadRequest(ErrorKind::MissingToken, "Missing access token."))?; - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + .ok_or_else(|| err!(Request(MissingToken("Missing access token."))))?; + let sender_device = body.sender_device(); let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { stages: vec![AuthType::Password] }], @@ -675,16 +710,16 @@ pub(crate) async fn deactivate_route( // Success! }, | _ => match body.json_body { - | Some(json) => { + | Some(ref json) => { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + .create(sender_user, sender_device, &uiaainfo, json); return Err(Error::Uiaa(uiaainfo)); }, | _ => { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + return Err!(Request(NotJson("JSON body is not valid"))); }, }, } @@ -743,10 +778,7 @@ pub(crate) async fn third_party_route( pub(crate) async fn request_3pid_management_token_via_email_route( _body: Ruma, ) -> Result { - Err(Error::BadRequest( - ErrorKind::ThreepidDenied, - "Third party identifier is not allowed", - )) + Err!(Request(ThreepidDenied("Third party identifiers are not implemented"))) } /// # `POST /_matrix/client/v3/account/3pid/msisdn/requestToken` @@ -759,10 +791,7 @@ pub(crate) async fn request_3pid_management_token_via_email_route( pub(crate) async fn request_3pid_management_token_via_msisdn_route( _body: Ruma, ) -> Result { - Err(Error::BadRequest( - ErrorKind::ThreepidDenied, - "Third party identifier is not allowed", - )) + Err!(Request(ThreepidDenied("Third party identifiers are not implemented"))) } /// # `GET /_matrix/client/v1/register/m.login.registration_token/validity` @@ -776,10 +805,7 @@ pub(crate) async fn check_registration_token_validity( body: Ruma, ) -> Result { let Some(reg_token) = services.globals.registration_token.clone() else { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Server does not allow token registration.", - )); + return Err!(Request(Forbidden("Server does not allow token registration"))); }; Ok(check_registration_token_validity::v1::Response { valid: reg_token == body.token }) diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 5c0ab47d..6db761af 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -2,12 +2,11 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{Err, debug, err, info, utils::ReadyExt, warn}; +use conduwuit::{Err, debug, err, info, utils::ReadyExt}; use futures::StreamExt; use ruma::{ - OwnedUserId, UserId, + UserId, api::client::{ - error::ErrorKind, session::{ get_login_token, 
get_login_types::{ @@ -67,6 +66,8 @@ pub(crate) async fn login_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { + let emergency_mode_enabled = services.config.emergency_password.is_some(); + // Validate login method // TODO: Other login methods let user_id = match &body.login_info { @@ -78,20 +79,22 @@ pub(crate) async fn login_route( .. }) => { debug!("Got password login type"); - let user_id = if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = - identifier - { - UserId::parse_with_server_name( - user_id.to_lowercase(), - services.globals.server_name(), - ) - } else if let Some(user) = user { - OwnedUserId::parse(user) - } else { - warn!("Bad login type: {:?}", &body.login_info); - return Err!(Request(Forbidden("Bad login type."))); - } - .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; + let user_id = + if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { + UserId::parse_with_server_name(user_id, &services.config.server_name) + } else if let Some(user) = user { + UserId::parse_with_server_name(user, &services.config.server_name) + } else { + return Err!(Request(Unknown( + warn!(?body.login_info, "Invalid or unsupported login type") + ))); + } + .map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?; + + assert!( + services.globals.user_is_local(&user_id), + "User ID does not belong to this homeserver" + ); let hash = services .users @@ -124,46 +127,40 @@ pub(crate) async fn login_route( debug!("Got appservice login type"); let user_id = if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { - UserId::parse_with_server_name( - user_id.to_lowercase(), - services.globals.server_name(), - ) + UserId::parse_with_server_name(user_id, &services.config.server_name) } else if let Some(user) = user { - OwnedUserId::parse(user) + UserId::parse_with_server_name(user, &services.config.server_name) } else { - warn!("Bad login type: {:?}", &body.login_info); - return Err(Error::BadRequest(ErrorKind::forbidden(), "Bad login type.")); + return Err!(Request(Unknown( + warn!(?body.login_info, "Invalid or unsupported login type") + ))); } - .map_err(|e| { - warn!("Failed to parse username from appservice logging in: {e}"); - Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") - })?; + .map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?; + + assert!( + services.globals.user_is_local(&user_id), + "User ID does not belong to this homeserver" + ); match body.appservice_info { | Some(ref info) => - if !info.is_user_match(&user_id) { - return Err(Error::BadRequest( - ErrorKind::Exclusive, - "User is not in namespace.", - )); + if !info.is_user_match(&user_id) && !emergency_mode_enabled { + return Err!(Request(Exclusive( + "Username is not in an appservice namespace." 
+ ))); }, | _ => { - return Err(Error::BadRequest( - ErrorKind::MissingToken, - "Missing appservice token.", - )); + return Err!(Request(MissingToken("Missing appservice token."))); }, } user_id }, | _ => { - warn!("Unsupported or unknown login type: {:?}", &body.login_info); - debug!("JSON body: {:?}", &body.json_body); - return Err(Error::BadRequest( - ErrorKind::Unknown, - "Unsupported or unknown login type.", - )); + debug!("/login json_body: {:?}", &body.json_body); + return Err!(Request(Unknown( + warn!(?body.login_info, "Invalid or unsupported login type") + ))); }, }; @@ -216,9 +213,6 @@ pub(crate) async fn login_route( info!("{user_id} logged in"); - // home_server is deprecated but apparently must still be sent despite it being - // deprecated over 6 years ago. initially i thought this macro was unnecessary, - // but ruma uses this same macro for the same reason so... #[allow(deprecated)] Ok(login::v3::Response { user_id, @@ -226,7 +220,7 @@ pub(crate) async fn login_route( device_id, well_known: client_discovery_info, expires_in: None, - home_server: Some(services.globals.server_name().to_owned()), + home_server: Some(services.config.server_name.clone()), refresh_token: None, }) } diff --git a/src/core/config/check.rs b/src/core/config/check.rs index 488f7f94..98223be4 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -126,6 +126,14 @@ pub fn check(config: &Config) -> Result { )); } + if config.emergency_password == Some(String::new()) { + return Err!(Config( + "emergency_password", + "Emergency password was set to an empty string, this is not valid. Unset \ + emergency_password to disable it or set it to a real password." + )); + } + // check if the user specified a registration token as `""` if config.registration_token == Some(String::new()) { return Err!(Config( diff --git a/src/service/admin/create.rs b/src/service/admin/create.rs index 7f71665a..4de37092 100644 --- a/src/service/admin/create.rs +++ b/src/service/admin/create.rs @@ -21,11 +21,11 @@ use crate::Services; /// Create the admin room. /// -/// Users in this room are considered admins by conduit, and the room can be +/// Users in this room are considered admins by conduwuit, and the room can be /// used to issue admin commands by talking to the server user inside it. 
-pub async fn create_admin_room(services: &Services) -> Result<()> { +pub async fn create_admin_room(services: &Services) -> Result { let room_id = RoomId::new(services.globals.server_name()); - let room_version = &services.server.config.default_room_version; + let room_version = &services.config.default_room_version; let _short_id = services .rooms @@ -36,14 +36,14 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { let state_lock = services.rooms.state.mutex.lock(&room_id).await; // Create a user for the server - let server_user = &services.globals.server_user; + let server_user = services.globals.server_user.as_ref(); services.users.create(server_user, None)?; let create_content = { use RoomVersionId::*; match room_version { | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => - RoomCreateEventContent::new_v1(server_user.clone()), + RoomCreateEventContent::new_v1(server_user.into()), | _ => RoomCreateEventContent::new_v11(), } }; @@ -71,7 +71,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .timeline .build_and_append_pdu( PduBuilder::state( - server_user.to_string(), + String::from(server_user), &RoomMemberEventContent::new(MembershipState::Join), ), server_user, @@ -81,7 +81,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .await?; // 3. Power levels - let users = BTreeMap::from_iter([(server_user.clone(), 100.into())]); + let users = BTreeMap::from_iter([(server_user.into(), 69420.into())]); services .rooms @@ -140,7 +140,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .await?; // 5. Events implied by name and topic - let room_name = format!("{} Admin Room", services.globals.server_name()); + let room_name = format!("{} Admin Room", services.config.server_name); services .rooms .timeline @@ -157,7 +157,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .timeline .build_and_append_pdu( PduBuilder::state(String::new(), &RoomTopicEventContent { - topic: format!("Manage {}", services.globals.server_name()), + topic: format!("Manage {} | Run commands prefixed with `!admin` | Run `!admin -h` for help | Documentation: https://conduwuit.puppyirl.gay/", services.config.server_name), }), server_user, &room_id, @@ -187,7 +187,7 @@ pub async fn create_admin_room(services: &Services) -> Result<()> { .alias .set_alias(alias, &room_id, server_user)?; - // 7. (ad-hoc) Disable room previews for everyone by default + // 7. (ad-hoc) Disable room URL previews for everyone by default services .rooms .timeline diff --git a/src/service/admin/grant.rs b/src/service/admin/grant.rs index 358ea267..5173987a 100644 --- a/src/service/admin/grant.rs +++ b/src/service/admin/grant.rs @@ -1,10 +1,10 @@ use std::collections::BTreeMap; -use conduwuit::{Result, error, implement}; +use conduwuit::{Err, Result, debug_info, debug_warn, error, implement}; use ruma::{ RoomId, UserId, events::{ - RoomAccountDataEventType, + RoomAccountDataEventType, StateEventType, room::{ member::{MembershipState, RoomMemberEventContent}, message::RoomMessageEventContent, @@ -20,55 +20,98 @@ use crate::pdu::PduBuilder; /// /// This is equivalent to granting server admin privileges. 
#[implement(super::Service)] -pub async fn make_user_admin(&self, user_id: &UserId) -> Result<()> { +pub async fn make_user_admin(&self, user_id: &UserId) -> Result { let Ok(room_id) = self.get_admin_room().await else { + debug_warn!( + "make_user_admin was called without an admin room being available or created" + ); return Ok(()); }; let state_lock = self.services.state.mutex.lock(&room_id).await; + if self.services.state_cache.is_joined(user_id, &room_id).await { + return Err!(debug_warn!("User is already joined in the admin room")); + } + if self + .services + .state_cache + .is_invited(user_id, &room_id) + .await + { + return Err!(debug_warn!("User is already pending an invitation to the admin room")); + } + // Use the server user to grant the new admin's power level - let server_user = &self.services.globals.server_user; + let server_user = self.services.globals.server_user.as_ref(); - // Invite and join the real user - self.services - .timeline - .build_and_append_pdu( - PduBuilder::state( - user_id.to_string(), - &RoomMemberEventContent::new(MembershipState::Invite), - ), - server_user, + // if this is our local user, just forcefully join them in the room. otherwise, + // invite the remote user. + if self.services.globals.user_is_local(user_id) { + debug_info!("Inviting local user {user_id} to admin room {room_id}"); + self.services + .timeline + .build_and_append_pdu( + PduBuilder::state( + String::from(user_id), + &RoomMemberEventContent::new(MembershipState::Invite), + ), + server_user, + &room_id, + &state_lock, + ) + .await?; + + debug_info!("Force joining local user {user_id} to admin room {room_id}"); + self.services + .timeline + .build_and_append_pdu( + PduBuilder::state( + String::from(user_id), + &RoomMemberEventContent::new(MembershipState::Join), + ), + user_id, + &room_id, + &state_lock, + ) + .await?; + } else { + debug_info!("Inviting remote user {user_id} to admin room {room_id}"); + self.services + .timeline + .build_and_append_pdu( + PduBuilder::state( + user_id.to_string(), + &RoomMemberEventContent::new(MembershipState::Invite), + ), + server_user, + &room_id, + &state_lock, + ) + .await?; + } + + // Set power levels + let mut room_power_levels = self + .services + .state_accessor + .room_state_get_content::( &room_id, - &state_lock, + &StateEventType::RoomPowerLevels, + "", ) - .await?; - self.services - .timeline - .build_and_append_pdu( - PduBuilder::state( - user_id.to_string(), - &RoomMemberEventContent::new(MembershipState::Join), - ), - user_id, - &room_id, - &state_lock, - ) - .await?; + .await + .unwrap_or_default(); - // Set power level - let users = BTreeMap::from_iter([ - (server_user.clone(), 100.into()), - (user_id.to_owned(), 100.into()), - ]); + room_power_levels + .users + .insert(server_user.into(), 69420.into()); + room_power_levels.users.insert(user_id.into(), 100.into()); self.services .timeline .build_and_append_pdu( - PduBuilder::state(String::new(), &RoomPowerLevelsEventContent { - users, - ..Default::default() - }), + PduBuilder::state(String::new(), &room_power_levels), server_user, &room_id, &state_lock, @@ -76,15 +119,17 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result<()> { .await?; // Set room tag - let room_tag = &self.services.server.config.admin_room_tag; + let room_tag = self.services.server.config.admin_room_tag.as_str(); if !room_tag.is_empty() { if let Err(e) = self.set_room_tag(&room_id, user_id, room_tag).await { - error!(?room_id, ?user_id, ?room_tag, ?e, "Failed to set tag for admin grant"); + 
error!(?room_id, ?user_id, ?room_tag, "Failed to set tag for admin grant: {e}"); } } if self.services.server.config.admin_room_notices { - let welcome_message = String::from("## Thank you for trying out conduwuit!\n\nconduwuit is technically a hard fork of Conduit, which is in Beta. The Beta status initially was inherited from Conduit, however overtime this Beta status is rapidly becoming less and less relevant as our codebase significantly diverges more and more. conduwuit is quite stable and very usable as a daily driver and for a low-medium sized homeserver. There is still a lot of more work to be done, but it is in a far better place than the project was in early 2024.\n\nHelpful links:\n> GitHub Repo: https://github.com/girlbossceo/conduwuit\n> Documentation: https://conduwuit.puppyirl.gay/\n> Report issues: https://github.com/girlbossceo/conduwuit/issues\n\nFor a list of available commands, send the following message in this room: `!admin --help`\n\nHere are some rooms you can join (by typing the command into your client) -\n\nconduwuit space: `/join #conduwuit-space:puppygock.gay`\nconduwuit main room (Ask questions and get notified on updates): `/join #conduwuit:puppygock.gay`\nconduwuit offtopic room: `/join #conduwuit-offtopic:puppygock.gay`"); + let welcome_message = String::from( + "## Thank you for trying out conduwuit!\n\nconduwuit is technically a hard fork of Conduit, which is in Beta. The Beta status initially was inherited from Conduit, however overtime this Beta status is rapidly becoming less and less relevant as our codebase significantly diverges more and more. conduwuit is quite stable and very usable as a daily driver and for a low-medium sized homeserver. There is still a lot of more work to be done, but it is in a far better place than the project was in early 2024.\n\nHelpful links:\n> GitHub Repo: https://github.com/girlbossceo/conduwuit\n> Documentation: https://conduwuit.puppyirl.gay/\n> Report issues: https://github.com/girlbossceo/conduwuit/issues\n\nFor a list of available commands, send the following message in this room: `!admin --help`\n\nHere are some rooms you can join (by typing the command into your client) -\n\nconduwuit space: `/join #conduwuit-space:puppygock.gay`\nconduwuit main room (Ask questions and get notified on updates): `/join #conduwuit:puppygock.gay`\nconduwuit offtopic room: `/join #conduwuit-offtopic:puppygock.gay`", + ); // Send welcome message self.services @@ -102,7 +147,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result<()> { } #[implement(super::Service)] -async fn set_room_tag(&self, room_id: &RoomId, user_id: &UserId, tag: &str) -> Result<()> { +async fn set_room_tag(&self, room_id: &RoomId, user_id: &UserId, tag: &str) -> Result { let mut event = self .services .account_data @@ -125,7 +170,5 @@ async fn set_room_tag(&self, room_id: &RoomId, user_id: &UserId, tag: &str) -> R RoomAccountDataEventType::Tag, &serde_json::to_value(event)?, ) - .await?; - - Ok(()) + .await } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 4622f10e..b3466711 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -40,6 +40,7 @@ struct Services { timeline: Dep, state: Dep, state_cache: Dep, + state_accessor: Dep, account_data: Dep, services: StdRwLock>>, } @@ -85,6 +86,8 @@ impl crate::Service for Service { timeline: args.depend::("rooms::timeline"), state: args.depend::("rooms::state"), state_cache: args.depend::("rooms::state_cache"), + state_accessor: args + 
.depend::("rooms::state_accessor"), account_data: args.depend::("account_data"), services: None.into(), }, @@ -357,8 +360,8 @@ impl Service { } // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let emergency_password_set = self.services.globals.emergency_password().is_some(); + // the administrator can execute commands as the server user + let emergency_password_set = self.services.server.config.emergency_password.is_some(); let from_server = pdu.sender == *server_user && !emergency_password_set; if from_server && self.is_admin_room(&pdu.room_id).await { return false; diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 5aba0018..50a60033 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -90,7 +90,7 @@ impl Service { .write() .await .remove(appservice_id) - .ok_or(err!("Appservice not found"))?; + .ok_or_else(|| err!("Appservice not found"))?; // remove the appservice from the database self.db.id_appserviceregistrations.del(appservice_id); diff --git a/src/service/emergency/mod.rs b/src/service/emergency/mod.rs index 47a309a5..3a61f710 100644 --- a/src/service/emergency/mod.rs +++ b/src/service/emergency/mod.rs @@ -9,7 +9,7 @@ use ruma::{ push::Ruleset, }; -use crate::{Dep, account_data, globals, users}; +use crate::{Dep, account_data, config, globals, users}; pub struct Service { services: Services, @@ -17,6 +17,7 @@ pub struct Service { struct Services { account_data: Dep, + config: Dep, globals: Dep, users: Dep, } @@ -27,6 +28,8 @@ impl crate::Service for Service { Ok(Arc::new(Self { services: Services { account_data: args.depend::("account_data"), + config: args.depend::("config"), + globals: args.depend::("globals"), users: args.depend::("users"), }, @@ -54,9 +57,9 @@ impl Service { self.services .users - .set_password(server_user, self.services.globals.emergency_password().as_deref())?; + .set_password(server_user, self.services.config.emergency_password.as_deref())?; - let (ruleset, pwd_set) = match self.services.globals.emergency_password() { + let (ruleset, pwd_set) = match self.services.config.emergency_password { | Some(_) => (Ruleset::server_default(server_user), true), | None => (Ruleset::new(), false), }; diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 16b3ef3c..74f83228 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -153,8 +153,6 @@ impl Service { pub fn notification_push_path(&self) -> &String { &self.server.config.notification_push_path } - pub fn emergency_password(&self) -> &Option { &self.server.config.emergency_password } - pub fn url_preview_domain_contains_allowlist(&self) -> &Vec { &self.server.config.url_preview_domain_contains_allowlist } diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 8860d0a0..b037cf77 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -363,7 +363,7 @@ impl super::Service { let hostname = hostname.trim_end_matches('.'); match self.resolver.resolver.srv_lookup(hostname).await { | Err(e) => Self::handle_resolve_error(&e, hostname)?, - | Ok(result) => + | Ok(result) => { return Ok(result.iter().next().map(|result| { FedDest::Named( result.target().to_string().trim_end_matches('.').to_owned(), @@ -372,7 +372,8 @@ impl super::Service { .try_into() .unwrap_or_else(|_| FedDest::default_port()), ) - })), + })); + }, } } From 0d741bbd46cd1c2a86321a4a68da3167c46d53e3 Mon Sep 17 00:00:00 2001 
From: June Clementine Strawberry Date: Mon, 3 Mar 2025 00:15:12 -0500 Subject: [PATCH 080/310] remove nix run ci test as its covered by complement Signed-off-by: June Clementine Strawberry --- engage.toml | 28 ++-------------------------- 1 file changed, 2 insertions(+), 26 deletions(-) diff --git a/engage.toml b/engage.toml index c1a2be1f..71366532 100644 --- a/engage.toml +++ b/engage.toml @@ -18,12 +18,12 @@ script = "direnv --version" [[task]] name = "rustc" group = "versions" -script = "rustc --version" +script = "rustc --version -v" [[task]] name = "cargo" group = "versions" -script = "cargo --version" +script = "cargo --version -v" [[task]] name = "cargo-fmt" @@ -60,11 +60,6 @@ name = "markdownlint" group = "versions" script = "markdownlint --version" -[[task]] -name = "dpkg" -group = "versions" -script = "dpkg --version" - [[task]] name = "cargo-audit" group = "security" @@ -228,22 +223,3 @@ depends = ["cargo/default"] script = """ git diff --exit-code conduwuit-example.toml """ - -# Ensure that the flake's default output can build and run without crashing -# -# This is a dynamically-linked jemalloc build, which is a case not covered by -# our other tests. We've had linking problems in the past with dynamic -# jemalloc builds that usually show up as an immediate segfault or "invalid free" -[[task]] -name = "nix-default" -group = "tests" -script = """ -env DIRENV_DEVSHELL=dynamic \ - CARGO_PROFILE="test" \ - direnv exec . \ - bin/nix-build-and-cache just .#default-test -env DIRENV_DEVSHELL=dynamic \ - CARGO_PROFILE="test" \ - direnv exec . \ - nix run -L .#default-test -- --help && nix run -L .#default-test -- --version -""" From df72384c16aa77ccedf532888b0799a3edc2d8b0 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 3 Mar 2025 01:05:43 -0500 Subject: [PATCH 081/310] delete snappy, bump rust-rocksdb, bump rocksdb to v9.10.0 again Signed-off-by: June Clementine Strawberry --- Cargo.lock | 4 ++-- deps/rust-rocksdb/Cargo.toml | 3 +-- flake.lock | 8 ++++---- flake.nix | 2 +- 4 files changed, 8 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e632b504..ec531994 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3711,7 +3711,7 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.32.0+9.10.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=7b0e1bbe395a41ba8a11347a4921da590e3ad0d9#7b0e1bbe395a41ba8a11347a4921da590e3ad0d9" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b#2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b" dependencies = [ "bindgen", "bzip2-sys", @@ -3728,7 +3728,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.36.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=7b0e1bbe395a41ba8a11347a4921da590e3ad0d9#7b0e1bbe395a41ba8a11347a4921da590e3ad0d9" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b#2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index c6af428d..f9069fc1 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -14,7 +14,6 @@ default = ["lz4", "zstd", "zlib", "bzip2"] jemalloc = ["rust-rocksdb/jemalloc"] io-uring = ["rust-rocksdb/io-uring"] valgrind = ["rust-rocksdb/valgrind"] -snappy = ["rust-rocksdb/snappy"] lz4 = ["rust-rocksdb/lz4"] zstd = ["rust-rocksdb/zstd"] zlib = 
["rust-rocksdb/zlib"] @@ -27,7 +26,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "7b0e1bbe395a41ba8a11347a4921da590e3ad0d9" +rev = "2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b" #branch = "master" default-features = false diff --git a/flake.lock b/flake.lock index 59fcbd8d..ba7fdcff 100644 --- a/flake.lock +++ b/flake.lock @@ -567,16 +567,16 @@ "rocksdb": { "flake": false, "locked": { - "lastModified": 1739735789, - "narHash": "sha256-BIzuZS0TV4gRnciP4ieW5J3Hql986iedM5dHQfK6z68=", + "lastModified": 1739735940, + "narHash": "sha256-9AqKOWsYXy0sU2C+kB+3NLCDMZ2VsjfbHqvSiydUlcs=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "34e401fd4392dd3268e042f1e40dffd064b9a7ff", + "rev": "f8ad8cd72fd7e527171d35fa8dbca9a073b5b26c", "type": "github" }, "original": { "owner": "girlbossceo", - "ref": "v9.9.3", + "ref": "v9.10.0", "repo": "rocksdb", "type": "github" } diff --git a/flake.nix b/flake.nix index 04dee681..6702111f 100644 --- a/flake.nix +++ b/flake.nix @@ -9,7 +9,7 @@ flake-utils.url = "github:numtide/flake-utils?ref=main"; nix-filter.url = "github:numtide/nix-filter?ref=main"; nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable"; - rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.9.3"; flake = false; }; + rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.10.0"; flake = false; }; liburing = { url = "github:axboe/liburing?ref=master"; flake = false; }; }; From 1ecd02738992f6fd75ea627e60a2ebf1133f4561 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 3 Mar 2025 01:06:04 -0500 Subject: [PATCH 082/310] always run checks when building in nix (doCheck true) Signed-off-by: June Clementine Strawberry --- nix/pkgs/main/default.nix | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix index 4150b389..5dfb32ec 100644 --- a/nix/pkgs/main/default.nix +++ b/nix/pkgs/main/default.nix @@ -162,18 +162,12 @@ commonAttrs = { ]; }; - # This is redundant with CI - doCheck = false; + doCheck = true; - cargoTestCommand = "cargo test --locked "; cargoExtraArgs = "--no-default-features --locked " + lib.optionalString (features'' != []) "--features " + (builtins.concatStringsSep "," features''); - cargoTestExtraArgs = "--no-default-features --locked " - + lib.optionalString - (features'' != []) - "--features " + (builtins.concatStringsSep "," features''); dontStrip = profile == "dev" || profile == "test"; dontPatchELF = profile == "dev" || profile == "test"; @@ -209,18 +203,12 @@ craneLib.buildPackage ( commonAttrs // { env = buildDepsOnlyEnv; }); - # This is redundant with CI - doCheck = false; + doCheck = true; - cargoTestCommand = "cargo test --locked "; cargoExtraArgs = "--no-default-features --locked " + lib.optionalString (features'' != []) "--features " + (builtins.concatStringsSep "," features''); - cargoTestExtraArgs = "--no-default-features --locked " - + lib.optionalString - (features'' != []) - "--features " + (builtins.concatStringsSep "," features''); env = buildPackageEnv; From 7c17163730fcd0f43132cce82cc28b6793ae662a Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 4 Mar 2025 23:35:21 -0500 Subject: [PATCH 083/310] switch to self-hosted ci runner, remove sudo usages Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 132 ++-------------------------- .github/workflows/documentation.yml | 64 +------------- 2 files changed, 9 insertions(+), 
187 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 82ffc6b6..c0425873 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -45,8 +45,8 @@ env: # Get error output from nix that we can actually use, and use our binary caches for the earlier CI steps NIX_CONFIG: | show-trace = true - extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net - extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= + extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net https://nix-community.cachix.org https://crane.cachix.org + extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs= crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk= experimental-features = nix-command flakes extra-experimental-features = nix-command flakes accept-flake-config = true @@ -59,7 +59,7 @@ permissions: {} jobs: tests: name: Test - runs-on: ubuntu-24.04 + runs-on: self-hosted steps: - name: Setup SSH web publish env: @@ -93,19 +93,6 @@ jobs: echo "SSH_WEBSITE=1" >> "$GITHUB_ENV" - - name: Install liburing - run: | - sudo apt install liburing-dev -y - - - name: Free up a bit of runner space - run: | - set +o pipefail - sudo docker image prune --all --force || true - sudo apt purge -y 'php.*' '^mongodb-.*' '^mysql-.*' azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true - sudo apt clean - sudo rm -rf /usr/local/lib/android /usr/local/julia* /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/local/lib/heroku /usr/lib/heroku /usr/local/share/boost /usr/share/dotnet /usr/local/bin/cmake* /usr/local/bin/stack /usr/local/bin/terraform /opt/microsoft/powershell /opt/hostedtoolcache/CodeQL /opt/hostedtoolcache/go /opt/hostedtoolcache/PyPy /usr/local/bin/sam || true - set -o pipefail - - name: Sync repository uses: actions/checkout@v4 with: @@ -123,58 +110,9 @@ jobs: exit 1 fi - - uses: nixbuild/nix-quick-install-action@master - - - name: Restore and cache Nix store - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - #if: ${{ !startsWith(github.ref, 'refs/tags/') }} - uses: nix-community/cache-nix-action@v6 - with: - # restore and save a cache using this key - primary-key: nix-${{ runner.os }}-${{ hashFiles('**/*.nix', '**/.lock') }} - # if there's no cache hit, restore a cache by this prefix - restore-prefixes-first-match: nix-${{ runner.os }}- - # collect garbage until Nix store size (in bytes) is at most this number - # before trying to save a new cache - gc-max-store-size-linux: 2073741824 - # do purge caches - purge: true - # purge all versions of the cache - purge-prefixes: nix-${{ runner.os }}- - # created more than this number of seconds ago relative to the 
start of the `Post Restore` phase - purge-last-accessed: 86400 - # except the version with the `primary-key`, if it exists - purge-primary-key: never - - - name: Enable Cachix binary cache - run: | - nix profile install nixpkgs#cachix - cachix use crane - cachix use nix-community - - - name: Apply Nix binary cache configuration - run: | - sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null < /dev/null < "$HOME/.direnvrc" - nix profile install --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv direnv allow nix develop .#all-features --command true @@ -267,22 +205,13 @@ jobs: build: name: Build - runs-on: ubuntu-24.04 + runs-on: self-hosted strategy: matrix: include: - target: aarch64-linux-musl - target: x86_64-linux-musl steps: - - name: Free up a bit of runner space - run: | - set +o pipefail - sudo docker image prune --all --force || true - sudo apt purge -y 'php.*' '^mongodb-.*' '^mysql-.*' azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true - sudo apt clean - sudo rm -rf /usr/local/lib/android /usr/local/julia* /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/local/lib/heroku /usr/lib/heroku /usr/local/share/boost /usr/share/dotnet /usr/local/bin/cmake* /usr/local/bin/stack /usr/local/bin/terraform /opt/microsoft/powershell /opt/hostedtoolcache/CodeQL /opt/hostedtoolcache/go /opt/hostedtoolcache/PyPy /usr/local/bin/sam || true - set -o pipefail - - name: Sync repository uses: actions/checkout@v4 with: @@ -316,58 +245,9 @@ jobs: echo "SSH_WEBSITE=1" >> "$GITHUB_ENV" - - uses: nixbuild/nix-quick-install-action@master - - - name: Restore and cache Nix store - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - #if: ${{ !startsWith(github.ref, 'refs/tags/') }} - uses: nix-community/cache-nix-action@v6 - with: - # restore and save a cache using this key - primary-key: nix-${{ runner.os }}-${{ matrix.target }}-${{ hashFiles('**/*.nix', '**/.lock') }} - # if there's no cache hit, restore a cache by this prefix - restore-prefixes-first-match: nix-${{ runner.os }}- - # collect garbage until Nix store size (in bytes) is at most this number - # before trying to save a new cache - gc-max-store-size-linux: 2073741824 - # do purge caches - purge: true - # purge all versions of the cache - purge-prefixes: nix-${{ runner.os }}- - # created more than this number of seconds ago relative to the start of the `Post Restore` phase - purge-last-accessed: 86400 - # except the version with the `primary-key`, if it exists - purge-primary-key: never - - - name: Enable Cachix binary cache - run: | - nix profile install nixpkgs#cachix - cachix use crane - cachix use nix-community - - - name: Apply Nix binary cache configuration - run: | - sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null < /dev/null < "$HOME/.direnvrc" - nix profile install --impure --inputs-from . 
nixpkgs#direnv nixpkgs#nix-direnv direnv allow nix develop .#all-features --command true --impure @@ -622,7 +502,7 @@ jobs: variables: outputs: github_repository: ${{ steps.var.outputs.github_repository }} - runs-on: "ubuntu-latest" + runs-on: self-hosted steps: - name: Setting global variables uses: actions/github-script@v7 @@ -632,7 +512,7 @@ jobs: core.setOutput('github_repository', '${{ github.repository }}'.toLowerCase()) docker: name: Docker publish - runs-on: ubuntu-24.04 + runs-on: self-hosted needs: [build, variables, tests] permissions: packages: write diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index fadc7b3f..88e7bbe1 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -24,8 +24,8 @@ env: # Get error output from nix that we can actually use, and use our binary caches for the earlier CI steps NIX_CONFIG: | show-trace = true - extra-substituters = extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net - extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= + extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net https://nix-community.cachix.org https://crane.cachix.org + extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs= crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk= experimental-features = nix-command flakes extra-experimental-features = nix-command flakes accept-flake-config = true @@ -41,7 +41,7 @@ permissions: {} jobs: docs: name: Documentation and GitHub Pages - runs-on: ubuntu-24.04 + runs-on: self-hosted permissions: pages: write @@ -52,15 +52,6 @@ jobs: url: ${{ steps.deployment.outputs.page_url }} steps: - - name: Free up a bit of runner space - run: | - set +o pipefail - sudo docker image prune --all --force || true - sudo apt purge -y 'php.*' '^mongodb-.*' '^mysql-.*' azure-cli google-cloud-cli google-chrome-stable firefox powershell microsoft-edge-stable || true - sudo apt clean - sudo rm -v -rf /usr/local/games /usr/local/sqlpackage /usr/local/share/powershell /usr/local/share/edge_driver /usr/local/share/gecko_driver /usr/local/share/chromium /usr/local/share/chromedriver-linux64 /usr/lib/google-cloud-sdk /usr/lib/jvm /usr/lib/mono /usr/lib/heroku - set -o pipefail - - name: Sync repository uses: actions/checkout@v4 with: @@ -70,58 +61,9 @@ jobs: if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request') uses: actions/configure-pages@v5 - - uses: nixbuild/nix-quick-install-action@master - - - name: Restore and cache Nix store - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - if: ${{ !startsWith(github.ref, 'refs/tags/') }} - uses: nix-community/cache-nix-action@v6 - with: - # restore and save a cache 
using this key - primary-key: nix-${{ runner.os }}-${{ hashFiles('**/*.nix', '**/.lock') }} - # if there's no cache hit, restore a cache by this prefix - restore-prefixes-first-match: nix-${{ runner.os }}- - # collect garbage until Nix store size (in bytes) is at most this number - # before trying to save a new cache - gc-max-store-size-linux: 2073741824 - # do purge caches - purge: true - # purge all versions of the cache - purge-prefixes: nix-${{ runner.os }}- - # created more than this number of seconds ago relative to the start of the `Post Restore` phase - purge-last-accessed: 86400 - # except the version with the `primary-key`, if it exists - purge-primary-key: never - - - name: Enable Cachix binary cache - run: | - nix profile install nixpkgs#cachix - cachix use crane - cachix use nix-community - - - name: Apply Nix binary cache configuration - run: | - sudo tee -a "${XDG_CONFIG_HOME:-$HOME/.config}/nix/nix.conf" > /dev/null < /dev/null < "$HOME/.direnvrc" - nix profile install --inputs-from . nixpkgs#direnv nixpkgs#nix-direnv direnv allow nix develop --command true From 35981d5aef8785c132d2e2a166cfcde1cd24169e Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 5 Mar 2025 19:05:42 -0500 Subject: [PATCH 084/310] automatically forget rooms on leaving Signed-off-by: June Clementine Strawberry --- src/service/rooms/state_cache/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 02ffa0d1..f406eb69 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -267,6 +267,10 @@ impl Service { }, | MembershipState::Leave | MembershipState::Ban => { self.mark_as_left(user_id, room_id); + + if self.services.globals.user_is_local(user_id) { + self.forget(room_id, user_id); + } }, | _ => {}, } From 97208d6081da92f8b5c732aa6b3bf06997ad4a16 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 5 Mar 2025 19:06:10 -0500 Subject: [PATCH 085/310] add more safety checks before allowing a room marked as forgotten Signed-off-by: June Clementine Strawberry --- src/api/client/membership.rs | 40 +++++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 12 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 0b9c0c69..940c8639 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -11,12 +11,12 @@ use axum_client_ip::InsecureClientIp; use conduwuit::{ Err, PduEvent, Result, StateKey, at, debug, debug_info, debug_warn, err, error, info, pdu::{PduBuilder, gen_event_id_canonical_json}, - result::FlatOk, + result::{FlatOk, NotFound}, state_res, trace, utils::{self, IterStream, ReadyExt, shuffle}, warn, }; -use futures::{FutureExt, StreamExt, TryFutureExt, join}; +use futures::{FutureExt, StreamExt, TryFutureExt, future::join4, join}; use ruma::{ CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, @@ -717,21 +717,37 @@ pub(crate) async fn forget_room_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user(); + let user_id = body.sender_user(); + let room_id = &body.room_id; - if services - .rooms - .state_cache - .is_joined(sender_user, &body.room_id) - .await - { + let joined = services.rooms.state_cache.is_joined(user_id, room_id); + let knocked = services.rooms.state_cache.is_knocked(user_id, room_id); + let left = 
services.rooms.state_cache.is_left(user_id, room_id); + let invited = services.rooms.state_cache.is_invited(user_id, room_id); + + let (joined, knocked, left, invited) = join4(joined, knocked, left, invited).await; + + if joined || knocked || invited { return Err!(Request(Unknown("You must leave the room before forgetting it"))); } - services + let membership = services .rooms - .state_cache - .forget(&body.room_id, sender_user); + .state_accessor + .get_member(room_id, user_id) + .await; + + if membership.is_not_found() { + return Err!(Request(Unknown("No membership event was found, room was never joined"))); + } + + if left + || membership.is_ok_and(|member| { + member.membership == MembershipState::Leave + || member.membership == MembershipState::Ban + }) { + services.rooms.state_cache.forget(room_id, user_id); + } Ok(forget_room::v3::Response::new()) } From 408f5bd30cb461cec9472a51b87f0bb1ed6b7381 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 5 Mar 2025 19:06:31 -0500 Subject: [PATCH 086/310] add val_size_hints on membership cfs (todo remove these anyways) Signed-off-by: June Clementine Strawberry --- src/database/maps.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/database/maps.rs b/src/database/maps.rs index b060ab8d..9af45159 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -181,6 +181,7 @@ pub(super) static MAPS: &[Descriptor] = &[ }, Descriptor { name: "roomuserid_invitecount", + val_size_hint: Some(8), ..descriptor::RANDOM_SMALL }, Descriptor { @@ -193,10 +194,12 @@ pub(super) static MAPS: &[Descriptor] = &[ }, Descriptor { name: "roomuserid_leftcount", + val_size_hint: Some(8), ..descriptor::RANDOM }, Descriptor { name: "roomuserid_knockedcount", + val_size_hint: Some(8), ..descriptor::RANDOM_SMALL }, Descriptor { From 2c1ec3fb02a823515697b159e26d5464ebe29937 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 5 Mar 2025 21:31:49 -0500 Subject: [PATCH 087/310] allow both lowercase and uppercase usernames to login Signed-off-by: June Clementine Strawberry --- src/api/client/session.rs | 54 +++++++++++++++++++++++++++------------ 1 file changed, 37 insertions(+), 17 deletions(-) diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 6db761af..ab67ee18 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -3,7 +3,7 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{Err, debug, err, info, utils::ReadyExt}; -use futures::StreamExt; +use futures::{StreamExt, TryFutureExt}; use ruma::{ UserId, api::client::{ @@ -86,29 +86,40 @@ pub(crate) async fn login_route( UserId::parse_with_server_name(user, &services.config.server_name) } else { return Err!(Request(Unknown( - warn!(?body.login_info, "Invalid or unsupported login type") + debug_warn!(?body.login_info, "Valid identifier or username was not provided (invalid or unsupported login type?)") ))); } .map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?; + let lowercased_user_id = UserId::parse_with_server_name( + user_id.localpart().to_lowercase(), + &services.config.server_name, + )?; + assert!( services.globals.user_is_local(&user_id), "User ID does not belong to this homeserver" ); + assert!( + services.globals.user_is_local(&lowercased_user_id), + "User ID does not belong to this homeserver" + ); let hash = services .users .password_hash(&user_id) + .or_else(|_| services.users.password_hash(&lowercased_user_id)) .await + .inspect_err(|e| 
debug!("{e}")) .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; if hash.is_empty() { return Err!(Request(UserDeactivated("The user has been deactivated"))); } - if hash::verify_password(password, &hash).is_err() { - return Err!(Request(Forbidden("Wrong username or password."))); - } + hash::verify_password(password, &hash) + .inspect_err(|e| debug!("{e}")) + .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; user_id }, @@ -125,6 +136,11 @@ pub(crate) async fn login_route( user, }) => { debug!("Got appservice login type"); + + let Some(ref info) = body.appservice_info else { + return Err!(Request(MissingToken("Missing appservice token."))); + }; + let user_id = if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier { UserId::parse_with_server_name(user_id, &services.config.server_name) @@ -132,26 +148,30 @@ pub(crate) async fn login_route( UserId::parse_with_server_name(user, &services.config.server_name) } else { return Err!(Request(Unknown( - warn!(?body.login_info, "Invalid or unsupported login type") + debug_warn!(?body.login_info, "Valid identifier or username was not provided (invalid or unsupported login type?)") ))); } .map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?; + let lowercased_user_id = UserId::parse_with_server_name( + user_id.localpart().to_lowercase(), + &services.config.server_name, + )?; + assert!( services.globals.user_is_local(&user_id), "User ID does not belong to this homeserver" ); + assert!( + services.globals.user_is_local(&lowercased_user_id), + "User ID does not belong to this homeserver" + ); - match body.appservice_info { - | Some(ref info) => - if !info.is_user_match(&user_id) && !emergency_mode_enabled { - return Err!(Request(Exclusive( - "Username is not in an appservice namespace." 
- ))); - }, - | _ => { - return Err!(Request(MissingToken("Missing appservice token."))); - }, + if !info.is_user_match(&user_id) + && !info.is_user_match(&lowercased_user_id) + && !emergency_mode_enabled + { + return Err!(Request(Exclusive("Username is not in an appservice namespace."))); } user_id @@ -159,7 +179,7 @@ pub(crate) async fn login_route( | _ => { debug!("/login json_body: {:?}", &body.json_body); return Err!(Request(Unknown( - warn!(?body.login_info, "Invalid or unsupported login type") + debug_warn!(?body.login_info, "Invalid or unsupported login type") ))); }, }; From c10500f8aebcd52a219bdba4f2114b03d9474565 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 00:14:24 -0500 Subject: [PATCH 088/310] bump rust-rocksdb and ruwuma Signed-off-by: June Clementine Strawberry --- Cargo.lock | 60 +++++++++---------------------- Cargo.toml | 3 +- deps/rust-rocksdb/Cargo.toml | 5 +-- flake.lock | 8 ++--- flake.nix | 2 +- src/api/router/auth.rs | 3 +- src/service/federation/execute.rs | 3 +- 7 files changed, 28 insertions(+), 56 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ec531994..d51bb966 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3489,7 +3489,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "assign", "js_int", @@ -3502,16 +3502,14 @@ dependencies = [ "ruma-identifiers-validation", "ruma-identity-service-api", "ruma-push-gateway-api", - "ruma-server-util", "ruma-signatures", - "ruma-state-res", "web-time 1.1.0", ] [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "js_int", "ruma-common", @@ -3523,7 +3521,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "as_variant", "assign", @@ -3546,7 +3544,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "as_variant", "base64 0.22.1", @@ -3578,7 +3576,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3603,10 +3601,12 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "bytes", + "headers", "http", + "http-auth", "httparse", "js_int", "memchr", @@ -3616,12 +3616,14 @@ dependencies = [ "ruma-events", "serde", "serde_json", + "thiserror 2.0.11", + "tracing", ] [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3630,7 +3632,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "js_int", "ruma-common", @@ -3640,7 +3642,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3655,7 +3657,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "js_int", "ruma-common", @@ -3664,23 +3666,10 @@ dependencies = [ "serde_json", ] -[[package]] -name = "ruma-server-util" -version = "0.3.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" -dependencies = [ - "headers", - "http", - "http-auth", - "ruma-common", - "thiserror 2.0.11", - "tracing", -] - [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" +source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3693,25 +3682,10 @@ dependencies = [ "thiserror 2.0.11", ] -[[package]] -name = "ruma-state-res" -version = "0.11.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=b40e76528660f6a389eacd19a83ef9060644ee8f#b40e76528660f6a389eacd19a83ef9060644ee8f" -dependencies = [ - "futures-util", - "js_int", - "ruma-common", - "ruma-events", - "serde", - "serde_json", - "thiserror 2.0.11", - "tracing", -] - [[package]] name = "rust-librocksdb-sys" version = "0.32.0+9.10.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b#2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b" +source = 
"git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=513133a3dc24b667f32933aa3247c6ec71a958f3#513133a3dc24b667f32933aa3247c6ec71a958f3" dependencies = [ "bindgen", "bzip2-sys", @@ -3728,7 +3702,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.36.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b#2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=513133a3dc24b667f32933aa3247c6ec71a958f3#513133a3dc24b667f32933aa3247c6ec71a958f3" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/Cargo.toml b/Cargo.toml index e2fe7021..7f08a21a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "b40e76528660f6a389eacd19a83ef9060644ee8f" +rev = "bb42118bd85e731b652a6110896b6945085bf944" features = [ "compat", "rand", @@ -355,7 +355,6 @@ features = [ "federation-api", "markdown", "push-gateway-api-c", - "server-util", "unstable-exhaustive-types", "ring-compat", "compat-upload-signatures", diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index f9069fc1..61bd2333 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -2,7 +2,7 @@ name = "rust-rocksdb-uwu" categories.workspace = true description = "dylib wrapper for rust-rocksdb" -edition = "2021" +edition = "2024" keywords.workspace = true license.workspace = true readme.workspace = true @@ -13,6 +13,7 @@ version = "0.0.1" default = ["lz4", "zstd", "zlib", "bzip2"] jemalloc = ["rust-rocksdb/jemalloc"] io-uring = ["rust-rocksdb/io-uring"] +numa = ["rust-rocksdb/numa"] # unused by rocksdb for now valgrind = ["rust-rocksdb/valgrind"] lz4 = ["rust-rocksdb/lz4"] zstd = ["rust-rocksdb/zstd"] @@ -26,7 +27,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "2ff4bbf31e944fa2686bb041d8c5caaf4b966d3b" +rev = "513133a3dc24b667f32933aa3247c6ec71a958f3" #branch = "master" default-features = false diff --git a/flake.lock b/flake.lock index ba7fdcff..a1bd423f 100644 --- a/flake.lock +++ b/flake.lock @@ -567,16 +567,16 @@ "rocksdb": { "flake": false, "locked": { - "lastModified": 1739735940, - "narHash": "sha256-9AqKOWsYXy0sU2C+kB+3NLCDMZ2VsjfbHqvSiydUlcs=", + "lastModified": 1741234703, + "narHash": "sha256-sT5g/RM9vrwY6AmjSfl4RoJPGtcJCkZCsxiX3PFJgKQ=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "f8ad8cd72fd7e527171d35fa8dbca9a073b5b26c", + "rev": "185593ce4534091e57025e9f3571dbf681c04631", "type": "github" }, "original": { "owner": "girlbossceo", - "ref": "v9.10.0", + "ref": "v9.9.3", "repo": "rocksdb", "type": "github" } diff --git a/flake.nix b/flake.nix index 6702111f..04dee681 100644 --- a/flake.nix +++ b/flake.nix @@ -9,7 +9,7 @@ flake-utils.url = "github:numtide/flake-utils?ref=main"; nix-filter.url = "github:numtide/nix-filter?ref=main"; nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable"; - rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.10.0"; flake = false; }; + rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.9.3"; flake = false; }; liburing = { url = "github:axboe/liburing?ref=master"; flake = false; }; }; diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index 92b75cfa..5cd7b831 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -17,9 +17,8 @@ use ruma::{ }, 
voip::get_turn_server_info, }, - federation::openid::get_openid_userinfo, + federation::{authentication::XMatrix, openid::get_openid_userinfo}, }, - server_util::authorization::XMatrix, }; use service::{ Services, diff --git a/src/service/federation/execute.rs b/src/service/federation/execute.rs index d254486f..63f2ccfb 100644 --- a/src/service/federation/execute.rs +++ b/src/service/federation/execute.rs @@ -12,10 +12,9 @@ use ruma::{ CanonicalJsonObject, CanonicalJsonValue, ServerName, ServerSigningKeyId, api::{ EndpointError, IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, - client::error::Error as RumaError, + client::error::Error as RumaError, federation::authentication::XMatrix, }, serde::Base64, - server_util::authorization::XMatrix, }; use crate::resolver::actual::ActualDest; From 17b625a85b908d4c2cb3df308c2337be6e571ce2 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 00:14:49 -0500 Subject: [PATCH 089/310] reject device keys if they dont match user ID or device ID or are missing fields Signed-off-by: June Clementine Strawberry --- src/api/client/keys.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 6f20153b..8a7eab7e 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -48,6 +48,19 @@ pub(crate) async fn upload_keys_route( } if let Some(device_keys) = &body.device_keys { + let deser_device_keys = device_keys.deserialize()?; + + if deser_device_keys.user_id != sender_user { + return Err!(Request(Unknown( + "User ID in keys uploaded does not match your own user ID" + ))); + } + if deser_device_keys.device_id != sender_device { + return Err!(Request(Unknown( + "Device ID in keys uploaded does not match your own device ID" + ))); + } + // TODO: merge this and the existing event? // This check is needed to assure that signatures are kept if services From f4c51cd405f1a0695b16c085655eb0180637fe2d Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 00:18:28 -0500 Subject: [PATCH 090/310] remove zlib as a default rocksdb compression option Signed-off-by: June Clementine Strawberry --- conduwuit-example.toml | 2 +- deps/rust-rocksdb/Cargo.toml | 2 +- src/core/config/mod.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 9b6f6ce0..541f062d 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -821,7 +821,7 @@ # Type of RocksDB database compression to use. # -# Available options are "zstd", "zlib", "bz2", "lz4", or "none". +# Available options are "zstd", "bz2", "lz4", or "none". # # It is best to use ZSTD as an overall good balance between # speed/performance, storage, IO amplification, and CPU usage. For more diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index 61bd2333..35f755b4 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -10,7 +10,7 @@ repository.workspace = true version = "0.0.1" [features] -default = ["lz4", "zstd", "zlib", "bzip2"] +default = ["lz4", "zstd", "bzip2"] jemalloc = ["rust-rocksdb/jemalloc"] io-uring = ["rust-rocksdb/io-uring"] numa = ["rust-rocksdb/numa"] # unused by rocksdb for now diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 67c3b95c..5a4819e0 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -975,7 +975,7 @@ pub struct Config { /// Type of RocksDB database compression to use. 
/// - /// Available options are "zstd", "zlib", "bz2", "lz4", or "none". + /// Available options are "zstd", "bz2", "lz4", or "none". /// /// It is best to use ZSTD as an overall good balance between /// speed/performance, storage, IO amplification, and CPU usage. For more From 657e91fd4226d2521e9e7bb15d5982e62ad68624 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 00:34:17 -0500 Subject: [PATCH 091/310] dont send push notifications from ignored users PDUs Signed-off-by: June Clementine Strawberry --- src/service/rooms/timeline/mod.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 35c972fa..138340a4 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -368,7 +368,7 @@ impl Service { .state_accessor .room_state_get_content(&pdu.room_id, &StateEventType::RoomPowerLevels, "") .await - .map_err(|_| err!(Database("invalid m.room.power_levels event"))) + .map_err(|e| err!(Database(warn!("invalid m.room.power_levels event: {e}")))) .unwrap_or_default(); let sync_pdu = pdu.to_sync_room_event(); @@ -377,9 +377,10 @@ impl Service { .services .state_cache .active_local_users_in_room(&pdu.room_id) - // Don't notify the sender of their own events - .ready_filter(|user| user != &pdu.sender) .map(ToOwned::to_owned) + // Don't notify the sender of their own events, and dont send from ignored users + .ready_filter(|user| user != &pdu.sender) + .filter_map(|recipient_user| async move { (!self.services.users.user_is_ignored(&pdu.sender, recipient_user).await).then_some(recipient_user) }) .collect() .await; From 931fd4c80215cee5cd709d42c86d1fefe0844fe1 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 00:44:57 -0500 Subject: [PATCH 092/310] add missing target Signed-off-by: June Clementine Strawberry --- rust-toolchain.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 00fb6cee..97b4a789 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -24,5 +24,6 @@ targets = [ "x86_64-unknown-linux-gnu", "x86_64-unknown-linux-musl", "aarch64-unknown-linux-musl", + "aarch64-unknown-linux-gnu", #"aarch64-apple-darwin", ] From ecea0cff69d583439e4a84fba6bd2d5aaba8faee Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 00:51:13 -0500 Subject: [PATCH 093/310] fix TestFetchMessagesFromNonExistentRoom complement test Signed-off-by: June Clementine Strawberry --- src/api/client/message.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 571a238a..c755cc47 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -1,6 +1,6 @@ use axum::extract::State; use conduwuit::{ - Event, PduCount, PduEvent, Result, at, + Err, Event, PduCount, PduEvent, Result, at, utils::{ IterStream, ReadyExt, result::{FlatOk, LogErr}, @@ -68,6 +68,10 @@ pub(crate) async fn get_message_events_route( let room_id = &body.room_id; let filter = &body.filter; + if !services.rooms.metadata.exists(room_id).await { + return Err!(Request(Forbidden("Room does not exist to this server"))); + } + let from: PduCount = body .from .as_deref() From c92678ecbeb55cf323758da08e8c36e65496aa38 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 13:08:01 -0500 Subject: [PATCH 094/310] dont build with zlib in the nix flake Signed-off-by: June Clementine Strawberry --- 
Cargo.lock | 4 ++-- Cargo.toml | 1 - deps/rust-rocksdb/Cargo.toml | 2 +- flake.nix | 9 +++++++-- src/service/rooms/timeline/mod.rs | 4 ++-- 5 files changed, 12 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d51bb966..2ade8b83 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3685,7 +3685,7 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.32.0+9.10.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=513133a3dc24b667f32933aa3247c6ec71a958f3#513133a3dc24b667f32933aa3247c6ec71a958f3" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=a5d5358ca1358f828283e1558cf6a402b6cbea34#a5d5358ca1358f828283e1558cf6a402b6cbea34" dependencies = [ "bindgen", "bzip2-sys", @@ -3702,7 +3702,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.36.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=513133a3dc24b667f32933aa3247c6ec71a958f3#513133a3dc24b667f32933aa3247c6ec71a958f3" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=a5d5358ca1358f828283e1558cf6a402b6cbea34#a5d5358ca1358f828283e1558cf6a402b6cbea34" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/Cargo.toml b/Cargo.toml index 7f08a21a..5edcc60a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -389,7 +389,6 @@ features = [ "mt_static", "lz4", "zstd", - "zlib", "bzip2", ] diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml index 35f755b4..f6e0a54f 100644 --- a/deps/rust-rocksdb/Cargo.toml +++ b/deps/rust-rocksdb/Cargo.toml @@ -27,7 +27,7 @@ malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] [dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "513133a3dc24b667f32933aa3247c6ec71a958f3" +rev = "a5d5358ca1358f828283e1558cf6a402b6cbea34" #branch = "master" default-features = false diff --git a/flake.nix b/flake.nix index 04dee681..faff87d6 100644 --- a/flake.nix +++ b/flake.nix @@ -64,8 +64,10 @@ patches = []; cmakeFlags = pkgs.lib.subtractLists [ - # no real reason to have snappy, no one uses this + # no real reason to have snappy or zlib, no one uses this "-DWITH_SNAPPY=1" + "-DZLIB=1" + "-DWITH_ZLIB=1" # we dont need to use ldb or sst_dump (core_tools) "-DWITH_CORE_TOOLS=1" # we dont need to build rocksdb tests @@ -82,6 +84,8 @@ ++ [ # no real reason to have snappy, no one uses this "-DWITH_SNAPPY=0" + "-DZLIB=0" + "-DWITH_ZLIB=0" # we dont need to use ldb or sst_dump (core_tools) "-DWITH_CORE_TOOLS=0" # we dont need trace tools @@ -171,7 +175,8 @@ sccache ] # liburing is Linux-exclusive - ++ lib.optional stdenv.hostPlatform.isLinux liburing) + ++ lib.optional stdenv.hostPlatform.isLinux liburing + ++ lib.optional stdenv.hostPlatform.isLinux numactl) ++ scope.main.buildInputs ++ scope.main.propagatedBuildInputs ++ scope.main.nativeBuildInputs; diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 138340a4..276b8b6a 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -379,8 +379,8 @@ impl Service { .active_local_users_in_room(&pdu.room_id) .map(ToOwned::to_owned) // Don't notify the sender of their own events, and dont send from ignored users - .ready_filter(|user| user != &pdu.sender) - .filter_map(|recipient_user| async move { (!self.services.users.user_is_ignored(&pdu.sender, recipient_user).await).then_some(recipient_user) }) + .ready_filter(|user| *user != pdu.sender) + .filter_map(|recipient_user| async move { 
(!self.services.users.user_is_ignored(&pdu.sender, &recipient_user).await).then_some(recipient_user) }) .collect() .await; From d80e61cbee21706454d1033ba46b51e4dcbb8679 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 17:53:23 -0500 Subject: [PATCH 095/310] bump ring to 0.17.12 Signed-off-by: June Clementine Strawberry --- Cargo.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2ade8b83..3a57df7b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2364,7 +2364,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -3474,9 +3474,9 @@ checksum = "57397d16646700483b67d2dd6511d79318f9d057fdbd21a4066aeac8b41d310a" [[package]] name = "ring" -version = "0.17.11" +version = "0.17.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da5349ae27d3887ca812fb375b45a4fbb36d8d12d2df394968cd86e35683fe73" +checksum = "ed9b823fa29b721a59671b41d6b06e66b29e0628e207e8b1c3ceeda701ec928d" dependencies = [ "cc", "cfg-if", From f34e0b21a3cbf7eaa737256fc57c13719b225507 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 18:12:54 -0500 Subject: [PATCH 096/310] remove rust-rocksdb dylib wrapper as we have a fork already Signed-off-by: June Clementine Strawberry --- Cargo.lock | 10 +----- Cargo.toml | 9 +++--- deps/rust-rocksdb/Cargo.toml | 42 ------------------------ deps/rust-rocksdb/lib.rs | 62 ------------------------------------ 4 files changed, 6 insertions(+), 117 deletions(-) delete mode 100644 deps/rust-rocksdb/Cargo.toml delete mode 100644 deps/rust-rocksdb/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 3a57df7b..9a46f008 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -848,7 +848,7 @@ dependencies = [ "log", "minicbor", "minicbor-serde", - "rust-rocksdb-uwu", + "rust-rocksdb", "serde", "serde_json", "tokio", @@ -3706,14 +3706,6 @@ source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=a5d5358ca dependencies = [ "libc", "rust-librocksdb-sys", - "serde", -] - -[[package]] -name = "rust-rocksdb-uwu" -version = "0.0.1" -dependencies = [ - "rust-rocksdb", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 5edcc60a..62c90119 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -382,8 +382,9 @@ features = [ ] [workspace.dependencies.rust-rocksdb] -path = "deps/rust-rocksdb" -package = "rust-rocksdb-uwu" +git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" +rev = "a5d5358ca1358f828283e1558cf6a402b6cbea34" +default-features = false features = [ "multi-threaded-cf", "mt_static", @@ -683,7 +684,7 @@ inherits = "release" # To enable hot-reloading: # 1. Uncomment all of the rustflags here. -# 2. Uncomment crate-type=dylib in src/*/Cargo.toml and deps/rust-rocksdb/Cargo.toml +# 2. Uncomment crate-type=dylib in src/*/Cargo.toml # # opt-level, mir-opt-level, validate-mir are not known to interfere with reloading # and can be raised if build times are tolerable. 
@@ -751,7 +752,7 @@ inherits = "dev" # '-Clink-arg=-Wl,-z,lazy', #] -[profile.dev.package.rust-rocksdb-uwu] +[profile.dev.package.rust-rocksdb] inherits = "dev" debug = 'limited' incremental = false diff --git a/deps/rust-rocksdb/Cargo.toml b/deps/rust-rocksdb/Cargo.toml deleted file mode 100644 index f6e0a54f..00000000 --- a/deps/rust-rocksdb/Cargo.toml +++ /dev/null @@ -1,42 +0,0 @@ -[package] -name = "rust-rocksdb-uwu" -categories.workspace = true -description = "dylib wrapper for rust-rocksdb" -edition = "2024" -keywords.workspace = true -license.workspace = true -readme.workspace = true -repository.workspace = true -version = "0.0.1" - -[features] -default = ["lz4", "zstd", "bzip2"] -jemalloc = ["rust-rocksdb/jemalloc"] -io-uring = ["rust-rocksdb/io-uring"] -numa = ["rust-rocksdb/numa"] # unused by rocksdb for now -valgrind = ["rust-rocksdb/valgrind"] -lz4 = ["rust-rocksdb/lz4"] -zstd = ["rust-rocksdb/zstd"] -zlib = ["rust-rocksdb/zlib"] -bzip2 = ["rust-rocksdb/bzip2"] -rtti = ["rust-rocksdb/rtti"] -mt_static = ["rust-rocksdb/mt_static"] -multi-threaded-cf = ["rust-rocksdb/multi-threaded-cf"] -serde1 = ["rust-rocksdb/serde1"] -malloc-usable-size = ["rust-rocksdb/malloc-usable-size"] - -[dependencies.rust-rocksdb] -git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "a5d5358ca1358f828283e1558cf6a402b6cbea34" -#branch = "master" -default-features = false - -[lib] -path = "lib.rs" -crate-type = [ - "rlib", -# "dylib" -] - -[lints] -workspace = true diff --git a/deps/rust-rocksdb/lib.rs b/deps/rust-rocksdb/lib.rs deleted file mode 100644 index 8dbbda98..00000000 --- a/deps/rust-rocksdb/lib.rs +++ /dev/null @@ -1,62 +0,0 @@ -pub use rust_rocksdb::*; - -#[cfg_attr(not(conduwuit_mods), link(name = "rocksdb"))] -#[cfg_attr(conduwuit_mods, link(name = "rocksdb", kind = "static"))] -unsafe extern "C" { - pub unsafe fn rocksdb_list_column_families(); - pub unsafe fn rocksdb_logger_create_stderr_logger(); - pub unsafe fn rocksdb_logger_create_callback_logger(); - pub unsafe fn rocksdb_options_set_info_log(); - pub unsafe fn rocksdb_get_options_from_string(); - pub unsafe fn rocksdb_writebatch_create(); - pub unsafe fn rocksdb_writebatch_destroy(); - pub unsafe fn rocksdb_writebatch_put_cf(); - pub unsafe fn rocksdb_writebatch_delete_cf(); - pub unsafe fn rocksdb_iter_value(); - pub unsafe fn rocksdb_iter_seek_to_last(); - pub unsafe fn rocksdb_iter_seek_for_prev(); - pub unsafe fn rocksdb_iter_seek_to_first(); - pub unsafe fn rocksdb_iter_next(); - pub unsafe fn rocksdb_iter_prev(); - pub unsafe fn rocksdb_iter_seek(); - pub unsafe fn rocksdb_iter_valid(); - pub unsafe fn rocksdb_iter_get_error(); - pub unsafe fn rocksdb_iter_key(); - pub unsafe fn rocksdb_iter_destroy(); - pub unsafe fn rocksdb_livefiles(); - pub unsafe fn rocksdb_livefiles_count(); - pub unsafe fn rocksdb_livefiles_destroy(); - pub unsafe fn rocksdb_livefiles_column_family_name(); - pub unsafe fn rocksdb_livefiles_name(); - pub unsafe fn rocksdb_livefiles_size(); - pub unsafe fn rocksdb_livefiles_level(); - pub unsafe fn rocksdb_livefiles_smallestkey(); - pub unsafe fn rocksdb_livefiles_largestkey(); - pub unsafe fn rocksdb_livefiles_entries(); - pub unsafe fn rocksdb_livefiles_deletions(); - pub unsafe fn rocksdb_put_cf(); - pub unsafe fn rocksdb_delete_cf(); - pub unsafe fn rocksdb_get_pinned_cf(); - pub unsafe fn rocksdb_create_column_family(); - pub unsafe fn rocksdb_get_latest_sequence_number(); - pub unsafe fn rocksdb_batched_multi_get_cf(); - pub unsafe fn rocksdb_cancel_all_background_work(); - pub 
unsafe fn rocksdb_repair_db(); - pub unsafe fn rocksdb_list_column_families_destroy(); - pub unsafe fn rocksdb_flush(); - pub unsafe fn rocksdb_flush_wal(); - pub unsafe fn rocksdb_open_column_families(); - pub unsafe fn rocksdb_open_for_read_only_column_families(); - pub unsafe fn rocksdb_open_as_secondary_column_families(); - pub unsafe fn rocksdb_open_column_families_with_ttl(); - pub unsafe fn rocksdb_open(); - pub unsafe fn rocksdb_open_for_read_only(); - pub unsafe fn rocksdb_open_with_ttl(); - pub unsafe fn rocksdb_open_as_secondary(); - pub unsafe fn rocksdb_write(); - pub unsafe fn rocksdb_create_iterator_cf(); - pub unsafe fn rocksdb_backup_engine_create_new_backup_flush(); - pub unsafe fn rocksdb_backup_engine_options_create(); - pub unsafe fn rocksdb_write_buffer_manager_destroy(); - pub unsafe fn rocksdb_options_set_ttl(); -} From fa71162c7dd943afdf78d10710914076ec2d3c85 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 18:45:14 -0500 Subject: [PATCH 097/310] bump rocksdb to v9.11.1 Signed-off-by: June Clementine Strawberry --- Cargo.lock | 8 ++++---- Cargo.toml | 2 +- flake.lock | 8 ++++---- flake.nix | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9a46f008..56ff3c6b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3684,8 +3684,8 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" -version = "0.32.0+9.10.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=a5d5358ca1358f828283e1558cf6a402b6cbea34#a5d5358ca1358f828283e1558cf6a402b6cbea34" +version = "0.33.0+9.11.1" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=3f4c5357243defedc849ae6227490102a9f90bef#3f4c5357243defedc849ae6227490102a9f90bef" dependencies = [ "bindgen", "bzip2-sys", @@ -3701,8 +3701,8 @@ dependencies = [ [[package]] name = "rust-rocksdb" -version = "0.36.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=a5d5358ca1358f828283e1558cf6a402b6cbea34#a5d5358ca1358f828283e1558cf6a402b6cbea34" +version = "0.37.0" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=3f4c5357243defedc849ae6227490102a9f90bef#3f4c5357243defedc849ae6227490102a9f90bef" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/Cargo.toml b/Cargo.toml index 62c90119..43b2d55d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -383,7 +383,7 @@ features = [ [workspace.dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "a5d5358ca1358f828283e1558cf6a402b6cbea34" +rev = "3f4c5357243defedc849ae6227490102a9f90bef" default-features = false features = [ "multi-threaded-cf", diff --git a/flake.lock b/flake.lock index a1bd423f..3a43c4cd 100644 --- a/flake.lock +++ b/flake.lock @@ -567,16 +567,16 @@ "rocksdb": { "flake": false, "locked": { - "lastModified": 1741234703, - "narHash": "sha256-sT5g/RM9vrwY6AmjSfl4RoJPGtcJCkZCsxiX3PFJgKQ=", + "lastModified": 1741303627, + "narHash": "sha256-7HpydEinYHvskC4vkl1Yie2kg2yShfZbREAyQMkvEUc=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "185593ce4534091e57025e9f3571dbf681c04631", + "rev": "cecee0e4fbff2b69e3edc6e9b5b751d8098a3ba1", "type": "github" }, "original": { "owner": "girlbossceo", - "ref": "v9.9.3", + "ref": "v9.11.1", "repo": "rocksdb", "type": "github" } diff --git a/flake.nix b/flake.nix index faff87d6..8f08a7d9 100644 --- a/flake.nix +++ b/flake.nix @@ -9,7 +9,7 @@ flake-utils.url = "github:numtide/flake-utils?ref=main"; nix-filter.url = "github:numtide/nix-filter?ref=main"; 
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable"; - rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.9.3"; flake = false; }; + rocksdb = { url = "github:girlbossceo/rocksdb?ref=v9.11.1"; flake = false; }; liburing = { url = "github:axboe/liburing?ref=master"; flake = false; }; }; From 20dd1d148dd31948d9055c5a19ba8f8e13041363 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 18:45:48 -0500 Subject: [PATCH 098/310] add new complement test results Signed-off-by: June Clementine Strawberry --- .../complement/test_results.jsonl | 425 +++++++++++++++++- 1 file changed, 421 insertions(+), 4 deletions(-) diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 11339049..fed43b48 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -1,5 +1,26 @@ {"Action":"pass","Test":"TestACLs"} +{"Action":"pass","Test":"TestAddAccountData"} +{"Action":"pass","Test":"TestAddAccountData/Can_add_global_account_data"} +{"Action":"pass","Test":"TestAddAccountData/Can_add_room_account_data"} +{"Action":"fail","Test":"TestArchivedRoomsHistory"} +{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events"} +{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events/incremental_sync"} +{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events/initial_sync"} +{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_is_empty"} +{"Action":"skip","Test":"TestArchivedRoomsHistory/timeline_is_empty/incremental_sync"} +{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_is_empty/initial_sync"} +{"Action":"fail","Test":"TestAsyncUpload"} +{"Action":"fail","Test":"TestAsyncUpload/Cannot_upload_to_a_media_ID_that_has_already_been_uploaded_to"} +{"Action":"fail","Test":"TestAsyncUpload/Create_media"} +{"Action":"fail","Test":"TestAsyncUpload/Download_media"} +{"Action":"fail","Test":"TestAsyncUpload/Download_media_over__matrix/client/v1/media/download"} +{"Action":"fail","Test":"TestAsyncUpload/Not_yet_uploaded"} +{"Action":"fail","Test":"TestAsyncUpload/Upload_media"} +{"Action":"pass","Test":"TestAvatarUrlUpdate"} {"Action":"pass","Test":"TestBannedUserCannotSendJoin"} +{"Action":"skip","Test":"TestCanRegisterAdmin"} +{"Action":"pass","Test":"TestCannotKickLeftUser"} +{"Action":"fail","Test":"TestCannotKickNonPresentUser"} {"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room"} {"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/event_with_mismatched_state_key"} {"Action":"pass","Test":"TestCannotSendKnockViaSendKnockInMSC3787Room/invite_event"} @@ -42,30 +63,124 @@ {"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV2/knock_event"} {"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV2/non-state_membership_event"} {"Action":"pass","Test":"TestCannotSendNonLeaveViaSendLeaveV2/regular_event"} +{"Action":"pass","Test":"TestChangePassword"} +{"Action":"pass","Test":"TestChangePassword/After_changing_password,_a_different_session_no_longer_works_by_default"} +{"Action":"pass","Test":"TestChangePassword/After_changing_password,_can't_log_in_with_old_password"} +{"Action":"pass","Test":"TestChangePassword/After_changing_password,_can_log_in_with_new_password"} +{"Action":"pass","Test":"TestChangePassword/After_changing_password,_different_sessions_can_optionally_be_kept"} +{"Action":"pass","Test":"TestChangePassword/After_changing_password,_existing_session_still_works"} 
+{"Action":"fail","Test":"TestChangePasswordPushers"} +{"Action":"fail","Test":"TestChangePasswordPushers/Pushers_created_with_a_different_access_token_are_deleted_on_password_change"} +{"Action":"pass","Test":"TestChangePasswordPushers/Pushers_created_with_the_same_access_token_are_not_deleted_on_password_change"} +{"Action":"fail","Test":"TestClientSpacesSummary"} +{"Action":"fail","Test":"TestClientSpacesSummary/max_depth"} +{"Action":"fail","Test":"TestClientSpacesSummary/pagination"} +{"Action":"fail","Test":"TestClientSpacesSummary/query_whole_graph"} +{"Action":"fail","Test":"TestClientSpacesSummary/redact_link"} +{"Action":"fail","Test":"TestClientSpacesSummary/suggested_only"} +{"Action":"fail","Test":"TestClientSpacesSummaryJoinRules"} +{"Action":"pass","Test":"TestContent"} +{"Action":"pass","Test":"TestContentCSAPIMediaV1"} {"Action":"pass","Test":"TestContentMediaV1"} +{"Action":"fail","Test":"TestCumulativeJoinLeaveJoinSync"} +{"Action":"pass","Test":"TestDeactivateAccount"} +{"Action":"pass","Test":"TestDeactivateAccount/After_deactivating_account,_can't_log_in_with_password"} +{"Action":"pass","Test":"TestDeactivateAccount/Can't_deactivate_account_with_wrong_password"} +{"Action":"pass","Test":"TestDeactivateAccount/Can_deactivate_account"} +{"Action":"pass","Test":"TestDeactivateAccount/Password_flow_is_available"} +{"Action":"fail","Test":"TestDelayedEvents"} +{"Action":"fail","Test":"TestDelayedEvents/cannot_update_a_delayed_event_with_an_invalid_action"} +{"Action":"pass","Test":"TestDelayedEvents/cannot_update_a_delayed_event_without_a_delay_ID"} +{"Action":"fail","Test":"TestDelayedEvents/cannot_update_a_delayed_event_without_a_request_body"} +{"Action":"fail","Test":"TestDelayedEvents/cannot_update_a_delayed_event_without_an_action"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_events_are_empty_on_startup"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_message_events_are_sent_on_timeout"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_are_cancelled_by_a_more_recent_state_event_from_another_user"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_are_cancelled_by_a_more_recent_state_event_from_the_same_user"} +{"Action":"skip","Test":"TestDelayedEvents/delayed_state_events_are_kept_on_server_restart"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_are_sent_on_timeout"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_can_be_cancelled"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_can_be_restarted"} +{"Action":"fail","Test":"TestDelayedEvents/delayed_state_events_can_be_sent_on_request"} +{"Action":"pass","Test":"TestDelayedEvents/parallel"} +{"Action":"pass","Test":"TestDelayedEvents/parallel/cannot_cancel_a_delayed_event_without_a_matching_delay_ID"} +{"Action":"pass","Test":"TestDelayedEvents/parallel/cannot_restart_a_delayed_event_without_a_matching_delay_ID"} +{"Action":"pass","Test":"TestDelayedEvents/parallel/cannot_send_a_delayed_event_without_a_matching_delay_ID"} +{"Action":"fail","Test":"TestDeletingDeviceRemovesDeviceLocalNotificationSettings"} +{"Action":"fail","Test":"TestDeletingDeviceRemovesDeviceLocalNotificationSettings/Deleting_a_user's_device_should_delete_any_local_notification_settings_entries_from_their_account_data"} +{"Action":"pass","Test":"TestDemotingUsersViaUsersDefault"} +{"Action":"fail","Test":"TestDeviceListUpdates"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_joining_a_room_with_a_local_user"} 
+{"Action":"fail","Test":"TestDeviceListUpdates/when_joining_a_room_with_a_remote_user"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_leaving_a_room_with_a_local_user"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_leaving_a_room_with_a_remote_user"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_local_user_joins_a_room"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_local_user_leaves_a_room"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_local_user_rejoins_a_room"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_remote_user_joins_a_room"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_remote_user_leaves_a_room"} +{"Action":"fail","Test":"TestDeviceListUpdates/when_remote_user_rejoins_a_room"} {"Action":"fail","Test":"TestDeviceListsUpdateOverFederation"} {"Action":"fail","Test":"TestDeviceListsUpdateOverFederation/good_connectivity"} {"Action":"fail","Test":"TestDeviceListsUpdateOverFederation/interrupted_connectivity"} {"Action":"fail","Test":"TestDeviceListsUpdateOverFederation/stopped_server"} {"Action":"fail","Test":"TestDeviceListsUpdateOverFederationOnRoomJoin"} +{"Action":"fail","Test":"TestDeviceManagement"} +{"Action":"fail","Test":"TestDeviceManagement/DELETE_/device/{deviceId}"} +{"Action":"fail","Test":"TestDeviceManagement/DELETE_/device/{deviceId}_requires_UI_auth_user_to_match_device_owner"} +{"Action":"pass","Test":"TestDeviceManagement/GET_/device/{deviceId}"} +{"Action":"pass","Test":"TestDeviceManagement/GET_/device/{deviceId}_gives_a_404_for_unknown_devices"} +{"Action":"pass","Test":"TestDeviceManagement/GET_/devices"} +{"Action":"pass","Test":"TestDeviceManagement/PUT_/device/{deviceId}_gives_a_404_for_unknown_devices"} +{"Action":"pass","Test":"TestDeviceManagement/PUT_/device/{deviceId}_updates_device_fields"} +{"Action":"pass","Test":"TestDisplayNameUpdate"} +{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules"} +{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel"} +{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel/{isVerified:false_firstMessageIndex:10_forwardedCount:5}"} +{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel/{isVerified:true_firstMessageIndex:10_forwardedCount:5}"} +{"Action":"pass","Test":"TestEvent"} +{"Action":"pass","Test":"TestEvent/Parallel"} +{"Action":"pass","Test":"TestEvent/Parallel/Large_Event"} +{"Action":"pass","Test":"TestEvent/Parallel/Large_State_Event"} {"Action":"pass","Test":"TestEventAuth"} {"Action":"pass","Test":"TestEventAuth/returns_auth_events_for_the_requested_event"} {"Action":"pass","Test":"TestEventAuth/returns_the_auth_chain_for_the_requested_event"} -{"Action":"pass","Test":"TestFederatedClientSpaces"} +{"Action":"fail","Test":"TestEventRelationships"} +{"Action":"fail","Test":"TestFederatedClientSpaces"} +{"Action":"fail","Test":"TestFederatedEventRelationships"} {"Action":"fail","Test":"TestFederationKeyUploadQuery"} {"Action":"fail","Test":"TestFederationKeyUploadQuery/Can_claim_remote_one_time_key_using_POST"} {"Action":"fail","Test":"TestFederationKeyUploadQuery/Can_query_remote_device_keys_using_POST"} {"Action":"pass","Test":"TestFederationRedactSendsWithoutEvent"} {"Action":"pass","Test":"TestFederationRejectInvite"} -{"Action":"pass","Test":"TestFederationRoomsInvite"} -{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel"} +{"Action":"fail","Test":"TestFederationRoomsInvite"} +{"Action":"fail","Test":"TestFederationRoomsInvite/Parallel"} 
{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation"} -{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_for_empty_room"} +{"Action":"fail","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_for_empty_room"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_several_times"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_has_'is_direct'_flag_in_prev_content_after_joining"} +{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Remote_invited_user_can_join_the_room_when_homeserver_is_already_participating_in_the_room"} +{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Remote_invited_user_can_reject_invite_when_homeserver_is_already_participating_in_the_room"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Remote_invited_user_can_see_room_metadata"} {"Action":"pass","Test":"TestFederationThumbnail"} +{"Action":"pass","Test":"TestFetchEvent"} +{"Action":"fail","Test":"TestFetchEventNonWorldReadable"} +{"Action":"pass","Test":"TestFetchEventWorldReadable"} +{"Action":"fail","Test":"TestFetchHistoricalInvitedEventFromBeforeInvite"} +{"Action":"pass","Test":"TestFetchHistoricalInvitedEventFromBetweenInvite"} +{"Action":"fail","Test":"TestFetchHistoricalJoinedEventDenied"} +{"Action":"pass","Test":"TestFetchHistoricalSharedEvent"} +{"Action":"pass","Test":"TestFetchMessagesFromNonExistentRoom"} +{"Action":"pass","Test":"TestFilter"} +{"Action":"fail","Test":"TestFilterMessagesByRelType"} +{"Action":"fail","Test":"TestGappedSyncLeaveSection"} +{"Action":"fail","Test":"TestGetFilteredRoomMembers"} +{"Action":"fail","Test":"TestGetFilteredRoomMembers/membership/join"} +{"Action":"fail","Test":"TestGetFilteredRoomMembers/membership/leave"} +{"Action":"fail","Test":"TestGetFilteredRoomMembers/not_membership"} {"Action":"fail","Test":"TestGetMissingEventsGapFilling"} +{"Action":"pass","Test":"TestGetRoomMembers"} +{"Action":"fail","Test":"TestGetRoomMembersAtPoint"} {"Action":"fail","Test":"TestInboundCanReturnMissingEvents"} {"Action":"fail","Test":"TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_invited_visibility"} {"Action":"fail","Test":"TestInboundCanReturnMissingEvents/Inbound_federation_can_return_missing_events_for_joined_visibility"} @@ -76,15 +191,41 @@ {"Action":"pass","Test":"TestInboundFederationProfile/Inbound_federation_can_query_profile_data"} {"Action":"pass","Test":"TestInboundFederationProfile/Non-numeric_ports_in_server_names_are_rejected"} {"Action":"fail","Test":"TestInboundFederationRejectsEventsWithRejectedAuthEvents"} +{"Action":"fail","Test":"TestInviteFromIgnoredUsersDoesNotAppearInSync"} {"Action":"pass","Test":"TestIsDirectFlagFederation"} {"Action":"pass","Test":"TestIsDirectFlagLocal"} {"Action":"pass","Test":"TestJoinFederatedRoomFailOver"} +{"Action":"fail","Test":"TestJoinFederatedRoomFromApplicationServiceBridgeUser"} +{"Action":"fail","Test":"TestJoinFederatedRoomFromApplicationServiceBridgeUser/join_remote_federated_room_as_application_service_user"} {"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents"} {"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_missing_signatures_shouldn't_block_room_join"} 
{"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_bad_signatures_shouldn't_block_room_join"} {"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_state_with_unverifiable_auth_events_shouldn't_block_room_join"} {"Action":"pass","Test":"TestJoinFederatedRoomWithUnverifiableEvents//send_join_response_with_unobtainable_keys_shouldn't_block_room_join"} {"Action":"pass","Test":"TestJoinViaRoomIDAndServerName"} +{"Action":"fail","Test":"TestJson"} +{"Action":"fail","Test":"TestJson/Parallel"} +{"Action":"fail","Test":"TestJson/Parallel/Invalid_JSON_special_values"} +{"Action":"fail","Test":"TestJson/Parallel/Invalid_numerical_values"} +{"Action":"fail","Test":"TestJumpToDateEndpoint"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation/can_paginate_after_getting_remote_event_from_timestamp_to_event_endpoint"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation/looking_backwards,_should_be_able_to_find_event_that_was_sent_before_we_joined"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation/looking_forwards,_should_be_able_to_find_event_that_was_sent_before_we_joined"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/federation/when_looking_backwards_before_the_room_was_created,_should_be_able_to_find_event_that_was_imported"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_find_event_after_given_timestmap"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_find_event_before_given_timestmap"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_find_next_event_topologically_after_given_timestmap_when_all_message_timestamps_are_the_same"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_find_next_event_topologically_before_given_timestamp_when_all_message_timestamps_are_the_same"} +{"Action":"pass","Test":"TestJumpToDateEndpoint/parallel/should_find_nothing_after_the_latest_timestmap"} +{"Action":"pass","Test":"TestJumpToDateEndpoint/parallel/should_find_nothing_before_the_earliest_timestmap"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_not_be_able_to_query_a_private_room_you_are_not_a_member_of"} +{"Action":"fail","Test":"TestJumpToDateEndpoint/parallel/should_not_be_able_to_query_a_public_room_you_are_not_a_member_of"} +{"Action":"fail","Test":"TestKeyChangesLocal"} +{"Action":"fail","Test":"TestKeyChangesLocal/New_login_should_create_a_device_lists.changed_entry"} +{"Action":"fail","Test":"TestKeyClaimOrdering"} +{"Action":"pass","Test":"TestKeysQueryWithDeviceIDAsObjectFails"} {"Action":"fail","Test":"TestKnockRoomsInPublicRoomsDirectory"} {"Action":"fail","Test":"TestKnockRoomsInPublicRoomsDirectoryInMSC3787Room"} {"Action":"fail","Test":"TestKnocking"} @@ -139,9 +280,35 @@ {"Action":"fail","Test":"TestKnockingInMSC3787Room/Knocking_on_a_room_with_join_rule_'knock'_should_succeed#01"} {"Action":"pass","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock"} {"Action":"pass","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01"} +{"Action":"pass","Test":"TestLeakyTyping"} +{"Action":"fail","Test":"TestLeaveEventInviteRejection"} +{"Action":"fail","Test":"TestLeaveEventVisibility"} +{"Action":"fail","Test":"TestLeftRoomFixture"} 
+{"Action":"fail","Test":"TestLeftRoomFixture/Can_get_'m.room.name'_state_for_a_departed_room"} +{"Action":"fail","Test":"TestLeftRoomFixture/Can_get_rooms/{roomId}/members_for_a_departed_room"} +{"Action":"pass","Test":"TestLeftRoomFixture/Can_get_rooms/{roomId}/messages_for_a_departed_room"} +{"Action":"fail","Test":"TestLeftRoomFixture/Can_get_rooms/{roomId}/state_for_a_departed_room"} +{"Action":"pass","Test":"TestLeftRoomFixture/Getting_messages_going_forward_is_limited_for_a_departed_room"} {"Action":"pass","Test":"TestLocalPngThumbnail"} {"Action":"pass","Test":"TestLocalPngThumbnail/test_/_matrix/client/v1/media_endpoint"} {"Action":"pass","Test":"TestLocalPngThumbnail/test_/_matrix/media/v3_endpoint"} +{"Action":"fail","Test":"TestLogin"} +{"Action":"fail","Test":"TestLogin/parallel"} +{"Action":"pass","Test":"TestLogin/parallel/GET_/login_yields_a_set_of_flows"} +{"Action":"fail","Test":"TestLogin/parallel/Login_with_uppercase_username_works_and_GET_/whoami_afterwards_also"} +{"Action":"pass","Test":"TestLogin/parallel/POST_/login_as_non-existing_user_is_rejected"} +{"Action":"pass","Test":"TestLogin/parallel/POST_/login_can_log_in_as_a_user_with_just_the_local_part_of_the_id"} +{"Action":"pass","Test":"TestLogin/parallel/POST_/login_can_login_as_user"} +{"Action":"pass","Test":"TestLogin/parallel/POST_/login_returns_the_same_device_id_as_that_in_the_request"} +{"Action":"pass","Test":"TestLogin/parallel/POST_/login_wrong_password_is_rejected"} +{"Action":"pass","Test":"TestLogout"} +{"Action":"pass","Test":"TestLogout/Can_logout_all_devices"} +{"Action":"pass","Test":"TestLogout/Can_logout_current_device"} +{"Action":"pass","Test":"TestLogout/Request_to_logout_with_invalid_an_access_token_is_rejected"} +{"Action":"pass","Test":"TestLogout/Request_to_logout_without_an_access_token_is_rejected"} +{"Action":"fail","Test":"TestMSC3757OwnedState"} +{"Action":"pass","Test":"TestMSC3967"} +{"Action":"pass","Test":"TestMediaConfig"} {"Action":"pass","Test":"TestMediaFilenames"} {"Action":"pass","Test":"TestMediaFilenames/Parallel"} {"Action":"pass","Test":"TestMediaFilenames/Parallel/ASCII"} @@ -178,11 +345,74 @@ {"Action":"pass","Test":"TestMediaWithoutFileNameCSMediaV1/parallel/Can_download_without_a_file_name_locally"} {"Action":"pass","Test":"TestMediaWithoutFileNameCSMediaV1/parallel/Can_download_without_a_file_name_over_federation"} {"Action":"pass","Test":"TestMediaWithoutFileNameCSMediaV1/parallel/Can_upload_without_a_file_name"} +{"Action":"fail","Test":"TestMembersLocal"} +{"Action":"fail","Test":"TestMembersLocal/Parallel"} +{"Action":"pass","Test":"TestMembersLocal/Parallel/Existing_members_see_new_members'_join_events"} +{"Action":"fail","Test":"TestMembersLocal/Parallel/Existing_members_see_new_members'_presence_(in_incremental_sync)"} +{"Action":"pass","Test":"TestMembersLocal/Parallel/Existing_members_see_new_members'_presence_(in_initial_sync)"} +{"Action":"pass","Test":"TestMembersLocal/Parallel/New_room_members_see_their_own_join_event"} +{"Action":"fail","Test":"TestMembershipOnEvents"} {"Action":"fail","Test":"TestNetworkPartitionOrdering"} +{"Action":"pass","Test":"TestNotPresentUserCannotBanOthers"} +{"Action":"fail","Test":"TestOlderLeftRoomsNotInLeaveSection"} +{"Action":"fail","Test":"TestOutboundFederationEventSizeGetMissingEvents"} {"Action":"fail","Test":"TestOutboundFederationIgnoresMissingEventWithBadJSONForRoomVersion6"} {"Action":"pass","Test":"TestOutboundFederationProfile"} 
{"Action":"pass","Test":"TestOutboundFederationProfile/Outbound_federation_can_query_profile_data"} {"Action":"pass","Test":"TestOutboundFederationSend"} +{"Action":"fail","Test":"TestPollsLocalPushRules"} +{"Action":"fail","Test":"TestPollsLocalPushRules/Polls_push_rules_are_correctly_presented_to_the_client"} +{"Action":"pass","Test":"TestPowerLevels"} +{"Action":"pass","Test":"TestPowerLevels/GET_/rooms/:room_id/state/m.room.power_levels_can_fetch_levels"} +{"Action":"pass","Test":"TestPowerLevels/PUT_/rooms/:room_id/state/m.room.power_levels_can_set_levels"} +{"Action":"pass","Test":"TestPowerLevels/PUT_power_levels_should_not_explode_if_the_old_power_levels_were_empty"} +{"Action":"fail","Test":"TestPresence"} +{"Action":"fail","Test":"TestPresence/GET_/presence/:user_id/status_fetches_initial_status"} +{"Action":"pass","Test":"TestPresence/PUT_/presence/:user_id/status_updates_my_presence"} +{"Action":"pass","Test":"TestPresence/Presence_can_be_set_from_sync"} +{"Action":"pass","Test":"TestPresence/Presence_changes_are_reported_to_local_room_members"} +{"Action":"pass","Test":"TestPresence/Presence_changes_to_UNAVAILABLE_are_reported_to_local_room_members"} +{"Action":"pass","Test":"TestPresenceSyncDifferentRooms"} +{"Action":"pass","Test":"TestProfileAvatarURL"} +{"Action":"pass","Test":"TestProfileAvatarURL/GET_/profile/:user_id/avatar_url_publicly_accessible"} +{"Action":"pass","Test":"TestProfileAvatarURL/PUT_/profile/:user_id/avatar_url_sets_my_avatar"} +{"Action":"pass","Test":"TestProfileDisplayName"} +{"Action":"pass","Test":"TestProfileDisplayName/GET_/profile/:user_id/displayname_publicly_accessible"} +{"Action":"pass","Test":"TestProfileDisplayName/PUT_/profile/:user_id/displayname_sets_my_name"} +{"Action":"pass","Test":"TestPushRuleCacheHealth"} +{"Action":"pass","Test":"TestPushSync"} +{"Action":"pass","Test":"TestPushSync/Adding_a_push_rule_wakes_up_an_incremental_/sync"} +{"Action":"pass","Test":"TestPushSync/Disabling_a_push_rule_wakes_up_an_incremental_/sync"} +{"Action":"pass","Test":"TestPushSync/Enabling_a_push_rule_wakes_up_an_incremental_/sync"} +{"Action":"pass","Test":"TestPushSync/Push_rules_come_down_in_an_initial_/sync"} +{"Action":"pass","Test":"TestPushSync/Setting_actions_for_a_push_rule_wakes_up_an_incremental_/sync"} +{"Action":"pass","Test":"TestRegistration"} +{"Action":"pass","Test":"TestRegistration/parallel"} +{"Action":"pass","Test":"TestRegistration/parallel/GET_/register/available_returns_M_INVALID_USERNAME_for_invalid_user_name"} +{"Action":"pass","Test":"TestRegistration/parallel/GET_/register/available_returns_M_USER_IN_USE_for_registered_user_name"} +{"Action":"pass","Test":"TestRegistration/parallel/GET_/register/available_returns_available_for_unregistered_user_name"} +{"Action":"skip","Test":"TestRegistration/parallel/POST_/_synapse/admin/v1/register_admin_with_shared_secret"} +{"Action":"skip","Test":"TestRegistration/parallel/POST_/_synapse/admin/v1/register_with_shared_secret"} +{"Action":"skip","Test":"TestRegistration/parallel/POST_/_synapse/admin/v1/register_with_shared_secret_disallows_symbols"} +{"Action":"skip","Test":"TestRegistration/parallel/POST_/_synapse/admin/v1/register_with_shared_secret_downcases_capitals"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/-"} 
+{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/."} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_//"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/3"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/="} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/_"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_allows_registration_of_usernames_with_/q"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_can_create_a_user"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_downcases_capitals_in_usernames"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_rejects_if_user_already_exists"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_rejects_usernames_with_special_characters"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_/register_returns_the_same_device_id_as_that_in_the_request"} +{"Action":"pass","Test":"TestRegistration/parallel/POST_{}_returns_a_set_of_flows"} +{"Action":"pass","Test":"TestRegistration/parallel/Registration_accepts_non-ascii_passwords"} +{"Action":"pass","Test":"TestRelations"} +{"Action":"fail","Test":"TestRelationsPagination"} +{"Action":"pass","Test":"TestRelationsPaginationSync"} {"Action":"pass","Test":"TestRemoteAliasRequestsUnderstandUnicode"} {"Action":"pass","Test":"TestRemotePngThumbnail"} {"Action":"pass","Test":"TestRemotePngThumbnail/test_/_matrix/client/v1/media_endpoint"} @@ -191,6 +421,13 @@ {"Action":"fail","Test":"TestRemotePresence/Presence_changes_are_also_reported_to_remote_room_members"} {"Action":"fail","Test":"TestRemotePresence/Presence_changes_to_UNAVAILABLE_are_reported_to_remote_room_members"} {"Action":"pass","Test":"TestRemoteTyping"} +{"Action":"fail","Test":"TestRemovingAccountData"} +{"Action":"fail","Test":"TestRemovingAccountData/Deleting_a_user's_account_data_via_DELETE_works"} +{"Action":"fail","Test":"TestRemovingAccountData/Deleting_a_user's_account_data_via_PUT_works"} +{"Action":"fail","Test":"TestRemovingAccountData/Deleting_a_user's_room_account_data_via_PUT_works"} +{"Action":"fail","Test":"TestRemovingAccountData/Deleting_a_user's_room_data_via_DELETE_works"} +{"Action":"fail","Test":"TestRequestEncodingFails"} +{"Action":"fail","Test":"TestRequestEncodingFails/POST_rejects_invalid_utf-8_in_JSON"} {"Action":"fail","Test":"TestRestrictedRoomsLocalJoin"} {"Action":"pass","Test":"TestRestrictedRoomsLocalJoin/Join_should_fail_initially"} {"Action":"pass","Test":"TestRestrictedRoomsLocalJoin/Join_should_fail_when_left_allowed_room"} @@ -221,12 +458,170 @@ {"Action":"fail","Test":"TestRestrictedRoomsRemoteJoinLocalUserInMSC3787Room"} {"Action":"pass","Test":"TestRestrictedRoomsSpacesSummaryFederation"} {"Action":"fail","Test":"TestRestrictedRoomsSpacesSummaryLocal"} +{"Action":"pass","Test":"TestRoomAlias"} +{"Action":"pass","Test":"TestRoomAlias/Parallel"} +{"Action":"pass","Test":"TestRoomAlias/Parallel/GET_/rooms/:room_id/aliases_lists_aliases"} +{"Action":"pass","Test":"TestRoomAlias/Parallel/Only_room_members_can_list_aliases_of_a_room"} +{"Action":"pass","Test":"TestRoomAlias/Parallel/PUT_/directory/room/:room_alias_creates_alias"} +{"Action":"pass","Test":"TestRoomAlias/Parallel/Room_aliases_can_contain_Unicode"} 
+{"Action":"fail","Test":"TestRoomCanonicalAlias"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel"} +{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_accepts_present_aliases"} +{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_accepts_present_alt_aliases"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_alias_pointing_to_different_local_room"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_alt_alias_pointing_to_different_local_room"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_invalid_aliases"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_invalid_aliases#01"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_missing_aliases"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_missing_aliases#01"} +{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_setting_rejects_deleted_aliases"} +{"Action":"pass","Test":"TestRoomCreate"} +{"Action":"pass","Test":"TestRoomCreate/Parallel"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/Can_/sync_newly_created_room"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_creates_a_room_with_the_given_version"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_ignores_attempts_to_set_the_room_version_via_creation_content"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_private_room"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_private_room_with_invites"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_public_room"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_room_with_a_name"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_makes_a_room_with_a_topic"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_rejects_attempts_to_create_rooms_with_numeric_versions"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/POST_/createRoom_rejects_attempts_to_create_rooms_with_unknown_versions"} +{"Action":"pass","Test":"TestRoomCreate/Parallel/Rooms_can_be_created_with_an_initial_invite_list_(SYN-205)"} +{"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself"} +{"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel"} +{"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Joining_room_twice_is_idempotent"} +{"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Room_creation_reports_m.room.create_to_myself"} +{"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Room_creation_reports_m.room.member_to_myself"} +{"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Setting_room_topic_reports_m.room.topic_to_myself"} +{"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel/Setting_state_twice_is_idempotent"} +{"Action":"fail","Test":"TestRoomDeleteAlias"} +{"Action":"fail","Test":"TestRoomDeleteAlias/Parallel"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Alias_creators_can_delete_alias_with_no_ops"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Alias_creators_can_delete_canonical_alias_with_no_ops"} +{"Action":"fail","Test":"TestRoomDeleteAlias/Parallel/Can_delete_canonical_alias"} 
+{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Deleting_a_non-existent_alias_should_return_a_404"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Regular_users_can_add_and_delete_aliases_in_the_default_room_configuration"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Regular_users_can_add_and_delete_aliases_when_m.room.aliases_is_restricted"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Users_can't_delete_other's_aliases"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Users_with_sufficient_power-level_can_delete_other's_aliases"} +{"Action":"fail","Test":"TestRoomForget"} +{"Action":"fail","Test":"TestRoomForget/Parallel"} +{"Action":"pass","Test":"TestRoomForget/Parallel/Can't_forget_room_you're_still_in"} +{"Action":"pass","Test":"TestRoomForget/Parallel/Can_forget_room_we_weren't_an_actual_member"} +{"Action":"pass","Test":"TestRoomForget/Parallel/Can_forget_room_you've_been_kicked_from"} +{"Action":"pass","Test":"TestRoomForget/Parallel/Can_re-join_room_if_re-invited"} +{"Action":"pass","Test":"TestRoomForget/Parallel/Forgetting_room_does_not_show_up_in_v2_initial_/sync"} +{"Action":"fail","Test":"TestRoomForget/Parallel/Forgotten_room_messages_cannot_be_paginated"} +{"Action":"fail","Test":"TestRoomForget/Parallel/Leave_for_forgotten_room_shows_up_in_v2_incremental_/sync"} +{"Action":"pass","Test":"TestRoomImageRoundtrip"} +{"Action":"fail","Test":"TestRoomMembers"} +{"Action":"fail","Test":"TestRoomMembers/Parallel"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/join/:room_alias_can_join_a_room"} +{"Action":"fail","Test":"TestRoomMembers/Parallel/POST_/join/:room_alias_can_join_a_room_with_custom_content"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/join/:room_id_can_join_a_room"} +{"Action":"fail","Test":"TestRoomMembers/Parallel/POST_/join/:room_id_can_join_a_room_with_custom_content"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/rooms/:room_id/ban_can_ban_a_user"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/rooms/:room_id/invite_can_send_an_invite"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/rooms/:room_id/join_can_join_a_room"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/POST_/rooms/:room_id/leave_can_leave_a_room"} +{"Action":"pass","Test":"TestRoomMembers/Parallel/Test_that_we_can_be_reinvited_to_a_room_we_created"} +{"Action":"pass","Test":"TestRoomMessagesLazyLoading"} +{"Action":"pass","Test":"TestRoomMessagesLazyLoadingLocalUser"} +{"Action":"pass","Test":"TestRoomReadMarkers"} +{"Action":"pass","Test":"TestRoomReceipts"} +{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin"} +{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Bob_can_find_Alice_by_mxid"} +{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Bob_can_find_Alice_by_profile_display_name"} +{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Eve_can_find_Alice_by_mxid"} +{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Eve_can_find_Alice_by_profile_display_name"} +{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Eve_cannot_find_Alice_by_room-specific_name_that_Eve_is_not_privy_to"} +{"Action":"fail","Test":"TestRoomSpecificUsernameChange"} +{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Bob_can_find_Alice_by_mxid"} +{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Bob_can_find_Alice_by_profile_display_name"} +{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Eve_can_find_Alice_by_mxid"} 
+{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Eve_can_find_Alice_by_profile_display_name"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Eve_cannot_find_Alice_by_room-specific_name_that_Eve_is_not_privy_to"} +{"Action":"fail","Test":"TestRoomState"} +{"Action":"fail","Test":"TestRoomState/Parallel"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/directory/room/:room_alias_yields_room_ID"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/joined_rooms_lists_newly-created_room"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/publicRooms_lists_newly-created_room"} +{"Action":"fail","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/joined_members_fetches_my_membership"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/joined_members_is_forbidden_after_leaving_room"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.member/:user_id?format=event_fetches_my_membership_event"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.member/:user_id_fetches_my_membership"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.name_gets_name"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.power_levels_fetches_powerlevels"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state/m.room.topic_gets_topic"} +{"Action":"pass","Test":"TestRoomState/Parallel/GET_/rooms/:room_id/state_fetches_entire_room_state"} +{"Action":"pass","Test":"TestRoomState/Parallel/POST_/rooms/:room_id/state/m.room.name_sets_name"} +{"Action":"pass","Test":"TestRoomState/Parallel/PUT_/createRoom_with_creation_content"} +{"Action":"pass","Test":"TestRoomState/Parallel/PUT_/rooms/:room_id/state/m.room.topic_sets_topic"} +{"Action":"pass","Test":"TestRoomSummary"} +{"Action":"fail","Test":"TestRoomsInvite"} +{"Action":"fail","Test":"TestRoomsInvite/Parallel"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Can_invite_users_to_invite-only_rooms"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Invited_user_can_reject_invite"} +{"Action":"fail","Test":"TestRoomsInvite/Parallel/Invited_user_can_reject_invite_for_empty_room"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Invited_user_can_see_room_metadata"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Test_that_we_can_be_reinvited_to_a_room_we_created"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Uninvited_users_cannot_join_the_room"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Users_cannot_invite_a_user_that_is_already_in_the_room"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Users_cannot_invite_themselves_to_a_room"} +{"Action":"fail","Test":"TestSearch"} +{"Action":"fail","Test":"TestSearch/parallel"} +{"Action":"fail","Test":"TestSearch/parallel/Can_back-paginate_search_results"} +{"Action":"fail","Test":"TestSearch/parallel/Can_get_context_around_search_results"} +{"Action":"pass","Test":"TestSearch/parallel/Can_search_for_an_event_by_body"} +{"Action":"pass","Test":"TestSearch/parallel/Search_results_with_rank_ordering_do_not_include_redacted_events"} +{"Action":"pass","Test":"TestSearch/parallel/Search_results_with_recent_ordering_do_not_include_redacted_events"} +{"Action":"pass","Test":"TestSearch/parallel/Search_works_across_an_upgraded_room_and_its_predecessor"} +{"Action":"fail","Test":"TestSendAndFetchMessage"} {"Action":"skip","Test":"TestSendJoinPartialStateResponse"} +{"Action":"pass","Test":"TestSendMessageWithTxn"} 
+{"Action":"pass","Test":"TestServerCapabilities"} +{"Action":"skip","Test":"TestServerNotices"} +{"Action":"fail","Test":"TestSync"} +{"Action":"fail","Test":"TestSync/parallel"} +{"Action":"pass","Test":"TestSync/parallel/Can_sync_a_joined_room"} +{"Action":"fail","Test":"TestSync/parallel/Device_list_tracking"} +{"Action":"fail","Test":"TestSync/parallel/Device_list_tracking/User_is_correctly_listed_when_they_leave,_even_when_lazy_loading_is_enabled"} +{"Action":"pass","Test":"TestSync/parallel/Full_state_sync_includes_joined_rooms"} +{"Action":"fail","Test":"TestSync/parallel/Get_presence_for_newly_joined_members_in_incremental_sync"} +{"Action":"fail","Test":"TestSync/parallel/Newly_joined_room_has_correct_timeline_in_incremental_sync"} +{"Action":"fail","Test":"TestSync/parallel/Newly_joined_room_includes_presence_in_incremental_sync"} +{"Action":"pass","Test":"TestSync/parallel/Newly_joined_room_is_included_in_an_incremental_sync"} +{"Action":"fail","Test":"TestSync/parallel/sync_should_succeed_even_if_the_sync_token_points_to_a_redaction_of_an_unknown_event"} +{"Action":"pass","Test":"TestSyncFilter"} +{"Action":"pass","Test":"TestSyncFilter/Can_create_filter"} +{"Action":"pass","Test":"TestSyncFilter/Can_download_filter"} +{"Action":"fail","Test":"TestSyncLeaveSection"} +{"Action":"fail","Test":"TestSyncLeaveSection/Left_rooms_appear_in_the_leave_section_of_full_state_sync"} +{"Action":"fail","Test":"TestSyncLeaveSection/Left_rooms_appear_in_the_leave_section_of_sync"} +{"Action":"fail","Test":"TestSyncLeaveSection/Newly_left_rooms_appear_in_the_leave_section_of_incremental_sync"} {"Action":"pass","Test":"TestSyncOmitsStateChangeOnFilteredEvents"} +{"Action":"pass","Test":"TestSyncTimelineGap"} +{"Action":"pass","Test":"TestSyncTimelineGap/full"} +{"Action":"pass","Test":"TestSyncTimelineGap/incremental"} +{"Action":"fail","Test":"TestTentativeEventualJoiningAfterRejecting"} +{"Action":"fail","Test":"TestThreadReceiptsInSyncMSC4102"} +{"Action":"fail","Test":"TestThreadedReceipts"} +{"Action":"fail","Test":"TestThreadsEndpoint"} +{"Action":"pass","Test":"TestToDeviceMessages"} {"Action":"fail","Test":"TestToDeviceMessagesOverFederation"} {"Action":"pass","Test":"TestToDeviceMessagesOverFederation/good_connectivity"} {"Action":"pass","Test":"TestToDeviceMessagesOverFederation/interrupted_connectivity"} {"Action":"fail","Test":"TestToDeviceMessagesOverFederation/stopped_server"} +{"Action":"fail","Test":"TestTxnIdWithRefreshToken"} +{"Action":"fail","Test":"TestTxnIdempotency"} +{"Action":"pass","Test":"TestTxnIdempotencyScopedToDevice"} +{"Action":"pass","Test":"TestTxnInEvent"} +{"Action":"pass","Test":"TestTxnScopeOnLocalEcho"} +{"Action":"pass","Test":"TestTyping"} +{"Action":"pass","Test":"TestTyping/Typing_can_be_explicitly_stopped"} +{"Action":"pass","Test":"TestTyping/Typing_notification_sent_to_local_room_members"} +{"Action":"fail","Test":"TestUnbanViaInvite"} {"Action":"fail","Test":"TestUnknownEndpoints"} {"Action":"pass","Test":"TestUnknownEndpoints/Client-server_endpoints"} {"Action":"fail","Test":"TestUnknownEndpoints/Key_endpoints"} @@ -234,5 +629,27 @@ {"Action":"pass","Test":"TestUnknownEndpoints/Server-server_endpoints"} {"Action":"pass","Test":"TestUnknownEndpoints/Unknown_prefix"} {"Action":"fail","Test":"TestUnrejectRejectedEvents"} +{"Action":"fail","Test":"TestUploadKey"} +{"Action":"fail","Test":"TestUploadKey/Parallel"} +{"Action":"fail","Test":"TestUploadKey/Parallel/Can_claim_one_time_key_using_POST"} 
+{"Action":"pass","Test":"TestUploadKey/Parallel/Can_query_device_keys_using_POST"} +{"Action":"pass","Test":"TestUploadKey/Parallel/Can_query_specific_device_keys_using_POST"} +{"Action":"pass","Test":"TestUploadKey/Parallel/Can_upload_device_keys"} +{"Action":"fail","Test":"TestUploadKey/Parallel/Rejects_invalid_device_keys"} +{"Action":"fail","Test":"TestUploadKey/Parallel/Should_reject_keys_claiming_to_belong_to_a_different_user"} +{"Action":"pass","Test":"TestUploadKey/Parallel/query_for_user_with_no_keys_returns_empty_key_dict"} +{"Action":"pass","Test":"TestUploadKeyIdempotency"} +{"Action":"pass","Test":"TestUploadKeyIdempotencyOverlap"} +{"Action":"fail","Test":"TestUrlPreview"} {"Action":"pass","Test":"TestUserAppearsInChangedDeviceListOnJoinOverFederation"} +{"Action":"pass","Test":"TestVersionStructure"} +{"Action":"pass","Test":"TestVersionStructure/Version_responds_200_OK_with_valid_structure"} +{"Action":"pass","Test":"TestWithoutOwnedState"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_a_non-member_user_ID_as_state_key"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_another_suffixed_user_ID_as_state_key"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_another_user_ID_as_state_key"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_malformed_user_ID_as_state_key"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/room_creator_cannot_set_state_with_their_own_suffixed_user_ID_as_state_key"} +{"Action":"pass","Test":"TestWithoutOwnedState/parallel/user_can_set_state_with_their_own_user_ID_as_state_key"} {"Action":"pass","Test":"TestWriteMDirectAccountData"} From 5ad1100e0fdf41a380b445154b42bc09f38a64b5 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 19:48:06 -0500 Subject: [PATCH 099/310] bump our rocksdb fork Signed-off-by: June Clementine Strawberry --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- flake.lock | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 56ff3c6b..7dd24e2e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3685,7 +3685,7 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.33.0+9.11.1" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=3f4c5357243defedc849ae6227490102a9f90bef#3f4c5357243defedc849ae6227490102a9f90bef" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=d05c8bd7ba8814de1731ec0ae29e863c8ecb7206#d05c8bd7ba8814de1731ec0ae29e863c8ecb7206" dependencies = [ "bindgen", "bzip2-sys", @@ -3702,7 +3702,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.37.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=3f4c5357243defedc849ae6227490102a9f90bef#3f4c5357243defedc849ae6227490102a9f90bef" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=d05c8bd7ba8814de1731ec0ae29e863c8ecb7206#d05c8bd7ba8814de1731ec0ae29e863c8ecb7206" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/Cargo.toml b/Cargo.toml index 43b2d55d..a9f1abb3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -383,7 +383,7 @@ features = [ [workspace.dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "3f4c5357243defedc849ae6227490102a9f90bef" +rev = "d05c8bd7ba8814de1731ec0ae29e863c8ecb7206" default-features = false 
features = [ "multi-threaded-cf", diff --git a/flake.lock b/flake.lock index 3a43c4cd..c3292cbc 100644 --- a/flake.lock +++ b/flake.lock @@ -567,11 +567,11 @@ "rocksdb": { "flake": false, "locked": { - "lastModified": 1741303627, - "narHash": "sha256-7HpydEinYHvskC4vkl1Yie2kg2yShfZbREAyQMkvEUc=", + "lastModified": 1741308171, + "narHash": "sha256-YdBvdQ75UJg5ffwNjxizpviCVwVDJnBkM8ZtGIduMgY=", "owner": "girlbossceo", "repo": "rocksdb", - "rev": "cecee0e4fbff2b69e3edc6e9b5b751d8098a3ba1", + "rev": "3ce04794bcfbbb0d2e6f81ae35fc4acf688b6986", "type": "github" }, "original": { From fe65648296b1827841c3e2a602cc78bd1af0a9b5 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 6 Mar 2025 20:10:32 -0500 Subject: [PATCH 100/310] remove unnecessary map_err Signed-off-by: June Clementine Strawberry --- src/service/rooms/timeline/mod.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 276b8b6a..826a1dae 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -344,7 +344,7 @@ impl Service { let insert_lock = self.mutex_insert.lock(&pdu.room_id).await; - let count1 = self.services.globals.next_count()?; + let count1 = self.services.globals.next_count().unwrap(); // Mark as read first so the sending client doesn't get a notification even if // appending fails self.services @@ -362,13 +362,12 @@ impl Service { drop(insert_lock); - // See if the event matches any known pushers + // See if the event matches any known pushers via power level let power_levels: RoomPowerLevelsEventContent = self .services .state_accessor .room_state_get_content(&pdu.room_id, &StateEventType::RoomPowerLevels, "") .await - .map_err(|e| err!(Database(warn!("invalid m.room.power_levels event: {e}")))) .unwrap_or_default(); let sync_pdu = pdu.to_sync_room_event(); From 2c58a6efda4f0ae7fa7b5ad05758489b5ff2e5f5 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Fri, 7 Mar 2025 00:54:30 -0500 Subject: [PATCH 101/310] allow broken no-op deny+allow room server ACL keys Signed-off-by: June Clementine Strawberry --- src/service/rooms/event_handler/acl_check.rs | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/service/rooms/event_handler/acl_check.rs b/src/service/rooms/event_handler/acl_check.rs index 6b432a4b..f847015b 100644 --- a/src/service/rooms/event_handler/acl_check.rs +++ b/src/service/rooms/event_handler/acl_check.rs @@ -14,14 +14,21 @@ pub async fn acl_check(&self, server_name: &ServerName, room_id: &RoomId) -> Res .room_state_get_content(room_id, &StateEventType::RoomServerAcl, "") .await .map(|c: RoomServerAclEventContent| c) - .inspect(|acl| trace!("ACL content found: {acl:?}")) - .inspect_err(|e| trace!("No ACL content found: {e:?}")) + .inspect(|acl| trace!(%room_id, "ACL content found: {acl:?}")) + .inspect_err(|e| trace!(%room_id, "No ACL content found: {e:?}")) else { return Ok(()); }; if acl_event_content.allow.is_empty() { - warn!("Ignoring broken ACL event (allow key is empty)"); + warn!(%room_id, "Ignoring broken ACL event (allow key is empty)"); + return Ok(()); + } + + if acl_event_content.deny.contains(&String::from("*")) + && acl_event_content.allow.contains(&String::from("*")) + { + warn!(%room_id, "Ignoring broken ACL event (allow key and deny key both contain wildcard \"*\""); return Ok(()); } From 4f882c3bd8adfa86edc504396f6cd45b56fd8b62 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Fri, 7 Mar 2025 
00:57:39 -0500 Subject: [PATCH 102/310] add some ACL paw-gun checks, better `PUT` state event validation Signed-off-by: June Clementine Strawberry --- src/api/client/keys.rs | 23 +++- src/api/client/state.rs | 253 +++++++++++++++++++++++++--------------- 2 files changed, 178 insertions(+), 98 deletions(-) diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 8a7eab7e..4c1c986a 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -1,7 +1,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use axum::extract::State; -use conduwuit::{Err, Error, Result, debug, err, info, result::NotFound, utils}; +use conduwuit::{Err, Error, Result, debug, debug_warn, err, info, result::NotFound, utils}; use futures::{StreamExt, stream::FuturesUnordered}; use ruma::{ OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, @@ -41,6 +41,20 @@ pub(crate) async fn upload_keys_route( let (sender_user, sender_device) = body.sender(); for (key_id, one_time_key) in &body.one_time_keys { + if one_time_key + .deserialize() + .inspect_err(|e| { + debug_warn!( + ?key_id, + ?one_time_key, + "Invalid one time key JSON submitted by client, skipping: {e}" + ) + }) + .is_err() + { + continue; + } + services .users .add_one_time_key(sender_user, sender_device, key_id, one_time_key) @@ -48,7 +62,12 @@ pub(crate) async fn upload_keys_route( } if let Some(device_keys) = &body.device_keys { - let deser_device_keys = device_keys.deserialize()?; + let deser_device_keys = device_keys.deserialize().map_err(|e| { + err!(Request(BadJson(debug_warn!( + ?device_keys, + "Invalid device keys JSON uploaded by client: {e}" + )))) + })?; if deser_device_keys.user_id != sender_user { return Err!(Request(Unknown( diff --git a/src/api/client/state.rs b/src/api/client/state.rs index 6353fe1c..c92091eb 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -11,6 +11,7 @@ use ruma::{ history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, join_rules::{JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, + server_acl::RoomServerAclEventContent, }, }, serde::Raw, @@ -194,134 +195,194 @@ async fn allowed_to_send_state_event( ) -> Result { match event_type { | StateEventType::RoomCreate => { - return Err!(Request(BadJson( + return Err!(Request(BadJson(debug_warn!( + ?room_id, "You cannot update m.room.create after a room has been created." - ))); + )))); + }, + | StateEventType::RoomServerAcl => { + // prevents common ACL paw-guns as ACL management is difficult and prone to + // irreversible mistakes + match json.deserialize_as::() { + | Ok(acl_content) => { + if acl_content.allow.is_empty() { + return Err!(Request(BadJson(debug_warn!( + ?room_id, + "Sending an ACL event with an empty allow key will permanently \ + brick the room for non-conduwuit's as this equates to no servers \ + being allowed to participate in this room." + )))); + } + + if acl_content.deny.contains(&String::from("*")) + && acl_content.allow.contains(&String::from("*")) + { + return Err!(Request(BadJson(debug_warn!( + ?room_id, + "Sending an ACL event with a deny and allow key value of \"*\" will \ + permanently brick the room for non-conduwuit's as this equates to \ + no servers being allowed to participate in this room." 
+ )))); + } + + if acl_content.deny.contains(&String::from("*")) + && !acl_content.is_allowed(services.globals.server_name()) + { + return Err!(Request(BadJson(debug_warn!( + ?room_id, + "Sending an ACL event with a deny key value of \"*\" and without \ + your own server name in the allow key will result in you being \ + unable to participate in this room." + )))); + } + + if !acl_content.allow.contains(&String::from("*")) + && !acl_content.is_allowed(services.globals.server_name()) + { + return Err!(Request(BadJson(debug_warn!( + ?room_id, + "Sending an ACL event for an allow key without \"*\" and without \ + your own server name in the allow key will result in you being \ + unable to participate in this room." + )))); + } + }, + | Err(e) => { + return Err!(Request(BadJson(debug_warn!( + "Room server ACL event is invalid: {e}" + )))); + }, + }; }, - // Forbid m.room.encryption if encryption is disabled | StateEventType::RoomEncryption => - if !services.globals.allow_encryption() { + // Forbid m.room.encryption if encryption is disabled + if !services.config.allow_encryption { return Err!(Request(Forbidden("Encryption is disabled on this homeserver."))); }, - // admin room is a sensitive room, it should not ever be made public | StateEventType::RoomJoinRules => { + // admin room is a sensitive room, it should not ever be made public if let Ok(admin_room_id) = services.admin.get_admin_room().await { if admin_room_id == room_id { - if let Ok(join_rule) = - serde_json::from_str::(json.json().get()) - { - if join_rule.join_rule == JoinRule::Public { - return Err!(Request(Forbidden( - "Admin room is a sensitive room, it cannot be made public" - ))); - } + match json.deserialize_as::() { + | Ok(join_rule) => + if join_rule.join_rule == JoinRule::Public { + return Err!(Request(Forbidden( + "Admin room is a sensitive room, it cannot be made public" + ))); + }, + | Err(e) => { + return Err!(Request(BadJson(debug_warn!( + "Room join rules event is invalid: {e}" + )))); + }, } } } }, - // admin room is a sensitive room, it should not ever be made world readable | StateEventType::RoomHistoryVisibility => { - if let Ok(visibility_content) = - serde_json::from_str::(json.json().get()) - { - if let Ok(admin_room_id) = services.admin.get_admin_room().await { - if admin_room_id == room_id - && visibility_content.history_visibility - == HistoryVisibility::WorldReadable - { - return Err!(Request(Forbidden( - "Admin room is a sensitive room, it cannot be made world readable \ - (public room history)." - ))); - } + // admin room is a sensitive room, it should not ever be made world readable + if let Ok(admin_room_id) = services.admin.get_admin_room().await { + match json.deserialize_as::() { + | Ok(visibility_content) => { + if admin_room_id == room_id + && visibility_content.history_visibility + == HistoryVisibility::WorldReadable + { + return Err!(Request(Forbidden( + "Admin room is a sensitive room, it cannot be made world \ + readable (public room history)." 
+ ))); + } + }, + | Err(e) => { + return Err!(Request(BadJson(debug_warn!( + "Room history visibility event is invalid: {e}" + )))); + }, } } }, | StateEventType::RoomCanonicalAlias => { - if let Ok(canonical_alias) = - serde_json::from_str::(json.json().get()) - { - let mut aliases = canonical_alias.alt_aliases.clone(); + match json.deserialize_as::() { + | Ok(canonical_alias_content) => { + let mut aliases = canonical_alias_content.alt_aliases.clone(); - if let Some(alias) = canonical_alias.alias { - aliases.push(alias); - } + if let Some(alias) = canonical_alias_content.alias { + aliases.push(alias); + } - for alias in aliases { - if !services.globals.server_is_ours(alias.server_name()) { - return Err!(Request(Forbidden( - "canonical_alias must be for this server" + for alias in aliases { + let (alias_room_id, _servers) = + services.rooms.alias.resolve_alias(&alias, None).await?; + + if alias_room_id != room_id { + return Err!(Request(Forbidden( + "Room alias {alias} does not belong to room {room_id}" + ))); + } + } + }, + | Err(e) => { + return Err!(Request(BadJson(debug_warn!( + "Room canonical alias event is invalid: {e}" + )))); + }, + } + }, + | StateEventType::RoomMember => match json.deserialize_as::() { + | Ok(membership_content) => { + let Ok(state_key) = UserId::parse(state_key) else { + return Err!(Request(BadJson( + "Membership event has invalid or non-existent state key" + ))); + }; + + if let Some(authorising_user) = + membership_content.join_authorized_via_users_server + { + if membership_content.membership != MembershipState::Join { + return Err!(Request(BadJson( + "join_authorised_via_users_server is only for member joins" + ))); + } + + if services + .rooms + .state_cache + .is_joined(state_key, room_id) + .await + { + return Err!(Request(InvalidParam( + "{state_key} is already joined, an authorising user is not required." + ))); + } + + if !services.globals.user_is_local(&authorising_user) { + return Err!(Request(InvalidParam( + "Authorising user {authorising_user} does not belong to this \ + homeserver" ))); } if !services .rooms - .alias - .resolve_local_alias(&alias) + .state_cache + .is_joined(&authorising_user, room_id) .await - .is_ok_and(|room| room == room_id) - // Make sure it's the right room { - return Err!(Request(Forbidden( - "You are only allowed to send canonical_alias events when its \ - aliases already exist" + return Err!(Request(InvalidParam( + "Authorising user {authorising_user} is not in the room, they \ + cannot authorise the join." ))); } } - } - }, - | StateEventType::RoomMember => { - let Ok(membership_content) = - serde_json::from_str::(json.json().get()) - else { + }, + | Err(e) => { return Err!(Request(BadJson( "Membership content must have a valid JSON body with at least a valid \ - membership state." + membership state: {e}" ))); - }; - - let Ok(state_key) = UserId::parse(state_key) else { - return Err!(Request(BadJson( - "Membership event has invalid or non-existent state key" - ))); - }; - - if let Some(authorising_user) = membership_content.join_authorized_via_users_server { - if membership_content.membership != MembershipState::Join { - return Err!(Request(BadJson( - "join_authorised_via_users_server is only for member joins" - ))); - } - - if services - .rooms - .state_cache - .is_joined(state_key, room_id) - .await - { - return Err!(Request(InvalidParam( - "{state_key} is already joined, an authorising user is not required." 
- ))); - } - - if !services.globals.user_is_local(&authorising_user) { - return Err!(Request(InvalidParam( - "Authorising user {authorising_user} does not belong to this homeserver" - ))); - } - - if !services - .rooms - .state_cache - .is_joined(&authorising_user, room_id) - .await - { - return Err!(Request(InvalidParam( - "Authorising user {authorising_user} is not in the room, they cannot \ - authorise the join." - ))); - } - } + }, }, | _ => (), } From 8b3f62919831650a8198ca751dd1892e9889a51d Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Fri, 7 Mar 2025 00:57:47 -0500 Subject: [PATCH 103/310] bump rust-rocksdb Signed-off-by: June Clementine Strawberry --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7dd24e2e..a224ad0f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3685,7 +3685,7 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.33.0+9.11.1" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=d05c8bd7ba8814de1731ec0ae29e863c8ecb7206#d05c8bd7ba8814de1731ec0ae29e863c8ecb7206" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2e692ae026881fc385f111fdcfba38bee98f1e47#2e692ae026881fc385f111fdcfba38bee98f1e47" dependencies = [ "bindgen", "bzip2-sys", @@ -3702,7 +3702,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.37.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=d05c8bd7ba8814de1731ec0ae29e863c8ecb7206#d05c8bd7ba8814de1731ec0ae29e863c8ecb7206" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2e692ae026881fc385f111fdcfba38bee98f1e47#2e692ae026881fc385f111fdcfba38bee98f1e47" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/Cargo.toml b/Cargo.toml index a9f1abb3..0b08cd8f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -383,7 +383,7 @@ features = [ [workspace.dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "d05c8bd7ba8814de1731ec0ae29e863c8ecb7206" +rev = "2e692ae026881fc385f111fdcfba38bee98f1e47" default-features = false features = [ "multi-threaded-cf", From 6052c0c8a2c5722a5ca057576ba174f8f72ab9e0 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Fri, 7 Mar 2025 01:04:53 -0500 Subject: [PATCH 104/310] ci: allow ourselves to write to the public docs directory Signed-off-by: June Clementine Strawberry --- .github/workflows/documentation.yml | 1 + conduwuit-example.toml | 2 +- src/api/client/keys.rs | 2 +- src/core/config/mod.rs | 2 +- 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml index 88e7bbe1..b5b4ff46 100644 --- a/.github/workflows/documentation.yml +++ b/.github/workflows/documentation.yml @@ -81,6 +81,7 @@ jobs: bin/nix-build-and-cache just .#book cp -r --dereference result public + chmod u+w -R public - name: Upload generated documentation (book) as normal artifact uses: actions/upload-artifact@v4 diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 541f062d..3d4b15bc 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -593,7 +593,7 @@ # Currently, conduwuit doesn't support inbound batched key requests, so # this list should only contain other Synapse servers. 
# -# example: ["matrix.org", "envs.net", "constellatory.net", "tchncs.de"] +# example: ["matrix.org", "envs.net", "tchncs.de"] # #trusted_servers = ["matrix.org"] diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 4c1c986a..9cd50e85 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -48,7 +48,7 @@ pub(crate) async fn upload_keys_route( ?key_id, ?one_time_key, "Invalid one time key JSON submitted by client, skipping: {e}" - ) + ); }) .is_err() { diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 5a4819e0..a82f5f53 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -713,7 +713,7 @@ pub struct Config { /// Currently, conduwuit doesn't support inbound batched key requests, so /// this list should only contain other Synapse servers. /// - /// example: ["matrix.org", "envs.net", "constellatory.net", "tchncs.de"] + /// example: ["matrix.org", "envs.net", "tchncs.de"] /// /// default: ["matrix.org"] #[serde(default = "default_trusted_servers")] From 298b58c069534833cfd027510ad7683e18d71e7a Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 7 Mar 2025 21:44:33 -0500 Subject: [PATCH 105/310] set file_shape for roomsynctoken_shortstatehash to 3, remove rust-rocksdb package spec Signed-off-by: strawberry --- Cargo.toml | 21 --------------------- src/database/maps.rs | 1 + 2 files changed, 1 insertion(+), 21 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 0b08cd8f..c48be06a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -752,27 +752,6 @@ inherits = "dev" # '-Clink-arg=-Wl,-z,lazy', #] -[profile.dev.package.rust-rocksdb] -inherits = "dev" -debug = 'limited' -incremental = false -codegen-units = 1 -opt-level = 'z' -#rustflags = [ -# '--cfg', 'conduwuit_mods', -# '-Ztls-model=initial-exec', -# '-Cprefer-dynamic=true', -# '-Zstaticlib-prefer-dynamic=true', -# '-Zstaticlib-allow-rdylib-deps=true', -# '-Zpacked-bundled-libs=true', -# '-Zplt=true', -# '-Clink-arg=-Wl,--no-as-needed', -# '-Clink-arg=-Wl,--allow-shlib-undefined', -# '-Clink-arg=-Wl,-z,lazy', -# '-Clink-arg=-Wl,-z,nodlopen', -# '-Clink-arg=-Wl,-z,nodelete', -#] - [profile.dev.package.'*'] inherits = "dev" debug = 'limited' diff --git a/src/database/maps.rs b/src/database/maps.rs index 9af45159..138bb038 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -169,6 +169,7 @@ pub(super) static MAPS: &[Descriptor] = &[ }, Descriptor { name: "roomsynctoken_shortstatehash", + file_shape: 3, val_size_hint: Some(8), block_size: 512, compression_level: 3, From 51d29bc1cbca84c001c3b4efbfca9c34a9b94f37 Mon Sep 17 00:00:00 2001 From: strawberry Date: Fri, 7 Mar 2025 21:44:52 -0500 Subject: [PATCH 106/310] bump complement Signed-off-by: strawberry --- flake.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flake.lock b/flake.lock index c3292cbc..03fc205c 100644 --- a/flake.lock +++ b/flake.lock @@ -80,11 +80,11 @@ "complement": { "flake": false, "locked": { - "lastModified": 1740291865, - "narHash": "sha256-wl1+yCTEtvIH8vgXygnxPkaSgg4MYNKs+c9tzVytr20=", + "lastModified": 1741378155, + "narHash": "sha256-rJSfqf3q4oWxcAwENtAowLZeCi8lktwKVH9XQvvZR64=", "owner": "girlbossceo", "repo": "complement", - "rev": "35ad9d9051498fbac8ea4abff8ab7d8b1844f87b", + "rev": "1502a00d8551d0f6e8954a23e43868877c3e57d9", "type": "github" }, "original": { From 90fee4f50eb5a0f81390e088f60265ab4974370e Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 8 Mar 2025 00:15:13 -0500 Subject: [PATCH 107/310] add gotestfmt log output to complement script and CI output 
Signed-off-by: strawberry --- .github/workflows/ci.yml | 30 +++++++++++++++++++----------- bin/complement | 22 +++++++++++++++++----- flake.nix | 1 + 3 files changed, 37 insertions(+), 16 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c0425873..c8fef47f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -176,6 +176,13 @@ jobs: path: complement_test_results.jsonl if-no-files-found: error + - name: Upload Complement logs (gotestfmt) + uses: actions/upload-artifact@v4 + with: + name: complement_test_logs_gotestfmt.log + path: complement_test_logs_gotestfmt.log + if-no-files-found: error + - name: Diff Complement results with checked-in repo results run: | diff -u --color=always tests/test_results/complement/test_results.jsonl complement_test_results.jsonl > >(tee -a complement_diff_output.log) @@ -186,22 +193,23 @@ jobs: if: success() || failure() run: | if [ ${GH_JOB_STATUS} == 'success' ]; then - echo '# ✅ completed suwuccessfully' >> $GITHUB_STEP_SUMMARY + echo '# ✅ CI completed suwuccessfully' >> $GITHUB_STEP_SUMMARY else - echo '# CI failure' >> $GITHUB_STEP_SUMMARY + echo '# ❌ CI failed (last 100 lines of output)' >> $GITHUB_STEP_SUMMARY echo '```' >> $GITHUB_STEP_SUMMARY - tail -n 40 test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - - echo '# Complement diff results' >> $GITHUB_STEP_SUMMARY - echo '```diff' >> $GITHUB_STEP_SUMMARY - tail -n 100 complement_diff_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY + tail -n 100 test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY echo '```' >> $GITHUB_STEP_SUMMARY fi - - name: Run cargo clean test artifacts to free up space - run: | - cargo clean --profile test + echo '# Complement diff results (last 100 lines)' >> $GITHUB_STEP_SUMMARY + echo '```diff' >> $GITHUB_STEP_SUMMARY + tail -n 100 complement_diff_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + + echo '# Complement gotestfmt logs (last 100 lines)' >> $GITHUB_STEP_SUMMARY + echo '```diff' >> $GITHUB_STEP_SUMMARY + tail -n 100 complement_test_logs_gotestfmt.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY build: name: Build diff --git a/bin/complement b/bin/complement index 9960299c..aec27c5b 100755 --- a/bin/complement +++ b/bin/complement @@ -10,15 +10,15 @@ set -euo pipefail COMPLEMENT_SRC="${COMPLEMENT_SRC:-$1}" # A `.jsonl` file to write test logs to -LOG_FILE="$2" +LOG_FILE="${2:-complement_test_logs.jsonl}" # A `.jsonl` file to write test results to -RESULTS_FILE="$3" +RESULTS_FILE="${3:-complement_test_results.jsonl}" OCI_IMAGE="complement-conduwuit:main" -# Complement tests that are skipped due to flakiness/reliability issues -SKIPPED_COMPLEMENT_TESTS='-skip=TestPartialStateJoin.*' +# Complement tests that are skipped due to flakiness/reliability issues or we don't implement such features and won't for a long time +#SKIPPED_COMPLEMENT_TESTS='-skip=TestPartialStateJoin.*' # $COMPLEMENT_SRC needs to be a directory to Complement source code if [ -f "$COMPLEMENT_SRC" ]; then @@ -34,6 +34,7 @@ toplevel="$(git rev-parse --show-toplevel)" pushd "$toplevel" > /dev/null +# if using macOS, use linux-complement #bin/nix-build-and-cache just .#linux-complement bin/nix-build-and-cache just .#complement @@ -45,7 +46,8 @@ set +o pipefail env \ -C "$COMPLEMENT_SRC" \ COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \ - go test -tags="conduwuit_blacklist" 
"$SKIPPED_COMPLEMENT_TESTS" -v -timeout 1h -json ./tests/... | tee "$LOG_FILE" + COMPLEMENT_ENABLE_DIRTY_RUNS=1 \ # reuses the same complement container for faster complement, at the possible expense of test environment pollution + go test -tags="conduwuit_blacklist" -v -timeout 1h -json ./tests/... | tee "$LOG_FILE" set -o pipefail # Post-process the results into an easy-to-compare format, sorted by Test name for reproducible results @@ -55,3 +57,13 @@ cat "$LOG_FILE" | jq -s -c 'sort_by(.Test)[]' | jq -c ' and .Test != null ) | {Action: .Action, Test: .Test} ' > "$RESULTS_FILE" + +grep '^{"Time":' "$LOG_FILE" | gotestfmt > "${LOG_FILE}_gotestfmt.log" + +echo "" +echo "" +echo "complement logs saved at $LOG_FILE" +echo "complement results saved at $RESULTS_FILE" +echo "complement logs in gotestfmt pretty format outputted at ${LOG_FILE}_gotestfmt.log (use an editor/terminal that interprets ANSI colours)" +echo "" +echo "" diff --git a/flake.nix b/flake.nix index 8f08a7d9..544cdd4a 100644 --- a/flake.nix +++ b/flake.nix @@ -161,6 +161,7 @@ # Needed for our script for Complement jq + gotestfmt # Needed for finding broken markdown links lychee From 5a3264980aee8f5869eb953e82c01b62c2ac5bed Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 8 Mar 2025 01:35:26 -0500 Subject: [PATCH 108/310] adjust complement script to allow using your own hs OCI image without nix Signed-off-by: strawberry --- bin/complement | 34 +++++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/bin/complement b/bin/complement index aec27c5b..47c02843 100755 --- a/bin/complement +++ b/bin/complement @@ -15,7 +15,7 @@ LOG_FILE="${2:-complement_test_logs.jsonl}" # A `.jsonl` file to write test results to RESULTS_FILE="${3:-complement_test_results.jsonl}" -OCI_IMAGE="complement-conduwuit:main" +COMPLEMENT_OCI_IMAGE="${COMPLEMENT_OCI_IMAGE:-complement-conduwuit:main}" # Complement tests that are skipped due to flakiness/reliability issues or we don't implement such features and won't for a long time #SKIPPED_COMPLEMENT_TESTS='-skip=TestPartialStateJoin.*' @@ -34,18 +34,38 @@ toplevel="$(git rev-parse --show-toplevel)" pushd "$toplevel" > /dev/null -# if using macOS, use linux-complement -#bin/nix-build-and-cache just .#linux-complement -bin/nix-build-and-cache just .#complement +if [ ! -f "complement_oci_image.tar.gz" ]; then + echo "building complement conduwuit image" -docker load < result -popd > /dev/null + # if using macOS, use linux-complement + #bin/nix-build-and-cache just .#linux-complement + bin/nix-build-and-cache just .#complement + + echo "complement conduwuit image tar.gz built at \"result\"" + + echo "loading into docker" + docker load < result + popd > /dev/null +else + echo "skipping building a complement conduwuit image as complement_oci_image.tar.gz was already found, loading this" + + docker load < complement_oci_image.tar.gz + popd > /dev/null +fi + +echo "" +echo "running go test with:" +echo "\$COMPLEMENT_SRC: $COMPLEMENT_SRC" +echo "\$COMPLEMENT_BASE_IMAGE: $COMPLEMENT_BASE_IMAGE" +echo "\$RESULTS_FILE: $RESULTS_FILE" +echo "\$LOG_FILE: $LOG_FILE" +echo "" # It's okay (likely, even) that `go test` exits nonzero set +o pipefail env \ -C "$COMPLEMENT_SRC" \ - COMPLEMENT_BASE_IMAGE="$OCI_IMAGE" \ + COMPLEMENT_BASE_IMAGE="$COMPLEMENT_OCI_IMAGE" \ COMPLEMENT_ENABLE_DIRTY_RUNS=1 \ # reuses the same complement container for faster complement, at the possible expense of test environment pollution go test -tags="conduwuit_blacklist" -v -timeout 1h -json ./tests/... 
| tee "$LOG_FILE" set -o pipefail From bb0b57efb8d8d89fce0392e7c6c34c169ba054b8 Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 8 Mar 2025 02:30:58 -0500 Subject: [PATCH 109/310] bump rust-rocksdb Signed-off-by: strawberry --- Cargo.lock | 26 ++++++++++++++++++++++---- Cargo.toml | 2 +- 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a224ad0f..8d4688f5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -236,7 +236,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0f9dd2e03ee80ca2822dd6ea431163d2ef259f2066a4d6ccaca6d9dcb386aa43" dependencies = [ - "bindgen", + "bindgen 0.69.5", "cc", "cmake", "dunce", @@ -431,6 +431,24 @@ dependencies = [ "which", ] +[[package]] +name = "bindgen" +version = "0.71.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" +dependencies = [ + "bitflags 2.9.0", + "cexpr", + "clang-sys", + "itertools 0.12.1", + "proc-macro2", + "quote", + "regex", + "rustc-hash 2.1.1", + "shlex", + "syn 2.0.98", +] + [[package]] name = "bit_field" version = "0.10.2" @@ -3685,9 +3703,9 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.33.0+9.11.1" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2e692ae026881fc385f111fdcfba38bee98f1e47#2e692ae026881fc385f111fdcfba38bee98f1e47" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1c267e0bf0cc7b7702e9a329deccd89de79ef4c3#1c267e0bf0cc7b7702e9a329deccd89de79ef4c3" dependencies = [ - "bindgen", + "bindgen 0.71.1", "bzip2-sys", "cc", "glob", @@ -3702,7 +3720,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.37.0" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=2e692ae026881fc385f111fdcfba38bee98f1e47#2e692ae026881fc385f111fdcfba38bee98f1e47" +source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1c267e0bf0cc7b7702e9a329deccd89de79ef4c3#1c267e0bf0cc7b7702e9a329deccd89de79ef4c3" dependencies = [ "libc", "rust-librocksdb-sys", diff --git a/Cargo.toml b/Cargo.toml index c48be06a..de90e63e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -383,7 +383,7 @@ features = [ [workspace.dependencies.rust-rocksdb] git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "2e692ae026881fc385f111fdcfba38bee98f1e47" +rev = "1c267e0bf0cc7b7702e9a329deccd89de79ef4c3" default-features = false features = [ "multi-threaded-cf", From c8a730c29e3ec5c9d38028b89f3fd26ed546ef8f Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 8 Mar 2025 03:07:42 -0500 Subject: [PATCH 110/310] implement MSC4267 automatically forgetting room on leave Signed-off-by: strawberry --- conduwuit-example.toml | 11 ++++++++++- src/api/client/capabilities.rs | 7 +++++++ src/core/config/mod.rs | 10 ++++++++++ src/service/rooms/state_cache/mod.rs | 8 ++++++-- 4 files changed, 33 insertions(+), 3 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 3d4b15bc..15e6dd37 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -445,10 +445,19 @@ # #allow_federation = true -# This item is undocumented. Please contribute documentation for it. +# Allows federation requests to be made to itself +# +# This isn't intended and is very likely a bug if federation requests are +# being sent to yourself. This currently mainly exists for development +# purposes. # #federation_loopback = false +# Always calls /forget on behalf of the user if leaving a room. 
This is a +# part of MSC4267 "Automatically forgetting rooms on leave" +# +#forget_forced_upon_leave = false + # Set this to true to require authentication on the normally # unauthenticated profile retrieval endpoints (GET) # "/_matrix/client/v3/profile/{userId}". diff --git a/src/api/client/capabilities.rs b/src/api/client/capabilities.rs index e20af21b..470ff6ab 100644 --- a/src/api/client/capabilities.rs +++ b/src/api/client/capabilities.rs @@ -42,5 +42,12 @@ pub(crate) async fn get_capabilities_route( .set("uk.tcpip.msc4133.profile_fields", json!({"enabled": true})) .expect("this is valid JSON we created"); + capabilities + .set( + "org.matrix.msc4267.forget_forced_upon_leave", + json!({"enabled": services.config.forget_forced_upon_leave}), + ) + .expect("valid JSON we created"); + Ok(get_capabilities::v3::Response { capabilities }) } diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index a82f5f53..e69a56b9 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -558,9 +558,19 @@ pub struct Config { #[serde(default = "true_fn")] pub allow_federation: bool, + /// Allows federation requests to be made to itself + /// + /// This isn't intended and is very likely a bug if federation requests are + /// being sent to yourself. This currently mainly exists for development + /// purposes. #[serde(default)] pub federation_loopback: bool, + /// Always calls /forget on behalf of the user if leaving a room. This is a + /// part of MSC4267 "Automatically forgetting rooms on leave" + #[serde(default)] + pub forget_forced_upon_leave: bool, + /// Set this to true to require authentication on the normally /// unauthenticated profile retrieval endpoints (GET) /// "/_matrix/client/v3/profile/{userId}". diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index f406eb69..23ba0520 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -28,7 +28,7 @@ use ruma::{ serde::Raw, }; -use crate::{Dep, account_data, appservice::RegistrationInfo, globals, rooms, users}; +use crate::{Dep, account_data, appservice::RegistrationInfo, config, globals, rooms, users}; pub struct Service { appservice_in_room_cache: AppServiceInRoomCache, @@ -38,6 +38,7 @@ pub struct Service { struct Services { account_data: Dep, + config: Dep, globals: Dep, state_accessor: Dep, users: Dep, @@ -70,6 +71,7 @@ impl crate::Service for Service { appservice_in_room_cache: RwLock::new(HashMap::new()), services: Services { account_data: args.depend::("account_data"), + config: args.depend::("config"), globals: args.depend::("globals"), state_accessor: args .depend::("rooms::state_accessor"), @@ -268,7 +270,9 @@ impl Service { | MembershipState::Leave | MembershipState::Ban => { self.mark_as_left(user_id, room_id); - if self.services.globals.user_is_local(user_id) { + if self.services.globals.user_is_local(user_id) + && self.services.config.forget_forced_upon_leave + { self.forget(room_id, user_id); } }, From ef96e7afac81ffa6e3335144644277e4ac28658b Mon Sep 17 00:00:00 2001 From: strawberry Date: Sat, 8 Mar 2025 13:52:56 -0500 Subject: [PATCH 111/310] add cargo auditable for future use, ignore paste dependency being unmaintained for now Signed-off-by: strawberry Signed-off-by: June Clementine Strawberry --- .cargo/audit.toml | 27 +++++++++++++++++++++++++++ engage.toml | 2 +- flake.nix | 8 +++++--- 3 files changed, 33 insertions(+), 4 deletions(-) create mode 100644 .cargo/audit.toml diff --git a/.cargo/audit.toml b/.cargo/audit.toml new file mode 
100644 index 00000000..bf44fbd6 --- /dev/null +++ b/.cargo/audit.toml @@ -0,0 +1,27 @@ +[advisories] +ignore = ["RUSTSEC-2024-0436"] # advisory IDs to ignore e.g. ["RUSTSEC-2019-0001", ...] +informational_warnings = [] # warn for categories of informational advisories +severity_threshold = "none" # CVSS severity ("none", "low", "medium", "high", "critical") + +# Advisory Database Configuration +[database] +path = "~/.cargo/advisory-db" # Path where advisory git repo will be cloned +url = "https://github.com/RustSec/advisory-db.git" # URL to git repo +fetch = true # Perform a `git fetch` before auditing (default: true) +stale = false # Allow stale advisory DB (i.e. no commits for 90 days, default: false) + +# Output Configuration +[output] +deny = ["warnings", "unmaintained", "unsound", "yanked"] # exit on error if unmaintained dependencies are found +format = "terminal" # "terminal" (human readable report) or "json" +quiet = false # Only print information on error +show_tree = true # Show inverse dependency trees along with advisories (default: true) + +# Target Configuration +[target] +arch = ["x86_64", "aarch64"] # Ignore advisories for CPU architectures other than these +os = ["linux", "windows", "macos"] # Ignore advisories for operating systems other than these + +[yanked] +enabled = true # Warn for yanked crates in Cargo.lock (default: true) +update_index = true # Auto-update the crates.io index (default: true) diff --git a/engage.toml b/engage.toml index 71366532..0a857b5a 100644 --- a/engage.toml +++ b/engage.toml @@ -63,7 +63,7 @@ script = "markdownlint --version" [[task]] name = "cargo-audit" group = "security" -script = "cargo audit -D warnings -D unmaintained -D unsound -D yanked" +script = "cargo audit --color=always -D warnings -D unmaintained -D unsound -D yanked" [[task]] name = "cargo-fmt" diff --git a/flake.nix b/flake.nix index 544cdd4a..9db2e90a 100644 --- a/flake.nix +++ b/flake.nix @@ -144,18 +144,20 @@ toolchain ] ++ (with pkgsHost.pkgs; [ - engage - cargo-audit - # Required by hardened-malloc.rs dep binutils + cargo-audit + cargo-auditable + # Needed for producing Debian packages cargo-deb # Needed for CI to check validity of produced Debian packages (dpkg-deb) dpkg + engage + # Needed for Complement go From 5efe804a207420482dc5c57b8db044c5818d5037 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 8 Mar 2025 15:48:23 -0500 Subject: [PATCH 112/310] always disable fed, evict admins, and forget the room when banning a room Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 20 +-- bin/complement | 18 +- nix/pkgs/main/default.nix | 2 +- src/admin/room/moderation.rs | 328 +++++++++-------------------------- 4 files changed, 109 insertions(+), 259 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c8fef47f..9a1366f1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -199,18 +199,18 @@ jobs: echo '```' >> $GITHUB_STEP_SUMMARY tail -n 100 test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY echo '```' >> $GITHUB_STEP_SUMMARY + + echo '# Complement diff results (last 100 lines)' >> $GITHUB_STEP_SUMMARY + echo '```diff' >> $GITHUB_STEP_SUMMARY + tail -n 100 complement_diff_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + + echo '# Complement gotestfmt logs (last 100 lines)' >> $GITHUB_STEP_SUMMARY + echo '```diff' >> $GITHUB_STEP_SUMMARY + tail -n 100 complement_test_logs_gotestfmt.log | sed 's/\x1b\[[0-9;]*m//g' >> 
$GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY fi - echo '# Complement diff results (last 100 lines)' >> $GITHUB_STEP_SUMMARY - echo '```diff' >> $GITHUB_STEP_SUMMARY - tail -n 100 complement_diff_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - - echo '# Complement gotestfmt logs (last 100 lines)' >> $GITHUB_STEP_SUMMARY - echo '```diff' >> $GITHUB_STEP_SUMMARY - tail -n 100 complement_test_logs_gotestfmt.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - build: name: Build runs-on: self-hosted diff --git a/bin/complement b/bin/complement index 47c02843..b869bad6 100755 --- a/bin/complement +++ b/bin/complement @@ -15,7 +15,7 @@ LOG_FILE="${2:-complement_test_logs.jsonl}" # A `.jsonl` file to write test results to RESULTS_FILE="${3:-complement_test_results.jsonl}" -COMPLEMENT_OCI_IMAGE="${COMPLEMENT_OCI_IMAGE:-complement-conduwuit:main}" +COMPLEMENT_BASE_IMAGE="${COMPLEMENT_BASE_IMAGE:-complement-conduwuit:main}" # Complement tests that are skipped due to flakiness/reliability issues or we don't implement such features and won't for a long time #SKIPPED_COMPLEMENT_TESTS='-skip=TestPartialStateJoin.*' @@ -62,12 +62,13 @@ echo "\$LOG_FILE: $LOG_FILE" echo "" # It's okay (likely, even) that `go test` exits nonzero +# `COMPLEMENT_ENABLE_DIRTY_RUNS=1` reuses the same complement container for faster complement, at the possible expense of test environment pollution set +o pipefail env \ -C "$COMPLEMENT_SRC" \ - COMPLEMENT_BASE_IMAGE="$COMPLEMENT_OCI_IMAGE" \ - COMPLEMENT_ENABLE_DIRTY_RUNS=1 \ # reuses the same complement container for faster complement, at the possible expense of test environment pollution - go test -tags="conduwuit_blacklist" -v -timeout 1h -json ./tests/... | tee "$LOG_FILE" + COMPLEMENT_BASE_IMAGE="$COMPLEMENT_BASE_IMAGE" \ + COMPLEMENT_ENABLE_DIRTY_RUNS=1 \ + go test -tags="conduwuit_blacklist" -timeout 1h -json ./tests/... 
| tee "$LOG_FILE" set -o pipefail # Post-process the results into an easy-to-compare format, sorted by Test name for reproducible results @@ -78,12 +79,17 @@ cat "$LOG_FILE" | jq -s -c 'sort_by(.Test)[]' | jq -c ' ) | {Action: .Action, Test: .Test} ' > "$RESULTS_FILE" -grep '^{"Time":' "$LOG_FILE" | gotestfmt > "${LOG_FILE}_gotestfmt.log" +if command -v gotestfmt &> /dev/null; then + echo "using gotestfmt on $LOG_FILE" + grep '^{"Time":' "$LOG_FILE" | gotestfmt > "complement_test_logs_gotestfmt.log" +fi echo "" echo "" echo "complement logs saved at $LOG_FILE" echo "complement results saved at $RESULTS_FILE" -echo "complement logs in gotestfmt pretty format outputted at ${LOG_FILE}_gotestfmt.log (use an editor/terminal that interprets ANSI colours)" +if command -v gotestfmt &> /dev/null; then + echo "complement logs in gotestfmt pretty format outputted at complement_test_logs_gotestfmt.log (use an editor/terminal/pager that interprets ANSI colours and UTF-8 emojis)" +fi echo "" echo "" diff --git a/nix/pkgs/main/default.nix b/nix/pkgs/main/default.nix index 5dfb32ec..9c8038a7 100644 --- a/nix/pkgs/main/default.nix +++ b/nix/pkgs/main/default.nix @@ -155,9 +155,9 @@ commonAttrs = { # Keep sorted include = [ + ".cargo" "Cargo.lock" "Cargo.toml" - "deps" "src" ]; }; diff --git a/src/admin/room/moderation.rs b/src/admin/room/moderation.rs index 444dfa2f..dd5ea627 100644 --- a/src/admin/room/moderation.rs +++ b/src/admin/room/moderation.rs @@ -1,7 +1,7 @@ use api::client::leave_room; use clap::Subcommand; use conduwuit::{ - Result, debug, error, info, + Result, debug, utils::{IterStream, ReadyExt}, warn, }; @@ -17,51 +17,23 @@ use crate::{admin_command, admin_command_dispatch, get_room_info}; #[derive(Debug, Subcommand)] pub(crate) enum RoomModerationCommand { /// - Bans a room from local users joining and evicts all our local users + /// (including server + /// admins) /// from the room. Also blocks any invites (local and remote) for the - /// banned room. - /// - /// Server admins (users in the conduwuit admin room) will not be evicted - /// and server admins can still join the room. To evict admins too, use - /// --force (also ignores errors) To disable incoming federation of the - /// room, use --disable-federation + /// banned room, and disables federation entirely with it. BanRoom { - #[arg(short, long)] - /// Evicts admins out of the room and ignores any potential errors when - /// making our local users leave the room - force: bool, - - #[arg(long)] - /// Disables incoming federation of the room after banning and evicting - /// users - disable_federation: bool, - /// The room in the format of `!roomid:example.com` or a room alias in /// the format of `#roomalias:example.com` room: Box, }, /// - Bans a list of rooms (room IDs and room aliases) from a newline - /// delimited codeblock similar to `user deactivate-all` - BanListOfRooms { - #[arg(short, long)] - /// Evicts admins out of the room and ignores any potential errors when - /// making our local users leave the room - force: bool, - - #[arg(long)] - /// Disables incoming federation of the room after banning and evicting - /// users - disable_federation: bool, - }, + /// delimited codeblock similar to `user deactivate-all`. 
Applies the same + /// steps as ban-room + BanListOfRooms, /// - Unbans a room to allow local users to join again - /// - /// To re-enable incoming federation of the room, use --enable-federation UnbanRoom { - #[arg(long)] - /// Enables incoming federation of the room after unbanning - enable_federation: bool, - /// The room in the format of `!roomid:example.com` or a room alias in /// the format of `#roomalias:example.com` room: Box, @@ -77,12 +49,7 @@ pub(crate) enum RoomModerationCommand { } #[admin_command] -async fn ban_room( - &self, - force: bool, - disable_federation: bool, - room: Box, -) -> Result { +async fn ban_room(&self, room: Box) -> Result { debug!("Got room alias or ID: {}", room); let admin_room_alias = &self.services.globals.admin_alias; @@ -175,98 +142,56 @@ async fn ban_room( )); }; - debug!("Making all users leave the room {}", &room); - if force { - let mut users = self - .services - .rooms - .state_cache - .room_members(&room_id) - .ready_filter(|user| self.services.globals.user_is_local(user)) - .boxed(); + debug!("Making all users leave the room {room_id} and forgetting it"); + let mut users = self + .services + .rooms + .state_cache + .room_members(&room_id) + .map(ToOwned::to_owned) + .ready_filter(|user| self.services.globals.user_is_local(user)) + .boxed(); - while let Some(local_user) = users.next().await { - debug!( - "Attempting leave for user {local_user} in room {room_id} (forced, ignoring all \ - errors, evicting admins too)", - ); + while let Some(ref user_id) = users.next().await { + debug!( + "Attempting leave for user {user_id} in room {room_id} (ignoring all errors, \ + evicting admins too)", + ); - if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { - warn!(%e, "Failed to leave room"); - } + if let Err(e) = leave_room(self.services, user_id, &room_id, None).await { + warn!("Failed to leave room: {e}"); } - } else { - let mut users = self - .services - .rooms - .state_cache - .room_members(&room_id) - .ready_filter(|user| self.services.globals.user_is_local(user)) - .boxed(); - while let Some(local_user) = users.next().await { - if self.services.users.is_admin(local_user).await { - continue; - } - - debug!("Attempting leave for user {} in room {}", &local_user, &room_id); - if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { - error!( - "Error attempting to make local user {} leave room {} during room banning: \ - {}", - &local_user, &room_id, e - ); - return Ok(RoomMessageEventContent::text_plain(format!( - "Error attempting to make local user {} leave room {} during room banning \ - (room is still banned but not removing any more users): {}\nIf you would \ - like to ignore errors, use --force", - &local_user, &room_id, e - ))); - } - } + self.services.rooms.state_cache.forget(&room_id, user_id); } - // remove any local aliases, ignore errors - for local_alias in &self - .services + self.services .rooms .alias .local_aliases_for_room(&room_id) .map(ToOwned::to_owned) - .collect::>() - .await - { - _ = self - .services - .rooms - .alias - .remove_alias(local_alias, &self.services.globals.server_user) - .await; - } + .for_each(|local_alias| async move { + self.services + .rooms + .alias + .remove_alias(&local_alias, &self.services.globals.server_user) + .await + .ok(); + }) + .await; - // unpublish from room directory, ignore errors + // unpublish from room directory self.services.rooms.directory.set_not_public(&room_id); - if disable_federation { - 
self.services.rooms.metadata.disable_room(&room_id, true); - return Ok(RoomMessageEventContent::text_plain( - "Room banned, removed all our local users, and disabled incoming federation with \ - room.", - )); - } + self.services.rooms.metadata.disable_room(&room_id, true); Ok(RoomMessageEventContent::text_plain( - "Room banned and removed all our local users, use `!admin federation disable-room` to \ - stop receiving new inbound federation events as well if needed.", + "Room banned, removed all our local users, and disabled incoming federation with room.", )) } #[admin_command] -async fn ban_list_of_rooms( - &self, - force: bool, - disable_federation: bool, -) -> Result { +async fn ban_list_of_rooms(&self) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" @@ -293,7 +218,7 @@ async fn ban_list_of_rooms( if let Ok(admin_room_id) = self.services.admin.get_admin_room().await { if room.to_owned().eq(&admin_room_id) || room.to_owned().eq(admin_room_alias) { - info!("User specified admin room in bulk ban list, ignoring"); + warn!("User specified admin room in bulk ban list, ignoring"); continue; } } @@ -302,19 +227,12 @@ async fn ban_list_of_rooms( let room_id = match RoomId::parse(room_alias_or_id) { | Ok(room_id) => room_id, | Err(e) => { - if force { - // ignore rooms we failed to parse if we're force banning - warn!( - "Error parsing room \"{room}\" during bulk room banning, \ - ignoring error and logging here: {e}" - ); - continue; - } - - return Ok(RoomMessageEventContent::text_plain(format!( - "{room} is not a valid room ID or room alias, please fix the \ - list and try again: {e}" - ))); + // ignore rooms we failed to parse + warn!( + "Error parsing room \"{room}\" during bulk room banning, \ + ignoring error and logging here: {e}" + ); + continue; }, }; @@ -355,21 +273,11 @@ async fn ban_list_of_rooms( room_id }, | Err(e) => { - // don't fail if force blocking - if force { - warn!( - "Failed to resolve room alias {room} to a \ - room ID: {e}" - ); - continue; - } - - return Ok(RoomMessageEventContent::text_plain( - format!( - "Failed to resolve room alias {room} to a \ - room ID: {e}" - ), - )); + warn!( + "Failed to resolve room alias {room} to a room \ + ID: {e}" + ); + continue; }, } }, @@ -378,37 +286,21 @@ async fn ban_list_of_rooms( room_ids.push(room_id); }, | Err(e) => { - if force { - // ignore rooms we failed to parse if we're force deleting - error!( - "Error parsing room \"{room}\" during bulk room banning, \ - ignoring error and logging here: {e}" - ); - continue; - } - - return Ok(RoomMessageEventContent::text_plain(format!( - "{room} is not a valid room ID or room alias, please fix the \ - list and try again: {e}" - ))); + warn!( + "Error parsing room \"{room}\" during bulk room banning, \ + ignoring error and logging here: {e}" + ); + continue; }, } } }, | Err(e) => { - if force { - // ignore rooms we failed to parse if we're force deleting - error!( - "Error parsing room \"{room}\" during bulk room banning, ignoring error \ - and logging here: {e}" - ); - continue; - } - - return Ok(RoomMessageEventContent::text_plain(format!( - "{room} is not a valid room ID or room alias, please fix the list and try \ - again: {e}" - ))); + warn!( + "Error parsing room \"{room}\" during bulk room banning, ignoring error and \ + logging here: {e}" + ); + continue; }, } } @@ -419,56 +311,27 @@ async fn ban_list_of_rooms( debug!("Banned {room_id} successfully"); room_ban_count = 
room_ban_count.saturating_add(1); - debug!("Making all users leave the room {}", &room_id); - if force { - let mut users = self - .services - .rooms - .state_cache - .room_members(&room_id) - .ready_filter(|user| self.services.globals.user_is_local(user)) - .boxed(); + debug!("Making all users leave the room {room_id} and forgetting it"); + let mut users = self + .services + .rooms + .state_cache + .room_members(&room_id) + .map(ToOwned::to_owned) + .ready_filter(|user| self.services.globals.user_is_local(user)) + .boxed(); - while let Some(local_user) = users.next().await { - debug!( - "Attempting leave for user {local_user} in room {room_id} (forced, ignoring \ - all errors, evicting admins too)", - ); + while let Some(ref user_id) = users.next().await { + debug!( + "Attempting leave for user {user_id} in room {room_id} (ignoring all errors, \ + evicting admins too)", + ); - if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { - warn!(%e, "Failed to leave room"); - } + if let Err(e) = leave_room(self.services, user_id, &room_id, None).await { + warn!("Failed to leave room: {e}"); } - } else { - let mut users = self - .services - .rooms - .state_cache - .room_members(&room_id) - .ready_filter(|user| self.services.globals.user_is_local(user)) - .boxed(); - while let Some(local_user) = users.next().await { - if self.services.users.is_admin(local_user).await { - continue; - } - - debug!("Attempting leave for user {local_user} in room {room_id}"); - if let Err(e) = leave_room(self.services, local_user, &room_id, None).await { - error!( - "Error attempting to make local user {local_user} leave room {room_id} \ - during bulk room banning: {e}", - ); - - return Ok(RoomMessageEventContent::text_plain(format!( - "Error attempting to make local user {} leave room {} during room \ - banning (room is still banned but not removing any more users and not \ - banning any more rooms): {}\nIf you would like to ignore errors, use \ - --force", - &local_user, &room_id, e - ))); - } - } + self.services.rooms.state_cache.forget(&room_id, user_id); } // remove any local aliases, ignore errors @@ -490,29 +353,17 @@ async fn ban_list_of_rooms( // unpublish from room directory, ignore errors self.services.rooms.directory.set_not_public(&room_id); - if disable_federation { - self.services.rooms.metadata.disable_room(&room_id, true); - } + self.services.rooms.metadata.disable_room(&room_id, true); } - if disable_federation { - Ok(RoomMessageEventContent::text_plain(format!( - "Finished bulk room ban, banned {room_ban_count} total rooms, evicted all users, \ - and disabled incoming federation with the room." - ))) - } else { - Ok(RoomMessageEventContent::text_plain(format!( - "Finished bulk room ban, banned {room_ban_count} total rooms and evicted all users." - ))) - } + Ok(RoomMessageEventContent::text_plain(format!( + "Finished bulk room ban, banned {room_ban_count} total rooms, evicted all users, and \ + disabled incoming federation with the room." 
+ ))) } #[admin_command] -async fn unban_room( - &self, - enable_federation: bool, - room: Box, -) -> Result { +async fn unban_room(&self, room: Box) -> Result { let room_id = if room.is_room_id() { let room_id = match RoomId::parse(&room) { | Ok(room_id) => room_id, @@ -595,15 +446,8 @@ async fn unban_room( )); }; - if enable_federation { - self.services.rooms.metadata.disable_room(&room_id, false); - return Ok(RoomMessageEventContent::text_plain("Room unbanned.")); - } - - Ok(RoomMessageEventContent::text_plain( - "Room unbanned, you may need to re-enable federation with the room using enable-room if \ - this is a remote room to make it fully functional.", - )) + self.services.rooms.metadata.disable_room(&room_id, false); + Ok(RoomMessageEventContent::text_plain("Room unbanned and federation re-enabled.")) } #[admin_command] From 0b012b529f2c925f2bc20aee2381e2d30f116c46 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 8 Mar 2025 18:59:51 -0500 Subject: [PATCH 113/310] comment gotestfmt for now Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 12 ------------ bin/complement | 15 +++++++-------- 2 files changed, 7 insertions(+), 20 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9a1366f1..cd7d2484 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -176,13 +176,6 @@ jobs: path: complement_test_results.jsonl if-no-files-found: error - - name: Upload Complement logs (gotestfmt) - uses: actions/upload-artifact@v4 - with: - name: complement_test_logs_gotestfmt.log - path: complement_test_logs_gotestfmt.log - if-no-files-found: error - - name: Diff Complement results with checked-in repo results run: | diff -u --color=always tests/test_results/complement/test_results.jsonl complement_test_results.jsonl > >(tee -a complement_diff_output.log) @@ -204,11 +197,6 @@ jobs: echo '```diff' >> $GITHUB_STEP_SUMMARY tail -n 100 complement_diff_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY echo '```' >> $GITHUB_STEP_SUMMARY - - echo '# Complement gotestfmt logs (last 100 lines)' >> $GITHUB_STEP_SUMMARY - echo '```diff' >> $GITHUB_STEP_SUMMARY - tail -n 100 complement_test_logs_gotestfmt.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY fi build: diff --git a/bin/complement b/bin/complement index b869bad6..89521796 100755 --- a/bin/complement +++ b/bin/complement @@ -67,7 +67,6 @@ set +o pipefail env \ -C "$COMPLEMENT_SRC" \ COMPLEMENT_BASE_IMAGE="$COMPLEMENT_BASE_IMAGE" \ - COMPLEMENT_ENABLE_DIRTY_RUNS=1 \ go test -tags="conduwuit_blacklist" -timeout 1h -json ./tests/... 
| tee "$LOG_FILE" set -o pipefail @@ -79,17 +78,17 @@ cat "$LOG_FILE" | jq -s -c 'sort_by(.Test)[]' | jq -c ' ) | {Action: .Action, Test: .Test} ' > "$RESULTS_FILE" -if command -v gotestfmt &> /dev/null; then - echo "using gotestfmt on $LOG_FILE" - grep '^{"Time":' "$LOG_FILE" | gotestfmt > "complement_test_logs_gotestfmt.log" -fi +#if command -v gotestfmt &> /dev/null; then +# echo "using gotestfmt on $LOG_FILE" +# grep '{"Time":' "$LOG_FILE" | gotestfmt > "complement_test_logs_gotestfmt.log" +#fi echo "" echo "" echo "complement logs saved at $LOG_FILE" echo "complement results saved at $RESULTS_FILE" -if command -v gotestfmt &> /dev/null; then - echo "complement logs in gotestfmt pretty format outputted at complement_test_logs_gotestfmt.log (use an editor/terminal/pager that interprets ANSI colours and UTF-8 emojis)" -fi +#if command -v gotestfmt &> /dev/null; then +# echo "complement logs in gotestfmt pretty format outputted at complement_test_logs_gotestfmt.log (use an editor/terminal/pager that interprets ANSI colours and UTF-8 emojis)" +#fi echo "" echo "" From 06f2039eeeec2d5adf51e8ffbb470f01a8d9e868 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 9 Mar 2025 00:44:56 -0500 Subject: [PATCH 114/310] bump ruwuma to maybe fix rare device key upload issues Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 4 +++- 2 files changed, 14 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8d4688f5..f768eae1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3507,7 +3507,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "assign", "js_int", @@ -3527,7 +3527,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "js_int", "ruma-common", @@ -3539,7 +3539,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "as_variant", "assign", @@ -3562,7 +3562,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "as_variant", "base64 0.22.1", @@ -3594,7 +3594,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3619,7 +3619,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "bytes", "headers", @@ -3641,7 +3641,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3650,7 +3650,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "js_int", "ruma-common", @@ -3660,7 +3660,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3675,7 +3675,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "js_int", "ruma-common", @@ -3687,7 +3687,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=bb42118bd85e731b652a6110896b6945085bf944#bb42118bd85e731b652a6110896b6945085bf944" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index de90e63e..2bc1d20f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "bb42118bd85e731b652a6110896b6945085bf944" +rev = "d577100f5480c6c528e7a8ff59cd08d95a3a16e7" features = [ "compat", "rand", @@ -371,7 +371,9 @@ features = [ "unstable-msc3381", # polls "unstable-msc3489", # beacon / live location "unstable-msc3575", + "unstable-msc3930", # polls push rules "unstable-msc4075", + "unstable-msc4095", "unstable-msc4121", "unstable-msc4125", "unstable-msc4186", From d0c767c23c1dff11400388c5a8dd9e43f68705f1 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 9 Mar 2025 01:43:49 -0500 Subject: [PATCH 115/310] fix a few things to 
make some complement tests pass Signed-off-by: June Clementine Strawberry --- src/api/client/membership.rs | 4 +- src/api/client/room/create.rs | 8 +--- src/api/client/session.rs | 79 +++++++++++++++++++---------------- src/service/media/preview.rs | 23 ++++++---- src/service/users/mod.rs | 4 +- 5 files changed, 60 insertions(+), 58 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 940c8639..3f77e69e 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -517,9 +517,7 @@ pub(crate) async fn invite_user_route( join!(sender_ignored_recipient, recipient_ignored_by_sender); if sender_ignored_recipient { - return Err!(Request(Forbidden( - "You cannot invite users you have ignored to rooms." - ))); + return Ok(invite_user::v3::Response {}); } if let Ok(target_user_membership) = services diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index 1b8294a5..bb06e966 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -239,9 +239,7 @@ pub(crate) async fn create_room_route( if preset == RoomPreset::TrustedPrivateChat { for invite in &body.invite { if services.users.user_is_ignored(sender_user, invite).await { - return Err!(Request(Forbidden( - "You cannot invite users you have ignored to rooms." - ))); + continue; } else if services.users.user_is_ignored(invite, sender_user).await { // silently drop the invite to the recipient if they've been ignored by the // sender, pretend it worked @@ -420,9 +418,7 @@ pub(crate) async fn create_room_route( drop(state_lock); for user_id in &body.invite { if services.users.user_is_ignored(sender_user, user_id).await { - return Err!(Request(Forbidden( - "You cannot invite users you have ignored to rooms." - ))); + continue; } else if services.users.user_is_ignored(user_id, sender_user).await { // silently drop the invite to the recipient if they've been ignored by the // sender, pretend it worked diff --git a/src/api/client/session.rs b/src/api/client/session.rs index ab67ee18..3de625e4 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -3,7 +3,7 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{Err, debug, err, info, utils::ReadyExt}; -use futures::{StreamExt, TryFutureExt}; +use futures::StreamExt; use ruma::{ UserId, api::client::{ @@ -96,32 +96,50 @@ pub(crate) async fn login_route( &services.config.server_name, )?; - assert!( - services.globals.user_is_local(&user_id), - "User ID does not belong to this homeserver" - ); - assert!( - services.globals.user_is_local(&lowercased_user_id), - "User ID does not belong to this homeserver" - ); + if !services.globals.user_is_local(&user_id) + || !services.globals.user_is_local(&lowercased_user_id) + { + return Err!(Request(Unknown("User ID does not belong to this homeserver"))); + } + // first try the username as-is let hash = services .users .password_hash(&user_id) - .or_else(|_| services.users.password_hash(&lowercased_user_id)) .await - .inspect_err(|e| debug!("{e}")) - .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; + .inspect_err(|e| debug!("{e}")); - if hash.is_empty() { - return Err!(Request(UserDeactivated("The user has been deactivated"))); + match hash { + | Ok(hash) => { + if hash.is_empty() { + return Err!(Request(UserDeactivated("The user has been deactivated"))); + } + + hash::verify_password(password, &hash) + .inspect_err(|e| debug!("{e}")) + .map_err(|_| 
err!(Request(Forbidden("Wrong username or password."))))?; + + user_id + }, + | Err(_e) => { + let hash_lowercased_user_id = services + .users + .password_hash(&lowercased_user_id) + .await + .inspect_err(|e| debug!("{e}")) + .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; + + if hash_lowercased_user_id.is_empty() { + return Err!(Request(UserDeactivated("The user has been deactivated"))); + } + + hash::verify_password(password, &hash_lowercased_user_id) + .inspect_err(|e| debug!("{e}")) + .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; + + lowercased_user_id + }, } - - hash::verify_password(password, &hash) - .inspect_err(|e| debug!("{e}")) - .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?; - - user_id }, | login::v3::LoginInfo::Token(login::v3::Token { token }) => { debug!("Got token login type"); @@ -153,24 +171,11 @@ pub(crate) async fn login_route( } .map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?; - let lowercased_user_id = UserId::parse_with_server_name( - user_id.localpart().to_lowercase(), - &services.config.server_name, - )?; + if !services.globals.user_is_local(&user_id) { + return Err!(Request(Unknown("User ID does not belong to this homeserver"))); + } - assert!( - services.globals.user_is_local(&user_id), - "User ID does not belong to this homeserver" - ); - assert!( - services.globals.user_is_local(&lowercased_user_id), - "User ID does not belong to this homeserver" - ); - - if !info.is_user_match(&user_id) - && !info.is_user_match(&lowercased_user_id) - && !emergency_mode_enabled - { + if !info.is_user_match(&user_id) && !emergency_mode_enabled { return Err!(Request(Exclusive("Username is not in an appservice namespace."))); } diff --git a/src/service/media/preview.rs b/src/service/media/preview.rs index 17216869..ba5be7d4 100644 --- a/src/service/media/preview.rs +++ b/src/service/media/preview.rs @@ -7,7 +7,7 @@ use std::time::SystemTime; -use conduwuit::{Err, Result, debug}; +use conduwuit::{Err, Result, debug, err}; use conduwuit_core::implement; use ipaddress::IPAddress; use serde::Serialize; @@ -64,28 +64,33 @@ pub async fn get_url_preview(&self, url: &Url) -> Result { async fn request_url_preview(&self, url: &Url) -> Result { if let Ok(ip) = IPAddress::parse(url.host_str().expect("URL previously validated")) { if !self.services.client.valid_cidr_range(&ip) { - return Err!(BadServerResponse("Requesting from this address is forbidden")); + return Err!(Request(Forbidden("Requesting from this address is forbidden"))); } } let client = &self.services.client.url_preview; let response = client.head(url.as_str()).send().await?; + debug!(?url, "URL preview response headers: {:?}", response.headers()); + if let Some(remote_addr) = response.remote_addr() { + debug!(?url, "URL preview response remote address: {:?}", remote_addr); + if let Ok(ip) = IPAddress::parse(remote_addr.ip().to_string()) { if !self.services.client.valid_cidr_range(&ip) { - return Err!(BadServerResponse("Requesting from this address is forbidden")); + return Err!(Request(Forbidden("Requesting from this address is forbidden"))); } } } - let Some(content_type) = response - .headers() - .get(reqwest::header::CONTENT_TYPE) - .and_then(|x| x.to_str().ok()) - else { - return Err!(Request(Unknown("Unknown Content-Type"))); + let Some(content_type) = response.headers().get(reqwest::header::CONTENT_TYPE) else { + return Err!(Request(Unknown("Unknown or invalid Content-Type header"))); }; + + let content_type = 
content_type + .to_str() + .map_err(|e| err!(Request(Unknown("Unknown or invalid Content-Type header: {e}"))))?; + let data = match content_type { | html if html.starts_with("text/html") => self.download_html(url.as_str()).await?, | img if img.starts_with("image/") => self.download_image(url.as_str()).await?, diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index b3f5db88..5265e64b 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -278,11 +278,9 @@ impl Service { initial_device_display_name: Option, client_ip: Option, ) -> Result<()> { - // This method should never be called for nonexistent users. We shouldn't assert - // though... if !self.exists(user_id).await { return Err!(Request(InvalidParam(error!( - "Called create_device for non-existent {user_id}" + "Called create_device for non-existent user {user_id}" )))); } From 47ff91243d0da2088806351c040ac1386c92c63d Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 9 Mar 2025 03:33:29 -0400 Subject: [PATCH 116/310] update complement results Signed-off-by: June Clementine Strawberry --- .../complement/test_results.jsonl | 112 ++++++++++++++---- 1 file changed, 89 insertions(+), 23 deletions(-) diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index fed43b48..7b06510b 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -6,9 +6,9 @@ {"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events"} {"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events/incremental_sync"} {"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_has_events/initial_sync"} -{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_is_empty"} +{"Action":"pass","Test":"TestArchivedRoomsHistory/timeline_is_empty"} {"Action":"skip","Test":"TestArchivedRoomsHistory/timeline_is_empty/incremental_sync"} -{"Action":"fail","Test":"TestArchivedRoomsHistory/timeline_is_empty/initial_sync"} +{"Action":"pass","Test":"TestArchivedRoomsHistory/timeline_is_empty/initial_sync"} {"Action":"fail","Test":"TestAsyncUpload"} {"Action":"fail","Test":"TestAsyncUpload/Cannot_upload_to_a_media_ID_that_has_already_been_uploaded_to"} {"Action":"fail","Test":"TestAsyncUpload/Create_media"} @@ -82,7 +82,7 @@ {"Action":"pass","Test":"TestContent"} {"Action":"pass","Test":"TestContentCSAPIMediaV1"} {"Action":"pass","Test":"TestContentMediaV1"} -{"Action":"fail","Test":"TestCumulativeJoinLeaveJoinSync"} +{"Action":"pass","Test":"TestCumulativeJoinLeaveJoinSync"} {"Action":"pass","Test":"TestDeactivateAccount"} {"Action":"pass","Test":"TestDeactivateAccount/After_deactivating_account,_can't_log_in_with_password"} {"Action":"pass","Test":"TestDeactivateAccount/Can't_deactivate_account_with_wrong_password"} @@ -153,10 +153,10 @@ {"Action":"fail","Test":"TestFederationKeyUploadQuery/Can_query_remote_device_keys_using_POST"} {"Action":"pass","Test":"TestFederationRedactSendsWithoutEvent"} {"Action":"pass","Test":"TestFederationRejectInvite"} -{"Action":"fail","Test":"TestFederationRoomsInvite"} -{"Action":"fail","Test":"TestFederationRoomsInvite/Parallel"} +{"Action":"pass","Test":"TestFederationRoomsInvite"} +{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation"} 
-{"Action":"fail","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_for_empty_room"} +{"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_for_empty_room"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_can_reject_invite_over_federation_several_times"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Invited_user_has_'is_direct'_flag_in_prev_content_after_joining"} {"Action":"pass","Test":"TestFederationRoomsInvite/Parallel/Remote_invited_user_can_join_the_room_when_homeserver_is_already_participating_in_the_room"} @@ -173,7 +173,7 @@ {"Action":"pass","Test":"TestFetchMessagesFromNonExistentRoom"} {"Action":"pass","Test":"TestFilter"} {"Action":"fail","Test":"TestFilterMessagesByRelType"} -{"Action":"fail","Test":"TestGappedSyncLeaveSection"} +{"Action":"pass","Test":"TestGappedSyncLeaveSection"} {"Action":"fail","Test":"TestGetFilteredRoomMembers"} {"Action":"fail","Test":"TestGetFilteredRoomMembers/membership/join"} {"Action":"fail","Test":"TestGetFilteredRoomMembers/membership/leave"} @@ -191,7 +191,7 @@ {"Action":"pass","Test":"TestInboundFederationProfile/Inbound_federation_can_query_profile_data"} {"Action":"pass","Test":"TestInboundFederationProfile/Non-numeric_ports_in_server_names_are_rejected"} {"Action":"fail","Test":"TestInboundFederationRejectsEventsWithRejectedAuthEvents"} -{"Action":"fail","Test":"TestInviteFromIgnoredUsersDoesNotAppearInSync"} +{"Action":"pass","Test":"TestInviteFromIgnoredUsersDoesNotAppearInSync"} {"Action":"pass","Test":"TestIsDirectFlagFederation"} {"Action":"pass","Test":"TestIsDirectFlagLocal"} {"Action":"pass","Test":"TestJoinFederatedRoomFailOver"} @@ -281,7 +281,7 @@ {"Action":"pass","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock"} {"Action":"pass","Test":"TestKnockingInMSC3787Room/Users_in_the_room_see_a_user's_membership_update_when_they_knock#01"} {"Action":"pass","Test":"TestLeakyTyping"} -{"Action":"fail","Test":"TestLeaveEventInviteRejection"} +{"Action":"pass","Test":"TestLeaveEventInviteRejection"} {"Action":"fail","Test":"TestLeaveEventVisibility"} {"Action":"fail","Test":"TestLeftRoomFixture"} {"Action":"fail","Test":"TestLeftRoomFixture/Can_get_'m.room.name'_state_for_a_departed_room"} @@ -292,10 +292,10 @@ {"Action":"pass","Test":"TestLocalPngThumbnail"} {"Action":"pass","Test":"TestLocalPngThumbnail/test_/_matrix/client/v1/media_endpoint"} {"Action":"pass","Test":"TestLocalPngThumbnail/test_/_matrix/media/v3_endpoint"} -{"Action":"fail","Test":"TestLogin"} -{"Action":"fail","Test":"TestLogin/parallel"} +{"Action":"pass","Test":"TestLogin"} +{"Action":"pass","Test":"TestLogin/parallel"} {"Action":"pass","Test":"TestLogin/parallel/GET_/login_yields_a_set_of_flows"} -{"Action":"fail","Test":"TestLogin/parallel/Login_with_uppercase_username_works_and_GET_/whoami_afterwards_also"} +{"Action":"pass","Test":"TestLogin/parallel/Login_with_uppercase_username_works_and_GET_/whoami_afterwards_also"} {"Action":"pass","Test":"TestLogin/parallel/POST_/login_as_non-existing_user_is_rejected"} {"Action":"pass","Test":"TestLogin/parallel/POST_/login_can_log_in_as_a_user_with_just_the_local_part_of_the_id"} {"Action":"pass","Test":"TestLogin/parallel/POST_/login_can_login_as_user"} @@ -354,12 +354,78 @@ {"Action":"fail","Test":"TestMembershipOnEvents"} {"Action":"fail","Test":"TestNetworkPartitionOrdering"} {"Action":"pass","Test":"TestNotPresentUserCannotBanOthers"} 
-{"Action":"fail","Test":"TestOlderLeftRoomsNotInLeaveSection"} +{"Action":"pass","Test":"TestOlderLeftRoomsNotInLeaveSection"} {"Action":"fail","Test":"TestOutboundFederationEventSizeGetMissingEvents"} {"Action":"fail","Test":"TestOutboundFederationIgnoresMissingEventWithBadJSONForRoomVersion6"} {"Action":"pass","Test":"TestOutboundFederationProfile"} {"Action":"pass","Test":"TestOutboundFederationProfile/Outbound_federation_can_query_profile_data"} {"Action":"pass","Test":"TestOutboundFederationSend"} +{"Action":"fail","Test":"TestPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanFastJoinDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanLazyLoadingSyncDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveDeviceListUpdateDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsWithHalfMissingGrandparentsDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsWithHalfMissingParentsDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsWithMissingParentsDuringPartialStateJoin"} +{"Action":"skip","Test":"TestPartialStateJoin/CanReceivePresenceDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveReceiptDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveSigningKeyUpdateDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveToDeviceDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveTypingDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/CanSendEventsDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/Can_change_display_name_during_partial_state_join"} +{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking"} +{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_for_user_incorrectly_believed_to_be_in_room"} +{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_when_failing_to_complete_partial_state_join"} +{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_when_leaving_partial_state_room"} +{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_when_new_member_leaves_partial_state_room"} +{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracked_for_new_members_in_partial_state_room"} +{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_pre-existing_members_in_partial_state_room"} +{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_user_incorrectly_believed_to_be_in_room_when_they_join_another_shared_room_before_partial_state_join_completes"} +{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_user_incorrectly_believed_to_be_in_room_when_they_rejoin_after_partial_state_join_completes"} +{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_user_incorrectly_believed_to_be_in_room_when_they_rejoin_before_partial_state_join_completes"} +{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_when_pre-existing_members_in_partial_state_room_join_another_shared_room"} 
+{"Action":"fail","Test":"TestPartialStateJoin/EagerIncrementalSyncDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/EagerInitialSyncDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/EagerLongPollingSyncWokenWhenResyncCompletes"} +{"Action":"fail","Test":"TestPartialStateJoin/GappySyncAfterPartialStateSynced"} +{"Action":"fail","Test":"TestPartialStateJoin/Lazy-loading_gappy_sync_includes_remote_memberships_during_partial_state_join"} +{"Action":"fail","Test":"TestPartialStateJoin/Lazy-loading_incremental_sync_includes_remote_memberships_during_partial_state_join"} +{"Action":"fail","Test":"TestPartialStateJoin/Lazy-loading_initial_sync_includes_remote_memberships_during_partial_state_join"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/can_be_triggered_by_remote_ban"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/can_be_triggered_by_remote_kick"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/does_not_wait_for_resync"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/is_seen_after_the_resync"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/succeeds,_then_another_user_can_join_without_resync_completing"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/succeeds,_then_rejoin_succeeds_without_resync_completing"} +{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/works_after_a_second_partial_join"} +{"Action":"fail","Test":"TestPartialStateJoin/MembersRequestBlocksDuringPartialStateJoin"} +{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates"} +{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_no_longer_reach_departed_servers_after_partial_state_join_completes"} +{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_all_servers_in_partial_state_rooms"} +{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_absent_servers_once_partial_state_join_completes"} +{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_absent_servers_once_partial_state_join_completes_even_though_remote_server_left_room"} +{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_kicked_servers_once_partial_state_join_completes"} +{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_kicked_servers_once_partial_state_join_completes_even_though_remote_server_left_room"} +{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_newly_joined_servers_in_partial_state_rooms"} +{"Action":"fail","Test":"TestPartialStateJoin/PartialStateJoinContinuesAfterRestart"} +{"Action":"fail","Test":"TestPartialStateJoin/PartialStateJoinSyncsUsingOtherHomeservers"} +{"Action":"skip","Test":"TestPartialStateJoin/Purge_during_resync"} +{"Action":"fail","Test":"TestPartialStateJoin/Rejected_events_remain_rejected_after_resync"} +{"Action":"fail","Test":"TestPartialStateJoin/Rejects_make_join_during_partial_join"} +{"Action":"fail","Test":"TestPartialStateJoin/Rejects_make_knock_during_partial_join"} +{"Action":"fail","Test":"TestPartialStateJoin/Rejects_send_join_during_partial_join"} 
+{"Action":"fail","Test":"TestPartialStateJoin/Rejects_send_knock_during_partial_join"} +{"Action":"fail","Test":"TestPartialStateJoin/Resync_completes_even_when_events_arrive_before_their_prev_events"} +{"Action":"fail","Test":"TestPartialStateJoin/Room_aliases_can_be_added_and_deleted_during_a_resync"} +{"Action":"fail","Test":"TestPartialStateJoin/Room_aliases_can_be_added_and_queried_during_a_resync"} +{"Action":"skip","Test":"TestPartialStateJoin/Room_stats_are_correctly_updated_once_state_re-sync_completes"} +{"Action":"fail","Test":"TestPartialStateJoin/State_accepted_incorrectly"} +{"Action":"fail","Test":"TestPartialStateJoin/State_rejected_incorrectly"} +{"Action":"fail","Test":"TestPartialStateJoin/User_directory_is_correctly_updated_once_state_re-sync_completes"} +{"Action":"fail","Test":"TestPartialStateJoin/joined_members_blocks_during_partial_state_join"} {"Action":"fail","Test":"TestPollsLocalPushRules"} {"Action":"fail","Test":"TestPollsLocalPushRules/Polls_push_rules_are_correctly_presented_to_the_client"} {"Action":"pass","Test":"TestPowerLevels"} @@ -559,11 +625,11 @@ {"Action":"pass","Test":"TestRoomState/Parallel/PUT_/createRoom_with_creation_content"} {"Action":"pass","Test":"TestRoomState/Parallel/PUT_/rooms/:room_id/state/m.room.topic_sets_topic"} {"Action":"pass","Test":"TestRoomSummary"} -{"Action":"fail","Test":"TestRoomsInvite"} -{"Action":"fail","Test":"TestRoomsInvite/Parallel"} +{"Action":"pass","Test":"TestRoomsInvite"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel"} {"Action":"pass","Test":"TestRoomsInvite/Parallel/Can_invite_users_to_invite-only_rooms"} {"Action":"pass","Test":"TestRoomsInvite/Parallel/Invited_user_can_reject_invite"} -{"Action":"fail","Test":"TestRoomsInvite/Parallel/Invited_user_can_reject_invite_for_empty_room"} +{"Action":"pass","Test":"TestRoomsInvite/Parallel/Invited_user_can_reject_invite_for_empty_room"} {"Action":"pass","Test":"TestRoomsInvite/Parallel/Invited_user_can_see_room_metadata"} {"Action":"pass","Test":"TestRoomsInvite/Parallel/Test_that_we_can_be_reinvited_to_a_room_we_created"} {"Action":"pass","Test":"TestRoomsInvite/Parallel/Uninvited_users_cannot_join_the_room"} @@ -596,15 +662,15 @@ {"Action":"pass","Test":"TestSyncFilter"} {"Action":"pass","Test":"TestSyncFilter/Can_create_filter"} {"Action":"pass","Test":"TestSyncFilter/Can_download_filter"} -{"Action":"fail","Test":"TestSyncLeaveSection"} -{"Action":"fail","Test":"TestSyncLeaveSection/Left_rooms_appear_in_the_leave_section_of_full_state_sync"} -{"Action":"fail","Test":"TestSyncLeaveSection/Left_rooms_appear_in_the_leave_section_of_sync"} -{"Action":"fail","Test":"TestSyncLeaveSection/Newly_left_rooms_appear_in_the_leave_section_of_incremental_sync"} +{"Action":"pass","Test":"TestSyncLeaveSection"} +{"Action":"pass","Test":"TestSyncLeaveSection/Left_rooms_appear_in_the_leave_section_of_full_state_sync"} +{"Action":"pass","Test":"TestSyncLeaveSection/Left_rooms_appear_in_the_leave_section_of_sync"} +{"Action":"pass","Test":"TestSyncLeaveSection/Newly_left_rooms_appear_in_the_leave_section_of_incremental_sync"} {"Action":"pass","Test":"TestSyncOmitsStateChangeOnFilteredEvents"} {"Action":"pass","Test":"TestSyncTimelineGap"} {"Action":"pass","Test":"TestSyncTimelineGap/full"} {"Action":"pass","Test":"TestSyncTimelineGap/incremental"} -{"Action":"fail","Test":"TestTentativeEventualJoiningAfterRejecting"} +{"Action":"pass","Test":"TestTentativeEventualJoiningAfterRejecting"} {"Action":"fail","Test":"TestThreadReceiptsInSyncMSC4102"} 
{"Action":"fail","Test":"TestThreadedReceipts"} {"Action":"fail","Test":"TestThreadsEndpoint"} @@ -635,8 +701,8 @@ {"Action":"pass","Test":"TestUploadKey/Parallel/Can_query_device_keys_using_POST"} {"Action":"pass","Test":"TestUploadKey/Parallel/Can_query_specific_device_keys_using_POST"} {"Action":"pass","Test":"TestUploadKey/Parallel/Can_upload_device_keys"} -{"Action":"fail","Test":"TestUploadKey/Parallel/Rejects_invalid_device_keys"} -{"Action":"fail","Test":"TestUploadKey/Parallel/Should_reject_keys_claiming_to_belong_to_a_different_user"} +{"Action":"pass","Test":"TestUploadKey/Parallel/Rejects_invalid_device_keys"} +{"Action":"pass","Test":"TestUploadKey/Parallel/Should_reject_keys_claiming_to_belong_to_a_different_user"} {"Action":"pass","Test":"TestUploadKey/Parallel/query_for_user_with_no_keys_returns_empty_key_dict"} {"Action":"pass","Test":"TestUploadKeyIdempotency"} {"Action":"pass","Test":"TestUploadKeyIdempotencyOverlap"} From 0e342aab7f2a173638fa723a9d36ae16fe9396d1 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 9 Mar 2025 13:44:57 -0400 Subject: [PATCH 117/310] fix a few error codes Signed-off-by: June Clementine Strawberry --- src/api/client/alias.rs | 2 +- src/api/client/context.rs | 18 ++++++++++++------ src/api/client/state.rs | 12 +++++++----- 3 files changed, 20 insertions(+), 12 deletions(-) diff --git a/src/api/client/alias.rs b/src/api/client/alias.rs index 319e5141..9cd7e0c5 100644 --- a/src/api/client/alias.rs +++ b/src/api/client/alias.rs @@ -92,7 +92,7 @@ pub(crate) async fn get_alias_route( let Ok((room_id, servers)) = services.rooms.alias.resolve_alias(&room_alias, None).await else { - return Err!(Request(NotFound("Room with alias not found."))); + return Err!(Request(Unknown("Room with alias not found."))); }; let servers = room_available_servers(&services, &room_id, &room_alias, servers).await; diff --git a/src/api/client/context.rs b/src/api/client/context.rs index 3f16c850..cb95dfef 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -1,6 +1,6 @@ use axum::extract::State; use conduwuit::{ - Err, PduEvent, Result, at, err, ref_at, + Err, PduEvent, Result, at, debug_warn, err, ref_at, utils::{ IterStream, future::TryExtExt, @@ -35,8 +35,13 @@ pub(crate) async fn get_context_route( let sender = body.sender(); let (sender_user, sender_device) = sender; let room_id = &body.room_id; + let event_id = &body.event_id; let filter = &body.filter; + if !services.rooms.metadata.exists(room_id).await { + return Err!(Request(Forbidden("Room does not exist to this server"))); + } + // Use limit or else 10, with maximum 100 let limit: usize = body .limit @@ -47,29 +52,30 @@ pub(crate) async fn get_context_route( let base_id = services .rooms .timeline - .get_pdu_id(&body.event_id) + .get_pdu_id(event_id) .map_err(|_| err!(Request(NotFound("Event not found.")))); let base_pdu = services .rooms .timeline - .get_pdu(&body.event_id) + .get_pdu(event_id) .map_err(|_| err!(Request(NotFound("Base event not found.")))); let visible = services .rooms .state_accessor - .user_can_see_event(sender_user, &body.room_id, &body.event_id) + .user_can_see_event(sender_user, room_id, event_id) .map(Ok); let (base_id, base_pdu, visible) = try_join3(base_id, base_pdu, visible).await?; - if base_pdu.room_id != body.room_id || base_pdu.event_id != body.event_id { + if base_pdu.room_id != *room_id || base_pdu.event_id != *event_id { return Err!(Request(NotFound("Base event not found."))); } if !visible { - return Err!(Request(Forbidden("You don't have 
permission to view this event."))); + debug_warn!(req_evt = ?event_id, ?base_id, ?room_id, "Event requested by {sender_user} but is not allowed to see it, returning 404"); + return Err!(Request(NotFound("Event not found."))); } let base_count = base_id.pdu_count(); diff --git a/src/api/client/state.rs b/src/api/client/state.rs index c92091eb..d04aac35 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -27,7 +27,7 @@ pub(crate) async fn send_state_event_for_key_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); Ok(send_state_event::v3::Response { event_id: send_state_event_for_key_helper( @@ -103,7 +103,7 @@ pub(crate) async fn get_state_events_for_key_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if !services .rooms @@ -111,7 +111,9 @@ pub(crate) async fn get_state_events_for_key_route( .user_can_see_state_events(sender_user, &body.room_id) .await { - return Err!(Request(Forbidden("You don't have permission to view the room state."))); + return Err!(Request(NotFound(debug_warn!( + "You don't have permission to view the room state." + )))); } let event = services @@ -316,14 +318,14 @@ async fn allowed_to_send_state_event( services.rooms.alias.resolve_alias(&alias, None).await?; if alias_room_id != room_id { - return Err!(Request(Forbidden( + return Err!(Request(Unknown( "Room alias {alias} does not belong to room {room_id}" ))); } } }, | Err(e) => { - return Err!(Request(BadJson(debug_warn!( + return Err!(Request(InvalidParam(debug_warn!( "Room canonical alias event is invalid: {e}" )))); }, From 0e2ca7d7192684a945ac49aa53066c488dd40886 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 9 Mar 2025 21:55:07 -0400 Subject: [PATCH 118/310] implement disable TLS validation config option Signed-off-by: June Clementine Strawberry --- nix/pkgs/complement/config.toml | 2 ++ src/core/config/check.rs | 4 ++++ src/core/config/mod.rs | 12 +++++++++++- src/service/client/mod.rs | 3 ++- 4 files changed, 19 insertions(+), 2 deletions(-) diff --git a/nix/pkgs/complement/config.toml b/nix/pkgs/complement/config.toml index 99c151c5..4d7637db 100644 --- a/nix/pkgs/complement/config.toml +++ b/nix/pkgs/complement/config.toml @@ -32,6 +32,8 @@ allow_legacy_media = true startup_netburst = true startup_netburst_keep = -1 +allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure = true + # valgrind makes things so slow dns_timeout = 60 dns_attempts = 20 diff --git a/src/core/config/check.rs b/src/core/config/check.rs index 98223be4..f9d51eeb 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -28,6 +28,10 @@ pub fn check(config: &Config) -> Result { warn!("Note: conduwuit was built without optimisations (i.e. 
debug build)"); } + if config.allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure { + warn!("\n\nWARNING: \n\nTLS CERTIFICATE VALIDATION IS DISABLED, THIS IS HIGHLY INSECURE AND SHOULD NOT BE USED IN PRODUCTION.\n\n"); + } + warn_deprecated(config); warn_unknown_key(config); diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index e69a56b9..6b669ad3 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -52,7 +52,7 @@ use crate::{Result, err, error::Error, utils::sys}; ### For more information, see: ### https://conduwuit.puppyirl.gay/configuration.html "#, - ignore = "catchall well_known tls blurhashing" + ignore = "catchall well_known tls blurhashing allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure" )] pub struct Config { /// The server_name is the pretty name of this server. It is used as a @@ -1806,6 +1806,16 @@ pub struct Config { #[serde(default = "true_fn")] pub config_reload_signal: bool, + /// Toggles ignore checking/validating TLS certificates + /// + /// This applies to everything, including URL previews, federation requests, + /// etc. This is a hidden argument that should NOT be used in production as + /// it is highly insecure and I will personally yell at you if I catch you + /// using this. + #[serde(default)] + pub allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure: + bool, + // external structure; separate section #[serde(default)] pub blurhashing: BlurhashConfig, diff --git a/src/service/client/mod.rs b/src/service/client/mod.rs index d5008491..d51e5721 100644 --- a/src/service/client/mod.rs +++ b/src/service/client/mod.rs @@ -128,7 +128,8 @@ fn base(config: &Config) -> Result { .pool_max_idle_per_host(config.request_idle_per_host.into()) .user_agent(conduwuit::version::user_agent()) .redirect(redirect::Policy::limited(6)) - .connection_verbose(true); + .danger_accept_invalid_certs(config.allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure) + .connection_verbose(cfg!(debug_assertions)); #[cfg(feature = "gzip_compression")] { From df1edcf498ac58e27e6ff261b0d53a773d82f69f Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 10 Mar 2025 10:32:11 -0400 Subject: [PATCH 119/310] adjust complement cert generation Signed-off-by: June Clementine Strawberry --- bin/complement | 1 + nix/pkgs/complement/config.toml | 2 -- nix/pkgs/complement/default.nix | 22 +++++++------------ nix/pkgs/complement/private_key.key | 28 +++++++++++++++++++++++++ nix/pkgs/complement/signing_request.csr | 16 ++++++++++++++ 5 files changed, 53 insertions(+), 16 deletions(-) create mode 100644 nix/pkgs/complement/private_key.key create mode 100644 nix/pkgs/complement/signing_request.csr diff --git a/bin/complement b/bin/complement index 89521796..92539f97 100755 --- a/bin/complement +++ b/bin/complement @@ -40,6 +40,7 @@ if [ ! 
-f "complement_oci_image.tar.gz" ]; then # if using macOS, use linux-complement #bin/nix-build-and-cache just .#linux-complement bin/nix-build-and-cache just .#complement + #nix build -L .#complement echo "complement conduwuit image tar.gz built at \"result\"" diff --git a/nix/pkgs/complement/config.toml b/nix/pkgs/complement/config.toml index 4d7637db..759f8d78 100644 --- a/nix/pkgs/complement/config.toml +++ b/nix/pkgs/complement/config.toml @@ -47,6 +47,4 @@ sender_idle_timeout = 300 sender_retry_backoff_limit = 300 [global.tls] -certs = "/certificate.crt" dual_protocol = true -key = "/private_key.key" diff --git a/nix/pkgs/complement/default.nix b/nix/pkgs/complement/default.nix index d9af0779..bbd1bd74 100644 --- a/nix/pkgs/complement/default.nix +++ b/nix/pkgs/complement/default.nix @@ -42,25 +42,18 @@ let start = writeShellScriptBin "start" '' set -euxo pipefail - ${lib.getExe openssl} genrsa -out private_key.key 2048 - ${lib.getExe openssl} req \ - -new \ - -sha256 \ - -key private_key.key \ - -subj "/C=US/ST=CA/O=MyOrg, Inc./CN=$SERVER_NAME" \ - -out signing_request.csr - cp ${./v3.ext} v3.ext - echo "DNS.1 = $SERVER_NAME" >> v3.ext + cp ${./v3.ext} /complement/v3.ext + echo "DNS.1 = $SERVER_NAME" >> /complement/v3.ext echo "IP.1 = $(${lib.getExe gawk} 'END{print $1}' /etc/hosts)" \ - >> v3.ext + >> /complement/v3.ext ${lib.getExe openssl} x509 \ -req \ - -extfile v3.ext \ - -in signing_request.csr \ + -extfile /complement/v3.ext \ + -in ${./signing_request.csr} \ -CA /complement/ca/ca.crt \ -CAkey /complement/ca/ca.key \ -CAcreateserial \ - -out certificate.crt \ + -out /complement/certificate.crt \ -days 1 \ -sha256 @@ -99,7 +92,8 @@ dockerTools.buildImage { else []; Env = [ - "SSL_CERT_FILE=/complement/ca/ca.crt" + "CONDUWUIT_TLS__KEY=${./private_key.key}" + "CONDUWUIT_TLS__CERTS=/complement/certificate.crt" "CONDUWUIT_CONFIG=${./config.toml}" "RUST_BACKTRACE=full" ]; diff --git a/nix/pkgs/complement/private_key.key b/nix/pkgs/complement/private_key.key new file mode 100644 index 00000000..5b9d4d4f --- /dev/null +++ b/nix/pkgs/complement/private_key.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDS/odmZivxajeb +iyT7SMuhXqnMm+hF+zEARLcbieem0wG4x7gi2S6WLf8DlifdXax6me13eYk4rBnT +LvGEvNNx0px5M54H+FVyoVa3c1tmA66WUcZjobafPGsDh5j+5qpScgWwjkMPGg1a +09CphCFswO4PpxUUORX/OTGj/rEKxximW6OtavBwaS9F7mqjXJK7lCrcZxKq5ucc +ebGMmCoO660hROSTBaFigdRTVicclk+NgYRrZyWbCiuXPjQ0jlOE2rcaDepqTUga +Qs/2tdT4kBzBH6kZOiQOIN/ddXaj032QXr1HQYfIJfJmiM6nmRob8nik5rpZdWNO +/Ncsro/fAgMBAAECggEAITCCkfv+a5I+vwvrPE/eIDso0JOxvNhfg+BLQVy3AMnu +WmeoMmshZeREWgcTrEGg8QQnk4Sdrjl8MnkO6sddJ2luza3t7OkGX+q7Hk5aETkB +DIo+f8ufU3sIhlydF3OnVSK0fGpUaBq8AQ6Soyeyrk3G5NVufmjgae5QPbDBnqUb +piOGyfcwagL4JtCbZsMk8AT7vQSynLm6zaWsVzWNd71jummLqtVV063K95J9PqVN +D8meEcP3WR5kQrvf+mgy9RVgWLRtVWN8OLZfJ9yrnl4Efj62elrldUj4jaCFezGQ +8f0W+d8jjt038qhmEdymw2MWQ+X/b0R79lJar1Up8QKBgQD1DtHxauhl+JUoI3y+ +3eboqXl7YPJt1/GTnChb4b6D1Z1hvLsOKUa7hjGEfruYGbsWXBCRMICdfzp+iWcq +/lEOp7/YU9OaW4lQMoG4sXMoBWd9uLgg0E+aH6VDJOBvxsfafqM4ufmtspzwEm90 +FU1cq6oImomFnPChSq4X+3+YpwKBgQDcalaK9llCcscWA8HAP8WVVNTjCOqiDp9q +td61E9IO/FIB/gW5y+JkaFRrA2CN1zY3s3K92uveLTNYTArecWlDcPNNFDuaYu2M +Roz4bC104HGh+zztJ0iPVzELL81Lgg6wHhLONN+eVi4gTftJxzJFXybyb+xVT25A +91ynKXB+CQKBgQC+Ub43MoI+/6pHvBfb3FbDByvz6D0flgBmVXb6tP3TQYmzKHJV +8zSd2wCGGC71V7Z3DRVIzVR1/SOetnPLbivhp+JUzfWfAcxI3pDksdvvjxLrDxTh +VycbWcxtsywjY0w/ou581eLVRcygnpC0pP6qJCAwAmUfwd0YRvmiYo6cLQKBgHIW +UIlJDdaJFmdctnLOD3VGHZMOUHRlYTqYvJe5lKbRD5mcZFZRI/OY1Ok3LEj+tj+K 
+kL+YizHK76KqaY3N4hBYbHbfHCLDRfWvptQHGlg+vFJ9eoG+LZ6UIPyLV5XX0cZz +KoS1dXG9Zc6uznzXsDucDsq6B/f4TzctUjXsCyARAoGAOKb4HtuNyYAW0jUlujR7 +IMHwUesOGlhSXqFtP9aTvk6qJgvV0+3CKcWEb4y02g+uYftP8BLNbJbIt9qOqLYh +tOVyzCoamAi8araAhjA0w4dXvqDCDK7k/gZFkojmKQtRijoxTHnWcDc3vAjYCgaM +9MVtdgSkuh2gwkD/mMoAJXM= +-----END PRIVATE KEY----- diff --git a/nix/pkgs/complement/signing_request.csr b/nix/pkgs/complement/signing_request.csr new file mode 100644 index 00000000..707e73b4 --- /dev/null +++ b/nix/pkgs/complement/signing_request.csr @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICkTCCAXkCAQAwTDELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRYwFAYDVQQK +DA13b29mZXJzLCBpbmMuMRgwFgYDVQQDDA9jb21wbGVtZW50LW9ubHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDS/odmZivxajebiyT7SMuhXqnMm+hF ++zEARLcbieem0wG4x7gi2S6WLf8DlifdXax6me13eYk4rBnTLvGEvNNx0px5M54H ++FVyoVa3c1tmA66WUcZjobafPGsDh5j+5qpScgWwjkMPGg1a09CphCFswO4PpxUU +ORX/OTGj/rEKxximW6OtavBwaS9F7mqjXJK7lCrcZxKq5uccebGMmCoO660hROST +BaFigdRTVicclk+NgYRrZyWbCiuXPjQ0jlOE2rcaDepqTUgaQs/2tdT4kBzBH6kZ +OiQOIN/ddXaj032QXr1HQYfIJfJmiM6nmRob8nik5rpZdWNO/Ncsro/fAgMBAAGg +ADANBgkqhkiG9w0BAQsFAAOCAQEAjW+aD4E0phtRT5b2RyedY1uiSe7LQECsQnIO +wUSyGGG1GXYlJscyxxyzE9W9+QIALrxZkmc/+e02u+bFb1zQXW/uB/7u7FgXzrj6 +2YSDiWYXiYKvgGWEfCi3lpcTJK9x6WWkR+iREaoKRjcl0ynhhGuR7YwP38TNyu+z +FN6B1Lo398fvJkaTCiiHngWiwztXZ2d0MxkicuwZ1LJhIQA72OTl3QoRb5uiqbze +T9QJfU6W3v8cB8c8PuKMv5gl1QsGNtlfyQB56/X0cMxWl25vWXd2ankLkAGRTDJ8 +9YZHxP1ki4/yh75AknFq02nCOsmxYrAazCYgP2TzIPhQwBurKQ== +-----END CERTIFICATE REQUEST----- From 5ba0c02d526d77b9d983335af76585cd49be12c1 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 10 Mar 2025 12:29:54 -0400 Subject: [PATCH 120/310] bump ruwuma to fix a threads issue, fix more error codes, delete legacy sytest cruft Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +- Cargo.toml | 2 +- src/api/client/alias.rs | 2 +- src/api/client/state.rs | 12 +- tests/sytest/are-we-synapse-yet.list | 866 ----------------------- tests/sytest/are-we-synapse-yet.py | 266 ------- tests/sytest/show-expected-fail-tests.sh | 105 --- tests/sytest/sytest-blacklist | 7 - tests/sytest/sytest-whitelist | 516 -------------- 9 files changed, 22 insertions(+), 1776 deletions(-) delete mode 100644 tests/sytest/are-we-synapse-yet.list delete mode 100755 tests/sytest/are-we-synapse-yet.py delete mode 100755 tests/sytest/show-expected-fail-tests.sh delete mode 100644 tests/sytest/sytest-blacklist delete mode 100644 tests/sytest/sytest-whitelist diff --git a/Cargo.lock b/Cargo.lock index f768eae1..65e8eca1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3507,7 +3507,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "assign", "js_int", @@ -3527,7 +3527,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "js_int", "ruma-common", @@ -3539,7 +3539,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "as_variant", "assign", @@ -3562,7 +3562,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "as_variant", "base64 0.22.1", @@ -3594,7 +3594,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3619,7 +3619,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "bytes", "headers", @@ -3641,7 +3641,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3650,7 +3650,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "js_int", "ruma-common", @@ -3660,7 +3660,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3675,7 +3675,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "js_int", "ruma-common", @@ -3687,7 +3687,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d577100f5480c6c528e7a8ff59cd08d95a3a16e7#d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index 2bc1d20f..d611c08e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "d577100f5480c6c528e7a8ff59cd08d95a3a16e7" +rev = "f5ab6302aaa55a14827a9cb5b40e980dd135fe14" features = [ "compat", "rand", diff --git a/src/api/client/alias.rs b/src/api/client/alias.rs index 9cd7e0c5..319e5141 100644 --- a/src/api/client/alias.rs +++ b/src/api/client/alias.rs @@ -92,7 +92,7 @@ pub(crate) async fn get_alias_route( let Ok((room_id, servers)) = services.rooms.alias.resolve_alias(&room_alias, None).await else { - return Err!(Request(Unknown("Room with alias not found."))); + return Err!(Request(NotFound("Room with alias not found."))); }; let servers = room_available_servers(&services, &room_id, &room_alias, servers).await; diff --git a/src/api/client/state.rs b/src/api/client/state.rs index d04aac35..db79735f 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -314,11 +314,17 @@ async fn allowed_to_send_state_event( } for alias in aliases { - let (alias_room_id, _servers) = - services.rooms.alias.resolve_alias(&alias, None).await?; + let (alias_room_id, _servers) = services + .rooms + .alias + .resolve_alias(&alias, None) + .await + .map_err(|e| { + err!(Request(Unknown("Failed resolving alias \"{alias}\": {e}"))) + })?; if alias_room_id != room_id { - return Err!(Request(Unknown( + return Err!(Request(BadAlias( "Room alias {alias} does not belong to room {room_id}" ))); } diff --git a/tests/sytest/are-we-synapse-yet.list b/tests/sytest/are-we-synapse-yet.list deleted file mode 100644 index 99091989..00000000 --- a/tests/sytest/are-we-synapse-yet.list +++ /dev/null @@ -1,866 +0,0 @@ -reg GET /register yields a set of flows -reg POST /register can create a user -reg POST /register downcases capitals in usernames -reg POST /register returns the same device_id as that in the request -reg POST /register rejects registration of usernames with '!' -reg POST /register rejects registration of usernames with '"' -reg POST /register rejects registration of usernames with ':' -reg POST /register rejects registration of usernames with '?' 
-reg POST /register rejects registration of usernames with '\' -reg POST /register rejects registration of usernames with '@' -reg POST /register rejects registration of usernames with '[' -reg POST /register rejects registration of usernames with ']' -reg POST /register rejects registration of usernames with '{' -reg POST /register rejects registration of usernames with '|' -reg POST /register rejects registration of usernames with '}' -reg POST /register rejects registration of usernames with '£' -reg POST /register rejects registration of usernames with 'é' -reg POST /register rejects registration of usernames with '\n' -reg POST /register rejects registration of usernames with ''' -reg POST /r0/admin/register with shared secret -reg POST /r0/admin/register admin with shared secret -reg POST /r0/admin/register with shared secret downcases capitals -reg POST /r0/admin/register with shared secret disallows symbols -reg POST rejects invalid utf-8 in JSON -log GET /login yields a set of flows -log POST /login can log in as a user -log POST /login returns the same device_id as that in the request -log POST /login can log in as a user with just the local part of the id -log POST /login as non-existing user is rejected -log POST /login wrong password is rejected -log Interactive authentication types include SSO -log Can perform interactive authentication with SSO -log The user must be consistent through an interactive authentication session with SSO -log The operation must be consistent through an interactive authentication session -v1s GET /events initially -v1s GET /initialSync initially -csa Version responds 200 OK with valid structure -pro PUT /profile/:user_id/displayname sets my name -pro GET /profile/:user_id/displayname publicly accessible -pro PUT /profile/:user_id/avatar_url sets my avatar -pro GET /profile/:user_id/avatar_url publicly accessible -dev GET /device/{deviceId} -dev GET /device/{deviceId} gives a 404 for unknown devices -dev GET /devices -dev PUT /device/{deviceId} updates device fields -dev PUT /device/{deviceId} gives a 404 for unknown devices -dev DELETE /device/{deviceId} -dev DELETE /device/{deviceId} requires UI auth user to match device owner -dev DELETE /device/{deviceId} with no body gives a 401 -dev The deleted device must be consistent through an interactive auth session -dev Users receive device_list updates for their own devices -pre GET /presence/:user_id/status fetches initial status -pre PUT /presence/:user_id/status updates my presence -crm POST /createRoom makes a public room -crm POST /createRoom makes a private room -crm POST /createRoom makes a private room with invites -crm POST /createRoom makes a room with a name -crm POST /createRoom makes a room with a topic -syn Can /sync newly created room -crm POST /createRoom creates a room with the given version -crm POST /createRoom rejects attempts to create rooms with numeric versions -crm POST /createRoom rejects attempts to create rooms with unknown versions -crm POST /createRoom ignores attempts to set the room version via creation_content -mem GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership -mem GET /rooms/:room_id/state/m.room.member/:user_id?format=event fetches my membership event -rst GET /rooms/:room_id/state/m.room.power_levels fetches powerlevels -mem GET /rooms/:room_id/joined_members fetches my membership -v1s GET /rooms/:room_id/initialSync fetches initial sync state -pub GET /publicRooms lists newly-created room -ali GET /directory/room/:room_alias yields room ID 
-mem GET /joined_rooms lists newly-created room -rst POST /rooms/:room_id/state/m.room.name sets name -rst GET /rooms/:room_id/state/m.room.name gets name -rst POST /rooms/:room_id/state/m.room.topic sets topic -rst GET /rooms/:room_id/state/m.room.topic gets topic -rst GET /rooms/:room_id/state fetches entire room state -crm POST /createRoom with creation content -ali PUT /directory/room/:room_alias creates alias -nsp GET /rooms/:room_id/aliases lists aliases -jon POST /rooms/:room_id/join can join a room -jon POST /join/:room_alias can join a room -jon POST /join/:room_id can join a room -jon POST /join/:room_id can join a room with custom content -jon POST /join/:room_alias can join a room with custom content -lev POST /rooms/:room_id/leave can leave a room -inv POST /rooms/:room_id/invite can send an invite -ban POST /rooms/:room_id/ban can ban a user -snd POST /rooms/:room_id/send/:event_type sends a message -snd PUT /rooms/:room_id/send/:event_type/:txn_id sends a message -snd PUT /rooms/:room_id/send/:event_type/:txn_id deduplicates the same txn id -get GET /rooms/:room_id/messages returns a message -get GET /rooms/:room_id/messages lazy loads members correctly -typ PUT /rooms/:room_id/typing/:user_id sets typing notification -typ Typing notifications don't leak (3 subtests) -rst GET /rooms/:room_id/state/m.room.power_levels can fetch levels -rst PUT /rooms/:room_id/state/m.room.power_levels can set levels -rst PUT power_levels should not explode if the old power levels were empty -rst Both GET and PUT work -rct POST /rooms/:room_id/receipt can create receipts -red POST /rooms/:room_id/read_markers can create read marker -med POST /media/r0/upload can create an upload -med GET /media/r0/download can fetch the value again -cap GET /capabilities is present and well formed for registered user -cap GET /r0/capabilities is not public -reg Register with a recaptcha -reg registration is idempotent, without username specified -reg registration is idempotent, with username specified -reg registration remembers parameters -reg registration accepts non-ascii passwords -reg registration with inhibit_login inhibits login -reg User signups are forbidden from starting with '_' -reg Can register using an email address -log Can login with 3pid and password using m.login.password -log login types include SSO -log /login/cas/redirect redirects if the old m.login.cas login type is listed -log Can login with new user via CAS -lox Can logout current device -lox Can logout all devices -lox Request to logout with invalid an access token is rejected -lox Request to logout without an access token is rejected -log After changing password, can't log in with old password -log After changing password, can log in with new password -log After changing password, existing session still works -log After changing password, a different session no longer works by default -log After changing password, different sessions can optionally be kept -psh Pushers created with a different access token are deleted on password change -psh Pushers created with a the same access token are not deleted on password change -acc Can deactivate account -acc Can't deactivate account with wrong password -acc After deactivating account, can't log in with password -acc After deactivating account, can't log in with an email -v1s initialSync sees my presence status -pre Presence change reports an event to myself -pre Friends presence changes reports events -crm Room creation reports m.room.create to myself -crm Room creation reports 
m.room.member to myself -rst Setting room topic reports m.room.topic to myself -v1s Global initialSync -v1s Global initialSync with limit=0 gives no messages -v1s Room initialSync -v1s Room initialSync with limit=0 gives no messages -rst Setting state twice is idempotent -jon Joining room twice is idempotent -syn New room members see their own join event -v1s New room members see existing users' presence in room initialSync -syn Existing members see new members' join events -syn Existing members see new members' presence -v1s All room members see all room members' presence in global initialSync -f,jon Remote users can join room by alias -syn New room members see their own join event -v1s New room members see existing members' presence in room initialSync -syn Existing members see new members' join events -syn Existing members see new member's presence -v1s New room members see first user's profile information in global initialSync -v1s New room members see first user's profile information in per-room initialSync -f,jon Remote users may not join unfederated rooms -syn Local room members see posted message events -v1s Fetching eventstream a second time doesn't yield the message again -syn Local non-members don't see posted message events -get Local room members can get room messages -f,syn Remote room members also see posted message events -f,get Remote room members can get room messages -get Message history can be paginated -f,get Message history can be paginated over federation -eph Ephemeral messages received from clients are correctly expired -ali Room aliases can contain Unicode -f,ali Remote room alias queries can handle Unicode -ali Canonical alias can be set -ali Canonical alias can include alt_aliases -ali Regular users can add and delete aliases in the default room configuration -ali Regular users can add and delete aliases when m.room.aliases is restricted -ali Deleting a non-existent alias should return a 404 -ali Users can't delete other's aliases -ali Users with sufficient power-level can delete other's aliases -ali Can delete canonical alias -ali Alias creators can delete alias with no ops -ali Alias creators can delete canonical alias with no ops -ali Only room members can list aliases of a room -inv Can invite users to invite-only rooms -inv Uninvited users cannot join the room -inv Invited user can reject invite -f,inv Invited user can reject invite over federation -f,inv Invited user can reject invite over federation several times -inv Invited user can reject invite for empty room -f,inv Invited user can reject invite over federation for empty room -inv Invited user can reject local invite after originator leaves -inv Invited user can see room metadata -f,inv Remote invited user can see room metadata -inv Users cannot invite themselves to a room -inv Users cannot invite a user that is already in the room -ban Banned user is kicked and may not rejoin until unbanned -f,ban Remote banned user is kicked and may not rejoin until unbanned -ban 'ban' event respects room powerlevel -plv setting 'm.room.name' respects room powerlevel -plv setting 'm.room.power_levels' respects room powerlevel (2 subtests) -plv Unprivileged users can set m.room.topic if it only needs level 0 -plv Users cannot set ban powerlevel higher than their own (2 subtests) -plv Users cannot set kick powerlevel higher than their own (2 subtests) -plv Users cannot set redact powerlevel higher than their own (2 subtests) -v1s Check that event streams started after a client joined a room work (SYT-1) -v1s Event 
stream catches up fully after many messages -xxx POST /rooms/:room_id/redact/:event_id as power user redacts message -xxx POST /rooms/:room_id/redact/:event_id as original message sender redacts message -xxx POST /rooms/:room_id/redact/:event_id as random user does not redact message -xxx POST /redact disallows redaction of event in different room -xxx Redaction of a redaction redacts the redaction reason -v1s A departed room is still included in /initialSync (SPEC-216) -v1s Can get rooms/{roomId}/initialSync for a departed room (SPEC-216) -rst Can get rooms/{roomId}/state for a departed room (SPEC-216) -mem Can get rooms/{roomId}/members for a departed room (SPEC-216) -get Can get rooms/{roomId}/messages for a departed room (SPEC-216) -rst Can get 'm.room.name' state for a departed room (SPEC-216) -syn Getting messages going forward is limited for a departed room (SPEC-216) -3pd Can invite existing 3pid -3pd Can invite existing 3pid with no ops into a private room -3pd Can invite existing 3pid in createRoom -3pd Can invite unbound 3pid -f,3pd Can invite unbound 3pid over federation -3pd Can invite unbound 3pid with no ops into a private room -f,3pd Can invite unbound 3pid over federation with no ops into a private room -f,3pd Can invite unbound 3pid over federation with users from both servers -3pd Can accept unbound 3pid invite after inviter leaves -3pd Can accept third party invite with /join -3pd 3pid invite join with wrong but valid signature are rejected -3pd 3pid invite join valid signature but revoked keys are rejected -3pd 3pid invite join valid signature but unreachable ID server are rejected -gst Guest user cannot call /events globally -gst Guest users can join guest_access rooms -gst Guest users can send messages to guest_access rooms if joined -gst Guest user calling /events doesn't tightloop -gst Guest users are kicked from guest_access rooms on revocation of guest_access -gst Guest user can set display names -gst Guest users are kicked from guest_access rooms on revocation of guest_access over federation -gst Guest user can upgrade to fully featured user -gst Guest user cannot upgrade other users -pub GET /publicRooms lists rooms -pub GET /publicRooms includes avatar URLs -gst Guest users can accept invites to private rooms over federation -gst Guest users denied access over federation if guest access prohibited -mem Room members can override their displayname on a room-specific basis -mem Room members can join a room with an overridden displayname -mem Users cannot kick users from a room they are not in -mem Users cannot kick users who have already left a room -typ Typing notification sent to local room members -f,typ Typing notifications also sent to remote room members -typ Typing can be explicitly stopped -rct Read receipts are visible to /initialSync -rct Read receipts are sent as events -rct Receipts must be m.read -pro displayname updates affect room member events -pro avatar_url updates affect room member events -gst m.room.history_visibility == "world_readable" allows/forbids appropriately for Guest users -gst m.room.history_visibility == "shared" allows/forbids appropriately for Guest users -gst m.room.history_visibility == "invited" allows/forbids appropriately for Guest users -gst m.room.history_visibility == "joined" allows/forbids appropriately for Guest users -gst m.room.history_visibility == "default" allows/forbids appropriately for Guest users -gst Guest non-joined user cannot call /events on shared room -gst Guest non-joined user cannot call /events on 
invited room -gst Guest non-joined user cannot call /events on joined room -gst Guest non-joined user cannot call /events on default room -gst Guest non-joined user can call /events on world_readable room -gst Guest non-joined users can get state for world_readable rooms -gst Guest non-joined users can get individual state for world_readable rooms -gst Guest non-joined users cannot room initalSync for non-world_readable rooms -gst Guest non-joined users can room initialSync for world_readable rooms -gst Guest non-joined users can get individual state for world_readable rooms after leaving -gst Guest non-joined users cannot send messages to guest_access rooms if not joined -gst Guest users can sync from world_readable guest_access rooms if joined -gst Guest users can sync from shared guest_access rooms if joined -gst Guest users can sync from invited guest_access rooms if joined -gst Guest users can sync from joined guest_access rooms if joined -gst Guest users can sync from default guest_access rooms if joined -ath m.room.history_visibility == "world_readable" allows/forbids appropriately for Real users -ath m.room.history_visibility == "shared" allows/forbids appropriately for Real users -ath m.room.history_visibility == "invited" allows/forbids appropriately for Real users -ath m.room.history_visibility == "joined" allows/forbids appropriately for Real users -ath m.room.history_visibility == "default" allows/forbids appropriately for Real users -ath Real non-joined user cannot call /events on shared room -ath Real non-joined user cannot call /events on invited room -ath Real non-joined user cannot call /events on joined room -ath Real non-joined user cannot call /events on default room -ath Real non-joined user can call /events on world_readable room -ath Real non-joined users can get state for world_readable rooms -ath Real non-joined users can get individual state for world_readable rooms -ath Real non-joined users cannot room initalSync for non-world_readable rooms -ath Real non-joined users can room initialSync for world_readable rooms -ath Real non-joined users can get individual state for world_readable rooms after leaving -ath Real non-joined users cannot send messages to guest_access rooms if not joined -ath Real users can sync from world_readable guest_access rooms if joined -ath Real users can sync from shared guest_access rooms if joined -ath Real users can sync from invited guest_access rooms if joined -ath Real users can sync from joined guest_access rooms if joined -ath Real users can sync from default guest_access rooms if joined -ath Only see history_visibility changes on boundaries -f,ath Backfill works correctly with history visibility set to joined -fgt Forgotten room messages cannot be paginated -fgt Forgetting room does not show up in v2 /sync -fgt Can forget room you've been kicked from -fgt Can't forget room you're still in -fgt Can re-join room if re-invited -ath Only original members of the room can see messages from erased users -mem /joined_rooms returns only joined rooms -mem /joined_members return joined members -ctx /context/ on joined room works -ctx /context/ on non world readable room does not work -ctx /context/ returns correct number of events -ctx /context/ with lazy_load_members filter works -get /event/ on joined room works -get /event/ on non world readable room does not work -get /event/ does not allow access to events before the user joined -mem Can get rooms/{roomId}/members -mem Can get rooms/{roomId}/members at a given point -mem Can filter 
rooms/{roomId}/members -upg /upgrade creates a new room -upg /upgrade should preserve room visibility for public rooms -upg /upgrade should preserve room visibility for private rooms -upg /upgrade copies >100 power levels to the new room -upg /upgrade copies the power levels to the new room -upg /upgrade preserves the power level of the upgrading user in old and new rooms -upg /upgrade copies important state to the new room -upg /upgrade copies ban events to the new room -upg local user has push rules copied to upgraded room -f,upg remote user has push rules copied to upgraded room -upg /upgrade moves aliases to the new room -upg /upgrade moves remote aliases to the new room -upg /upgrade preserves direct room state -upg /upgrade preserves room federation ability -upg /upgrade restricts power levels in the old room -upg /upgrade restricts power levels in the old room when the old PLs are unusual -upg /upgrade to an unknown version is rejected -upg /upgrade is rejected if the user can't send state events -upg /upgrade of a bogus room fails gracefully -upg Cannot send tombstone event that points to the same room -f,upg Local and remote users' homeservers remove a room from their public directory on upgrade -rst Name/topic keys are correct -f,pub Can get remote public room list -pub Can paginate public room list -pub Can search public room list -syn Can create filter -syn Can download filter -syn Can sync -syn Can sync a joined room -syn Full state sync includes joined rooms -syn Newly joined room is included in an incremental sync -syn Newly joined room has correct timeline in incremental sync -syn Newly joined room includes presence in incremental sync -syn Get presence for newly joined members in incremental sync -syn Can sync a room with a single message -syn Can sync a room with a message with a transaction id -syn A message sent after an initial sync appears in the timeline of an incremental sync. -syn A filtered timeline reaches its limit -syn Syncing a new room with a large timeline limit isn't limited -syn A full_state incremental update returns only recent timeline -syn A prev_batch token can be used in the v1 messages API -syn A next_batch token can be used in the v1 messages API -syn User sees their own presence in a sync -syn User is offline if they set_presence=offline in their sync -syn User sees updates to presence from other users in the incremental sync. 
-syn State is included in the timeline in the initial sync -f,syn State from remote users is included in the state in the initial sync -syn Changes to state are included in an incremental sync -syn Changes to state are included in an gapped incremental sync -f,syn State from remote users is included in the timeline in an incremental sync -syn A full_state incremental update returns all state -syn When user joins a room the state is included in the next sync -syn A change to displayname should not result in a full state sync -syn A change to displayname should appear in incremental /sync -syn When user joins a room the state is included in a gapped sync -syn When user joins and leaves a room in the same batch, the full state is still included in the next sync -syn Current state appears in timeline in private history -syn Current state appears in timeline in private history with many messages before -syn Current state appears in timeline in private history with many messages after -syn Rooms a user is invited to appear in an initial sync -syn Rooms a user is invited to appear in an incremental sync -syn Newly joined room is included in an incremental sync after invite -syn Sync can be polled for updates -syn Sync is woken up for leaves -syn Left rooms appear in the leave section of sync -syn Newly left rooms appear in the leave section of incremental sync -syn We should see our own leave event, even if history_visibility is restricted (SYN-662) -syn We should see our own leave event when rejecting an invite, even if history_visibility is restricted (riot-web/3462) -syn Newly left rooms appear in the leave section of gapped sync -syn Previously left rooms don't appear in the leave section of sync -syn Left rooms appear in the leave section of full state sync -syn Archived rooms only contain history from before the user left -syn Banned rooms appear in the leave section of sync -syn Newly banned rooms appear in the leave section of incremental sync -syn Newly banned rooms appear in the leave section of incremental sync -syn Typing events appear in initial sync -syn Typing events appear in incremental sync -syn Typing events appear in gapped sync -syn Read receipts appear in initial v2 /sync -syn New read receipts appear in incremental v2 /sync -syn Can pass a JSON filter as a query parameter -syn Can request federation format via the filter -syn Read markers appear in incremental v2 /sync -syn Read markers appear in initial v2 /sync -syn Read markers can be updated -syn Lazy loading parameters in the filter are strictly boolean -syn The only membership state included in an initial sync is for all the senders in the timeline -syn The only membership state included in an incremental sync is for senders in the timeline -syn The only membership state included in a gapped incremental sync is for senders in the timeline -syn Gapped incremental syncs include all state changes -syn Old leaves are present in gapped incremental syncs -syn Leaves are present in non-gapped incremental syncs -syn Old members are included in gappy incr LL sync if they start speaking -syn Members from the gap are included in gappy incr LL sync -syn We don't send redundant membership state across incremental syncs by default -syn We do send redundant membership state across incremental syncs if asked -syn Unnamed room comes with a name summary -syn Named room comes with just joined member count summary -syn Room summary only has 5 heroes -syn Room summary counts change when membership changes -rmv User can create and 
send/receive messages in a room with version 1 -rmv User can create and send/receive messages in a room with version 1 (2 subtests) -rmv local user can join room with version 1 -rmv User can invite local user to room with version 1 -rmv remote user can join room with version 1 -rmv User can invite remote user to room with version 1 -rmv Remote user can backfill in a room with version 1 -rmv Can reject invites over federation for rooms with version 1 -rmv Can receive redactions from regular users over federation in room version 1 -rmv User can create and send/receive messages in a room with version 2 -rmv User can create and send/receive messages in a room with version 2 (2 subtests) -rmv local user can join room with version 2 -rmv User can invite local user to room with version 2 -rmv remote user can join room with version 2 -rmv User can invite remote user to room with version 2 -rmv Remote user can backfill in a room with version 2 -rmv Can reject invites over federation for rooms with version 2 -rmv Can receive redactions from regular users over federation in room version 2 -rmv User can create and send/receive messages in a room with version 3 -rmv User can create and send/receive messages in a room with version 3 (2 subtests) -rmv local user can join room with version 3 -rmv User can invite local user to room with version 3 -rmv remote user can join room with version 3 -rmv User can invite remote user to room with version 3 -rmv Remote user can backfill in a room with version 3 -rmv Can reject invites over federation for rooms with version 3 -rmv Can receive redactions from regular users over federation in room version 3 -rmv User can create and send/receive messages in a room with version 4 -rmv User can create and send/receive messages in a room with version 4 (2 subtests) -rmv local user can join room with version 4 -rmv User can invite local user to room with version 4 -rmv remote user can join room with version 4 -rmv User can invite remote user to room with version 4 -rmv Remote user can backfill in a room with version 4 -rmv Can reject invites over federation for rooms with version 4 -rmv Can receive redactions from regular users over federation in room version 4 -rmv User can create and send/receive messages in a room with version 5 -rmv User can create and send/receive messages in a room with version 5 (2 subtests) -rmv local user can join room with version 5 -rmv User can invite local user to room with version 5 -rmv remote user can join room with version 5 -rmv User can invite remote user to room with version 5 -rmv Remote user can backfill in a room with version 5 -rmv Can reject invites over federation for rooms with version 5 -rmv Can receive redactions from regular users over federation in room version 5 -rmv User can create and send/receive messages in a room with version 6 -rmv User can create and send/receive messages in a room with version 6 (2 subtests) -rmv local user can join room with version 6 -rmv User can invite local user to room with version 6 -rmv remote user can join room with version 6 -rmv User can invite remote user to room with version 6 -rmv Remote user can backfill in a room with version 6 -rmv Can reject invites over federation for rooms with version 6 -rmv Can receive redactions from regular users over federation in room version 6 -rmv Inbound federation rejects invites which include invalid JSON for room version 6 -rmv Outbound federation rejects invite response which include invalid JSON for room version 6 -rmv Inbound federation rejects invite 
rejections which include invalid JSON for room version 6 -rmv Server rejects invalid JSON in a version 6 room -pre Presence changes are reported to local room members -f,pre Presence changes are also reported to remote room members -pre Presence changes to UNAVAILABLE are reported to local room members -f,pre Presence changes to UNAVAILABLE are reported to remote room members -v1s Newly created users see their own presence in /initialSync (SYT-34) -dvk Can upload device keys -dvk Should reject keys claiming to belong to a different user -dvk Can query device keys using POST -dvk Can query specific device keys using POST -dvk query for user with no keys returns empty key dict -dvk Can claim one time key using POST -f,dvk Can query remote device keys using POST -f,dvk Can claim remote one time key using POST -dvk Local device key changes appear in v2 /sync -dvk Local new device changes appear in v2 /sync -dvk Local delete device changes appear in v2 /sync -dvk Local update device changes appear in v2 /sync -dvk Can query remote device keys using POST after notification -f,dev Device deletion propagates over federation -f,dev If remote user leaves room, changes device and rejoins we see update in sync -f,dev If remote user leaves room we no longer receive device updates -dvk Local device key changes appear in /keys/changes -dvk New users appear in /keys/changes -f,dvk If remote user leaves room, changes device and rejoins we see update in /keys/changes -dvk Get left notifs in sync and /keys/changes when other user leaves -dvk Get left notifs for other users in sync and /keys/changes when user leaves -f,dvk If user leaves room, remote user changes device and rejoins we see update in /sync and /keys/changes -dkb Can create backup version -dkb Can update backup version -dkb Responds correctly when backup is empty -dkb Can backup keys -dkb Can update keys with better versions -dkb Will not update keys with worse versions -dkb Will not back up to an old backup version -dkb Can delete backup -dkb Deleted & recreated backups are empty -dkb Can create more than 10 backup versions -xsk Can upload self-signing keys -xsk Fails to upload self-signing keys with no auth -xsk Fails to upload self-signing key without master key -xsk Changing master key notifies local users -xsk Changing user-signing key notifies local users -f,xsk can fetch self-signing keys over federation -f,xsk uploading self-signing key notifies over federation -f,xsk uploading signed devices gets propagated over federation -tag Can add tag -tag Can remove tag -tag Can list tags for a room -v1s Tags appear in the v1 /events stream -v1s Tags appear in the v1 /initalSync -v1s Tags appear in the v1 room initial sync -tag Tags appear in an initial v2 /sync -tag Newly updated tags appear in an incremental v2 /sync -tag Deleted tags appear in an incremental v2 /sync -tag local user has tags copied to the new room -f,tag remote user has tags copied to the new room -sch Can search for an event by body -sch Can get context around search results -sch Can back-paginate search results -sch Search works across an upgraded room and its predecessor -sch Search results with rank ordering do not include redacted events -sch Search results with recent ordering do not include redacted events -acc Can add account data -acc Can add account data to room -acc Can get account data without syncing -acc Can get room account data without syncing -v1s Latest account data comes down in /initialSync -v1s Latest account data comes down in room initialSync -v1s Account 
data appears in v1 /events stream -v1s Room account data appears in v1 /events stream -acc Latest account data appears in v2 /sync -acc New account data appears in incremental v2 /sync -oid Can generate a openid access_token that can be exchanged for information about a user -oid Invalid openid access tokens are rejected -oid Requests to userinfo without access tokens are rejected -std Can send a message directly to a device using PUT /sendToDevice -std Can recv a device message using /sync -std Can recv device messages until they are acknowledged -std Device messages with the same txn_id are deduplicated -std Device messages wake up /sync -std Can recv device messages over federation -fsd Device messages over federation wake up /sync -std Can send messages with a wildcard device id -std Can send messages with a wildcard device id to two devices -std Wildcard device messages wake up /sync -fsd Wildcard device messages over federation wake up /sync -adm /whois -nsp /purge_history -nsp /purge_history by ts -nsp Can backfill purged history -nsp Shutdown room -ign Ignore user in existing room -ign Ignore invite in full sync -ign Ignore invite in incremental sync -fky Checking local federation server -fky Federation key API allows unsigned requests for keys -fky Federation key API can act as a notary server via a GET request -fky Federation key API can act as a notary server via a POST request -fky Key notary server should return an expired key if it can't find any others -fky Key notary server must not overwrite a valid key with a spurious result from the origin server -fqu Non-numeric ports in server names are rejected -fqu Outbound federation can query profile data -fqu Inbound federation can query profile data -fqu Outbound federation can query room alias directory -fqu Inbound federation can query room alias directory -fsj Outbound federation can query v1 /send_join -fsj Outbound federation can query v2 /send_join -fmj Outbound federation passes make_join failures through to the client -fsj Inbound federation can receive v1 /send_join -fsj Inbound federation can receive v2 /send_join -fmj Inbound /v1/make_join rejects remote attempts to join local users to rooms -fsj Inbound /v1/send_join rejects incorrectly-signed joins -fsj Inbound /v1/send_join rejects joins from other servers -fau Inbound federation rejects remote attempts to kick local users to rooms -frv Inbound federation rejects attempts to join v1 rooms from servers without v1 support -frv Inbound federation rejects attempts to join v2 rooms from servers lacking version support -frv Inbound federation rejects attempts to join v2 rooms from servers only supporting v1 -frv Inbound federation accepts attempts to join v2 rooms from servers with support -frv Outbound federation correctly handles unsupported room versions -frv A pair of servers can establish a join in a v2 room -fsj Outbound federation rejects send_join responses with no m.room.create event -frv Outbound federation rejects m.room.create events with an unknown room version -fsj Event with an invalid signature in the send_join response should not cause room join to fail -fsj Inbound: send_join rejects invalid JSON for room version 6 -fed Outbound federation can send events -fed Inbound federation can receive events -fed Inbound federation can receive redacted events -fed Ephemeral messages received from servers are correctly expired -fed Events whose auth_events are in the wrong room do not mess up the room state -fed Inbound federation can return events -fed Inbound 
federation redacts events from erased users -fme Outbound federation can request missing events -fme Inbound federation can return missing events for world_readable visibility -fme Inbound federation can return missing events for shared visibility -fme Inbound federation can return missing events for invite visibility -fme Inbound federation can return missing events for joined visibility -fme outliers whose auth_events are in a different room are correctly rejected -fbk Outbound federation can backfill events -fbk Inbound federation can backfill events -fbk Backfill checks the events requested belong to the room -fbk Backfilled events whose prev_events are in a different room do not allow cross-room back-pagination -fiv Outbound federation can send invites via v1 API -fiv Outbound federation can send invites via v2 API -fiv Inbound federation can receive invites via v1 API -fiv Inbound federation can receive invites via v2 API -fiv Inbound federation can receive invite and reject when remote replies with a 403 -fiv Inbound federation can receive invite and reject when remote replies with a 500 -fiv Inbound federation can receive invite and reject when remote is unreachable -fiv Inbound federation rejects invites which are not signed by the sender -fiv Inbound federation can receive invite rejections -fiv Inbound federation rejects incorrectly-signed invite rejections -fsl Inbound /v1/send_leave rejects leaves from other servers -fst Inbound federation can get state for a room -fst Inbound federation of state requires event_id as a mandatory paramater -fst Inbound federation can get state_ids for a room -fst Inbound federation of state_ids requires event_id as a mandatory paramater -fst Federation rejects inbound events where the prev_events cannot be found -fst Room state at a rejected message event is the same as its predecessor -fst Room state at a rejected state event is the same as its predecessor -fst Outbound federation requests missing prev_events and then asks for /state_ids and resolves the state -fst Federation handles empty auth_events in state_ids sanely -fst Getting state checks the events requested belong to the room -fst Getting state IDs checks the events requested belong to the room -fst Should not be able to take over the room by pretending there is no PL event -fpb Inbound federation can get public room list -fed Outbound federation sends receipts -fed Inbound federation rejects receipts from wrong remote -fed Inbound federation ignores redactions from invalid servers room > v3 -fed An event which redacts an event in a different room should be ignored -fed An event which redacts itself should be ignored -fed A pair of events which redact each other should be ignored -fdk Local device key changes get to remote servers -fdk Server correctly handles incoming m.device_list_update -fdk Server correctly resyncs when client query keys and there is no remote cache -fdk Server correctly resyncs when server leaves and rejoins a room -fdk Local device key changes get to remote servers with correct prev_id -fdk Device list doesn't change if remote server is down -fdk If a device list update goes missing, the server resyncs on the next one -fst Name/topic keys are correct -fau Remote servers cannot set power levels in rooms without existing powerlevels -fau Remote servers should reject attempts by non-creators to set the power levels -fau Inbound federation rejects typing notifications from wrong remote -fau Users cannot set notifications powerlevel higher than their own -fed 
Forward extremities remain so even after the next events are populated as outliers -fau Banned servers cannot send events -fau Banned servers cannot /make_join -fau Banned servers cannot /send_join -fau Banned servers cannot /make_leave -fau Banned servers cannot /send_leave -fau Banned servers cannot /invite -fau Banned servers cannot get room state -fau Banned servers cannot get room state ids -fau Banned servers cannot backfill -fau Banned servers cannot /event_auth -fau Banned servers cannot get missing events -fau Server correctly handles transactions that break edu limits -fau Inbound federation correctly soft fails events -fau Inbound federation accepts a second soft-failed event -fau Inbound federation correctly handles soft failed events as extremities -med Can upload with Unicode file name -med Can download with Unicode file name locally -f,med Can download with Unicode file name over federation -med Alternative server names do not cause a routing loop -med Can download specifying a different Unicode file name -med Can upload without a file name -med Can download without a file name locally -f,med Can download without a file name over federation -med Can upload with ASCII file name -med Can download file 'ascii' -med Can download file 'name with spaces' -med Can download file 'name;with;semicolons' -med Can download specifying a different ASCII file name -med Can send image in room message -med Can fetch images in room -med POSTed media can be thumbnailed -f,med Remote media can be thumbnailed -med Test URL preview -med Can read configuration endpoint -nsp Can quarantine media in rooms -udr User appears in user directory -udr User in private room doesn't appear in user directory -udr User joining then leaving public room appears and dissappears from directory -udr Users appear/disappear from directory when join_rules are changed -udr Users appear/disappear from directory when history_visibility are changed -udr Users stay in directory when join_rules are changed but history_visibility is world_readable -f,udr User in remote room doesn't appear in user directory after server left room -udr User directory correctly update on display name change -udr User in shared private room does appear in user directory -udr User in shared private room does appear in user directory until leave -udr User in dir while user still shares private rooms -nsp Create group -nsp Add group rooms -nsp Remove group rooms -nsp Get local group profile -nsp Get local group users -nsp Add/remove local group rooms -nsp Get local group summary -nsp Get remote group profile -nsp Get remote group users -nsp Add/remove remote group rooms -nsp Get remote group summary -nsp Add local group users -nsp Remove self from local group -nsp Remove other from local group -nsp Add remote group users -nsp Remove self from remote group -nsp Listing invited users of a remote group when not a member returns a 403 -nsp Add group category -nsp Remove group category -nsp Get group categories -nsp Add group role -nsp Remove group role -nsp Get group roles -nsp Add room to group summary -nsp Adding room to group summary keeps room_id when fetching rooms in group -nsp Adding multiple rooms to group summary have correct order -nsp Remove room from group summary -nsp Add room to group summary with category -nsp Remove room from group summary with category -nsp Add user to group summary -nsp Adding multiple users to group summary have correct order -nsp Remove user from group summary -nsp Add user to group summary with role -nsp Remove 
user from group summary with role -nsp Local group invites come down sync -nsp Group creator sees group in sync -nsp Group creator sees group in initial sync -nsp Get/set local group publicity -nsp Bulk get group publicity -nsp Joinability comes down summary -nsp Set group joinable and join it -nsp Group is not joinable by default -nsp Group is joinable over federation -nsp Room is transitioned on local and remote groups upon room upgrade -3pd Can bind 3PID via home server -3pd Can bind and unbind 3PID via homeserver -3pd Can unbind 3PID via homeserver when bound out of band -3pd 3PIDs are unbound after account deactivation -3pd Can bind and unbind 3PID via /unbind by specifying the identity server -3pd Can bind and unbind 3PID via /unbind without specifying the identity server -app AS can create a user -app AS can create a user with an underscore -app AS can create a user with inhibit_login -app AS cannot create users outside its own namespace -app Regular users cannot register within the AS namespace -app AS can make room aliases -app Regular users cannot create room aliases within the AS namespace -app AS-ghosted users can use rooms via AS -app AS-ghosted users can use rooms themselves -app Ghost user must register before joining room -app AS can set avatar for ghosted users -app AS can set displayname for ghosted users -app AS can't set displayname for random users -app Inviting an AS-hosted user asks the AS server -app Accesing an AS-hosted room alias asks the AS server -app Events in rooms with AS-hosted room aliases are sent to AS server -app AS user (not ghost) can join room without registering -app AS user (not ghost) can join room without registering, with user_id query param -app HS provides query metadata -app HS can provide query metadata on a single protocol -app HS will proxy request for 3PU mapping -app HS will proxy request for 3PL mapping -app AS can publish rooms in their own list -app AS and main public room lists are separate -app AS can deactivate a user -psh Test that a message is pushed -psh Invites are pushed -psh Rooms with names are correctly named in pushed -psh Rooms with canonical alias are correctly named in pushed -psh Rooms with many users are correctly pushed -psh Don't get pushed for rooms you've muted -psh Rejected events are not pushed -psh Can add global push rule for room -psh Can add global push rule for sender -psh Can add global push rule for content -psh Can add global push rule for override -psh Can add global push rule for underride -psh Can add global push rule for content -psh New rules appear before old rules by default -psh Can add global push rule before an existing rule -psh Can add global push rule after an existing rule -psh Can delete a push rule -psh Can disable a push rule -psh Adding the same push rule twice is idempotent -psh Messages that notify from another user increment unread notification count -psh Messages that highlight from another user increment unread highlight count -psh Can change the actions of default rules -psh Changing the actions of an unknown default rule fails with 404 -psh Can change the actions of a user specified rule -psh Changing the actions of an unknown rule fails with 404 -psh Can fetch a user's pushers -psh Push rules come down in an initial /sync -psh Adding a push rule wakes up an incremental /sync -psh Disabling a push rule wakes up an incremental /sync -psh Enabling a push rule wakes up an incremental /sync -psh Setting actions for a push rule wakes up an incremental /sync -psh Can enable/disable 
default rules -psh Enabling an unknown default rule fails with 404 -psh Test that rejected pushers are removed. -psh Notifications can be viewed with GET /notifications -psh Trying to add push rule with no scope fails with 400 -psh Trying to add push rule with invalid scope fails with 400 -psh Trying to add push rule with missing template fails with 400 -psh Trying to add push rule with missing rule_id fails with 400 -psh Trying to add push rule with empty rule_id fails with 400 -psh Trying to add push rule with invalid template fails with 400 -psh Trying to add push rule with rule_id with slashes fails with 400 -psh Trying to add push rule with override rule without conditions fails with 400 -psh Trying to add push rule with underride rule without conditions fails with 400 -psh Trying to add push rule with condition without kind fails with 400 -psh Trying to add push rule with content rule without pattern fails with 400 -psh Trying to add push rule with no actions fails with 400 -psh Trying to add push rule with invalid action fails with 400 -psh Trying to add push rule with invalid attr fails with 400 -psh Trying to add push rule with invalid value for enabled fails with 400 -psh Trying to get push rules with no trailing slash fails with 400 -psh Trying to get push rules with scope without trailing slash fails with 400 -psh Trying to get push rules with template without tailing slash fails with 400 -psh Trying to get push rules with unknown scope fails with 400 -psh Trying to get push rules with unknown template fails with 400 -psh Trying to get push rules with unknown attribute fails with 400 -psh Trying to get push rules with unknown rule_id fails with 404 -psh Rooms with names are correctly named in pushes -v1s GET /initialSync with non-numeric 'limit' -v1s GET /events with non-numeric 'limit' -v1s GET /events with negative 'limit' -v1s GET /events with non-numeric 'timeout' -ath Event size limits -syn Check creating invalid filters returns 4xx -f,pre New federated private chats get full presence information (SYN-115) -pre Left room members do not cause problems for presence -crm Rooms can be created with an initial invite list (SYN-205) (1 subtests) -typ Typing notifications don't leak -ban Non-present room members cannot ban others -psh Getting push rules doesn't corrupt the cache SYN-390 -inv Test that we can be reinvited to a room we created -syn Multiple calls to /sync should not cause 500 errors -gst Guest user can call /events on another world_readable room (SYN-606) -gst Real user can call /events on another world_readable room (SYN-606) -gst Events come down the correct room -pub Asking for a remote rooms list, but supplying the local server's name, returns the local rooms list -std Can send a to-device message to two users which both receive it using /sync -fme Outbound federation will ignore a missing event with bad JSON for room version 6 -fbk Outbound federation rejects backfill containing invalid JSON for events in room version 6 -jso Invalid JSON integers -jso Invalid JSON floats -jso Invalid JSON special values -inv Can invite users to invite-only rooms (2 subtests) -plv setting 'm.room.name' respects room powerlevel (2 subtests) -psh Messages that notify from another user increment notification_count -psh Messages that org.matrix.msc2625.mark_unread from another user increment org.matrix.msc2625.unread_count -dvk Can claim one time key using POST (2 subtests) -fdk Can query remote device keys using POST (1 subtests) -fdk Can claim remote one time key using POST (2 
subtests) -fmj Inbound /make_join rejects attempts to join rooms where all users have left \ No newline at end of file diff --git a/tests/sytest/are-we-synapse-yet.py b/tests/sytest/are-we-synapse-yet.py deleted file mode 100755 index 3d21fa41..00000000 --- a/tests/sytest/are-we-synapse-yet.py +++ /dev/null @@ -1,266 +0,0 @@ -#!/usr/bin/env python3 - -from __future__ import division -import argparse -import re -import sys - -# Usage: $ ./are-we-synapse-yet.py [-v] results.tap -# This script scans a results.tap file from Dendrite's CI process and spits out -# a rating of how close we are to Synapse parity, based purely on SyTests. -# The main complexity is grouping tests sensibly into features like 'Registration' -# and 'Federation'. Then it just checks the ones which are passing and calculates -# percentages for each group. Produces results like: -# -# Client-Server APIs: 29% (196/666 tests) -# ------------------- -# Registration : 62% (20/32 tests) -# Login : 7% (1/15 tests) -# V1 CS APIs : 10% (3/30 tests) -# ... -# -# or in verbose mode: -# -# Client-Server APIs: 29% (196/666 tests) -# ------------------- -# Registration : 62% (20/32 tests) -# ✓ GET /register yields a set of flows -# ✓ POST /register can create a user -# ✓ POST /register downcases capitals in usernames -# ... -# -# You can also tack `-v` on to see exactly which tests each category falls under. - -test_mappings = { - "nsp": "Non-Spec API", - "unk": "Unknown API (no group specified)", - "app": "Application Services API", - "f": "Federation", # flag to mark test involves federation - - "federation_apis": { - "fky": "Key API", - "fsj": "send_join API", - "fmj": "make_join API", - "fsl": "send_leave API", - "fiv": "Invite API", - "fqu": "Query API", - "frv": "room versions", - "fau": "Auth", - "fbk": "Backfill API", - "fme": "get_missing_events API", - "fst": "State APIs", - "fpb": "Public Room API", - "fdk": "Device Key APIs", - "fed": "Federation API", - "fsd": "Send-to-Device APIs", - }, - - "client_apis": { - "reg": "Registration", - "log": "Login", - "lox": "Logout", - "v1s": "V1 CS APIs", - "csa": "Misc CS APIs", - "pro": "Profile", - "dev": "Devices", - "dvk": "Device Keys", - "dkb": "Device Key Backup", - "xsk": "Cross-signing Keys", - "pre": "Presence", - "crm": "Create Room", - "syn": "Sync API", - "rmv": "Room Versions", - "rst": "Room State APIs", - "pub": "Public Room APIs", - "mem": "Room Membership", - "ali": "Room Aliases", - "jon": "Joining Rooms", - "lev": "Leaving Rooms", - "inv": "Inviting users to Rooms", - "ban": "Banning users", - "snd": "Sending events", - "get": "Getting events for Rooms", - "rct": "Receipts", - "red": "Read markers", - "med": "Media APIs", - "cap": "Capabilities API", - "typ": "Typing API", - "psh": "Push APIs", - "acc": "Account APIs", - "eph": "Ephemeral Events", - "plv": "Power Levels", - "xxx": "Redaction", - "3pd": "Third-Party ID APIs", - "gst": "Guest APIs", - "ath": "Room Auth", - "fgt": "Forget APIs", - "ctx": "Context APIs", - "upg": "Room Upgrade APIs", - "tag": "Tagging APIs", - "sch": "Search APIs", - "oid": "OpenID API", - "std": "Send-to-Device APIs", - "adm": "Server Admin API", - "ign": "Ignore Users", - "udr": "User Directory APIs", - "jso": "Enforced canonical JSON", - }, -} - -# optional 'not ' with test number then anything but '#' -re_testname = re.compile(r"^(not )?ok [0-9]+ ([^#]+)") - -# Parses lines like the following: -# -# SUCCESS: ok 3 POST /register downcases capitals in usernames -# FAIL: not ok 54 (expected fail) POST /createRoom creates a room with 
the given version -# SKIP: ok 821 Multiple calls to /sync should not cause 500 errors # skip lack of can_post_room_receipts -# EXPECT FAIL: not ok 822 (expected fail) Guest user can call /events on another world_readable room (SYN-606) # TODO expected fail -# -# Only SUCCESS lines are treated as success, the rest are not implemented. -# -# Returns a dict like: -# { name: "...", ok: True } -def parse_test_line(line): - if not line.startswith("ok ") and not line.startswith("not ok "): - return - re_match = re_testname.match(line) - test_name = re_match.groups()[1].replace("(expected fail) ", "").strip() - test_pass = False - if line.startswith("ok ") and not "# skip " in line: - test_pass = True - return { - "name": test_name, - "ok": test_pass, - } - -# Prints the stats for a complete section. -# header_name => "Client-Server APIs" -# gid_to_tests => { gid: { : True|False }} -# gid_to_name => { gid: "Group Name" } -# verbose => True|False -# Produces: -# Client-Server APIs: 29% (196/666 tests) -# ------------------- -# Registration : 62% (20/32 tests) -# Login : 7% (1/15 tests) -# V1 CS APIs : 10% (3/30 tests) -# ... -# or in verbose mode: -# Client-Server APIs: 29% (196/666 tests) -# ------------------- -# Registration : 62% (20/32 tests) -# ✓ GET /register yields a set of flows -# ✓ POST /register can create a user -# ✓ POST /register downcases capitals in usernames -# ... -def print_stats(header_name, gid_to_tests, gid_to_name, verbose): - subsections = [] # Registration: 100% (13/13 tests) - subsection_test_names = {} # 'subsection name': ["✓ Test 1", "✓ Test 2", "× Test 3"] - total_passing = 0 - total_tests = 0 - for gid, tests in gid_to_tests.items(): - group_total = len(tests) - if group_total == 0: - continue - group_passing = 0 - test_names_and_marks = [] - for name, passing in tests.items(): - if passing: - group_passing += 1 - test_names_and_marks.append(f"{'✓' if passing else '×'} {name}") - - total_tests += group_total - total_passing += group_passing - pct = "{0:.0f}%".format(group_passing/group_total * 100) - line = "%s: %s (%d/%d tests)" % (gid_to_name[gid].ljust(25, ' '), pct.rjust(4, ' '), group_passing, group_total) - subsections.append(line) - subsection_test_names[line] = test_names_and_marks - - pct = "{0:.0f}%".format(total_passing/total_tests * 100) - print("%s: %s (%d/%d tests)" % (header_name, pct, total_passing, total_tests)) - print("-" * (len(header_name)+1)) - for line in subsections: - print(" %s" % (line,)) - if verbose: - for test_name_and_pass_mark in subsection_test_names[line]: - print(" %s" % (test_name_and_pass_mark,)) - print("") - print("") - -def main(results_tap_path, verbose): - # Load up test mappings - test_name_to_group_id = {} - fed_tests = set() - client_tests = set() - with open("./are-we-synapse-yet.list", "r") as f: - for line in f.readlines(): - test_name = " ".join(line.split(" ")[1:]).strip() - groups = line.split(" ")[0].split(",") - for gid in groups: - if gid == "f" or gid in test_mappings["federation_apis"]: - fed_tests.add(test_name) - else: - client_tests.add(test_name) - if gid == "f": - continue # we expect another group ID - test_name_to_group_id[test_name] = gid - - # parse results.tap - summary = { - "client": { - # gid: { - # test_name: OK - # } - }, - "federation": { - # gid: { - # test_name: OK - # } - }, - "appservice": { - "app": {}, - }, - "nonspec": { - "nsp": {}, - "unk": {} - }, - } - with open(results_tap_path, "r") as f: - for line in f.readlines(): - test_result = parse_test_line(line) - if not test_result: - 
continue - name = test_result["name"] - group_id = test_name_to_group_id.get(name) - if not group_id: - summary["nonspec"]["unk"][name] = test_result["ok"] - if group_id == "nsp": - summary["nonspec"]["nsp"][name] = test_result["ok"] - elif group_id == "app": - summary["appservice"]["app"][name] = test_result["ok"] - elif group_id in test_mappings["federation_apis"]: - group = summary["federation"].get(group_id, {}) - group[name] = test_result["ok"] - summary["federation"][group_id] = group - elif group_id in test_mappings["client_apis"]: - group = summary["client"].get(group_id, {}) - group[name] = test_result["ok"] - summary["client"][group_id] = group - - print("Are We Synapse Yet?") - print("===================") - print("") - print_stats("Non-Spec APIs", summary["nonspec"], test_mappings, verbose) - print_stats("Client-Server APIs", summary["client"], test_mappings["client_apis"], verbose) - print_stats("Federation APIs", summary["federation"], test_mappings["federation_apis"], verbose) - print_stats("Application Services APIs", summary["appservice"], test_mappings, verbose) - - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument("tap_file", help="path to results.tap") - parser.add_argument("-v", action="store_true", help="show individual test names in output") - args = parser.parse_args() - main(args.tap_file, args.v) \ No newline at end of file diff --git a/tests/sytest/show-expected-fail-tests.sh b/tests/sytest/show-expected-fail-tests.sh deleted file mode 100755 index 320d4ebd..00000000 --- a/tests/sytest/show-expected-fail-tests.sh +++ /dev/null @@ -1,105 +0,0 @@ -#! /bin/bash -# -# Parses a results.tap file from SyTest output and a file containing test names (a test whitelist) -# and checks whether a test name that exists in the whitelist (that should pass), failed or not. -# -# An optional blacklist file can be added, also containing test names, where if a test name is -# present, the script will not error even if the test is in the whitelist file and failed -# -# For each of these files, lines starting with '#' are ignored. -# -# Usage ./show-expected-fail-tests.sh results.tap whitelist [blacklist] - -results_file=$1 -whitelist_file=$2 -blacklist_file=$3 - -fail_build=0 - -if [ $# -lt 2 ]; then - echo "Usage: $0 results.tap whitelist [blacklist]" - exit 1 -fi - -if [ ! -f "$results_file" ]; then - echo "ERROR: Specified results file '${results_file}' doesn't exist." - fail_build=1 -fi - -if [ ! -f "$whitelist_file" ]; then - echo "ERROR: Specified test whitelist '${whitelist_file}' doesn't exist." - fail_build=1 -fi - -blacklisted_tests=() - -# Check if a blacklist file was provided -if [ $# -eq 3 ]; then - # Read test blacklist file - if [ ! -f "$blacklist_file" ]; then - echo "ERROR: Specified test blacklist file '${blacklist_file}' doesn't exist." - fail_build=1 - fi - - # Read each line, ignoring those that start with '#' - blacklisted_tests="" - search_non_comments=$(grep -v '^#' ${blacklist_file}) - while read -r line ; do - # Record the blacklisted test name - blacklisted_tests+=("${line}") - done <<< "${search_non_comments}" # This allows us to edit blacklisted_tests in the while loop -fi - -[ "$fail_build" = 0 ] || exit 1 - -passed_but_expected_fail=$(grep ' # TODO passed but expected fail' ${results_file} | sed -E 's/^ok [0-9]+ (\(expected fail\) )?//' | sed -E 's/( \([0-9]+ subtests\))? 
# TODO passed but expected fail$//') -tests_to_add="" -already_in_whitelist="" - -while read -r test_name; do - # Ignore empty lines - [ "${test_name}" = "" ] && continue - - grep "^${test_name}" "${whitelist_file}" > /dev/null 2>&1 - if [ "$?" != "0" ]; then - # Check if this test name is blacklisted - if printf '%s\n' "${blacklisted_tests[@]}" | grep -q -P "^${test_name}$"; then - # Don't notify about this test - continue - fi - - # Append this test_name to the existing list - tests_to_add="${tests_to_add}${test_name}\n" - fail_build=1 - else - already_in_whitelist="${already_in_whitelist}${test_name}\n" - fi -done <<< "${passed_but_expected_fail}" - -# TODO: Check that the same test doesn't exist in both the whitelist and blacklist -# TODO: Check that the same test doesn't appear twice in the whitelist|blacklist - -# Trim test output strings -tests_to_add=$(IFS=$'\n' echo "${tests_to_add[*]%%'\n'}") -already_in_whitelist=$(IFS=$'\n' echo "${already_in_whitelist[*]%%'\n'}") - -# Format output with markdown for buildkite annotation rendering purposes -if [ -n "${tests_to_add}" ] && [ -n "${already_in_whitelist}" ]; then - echo "### 📜 SyTest Whitelist Maintenance" -fi - -if [ -n "${tests_to_add}" ]; then - echo "**ERROR**: The following tests passed but are not present in \`$2\`. Please append them to the file:" - echo "\`\`\`" - echo -e "${tests_to_add}" - echo "\`\`\`" -fi - -if [ -n "${already_in_whitelist}" ]; then - echo "**WARN**: Tests in the whitelist still marked as **expected fail**:" - echo "\`\`\`" - echo -e "${already_in_whitelist}" - echo "\`\`\`" -fi - -exit ${fail_build} diff --git a/tests/sytest/sytest-blacklist b/tests/sytest/sytest-blacklist deleted file mode 100644 index 009de225..00000000 --- a/tests/sytest/sytest-blacklist +++ /dev/null @@ -1,7 +0,0 @@ -# This test checks for a room-alias key in the response which is not in the spec, we must add it back in whitelist when https://github.com/matrix-org/sytest/pull/880 is merged -POST /createRoom makes a public room -# These fails because they use a endpoint which is not in the spec, we must add them back in whitelist when https://github.com/matrix-org/sytest/issues/878 is closed -POST /createRoom makes a room with a name -POST /createRoom makes a room with a topic -Can /sync newly created room -POST /createRoom ignores attempts to set the room version via creation_content \ No newline at end of file diff --git a/tests/sytest/sytest-whitelist b/tests/sytest/sytest-whitelist deleted file mode 100644 index 1c969dba..00000000 --- a/tests/sytest/sytest-whitelist +++ /dev/null @@ -1,516 +0,0 @@ -/event/ does not allow access to events before the user joined -/event/ on joined room works -/event/ on non world readable room does not work -/joined_members return joined members -/joined_rooms returns only joined rooms -/whois -3pid invite join valid signature but revoked keys are rejected -3pid invite join valid signature but unreachable ID server are rejected -3pid invite join with wrong but valid signature are rejected -A change to displayname should appear in incremental /sync -A full_state incremental update returns all state -A full_state incremental update returns only recent timeline -A message sent after an initial sync appears in the timeline of an incremental sync. 
-A next_batch token can be used in the v1 messages API -A pair of events which redact each other should be ignored -A pair of servers can establish a join in a v2 room -A prev_batch token can be used in the v1 messages API -AS can create a user -AS can create a user with an underscore -AS can create a user with inhibit_login -AS can set avatar for ghosted users -AS can set displayname for ghosted users -AS can't set displayname for random users -AS cannot create users outside its own namespace -AS user (not ghost) can join room without registering -AS user (not ghost) can join room without registering, with user_id query param -After changing password, a different session no longer works by default -After changing password, can log in with new password -After changing password, can't log in with old password -After changing password, different sessions can optionally be kept -After changing password, existing session still works -After deactivating account, can't log in with an email -After deactivating account, can't log in with password -Alias creators can delete alias with no ops -Alias creators can delete canonical alias with no ops -Alternative server names do not cause a routing loop -An event which redacts an event in a different room should be ignored -An event which redacts itself should be ignored -Asking for a remote rooms list, but supplying the local server's name, returns the local rooms list -Backfill checks the events requested belong to the room -Backfill works correctly with history visibility set to joined -Backfilled events whose prev_events are in a different room do not allow cross-room back-pagination -Banned servers cannot /event_auth -Banned servers cannot /invite -Banned servers cannot /make_join -Banned servers cannot /make_leave -Banned servers cannot /send_join -Banned servers cannot /send_leave -Banned servers cannot backfill -Banned servers cannot get missing events -Banned servers cannot get room state -Banned servers cannot get room state ids -Banned servers cannot send events -Banned user is kicked and may not rejoin until unbanned -Both GET and PUT work -Can /sync newly created room -Can add account data -Can add account data to room -Can add tag -Can claim one time key using POST -Can claim remote one time key using POST -Can create filter -Can deactivate account -Can delete canonical alias -Can download file 'ascii' -Can download file 'name with spaces' -Can download file 'name;with;semicolons' -Can download filter -Can download specifying a different ASCII file name -Can download specifying a different Unicode file name -Can download with Unicode file name locally -Can download with Unicode file name over federation -Can download without a file name locally -Can download without a file name over federation -Can forget room you've been kicked from -Can get 'm.room.name' state for a departed room (SPEC-216) -Can get account data without syncing -Can get remote public room list -Can get room account data without syncing -Can get rooms/{roomId}/members -Can get rooms/{roomId}/members for a departed room (SPEC-216) -Can get rooms/{roomId}/state for a departed room (SPEC-216) -Can invite users to invite-only rooms -Can list tags for a room -Can logout all devices -Can logout current device -Can paginate public room list -Can pass a JSON filter as a query parameter -Can query device keys using POST -Can query remote device keys using POST -Can query specific device keys using POST -Can re-join room if re-invited -Can read configuration endpoint -Can receive 
redactions from regular users over federation in room version 1 -Can receive redactions from regular users over federation in room version 2 -Can receive redactions from regular users over federation in room version 3 -Can receive redactions from regular users over federation in room version 4 -Can receive redactions from regular users over federation in room version 5 -Can receive redactions from regular users over federation in room version 6 -Can recv a device message using /sync -Can recv a device message using /sync -Can recv device messages over federation -Can recv device messages until they are acknowledged -Can recv device messages until they are acknowledged -Can reject invites over federation for rooms with version 1 -Can reject invites over federation for rooms with version 2 -Can reject invites over federation for rooms with version 3 -Can reject invites over federation for rooms with version 4 -Can reject invites over federation for rooms with version 5 -Can reject invites over federation for rooms with version 6 -Can remove tag -Can search public room list -Can send a message directly to a device using PUT /sendToDevice -Can send a message directly to a device using PUT /sendToDevice -Can send a to-device message to two users which both receive it using /sync -Can send image in room message -Can send messages with a wildcard device id -Can send messages with a wildcard device id -Can send messages with a wildcard device id to two devices -Can send messages with a wildcard device id to two devices -Can sync -Can sync a joined room -Can sync a room with a message with a transaction id -Can sync a room with a single message -Can upload device keys -Can upload with ASCII file name -Can upload with Unicode file name -Can upload without a file name -Can't deactivate account with wrong password -Can't forget room you're still in -Changes to state are included in an gapped incremental sync -Changes to state are included in an incremental sync -Changing the actions of an unknown default rule fails with 404 -Changing the actions of an unknown rule fails with 404 -Checking local federation server -Creators can delete alias -Current state appears in timeline in private history -Current state appears in timeline in private history with many messages before -DELETE /device/{deviceId} -DELETE /device/{deviceId} requires UI auth user to match device owner -DELETE /device/{deviceId} with no body gives a 401 -Deleted tags appear in an incremental v2 /sync -Deleting a non-existent alias should return a 404 -Device list doesn't change if remote server is down -Device messages over federation wake up /sync -Device messages wake up /sync -Device messages wake up /sync -Device messages with the same txn_id are deduplicated -Device messages with the same txn_id are deduplicated -Enabling an unknown default rule fails with 404 -Event size limits -Event with an invalid signature in the send_join response should not cause room join to fail -Events come down the correct room -Events whose auth_events are in the wrong room do not mess up the room state -Existing members see new members' join events -Federation key API allows unsigned requests for keys -Federation key API can act as a notary server via a GET request -Federation key API can act as a notary server via a POST request -Federation rejects inbound events where the prev_events cannot be found -Fetching eventstream a second time doesn't yield the message again -Forgetting room does not show up in v2 /sync -Full state sync includes joined rooms 
-GET /capabilities is present and well formed for registered user -GET /device/{deviceId} -GET /device/{deviceId} gives a 404 for unknown devices -GET /devices -GET /directory/room/:room_alias yields room ID -GET /events initially -GET /events with negative 'limit' -GET /events with non-numeric 'limit' -GET /events with non-numeric 'timeout' -GET /initialSync initially -GET /joined_rooms lists newly-created room -GET /login yields a set of flows -GET /media/r0/download can fetch the value again -GET /profile/:user_id/avatar_url publicly accessible -GET /profile/:user_id/displayname publicly accessible -GET /publicRooms includes avatar URLs -GET /publicRooms lists newly-created room -GET /publicRooms lists rooms -GET /r0/capabilities is not public -GET /register yields a set of flows -GET /rooms/:room_id/joined_members fetches my membership -GET /rooms/:room_id/messages returns a message -GET /rooms/:room_id/state fetches entire room state -GET /rooms/:room_id/state/m.room.member/:user_id fetches my membership -GET /rooms/:room_id/state/m.room.member/:user_id?format=event fetches my membership event -GET /rooms/:room_id/state/m.room.name gets name -GET /rooms/:room_id/state/m.room.power_levels can fetch levels -GET /rooms/:room_id/state/m.room.power_levels fetches powerlevels -GET /rooms/:room_id/state/m.room.topic gets topic -Get left notifs for other users in sync and /keys/changes when user leaves -Getting messages going forward is limited for a departed room (SPEC-216) -Getting push rules doesn't corrupt the cache SYN-390 -Getting state IDs checks the events requested belong to the room -Getting state checks the events requested belong to the room -Ghost user must register before joining room -Guest non-joined user cannot call /events on default room -Guest non-joined user cannot call /events on invited room -Guest non-joined user cannot call /events on joined room -Guest non-joined user cannot call /events on shared room -Guest non-joined users can get individual state for world_readable rooms -Guest non-joined users can get individual state for world_readable rooms after leaving -Guest non-joined users can get state for world_readable rooms -Guest non-joined users cannot room initalSync for non-world_readable rooms -Guest non-joined users cannot send messages to guest_access rooms if not joined -Guest user can set display names -Guest user cannot call /events globally -Guest user cannot upgrade other users -Guest users can accept invites to private rooms over federation -Guest users can join guest_access rooms -Guest users can send messages to guest_access rooms if joined -If a device list update goes missing, the server resyncs on the next one -If remote user leaves room we no longer receive device updates -If remote user leaves room, changes device and rejoins we see update in /keys/changes -If remote user leaves room, changes device and rejoins we see update in sync -Inbound /make_join rejects attempts to join rooms where all users have left -Inbound /v1/make_join rejects remote attempts to join local users to rooms -Inbound /v1/send_join rejects incorrectly-signed joins -Inbound /v1/send_join rejects joins from other servers -Inbound /v1/send_leave rejects leaves from other servers -Inbound federation accepts a second soft-failed event -Inbound federation accepts attempts to join v2 rooms from servers with support -Inbound federation can backfill events -Inbound federation can get public room list -Inbound federation can get state for a room -Inbound federation can get state_ids 
for a room -Inbound federation can query profile data -Inbound federation can query room alias directory -Inbound federation can receive events -Inbound federation can receive invites via v1 API -Inbound federation can receive invites via v2 API -Inbound federation can receive redacted events -Inbound federation can receive v1 /send_join -Inbound federation can receive v2 /send_join -Inbound federation can return events -Inbound federation can return missing events for invite visibility -Inbound federation can return missing events for world_readable visibility -Inbound federation correctly soft fails events -Inbound federation of state requires event_id as a mandatory paramater -Inbound federation of state_ids requires event_id as a mandatory paramater -Inbound federation rejects attempts to join v1 rooms from servers without v1 support -Inbound federation rejects attempts to join v2 rooms from servers lacking version support -Inbound federation rejects attempts to join v2 rooms from servers only supporting v1 -Inbound federation rejects invite rejections which include invalid JSON for room version 6 -Inbound federation rejects invites which include invalid JSON for room version 6 -Inbound federation rejects receipts from wrong remote -Inbound federation rejects remote attempts to join local users to rooms -Inbound federation rejects remote attempts to kick local users to rooms -Inbound federation rejects typing notifications from wrong remote -Inbound: send_join rejects invalid JSON for room version 6 -Invalid JSON floats -Invalid JSON integers -Invalid JSON special values -Invited user can reject invite -Invited user can reject invite over federation -Invited user can reject invite over federation for empty room -Invited user can reject invite over federation several times -Invited user can see room metadata -Inviting an AS-hosted user asks the AS server -Lazy loading parameters in the filter are strictly boolean -Left rooms appear in the leave section of full state sync -Local delete device changes appear in v2 /sync -Local device key changes appear in /keys/changes -Local device key changes appear in v2 /sync -Local device key changes get to remote servers -Local new device changes appear in v2 /sync -Local non-members don't see posted message events -Local room members can get room messages -Local room members see posted message events -Local update device changes appear in v2 /sync -Local users can peek by room alias -Local users can peek into world_readable rooms by room ID -Message history can be paginated -Message history can be paginated over federation -Name/topic keys are correct -New account data appears in incremental v2 /sync -New read receipts appear in incremental v2 /sync -New room members see their own join event -New users appear in /keys/changes -Newly banned rooms appear in the leave section of incremental sync -Newly joined room is included in an incremental sync -Newly joined room is included in an incremental sync after invite -Newly left rooms appear in the leave section of gapped sync -Newly left rooms appear in the leave section of incremental sync -Newly updated tags appear in an incremental v2 /sync -Non-numeric ports in server names are rejected -Outbound federation can backfill events -Outbound federation can query profile data -Outbound federation can query room alias directory -Outbound federation can query v1 /send_join -Outbound federation can query v2 /send_join -Outbound federation can request missing events -Outbound federation can send events 
-Outbound federation can send invites via v1 API -Outbound federation can send invites via v2 API -Outbound federation can send room-join requests -Outbound federation correctly handles unsupported room versions -Outbound federation passes make_join failures through to the client -Outbound federation rejects backfill containing invalid JSON for events in room version 6 -Outbound federation rejects m.room.create events with an unknown room version -Outbound federation rejects send_join responses with no m.room.create event -Outbound federation sends receipts -Outbound federation will ignore a missing event with bad JSON for room version 6 -POST /createRoom creates a room with the given version -POST /createRoom ignores attempts to set the room version via creation_content -POST /createRoom makes a private room -POST /createRoom makes a private room with invites -POST /createRoom makes a public room -POST /createRoom makes a room with a name -POST /createRoom makes a room with a topic -POST /createRoom rejects attempts to create rooms with numeric versions -POST /createRoom rejects attempts to create rooms with unknown versions -POST /createRoom with creation content -POST /join/:room_alias can join a room -POST /join/:room_alias can join a room with custom content -POST /join/:room_id can join a room -POST /join/:room_id can join a room with custom content -POST /login as non-existing user is rejected -POST /login can log in as a user -POST /login can log in as a user with just the local part of the id -POST /login returns the same device_id as that in the request -POST /login wrong password is rejected -POST /media/r0/upload can create an upload -POST /redact disallows redaction of event in different room -POST /register allows registration of usernames with '-' -POST /register allows registration of usernames with '.' -POST /register allows registration of usernames with '/' -POST /register allows registration of usernames with '3' -POST /register allows registration of usernames with '=' -POST /register allows registration of usernames with '_' -POST /register allows registration of usernames with 'q' -POST /register can create a user -POST /register downcases capitals in usernames -POST /register rejects registration of usernames with '!' -POST /register rejects registration of usernames with '"' -POST /register rejects registration of usernames with ''' -POST /register rejects registration of usernames with ':' -POST /register rejects registration of usernames with '?' 
-POST /register rejects registration of usernames with '@' -POST /register rejects registration of usernames with '[' -POST /register rejects registration of usernames with '\' -POST /register rejects registration of usernames with '\n' -POST /register rejects registration of usernames with ']' -POST /register rejects registration of usernames with '{' -POST /register rejects registration of usernames with '|' -POST /register rejects registration of usernames with '}' -POST /register rejects registration of usernames with '£' -POST /register rejects registration of usernames with 'é' -POST /register returns the same device_id as that in the request -POST /rooms/:room_id/ban can ban a user -POST /rooms/:room_id/invite can send an invite -POST /rooms/:room_id/join can join a room -POST /rooms/:room_id/leave can leave a room -POST /rooms/:room_id/read_markers can create read marker -POST /rooms/:room_id/receipt can create receipts -POST /rooms/:room_id/redact/:event_id as original message sender redacts message -POST /rooms/:room_id/redact/:event_id as power user redacts message -POST /rooms/:room_id/redact/:event_id as random user does not redact message -POST /rooms/:room_id/send/:event_type sends a message -POST /rooms/:room_id/state/m.room.name sets name -POST /rooms/:room_id/state/m.room.topic sets topic -POST /rooms/:room_id/upgrade can upgrade a room version -POST rejects invalid utf-8 in JSON -POSTed media can be thumbnailed -PUT /device/{deviceId} gives a 404 for unknown devices -PUT /device/{deviceId} updates device fields -PUT /directory/room/:room_alias creates alias -PUT /profile/:user_id/avatar_url sets my avatar -PUT /profile/:user_id/displayname sets my name -PUT /rooms/:room_id/send/:event_type/:txn_id deduplicates the same txn id -PUT /rooms/:room_id/send/:event_type/:txn_id sends a message -PUT /rooms/:room_id/state/m.room.power_levels can set levels -PUT /rooms/:room_id/typing/:user_id sets typing notification -PUT power_levels should not explode if the old power levels were empty -Peeked rooms only turn up in the sync for the device who peeked them -Previously left rooms don't appear in the leave section of sync -Push rules come down in an initial /sync -Read markers appear in incremental v2 /sync -Read markers appear in initial v2 /sync -Read markers can be updated -Read receipts appear in initial v2 /sync -Real non-joined user cannot call /events on default room -Real non-joined user cannot call /events on invited room -Real non-joined user cannot call /events on joined room -Real non-joined user cannot call /events on shared room -Real non-joined users can get individual state for world_readable rooms -Real non-joined users can get individual state for world_readable rooms after leaving -Real non-joined users can get state for world_readable rooms -Real non-joined users cannot room initalSync for non-world_readable rooms -Real non-joined users cannot send messages to guest_access rooms if not joined -Receipts must be m.read -Redaction of a redaction redacts the redaction reason -Regular users can add and delete aliases in the default room configuration -Regular users can add and delete aliases when m.room.aliases is restricted -Regular users cannot create room aliases within the AS namespace -Regular users cannot register within the AS namespace -Remote media can be thumbnailed -Remote room alias queries can handle Unicode -Remote room members also see posted message events -Remote room members can get room messages -Remote user can backfill in a room with version 1 
-Remote user can backfill in a room with version 2 -Remote user can backfill in a room with version 3 -Remote user can backfill in a room with version 4 -Remote user can backfill in a room with version 5 -Remote user can backfill in a room with version 6 -Remote users can join room by alias -Remote users may not join unfederated rooms -Request to logout with invalid an access token is rejected -Request to logout without an access token is rejected -Room aliases can contain Unicode -Room creation reports m.room.create to myself -Room creation reports m.room.member to myself -Room members can join a room with an overridden displayname -Room members can override their displayname on a room-specific basis -Room state at a rejected message event is the same as its predecessor -Room state at a rejected state event is the same as its predecessor -Rooms a user is invited to appear in an incremental sync -Rooms a user is invited to appear in an initial sync -Rooms can be created with an initial invite list (SYN-205) -Server correctly handles incoming m.device_list_update -Server correctly handles transactions that break edu limits -Server correctly resyncs when client query keys and there is no remote cache -Server correctly resyncs when server leaves and rejoins a room -Server rejects invalid JSON in a version 6 room -Setting room topic reports m.room.topic to myself -Should not be able to take over the room by pretending there is no PL event -Should reject keys claiming to belong to a different user -State from remote users is included in the state in the initial sync -State from remote users is included in the timeline in an incremental sync -State is included in the timeline in the initial sync -Sync can be polled for updates -Sync is woken up for leaves -Syncing a new room with a large timeline limit isn't limited -Tags appear in an initial v2 /sync -Trying to get push rules with unknown rule_id fails with 404 -Typing can be explicitly stopped -Typing events appear in gapped sync -Typing events appear in incremental sync -Typing events appear in initial sync -Typing notification sent to local room members -Typing notifications also sent to remote room members -Typing notifications don't leak -Uninvited users cannot join the room -Unprivileged users can set m.room.topic if it only needs level 0 -User appears in user directory -User in private room doesn't appear in user directory -User joining then leaving public room appears and dissappears from directory -User in shared private room does appear in user directory until leave -User can create and send/receive messages in a room with version 1 -User can create and send/receive messages in a room with version 2 -User can create and send/receive messages in a room with version 3 -User can create and send/receive messages in a room with version 4 -User can create and send/receive messages in a room with version 5 -User can create and send/receive messages in a room with version 6 -User can invite local user to room with version 1 -User can invite local user to room with version 2 -User can invite local user to room with version 3 -User can invite local user to room with version 4 -User can invite local user to room with version 5 -User can invite local user to room with version 6 -User can invite remote user to room with version 1 -User can invite remote user to room with version 2 -User can invite remote user to room with version 3 -User can invite remote user to room with version 4 -User can invite remote user to room with version 5 -User can 
invite remote user to room with version 6 -User directory correctly update on display name change -User in dir while user still shares private rooms -User in shared private room does appear in user directory -User is offline if they set_presence=offline in their sync -User signups are forbidden from starting with '_' -Users can't delete other's aliases -Users cannot invite a user that is already in the room -Users cannot invite themselves to a room -Users cannot kick users from a room they are not in -Users cannot kick users who have already left a room -Users cannot set ban powerlevel higher than their own -Users cannot set kick powerlevel higher than their own -Users cannot set notifications powerlevel higher than their own -Users cannot set redact powerlevel higher than their own -Users receive device_list updates for their own devices -Users with sufficient power-level can delete other's aliases -Version responds 200 OK with valid structure -We can't peek into rooms with invited history_visibility -We can't peek into rooms with joined history_visibility -We can't peek into rooms with shared history_visibility -We don't send redundant membership state across incremental syncs by default -We should see our own leave event when rejecting an invite, even if history_visibility is restricted (riot-web/3462) -We should see our own leave event, even if history_visibility is restricted (SYN-662) -Wildcard device messages over federation wake up /sync -Wildcard device messages wake up /sync -Wildcard device messages wake up /sync -avatar_url updates affect room member events -displayname updates affect room member events -local user can join room with version 1 -local user can join room with version 2 -local user can join room with version 3 -local user can join room with version 4 -local user can join room with version 5 -local user can join room with version 6 -m.room.history_visibility == "joined" allows/forbids appropriately for Guest users -m.room.history_visibility == "joined" allows/forbids appropriately for Real users -m.room.history_visibility == "world_readable" allows/forbids appropriately for Guest users -m.room.history_visibility == "world_readable" allows/forbids appropriately for Real users -query for user with no keys returns empty key dict -remote user can join room with version 1 -remote user can join room with version 2 -remote user can join room with version 3 -remote user can join room with version 4 -remote user can join room with version 5 -remote user can join room with version 6 -setting 'm.room.name' respects room powerlevel -setting 'm.room.power_levels' respects room powerlevel -Federation publicRoom Name/topic keys are correct From e704bbaf1166d0082a7aac27fdbd72e37d8fd664 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 10 Mar 2025 12:30:39 -0400 Subject: [PATCH 121/310] update complement test results Signed-off-by: June Clementine Strawberry --- tests/test_results/complement/test_results.jsonl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 7b06510b..5fb850f1 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -534,10 +534,10 @@ {"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel"} {"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_accepts_present_aliases"} 
{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_accepts_present_alt_aliases"} -{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_alias_pointing_to_different_local_room"} -{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_alt_alias_pointing_to_different_local_room"} -{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_invalid_aliases"} -{"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_invalid_aliases#01"} +{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_alias_pointing_to_different_local_room"} +{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_alt_alias_pointing_to_different_local_room"} +{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_invalid_aliases"} +{"Action":"pass","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_invalid_aliases#01"} {"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_missing_aliases"} {"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_rejects_missing_aliases#01"} {"Action":"fail","Test":"TestRoomCanonicalAlias/Parallel/m.room.canonical_alias_setting_rejects_deleted_aliases"} From 889fb3cf262d433bf2da461a7482a3e7400fc41f Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 10 Mar 2025 13:36:56 -0400 Subject: [PATCH 122/310] add download-artifact pattern for OCI images only Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cd7d2484..3fd834e0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -550,6 +550,8 @@ jobs: - name: Download artifacts uses: actions/download-artifact@v4 + with: + pattern: "oci*" - name: Move OCI images into position run: | From 56dba8acb7b873c890313991630ebd23bbb47376 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 10 Mar 2025 17:15:21 -0400 Subject: [PATCH 123/310] misc docs updates Signed-off-by: June Clementine Strawberry --- README.md | 62 +++-- arch/conduwuit.service | 1 + book.toml | 5 +- debian/conduwuit.service | 16 +- docs/SUMMARY.md | 1 - docs/assets/conduwuit_logo.svg | 36 +++ docs/assets/gay dog anarchists.png | Bin 0 -> 11533 bytes docs/deploying/generic.md | 22 -- docs/development/hot_reload.md | 3 + docs/development/testing.md | 19 +- docs/differences.md | 379 ----------------------------- docs/introduction.md | 4 - 12 files changed, 107 insertions(+), 441 deletions(-) create mode 100644 docs/assets/conduwuit_logo.svg create mode 100644 docs/assets/gay dog anarchists.png delete mode 100644 docs/differences.md diff --git a/README.md b/README.md index 13a1c67f..d8f99d45 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,16 @@ # conduwuit -[![conduwuit main room](https://img.shields.io/matrix/conduwuit%3Apuppygock.gay?server_fqdn=matrix.transfem.dev&style=flat&logo=matrix&logoColor=%23f5b3ff&label=%23conduwuit%3Apuppygock.gay&color=%23f652ff)](https://matrix.to/#/#conduwuit:puppygock.gay) [![conduwuit space](https://img.shields.io/matrix/conduwuit-space%3Apuppygock.gay?server_fqdn=matrix.transfem.dev&style=flat&logo=matrix&logoColor=%23f5b3ff&label=%23conduwuit-space%3Apuppygock.gay&color=%23f652ff)](https://matrix.to/#/#conduwuit-space:puppygock.gay) [![CI and 
Artifacts](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml) +[![conduwuit main room](https://img.shields.io/matrix/conduwuit%3Apuppygock.gay?server_fqdn=matrix.transfem.dev&style=flat&logo=matrix&logoColor=%23f5b3ff&label=%23conduwuit%3Apuppygock.gay&color=%23f652ff)](https://matrix.to/#/#conduwuit:puppygock.gay) [![conduwuit space](https://img.shields.io/matrix/conduwuit-space%3Apuppygock.gay?server_fqdn=matrix.transfem.dev&style=flat&logo=matrix&logoColor=%23f5b3ff&label=%23conduwuit-space%3Apuppygock.gay&color=%23f652ff)](https://matrix.to/#/#conduwuit-space:puppygock.gay) + +[![CI and Artifacts](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml/badge.svg?branch=main)](https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml) + +![GitHub Repo stars](https://img.shields.io/github/stars/girlbossceo/conduwuit?style=flat&color=%23fcba03&link=https%3A%2F%2Fgithub.com%2Fgirlbossceo%2Fconduwuit) ![GitHub commit activity](https://img.shields.io/github/commit-activity/m/girlbossceo/conduwuit?style=flat&color=%2303fcb1&link=https%3A%2F%2Fgithub.com%2Fgirlbossceo%2Fconduwuit%2Fpulse%2Fmonthly) ![GitHub Created At](https://img.shields.io/github/created-at/girlbossceo/conduwuit) ![GitHub Sponsors](https://img.shields.io/github/sponsors/girlbossceo?color=%23fc03ba&link=https%3A%2F%2Fgithub.com%2Fsponsors%2Fgirlbossceo) ![GitHub License](https://img.shields.io/github/license/girlbossceo/conduwuit) + + + +![Docker Image Size (tag)](https://img.shields.io/docker/image-size/girlbossceo/conduwuit/latest?label=image%20size%20(latest)&link=https%3A%2F%2Fhub.docker.com%2Frepository%2Fdocker%2Fgirlbossceo%2Fconduwuit%2Ftags%3Fname%3Dlatest) ![Docker Image Size (tag)](https://img.shields.io/docker/image-size/girlbossceo/conduwuit/main?label=image%20size%20(main)&link=https%3A%2F%2Fhub.docker.com%2Frepository%2Fdocker%2Fgirlbossceo%2Fconduwuit%2Ftags%3Fname%3Dmain) + + @@ -53,6 +63,19 @@ A lot of critical stability and performance issues have been fixed, and a lot of necessary groundwork has finished; making this project way better than it was back in the start at ~early 2024. +#### Where is the differences page? + +conduwuit historically had a "differences" page that listed each and every single +different thing about conduwuit from Conduit, as a way to promote and advertise +conduwuit by showing significant amounts of work done. While this was feasible to +maintain back when the project was new in early-2024, this became impossible +very quickly and has unfortunately became heavily outdated, missing tons of things, etc. + +It's difficult to list out what we do differently, what are our notable features, etc +when there's so many things and features and bug fixes and performance optimisations, +the list goes on. We simply recommend folks to just try out conduwuit, or ask us +what features you are looking for and if they're implemented in conduwuit. + #### How is conduwuit funded? Is conduwuit sustainable? conduwuit has no external funding. This is made possible purely in my freetime with @@ -64,17 +87,15 @@ and we have no plans in stopping or slowing down any time soon! #### Can I migrate or switch from Conduit? -conduwuit is a complete drop-in replacement for Conduit. As long as you are using RocksDB, -the only "migration" you need to do is replace the binary or container image. There -is no harm or additional steps required for using conduwuit. 
See the -[Migrating from Conduit](https://conduwuit.puppyirl.gay/deploying/generic.html#migrating-from-conduit) section -on the generic deploying guide. +conduwuit had drop-in migration/replacement support for Conduit for about 12 months before +bugs somewhere along the line broke it. Maintaining this has been difficult and +the majority of Conduit users have already migrated, additionally debugging Conduit +is not one of our interests, and so Conduit migration no longer works. We also +feel that 12 months has been plenty of time for people to seamlessly migrate. -Note that as of conduwuit version 0.5.0, backwards compatibility with Conduit is -no longer supported. We only support migrating *from* Conduit, not back to -Conduit like before. If you are truly finding yourself wanting to migrate back -to Conduit, we would appreciate all your feedback and if we can assist with -any issues or concerns. +If you are a Conduit user looking to migrate, you will have to wipe and reset +your database. We may fix seamless migration support at some point, but it's not an interest +from us. #### Can I migrate from Synapse or Dendrite? @@ -98,9 +119,10 @@ is the official project Matrix room. You can get support here, ask questions or concerns, get assistance setting up conduwuit, etc. This room should stay relevant and focused on conduwuit. An offtopic general -chatter room can be found there as well. +chatter room can be found in the room topic there as well. + +Please keep the issue trackers focused on *actual* bug reports and enhancement requests. -Please keep the issue trackers focused on bug reports and enhancement requests. General support is extremely difficult to be offered over an issue tracker, and simple questions should be asked directly in an interactive platform like our Matrix room above as they can turn into a relevant discussion and/or may not be @@ -108,24 +130,34 @@ simple to answer. If you're not sure, just ask in the Matrix room. If you have a bug or feature to request: [Open an issue on GitHub](https://github.com/girlbossceo/conduwuit/issues/new) +If you need to contact the primary maintainer, my contact methods are on my website: https://girlboss.ceo + #### Donate conduwuit development is purely made possible by myself and contributors. I do not get paid to work on this, and I work on it in my free time. Donations are heavily appreciated! 💜🥺 -- Liberapay (preferred): -- GitHub Sponsors (preferred): +- Liberapay: +- GitHub Sponsors: - Ko-fi: I do not and will not accept cryptocurrency donations, including things related. +Note that donations will NOT guarantee you or give you any kind of tangible product, +feature prioritisation, etc. By donating, you are agreeing that conduwuit is NOT +going to provide you any goods or services as part of your donation, and this +donation is purely a generous donation. We will not provide things like paid +personal/direct support, feature request priority, merchandise, etc. + #### Logo Original repo and Matrix room picture was from bran (<3). Current banner image and logo is directly from [this cohost post](https://web.archive.org/web/20241126004041/https://cohost.org/RatBaby/post/1028290-finally-a-flag-for). +An SVG logo made by [@nktnet1](https://github.com/nktnet1) is available here: + #### Is it conduwuit or Conduwuit? Both, but I prefer conduwuit. 
diff --git a/arch/conduwuit.service b/arch/conduwuit.service index fa3616d8..4f45ddc0 100644 --- a/arch/conduwuit.service +++ b/arch/conduwuit.service @@ -4,6 +4,7 @@ Wants=network-online.target After=network-online.target Documentation=https://conduwuit.puppyirl.gay/ RequiresMountsFor=/var/lib/private/conduwuit +Alias=matrix-conduwuit.service [Service] DynamicUser=yes diff --git a/book.toml b/book.toml index 1d32c766..7eb1983b 100644 --- a/book.toml +++ b/book.toml @@ -13,12 +13,15 @@ create-missing = true extra-watch-dirs = ["debian", "docs"] [rust] -edition = "2021" +edition = "2024" [output.html] git-repository-url = "https://github.com/girlbossceo/conduwuit" edit-url-template = "https://github.com/girlbossceo/conduwuit/edit/main/{path}" git-repository-icon = "fa-github-square" +[output.html.redirect] +"/differences.html" = "https://conduwuit.puppyirl.gay/#where-is-the-differences-page" + [output.html.search] limit-results = 15 diff --git a/debian/conduwuit.service b/debian/conduwuit.service index 4d6f4eef..a079499e 100644 --- a/debian/conduwuit.service +++ b/debian/conduwuit.service @@ -2,26 +2,14 @@ Description=conduwuit Matrix homeserver Wants=network-online.target After=network-online.target +Alias=matrix-conduwuit.service Documentation=https://conduwuit.puppyirl.gay/ [Service] DynamicUser=yes User=conduwuit Group=conduwuit -Type=notify-reload -ReloadSignal=SIGUSR1 - -TTYPath=/dev/tty25 -DeviceAllow=char-tty -StandardInput=tty-force -StandardOutput=tty -StandardError=journal+console -TTYReset=yes -# uncomment to allow buffer to be cleared every restart -TTYVTDisallocate=no - -TTYColumns=120 -TTYRows=40 +Type=notify Environment="CONDUWUIT_CONFIG=/etc/conduwuit/conduwuit.toml" diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 8e07adc2..ad0f8135 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -1,7 +1,6 @@ # Summary - [Introduction](introduction.md) -- [Differences from upstream Conduit](differences.md) - [Configuration](configuration.md) - [Examples](configuration/examples.md) - [Deploying](deploying.md) diff --git a/docs/assets/conduwuit_logo.svg b/docs/assets/conduwuit_logo.svg new file mode 100644 index 00000000..9be5b453 --- /dev/null +++ b/docs/assets/conduwuit_logo.svg @@ -0,0 +1,36 @@ + + + + + + diff --git a/docs/assets/gay dog anarchists.png b/docs/assets/gay dog anarchists.png new file mode 100644 index 0000000000000000000000000000000000000000..871cf302a8f27d6ed02c983241c7c5ca6029ce99 GIT binary patch literal 11533 zcmd^l`9IX(`}fP1$`YwavWDy=`&QWrW#7i2ELoBoM$9m6mK0^*DTYe6gc=O8y~~!7 zEg>WF%962;{XXOK{@nNd{TseNJs;;>*LAMtoa=S2nRCTlF*js8%zGGuAU0zoJxd5W z&+`LHQqE{g$ zsAMIhX*sTTt?=_k=lJ&2!rtB+im94gKC-TBk0rx*^;sl%-pN zcQ9Dowdtoc#jYa_}hWPfyC&r%KM(}?T&jK9%m+P$?GL3a90I(p2*^u#~RjDwq+Touy; z;fx$mt`-BXf1@X&&klkNw|XLS_vEl_(5-?k>Siv);EEu&rvk7EZ=-?!?o&Cs1C2hc zlcqSj@}nM@L-LTV8y}LM6?c4yM!K+ENCu>@A7JWHSLnPylE%SXtwUtwfx1G^(%@;R zDh$8(W%@C#>aYQn9a3 z(ik6o+jmshXWx6eRhpQ5tr|^>eC6GJDE_z)`5XI?skYCOcy}LSKkmbW4H{e-JxWve z$}gR!&brE)rcTmv-i72e~Kn{>zy8r(L9OfVWY7nZ4K3n`%)Gu zPiS*o9yGqiGyAdDytp3_Lti198-_k2`*1^aAH2nB!0aor4?a@+a6@Jvy!ZW%_fepY z;=L7VaKuMxAG}p)5LUIn!&=|tXj%&f_I-P?L-S_NUYBO1!2l;MYM&hUV^N+%BW+(Z zqnS)~L+pd6KMg{v+i0}6=fJv+(IG52F2?GH%j<{8tYk5vsTHt4_|L&8%MbJp>{PUJOM zgT9fjWw3Nbi|*%DvaiWzDYSTyq9lVwLLtwn-^HsEJSz`xF5&CO^I3GwK^5EH|C9A*TO1F&;W4 zLi8~nH^!N9B3BFkiZ(_@mt~O9T&{{Ob_jUuP&p^LxO(L@(-0%Y2DzC-?}wvi=x&zM zqbx98B3LAyx;Sw%h!2s^g_iSq79LMn(jiOQ$YEOi3X2K?qJ{hw->CE-t!E6#rNGf=bKaJJR~8JT9{D z4OoNyD-)%vZbgfE21hk98V|ztb0JT{TkPex$x@=&-*mQSn5lxlT8)om$_iFSneAmL 
z$;re$NJ^AQA{~dyO~_Jm2nI;%4AJpfDLt;A6X|a>NZ0r!y3Dpfiq6%h#ZH4%1(gev zi$!4{$exYTpMuhY-ae%Im=S~_+l!bgaRe76B}jBkDm{#wjww^d&(h}wl8bHNFOYWz z)yIyY4DpH)9aBo#ah@?{)%aP4ysPBmAowtycLLQ1NicwT&kzR$5u#8RCvx^#DHpCS zx~$5`?(hYxmch^hU#2`EvacDbpZ+T=rAihKrSpDBb<9MyLgz1HN-PMT5U(I{;4DH3 z3MBhxlY$sH#E1iu2sH?t#ClQ{0|!`K8leR#kbOrny97!4n0 zNP9&6gCw*;o`S?`L4*g?A5#Wme)56`*;k5`eBeql^?MwugwFa5vHA?cAL@@T^EVzm zbwQNu>q|P$eB}}Kdk(6L&KgLcMFc_OT*#56QVkqeOj(JE9e<;7%V03x_sCC1vTp~e zpLy{**>{Ar%DkvY_MIgiKe%{?9E)P3*Str)grfty{vowg7bg-UM>Mq)ZoJyUt%LXF z{CSWZ8-Xw81i2pj0;NUIXN0+GVt1zTVhi^*ID=6YM|HO&NYe3(64Me&O>u_Na)o#) zuAdjlu~<^op+$(&8V>(q1gTDWhQiX%2@yl=376=^V&wP}ORwS%kz?EO#m8KusALXU zor#AN$tQ+*4KYz#Uy!N}%L@`!oe7R~Ak$S12rhKt97sNKLVWei#}*mc z$>6ADV^R={Uk;UggOtp2eqpXPHubkoeJF*jKNW$*P^^yg;Wk;9oNTHBIo8Vhbx@v&mt25&JHl!VktKhW3z=2u(#vpK zwKn{YcgW5y^wyGb?ir%0%@mn=U~X#uIlMYL8(Y&Jo&ImQoNGTTbDZc;h|_EF2B^0&E=o!3eO4$Qsb zLJGf4sKiF`yYN>!PtT7{YPjrBkt`lv zZs<{3F`iVH#w2cW>CrU5_lDW^?L!@(dlzSm=50d4G&aiW!=#95R`UT&m({PA%>i#? z1!Aklowe(}%x89({AC;~n1!jKOARVoTH6u30fgq>s>kC8hh|$JR7RoKz4#Y)ml||L zC=(9t%Z(8x0)=sL>VcQTlve|Yd(UnLuDm4;((x~TU7TH5+ibe`ZMY*gDkb#PL#@x& z16jM)l=*Mcly+2n{xu~c>tbGJeelm`7zx9)mrO;sfA1=*wTs_tU_Ylcl;YZnw;cY&pJ-vQyEZ*DIW_KbxolSc zX-L7ShNTyEbu@Og_DoZnaXsPDA+Nq8N6G>U|2NqH7gbwe6-V2?@eX}ZutG^HsS5o zdyjQvKy3*z-KdLIX+PvC#N6NI*yK0ZWnO2UTk;)2_Ld<(Xgkt4t$p}zmMi~*t0SGF zod)yLCtj&xseKD#H@2Gi&u!l6(s4aKo+k$f8wPlmxmC~j)ji(4nKGFZ9RK>ZSbKQB z+Mo&x=GX8H`iZgLAIH4t;T+1>nNervg*Yz1oV>YDU6*PKw!fZhx-4utcvQc0;EBbV z+zN-d!N<~lse`Y!uLi0#Z=A9+SZ_ZgY$IdU>i6a4i*vj3rkq7jZ3ShO2OW3Q?(T&V zCVP|dU()77G(r~@mh-%`T&u?X`cr%RUVY>iWKR-l&>@|UFO{}y_iLS@8=6ZD&1YQZ z@iS55AUH238a8-knOf+xoH#ck?>JZ8sQbFtBo18}@pH3IMlt*av+H+XbTUJ^zBtF<vslBt7b8Uqt58`lCnD-`-(l`YPUC zl_3n3I7|F=-n>`2J@fv9MnqvP|ESS1#ZgJC>%FZv^FMdA<}ROq%acuvbH23E(H6*O zee|`by3I`qmku8z?#-|jVe9UdS9}aIM4;3c(5xyRid1V?Qt!HnY6TU1NoPOKP~K>?#BLH641Q97+$$YH&oeMhLJl^Yu70#cI1zoYzv>T4I@hi@mN6C z;cNqY)+S4qh3}S{VUPJMY*?APge-O6<-sNu&s7znQ*2F^+`UQeZ0;!ADryi#Xmn%d z5}+FQ=D4>vaof3_Y_B{=8rLf;NYmYxq7mP}gpCv~uAB=x-Wp~jJ;|e$&MH9Ck>-^1 zER6Ki?|VxA9kdAd>Fcu9GPOQCU#C?1;lz=qSAtHxsC;kprqzhnXb}yMfrk?!<-d4a z!dmMo98V5^5;c=^9QLp6d7aN<)xrIS##U5#x>fZ&tz|?_jWcG@f>c;X;sR9 z-OzSRbq4Qh4dyq$cHhh=yZx9R6gtr!);jo}HNsi-dCp;ysF{H5_ZvygpF6w$dW83X z6PWb07ajBY4AUr)@s6KN#T^~d-00qJ>_?S4rCd^dJbQsuhU~HB;0eWpvA)Bbmo_I7 zQd*H;*A}NRX6xGO&q}=sS2XEO<3l6!n@V&TPnB-@{=8l?A<5psp@ty5gg-6XBv1@x znr~FD3>3YeI1-z=(wY?Hep0x|)b2cj$DFN|S*!JyJXXQM>*&Lzc&QBjW6z)GzUI}Y4zD@MMBU>I(`~$P^(;JQ z$V@tJcI5H=J2+MXeP5}3Mb=x@WAGC0c+anWS6DLm>wJnW*i#QLT5S*is-IxOhY6fE z;y$mX8c=LO{NQfKb**<|t~9$dOB(TYC9h)soyz1`mkpOn;oD<$ZJ$X@W=A6ZCBlDg zRNj91Sb&jd#~8?Z&(%e_+vw;Vl;2F0^$wMvOQfKKHBZ%lOZfXQ2hZB)m8u_Hu3Z0P zTwCvM$k3t7#FLC?$zW$qEoqNms`nBZdU!y2CfPzf!dOLTKo0R0l~=_O%Q%!Iw$QnF z|6e!Q&vUz$l@9f`WS?}oLRjy;hq~BzZ<@aIDj%0)+7GNoT^{OhJ_G!?kfDo-=P9r4 zm$H9@!_3GNMY&}9$HeaZX%1ID&kuy@`2Vt&)UT)7vWF7OF zd+`P4@_#lvRIpL?pYkhwEln^j)R9=&f79LDwxV8WbaUn+p#W~W<@xH-;Ono269<-i z-~q!U;AkCj40|E!&s#cCe(*W%5FUwq`Z}g2_-nz$RpJRD%m0XlS#Sl+=jp2q+`8gG zD1@6<^Aze$k*p<>-S*~aZNjtC8=+U;%F5n`-rrDtK-C_l&gc?e!JlrOd^w3px9k+$ zO}U9#5kmB%Uf8z=CK7oaQ3`LLAEA9?NH!oRx<2d5i?N~=GA|ayp=|o1CfqE8?^pPK zu=~m?oLtb6q0K6AU+q~AqWRIA0YOV+!dqB3(Bzq#UfW~3w8WX$XApxZck07tKH+Q4 zYSYDKaJ8WvDacIxW|z=AqyiMSr#ZSdqfaP@TRPmL2u4V@rxH0hhffz};7^6z&1!mK z=@Q3^j1W6H3!x|x#PG_?G~GILB%6%dsa=8sl%IpT(HBdWj~j%GUx>B}@ws>yYr&+# zQ?1wHQo+>e)__w1^!YDQ{^0wg*6y+C<84UkYZLR7G#OMLRh9alH0Zdbt9EbZ5YKUZ zY-aee4pmL2$y#t3)r&@3CzamZyjr*T`H~({!cNcyYmC+;B*+G+pTo#4_m4Vk-uAv2 z82PQ=E9%x2D?%M?q~`ETl^xUp(l3fiQP{r3rNQbTKIhbpRYSsfZ#wE;U*he9(^odT zY9*FCB#722rMC!{(t3637D#pnl_~nT;v=-$fFx>e_vKNv7-x-t7P1<5)rrssciio* 
zc&qp0U10c*5B>A+Bz<#a_m^uAvd&f^CbvWXvP!%$wNuN!>kusC3)lE$A?*bE>f_k) z9a&>_5+f-LYSlNy8+J`<)WPq`JE4l|>N9`DuooGhtVImdwP}(bnWGI7qtQqG!erYv zJm{D!jqKESJ|?@41P4nr*c%WA;GSWPL)9K~ZSf7#7p}{~?nSzj%+#ZPy4}HrN2a(d zjZ-zWcy&Idx@J!$8>vTUe&P<3CIZ1&tpCAY?dg|LW!q# zAs73~1&3D{$$KWq25$zh8DRXo7bsG>CHmJ-z>Y<}eO^Mu%g7UoBPWBM%zC6r$>u$U zDFLca!&yq(Yj%22Mfs-irJ`$N%+&~u_1!X5alRp}UWDY@m@<>hthlqDJ!guA&lNdw zZFo?tgeJEFB!pM-#aS-%(}UR)gN09|8ZWWGG38dv{*H(Uxk0T`m}FeOecZ=%P!$V) zPsFZ>R(uG&k2R{%2u+j?+fDePC@$@ZP1(p(ae=jf>k7~$Q zgz1Y6C;i_p`Boa}c!2WOhCqbBQhTGezn3pL>qXHf>foLbIxz9Ba%r$L71f?E1uqrq z)GWnD)SvB`SVwi_i^G2kHRPtab*E0r`X2x}AJ~`Ko79`U{k`tC$%Yl-ckgrX8OZEq zm)~kQE6!PO|0tAHW#(mIp_$dt6e3eE#M573nn}#6mksUr*bMc)s8-^qqPRbobY$*t z)_6z#ZLeBTMkbW{5tgc*nc1lU5LRWW*f{ zODnl>{gr*R_eo~jM0lOd0oS;ndGgMAo7$u6GkSzKuwexwB?x@YE(h{OaXVS+gDHc% zKR$J=Onpsr$K3=)AozHzxUii{QY_#(#cvWoo6$E@dF`pY2lpQZy5S!t0%v6=NaY!jkcg0rT$7t|;J&)|f2XODmNKSL~(Rh3_%#Rr| zsXcsfs8zbMS@o45q7U^l?II?V$eVGV4D7OBhzu8e=HvFbnY?Dqn27w^Npq#W zj0G%85YdZrqaK{or(N8Eg4q4?LBH>TJDVYxNrUp=fMVjo>_w+{EKhbAd2B#&!q3W` z>d@R2-YlY6pOl{XpBbT?BDu&&HVePNW6a{Fpbp}x#;3xuFWl&#yxA|5#2FnP{uBP+ zQT+62{ht3jqDvv3a1?8NB?-!`3%EcYIs+T{ej!8ipn}GiROY%9uHTYmz18J4qbJXS ztJ_c2)`C`lfGJRaJo}G{baZWQ-8B28GHB&c>%=F_;HT`9V`)AAt#vY3%I!>}nt9)s z*R`?8yp{rv=Y51U1XKb_Gltx?>lEmp(5gvxR))EtP4|UErPqf7P-_2*JrI0u zq>t`k2n^#!xlDL=WhBKS`x081xAMI$@DGkp5NJaKV@DTPHI@v}G z%BF{FoBAm_^Td2!zQeX$dS)y1CetNA~+gNMg6(slV{U#=f~ zBALKsrV`n~F*<{1AV_bo-*o1oyV%&D_D;eJbjziK;x=hWxp%B`Bq z0&v4&?7HVi(Vn8tb@}A9%ZfOo|L>ZFU=9s&jFOA{b z)w&;+9#xwr^Rg(;S*tH`;*~h&&)o{!pX)2!(tkG={|0h9wOwT;bqQJ;zR7|+7C%~r zG|b&9Y5cviUd+_0-ZHfKAw(BlUV2X(s)eDlS7MsYourbd8VqRaAt`oaQ?d? za{E8itrVit!q)#TS{o=v>l`yAiMyyd&e2t#Px47A^VqcR6l#l;mMs0m+micOXlSWD zFiFl^QMNDxZ<>Dd__bW_7qMr<|2RFMTB$KQZT@Z@d3m0-@s-zvWg)VLjkT#X=*x{M z4!;R~<<`UO4DynW)d{eD#jf0WMyUg?ND{I%GD)|*R$rVm)(RtYv7k_tC7pO&=e>w- zIdJ(<`&VRzam#J`-lKXxj^)o_=fEs$_k+QcgJ z>P$(Bs+Lucu{ReCo&5F?`6W5Ja<7r+cj_TMpUjUBhttSL?5&SZYxP>LR*$(K zAi8!ulLNyY?imGD+RA5Il`_^5?El;e_|mhJvFoDrM)RuA7~`qmsfq=!0@~<3W4lvl zpE2Y!yT(ac+RKq~%fj%kFBQE9j=d4no) z`Omp{@Q33hoS)9+mxQzx)b^}9RbaXl^5IY%Bz1)g!9V0NmFcU+gL4brH>ymE3b8&g?q;4wnvs?hpi=@czDZRdwEOZsf~cL-@`|g`TroWMiDP;-G!S6otG%`JuIv&iw)vs$XEXvMY3jY1OWqd5cWG|IeX;Q*yA{XrF zFp9s~BX)^o+89?}DKIxSTVyyiGFr84SBOe?Xbq=lnCw4pXO|iz@UZ^-2DB0M#A4D4 zAFel)xIp?f(~-bCZCizJYzXFhI~_M1(`B$UtJ?6yVr?fdRW{D0n{wM7o~Y3fZb-AM zkE6$rGS(7aDILtTpBHP0TgCWDwAXiBi*!@ro;+i7a(z*qf4_OsA4&T@82X1rv$QCDxudLT^PSFv-L=iGjgCJCzzI@QB0GB?6jTZg%`JK8!m)xA)Qh(t=~ zyO-Us*xO#;%go9r)MCHukx`g>^lqE+n$DrH3>~cU}vqoIEi?e3APveT4R>NK}xw5KZ-G5k_4@4uL6OnFGhcS81iPSy z=k@OtHx~z+?!+{NKX+*<(BMCPSCzPnZWw=M`*6pz&BwDLVtc+(nx z4JT(^vJY}0SvFQE9l41LYrh4M+B|nTQ^C)8avpGGyW^15Xp3D-kwq>sd}Cs=$RZ)u zwqT1Np{hzGZcwR|iZE=o%?T>)f5*l^O3>f+X6{2t59Gq0;vx#d1d7oZ40d^9hRrYn zbDm0nfHWGP9Oe$DI*`l(Y+Kuzt2Y~s2zwPiMJv5$aT=9=Kyp+k<|wi;6t8ji zBALa$5_Lq>07EyMvhea@w4Cca37d3~!t@qNNIVLJ`3u4%Vu9@fq|u~Dz;$dZfny@zjONPMWwW+7Y~I<7AB19l>K>BZ$0Aa*Uj- z6lo{r4RGZ&9A~OeW*JIA_279aOn#<>SxX>ZEjEgN4a7$h7wW*HHcrN5-Njsti1Rl= zBh5i1OH9_0<{)D8CTLf48r^i=HRm$mPg(e+`5eg0jz--*N@04o&;nky={a1%?n4Xs zS_Q+SNN60trC`X3gmmy4=L|WJ(4NWqb#^f#6iGs)GsMW@ED3UVI4Df=DGQx^7cg{l zChMNySsN-Ws8x&y?0ux2bY{SYC7sS-j+VpikhC)DTeur_Nr((BfXyUqhB2Atrvhx= z#YkaV{D3O+jH1%3>Jgk2g^3V*;=U2*|HVr!nB@HTL1dsFA=LtAbjpC%`_?8)rZp z<25$KXk&cX#sPr5q*2VB7VZnD;c%h?nI$g;WjKC}!gR%gP!s^R`<29Kzltt~?pyN0 z>?t84p#yD- zq3e_y{jC9FHJ!4sf&igjHeKIj2iipoYO!b_evY(5R0QG@aNPKD3R8I&s^>4zZf{AL zMFZ{9Qlsk{KzmBc!ZreE?=W58We3_HqSldGU^kyH>`93eAxqMsavw5_pm;W&6xg=} zl0>;97)oU`fDRUjL^1{)EG&X@*-W%ucYJ340CccG{zku>zJRrC3%J~DKp-?o>7av! 
zB~KK#1|2L+UsF)Bpo4|p+f4l==wP9HFP_Z?QXXNeuuQvP`8xPEfqd6< z0NWIOm_ zdNLr}sb2#fEKq2QdHOI-8h^)A43N=qLHZ*=MymUQY=^1^vz7aSG>EyG2QW#B6k`RX zhvHDvpo0afK9jB952XE#f18T}vM=eg6_6gvL9KyghpNwJYl99JXe6mSmkX>?Vj}+4 z7z8aCuQ - -See the `[global.well_known]` config section, or configure your web server -appropriately to send the delegation responses. - ## Adding a conduwuit user While conduwuit can run as any user it is better to use dedicated users for diff --git a/docs/development/hot_reload.md b/docs/development/hot_reload.md index 018eb4b3..65fd4adf 100644 --- a/docs/development/hot_reload.md +++ b/docs/development/hot_reload.md @@ -1,5 +1,8 @@ # Hot Reloading ("Live" Development) +Note that hot reloading has not been refactored in quite a while and is not +guaranteed to work at this time. + ### Summary When developing in debug-builds with the nightly toolchain, conduwuit is modular diff --git a/docs/development/testing.md b/docs/development/testing.md index 2d421767..a577698a 100644 --- a/docs/development/testing.md +++ b/docs/development/testing.md @@ -5,12 +5,11 @@ Have a look at [Complement's repository][complement] for an explanation of what it is. -To test against Complement, with Nix (or [Lix](https://lix.systems) and direnv -installed and set up, you can: +To test against Complement, with Nix (or [Lix](https://lix.systems) and +[direnv installed and set up][direnv] (run `direnv allow` after setting up the hook), you can: -* Run `./bin/complement "$COMPLEMENT_SRC" ./path/to/logs.jsonl -./path/to/results.jsonl` to build a Complement image, run the tests, and output -the logs and results to the specified paths. This will also output the OCI image +* Run `./bin/complement "$COMPLEMENT_SRC"` to build a Complement image, run +the tests, and output the logs and results to the specified paths. This will also output the OCI image at `result` * Run `nix build .#complement` from the root of the repository to just build a Complement OCI image outputted to `result` (it's a `.tar.gz` file) @@ -18,5 +17,15 @@ Complement OCI image outputted to `result` (it's a `.tar.gz` file) output from the commit/revision you want to test (e.g. from main) [here][ci-workflows] +If you want to use your own prebuilt OCI image (such as from our CI) without needing +Nix installed, put the image at `complement_oci_image.tar.gz` in the root of the repo +and run the script. + +If you're on macOS and need to build an image, run `nix build .#linux-complement`. + +We have a Complement fork as some tests have needed to be fixed. This can be found +at: + [ci-workflows]: https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml?query=event%3Apush+is%3Asuccess+actor%3Agirlbossceo [complement]: https://github.com/matrix-org/complement +[direnv]: https://direnv.net/docs/hook.html diff --git a/docs/differences.md b/docs/differences.md deleted file mode 100644 index 18ea7a1f..00000000 --- a/docs/differences.md +++ /dev/null @@ -1,379 +0,0 @@ -#### **Note: This list may not up to date. There are rapidly more and more -improvements, fixes, changes, etc being made that it is becoming more difficult -to maintain this list. I recommend that you give conduwuit a try and see the -differences for yourself. If you have any concerns, feel free to join the -conduwuit Matrix room and ask any pre-usage questions.** - -### list of features, bug fixes, etc that conduwuit does that Conduit does not - -Outgoing typing indicators, outgoing read receipts, **and** outgoing presence! 
- -## Performance - -- Concurrency support for individual homeserver key fetching for faster remote -room joins and room joins that will error less frequently -- Send `Cache-Control` response header with `immutable` and 1 year cache length -for all media requests (download and thumbnail) to instruct clients to cache -media, and reduce server load from media requests that could be otherwise cached -- Add feature flags and config options to enable/build with zstd, brotli, and/or -gzip HTTP body compression (response and request) -- Eliminate all usage of the thread-blocking `getaddrinfo(3)` call upon DNS -queries, significantly improving federation latency/ping and cache DNS results -(NXDOMAINs, successful queries, etc) using hickory-dns / hickory-resolver -- Enable HTTP/2 support on all requests -- Vastly improve RocksDB default settings to use new features that help with -performance significantly, uses settings tailored to SSDs, various ways to tweak -RocksDB, and a conduwuit setting to tell RocksDB to use settings that are -tailored to HDDs or slow spinning rust storage or buggy filesystems. -- Implement database flush and cleanup conduwuit operations when using RocksDB -- Implement RocksDB write buffer corking and coalescing in database write-heavy -areas -- Perform connection pooling and keepalives where necessary to significantly -improve federation performance and latency -- Various config options to tweak connection pooling, request timeouts, -connection timeouts, DNS timeouts and settings, etc with good defaults which -also help huge with performance via reusing connections and retrying where -needed -- Properly get and use the amount of parallelism / tokio workers -- Implement building conduwuit with jemalloc (which extends to the RocksDB -jemalloc feature for maximum gains) or hardened_malloc light variant, and -io_uring support, and produce CI builds with jemalloc and io_uring by default -for performance (Nix doesn't seem to build -[hardened_malloc-rs](https://github.com/girlbossceo/hardened_malloc-rs) -properly) -- Add support for caching DNS results with hickory-dns / hickory-resolver in -conduwuit (not a replacement for a proper resolver cache, but still far better -than nothing), also properly falls back on TCP for UDP errors or if a SRV -response is too large -- Add config option for using DNS over TCP, and config option for controlling -A/AAAA record lookup strategy (e.g. 
don't query AAAA records if you don't have -IPv6 connectivity) -- Overall significant database, Client-Server, and federation performance and -latency improvements (check out the ping room leaderboards if you don't believe -me :>) -- Add config options for RocksDB compression and bottommost compression, -including choosing the algorithm and compression level -- Use [loole](https://github.com/mahdi-shojaee/loole) MPSC channels instead of -tokio MPSC channels for huge performance boosts in sending channels (mainly -relevant for federation) and presence channels -- Use `tracing`/`log`'s `release_max_level_info` feature to improve performance, -build speeds, binary size, and CPU usage in release builds by avoid compiling -debug/trace log level macros that users will generally never use (can be -disabled with a build-time feature flag) -- Remove some unnecessary checks on EDU handling for incoming transactions, -effectively speeding them up -- Simplify, dedupe, etc huge chunks of the codebase, including some that were -unnecessary overhead, binary bloats, or preventing compiler/linker optimisations -- Implement zero-copy RocksDB database accessors, substantially improving -performance caused by unnecessary memory allocations - -## General Fixes/Features - -- Add legacy Element client hack fixing password changes and deactivations on -legacy Element Android/iOS due to usage of an unspecced `user` field for UIAA -- Raise and improve all the various request timeouts making some things like -room joins and client bugs error less or none at all than they should, and make -them all user configurable -- Add missing `reason` field to user ban events (`/ban`) -- Safer and cleaner shutdowns across incoming/outgoing requests (graceful -shutdown) and the database -- Stop sending `make_join` requests on room joins if 15 servers respond with -`M_UNSUPPORTED_ROOM_VERSION` or `M_INVALID_ROOM_VERSION` -- Stop sending `make_join` requests if 50 servers cannot provide `make_join` for -us -- Respect *most* client parameters for `/media/` requests (`allow_redirect` -still needs work) -- Return joined member count of rooms for push rules/conditions instead of a -hardcoded value of 10 -- Make `CONDUIT_CONFIG` optional, relevant for container users that configure -only by environment variables and no longer need to set `CONDUIT_CONFIG` to an -empty string. -- Allow HEAD and PATCH (MSC4138) HTTP requests in CORS for clients (despite not -being explicity mentioned in Matrix spec, HTTP spec says all HEAD requests need -to behave the same as GET requests, Synapse supports HEAD requests) -- Fix using conduwuit with flake-compat on NixOS -- Resolve and remove some "features" from upstream that result in concurrency -hazards, exponential backoff issues, or arbitrary performance limiters -- Find more servers for outbound federation `/hierarchy` requests instead of -just the room ID server name -- Support for suggesting servers to join through at -`/_matrix/client/v3/directory/room/{roomAlias}` -- Support for suggesting servers to join through us at -`/_matrix/federation/v1/query/directory` -- Misc edge-case search fixes (e.g. potentially missing some events) -- Misc `/sync` fixes (e.g. 
returning unnecessary data or incorrect/invalid -responses) -- Add `replaces_state` and `prev_sender` in `unsigned` for state event changes -which primarily makes Element's "See history" button on a state event functional -- Fix Conduit not allowing incoming federation requests for various world -readable rooms -- Fix Conduit not respecting the client-requested file name on media requests -- Prevent sending junk / non-membership events to `/send_join` and `/send_leave` -endpoints -- Only allow the requested membership type on `/send_join` and `/send_leave` -endpoints (e.g. don't allow leave memberships on join endpoints) -- Prevent state key impersonation on `/send_join` and `/send_leave` endpoints -- Validate `X-Matrix` origin and request body `"origin"` field on incoming -transactions -- Add `GET /_matrix/client/v1/register/m.login.registration_token/validity` -endpoint -- Explicitly define support for sliding sync at `/_matrix/client/versions` -(`org.matrix.msc3575`) -- Fix seeing empty status messages on user presences - -## Moderation - -- (Also see [Admin Room](#admin-room) for all the admin commands pertaining to -moderation, there's a lot!) -- Add support for room banning/blocking by ID using admin command -- Add support for serving `support` well-known from `[global.well_known]` -(MSC1929) (`/.well-known/matrix/support`) -- Config option to forbid publishing rooms to the room directory -(`lockdown_public_room_directory`) except for admins -- Admin commands to delete room aliases and unpublish rooms from our room -directory -- For all -[`/report`](https://spec.matrix.org/latest/client-server-api/#post_matrixclientv3roomsroomidreporteventid) -requests: check if the reported event ID belongs to the reported room ID, raise -report reasoning character limit to 750, fix broken formatting, make a small -delayed random response per spec suggestion on privacy, and check if the sender -user is in the reported room. -- Support blocking servers from downloading remote media from, returning a 404 -- Don't allow `m.call.invite` events to be sent in public rooms (prevents -calling the entire room) -- On new public room creations, only allow moderators to send `m.call.invite`, -`org.matrix.msc3401.call`, and `org.matrix.msc3401.call.member` events to -prevent unprivileged users from calling the entire room -- Add support for a "global ACLs" feature (`forbidden_remote_server_names`) that -blocks inbound remote room invites, room joins by room ID on server name, room -joins by room alias on server name, incoming federated joins, and incoming -federated room directory requests. This is very helpful for blocking servers -that are purely toxic/bad and serve no value in allowing our users to suffer -from things like room invite spam or such. Please note that this is not a -substitute for room ACLs. -- Add support for a config option to forbid our local users from sending -federated room directory requests for -(`forbidden_remote_room_directory_server_names`). Similar to above, useful for -blocking servers that help prevent our users from wandering into bad areas of -Matrix via room directories of those malicious servers. 
-- Add config option for auto remediating/deactivating local non-admin users who -attempt to join bad/forbidden rooms (`auto_deactivate_banned_room_attempts`) -- Deactivating users will remove their profile picture, blurhash, display name, -and leave all rooms by default just like Synapse and for additional privacy -- Reject some EDUs from ACL'd users such as read receipts and typing indicators - -## Privacy/Security - -- Add config option for device name federation with a privacy-friendly default -(disabled) -- Add config option for requiring authentication to the `/publicRooms` endpoint -(room directory) with a default enabled for privacy -- Add config option for federating `/publicRooms` endpoint (room directory) to -other servers with a default disabled for privacy -- Uses proper `argon2` crate by RustCrypto instead of questionable `rust-argon2` -crate -- Generate passwords with 25 characters instead of 15 -- Config option `ip_range_denylist` to support refusing to send requests -(typically federation) to specific IP ranges, typically RFC 1918, non-routable, -testnet, etc addresses like Synapse for security (note: this is not a guaranteed -protection, and you should be using a firewall with zones if you want guaranteed -protection as doing this on the application level is prone to bypasses). -- Config option to block non-admin users from sending room invites or receiving -remote room invites. Admin users are still allowed. -- Config option to disable incoming and/or outgoing remote read receipts -- Config option to disable incoming and/or outgoing remote typing indicators -- Config option to disable incoming, outgoing, and/or local presence and for -timing out remote users -- Sanitise file names for the `Content-Disposition` header for all media -requests (thumbnails, downloads, uploads) -- Media repository on handling `Content-Disposition` and `Content-Type` is fully -spec compliant and secured -- Send secure default HTTP headers such as a strong restrictive CSP (see -MSC4149), deny iframes, disable `X-XSS-Protection`, disable interest cohort in -`Permission-Policy`, etc to mitigate any potential attack surface such as from -untrusted media - -## Administration/Logging - -- Commandline argument to specify the path to a config file instead of relying -on `CONDUIT_CONFIG` -- Revamped admin room infrastructure and commands -- Substantially clean up, improve, and fix logging (less noisy dead server -logging, registration attempts, more useful troubleshooting logging, proper -error propagation, etc) -- Configurable RocksDB logging (`LOG` files) with proper defaults (rotate, max -size, verbosity, etc) to stop LOG files from accumulating so much -- Explicit startup error if your configuration allows open registration without -a token or such like Synapse with a way to bypass it if needed -- Replace the lightning bolt emoji option with support for setting any arbitrary -text (e.g. 
another emoji) to suffix to all new user registrations, with a -conduwuit default of "🏳️‍⚧️" -- Implement config option to auto join rooms upon registration -- Warn on unknown config options specified -- Add `/_conduwuit/server_version` route to return the version of conduwuit -without relying on the federation API `/_matrix/federation/v1/version` -- Add `/_conduwuit/local_user_count` route to return the amount of registered -active local users on your homeserver *if federation is enabled* -- Add configurable RocksDB recovery modes to aid in recovering corrupted RocksDB -databases -- Support config options via `CONDUWUIT_` prefix and accessing non-global struct -config options with the `__` split (e.g. `CONDUWUIT_WELL_KNOWN__SERVER`) -- Add support for listening on multiple TCP ports and multiple addresses -- **Opt-in** Sentry.io telemetry and metrics, mainly used for crash reporting -- Log the client IP on various requests such as registrations, banned room join -attempts, logins, deactivations, federation transactions, etc -- Fix Conduit dropping some remote server federation response errors - -## Maintenance/Stability - -- GitLab CI ported to GitHub Actions -- Add support for the Matrix spec compliance test suite -[Complement](https://github.com/matrix-org/complement/) via the Nix flake and -various other fixes for it -- Implement running and diff'ing Complement results in CI and error if any -mismatch occurs to prevent large cases of conduwuit regressions -- Repo is (officially) mirrored to GitHub, GitLab, git.gay, git.girlcock.ceo, -sourcehut, and Codeberg (see README.md for their links) -- Docker container images published to GitLab Container Registry, GitHub -Container Registry, and Dockerhub -- Extensively revamp the example config to be extremely helpful and useful to -both new users and power users -- Fixed every single clippy (default lints) and rustc warnings, including some -that were performance related or potential safety issues / unsoundness -- Add a **lot** of other clippy and rustc lints and a rustfmt.toml file -- Repo uses [Renovate](https://docs.renovatebot.com/) and keeps ALL -dependencies as up to date as possible -- Purge unmaintained/irrelevant/broken database backends (heed, sled, persy) and -other unnecessary code or overhead -- webp support for images -- Add cargo audit support to CI -- Add documentation lints via lychee and markdownlint-cli to CI -- CI tests for all sorts of feature matrixes (jemalloc, non-defaullt, all -features, etc) -- Add static and dynamic linking smoke tests in CI to prevent any potential -linking regressions for Complement, static binaries, Nix devshells, etc -- Add timestamp by commit date when building OCI images for keeping image build -reproducibility and still have a meaningful "last modified date" for OCI image -- Add timestamp by commit date via `SOURCE_DATE_EPOCH` for Debian packages -- Startup check if conduwuit running in a container and is listening on -127.0.0.1 (generally containers are using NAT networking and 0.0.0.0 is the -intended listening address) -- Add a panic catcher layer to return panic messages in HTTP responses if a -panic occurs -- Add full compatibility support for SHA256 media file names instead of base64 -file names to overcome filesystem file name length limitations (OS error file -name too long) while still retaining upstream database compatibility -- Remove SQLite support due to being very poor performance, difficult to -maintain against RocksDB, and is a blocker to significantly improved database -code - 
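For reference, a rough sketch of how a few of the moderation and privacy options named in the sections above might be set in `conduwuit.toml` (the option names come from the text above; the `[global]` section, value types, and example values are assumptions — the shipped example config is authoritative):

```toml
[global]
# "Global ACL": refuse to interact with these servers at all
# (not a substitute for room ACLs)
forbidden_remote_server_names = ["bad.example.org"]

# Block our local users from querying these servers' room directories
forbidden_remote_room_directory_server_names = ["bad.example.org"]

# Only admins may publish rooms to our public room directory
lockdown_public_room_directory = true

# Auto-deactivate non-admin local users who attempt to join banned/forbidden rooms
auto_deactivate_banned_room_attempts = true

# Refuse to send requests (typically federation) to these IP ranges
ip_range_denylist = ["192.168.0.0/16", "10.0.0.0/8"]
```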
-## Admin Room - -- Add support for a console CLI interface that can issue admin commands and -output them in your terminal -- Add support for an admin-user-only commandline admin room interface that can -be issued in any room with the `\\!admin` or `\!admin` prefix and returns the -response as yourself in the same room -- Add admin commands for uptime, server startup, server shutdown, and server -restart -- Fix admin room handler to not panic/crash if the admin room command response -fails (e.g. too large message) -- Add command to dynamically change conduwuit's tracing log level filter on the -fly -- Add admin command to fetch a server's `/.well-known/matrix/support` file -- Add debug admin command to force update user device lists (could potentially -resolve some E2EE flukes) -- Implement **RocksDB online backups**, listing RocksDB backups, and listing -database file counts all via admin commands -- Add various database visibility commands such as being able to query the -getters and iterators used in conduwuit, a very helpful online debugging utility -- Forbid the admin room from being made public or world readable history -- Add `!admin` as a way to call the admin bot -- Extend clear cache admin command to support clearing more caches such as DNS -and TLS name overrides -- Admin debug command to send a federation request/ping to a server's -`/_matrix/federation/v1/version` endpoint and measures the latency it took -- Add admin command to bulk delete media via a codeblock list of MXC URLs. -- Add admin command to delete both the thumbnail and media MXC URLs from an -event ID (e.g. from an abuse report) -- Add admin command to list all the rooms a local user is joined in -- Add admin command to list joined members in a room -- Add admin command to view the room topic of a room -- Add admin command to delete all remote media in the past X minutes as a form -of deleting media that you don't want on your server that a remote user posted -in a room, a `--force` flag to ignore errors, and support for reading `last -modified time` instead of `creation time` for filesystems that don't support -file created metadata -- Add admin command to return a room's full/complete state -- Admin debug command to fetch a PDU from a remote server and inserts it into -our database/timeline as backfill -- Add admin command to delete media via a specific MXC. This deletes the MXC -from our database, and the file locally. 
-- Add admin commands for banning (blocking) room IDs from our local users -joining (admins are always allowed) and evicts all our local users from that -room, in addition to bulk room banning support, and blocks room invites (remote -and local) to the banned room, as a moderation feature -- Add admin commands to output jemalloc memory stats and memory usage -- Add admin command to get rooms a *remote* user shares with us -- Add debug admin commands to get the earliest and latest PDU in a room -- Add debug admin command to echo a message -- Add admin command to insert rooms tags for a user, most useful for inserting -the `m.server_notice` tag on your admin room to make it "persistent" in the -"System Alerts" section of Element -- Add experimental admin debug command for Dendrite's `AdminDownloadState` -(`/admin/downloadState/{serverName}/{roomID}`) admin API endpoint to download -and use a remote server's room state in the room -- Disable URL previews by default in the admin room due to various command -outputs having "URLs" in them that clients may needlessly render/request -- Extend memory usage admin server command to support showing memory allocator -stats such as jemalloc's -- Add admin debug command to see memory allocator's full extended debug -statistics such as jemalloc's - -## Misc - -- Add guest support for accessing TURN servers via `turn_allow_guests` like -Synapse -- Support for creating rooms with custom room IDs like Maunium Synapse -(`room_id` request body field to `/createRoom`) -- Query parameter `?format=event|content` for returning either the room state -event's content (default) for the full room state event on -`/_matrix/client/v3/rooms/{roomId}/state/{eventType}[/{stateKey}]` requests (see -) -- Send a User-Agent on all of our requests -- Send `avatar_url` on invite room membership events/changes -- Support sending [`well_known` response to client login -responses](https://spec.matrix.org/v1.10/client-server-api/#post_matrixclientv3login) -if using config option `[well_known.client]` -- Implement `include_state` search criteria support for `/search` requests -(response now can include room states) -- Declare various missing Matrix versions and features at -`/_matrix/client/versions` -- Implement legacy Matrix `/v1/` media endpoints that some clients and servers -may still call -- Config option to change Conduit's behaviour of homeserver key fetching -(`query_trusted_key_servers_first`). This option sets whether conduwuit will -query trusted notary key servers first before the individual homeserver(s), or -vice versa which may help in joining certain rooms. -- Implement unstable MSC2666 support for querying mutual rooms with a user -- Implement unstable MSC3266 room summary API support -- Implement unstable MSC4125 support for specifying servers to join via on -federated invites -- Make conduwuit build and be functional under Nix + macOS -- Log out all sessions after unsetting the emergency password -- Assume well-knowns are broken if they exceed past 12288 characters. 
-- Add support for listening on both HTTP and HTTPS if using direct TLS with -conduwuit for usecases such as Complement -- Add config option for disabling RocksDB Direct IO if needed -- Add various documentation on maintaining conduwuit, using RocksDB online -backups, some troubleshooting, using admin commands, moderation documentation, -etc -- (Developers): Add support for [hot reloadable/"live" modular -development](development/hot_reload.md) -- (Developers): Add support for tokio-console -- (Developers): Add support for tracing flame graphs -- No cryptocurrency donations allowed, conduwuit is fully maintained by -independent queer maintainers, and with a strong priority on inclusitivity and -comfort for protected groups 🏳️‍⚧️ -- [Add a community Code of Conduct for all conduwuit community spaces, primarily -the Matrix space](https://conduwuit.puppyirl.gay/conduwuit_coc.html) diff --git a/docs/introduction.md b/docs/introduction.md index 9db76681..9d3a294a 100644 --- a/docs/introduction.md +++ b/docs/introduction.md @@ -4,10 +4,6 @@ {{#include ../README.md:body}} -#### What's different about your fork than upstream Conduit? - -See the [differences](differences.md) page - #### How can I deploy my own? - [Deployment options](deploying.md) From 1e23c95ec6e059c5d9b2b0083868596f1d38f5aa Mon Sep 17 00:00:00 2001 From: Tamara Schmitz <15906939+tamara-schmitz@users.noreply.github.com> Date: Mon, 10 Mar 2025 21:27:53 +0000 Subject: [PATCH 124/310] docs: refactor reverse proxy setup sections (#701) --- docs/deploying/generic.md | 59 ++++++++++++++++++++------------------- 1 file changed, 30 insertions(+), 29 deletions(-) diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md index 88ba01d5..a07da560 100644 --- a/docs/deploying/generic.md +++ b/docs/deploying/generic.md @@ -145,25 +145,32 @@ sudo chmod 700 /var/lib/conduwuit/ ## Setting up the Reverse Proxy -Refer to the documentation or various guides online of your chosen reverse proxy -software. There are many examples of basic Apache/Nginx reverse proxy setups -out there. +We recommend Caddy as a reverse proxy, as it is trivial to use, handling TLS certificates, reverse proxy headers, etc transparently with proper defaults. +For other software, please refer to their respective documentation or online guides. -A [Caddy](https://caddyserver.com/) example will be provided as this -is the recommended reverse proxy for new users and is very trivial to use -(handles TLS, reverse proxy headers, etc transparently with proper defaults). +### Caddy -Lighttpd is not supported as it seems to mess with the `X-Matrix` Authorization -header, making federation non-functional. If a workaround is found, feel free to share to get it added to the documentation here. +After installing Caddy via your preferred method, create `/etc/caddy/conf.d/conduwuit_caddyfile` +and enter this (substitute for your server name). -If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent this (note that Apache isn't very good as a general reverse proxy and we discourage the usage of it if you can). +```caddyfile +your.server.name, your.server.name:8448 { + # TCP reverse_proxy + reverse_proxy 127.0.0.1:6167 + # UNIX socket + #reverse_proxy unix//run/conduwuit/conduwuit.sock +} +``` -If using Nginx, you need to give conduwuit the request URI using `$request_uri`, or like so: -- `proxy_pass http://127.0.0.1:6167$request_uri;` -- `proxy_pass http://127.0.0.1:6167;` +That's it! Just start and enable the service and you're set. 
-Nginx users need to increase `client_max_body_size` (default is 1M) to match -`max_request_size` defined in conduwuit.toml. +```bash +sudo systemctl enable --now caddy +``` + +### Other Reverse Proxies + +As we would prefer our users to use Caddy, we will not provide configuration files for other proxys. You will need to reverse proxy everything under following routes: - `/_matrix/` - core Matrix C-S and S-S APIs @@ -186,25 +193,19 @@ Examples of delegation: - - -### Caddy +For Apache and Nginx there are many examples available online. -Create `/etc/caddy/conf.d/conduwuit_caddyfile` and enter this (substitute for -your server name). +Lighttpd is not supported as it seems to mess with the `X-Matrix` Authorization +header, making federation non-functional. If a workaround is found, feel free to share to get it added to the documentation here. -```caddyfile -your.server.name, your.server.name:8448 { - # TCP reverse_proxy - reverse_proxy 127.0.0.1:6167 - # UNIX socket - #reverse_proxy unix//run/conduwuit/conduwuit.sock -} -``` +If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent httpd from messing with the `X-Matrix` header (note that Apache isn't very good as a general reverse proxy and we discourage the usage of it if you can). -That's it! Just start and enable the service and you're set. +If using Nginx, you need to give conduwuit the request URI using `$request_uri`, or like so: +- `proxy_pass http://127.0.0.1:6167$request_uri;` +- `proxy_pass http://127.0.0.1:6167;` -```bash -sudo systemctl enable --now caddy -``` +Nginx users need to increase `client_max_body_size` (default is 1M) to match +`max_request_size` defined in conduwuit.toml. ## You're done From 1366a3092f5be044fbe39225dd606ef3445899d5 Mon Sep 17 00:00:00 2001 From: Ginger <75683114+gingershaped@users.noreply.github.com> Date: Mon, 10 Mar 2025 17:28:19 -0400 Subject: [PATCH 125/310] Check the `room_types` filter when searching for local public rooms (#698) --- src/api/client/directory.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 88f0e668..7ce32e4c 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -15,7 +15,7 @@ use ruma::{ }, federation, }, - directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork}, + directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork, RoomTypeFilter}, events::{ StateEventType, room::{ @@ -289,6 +289,9 @@ pub(crate) async fn get_public_rooms_filtered_helper( .map(ToOwned::to_owned) .then(|room_id| public_rooms_chunk(services, room_id)) .filter_map(|chunk| async move { + if !filter.room_types.is_empty() && !filter.room_types.contains(&RoomTypeFilter::from(chunk.room_type.clone())) { + return None; + } if let Some(query) = filter.generic_search_term.as_ref().map(|q| q.to_lowercase()) { if let Some(name) = &chunk.name { if name.as_str().to_lowercase().contains(&query) { From c4b05e77f3dd66636e26b64f8f4852703816c399 Mon Sep 17 00:00:00 2001 From: Odd Eivind Ebbesen Date: Mon, 10 Mar 2025 22:28:29 +0100 Subject: [PATCH 126/310] Fix up wording in the doc comments for admin media deletion (#694) --- src/admin/media/mod.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/admin/media/mod.rs b/src/admin/media/mod.rs index d212aab4..405c26d5 100644 --- a/src/admin/media/mod.rs +++ b/src/admin/media/mod.rs @@ -27,18 +27,18 @@ pub(super) enum MediaCommand { DeleteList, /// - Deletes all remote (and optionally 
local) media created before or - /// after \[duration] time using filesystem metadata first created at - /// date, or fallback to last modified date. This will always ignore - /// errors by default. + /// after [duration] time using filesystem metadata first created at date, + /// or fallback to last modified date. This will always ignore errors by + /// default. DeletePastRemoteMedia { /// - The relative time (e.g. 30s, 5m, 7d) within which to search duration: String, - /// - Only delete media created more recently than \[duration] ago + /// - Only delete media created before [duration] ago #[arg(long, short)] before: bool, - /// - Only delete media created after \[duration] ago + /// - Only delete media created after [duration] ago #[arg(long, short)] after: bool, From 3104586884b0027a1404bfe1986d569ff9e492d4 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 18:05:36 -0400 Subject: [PATCH 127/310] bump tracing-subscriber, allowlist cargo-doc lint in admin room Signed-off-by: June Clementine Strawberry --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 10 +++++----- src/admin/media/mod.rs | 1 + 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 65e8eca1..22d93237 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -440,7 +440,7 @@ dependencies = [ "bitflags 2.9.0", "cexpr", "clang-sys", - "itertools 0.12.1", + "itertools 0.13.0", "proc-macro2", "quote", "regex", @@ -2382,7 +2382,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] @@ -4833,7 +4833,7 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" version = "0.1.41" -source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" +source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "log", "pin-project-lite", @@ -4844,7 +4844,7 @@ dependencies = [ [[package]] name = "tracing-attributes" version = "0.1.28" -source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" +source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "proc-macro2", "quote", @@ -4854,7 +4854,7 @@ dependencies = [ [[package]] name = "tracing-core" version = "0.1.33" -source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" +source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "once_cell", "valuable", @@ -4874,7 +4874,7 @@ dependencies = [ [[package]] name = "tracing-log" version = "0.2.0" -source = "git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" +source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "log", "once_cell", @@ -4901,8 +4901,8 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.18" -source = 
"git+https://github.com/girlbossceo/tracing?rev=05825066a6d0e9ad6b80dcf29457eb179ff4768c#05825066a6d0e9ad6b80dcf29457eb179ff4768c" +version = "0.3.19" +source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "matchers", "nu-ansi-term", diff --git a/Cargo.toml b/Cargo.toml index d611c08e..1528349c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -210,7 +210,7 @@ default-features = false version = "0.1.41" default-features = false [workspace.dependencies.tracing-subscriber] -version = "=0.3.18" +version = "0.3.19" default-features = false features = ["env-filter", "std", "tracing", "tracing-log", "ansi", "fmt"] [workspace.dependencies.tracing-core] @@ -541,16 +541,16 @@ version = "1.0.2" # https://github.com/girlbossceo/tracing/commit/b348dca742af641c47bc390261f60711c2af573c [patch.crates-io.tracing-subscriber] git = "https://github.com/girlbossceo/tracing" -rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" +rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" [patch.crates-io.tracing] git = "https://github.com/girlbossceo/tracing" -rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" +rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" [patch.crates-io.tracing-core] git = "https://github.com/girlbossceo/tracing" -rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" +rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" [patch.crates-io.tracing-log] git = "https://github.com/girlbossceo/tracing" -rev = "05825066a6d0e9ad6b80dcf29457eb179ff4768c" +rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" # adds a tab completion callback: https://github.com/girlbossceo/rustyline-async/commit/de26100b0db03e419a3d8e1dd26895d170d1fe50 # adds event for CTRL+\: https://github.com/girlbossceo/rustyline-async/commit/67d8c49aeac03a5ef4e818f663eaa94dd7bf339b diff --git a/src/admin/media/mod.rs b/src/admin/media/mod.rs index 405c26d5..641834b2 100644 --- a/src/admin/media/mod.rs +++ b/src/admin/media/mod.rs @@ -1,3 +1,4 @@ +#![allow(rustdoc::broken_intra_doc_links)] mod commands; use clap::Subcommand; From 7f95eef9abf86298a25fd0bd410835084742eaae Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 21:01:20 -0400 Subject: [PATCH 128/310] bump ruwuma Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 22d93237..c93716f9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3507,7 +3507,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "assign", "js_int", @@ -3527,7 +3527,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "js_int", "ruma-common", @@ -3539,7 +3539,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = 
"git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "as_variant", "assign", @@ -3562,7 +3562,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "as_variant", "base64 0.22.1", @@ -3594,7 +3594,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3619,7 +3619,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "bytes", "headers", @@ -3641,7 +3641,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3650,7 +3650,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "js_int", "ruma-common", @@ -3660,7 +3660,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3675,7 +3675,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "js_int", "ruma-common", @@ -3687,7 +3687,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=f5ab6302aaa55a14827a9cb5b40e980dd135fe14#f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index 1528349c..c09cdaea 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "f5ab6302aaa55a14827a9cb5b40e980dd135fe14" +rev = "69133fd53ca063552788c8dfbaf5e01c98dec3e7" features = [ "compat", "rand", From ae818d5b25977a6c4543bca16b78af6f2fa0cca7 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 21:08:41 -0400 Subject: [PATCH 129/310] remove most of cargo test from engage as crane does that but with more caching Signed-off-by: June Clementine Strawberry --- engage.toml | 36 ------------------------------------ 1 file changed, 36 deletions(-) diff --git a/engage.toml b/engage.toml index 0a857b5a..210bafd5 100644 --- a/engage.toml +++ b/engage.toml @@ -161,24 +161,6 @@ name = "markdownlint" group = "lints" script = "markdownlint docs *.md || true" # TODO: fix the ton of markdown lints so we can drop `|| true` -[[task]] -name = "cargo/all" -group = "tests" -script = """ -env DIRENV_DEVSHELL=all-features \ - direnv exec . \ - cargo test \ - --workspace \ - --locked \ - --profile test \ - --all-targets \ - --no-fail-fast \ - --all-features \ - --color=always \ - -- \ - --color=always -""" - [[task]] name = "cargo/default" group = "tests" @@ -196,24 +178,6 @@ env DIRENV_DEVSHELL=default \ --color=always """ -[[task]] -name = "cargo/no-features" -group = "tests" -script = """ -env DIRENV_DEVSHELL=no-features \ - direnv exec . \ - cargo test \ - --workspace \ - --locked \ - --profile test \ - --all-targets \ - --no-fail-fast \ - --no-default-features \ - --color=always \ - -- \ - --color=always -""" - # Checks if the generated example config differs from the checked in repo's # example config. [[task]] From e920c44cb488d398bc57fe4ce7fdffb3ded5038a Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 21:15:11 -0400 Subject: [PATCH 130/310] ignore humantime dep as tracing console-subscriber uses it (somewhere) Signed-off-by: June Clementine Strawberry --- .cargo/audit.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.cargo/audit.toml b/.cargo/audit.toml index bf44fbd6..37148cfb 100644 --- a/.cargo/audit.toml +++ b/.cargo/audit.toml @@ -1,5 +1,5 @@ [advisories] -ignore = ["RUSTSEC-2024-0436"] # advisory IDs to ignore e.g. ["RUSTSEC-2019-0001", ...] +ignore = ["RUSTSEC-2024-0436", "RUSTSEC-2025-0014"] # advisory IDs to ignore e.g. ["RUSTSEC-2019-0001", ...] 
informational_warnings = [] # warn for categories of informational advisories severity_threshold = "none" # CVSS severity ("none", "low", "medium", "high", "critical") From 0877f294393954bbe49279456f012e1fbb604f78 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 22:21:53 -0400 Subject: [PATCH 131/310] respect membership filters on /members Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +++++++------- Cargo.toml | 2 +- src/api/client/membership.rs | 56 ++++++++++++++++++++++++++++++++++-- 3 files changed, 66 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c93716f9..ab155fd0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3507,7 +3507,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "assign", "js_int", @@ -3527,7 +3527,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "js_int", "ruma-common", @@ -3539,7 +3539,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "as_variant", "assign", @@ -3562,7 +3562,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "as_variant", "base64 0.22.1", @@ -3594,7 +3594,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3619,7 +3619,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "bytes", "headers", @@ -3641,7 +3641,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3650,7 +3650,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "js_int", "ruma-common", @@ -3660,7 +3660,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3675,7 +3675,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "js_int", "ruma-common", @@ -3687,7 +3687,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=69133fd53ca063552788c8dfbaf5e01c98dec3e7#69133fd53ca063552788c8dfbaf5e01c98dec3e7" +source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index c09cdaea..2bf30d61 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "69133fd53ca063552788c8dfbaf5e01c98dec3e7" +rev = "24d018a0015bb85489ae84564701a49a643bcc57" features = [ "compat", "rand", diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 3f77e69e..11395e83 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -25,8 +25,9 @@ use ruma::{ error::ErrorKind, knock::knock_room, membership::{ - ThirdPartySigned, ban_user, forget_room, get_member_events, invite_user, - join_room_by_id, join_room_by_id_or_alias, + ThirdPartySigned, ban_user, forget_room, + get_member_events::{self, v3::MembershipEventFilter}, + invite_user, join_room_by_id, join_room_by_id_or_alias, joined_members::{self, v3::RoomMember}, joined_rooms, kick_user, leave_room, unban_user, }, @@ -768,6 +769,54 @@ pub(crate) async fn joined_rooms_route( }) } +fn membership_filter( + pdu: PduEvent, + for_membership: Option<&MembershipEventFilter>, + not_membership: Option<&MembershipEventFilter>, +) -> Option { + let membership_state_filter = match for_membership { + | Some(MembershipEventFilter::Ban) => MembershipState::Ban, + | Some(MembershipEventFilter::Invite) => MembershipState::Invite, + | Some(MembershipEventFilter::Knock) => MembershipState::Knock, + | Some(MembershipEventFilter::Leave) => MembershipState::Leave, + | Some(_) | None => MembershipState::Join, + }; + + let not_membership_state_filter = match not_membership { + | 
Some(MembershipEventFilter::Ban) => MembershipState::Ban, + | Some(MembershipEventFilter::Invite) => MembershipState::Invite, + | Some(MembershipEventFilter::Join) => MembershipState::Join, + | Some(MembershipEventFilter::Knock) => MembershipState::Knock, + | Some(_) | None => MembershipState::Leave, + }; + + let evt_membership = pdu.get_content::().ok()?.membership; + + if for_membership.is_some() && not_membership.is_some() { + if membership_state_filter != evt_membership + || not_membership_state_filter == evt_membership + { + None + } else { + Some(pdu) + } + } else if for_membership.is_some() && not_membership.is_none() { + if membership_state_filter != evt_membership { + None + } else { + Some(pdu) + } + } else if not_membership.is_some() && for_membership.is_none() { + if not_membership_state_filter == evt_membership { + None + } else { + Some(pdu) + } + } else { + Some(pdu) + } +} + /// # `POST /_matrix/client/r0/rooms/{roomId}/members` /// /// Lists all joined users in a room (TODO: at a specific point in time, with a @@ -779,6 +828,8 @@ pub(crate) async fn get_member_events_route( body: Ruma, ) -> Result { let sender_user = body.sender_user(); + let membership = body.membership.as_ref(); + let not_membership = body.not_membership.as_ref(); if !services .rooms @@ -797,6 +848,7 @@ pub(crate) async fn get_member_events_route( .ready_filter_map(Result::ok) .ready_filter(|((ty, _), _)| *ty == StateEventType::RoomMember) .map(at!(1)) + .ready_filter_map(|pdu| membership_filter(pdu, membership, not_membership)) .map(PduEvent::into_member_event) .collect() .await, From 1d1ccec532bf3eaebf499d3ff4c9f7a24369c389 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 23:05:56 -0400 Subject: [PATCH 132/310] fix some nightly clippy lints Signed-off-by: June Clementine Strawberry --- Cargo.toml | 3 +++ clippy.toml | 3 ++- src/admin/processor.rs | 8 +++++--- src/api/client/account.rs | 4 ++-- src/api/client/state.rs | 2 +- src/core/utils/string.rs | 1 + 6 files changed, 14 insertions(+), 7 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 2bf30d61..fd477850 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -841,6 +841,9 @@ unused_crate_dependencies = "allow" unsafe_code = "allow" variant_size_differences = "allow" +# we check nightly clippy lints +unknown_lints = "allow" + ####################################### # # Clippy lints diff --git a/clippy.toml b/clippy.toml index 42427101..863759aa 100644 --- a/clippy.toml +++ b/clippy.toml @@ -2,9 +2,10 @@ array-size-threshold = 4096 cognitive-complexity-threshold = 94 # TODO reduce me ALARA excessive-nesting-threshold = 11 # TODO reduce me to 4 or 5 future-size-threshold = 7745 # TODO reduce me ALARA -stack-size-threshold = 196608 # reduce me ALARA +stack-size-threshold = 196608 # TODO reduce me ALARA too-many-lines-threshold = 780 # TODO reduce me to <= 100 type-complexity-threshold = 250 # reduce me to ~200 +large-error-threshold = 256 # TODO reduce me ALARA disallowed-macros = [ { path = "log::error", reason = "use conduwuit_core::error" }, diff --git a/src/admin/processor.rs b/src/admin/processor.rs index 77a60959..53a15098 100644 --- a/src/admin/processor.rs +++ b/src/admin/processor.rs @@ -91,6 +91,7 @@ async fn process_command(services: Arc, input: &CommandInput) -> Proce } } +#[allow(clippy::result_large_err)] fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult { let link = "Please submit a [bug report](https://github.com/girlbossceo/conduwuit/issues/new). 
🥺"; @@ -100,7 +101,7 @@ fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult { Err(reply(content, command.reply_id.as_deref())) } -// Parse and process a message from the admin room +/// Parse and process a message from the admin room async fn process( context: &Command<'_>, command: AdminCommand, @@ -164,7 +165,8 @@ fn capture_create(context: &Command<'_>) -> (Arc, Arc>) { (capture, logs) } -// Parse chat messages from the admin room into an AdminCommand object +/// Parse chat messages from the admin room into an AdminCommand object +#[allow(clippy::result_large_err)] fn parse<'a>( services: &Arc, input: &'a CommandInput, @@ -232,7 +234,7 @@ fn complete_command(mut cmd: clap::Command, line: &str) -> String { ret.join(" ") } -// Parse chat messages from the admin room into an AdminCommand object +/// Parse chat messages from the admin room into an AdminCommand object fn parse_line(command_line: &str) -> Vec { let mut argv = command_line .split_whitespace() diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 2b8209d4..32438098 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -109,7 +109,7 @@ pub(crate) async fn get_register_available_route( if !info.is_user_match(&user_id) { return Err!(Request(Exclusive("Username is not in an appservice namespace."))); } - }; + } if services.appservice.is_exclusive_user_id(&user_id).await { return Err!(Request(Exclusive("Username is reserved by an appservice."))); @@ -159,7 +159,7 @@ pub(crate) async fn register_route( | (None, _) => { info!(%is_guest, "Rejecting registration attempt as registration is disabled"); }, - }; + } return Err!(Request(Forbidden("Registration has been disabled."))); } diff --git a/src/api/client/state.rs b/src/api/client/state.rs index db79735f..9563c26d 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -254,7 +254,7 @@ async fn allowed_to_send_state_event( "Room server ACL event is invalid: {e}" )))); }, - }; + } }, | StateEventType::RoomEncryption => // Forbid m.room.encryption if encryption is disabled diff --git a/src/core/utils/string.rs b/src/core/utils/string.rs index 9340d009..d8fa3f95 100644 --- a/src/core/utils/string.rs +++ b/src/core/utils/string.rs @@ -60,6 +60,7 @@ pub fn camel_to_snake_string(s: &str) -> String { } #[inline] +#[allow(clippy::unbuffered_bytes)] // these are allocated string utilities, not file I/O utils pub fn camel_to_snake_case(output: &mut O, input: I) -> Result<()> where I: std::io::Read, From 5dea52f0f87dc640274e0f3ecb38b96ac9293f44 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 23:45:53 -0400 Subject: [PATCH 133/310] stop doing complement cert gen and just use self-signed cert Signed-off-by: June Clementine Strawberry --- bin/complement | 2 +- flake.lock | 6 +++--- nix/pkgs/complement/certificate.crt | 21 +++++++++++++++++++ nix/pkgs/complement/default.nix | 19 +---------------- nix/pkgs/complement/signing_request.csr | 28 ++++++++++++------------- nix/pkgs/complement/v3.ext | 6 ++++++ 6 files changed, 46 insertions(+), 36 deletions(-) create mode 100644 nix/pkgs/complement/certificate.crt diff --git a/bin/complement b/bin/complement index 92539f97..3aa5a6f5 100755 --- a/bin/complement +++ b/bin/complement @@ -68,7 +68,7 @@ set +o pipefail env \ -C "$COMPLEMENT_SRC" \ COMPLEMENT_BASE_IMAGE="$COMPLEMENT_BASE_IMAGE" \ - go test -tags="conduwuit_blacklist" -timeout 1h -json ./tests/... | tee "$LOG_FILE" + go test -tags="conduwuit_blacklist" -v -timeout 1h -json ./tests/... 
| tee "$LOG_FILE" set -o pipefail # Post-process the results into an easy-to-compare format, sorted by Test name for reproducible results diff --git a/flake.lock b/flake.lock index 03fc205c..63cc2787 100644 --- a/flake.lock +++ b/flake.lock @@ -80,11 +80,11 @@ "complement": { "flake": false, "locked": { - "lastModified": 1741378155, - "narHash": "sha256-rJSfqf3q4oWxcAwENtAowLZeCi8lktwKVH9XQvvZR64=", + "lastModified": 1741757487, + "narHash": "sha256-Fkx/krwI3h6wJ6Mj199KlXUNJNEwl7h1pR4/d2ncmKw=", "owner": "girlbossceo", "repo": "complement", - "rev": "1502a00d8551d0f6e8954a23e43868877c3e57d9", + "rev": "40982a261cfc36650f74967f99fb1a049b13e065", "type": "github" }, "original": { diff --git a/nix/pkgs/complement/certificate.crt b/nix/pkgs/complement/certificate.crt new file mode 100644 index 00000000..5dd4fdea --- /dev/null +++ b/nix/pkgs/complement/certificate.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDfzCCAmegAwIBAgIUcrZdSPmCh33Evys/U6mTPpShqdcwDQYJKoZIhvcNAQEL +BQAwPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQKDAx3b29mZXJz +IGluYy4xDDAKBgNVBAMMA2hzMTAgFw0yNTAzMTMxMjU4NTFaGA8yMDUyMDcyODEy +NTg1MVowPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQKDAx3b29m +ZXJzIGluYy4xDDAKBgNVBAMMA2hzMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBANL+h2ZmK/FqN5uLJPtIy6Feqcyb6EX7MQBEtxuJ56bTAbjHuCLZLpYt +/wOWJ91drHqZ7Xd5iTisGdMu8YS803HSnHkzngf4VXKhVrdzW2YDrpZRxmOhtp88 +awOHmP7mqlJyBbCOQw8aDVrT0KmEIWzA7g+nFRQ5Ff85MaP+sQrHGKZbo61q8HBp +L0XuaqNckruUKtxnEqrm5xx5sYyYKg7rrSFE5JMFoWKB1FNWJxyWT42BhGtnJZsK +K5c+NDSOU4TatxoN6mpNSBpCz/a11PiQHMEfqRk6JA4g3911dqPTfZBevUdBh8gl +8maIzqeZGhvyeKTmull1Y0781yyuj98CAwEAAaNxMG8wCQYDVR0TBAIwADALBgNV +HQ8EBAMCBPAwNgYDVR0RBC8wLYIRKi5kb2NrZXIuaW50ZXJuYWyCA2hzMYIDaHMy +ggNoczOCA2hzNIcEfwAAATAdBgNVHQ4EFgQUr4VYrmW1d+vjBTJewvy7fJYhLDYw +DQYJKoZIhvcNAQELBQADggEBADkYqkjNYxjWX8hUUAmFHNdCwzT1CpYe/5qzLiyJ +irDSdMlC5g6QqMUSrpu7nZxo1lRe1dXGroFVfWpoDxyCjSQhplQZgtYqtyLfOIx+ +HQ7cPE/tUU/KsTGc0aL61cETB6u8fj+rQKUGdfbSlm0Rpu4v0gC8RnDj06X/hZ7e +VkWU+dOBzxlqHuLlwFFtVDgCyyTatIROx5V+GpMHrVqBPO7HcHhwqZ30k2kMM8J3 +y1CWaliQM85jqtSZV+yUHKQV8EksSowCFJuguf+Ahz0i0/koaI3i8m4MRN/1j13d +jbTaX5a11Ynm3A27jioZdtMRty6AJ88oCp18jxVzqTxNNO4= +-----END CERTIFICATE----- diff --git a/nix/pkgs/complement/default.nix b/nix/pkgs/complement/default.nix index bbd1bd74..9b010e14 100644 --- a/nix/pkgs/complement/default.nix +++ b/nix/pkgs/complement/default.nix @@ -3,10 +3,8 @@ , buildEnv , coreutils , dockerTools -, gawk , lib , main -, openssl , stdenv , tini , writeShellScriptBin @@ -42,21 +40,6 @@ let start = writeShellScriptBin "start" '' set -euxo pipefail - cp ${./v3.ext} /complement/v3.ext - echo "DNS.1 = $SERVER_NAME" >> /complement/v3.ext - echo "IP.1 = $(${lib.getExe gawk} 'END{print $1}' /etc/hosts)" \ - >> /complement/v3.ext - ${lib.getExe openssl} x509 \ - -req \ - -extfile /complement/v3.ext \ - -in ${./signing_request.csr} \ - -CA /complement/ca/ca.crt \ - -CAkey /complement/ca/ca.key \ - -CAcreateserial \ - -out /complement/certificate.crt \ - -days 1 \ - -sha256 - ${lib.getExe' coreutils "env"} \ CONDUWUIT_SERVER_NAME="$SERVER_NAME" \ ${lib.getExe main'} @@ -93,7 +76,7 @@ dockerTools.buildImage { Env = [ "CONDUWUIT_TLS__KEY=${./private_key.key}" - "CONDUWUIT_TLS__CERTS=/complement/certificate.crt" + "CONDUWUIT_TLS__CERTS=${./certificate.crt}" "CONDUWUIT_CONFIG=${./config.toml}" "RUST_BACKTRACE=full" ]; diff --git a/nix/pkgs/complement/signing_request.csr b/nix/pkgs/complement/signing_request.csr index 707e73b4..e2aa658e 100644 --- a/nix/pkgs/complement/signing_request.csr +++ 
b/nix/pkgs/complement/signing_request.csr @@ -1,16 +1,16 @@ -----BEGIN CERTIFICATE REQUEST----- -MIICkTCCAXkCAQAwTDELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRYwFAYDVQQK -DA13b29mZXJzLCBpbmMuMRgwFgYDVQQDDA9jb21wbGVtZW50LW9ubHkwggEiMA0G -CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDS/odmZivxajebiyT7SMuhXqnMm+hF -+zEARLcbieem0wG4x7gi2S6WLf8DlifdXax6me13eYk4rBnTLvGEvNNx0px5M54H -+FVyoVa3c1tmA66WUcZjobafPGsDh5j+5qpScgWwjkMPGg1a09CphCFswO4PpxUU -ORX/OTGj/rEKxximW6OtavBwaS9F7mqjXJK7lCrcZxKq5uccebGMmCoO660hROST -BaFigdRTVicclk+NgYRrZyWbCiuXPjQ0jlOE2rcaDepqTUgaQs/2tdT4kBzBH6kZ -OiQOIN/ddXaj032QXr1HQYfIJfJmiM6nmRob8nik5rpZdWNO/Ncsro/fAgMBAAGg -ADANBgkqhkiG9w0BAQsFAAOCAQEAjW+aD4E0phtRT5b2RyedY1uiSe7LQECsQnIO -wUSyGGG1GXYlJscyxxyzE9W9+QIALrxZkmc/+e02u+bFb1zQXW/uB/7u7FgXzrj6 -2YSDiWYXiYKvgGWEfCi3lpcTJK9x6WWkR+iREaoKRjcl0ynhhGuR7YwP38TNyu+z -FN6B1Lo398fvJkaTCiiHngWiwztXZ2d0MxkicuwZ1LJhIQA72OTl3QoRb5uiqbze -T9QJfU6W3v8cB8c8PuKMv5gl1QsGNtlfyQB56/X0cMxWl25vWXd2ankLkAGRTDJ8 -9YZHxP1ki4/yh75AknFq02nCOsmxYrAazCYgP2TzIPhQwBurKQ== +MIIChDCCAWwCAQAwPzELMAkGA1UEBhMCNjkxCzAJBgNVBAgMAjQyMRUwEwYDVQQK +DAx3b29mZXJzIGluYy4xDDAKBgNVBAMMA2hzMTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBANL+h2ZmK/FqN5uLJPtIy6Feqcyb6EX7MQBEtxuJ56bTAbjH +uCLZLpYt/wOWJ91drHqZ7Xd5iTisGdMu8YS803HSnHkzngf4VXKhVrdzW2YDrpZR +xmOhtp88awOHmP7mqlJyBbCOQw8aDVrT0KmEIWzA7g+nFRQ5Ff85MaP+sQrHGKZb +o61q8HBpL0XuaqNckruUKtxnEqrm5xx5sYyYKg7rrSFE5JMFoWKB1FNWJxyWT42B +hGtnJZsKK5c+NDSOU4TatxoN6mpNSBpCz/a11PiQHMEfqRk6JA4g3911dqPTfZBe +vUdBh8gl8maIzqeZGhvyeKTmull1Y0781yyuj98CAwEAAaAAMA0GCSqGSIb3DQEB +CwUAA4IBAQDR/gjfxN0IID1MidyhZB4qpdWn3m6qZnEQqoTyHHdWalbfNXcALC79 +ffS+Smx40N5hEPvqy6euR89N5YuYvt8Hs+j7aWNBn7Wus5Favixcm2JcfCTJn2R3 +r8FefuSs2xGkoyGsPFFcXE13SP/9zrZiwvOgSIuTdz/Pbh6GtEx7aV4DqHJsrXnb +XuPxpQleoBqKvQgSlmaEBsJg13TQB+Fl2foBVUtqAFDQiv+RIuircf0yesMCKJaK +MPH4Oo+r3pR8lI8ewfJPreRhCoV+XrGYMubaakz003TJ1xlOW8M+N9a6eFyMVh76 +U1nY/KP8Ua6Lgaj9PRz7JCRzNoshZID/ -----END CERTIFICATE REQUEST----- diff --git a/nix/pkgs/complement/v3.ext b/nix/pkgs/complement/v3.ext index 6083d960..0deaa48a 100644 --- a/nix/pkgs/complement/v3.ext +++ b/nix/pkgs/complement/v3.ext @@ -4,3 +4,9 @@ keyUsage = digitalSignature, nonRepudiation, keyEncipherment, dataEncipherment subjectAltName = @alt_names [alt_names] +DNS.1 = *.docker.internal +DNS.2 = hs1 +DNS.3 = hs2 +DNS.4 = hs3 +DNS.5 = hs4 +IP.1 = 127.0.0.1 From 258b399de93e74b00695ab42697dc31f5a49aa81 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 13 Mar 2025 10:52:13 -0400 Subject: [PATCH 134/310] bump ruwuma Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- flake.lock | 6 +++--- nix/pkgs/complement/config.toml | 6 +++--- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ab155fd0..c28f4eab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3507,7 +3507,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "assign", "js_int", @@ -3527,7 +3527,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "js_int", "ruma-common", @@ -3539,7 +3539,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "as_variant", "assign", @@ -3562,7 +3562,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "as_variant", "base64 0.22.1", @@ -3594,7 +3594,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "as_variant", "indexmap 2.7.1", @@ -3619,7 +3619,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "bytes", "headers", @@ -3641,7 +3641,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "js_int", "thiserror 2.0.11", @@ -3650,7 +3650,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "js_int", "ruma-common", @@ -3660,7 +3660,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3675,7 +3675,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "js_int", "ruma-common", @@ -3687,7 
+3687,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=24d018a0015bb85489ae84564701a49a643bcc57#24d018a0015bb85489ae84564701a49a643bcc57" +source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index fd477850..db55b9b8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "24d018a0015bb85489ae84564701a49a643bcc57" +rev = "d197318a2507d38ffe6ee524d0d52728ca72538a" features = [ "compat", "rand", diff --git a/flake.lock b/flake.lock index 63cc2787..1f87b9b6 100644 --- a/flake.lock +++ b/flake.lock @@ -80,11 +80,11 @@ "complement": { "flake": false, "locked": { - "lastModified": 1741757487, - "narHash": "sha256-Fkx/krwI3h6wJ6Mj199KlXUNJNEwl7h1pR4/d2ncmKw=", + "lastModified": 1741891349, + "narHash": "sha256-YvrzOWcX7DH1drp5SGa+E/fc7wN3hqFtPbqPjZpOu1Q=", "owner": "girlbossceo", "repo": "complement", - "rev": "40982a261cfc36650f74967f99fb1a049b13e065", + "rev": "e587b3df569cba411aeac7c20b6366d03c143745", "type": "github" }, "original": { diff --git a/nix/pkgs/complement/config.toml b/nix/pkgs/complement/config.toml index 759f8d78..7f4ecef7 100644 --- a/nix/pkgs/complement/config.toml +++ b/nix/pkgs/complement/config.toml @@ -6,7 +6,7 @@ allow_public_room_directory_over_federation = true allow_public_room_directory_without_auth = true allow_registration = true database_path = "/database" -log = "trace,h2=warn,hyper=warn" +log = "trace,h2=debug,hyper=debug" port = [8008, 8448] trusted_servers = [] only_query_trusted_key_servers = false @@ -19,11 +19,11 @@ url_preview_domain_explicit_denylist = ["*"] media_compat_file_link = false media_startup_check = true prune_missing_media = true -log_colors = false +log_colors = true admin_room_notices = false allow_check_for_updates = false intentionally_unknown_config_option_for_testing = true -rocksdb_log_level = "debug" +rocksdb_log_level = "info" rocksdb_max_log_files = 1 rocksdb_recovery_mode = 0 rocksdb_paranoid_file_checks = true From 6c29792b3d9dfe1e65c5d3545296d431e058e375 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 11 Mar 2025 22:21:42 -0400 Subject: [PATCH 135/310] respect include_leave syncv3 filter Signed-off-by: June Clementine Strawberry --- src/api/client/sync/v3.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index fb59837b..70c4c6a7 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -219,6 +219,7 @@ pub(crate) async fn build_sync_events( sender_user, next_batch, full_state, + filter.room.include_leave, &filter, ) .map_ok(move |left_room| (room_id, left_room)) @@ -412,6 +413,7 @@ async fn handle_left_room( sender_user: &UserId, next_batch: u64, full_state: bool, + include_leave: bool, filter: &FilterDefinition, ) -> Result> { let left_count = services @@ -540,6 +542,10 @@ async fn handle_left_room( continue; }; + if !include_leave && pdu.sender == sender_user { + continue; + } + left_state_events.push(pdu.to_sync_state_event()); } } From ee3c585555a80c037bdaa861beeecbf6e19a7f04 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Fri, 14 Mar 2025 15:57:18 -0400 Subject: [PATCH 136/310] skip a few flakey complement tests 
Signed-off-by: June Clementine Strawberry --- bin/complement | 4 +- .../complement/test_results.jsonl | 82 ++----------------- 2 files changed, 8 insertions(+), 78 deletions(-) diff --git a/bin/complement b/bin/complement index 3aa5a6f5..c437503e 100755 --- a/bin/complement +++ b/bin/complement @@ -18,7 +18,7 @@ RESULTS_FILE="${3:-complement_test_results.jsonl}" COMPLEMENT_BASE_IMAGE="${COMPLEMENT_BASE_IMAGE:-complement-conduwuit:main}" # Complement tests that are skipped due to flakiness/reliability issues or we don't implement such features and won't for a long time -#SKIPPED_COMPLEMENT_TESTS='-skip=TestPartialStateJoin.*' +SKIPPED_COMPLEMENT_TESTS='TestPartialStateJoin.*|TestRoomDeleteAlias/Parallel/Regular_users_can_add_and_delete_aliases_when_m.*|TestRoomDeleteAlias/Parallel/Can_delete_canonical_alias|TestUnbanViaInvite.*|TestRoomState/Parallel/GET_/publicRooms_lists.*"|TestRoomDeleteAlias/Parallel/Users_with_sufficient_power-level_can_delete_other.*' # $COMPLEMENT_SRC needs to be a directory to Complement source code if [ -f "$COMPLEMENT_SRC" ]; then @@ -68,7 +68,7 @@ set +o pipefail env \ -C "$COMPLEMENT_SRC" \ COMPLEMENT_BASE_IMAGE="$COMPLEMENT_BASE_IMAGE" \ - go test -tags="conduwuit_blacklist" -v -timeout 1h -json ./tests/... | tee "$LOG_FILE" + go test -tags="conduwuit_blacklist" -skip="$SKIPPED_COMPLEMENT_TESTS" -v -timeout 1h -json ./tests/... | tee "$LOG_FILE" set -o pipefail # Post-process the results into an easy-to-compare format, sorted by Test name for reproducible results diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 5fb850f1..6b5f670e 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -174,10 +174,10 @@ {"Action":"pass","Test":"TestFilter"} {"Action":"fail","Test":"TestFilterMessagesByRelType"} {"Action":"pass","Test":"TestGappedSyncLeaveSection"} -{"Action":"fail","Test":"TestGetFilteredRoomMembers"} -{"Action":"fail","Test":"TestGetFilteredRoomMembers/membership/join"} -{"Action":"fail","Test":"TestGetFilteredRoomMembers/membership/leave"} -{"Action":"fail","Test":"TestGetFilteredRoomMembers/not_membership"} +{"Action":"pass","Test":"TestGetFilteredRoomMembers"} +{"Action":"pass","Test":"TestGetFilteredRoomMembers/membership/join"} +{"Action":"pass","Test":"TestGetFilteredRoomMembers/membership/leave"} +{"Action":"pass","Test":"TestGetFilteredRoomMembers/not_membership"} {"Action":"fail","Test":"TestGetMissingEventsGapFilling"} {"Action":"pass","Test":"TestGetRoomMembers"} {"Action":"fail","Test":"TestGetRoomMembersAtPoint"} @@ -360,72 +360,6 @@ {"Action":"pass","Test":"TestOutboundFederationProfile"} {"Action":"pass","Test":"TestOutboundFederationProfile/Outbound_federation_can_query_profile_data"} {"Action":"pass","Test":"TestOutboundFederationSend"} -{"Action":"fail","Test":"TestPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanFastJoinDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanLazyLoadingSyncDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveDeviceListUpdateDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsWithHalfMissingGrandparentsDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsWithHalfMissingParentsDuringPartialStateJoin"} 
-{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveEventsWithMissingParentsDuringPartialStateJoin"} -{"Action":"skip","Test":"TestPartialStateJoin/CanReceivePresenceDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveReceiptDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveSigningKeyUpdateDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveToDeviceDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanReceiveTypingDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/CanSendEventsDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/Can_change_display_name_during_partial_state_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking"} -{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_for_user_incorrectly_believed_to_be_in_room"} -{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_when_failing_to_complete_partial_state_join"} -{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_when_leaving_partial_state_room"} -{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_no_longer_tracked_when_new_member_leaves_partial_state_room"} -{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracked_for_new_members_in_partial_state_room"} -{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_pre-existing_members_in_partial_state_room"} -{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_user_incorrectly_believed_to_be_in_room_when_they_join_another_shared_room_before_partial_state_join_completes"} -{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_user_incorrectly_believed_to_be_in_room_when_they_rejoin_after_partial_state_join_completes"} -{"Action":"skip","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_for_user_incorrectly_believed_to_be_in_room_when_they_rejoin_before_partial_state_join_completes"} -{"Action":"fail","Test":"TestPartialStateJoin/Device_list_tracking/Device_list_tracking_when_pre-existing_members_in_partial_state_room_join_another_shared_room"} -{"Action":"fail","Test":"TestPartialStateJoin/EagerIncrementalSyncDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/EagerInitialSyncDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/EagerLongPollingSyncWokenWhenResyncCompletes"} -{"Action":"fail","Test":"TestPartialStateJoin/GappySyncAfterPartialStateSynced"} -{"Action":"fail","Test":"TestPartialStateJoin/Lazy-loading_gappy_sync_includes_remote_memberships_during_partial_state_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Lazy-loading_incremental_sync_includes_remote_memberships_during_partial_state_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Lazy-loading_initial_sync_includes_remote_memberships_during_partial_state_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync"} -{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/can_be_triggered_by_remote_ban"} -{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/can_be_triggered_by_remote_kick"} -{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/does_not_wait_for_resync"} 
-{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/is_seen_after_the_resync"} -{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/succeeds,_then_another_user_can_join_without_resync_completing"} -{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/succeeds,_then_rejoin_succeeds_without_resync_completing"} -{"Action":"fail","Test":"TestPartialStateJoin/Leave_during_resync/works_after_a_second_partial_join"} -{"Action":"fail","Test":"TestPartialStateJoin/MembersRequestBlocksDuringPartialStateJoin"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_no_longer_reach_departed_servers_after_partial_state_join_completes"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_all_servers_in_partial_state_rooms"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_absent_servers_once_partial_state_join_completes"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_absent_servers_once_partial_state_join_completes_even_though_remote_server_left_room"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_kicked_servers_once_partial_state_join_completes"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_incorrectly_kicked_servers_once_partial_state_join_completes_even_though_remote_server_left_room"} -{"Action":"fail","Test":"TestPartialStateJoin/Outgoing_device_list_updates/Device_list_updates_reach_newly_joined_servers_in_partial_state_rooms"} -{"Action":"fail","Test":"TestPartialStateJoin/PartialStateJoinContinuesAfterRestart"} -{"Action":"fail","Test":"TestPartialStateJoin/PartialStateJoinSyncsUsingOtherHomeservers"} -{"Action":"skip","Test":"TestPartialStateJoin/Purge_during_resync"} -{"Action":"fail","Test":"TestPartialStateJoin/Rejected_events_remain_rejected_after_resync"} -{"Action":"fail","Test":"TestPartialStateJoin/Rejects_make_join_during_partial_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Rejects_make_knock_during_partial_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Rejects_send_join_during_partial_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Rejects_send_knock_during_partial_join"} -{"Action":"fail","Test":"TestPartialStateJoin/Resync_completes_even_when_events_arrive_before_their_prev_events"} -{"Action":"fail","Test":"TestPartialStateJoin/Room_aliases_can_be_added_and_deleted_during_a_resync"} -{"Action":"fail","Test":"TestPartialStateJoin/Room_aliases_can_be_added_and_queried_during_a_resync"} -{"Action":"skip","Test":"TestPartialStateJoin/Room_stats_are_correctly_updated_once_state_re-sync_completes"} -{"Action":"fail","Test":"TestPartialStateJoin/State_accepted_incorrectly"} -{"Action":"fail","Test":"TestPartialStateJoin/State_rejected_incorrectly"} -{"Action":"fail","Test":"TestPartialStateJoin/User_directory_is_correctly_updated_once_state_re-sync_completes"} -{"Action":"fail","Test":"TestPartialStateJoin/joined_members_blocks_during_partial_state_join"} {"Action":"fail","Test":"TestPollsLocalPushRules"} {"Action":"fail","Test":"TestPollsLocalPushRules/Polls_push_rules_are_correctly_presented_to_the_client"} {"Action":"pass","Test":"TestPowerLevels"} @@ -561,16 +495,13 @@ 
{"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Room_creation_reports_m.room.member_to_myself"} {"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Setting_room_topic_reports_m.room.topic_to_myself"} {"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel/Setting_state_twice_is_idempotent"} -{"Action":"fail","Test":"TestRoomDeleteAlias"} -{"Action":"fail","Test":"TestRoomDeleteAlias/Parallel"} +{"Action":"pass","Test":"TestRoomDeleteAlias"} +{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel"} {"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Alias_creators_can_delete_alias_with_no_ops"} {"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Alias_creators_can_delete_canonical_alias_with_no_ops"} -{"Action":"fail","Test":"TestRoomDeleteAlias/Parallel/Can_delete_canonical_alias"} {"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Deleting_a_non-existent_alias_should_return_a_404"} {"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Regular_users_can_add_and_delete_aliases_in_the_default_room_configuration"} -{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Regular_users_can_add_and_delete_aliases_when_m.room.aliases_is_restricted"} {"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Users_can't_delete_other's_aliases"} -{"Action":"pass","Test":"TestRoomDeleteAlias/Parallel/Users_with_sufficient_power-level_can_delete_other's_aliases"} {"Action":"fail","Test":"TestRoomForget"} {"Action":"fail","Test":"TestRoomForget/Parallel"} {"Action":"pass","Test":"TestRoomForget/Parallel/Can't_forget_room_you're_still_in"} @@ -687,7 +618,6 @@ {"Action":"pass","Test":"TestTyping"} {"Action":"pass","Test":"TestTyping/Typing_can_be_explicitly_stopped"} {"Action":"pass","Test":"TestTyping/Typing_notification_sent_to_local_room_members"} -{"Action":"fail","Test":"TestUnbanViaInvite"} {"Action":"fail","Test":"TestUnknownEndpoints"} {"Action":"pass","Test":"TestUnknownEndpoints/Client-server_endpoints"} {"Action":"fail","Test":"TestUnknownEndpoints/Key_endpoints"} From 4518f554081532400bfae64b931cd135dbceb755 Mon Sep 17 00:00:00 2001 From: cy Date: Wed, 12 Mar 2025 20:46:14 -0400 Subject: [PATCH 137/310] guard against using someone else's access token in UIAA --- src/service/uiaa/mod.rs | 10 ++++++++-- tests/test_results/complement/test_results.jsonl | 2 +- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 39dd2b41..7803c736 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -4,7 +4,7 @@ use std::{ }; use conduwuit::{ - Error, Result, err, error, implement, utils, + Err, Error, Result, err, error, implement, utils, utils::{hash, string::EMPTY}, }; use database::{Deserialized, Json, Map}; @@ -150,12 +150,18 @@ pub async fn try_auth( )); }; - let user_id = UserId::parse_with_server_name( + let user_id_from_username = UserId::parse_with_server_name( username.clone(), self.services.globals.server_name(), ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid."))?; + // Check if the access token being used matches the credentials used for UIAA + if user_id.localpart() != user_id_from_username.localpart() { + return Err!(Request(Forbidden("User ID and access token mismatch."))); + } + let user_id = user_id_from_username; + // Check if password is correct if let Ok(hash) = self.services.users.password_hash(&user_id).await { let hash_matches = hash::verify_password(password, &hash).is_ok(); diff --git 
a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 6b5f670e..01d2ca4a 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -127,7 +127,7 @@ {"Action":"fail","Test":"TestDeviceListsUpdateOverFederationOnRoomJoin"} {"Action":"fail","Test":"TestDeviceManagement"} {"Action":"fail","Test":"TestDeviceManagement/DELETE_/device/{deviceId}"} -{"Action":"fail","Test":"TestDeviceManagement/DELETE_/device/{deviceId}_requires_UI_auth_user_to_match_device_owner"} +{"Action":"pass","Test":"TestDeviceManagement/DELETE_/device/{deviceId}_requires_UI_auth_user_to_match_device_owner"} {"Action":"pass","Test":"TestDeviceManagement/GET_/device/{deviceId}"} {"Action":"pass","Test":"TestDeviceManagement/GET_/device/{deviceId}_gives_a_404_for_unknown_devices"} {"Action":"pass","Test":"TestDeviceManagement/GET_/devices"} From 658c19d55eb5fdf30f27e189c414208e2eae6e24 Mon Sep 17 00:00:00 2001 From: cy Date: Fri, 14 Mar 2025 23:01:28 -0400 Subject: [PATCH 138/310] check if we already have a more preferable key backup before adding --- src/api/client/backup.rs | 81 ++++++++++++++++--- .../complement/test_results.jsonl | 8 +- 2 files changed, 76 insertions(+), 13 deletions(-) diff --git a/src/api/client/backup.rs b/src/api/client/backup.rs index 714e3f86..63c47e01 100644 --- a/src/api/client/backup.rs +++ b/src/api/client/backup.rs @@ -1,3 +1,5 @@ +use std::cmp::Ordering; + use axum::extract::State; use conduwuit::{Err, err}; use ruma::{ @@ -232,16 +234,77 @@ pub(crate) async fn add_backup_keys_for_session_route( ))); } - services + // Check if we already have a better key + let mut ok_to_replace = true; + if let Some(old_key) = &services .key_backups - .add_key( - body.sender_user(), - &body.version, - &body.room_id, - &body.session_id, - &body.session_data, - ) - .await?; + .get_session(body.sender_user(), &body.version, &body.room_id, &body.session_id) + .await + .ok() + { + let old_is_verified = old_key + .get_field::("is_verified")? + .unwrap_or_default(); + + let new_is_verified = body + .session_data + .get_field::("is_verified")? + .ok_or_else(|| err!(Request(BadJson("`is_verified` field should exist"))))?; + + // Prefer key that `is_verified` + if old_is_verified != new_is_verified { + if old_is_verified { + ok_to_replace = false; + } + } else { + // If both have same `is_verified`, prefer the one with lower + // `first_message_index` + let old_first_message_index = old_key + .get_field::("first_message_index")? + .unwrap_or(UInt::MAX); + + let new_first_message_index = body + .session_data + .get_field::("first_message_index")? + .ok_or_else(|| { + err!(Request(BadJson("`first_message_index` field should exist"))) + })?; + + ok_to_replace = match new_first_message_index.cmp(&old_first_message_index) { + | Ordering::Less => true, + | Ordering::Greater => false, + | Ordering::Equal => { + // If both have same `first_message_index`, prefer the one with lower + // `forwarded_count` + let old_forwarded_count = old_key + .get_field::("forwarded_count")? + .unwrap_or(UInt::MAX); + + let new_forwarded_count = body + .session_data + .get_field::("forwarded_count")? 
+ .ok_or_else(|| { + err!(Request(BadJson("`forwarded_count` field should exist"))) + })?; + + new_forwarded_count < old_forwarded_count + }, + }; + }; + } + + if ok_to_replace { + services + .key_backups + .add_key( + body.sender_user(), + &body.version, + &body.room_id, + &body.session_id, + &body.session_data, + ) + .await?; + } Ok(add_backup_keys_for_session::v3::Response { count: services diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 01d2ca4a..97170a5c 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -134,10 +134,10 @@ {"Action":"pass","Test":"TestDeviceManagement/PUT_/device/{deviceId}_gives_a_404_for_unknown_devices"} {"Action":"pass","Test":"TestDeviceManagement/PUT_/device/{deviceId}_updates_device_fields"} {"Action":"pass","Test":"TestDisplayNameUpdate"} -{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules"} -{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel"} -{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel/{isVerified:false_firstMessageIndex:10_forwardedCount:5}"} -{"Action":"fail","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel/{isVerified:true_firstMessageIndex:10_forwardedCount:5}"} +{"Action":"pass","Test":"TestE2EKeyBackupReplaceRoomKeyRules"} +{"Action":"pass","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel"} +{"Action":"pass","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel/{isVerified:false_firstMessageIndex:10_forwardedCount:5}"} +{"Action":"pass","Test":"TestE2EKeyBackupReplaceRoomKeyRules/parallel/{isVerified:true_firstMessageIndex:10_forwardedCount:5}"} {"Action":"pass","Test":"TestEvent"} {"Action":"pass","Test":"TestEvent/Parallel"} {"Action":"pass","Test":"TestEvent/Parallel/Large_Event"} From 7bf92c8a3710eeff229bd86bc81a89daa94b66d5 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Mon, 17 Mar 2025 22:50:29 -0400 Subject: [PATCH 139/310] replace unnecessary check when updating device keys Signed-off-by: June Clementine Strawberry --- src/api/client/backup.rs | 2 +- src/api/client/keys.rs | 20 ++++++++++++++++---- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/src/api/client/backup.rs b/src/api/client/backup.rs index 63c47e01..83955fea 100644 --- a/src/api/client/backup.rs +++ b/src/api/client/backup.rs @@ -290,7 +290,7 @@ pub(crate) async fn add_backup_keys_for_session_route( new_forwarded_count < old_forwarded_count }, }; - }; + } } if ok_to_replace { diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 9cd50e85..f50d7afa 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -80,14 +80,26 @@ pub(crate) async fn upload_keys_route( ))); } - // TODO: merge this and the existing event? 
- // This check is needed to assure that signatures are kept - if services + if let Ok(existing_keys) = services .users .get_device_keys(sender_user, sender_device) .await - .is_err() { + if existing_keys.json().get() == device_keys.json().get() { + debug!( + ?sender_user, + ?sender_device, + ?device_keys, + "Ignoring user uploaded keys as they are an exact copy already in the \ + database" + ); + } else { + services + .users + .add_device_keys(sender_user, sender_device, device_keys) + .await; + } + } else { services .users .add_device_keys(sender_user, sender_device, device_keys) From 33c5afe050491988ee8224af25b9b06e892f4b50 Mon Sep 17 00:00:00 2001 From: cy Date: Wed, 19 Mar 2025 20:55:14 -0400 Subject: [PATCH 140/310] delete pushers created with different access token on password change --- src/api/client/account.rs | 23 ++++++++++++- src/api/client/push.rs | 2 +- src/database/maps.rs | 4 +++ src/service/pusher/mod.rs | 34 +++++++++++++------ .../complement/test_results.jsonl | 4 +-- 5 files changed, 53 insertions(+), 14 deletions(-) diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 32438098..5dd622d7 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -4,7 +4,8 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ Err, Error, PduBuilder, Result, debug_info, err, error, info, is_equal_to, utils, - utils::ReadyExt, warn, + utils::{ReadyExt, stream::BroadbandExt}, + warn, }; use futures::{FutureExt, StreamExt}; use register::RegistrationKind; @@ -627,6 +628,26 @@ pub(crate) async fn change_password_route( .ready_filter(|id| *id != sender_device) .for_each(|id| services.users.remove_device(sender_user, id)) .await; + + // Remove all pushers except the ones associated with this session + services + .pusher + .get_pushkeys(sender_user) + .map(ToOwned::to_owned) + .broad_filter_map(|pushkey| async move { + services + .pusher + .get_pusher_device(&pushkey) + .await + .ok() + .filter(|pusher_device| pusher_device != sender_device) + .is_some() + .then_some(pushkey) + }) + .for_each(|pushkey| async move { + services.pusher.delete_pusher(sender_user, &pushkey).await; + }) + .await; } info!("User {sender_user} changed their password."); diff --git a/src/api/client/push.rs b/src/api/client/push.rs index 384b9dbc..cc1d3be2 100644 --- a/src/api/client/push.rs +++ b/src/api/client/push.rs @@ -503,7 +503,7 @@ pub(crate) async fn set_pushers_route( services .pusher - .set_pusher(sender_user, &body.action) + .set_pusher(sender_user, body.sender_device(), &body.action) .await?; Ok(set_pusher::v3::Response::new()) diff --git a/src/database/maps.rs b/src/database/maps.rs index 138bb038..1da9acc0 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -219,6 +219,10 @@ pub(super) static MAPS: &[Descriptor] = &[ name: "senderkey_pusher", ..descriptor::RANDOM_SMALL }, + Descriptor { + name: "pushkey_deviceid", + ..descriptor::RANDOM_SMALL + }, Descriptor { name: "server_signingkeys", ..descriptor::RANDOM diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 2b269b3d..27490fb8 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -10,7 +10,7 @@ use database::{Deserialized, Ignore, Interfix, Json, Map}; use futures::{Stream, StreamExt}; use ipaddress::IPAddress; use ruma::{ - RoomId, UInt, UserId, + DeviceId, OwnedDeviceId, RoomId, UInt, UserId, api::{ IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, client::push::{Pusher, PusherKind, set_pusher}, @@ -48,6 +48,7 @@ struct 
Services { struct Data { senderkey_pusher: Arc, + pushkey_deviceid: Arc, } impl crate::Service for Service { @@ -55,6 +56,7 @@ impl crate::Service for Service { Ok(Arc::new(Self { db: Data { senderkey_pusher: args.db["senderkey_pusher"].clone(), + pushkey_deviceid: args.db["pushkey_deviceid"].clone(), }, services: Services { globals: args.depend::("globals"), @@ -75,6 +77,7 @@ impl Service { pub async fn set_pusher( &self, sender: &UserId, + sender_device: &DeviceId, pusher: &set_pusher::v3::PusherAction, ) -> Result { match pusher { @@ -123,24 +126,35 @@ impl Service { } } - let key = (sender, data.pusher.ids.pushkey.as_str()); + let pushkey = data.pusher.ids.pushkey.as_str(); + let key = (sender, pushkey); self.db.senderkey_pusher.put(key, Json(pusher)); + self.db.pushkey_deviceid.insert(pushkey, sender_device); }, | set_pusher::v3::PusherAction::Delete(ids) => { - let key = (sender, ids.pushkey.as_str()); - self.db.senderkey_pusher.del(key); - - self.services - .sending - .cleanup_events(None, Some(sender), Some(ids.pushkey.as_str())) - .await - .ok(); + self.delete_pusher(sender, ids.pushkey.as_str()).await; }, } Ok(()) } + pub async fn delete_pusher(&self, sender: &UserId, pushkey: &str) { + let key = (sender, pushkey); + self.db.senderkey_pusher.del(key); + self.db.pushkey_deviceid.remove(pushkey); + + self.services + .sending + .cleanup_events(None, Some(sender), Some(pushkey)) + .await + .ok(); + } + + pub async fn get_pusher_device(&self, pushkey: &str) -> Result { + self.db.pushkey_deviceid.get(pushkey).await.deserialized() + } + pub async fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result { let senderkey = (sender, pushkey); self.db diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index 97170a5c..ac2733f8 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -69,8 +69,8 @@ {"Action":"pass","Test":"TestChangePassword/After_changing_password,_can_log_in_with_new_password"} {"Action":"pass","Test":"TestChangePassword/After_changing_password,_different_sessions_can_optionally_be_kept"} {"Action":"pass","Test":"TestChangePassword/After_changing_password,_existing_session_still_works"} -{"Action":"fail","Test":"TestChangePasswordPushers"} -{"Action":"fail","Test":"TestChangePasswordPushers/Pushers_created_with_a_different_access_token_are_deleted_on_password_change"} +{"Action":"pass","Test":"TestChangePasswordPushers"} +{"Action":"pass","Test":"TestChangePasswordPushers/Pushers_created_with_a_different_access_token_are_deleted_on_password_change"} {"Action":"pass","Test":"TestChangePasswordPushers/Pushers_created_with_the_same_access_token_are_not_deleted_on_password_change"} {"Action":"fail","Test":"TestClientSpacesSummary"} {"Action":"fail","Test":"TestClientSpacesSummary/max_depth"} From 07ec9d6d852a8ebb623c96b580af36e0d0d11697 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 26 Mar 2025 01:32:45 +0000 Subject: [PATCH 141/310] re-sort pushkey_deviceid (33c5afe050) Signed-off-by: Jason Volk --- src/database/maps.rs | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/database/maps.rs b/src/database/maps.rs index 1da9acc0..311c629f 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -121,14 +121,18 @@ pub(super) static MAPS: &[Descriptor] = &[ index_size: 512, ..descriptor::SEQUENTIAL }, - Descriptor { - name: "presenceid_presence", - ..descriptor::SEQUENTIAL_SMALL - }, Descriptor { name: 
"publicroomids", ..descriptor::RANDOM_SMALL }, + Descriptor { + name: "pushkey_deviceid", + ..descriptor::RANDOM_SMALL + }, + Descriptor { + name: "presenceid_presence", + ..descriptor::SEQUENTIAL_SMALL + }, Descriptor { name: "readreceiptid_readreceipt", ..descriptor::RANDOM @@ -219,10 +223,6 @@ pub(super) static MAPS: &[Descriptor] = &[ name: "senderkey_pusher", ..descriptor::RANDOM_SMALL }, - Descriptor { - name: "pushkey_deviceid", - ..descriptor::RANDOM_SMALL - }, Descriptor { name: "server_signingkeys", ..descriptor::RANDOM From aa4d2e236330693c61d5cb116b4c438b15431aec Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 9 Mar 2025 03:14:00 +0000 Subject: [PATCH 142/310] fix unused import without feature jemalloc_conf fix span passed by value Signed-off-by: Jason Volk --- src/core/alloc/je.rs | 3 +-- src/router/request.rs | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index 6870c1c0..51caf3a3 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -8,7 +8,6 @@ use std::{ }; use arrayvec::ArrayVec; -use const_str::concat_bytes; use tikv_jemalloc_ctl as mallctl; use tikv_jemalloc_sys as ffi; use tikv_jemallocator as jemalloc; @@ -20,7 +19,7 @@ use crate::{ #[cfg(feature = "jemalloc_conf")] #[unsafe(no_mangle)] -pub static malloc_conf: &[u8] = concat_bytes!( +pub static malloc_conf: &[u8] = const_str::concat_bytes!( "lg_extent_max_active_fit:4", ",oversize_threshold:16777216", ",tcache_max:2097152", diff --git a/src/router/request.rs b/src/router/request.rs index 00769b3f..dba90324 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -37,7 +37,7 @@ pub(crate) async fn handle( let parent = Span::current(); let task = services.server.runtime().spawn(async move { tokio::select! { - response = execute(&services_, req, next, parent) => response, + response = execute(&services_, req, next, &parent) => response, response = services_.server.until_shutdown() .then(|()| { let timeout = services_.server.config.client_shutdown_timeout; @@ -79,7 +79,7 @@ async fn execute( services: &Arc, req: http::Request, next: axum::middleware::Next, - parent: Span, + parent: &Span, ) -> Response { #[cfg(debug_assertions)] conduwuit::defer! 
{{ From 7294368015025ae4d7677c28837d3ac0a79539e6 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 12 Mar 2025 23:10:38 +0000 Subject: [PATCH 143/310] parallelize IO for PublicRoomsChunk vector Signed-off-by: Jason Volk --- src/api/client/directory.rs | 118 +++++++++++++++++++----------------- 1 file changed, 64 insertions(+), 54 deletions(-) diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 7ce32e4c..80b314b9 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -1,7 +1,17 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{Err, Error, Result, info, warn}; -use futures::{StreamExt, TryFutureExt}; +use conduwuit::{ + Err, Error, Result, info, + utils::{ + TryFutureExtExt, + stream::{ReadyExt, WidebandExt}, + }, + warn, +}; +use futures::{ + FutureExt, StreamExt, TryFutureExt, + future::{join, join4, join5}, +}; use ruma::{ OwnedRoomId, RoomId, ServerName, UInt, UserId, api::{ @@ -287,8 +297,8 @@ pub(crate) async fn get_public_rooms_filtered_helper( .directory .public_rooms() .map(ToOwned::to_owned) - .then(|room_id| public_rooms_chunk(services, room_id)) - .filter_map(|chunk| async move { + .wide_then(|room_id| public_rooms_chunk(services, room_id)) + .ready_filter_map(|chunk| { if !filter.room_types.is_empty() && !filter.room_types.contains(&RoomTypeFilter::from(chunk.room_type.clone())) { return None; } @@ -394,60 +404,60 @@ async fn user_can_publish_room( } async fn public_rooms_chunk(services: &Services, room_id: OwnedRoomId) -> PublicRoomsChunk { + let name = services.rooms.state_accessor.get_name(&room_id).ok(); + + let room_type = services.rooms.state_accessor.get_room_type(&room_id).ok(); + + let canonical_alias = services + .rooms + .state_accessor + .get_canonical_alias(&room_id) + .ok(); + + let avatar_url = services.rooms.state_accessor.get_avatar(&room_id); + + let topic = services.rooms.state_accessor.get_room_topic(&room_id).ok(); + + let world_readable = services.rooms.state_accessor.is_world_readable(&room_id); + + let join_rule = services + .rooms + .state_accessor + .room_state_get_content(&room_id, &StateEventType::RoomJoinRules, "") + .map_ok(|c: RoomJoinRulesEventContent| match c.join_rule { + | JoinRule::Public => PublicRoomJoinRule::Public, + | JoinRule::Knock => "knock".into(), + | JoinRule::KnockRestricted(_) => "knock_restricted".into(), + | _ => "invite".into(), + }); + + let guest_can_join = services.rooms.state_accessor.guest_can_join(&room_id); + + let num_joined_members = services.rooms.state_cache.room_joined_count(&room_id); + + let ( + (avatar_url, canonical_alias, guest_can_join, join_rule, name), + (num_joined_members, room_type, topic, world_readable), + ) = join( + join5(avatar_url, canonical_alias, guest_can_join, join_rule, name), + join4(num_joined_members, room_type, topic, world_readable), + ) + .boxed() + .await; + PublicRoomsChunk { - canonical_alias: services - .rooms - .state_accessor - .get_canonical_alias(&room_id) - .await - .ok(), - name: services.rooms.state_accessor.get_name(&room_id).await.ok(), - num_joined_members: services - .rooms - .state_cache - .room_joined_count(&room_id) - .await + avatar_url: avatar_url.into_option().unwrap_or_default().url, + canonical_alias, + guest_can_join, + join_rule: join_rule.unwrap_or_default(), + name, + num_joined_members: num_joined_members .unwrap_or(0) .try_into() .expect("joined count overflows ruma UInt"), - topic: services - .rooms - .state_accessor - .get_room_topic(&room_id) - .await - .ok(), - 
world_readable: services - .rooms - .state_accessor - .is_world_readable(&room_id) - .await, - guest_can_join: services.rooms.state_accessor.guest_can_join(&room_id).await, - avatar_url: services - .rooms - .state_accessor - .get_avatar(&room_id) - .await - .into_option() - .unwrap_or_default() - .url, - join_rule: services - .rooms - .state_accessor - .room_state_get_content(&room_id, &StateEventType::RoomJoinRules, "") - .map_ok(|c: RoomJoinRulesEventContent| match c.join_rule { - | JoinRule::Public => PublicRoomJoinRule::Public, - | JoinRule::Knock => "knock".into(), - | JoinRule::KnockRestricted(_) => "knock_restricted".into(), - | _ => "invite".into(), - }) - .await - .unwrap_or_default(), - room_type: services - .rooms - .state_accessor - .get_room_type(&room_id) - .await - .ok(), room_id, + room_type, + topic, + world_readable, } } From a57336ec1388ab26a692cf26768474bc3069df75 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 14 Mar 2025 06:54:08 +0000 Subject: [PATCH 144/310] assume canonical order in db serialization test Signed-off-by: Jason Volk --- src/database/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/database/tests.rs b/src/database/tests.rs index 140bc56d..1446a1fc 100644 --- a/src/database/tests.rs +++ b/src/database/tests.rs @@ -152,8 +152,8 @@ fn ser_json_macro() { let content = serde_json::to_value(content).expect("failed to serialize content"); let sender: &UserId = "@foo:example.com".try_into().unwrap(); let serialized = serialize_to_vec(Json(json!({ - "sender": sender, "content": content, + "sender": sender, }))) .expect("failed to serialize value"); From 17003ba773228055de107f9d8baf1b2848d86c1f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 15 Mar 2025 01:23:54 +0000 Subject: [PATCH 145/310] add FIFO compaction for persistent-cache descriptor; comments/cleanup Signed-off-by: Jason Volk --- src/database/engine/cf_opts.rs | 14 ++++++++--- src/database/engine/descriptor.rs | 39 ++++++++++++++++++++++++------- src/database/engine/open.rs | 6 ++--- src/database/maps.rs | 4 ++-- 4 files changed, 46 insertions(+), 17 deletions(-) diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index 5ddb9473..7ceec722 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -1,8 +1,8 @@ use conduwuit::{Config, Result, err, utils::math::Expected}; use rocksdb::{ BlockBasedIndexType, BlockBasedOptions, BlockBasedPinningTier, Cache, - DBCompressionType as CompressionType, DataBlockIndexType, LruCacheOptions, Options, - UniversalCompactOptions, UniversalCompactionStopStyle, + DBCompressionType as CompressionType, DataBlockIndexType, FifoCompactOptions, + LruCacheOptions, Options, UniversalCompactOptions, UniversalCompactionStopStyle, }; use super::descriptor::{CacheDisp, Descriptor}; @@ -16,7 +16,7 @@ pub(super) const SENTINEL_COMPRESSION_LEVEL: i32 = 32767; pub(crate) fn cf_options(ctx: &Context, opts: Options, desc: &Descriptor) -> Result { let cache = get_cache(ctx, desc); let config = &ctx.server.config; - descriptor_cf_options(opts, desc.clone(), config, cache.as_ref()) + descriptor_cf_options(opts, *desc, config, cache.as_ref()) } fn descriptor_cf_options( @@ -46,6 +46,7 @@ fn descriptor_cf_options( opts.set_compaction_style(desc.compaction); opts.set_compaction_pri(desc.compaction_pri); opts.set_universal_compaction_options(&uc_options(&desc)); + opts.set_fifo_compaction_options(&fifo_options(&desc)); let compression_shape: Vec<_> = desc .compression_shape @@ -142,6 +143,13 @@ fn 
set_compression(desc: &mut Descriptor, config: &Config) { } } +fn fifo_options(desc: &Descriptor) -> FifoCompactOptions { + let mut opts = FifoCompactOptions::default(); + opts.set_max_table_files_size(desc.limit_size); + + opts +} + fn uc_options(desc: &Descriptor) -> UniversalCompactOptions { let mut opts = UniversalCompactOptions::default(); opts.set_stop_style(UniversalCompactionStopStyle::Total); diff --git a/src/database/engine/descriptor.rs b/src/database/engine/descriptor.rs index 816555d2..2274da9c 100644 --- a/src/database/engine/descriptor.rs +++ b/src/database/engine/descriptor.rs @@ -6,14 +6,8 @@ use rocksdb::{ use super::cf_opts::SENTINEL_COMPRESSION_LEVEL; +/// Column Descriptor #[derive(Debug, Clone, Copy)] -pub(crate) enum CacheDisp { - Unique, - Shared, - SharedWith(&'static str), -} - -#[derive(Debug, Clone)] pub(crate) struct Descriptor { pub(crate) name: &'static str, pub(crate) dropped: bool, @@ -30,6 +24,7 @@ pub(crate) struct Descriptor { pub(crate) file_shape: i32, pub(crate) level0_width: i32, pub(crate) merge_width: (i32, i32), + pub(crate) limit_size: u64, pub(crate) ttl: u64, pub(crate) compaction: CompactionStyle, pub(crate) compaction_pri: CompactionPri, @@ -46,7 +41,16 @@ pub(crate) struct Descriptor { pub(crate) auto_readahead_max: usize, } -pub(crate) static BASE: Descriptor = Descriptor { +/// Cache Disposition +#[derive(Debug, Clone, Copy)] +pub(crate) enum CacheDisp { + Unique, + Shared, + SharedWith(&'static str), +} + +/// Base descriptor supplying common defaults to all derived descriptors. +static BASE: Descriptor = Descriptor { name: EMPTY, dropped: false, cache_disp: CacheDisp::Shared, @@ -62,6 +66,7 @@ pub(crate) static BASE: Descriptor = Descriptor { file_shape: 2, level0_width: 2, merge_width: (2, 16), + limit_size: 0, ttl: 60 * 60 * 24 * 21, compaction: CompactionStyle::Level, compaction_pri: CompactionPri::MinOverlappingRatio, @@ -78,6 +83,10 @@ pub(crate) static BASE: Descriptor = Descriptor { auto_readahead_max: 1024 * 1024 * 2, }; +/// Tombstone descriptor for columns which have been or will be deleted. +pub(crate) static DROPPED: Descriptor = Descriptor { dropped: true, ..BASE }; + +/// Descriptor for large datasets with random updates across the keyspace. pub(crate) static RANDOM: Descriptor = Descriptor { compaction_pri: CompactionPri::OldestSmallestSeqFirst, write_size: 1024 * 1024 * 32, @@ -88,6 +97,7 @@ pub(crate) static RANDOM: Descriptor = Descriptor { ..BASE }; +/// Descriptor for large datasets with updates to the end of the keyspace. pub(crate) static SEQUENTIAL: Descriptor = Descriptor { compaction_pri: CompactionPri::OldestLargestSeqFirst, write_size: 1024 * 1024 * 64, @@ -101,6 +111,7 @@ pub(crate) static SEQUENTIAL: Descriptor = Descriptor { ..BASE }; +/// Descriptor for small datasets with random updates across the keyspace. pub(crate) static RANDOM_SMALL: Descriptor = Descriptor { compaction: CompactionStyle::Universal, write_size: 1024 * 1024 * 16, @@ -117,6 +128,7 @@ pub(crate) static RANDOM_SMALL: Descriptor = Descriptor { ..RANDOM }; +/// Descriptor for small datasets with updates to the end of the keyspace. pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { compaction: CompactionStyle::Universal, write_size: 1024 * 1024 * 16, @@ -132,3 +144,14 @@ pub(crate) static SEQUENTIAL_SMALL: Descriptor = Descriptor { compressed_index: false, ..SEQUENTIAL }; + +/// Descriptor for small persistent caches with random updates. Oldest entries +/// are deleted after limit_size reached. 
+pub(crate) static RANDOM_SMALL_CACHE: Descriptor = Descriptor { + compaction: CompactionStyle::Fifo, + cache_disp: CacheDisp::Unique, + limit_size: 1024 * 1024 * 64, + ttl: 60 * 60 * 24 * 14, + file_shape: 2, + ..RANDOM_SMALL +}; diff --git a/src/database/engine/open.rs b/src/database/engine/open.rs index 24010c3a..84e59a6a 100644 --- a/src/database/engine/open.rs +++ b/src/database/engine/open.rs @@ -101,13 +101,11 @@ fn configure_cfds( debug!("Creating new column {name:?} not previously found in existing database."); }); - let missing_descriptors = missing - .clone() - .map(|_| Descriptor { dropped: true, ..descriptor::BASE }); + let missing_descriptors = missing.clone().map(|_| descriptor::DROPPED); let cfopts: Vec<_> = desc .iter() - .cloned() + .copied() .chain(missing_descriptors) .map(|ref desc| cf_options(ctx, db_opts.clone(), desc)) .collect::>()?; diff --git a/src/database/maps.rs b/src/database/maps.rs index 311c629f..19f9ced4 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -233,7 +233,7 @@ pub(super) static MAPS: &[Descriptor] = &[ }, Descriptor { name: "servername_destination", - ..descriptor::RANDOM_SMALL + ..descriptor::RANDOM_SMALL_CACHE }, Descriptor { name: "servername_educount", @@ -241,7 +241,7 @@ pub(super) static MAPS: &[Descriptor] = &[ }, Descriptor { name: "servername_override", - ..descriptor::RANDOM_SMALL + ..descriptor::RANDOM_SMALL_CACHE }, Descriptor { name: "servernameevent_data", From d8ea8b378cf2ee9ff7644fdb6c5a33d05923a51d Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 15 Mar 2025 02:35:10 +0000 Subject: [PATCH 146/310] add Map::clear() to db interface Signed-off-by: Jason Volk --- src/database/map.rs | 1 + src/database/map/clear.rs | 30 ++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+) create mode 100644 src/database/map/clear.rs diff --git a/src/database/map.rs b/src/database/map.rs index c5a908ba..ed38e1fc 100644 --- a/src/database/map.rs +++ b/src/database/map.rs @@ -1,3 +1,4 @@ +mod clear; pub mod compact; mod contains; mod count; diff --git a/src/database/map/clear.rs b/src/database/map/clear.rs new file mode 100644 index 00000000..321ec79c --- /dev/null +++ b/src/database/map/clear.rs @@ -0,0 +1,30 @@ +use std::sync::Arc; + +use conduwuit::{ + Result, implement, + utils::stream::{ReadyExt, TryIgnore}, +}; +use futures::{Stream, TryStreamExt}; + +use crate::keyval::Key; + +/// Delete all data stored in this map. !!! USE WITH CAUTION !!! +/// +/// See for_clear() with additional details. +#[implement(super::Map)] +#[tracing::instrument(level = "trace")] +pub async fn clear(self: &Arc) { + self.for_clear().ignore_err().ready_for_each(|_| ()).await; +} + +/// Delete all data stored in this map. !!! USE WITH CAUTION !!! +/// +/// Provides stream of keys undergoing deletion along with any errors. +/// +/// Note this operation applies to a snapshot of the data when invoked. +/// Additional data written during or after this call may be missed. 
+#[implement(super::Map)] +#[tracing::instrument(level = "trace")] +pub fn for_clear(self: &Arc) -> impl Stream>> + Send { + self.raw_keys().inspect_ok(|key| self.remove(key)) +} From 9ce95a703038e8603da62f15516f205ca70ad962 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 15 Mar 2025 04:07:53 +0000 Subject: [PATCH 147/310] make service memory_usage()/clear_cache() async trait Signed-off-by: Jason Volk --- src/service/globals/mod.rs | 6 ++- src/service/rooms/event_handler/mod.rs | 4 +- src/service/rooms/spaces/mod.rs | 14 +++++- src/service/rooms/state/mod.rs | 4 +- src/service/rooms/state_accessor/mod.rs | 6 ++- src/service/rooms/state_compressor/mod.rs | 6 ++- src/service/rooms/timeline/mod.rs | 4 +- src/service/service.rs | 4 +- src/service/services.rs | 57 ++++++++++------------- 9 files changed, 61 insertions(+), 44 deletions(-) diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 74f83228..1dd7db8e 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -7,6 +7,7 @@ use std::{ time::Instant, }; +use async_trait::async_trait; use conduwuit::{Result, Server, error, utils::bytes::pretty}; use data::Data; use regex::RegexSet; @@ -27,6 +28,7 @@ pub struct Service { type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let db = Data::new(&args); @@ -73,7 +75,7 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result { + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { let (ber_count, ber_bytes) = self.bad_event_ratelimiter.read()?.iter().fold( (0_usize, 0_usize), |(mut count, mut bytes), (event_id, _)| { @@ -89,7 +91,7 @@ impl crate::Service for Service { Ok(()) } - fn clear_cache(&self) { + async fn clear_cache(&self) { self.bad_event_ratelimiter .write() .expect("locked for writing") diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index e9e79ce4..4944f3ec 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -17,6 +17,7 @@ use std::{ time::Instant, }; +use async_trait::async_trait; use conduwuit::{ Err, PduEvent, Result, RoomVersion, Server, utils::{MutexMap, TryFutureExtExt}, @@ -54,6 +55,7 @@ struct Services { type RoomMutexMap = MutexMap; type HandleTimeMap = HashMap; +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { @@ -79,7 +81,7 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result<()> { + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { let mutex_federation = self.mutex_federation.len(); writeln!(out, "federation_mutex: {mutex_federation}")?; diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 1da38234..55897f9c 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -2,8 +2,9 @@ mod pagination_token; #[cfg(test)] mod tests; -use std::sync::Arc; +use std::{fmt::Write, sync::Arc}; +use async_trait::async_trait; use conduwuit::{ Err, Error, Result, implement, utils::{ @@ -70,6 +71,7 @@ pub enum Identifier<'a> { type Cache = LruCache>; +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let config = &args.server.config; @@ -90,6 +92,16 @@ impl crate::Service for Service { })) } + async fn memory_usage(&self, out: &mut (dyn 
Write + Send)) -> Result { + let roomid_spacehierarchy_cache = self.roomid_spacehierarchy_cache.lock().await.len(); + + writeln!(out, "roomid_spacehierarchy_cache: {roomid_spacehierarchy_cache}")?; + + Ok(()) + } + + async fn clear_cache(&self) { self.roomid_spacehierarchy_cache.lock().await.clear(); } + fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 8683a3be..56955497 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,5 +1,6 @@ use std::{collections::HashMap, fmt::Write, iter::once, sync::Arc}; +use async_trait::async_trait; use conduwuit::{ PduEvent, Result, err, result::FlatOk, @@ -56,6 +57,7 @@ struct Data { type RoomMutexMap = MutexMap; pub type RoomMutexGuard = MutexMapGuard; +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { @@ -79,7 +81,7 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result { + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { let mutex = self.mutex.len(); writeln!(out, "state_mutex: {mutex}")?; diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 7004e35a..652fdbd7 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -8,6 +8,7 @@ use std::{ sync::{Arc, Mutex as StdMutex, Mutex}, }; +use async_trait::async_trait; use conduwuit::{ Result, err, utils, utils::math::{Expected, usize_from_f64}, @@ -57,6 +58,7 @@ struct Data { shorteventid_shortstatehash: Arc, } +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let config = &args.server.config; @@ -86,7 +88,7 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result { + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { use utils::bytes::pretty; let (svc_count, svc_bytes) = self.server_visibility_cache.lock()?.iter().fold( @@ -119,7 +121,7 @@ impl crate::Service for Service { Ok(()) } - fn clear_cache(&self) { + async fn clear_cache(&self) { self.server_visibility_cache.lock().expect("locked").clear(); self.user_visibility_cache.lock().expect("locked").clear(); } diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 305d3187..56a91d0e 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -5,6 +5,7 @@ use std::{ sync::{Arc, Mutex}, }; +use async_trait::async_trait; use conduwuit::{ Result, arrayvec::ArrayVec, @@ -65,6 +66,7 @@ type ParentStatesVec = Vec; pub type CompressedState = BTreeSet; pub type CompressedStateEvent = [u8; 2 * size_of::()]; +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { let config = &args.server.config; @@ -82,7 +84,7 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result { + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { let (cache_len, ents) = { let cache = self.stateinfo_cache.lock().expect("locked"); let ents = cache.iter().map(at!(1)).flat_map(|vec| vec.iter()).fold( @@ -108,7 +110,7 @@ impl crate::Service for Service { Ok(()) } - fn clear_cache(&self) { self.stateinfo_cache.lock().expect("locked").clear(); } + async fn clear_cache(&self) { self.stateinfo_cache.lock().expect("locked").clear(); } fn name(&self) 
-> &str { crate::service::make_name(std::module_path!()) } } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 826a1dae..dc359d22 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -9,6 +9,7 @@ use std::{ sync::Arc, }; +use async_trait::async_trait; use conduwuit::{ Err, Error, Result, Server, at, debug, debug_warn, err, error, implement, info, pdu::{EventHash, PduBuilder, PduCount, PduEvent, gen_event_id}, @@ -109,6 +110,7 @@ struct Services { type RoomMutexMap = MutexMap; pub type RoomMutexGuard = MutexMapGuard; +#[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { @@ -142,7 +144,7 @@ impl crate::Service for Service { })) } - fn memory_usage(&self, out: &mut dyn Write) -> Result<()> { + async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { let mutex_insert = self.mutex_insert.len(); writeln!(out, "insert_mutex: {mutex_insert}")?; diff --git a/src/service/service.rs b/src/service/service.rs index 2907a562..574efd8f 100644 --- a/src/service/service.rs +++ b/src/service/service.rs @@ -31,10 +31,10 @@ pub(crate) trait Service: Any + Send + Sync { fn interrupt(&self) {} /// Clear any caches or similar runtime state. - fn clear_cache(&self) {} + async fn clear_cache(&self) {} /// Memory usage report in a markdown string. - fn memory_usage(&self, _out: &mut dyn Write) -> Result<()> { Ok(()) } + async fn memory_usage(&self, _out: &mut (dyn Write + Send)) -> Result { Ok(()) } /// Return the name of the service. /// i.e. `crate::service::make_name(std::module_path!())` diff --git a/src/service/services.rs b/src/service/services.rs index 269a1f87..dc390054 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -1,12 +1,12 @@ use std::{ any::Any, collections::BTreeMap, - fmt::Write, sync::{Arc, RwLock}, }; -use conduwuit::{Result, Server, debug, debug_info, info, trace}; +use conduwuit::{Result, Server, debug, debug_info, info, trace, utils::stream::IterStream}; use database::Database; +use futures::{Stream, StreamExt, TryStreamExt}; use tokio::sync::Mutex; use crate::{ @@ -171,40 +171,21 @@ impl Services { } pub async fn clear_cache(&self) { - for (service, ..) in self.service.read().expect("locked for reading").values() { - if let Some(service) = service.upgrade() { - service.clear_cache(); - } - } - - //TODO - self.rooms - .spaces - .roomid_spacehierarchy_cache - .lock() - .await - .clear(); + self.services() + .for_each(|service| async move { + service.clear_cache().await; + }) + .await; } pub async fn memory_usage(&self) -> Result { - let mut out = String::new(); - for (service, ..) 
in self.service.read().expect("locked for reading").values() { - if let Some(service) = service.upgrade() { - service.memory_usage(&mut out)?; - } - } - - //TODO - let roomid_spacehierarchy_cache = self - .rooms - .spaces - .roomid_spacehierarchy_cache - .lock() + self.services() + .map(Ok) + .try_fold(String::new(), |mut out, service| async move { + service.memory_usage(&mut out).await?; + Ok(out) + }) .await - .len(); - writeln!(out, "roomid_spacehierarchy_cache: {roomid_spacehierarchy_cache}")?; - - Ok(out) } fn interrupt(&self) { @@ -217,6 +198,18 @@ impl Services { } } + /// Iterate from snapshot of the services map + fn services(&self) -> impl Stream> + Send { + self.service + .read() + .expect("locked for reading") + .values() + .filter_map(|val| val.0.upgrade()) + .collect::>() + .into_iter() + .stream() + } + #[inline] pub fn try_get(&self, name: &str) -> Result> where From 8010505853c1c0a78254b0fd31e83d90baff7af3 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 15 Mar 2025 04:08:57 +0000 Subject: [PATCH 148/310] implement clear_cache() for resolver service Signed-off-by: Jason Volk --- src/service/resolver/cache.rs | 17 ++++++++++++++++- src/service/resolver/dns.rs | 4 ++++ src/service/resolver/mod.rs | 7 +++++++ 3 files changed, 27 insertions(+), 1 deletion(-) diff --git a/src/service/resolver/cache.rs b/src/service/resolver/cache.rs index 6b05c00c..cfea7187 100644 --- a/src/service/resolver/cache.rs +++ b/src/service/resolver/cache.rs @@ -7,7 +7,7 @@ use conduwuit::{ utils::{math::Expected, rand, stream::TryIgnore}, }; use database::{Cbor, Deserialized, Map}; -use futures::{Stream, StreamExt}; +use futures::{Stream, StreamExt, future::join}; use ruma::ServerName; use serde::{Deserialize, Serialize}; @@ -45,6 +45,21 @@ impl Cache { } } +#[implement(Cache)] +pub async fn clear(&self) { join(self.clear_destinations(), self.clear_overrides()).await; } + +#[implement(Cache)] +pub async fn clear_destinations(&self) { self.destinations.clear().await; } + +#[implement(Cache)] +pub async fn clear_overrides(&self) { self.overrides.clear().await; } + +#[implement(Cache)] +pub fn del_destination(&self, name: &ServerName) { self.destinations.remove(name); } + +#[implement(Cache)] +pub fn del_override(&self, name: &ServerName) { self.overrides.remove(name); } + #[implement(Cache)] pub fn set_destination(&self, name: &ServerName, dest: &CachedDest) { self.destinations.raw_put(name, Cbor(dest)); diff --git a/src/service/resolver/dns.rs b/src/service/resolver/dns.rs index 98ad7e60..e4245a5b 100644 --- a/src/service/resolver/dns.rs +++ b/src/service/resolver/dns.rs @@ -78,6 +78,10 @@ impl Resolver { server: server.clone(), })) } + + /// Clear the in-memory hickory-dns caches + #[inline] + pub fn clear_cache(&self) { self.resolver.clear_cache(); } } impl Resolve for Resolver { diff --git a/src/service/resolver/mod.rs b/src/service/resolver/mod.rs index 2ec9c0ef..246d6bc1 100644 --- a/src/service/resolver/mod.rs +++ b/src/service/resolver/mod.rs @@ -6,6 +6,7 @@ mod tests; use std::sync::Arc; +use async_trait::async_trait; use conduwuit::{Result, Server, arrayvec::ArrayString, utils::MutexMap}; use self::{cache::Cache, dns::Resolver}; @@ -26,6 +27,7 @@ struct Services { type Resolving = MutexMap; type NameBuf = ArrayString<256>; +#[async_trait] impl crate::Service for Service { #[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)] fn build(args: crate::Args<'_>) -> Result> { @@ -41,5 +43,10 @@ impl crate::Service for Service { })) } + async fn 
clear_cache(&self) { + self.resolver.clear_cache(); + self.cache.clear().await; + } + fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } From 23e3f6526fd0318525a4cd1fe065dcf7f1d56935 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 19 Mar 2025 03:49:12 +0000 Subject: [PATCH 149/310] split well_known resolver into unit Signed-off-by: Jason Volk --- src/service/resolver/actual.rs | 55 +++--------------------------- src/service/resolver/mod.rs | 2 ++ src/service/resolver/tests.rs | 2 -- src/service/resolver/well_known.rs | 49 ++++++++++++++++++++++++++ 4 files changed, 55 insertions(+), 53 deletions(-) create mode 100644 src/service/resolver/well_known.rs diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index b037cf77..1ad76f66 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -3,7 +3,7 @@ use std::{ net::{IpAddr, SocketAddr}, }; -use conduwuit::{Err, Result, debug, debug_error, debug_info, debug_warn, err, error, trace}; +use conduwuit::{Err, Result, debug, debug_info, err, error, trace}; use futures::{FutureExt, TryFutureExt}; use hickory_resolver::error::ResolveError; use ipaddress::IPAddress; @@ -72,6 +72,9 @@ impl super::Service { if let Some(pos) = dest.as_str().find(':') { self.actual_dest_2(dest, cache, pos).await? } else { + self.conditional_query_and_cache(dest.as_str(), 8448, true) + .await?; + self.services.server.check_running()?; match self.request_well_known(dest.as_str()).await? { | Some(delegated) => self.actual_dest_3(&mut host, cache, delegated).await?, @@ -243,56 +246,6 @@ impl super::Service { Ok(add_port_to_hostname(dest.as_str())) } - #[tracing::instrument(name = "well-known", level = "debug", skip(self, dest))] - async fn request_well_known(&self, dest: &str) -> Result> { - self.conditional_query_and_cache(dest, 8448, true).await?; - - self.services.server.check_running()?; - trace!("Requesting well known for {dest}"); - let response = self - .services - .client - .well_known - .get(format!("https://{dest}/.well-known/matrix/server")) - .send() - .await; - - trace!("response: {response:?}"); - if let Err(e) = &response { - debug!("error: {e:?}"); - return Ok(None); - } - - let response = response?; - if !response.status().is_success() { - debug!("response not 2XX"); - return Ok(None); - } - - let text = response.text().await?; - trace!("response text: {text:?}"); - if text.len() >= 12288 { - debug_warn!("response contains junk"); - return Ok(None); - } - - let body: serde_json::Value = serde_json::from_str(&text).unwrap_or_default(); - - let m_server = body - .get("m.server") - .unwrap_or(&serde_json::Value::Null) - .as_str() - .unwrap_or_default(); - - if ruma::identifiers_validation::server_name::validate(m_server).is_err() { - debug_error!("response content missing or invalid"); - return Ok(None); - } - - debug_info!("{dest:?} found at {m_server:?}"); - Ok(Some(m_server.to_owned())) - } - #[inline] async fn conditional_query_and_cache( &self, diff --git a/src/service/resolver/mod.rs b/src/service/resolver/mod.rs index 246d6bc1..c513cec9 100644 --- a/src/service/resolver/mod.rs +++ b/src/service/resolver/mod.rs @@ -2,7 +2,9 @@ pub mod actual; pub mod cache; mod dns; pub mod fed; +#[cfg(test)] mod tests; +mod well_known; use std::sync::Arc; diff --git a/src/service/resolver/tests.rs b/src/service/resolver/tests.rs index 6e9d0e71..068e08bd 100644 --- a/src/service/resolver/tests.rs +++ b/src/service/resolver/tests.rs @@ -1,5 +1,3 @@ -#![cfg(test)] - use super::fed::{FedDest, 
add_port_to_hostname, get_ip_with_port}; #[test] diff --git a/src/service/resolver/well_known.rs b/src/service/resolver/well_known.rs new file mode 100644 index 00000000..68a8e620 --- /dev/null +++ b/src/service/resolver/well_known.rs @@ -0,0 +1,49 @@ +use conduwuit::{Result, debug, debug_error, debug_info, debug_warn, implement, trace}; + +#[implement(super::Service)] +#[tracing::instrument(name = "well-known", level = "debug", skip(self, dest))] +pub(super) async fn request_well_known(&self, dest: &str) -> Result> { + trace!("Requesting well known for {dest}"); + let response = self + .services + .client + .well_known + .get(format!("https://{dest}/.well-known/matrix/server")) + .send() + .await; + + trace!("response: {response:?}"); + if let Err(e) = &response { + debug!("error: {e:?}"); + return Ok(None); + } + + let response = response?; + if !response.status().is_success() { + debug!("response not 2XX"); + return Ok(None); + } + + let text = response.text().await?; + trace!("response text: {text:?}"); + if text.len() >= 12288 { + debug_warn!("response contains junk"); + return Ok(None); + } + + let body: serde_json::Value = serde_json::from_str(&text).unwrap_or_default(); + + let m_server = body + .get("m.server") + .unwrap_or(&serde_json::Value::Null) + .as_str() + .unwrap_or_default(); + + if ruma::identifiers_validation::server_name::validate(m_server).is_err() { + debug_error!("response content missing or invalid"); + return Ok(None); + } + + debug_info!("{dest:?} found at {m_server:?}"); + Ok(Some(m_server.to_owned())) +} From d1b82ea2253179836cf7400f70960d583b25af50 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 21 Mar 2025 08:10:44 +0000 Subject: [PATCH 150/310] use #[ignore] for todo'ed tests Signed-off-by: Jason Volk --- src/database/tests.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/database/tests.rs b/src/database/tests.rs index 1446a1fc..c1a9f47c 100644 --- a/src/database/tests.rs +++ b/src/database/tests.rs @@ -325,8 +325,8 @@ fn ser_array() { assert_eq!(&s, &v, "vec serialization does not match"); } -#[cfg(todo)] #[test] +#[ignore] fn de_array() { let a: u64 = 123_456; let b: u64 = 987_654; @@ -357,8 +357,8 @@ fn de_array() { assert_eq!(vec[1], b, "deserialized vec [1] does not match"); } -#[cfg(todo)] #[test] +#[ignore] fn de_complex() { type Key<'a> = (&'a UserId, ArrayVec, &'a RoomId); From 9d0ce3965ea655943304b41ca679507b850130d3 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 22 Mar 2025 07:09:11 +0000 Subject: [PATCH 151/310] fix lints Signed-off-by: Jason Volk --- src/api/client/context.rs | 2 +- src/core/error/response.rs | 2 +- src/service/media/preview.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/api/client/context.rs b/src/api/client/context.rs index cb95dfef..b109711e 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -105,7 +105,7 @@ pub(crate) async fn get_context_route( .collect(); let (base_event, events_before, events_after): (_, Vec<_>, Vec<_>) = - join3(base_event, events_before, events_after).await; + join3(base_event, events_before, events_after).boxed().await; let lazy_loading_context = lazy_loading::Context { user_id: sender_user, diff --git a/src/core/error/response.rs b/src/core/error/response.rs index 00ade5ae..ae6fce62 100644 --- a/src/core/error/response.rs +++ b/src/core/error/response.rs @@ -86,7 +86,7 @@ pub(super) fn bad_request_code(kind: &ErrorKind) -> StatusCode { pub(super) fn ruma_error_message(error: &ruma::api::client::error::Error) -> 
String { if let ErrorBody::Standard { message, .. } = &error.body { - return message.to_string(); + return message.clone(); } format!("{error}") diff --git a/src/service/media/preview.rs b/src/service/media/preview.rs index ba5be7d4..91660a58 100644 --- a/src/service/media/preview.rs +++ b/src/service/media/preview.rs @@ -256,7 +256,7 @@ pub fn url_preview_allowed(&self, url: &Url) -> bool { if allowlist_url_contains .iter() - .any(|url_s| url.to_string().contains(&url_s.to_string())) + .any(|url_s| url.to_string().contains(url_s)) { debug!("URL {} is allowed by url_preview_url_contains_allowlist (check 4/4)", &host); return true; From 07ba00f74e2dfea314d0e5236f0415b2de6d543c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 26 Mar 2025 04:40:38 +0000 Subject: [PATCH 152/310] abstract raw query command iterations Signed-off-by: Jason Volk --- src/admin/query/raw.rs | 141 ++++++++++------------------------------- 1 file changed, 35 insertions(+), 106 deletions(-) diff --git a/src/admin/query/raw.rs b/src/admin/query/raw.rs index 23f11cc8..c503eee5 100644 --- a/src/admin/query/raw.rs +++ b/src/admin/query/raw.rs @@ -1,15 +1,16 @@ -use std::{borrow::Cow, collections::BTreeMap, ops::Deref}; +use std::{borrow::Cow, collections::BTreeMap, ops::Deref, sync::Arc}; use clap::Subcommand; use conduwuit::{ Err, Result, apply, at, is_zero, utils::{ - IterStream, - stream::{ReadyExt, TryIgnore, TryParallelExt}, + stream::{IterStream, ReadyExt, TryIgnore, TryParallelExt}, string::EMPTY, }, }; -use futures::{FutureExt, StreamExt, TryStreamExt}; +use conduwuit_database::Map; +use conduwuit_service::Services; +use futures::{FutureExt, Stream, StreamExt, TryStreamExt}; use ruma::events::room::message::RoomMessageEventContent; use tokio::time::Instant; @@ -172,22 +173,18 @@ pub(super) async fn compact( ) -> Result { use conduwuit_database::compact::Options; - let default_all_maps = map - .is_none() - .then(|| { - self.services - .db - .keys() - .map(Deref::deref) - .map(ToOwned::to_owned) - }) - .into_iter() - .flatten(); + let default_all_maps: Option<_> = map.is_none().then(|| { + self.services + .db + .keys() + .map(Deref::deref) + .map(ToOwned::to_owned) + }); let maps: Vec<_> = map .unwrap_or_default() .into_iter() - .chain(default_all_maps) + .chain(default_all_maps.into_iter().flatten()) .map(|map| self.services.db.get(&map)) .filter_map(Result::ok) .cloned() @@ -237,25 +234,8 @@ pub(super) async fn raw_count( ) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); - let default_all_maps = map - .is_none() - .then(|| self.services.db.keys().map(Deref::deref)) - .into_iter() - .flatten(); - - let maps: Vec<_> = map - .iter() - .map(String::as_str) - .chain(default_all_maps) - .map(|map| self.services.db.get(map)) - .filter_map(Result::ok) - .cloned() - .collect(); - let timer = Instant::now(); - let count = maps - .iter() - .stream() + let count = with_maps_or(map.as_deref(), self.services) .then(|map| map.raw_count_prefix(&prefix)) .ready_fold(0_usize, usize::saturating_add) .await; @@ -300,25 +280,8 @@ pub(super) async fn raw_keys_sizes( ) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); - let default_all_maps = map - .is_none() - .then(|| self.services.db.keys().map(Deref::deref)) - .into_iter() - .flatten(); - - let maps: Vec<_> = map - .iter() - .map(String::as_str) - .chain(default_all_maps) - .map(|map| self.services.db.get(map)) - .filter_map(Result::ok) - .cloned() - .collect(); - let timer = Instant::now(); - let result = maps - .iter() - .stream() + let result = 
with_maps_or(map.as_deref(), self.services) .map(|map| map.raw_keys_prefix(&prefix)) .flatten() .ignore_err() @@ -345,25 +308,8 @@ pub(super) async fn raw_keys_total( ) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); - let default_all_maps = map - .is_none() - .then(|| self.services.db.keys().map(Deref::deref)) - .into_iter() - .flatten(); - - let maps: Vec<_> = map - .iter() - .map(String::as_str) - .chain(default_all_maps) - .map(|map| self.services.db.get(map)) - .filter_map(Result::ok) - .cloned() - .collect(); - let timer = Instant::now(); - let result = maps - .iter() - .stream() + let result = with_maps_or(map.as_deref(), self.services) .map(|map| map.raw_keys_prefix(&prefix)) .flatten() .ignore_err() @@ -387,25 +333,8 @@ pub(super) async fn raw_vals_sizes( ) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); - let default_all_maps = map - .is_none() - .then(|| self.services.db.keys().map(Deref::deref)) - .into_iter() - .flatten(); - - let maps: Vec<_> = map - .iter() - .map(String::as_str) - .chain(default_all_maps) - .map(|map| self.services.db.get(map)) - .filter_map(Result::ok) - .cloned() - .collect(); - let timer = Instant::now(); - let result = maps - .iter() - .stream() + let result = with_maps_or(map.as_deref(), self.services) .map(|map| map.raw_stream_prefix(&prefix)) .flatten() .ignore_err() @@ -433,25 +362,8 @@ pub(super) async fn raw_vals_total( ) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); - let default_all_maps = map - .is_none() - .then(|| self.services.db.keys().map(Deref::deref)) - .into_iter() - .flatten(); - - let maps: Vec<_> = map - .iter() - .map(String::as_str) - .chain(default_all_maps) - .map(|map| self.services.db.get(map)) - .filter_map(Result::ok) - .cloned() - .collect(); - let timer = Instant::now(); - let result = maps - .iter() - .stream() + let result = with_maps_or(map.as_deref(), self.services) .map(|map| map.raw_stream_prefix(&prefix)) .flatten() .ignore_err() @@ -573,3 +485,20 @@ pub(super) async fn raw_maps(&self) -> Result { Ok(RoomMessageEventContent::notice_markdown(format!("{list:#?}"))) } + +fn with_maps_or<'a>( + map: Option<&'a str>, + services: &'a Services, +) -> impl Stream> + Send + 'a { + let default_all_maps = map + .is_none() + .then(|| services.db.keys().map(Deref::deref)) + .into_iter() + .flatten(); + + map.into_iter() + .chain(default_all_maps) + .map(|map| services.db.get(map)) + .filter_map(Result::ok) + .stream() +} From dfe058a244ad7592114c86d504fb6fed744ad524 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 27 Mar 2025 01:08:42 +0000 Subject: [PATCH 153/310] default config item to 'none' when zstd_compression not featured Signed-off-by: Jason Volk --- src/core/config/mod.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 6b669ad3..52df19ac 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -2158,7 +2158,12 @@ fn default_rocksdb_max_log_file_size() -> usize { fn default_rocksdb_parallelism_threads() -> usize { 0 } -fn default_rocksdb_compression_algo() -> String { "zstd".to_owned() } +fn default_rocksdb_compression_algo() -> String { + cfg!(feature = "zstd_compression") + .then_some("zstd") + .unwrap_or("none") + .to_owned() +} /// Default RocksDB compression level is 32767, which is internally read by /// RocksDB as the default magic number and translated to the library's default From c99f5770a01ebae978461605c0f6eb954f7bad1b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 27 Mar 
2025 04:07:24 +0000 Subject: [PATCH 154/310] mark get_summary_and_children_federation Send Signed-off-by: Jason Volk --- src/service/rooms/spaces/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 55897f9c..af597445 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -238,7 +238,7 @@ async fn get_summary_and_children_federation( fn get_stripped_space_child_events<'a>( &'a self, room_id: &'a RoomId, -) -> impl Stream> + 'a { +) -> impl Stream> + Send + 'a { self.services .state .get_room_shortstatehash(room_id) From 7f448d88a430cc2869fe9ab366fd29b3fddb0f13 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 27 Mar 2025 03:34:33 +0000 Subject: [PATCH 155/310] use qualified crate names from within workspace Signed-off-by: Jason Volk --- src/main/clap.rs | 9 +++++++-- src/main/logging.rs | 2 +- src/main/main.rs | 4 +--- src/main/mods.rs | 8 ++++---- src/main/restart.rs | 2 +- src/main/runtime.rs | 11 ++++++----- src/main/sentry.rs | 4 ++-- src/main/server.rs | 10 +++++----- src/main/signal.rs | 2 +- 9 files changed, 28 insertions(+), 24 deletions(-) diff --git a/src/main/clap.rs b/src/main/clap.rs index c7f33bfe..35a7ea41 100644 --- a/src/main/clap.rs +++ b/src/main/clap.rs @@ -3,7 +3,7 @@ use std::path::PathBuf; use clap::{ArgAction, Parser}; -use conduwuit::{ +use conduwuit_core::{ Err, Result, config::{Figment, FigmentValue}, err, toml, @@ -12,7 +12,12 @@ use conduwuit::{ /// Commandline arguments #[derive(Parser, Debug)] -#[clap(version = conduwuit::version(), about, long_about = None, name = "conduwuit")] +#[clap( + about, + long_about = None, + name = "conduwuit", + version = conduwuit_core::version(), +)] pub(crate) struct Args { #[arg(short, long)] /// Path to the config TOML file (optional) diff --git a/src/main/logging.rs b/src/main/logging.rs index 7ce86d56..eeeda127 100644 --- a/src/main/logging.rs +++ b/src/main/logging.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{ +use conduwuit_core::{ Result, config::Config, debug_warn, err, diff --git a/src/main/main.rs b/src/main/main.rs index 2bfc3c06..fbc63b17 100644 --- a/src/main/main.rs +++ b/src/main/main.rs @@ -7,11 +7,9 @@ mod sentry; mod server; mod signal; -extern crate conduwuit_core as conduwuit; - use std::sync::{Arc, atomic::Ordering}; -use conduwuit::{Error, Result, debug_info, error, rustc_flags_capture}; +use conduwuit_core::{Error, Result, debug_info, error, rustc_flags_capture}; use server::Server; rustc_flags_capture! {} diff --git a/src/main/mods.rs b/src/main/mods.rs index 6dc79b2f..d585a381 100644 --- a/src/main/mods.rs +++ b/src/main/mods.rs @@ -9,13 +9,13 @@ use std::{ sync::{Arc, atomic::Ordering}, }; -use conduwuit::{Error, Result, debug, error, mods}; +use conduwuit_core::{Error, Result, debug, error, mods}; use conduwuit_service::Services; use crate::Server; type StartFuncResult = Pin>> + Send>>; -type StartFuncProto = fn(&Arc) -> StartFuncResult; +type StartFuncProto = fn(&Arc) -> StartFuncResult; type RunFuncResult = Pin> + Send>>; type RunFuncProto = fn(&Arc) -> RunFuncResult; @@ -34,8 +34,8 @@ const MODULE_NAMES: &[&str] = &[ ]; #[cfg(panic_trap)] -conduwuit::mod_init! {{ - conduwuit::debug::set_panic_trap(); +conduwuit_core::mod_init! 
{{ + conduwuit_core::debug::set_panic_trap(); }} pub(crate) async fn run(server: &Arc, starts: bool) -> Result<(bool, bool), Error> { diff --git a/src/main/restart.rs b/src/main/restart.rs index e6f45b82..b9d1dc94 100644 --- a/src/main/restart.rs +++ b/src/main/restart.rs @@ -2,7 +2,7 @@ use std::{env, os::unix::process::CommandExt, process::Command}; -use conduwuit::{debug, info, utils}; +use conduwuit_core::{debug, info, utils}; #[cold] pub(super) fn restart() -> ! { diff --git a/src/main/runtime.rs b/src/main/runtime.rs index b3174e9c..b1657289 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -9,8 +9,8 @@ use std::{ }; #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] -use conduwuit::result::LogDebugErr; -use conduwuit::{ +use conduwuit_core::result::LogDebugErr; +use conduwuit_core::{ Result, is_true, utils::sys::compute::{nth_core_available, set_affinity}, }; @@ -122,7 +122,7 @@ fn set_worker_affinity() { #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] fn set_worker_mallctl(id: usize) { - use conduwuit::alloc::je::{ + use conduwuit_core::alloc::je::{ is_affine_arena, this_thread::{set_arena, set_muzzy_decay}, }; @@ -135,7 +135,8 @@ fn set_worker_mallctl(id: usize) { .get() .expect("GC_MUZZY initialized by runtime::new()"); - let muzzy_auto_disable = conduwuit::utils::available_parallelism() >= DISABLE_MUZZY_THRESHOLD; + let muzzy_auto_disable = + conduwuit_core::utils::available_parallelism() >= DISABLE_MUZZY_THRESHOLD; if matches!(muzzy_option, Some(false) | None if muzzy_auto_disable) { set_muzzy_decay(-1).log_debug_err().ok(); } @@ -188,7 +189,7 @@ fn thread_park() { fn gc_on_park() { #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] - conduwuit::alloc::je::this_thread::decay() + conduwuit_core::alloc::je::this_thread::decay() .log_debug_err() .ok(); } diff --git a/src/main/sentry.rs b/src/main/sentry.rs index 1ea1f3ae..68f12eb7 100644 --- a/src/main/sentry.rs +++ b/src/main/sentry.rs @@ -5,7 +5,7 @@ use std::{ sync::{Arc, OnceLock}, }; -use conduwuit::{config::Config, debug, trace}; +use conduwuit_core::{config::Config, debug, trace}; use sentry::{ Breadcrumb, ClientOptions, Level, types::{ @@ -43,7 +43,7 @@ fn options(config: &Config) -> ClientOptions { traces_sample_rate: config.sentry_traces_sample_rate, debug: cfg!(debug_assertions), release: sentry::release_name!(), - user_agent: conduwuit::version::user_agent().into(), + user_agent: conduwuit_core::version::user_agent().into(), attach_stacktrace: config.sentry_attach_stacktrace, before_send: Some(Arc::new(before_send)), before_breadcrumb: Some(Arc::new(before_breadcrumb)), diff --git a/src/main/server.rs b/src/main/server.rs index 44ca69b0..8f697ca4 100644 --- a/src/main/server.rs +++ b/src/main/server.rs @@ -1,6 +1,6 @@ use std::{path::PathBuf, sync::Arc}; -use conduwuit::{ +use conduwuit_core::{ Error, Result, config::Config, info, @@ -14,7 +14,7 @@ use crate::{clap::Args, logging::TracingFlameGuard}; /// Server runtime state; complete pub(crate) struct Server { /// Server runtime state; public portion - pub(crate) server: Arc, + pub(crate) server: Arc, pub(crate) services: Mutex>>, @@ -25,7 +25,7 @@ pub(crate) struct Server { #[cfg(all(conduwuit_mods, feature = "conduwuit_mods"))] // Module instances; TODO: move to mods::loaded mgmt vector - pub(crate) mods: tokio::sync::RwLock>, + pub(crate) mods: tokio::sync::RwLock>, } impl Server { @@ -66,11 +66,11 @@ impl Server { database_path = ?config.database_path, log_levels = %config.log, "{}", - conduwuit::version(), + 
conduwuit_core::version(), ); Ok(Arc::new(Self { - server: Arc::new(conduwuit::Server::new(config, runtime.cloned(), Log { + server: Arc::new(conduwuit_core::Server::new(config, runtime.cloned(), Log { reload: tracing_reload_handle, capture, })), diff --git a/src/main/signal.rs b/src/main/signal.rs index 343b95c9..a5d07774 100644 --- a/src/main/signal.rs +++ b/src/main/signal.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{debug_error, trace, warn}; +use conduwuit_core::{debug_error, trace, warn}; use tokio::signal; use super::server::Server; From b2bf35cfab8aac82e4cde1c7c5a7b6e713bba5db Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 28 Mar 2025 06:42:30 +0000 Subject: [PATCH 156/310] fix benches from state-res Signed-off-by: Jason Volk --- src/core/state_res/benches.rs | 672 ++++++++++++++++++++++++++ src/core/state_res/mod.rs | 3 + src/core/state_res/state_res_bench.rs | 648 ------------------------- 3 files changed, 675 insertions(+), 648 deletions(-) create mode 100644 src/core/state_res/benches.rs delete mode 100644 src/core/state_res/state_res_bench.rs diff --git a/src/core/state_res/benches.rs b/src/core/state_res/benches.rs new file mode 100644 index 00000000..7a1ae5bf --- /dev/null +++ b/src/core/state_res/benches.rs @@ -0,0 +1,672 @@ +#[cfg(conduwuit_bench)] +extern crate test; + +use std::{ + borrow::Borrow, + collections::{HashMap, HashSet}, + sync::{ + Arc, + atomic::{AtomicU64, Ordering::SeqCst}, + }, +}; + +use futures::{future, future::ready}; +use maplit::{btreemap, hashmap, hashset}; +use ruma::{ + EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId, Signatures, UserId, + events::{ + StateEventType, TimelineEventType, + pdu::{EventHash, Pdu, RoomV3Pdu}, + room::{ + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + }, + }, + int, room_id, uint, user_id, +}; +use serde_json::{ + json, + value::{RawValue as RawJsonValue, to_raw_value as to_raw_json_value}, +}; + +use self::event::PduEvent; +use crate::state_res::{self as state_res, Error, Event, Result, StateMap}; + +static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); + +#[cfg(conduwuit_bench)] +#[cfg_attr(conduwuit_bench, bench)] +fn lexico_topo_sort(c: &mut test::Bencher) { + let graph = hashmap! { + event_id("l") => hashset![event_id("o")], + event_id("m") => hashset![event_id("n"), event_id("o")], + event_id("n") => hashset![event_id("o")], + event_id("o") => hashset![], // "o" has zero outgoing edges but 4 incoming edges + event_id("p") => hashset![event_id("o")], + }; + + c.iter(|| { + let _ = state_res::lexicographical_topological_sort(&graph, &|_| { + future::ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) + }); + }); +} + +#[cfg(conduwuit_bench)] +#[cfg_attr(conduwuit_bench, bench)] +fn resolution_shallow_auth_chain(c: &mut test::Bencher) { + let parallel_fetches = 32; + let mut store = TestStore(hashmap! 
{}); + + // build up the DAG + let (state_at_bob, state_at_charlie, _) = store.set_up(); + + c.iter(|| async { + let ev_map = store.0.clone(); + let state_sets = [&state_at_bob, &state_at_charlie]; + let fetch = |id: OwnedEventId| ready(ev_map.get(&id).map(Arc::clone)); + let exists = |id: OwnedEventId| ready(ev_map.get(&id).is_some()); + let auth_chain_sets: Vec> = state_sets + .iter() + .map(|map| { + store + .auth_event_ids(room_id(), map.values().cloned().collect()) + .unwrap() + }) + .collect(); + + let _ = match state_res::resolve( + &RoomVersionId::V6, + state_sets.into_iter(), + &auth_chain_sets, + &fetch, + &exists, + parallel_fetches, + ) + .await + { + | Ok(state) => state, + | Err(e) => panic!("{e}"), + }; + }); +} + +#[cfg(conduwuit_bench)] +#[cfg_attr(conduwuit_bench, bench)] +fn resolve_deeper_event_set(c: &mut test::Bencher) { + let parallel_fetches = 32; + let mut inner = INITIAL_EVENTS(); + let ban = BAN_STATE_SET(); + + inner.extend(ban); + let store = TestStore(inner.clone()); + + let state_set_a = [ + inner.get(&event_id("CREATE")).unwrap(), + inner.get(&event_id("IJR")).unwrap(), + inner.get(&event_id("IMA")).unwrap(), + inner.get(&event_id("IMB")).unwrap(), + inner.get(&event_id("IMC")).unwrap(), + inner.get(&event_id("MB")).unwrap(), + inner.get(&event_id("PA")).unwrap(), + ] + .iter() + .map(|ev| { + ( + (ev.event_type().clone().into(), ev.state_key().unwrap().into()), + ev.event_id().to_owned(), + ) + }) + .collect::>(); + + let state_set_b = [ + inner.get(&event_id("CREATE")).unwrap(), + inner.get(&event_id("IJR")).unwrap(), + inner.get(&event_id("IMA")).unwrap(), + inner.get(&event_id("IMB")).unwrap(), + inner.get(&event_id("IMC")).unwrap(), + inner.get(&event_id("IME")).unwrap(), + inner.get(&event_id("PA")).unwrap(), + ] + .iter() + .map(|ev| { + ( + (ev.event_type().clone().into(), ev.state_key().unwrap().into()), + ev.event_id().to_owned(), + ) + }) + .collect::>(); + + c.iter(|| async { + let state_sets = [&state_set_a, &state_set_b]; + let auth_chain_sets: Vec> = state_sets + .iter() + .map(|map| { + store + .auth_event_ids(room_id(), map.values().cloned().collect()) + .unwrap() + }) + .collect(); + + let fetch = |id: OwnedEventId| ready(inner.get(&id).map(Arc::clone)); + let exists = |id: OwnedEventId| ready(inner.get(&id).is_some()); + let _ = match state_res::resolve( + &RoomVersionId::V6, + state_sets.into_iter(), + &auth_chain_sets, + &fetch, + &exists, + parallel_fetches, + ) + .await + { + | Ok(state) => state, + | Err(_) => panic!("resolution failed during benchmarking"), + }; + }); +} + +//*///////////////////////////////////////////////////////////////////// +// +// IMPLEMENTATION DETAILS AHEAD +// +/////////////////////////////////////////////////////////////////////*/ +struct TestStore(HashMap>); + +#[allow(unused)] +impl TestStore { + fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> Result> { + self.0 + .get(event_id) + .map(Arc::clone) + .ok_or_else(|| Error::NotFound(format!("{} not found", event_id))) + } + + /// Returns the events that correspond to the `event_ids` sorted in the same + /// order. + fn get_events(&self, room_id: &RoomId, event_ids: &[OwnedEventId]) -> Result>> { + let mut events = vec![]; + for id in event_ids { + events.push(self.get_event(room_id, id)?); + } + Ok(events) + } + + /// Returns a Vec of the related auth events to the given `event`. 
+ fn auth_event_ids(&self, room_id: &RoomId, event_ids: Vec) -> Result> { + let mut result = HashSet::new(); + let mut stack = event_ids; + + // DFS for auth event chain + while !stack.is_empty() { + let ev_id = stack.pop().unwrap(); + if result.contains(&ev_id) { + continue; + } + + result.insert(ev_id.clone()); + + let event = self.get_event(room_id, ev_id.borrow())?; + + stack.extend(event.auth_events().map(ToOwned::to_owned)); + } + + Ok(result) + } + + /// Returns a vector representing the difference in auth chains of the given + /// `events`. + fn auth_chain_diff( + &self, + room_id: &RoomId, + event_ids: Vec>, + ) -> Result> { + let mut auth_chain_sets = vec![]; + for ids in event_ids { + // TODO state store `auth_event_ids` returns self in the event ids list + // when an event returns `auth_event_ids` self is not contained + let chain = self + .auth_event_ids(room_id, ids)? + .into_iter() + .collect::>(); + auth_chain_sets.push(chain); + } + + if let Some(first) = auth_chain_sets.first().cloned() { + let common = auth_chain_sets + .iter() + .skip(1) + .fold(first, |a, b| a.intersection(b).cloned().collect::>()); + + Ok(auth_chain_sets + .into_iter() + .flatten() + .filter(|id| !common.contains(id.borrow())) + .collect()) + } else { + Ok(vec![]) + } + } +} + +impl TestStore { + #[allow(clippy::type_complexity)] + fn set_up( + &mut self, + ) -> (StateMap, StateMap, StateMap) { + let create_event = to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + ); + let cre = create_event.event_id().to_owned(); + self.0.insert(cre.clone(), Arc::clone(&create_event)); + + let alice_mem = to_pdu_event( + "IMA", + alice(), + TimelineEventType::RoomMember, + Some(alice().to_string().as_str()), + member_content_join(), + &[cre.clone()], + &[cre.clone()], + ); + self.0 + .insert(alice_mem.event_id().to_owned(), Arc::clone(&alice_mem)); + + let join_rules = to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), + &[cre.clone(), alice_mem.event_id().to_owned()], + &[alice_mem.event_id().to_owned()], + ); + self.0 + .insert(join_rules.event_id().to_owned(), join_rules.clone()); + + // Bob and Charlie join at the same time, so there is a fork + // this will be represented in the state_sets when we resolve + let bob_mem = to_pdu_event( + "IMB", + bob(), + TimelineEventType::RoomMember, + Some(bob().to_string().as_str()), + member_content_join(), + &[cre.clone(), join_rules.event_id().to_owned()], + &[join_rules.event_id().to_owned()], + ); + self.0 + .insert(bob_mem.event_id().to_owned(), bob_mem.clone()); + + let charlie_mem = to_pdu_event( + "IMC", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().to_string().as_str()), + member_content_join(), + &[cre, join_rules.event_id().to_owned()], + &[join_rules.event_id().to_owned()], + ); + self.0 + .insert(charlie_mem.event_id().to_owned(), charlie_mem.clone()); + + let state_at_bob = [&create_event, &alice_mem, &join_rules, &bob_mem] + .iter() + .map(|ev| { + ( + (ev.event_type().clone().into(), ev.state_key().unwrap().into()), + ev.event_id().to_owned(), + ) + }) + .collect::>(); + + let state_at_charlie = [&create_event, &alice_mem, &join_rules, &charlie_mem] + .iter() + .map(|ev| { + ( + (ev.event_type().clone().into(), ev.state_key().unwrap().into()), + ev.event_id().to_owned(), + ) + }) + .collect::>(); + + let expected 
= [&create_event, &alice_mem, &join_rules, &bob_mem, &charlie_mem] + .iter() + .map(|ev| { + ( + (ev.event_type().clone().into(), ev.state_key().unwrap().into()), + ev.event_id().to_owned(), + ) + }) + .collect::>(); + + (state_at_bob, state_at_charlie, expected) + } +} + +fn event_id(id: &str) -> OwnedEventId { + if id.contains('$') { + return id.try_into().unwrap(); + } + format!("${}:foo", id).try_into().unwrap() +} + +fn alice() -> &'static UserId { user_id!("@alice:foo") } + +fn bob() -> &'static UserId { user_id!("@bob:foo") } + +fn charlie() -> &'static UserId { user_id!("@charlie:foo") } + +fn ella() -> &'static UserId { user_id!("@ella:foo") } + +fn room_id() -> &'static RoomId { room_id!("!test:foo") } + +fn member_content_ban() -> Box { + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Ban)).unwrap() +} + +fn member_content_join() -> Box { + to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Join)).unwrap() +} + +fn to_pdu_event( + id: &str, + sender: &UserId, + ev_type: TimelineEventType, + state_key: Option<&str>, + content: Box, + auth_events: &[S], + prev_events: &[S], +) -> Arc +where + S: AsRef, +{ + // We don't care if the addition happens in order just that it is atomic + // (each event has its own value) + let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); + let id = if id.contains('$') { + id.to_owned() + } else { + format!("${}:foo", id) + }; + let auth_events = auth_events + .iter() + .map(AsRef::as_ref) + .map(event_id) + .collect::>(); + let prev_events = prev_events + .iter() + .map(AsRef::as_ref) + .map(event_id) + .collect::>(); + + let state_key = state_key.map(ToOwned::to_owned); + Arc::new(PduEvent { + event_id: id.try_into().unwrap(), + rest: Pdu::RoomV3Pdu(RoomV3Pdu { + room_id: room_id().to_owned(), + sender: sender.to_owned(), + origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), + state_key, + kind: ev_type, + content, + redacts: None, + unsigned: btreemap! 
{}, + auth_events, + prev_events, + depth: uint!(0), + hashes: EventHash::new(String::new()), + signatures: Signatures::new(), + }), + }) +} + +// all graphs start with these input events +#[allow(non_snake_case)] +fn INITIAL_EVENTS() -> HashMap> { + vec![ + to_pdu_event::<&EventId>( + "CREATE", + alice(), + TimelineEventType::RoomCreate, + Some(""), + to_raw_json_value(&json!({ "creator": alice() })).unwrap(), + &[], + &[], + ), + to_pdu_event( + "IMA", + alice(), + TimelineEventType::RoomMember, + Some(alice().as_str()), + member_content_join(), + &["CREATE"], + &["CREATE"], + ), + to_pdu_event( + "IPOWER", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100 } })).unwrap(), + &["CREATE", "IMA"], + &["IMA"], + ), + to_pdu_event( + "IJR", + alice(), + TimelineEventType::RoomJoinRules, + Some(""), + to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["IPOWER"], + ), + to_pdu_event( + "IMB", + bob(), + TimelineEventType::RoomMember, + Some(bob().to_string().as_str()), + member_content_join(), + &["CREATE", "IJR", "IPOWER"], + &["IJR"], + ), + to_pdu_event( + "IMC", + charlie(), + TimelineEventType::RoomMember, + Some(charlie().to_string().as_str()), + member_content_join(), + &["CREATE", "IJR", "IPOWER"], + &["IMB"], + ), + to_pdu_event::<&EventId>( + "START", + charlie(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + &[], + &[], + ), + to_pdu_event::<&EventId>( + "END", + charlie(), + TimelineEventType::RoomTopic, + Some(""), + to_raw_json_value(&json!({})).unwrap(), + &[], + &[], + ), + ] + .into_iter() + .map(|ev| (ev.event_id().to_owned(), ev)) + .collect() +} + +// all graphs start with these input events +#[allow(non_snake_case)] +fn BAN_STATE_SET() -> HashMap> { + vec![ + to_pdu_event( + "PA", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + &["CREATE", "IMA", "IPOWER"], // auth_events + &["START"], // prev_events + ), + to_pdu_event( + "PB", + alice(), + TimelineEventType::RoomPowerLevels, + Some(""), + to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), + &["CREATE", "IMA", "IPOWER"], + &["END"], + ), + to_pdu_event( + "MB", + alice(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + member_content_ban(), + &["CREATE", "IMA", "PB"], + &["PA"], + ), + to_pdu_event( + "IME", + ella(), + TimelineEventType::RoomMember, + Some(ella().as_str()), + member_content_join(), + &["CREATE", "IJR", "PA"], + &["MB"], + ), + ] + .into_iter() + .map(|ev| (ev.event_id().to_owned(), ev)) + .collect() +} + +/// Convenience trait for adding event type plus state key to state maps. 
+trait EventTypeExt { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String); +} + +impl EventTypeExt for &TimelineEventType { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { + (self.to_string().into(), state_key.into()) + } +} + +mod event { + use ruma::{ + MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, + events::{TimelineEventType, pdu::Pdu}, + }; + use serde::{Deserialize, Serialize}; + use serde_json::value::RawValue as RawJsonValue; + + use super::Event; + + impl Event for PduEvent { + type Id = OwnedEventId; + + fn event_id(&self) -> &Self::Id { &self.event_id } + + fn room_id(&self) -> &RoomId { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.room_id, + | Pdu::RoomV3Pdu(ev) => &ev.room_id, + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn sender(&self) -> &UserId { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.sender, + | Pdu::RoomV3Pdu(ev) => &ev.sender, + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn event_type(&self) -> &TimelineEventType { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.kind, + | Pdu::RoomV3Pdu(ev) => &ev.kind, + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn content(&self) -> &RawJsonValue { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => &ev.content, + | Pdu::RoomV3Pdu(ev) => &ev.content, + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.origin_server_ts, + | Pdu::RoomV3Pdu(ev) => ev.origin_server_ts, + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn state_key(&self) -> Option<&str> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.state_key.as_deref(), + | Pdu::RoomV3Pdu(ev) => ev.state_key.as_deref(), + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn prev_events(&self) -> Box + Send + '_> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()), + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn auth_events(&self) -> Box + Send + '_> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()), + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + + fn redacts(&self) -> Option<&Self::Id> { + match &self.rest { + | Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(), + | Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(), + #[cfg(not(feature = "unstable-exhaustive-types"))] + | _ => unreachable!("new PDU version"), + } + } + } + + #[derive(Clone, Debug, Deserialize, Serialize)] + pub(crate) struct PduEvent { + pub(crate) event_id: OwnedEventId, + #[serde(flatten)] + pub(crate) rest: Pdu, + } +} diff --git a/src/core/state_res/mod.rs b/src/core/state_res/mod.rs index 6bff0cf8..2020d65c 100644 --- a/src/core/state_res/mod.rs +++ b/src/core/state_res/mod.rs @@ -9,6 +9,9 @@ mod state_event; #[cfg(test)] mod test_utils; +#[cfg(test)] +mod benches; + use std::{ borrow::Borrow, cmp::{Ordering, Reverse}, diff --git 
a/src/core/state_res/state_res_bench.rs b/src/core/state_res/state_res_bench.rs deleted file mode 100644 index a2bd2c23..00000000 --- a/src/core/state_res/state_res_bench.rs +++ /dev/null @@ -1,648 +0,0 @@ -// Because of criterion `cargo bench` works, -// but if you use `cargo bench -- --save-baseline ` -// or pass any other args to it, it fails with the error -// `cargo bench unknown option --save-baseline`. -// To pass args to criterion, use this form -// `cargo bench --bench -- --save-baseline `. - -#![allow(clippy::exhaustive_structs)] - -use std::{ - borrow::Borrow, - collections::{HashMap, HashSet}, - sync::{ - atomic::{AtomicU64, Ordering::SeqCst}, - Arc, - }, -}; - -use criterion::{criterion_group, criterion_main, Criterion}; -use event::PduEvent; -use futures::{future, future::ready}; -use ruma::{int, uint}; -use maplit::{btreemap, hashmap, hashset}; -use ruma::{ - room_id, user_id, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId, - Signatures, UserId, -}; -use ruma::events::{ - pdu::{EventHash, Pdu, RoomV3Pdu}, - room::{ - join_rules::{JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, RoomMemberEventContent}, - }, - StateEventType, TimelineEventType, -}; -use conduwuit::state_res::{self as state_res, Error, Event, Result, StateMap}; -use serde_json::{ - json, - value::{to_raw_value as to_raw_json_value, RawValue as RawJsonValue}, -}; - -static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); - -fn lexico_topo_sort(c: &mut Criterion) { - c.bench_function("lexicographical topological sort", |b| { - let graph = hashmap! { - event_id("l") => hashset![event_id("o")], - event_id("m") => hashset![event_id("n"), event_id("o")], - event_id("n") => hashset![event_id("o")], - event_id("o") => hashset![], // "o" has zero outgoing edges but 4 incoming edges - event_id("p") => hashset![event_id("o")], - }; - b.iter(|| { - let _ = state_res::lexicographical_topological_sort(&graph, &|_| { - future::ok((int!(0), MilliSecondsSinceUnixEpoch(uint!(0)))) - }); - }); - }); -} - -fn resolution_shallow_auth_chain(c: &mut Criterion) { - c.bench_function("resolve state of 5 events one fork", |b| { - let mut store = TestStore(hashmap! 
{}); - - // build up the DAG - let (state_at_bob, state_at_charlie, _) = store.set_up(); - - b.iter(|| async { - let ev_map = store.0.clone(); - let state_sets = [&state_at_bob, &state_at_charlie]; - let fetch = |id: OwnedEventId| ready(ev_map.get(&id).map(Arc::clone)); - let exists = |id: OwnedEventId| ready(ev_map.get(&id).is_some()); - let auth_chain_sets = state_sets - .iter() - .map(|map| { - store.auth_event_ids(room_id(), map.values().cloned().collect()).unwrap() - }) - .collect(); - - let _ = match state_res::resolve( - &RoomVersionId::V6, - state_sets.into_iter(), - &auth_chain_sets, - &fetch, - &exists, - ) - .await - { - Ok(state) => state, - Err(e) => panic!("{e}"), - }; - }); - }); -} - -fn resolve_deeper_event_set(c: &mut Criterion) { - c.bench_function("resolve state of 10 events 3 conflicting", |b| { - let mut inner = INITIAL_EVENTS(); - let ban = BAN_STATE_SET(); - - inner.extend(ban); - let store = TestStore(inner.clone()); - - let state_set_a = [ - inner.get(&event_id("CREATE")).unwrap(), - inner.get(&event_id("IJR")).unwrap(), - inner.get(&event_id("IMA")).unwrap(), - inner.get(&event_id("IMB")).unwrap(), - inner.get(&event_id("IMC")).unwrap(), - inner.get(&event_id("MB")).unwrap(), - inner.get(&event_id("PA")).unwrap(), - ] - .iter() - .map(|ev| { - (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id().to_owned()) - }) - .collect::>(); - - let state_set_b = [ - inner.get(&event_id("CREATE")).unwrap(), - inner.get(&event_id("IJR")).unwrap(), - inner.get(&event_id("IMA")).unwrap(), - inner.get(&event_id("IMB")).unwrap(), - inner.get(&event_id("IMC")).unwrap(), - inner.get(&event_id("IME")).unwrap(), - inner.get(&event_id("PA")).unwrap(), - ] - .iter() - .map(|ev| { - (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.event_id().to_owned()) - }) - .collect::>(); - - b.iter(|| async { - let state_sets = [&state_set_a, &state_set_b]; - let auth_chain_sets = state_sets - .iter() - .map(|map| { - store.auth_event_ids(room_id(), map.values().cloned().collect()).unwrap() - }) - .collect(); - - let fetch = |id: OwnedEventId| ready(inner.get(&id).map(Arc::clone)); - let exists = |id: OwnedEventId| ready(inner.get(&id).is_some()); - let _ = match state_res::resolve( - &RoomVersionId::V6, - state_sets.into_iter(), - &auth_chain_sets, - &fetch, - &exists, - ) - .await - { - Ok(state) => state, - Err(_) => panic!("resolution failed during benchmarking"), - }; - }); - }); -} - -criterion_group!( - benches, - lexico_topo_sort, - resolution_shallow_auth_chain, - resolve_deeper_event_set -); - -criterion_main!(benches); - -//*///////////////////////////////////////////////////////////////////// -// -// IMPLEMENTATION DETAILS AHEAD -// -/////////////////////////////////////////////////////////////////////*/ -struct TestStore(HashMap>); - -#[allow(unused)] -impl TestStore { - fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> Result> { - self.0 - .get(event_id) - .map(Arc::clone) - .ok_or_else(|| Error::NotFound(format!("{} not found", event_id))) - } - - /// Returns the events that correspond to the `event_ids` sorted in the same order. - fn get_events(&self, room_id: &RoomId, event_ids: &[OwnedEventId]) -> Result>> { - let mut events = vec![]; - for id in event_ids { - events.push(self.get_event(room_id, id)?); - } - Ok(events) - } - - /// Returns a Vec of the related auth events to the given `event`. 
- fn auth_event_ids(&self, room_id: &RoomId, event_ids: Vec) -> Result> { - let mut result = HashSet::new(); - let mut stack = event_ids; - - // DFS for auth event chain - while !stack.is_empty() { - let ev_id = stack.pop().unwrap(); - if result.contains(&ev_id) { - continue; - } - - result.insert(ev_id.clone()); - - let event = self.get_event(room_id, ev_id.borrow())?; - - stack.extend(event.auth_events().map(ToOwned::to_owned)); - } - - Ok(result) - } - - /// Returns a vector representing the difference in auth chains of the given `events`. - fn auth_chain_diff(&self, room_id: &RoomId, event_ids: Vec>) -> Result> { - let mut auth_chain_sets = vec![]; - for ids in event_ids { - // TODO state store `auth_event_ids` returns self in the event ids list - // when an event returns `auth_event_ids` self is not contained - let chain = self.auth_event_ids(room_id, ids)?.into_iter().collect::>(); - auth_chain_sets.push(chain); - } - - if let Some(first) = auth_chain_sets.first().cloned() { - let common = auth_chain_sets - .iter() - .skip(1) - .fold(first, |a, b| a.intersection(b).cloned().collect::>()); - - Ok(auth_chain_sets - .into_iter() - .flatten() - .filter(|id| !common.contains(id.borrow())) - .collect()) - } else { - Ok(vec![]) - } - } -} - -impl TestStore { - #[allow(clippy::type_complexity)] - fn set_up( - &mut self, - ) -> (StateMap, StateMap, StateMap) { - let create_event = to_pdu_event::<&EventId>( - "CREATE", - alice(), - TimelineEventType::RoomCreate, - Some(""), - to_raw_json_value(&json!({ "creator": alice() })).unwrap(), - &[], - &[], - ); - let cre = create_event.event_id().to_owned(); - self.0.insert(cre.clone(), Arc::clone(&create_event)); - - let alice_mem = to_pdu_event( - "IMA", - alice(), - TimelineEventType::RoomMember, - Some(alice().to_string().as_str()), - member_content_join(), - &[cre.clone()], - &[cre.clone()], - ); - self.0.insert(alice_mem.event_id().to_owned(), Arc::clone(&alice_mem)); - - let join_rules = to_pdu_event( - "IJR", - alice(), - TimelineEventType::RoomJoinRules, - Some(""), - to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), - &[cre.clone(), alice_mem.event_id().to_owned()], - &[alice_mem.event_id().to_owned()], - ); - self.0.insert(join_rules.event_id().to_owned(), join_rules.clone()); - - // Bob and Charlie join at the same time, so there is a fork - // this will be represented in the state_sets when we resolve - let bob_mem = to_pdu_event( - "IMB", - bob(), - TimelineEventType::RoomMember, - Some(bob().to_string().as_str()), - member_content_join(), - &[cre.clone(), join_rules.event_id().to_owned()], - &[join_rules.event_id().to_owned()], - ); - self.0.insert(bob_mem.event_id().to_owned(), bob_mem.clone()); - - let charlie_mem = to_pdu_event( - "IMC", - charlie(), - TimelineEventType::RoomMember, - Some(charlie().to_string().as_str()), - member_content_join(), - &[cre, join_rules.event_id().to_owned()], - &[join_rules.event_id().to_owned()], - ); - self.0.insert(charlie_mem.event_id().to_owned(), charlie_mem.clone()); - - let state_at_bob = [&create_event, &alice_mem, &join_rules, &bob_mem] - .iter() - .map(|e| { - (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) - }) - .collect::>(); - - let state_at_charlie = [&create_event, &alice_mem, &join_rules, &charlie_mem] - .iter() - .map(|e| { - (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) - }) - .collect::>(); - - let expected = [&create_event, &alice_mem, &join_rules, &bob_mem, &charlie_mem] - .iter() - 
.map(|e| { - (e.event_type().with_state_key(e.state_key().unwrap()), e.event_id().to_owned()) - }) - .collect::>(); - - (state_at_bob, state_at_charlie, expected) - } -} - -fn event_id(id: &str) -> OwnedEventId { - if id.contains('$') { - return id.try_into().unwrap(); - } - format!("${}:foo", id).try_into().unwrap() -} - -fn alice() -> &'static UserId { - user_id!("@alice:foo") -} - -fn bob() -> &'static UserId { - user_id!("@bob:foo") -} - -fn charlie() -> &'static UserId { - user_id!("@charlie:foo") -} - -fn ella() -> &'static UserId { - user_id!("@ella:foo") -} - -fn room_id() -> &'static RoomId { - room_id!("!test:foo") -} - -fn member_content_ban() -> Box { - to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Ban)).unwrap() -} - -fn member_content_join() -> Box { - to_raw_json_value(&RoomMemberEventContent::new(MembershipState::Join)).unwrap() -} - -fn to_pdu_event( - id: &str, - sender: &UserId, - ev_type: TimelineEventType, - state_key: Option<&str>, - content: Box, - auth_events: &[S], - prev_events: &[S], -) -> Arc -where - S: AsRef, -{ - // We don't care if the addition happens in order just that it is atomic - // (each event has its own value) - let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); - let id = if id.contains('$') { id.to_owned() } else { format!("${}:foo", id) }; - let auth_events = auth_events.iter().map(AsRef::as_ref).map(event_id).collect::>(); - let prev_events = prev_events.iter().map(AsRef::as_ref).map(event_id).collect::>(); - - let state_key = state_key.map(ToOwned::to_owned); - Arc::new(PduEvent { - event_id: id.try_into().unwrap(), - rest: Pdu::RoomV3Pdu(RoomV3Pdu { - room_id: room_id().to_owned(), - sender: sender.to_owned(), - origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), - state_key, - kind: ev_type, - content, - redacts: None, - unsigned: btreemap! 
{}, - auth_events, - prev_events, - depth: uint!(0), - hashes: EventHash::new(String::new()), - signatures: Signatures::new(), - }), - }) -} - -// all graphs start with these input events -#[allow(non_snake_case)] -fn INITIAL_EVENTS() -> HashMap> { - vec![ - to_pdu_event::<&EventId>( - "CREATE", - alice(), - TimelineEventType::RoomCreate, - Some(""), - to_raw_json_value(&json!({ "creator": alice() })).unwrap(), - &[], - &[], - ), - to_pdu_event( - "IMA", - alice(), - TimelineEventType::RoomMember, - Some(alice().as_str()), - member_content_join(), - &["CREATE"], - &["CREATE"], - ), - to_pdu_event( - "IPOWER", - alice(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100 } })).unwrap(), - &["CREATE", "IMA"], - &["IMA"], - ), - to_pdu_event( - "IJR", - alice(), - TimelineEventType::RoomJoinRules, - Some(""), - to_raw_json_value(&RoomJoinRulesEventContent::new(JoinRule::Public)).unwrap(), - &["CREATE", "IMA", "IPOWER"], - &["IPOWER"], - ), - to_pdu_event( - "IMB", - bob(), - TimelineEventType::RoomMember, - Some(bob().to_string().as_str()), - member_content_join(), - &["CREATE", "IJR", "IPOWER"], - &["IJR"], - ), - to_pdu_event( - "IMC", - charlie(), - TimelineEventType::RoomMember, - Some(charlie().to_string().as_str()), - member_content_join(), - &["CREATE", "IJR", "IPOWER"], - &["IMB"], - ), - to_pdu_event::<&EventId>( - "START", - charlie(), - TimelineEventType::RoomTopic, - Some(""), - to_raw_json_value(&json!({})).unwrap(), - &[], - &[], - ), - to_pdu_event::<&EventId>( - "END", - charlie(), - TimelineEventType::RoomTopic, - Some(""), - to_raw_json_value(&json!({})).unwrap(), - &[], - &[], - ), - ] - .into_iter() - .map(|ev| (ev.event_id().to_owned(), ev)) - .collect() -} - -// all graphs start with these input events -#[allow(non_snake_case)] -fn BAN_STATE_SET() -> HashMap> { - vec![ - to_pdu_event( - "PA", - alice(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), - &["CREATE", "IMA", "IPOWER"], // auth_events - &["START"], // prev_events - ), - to_pdu_event( - "PB", - alice(), - TimelineEventType::RoomPowerLevels, - Some(""), - to_raw_json_value(&json!({ "users": { alice(): 100, bob(): 50 } })).unwrap(), - &["CREATE", "IMA", "IPOWER"], - &["END"], - ), - to_pdu_event( - "MB", - alice(), - TimelineEventType::RoomMember, - Some(ella().as_str()), - member_content_ban(), - &["CREATE", "IMA", "PB"], - &["PA"], - ), - to_pdu_event( - "IME", - ella(), - TimelineEventType::RoomMember, - Some(ella().as_str()), - member_content_join(), - &["CREATE", "IJR", "PA"], - &["MB"], - ), - ] - .into_iter() - .map(|ev| (ev.event_id().to_owned(), ev)) - .collect() -} - -/// Convenience trait for adding event type plus state key to state maps. 
-trait EventTypeExt { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, String); -} - -impl EventTypeExt for &TimelineEventType { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { - (self.to_string().into(), state_key.into()) - } -} - -mod event { - use ruma_common::{MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId}; - use ruma_events::{pdu::Pdu, TimelineEventType}; - use ruma_state_res::Event; - use serde::{Deserialize, Serialize}; - use serde_json::value::RawValue as RawJsonValue; - - impl Event for PduEvent { - type Id = OwnedEventId; - - fn event_id(&self) -> &Self::Id { - &self.event_id - } - - fn room_id(&self) -> &RoomId { - match &self.rest { - Pdu::RoomV1Pdu(ev) => &ev.room_id, - Pdu::RoomV3Pdu(ev) => &ev.room_id, - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn sender(&self) -> &UserId { - match &self.rest { - Pdu::RoomV1Pdu(ev) => &ev.sender, - Pdu::RoomV3Pdu(ev) => &ev.sender, - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn event_type(&self) -> &TimelineEventType { - match &self.rest { - Pdu::RoomV1Pdu(ev) => &ev.kind, - Pdu::RoomV3Pdu(ev) => &ev.kind, - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn content(&self) -> &RawJsonValue { - match &self.rest { - Pdu::RoomV1Pdu(ev) => &ev.content, - Pdu::RoomV3Pdu(ev) => &ev.content, - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { - match &self.rest { - Pdu::RoomV1Pdu(ev) => ev.origin_server_ts, - Pdu::RoomV3Pdu(ev) => ev.origin_server_ts, - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn state_key(&self) -> Option<&str> { - match &self.rest { - Pdu::RoomV1Pdu(ev) => ev.state_key.as_deref(), - Pdu::RoomV3Pdu(ev) => ev.state_key.as_deref(), - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn prev_events(&self) -> Box + Send + '_> { - match &self.rest { - Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)), - Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()), - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn auth_events(&self) -> Box + Send + '_> { - match &self.rest { - Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)), - Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()), - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - - fn redacts(&self) -> Option<&Self::Id> { - match &self.rest { - Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(), - Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(), - #[cfg(not(feature = "unstable-exhaustive-types"))] - _ => unreachable!("new PDU version"), - } - } - } - - #[derive(Clone, Debug, Deserialize, Serialize)] - pub(crate) struct PduEvent { - pub(crate) event_id: OwnedEventId, - #[serde(flatten)] - pub(crate) rest: Pdu, - } -} From 6365f1a887a02564237fd6176ee7e3d72480ffbf Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Fri, 28 Mar 2025 14:14:48 -0400 Subject: [PATCH 157/310] remove sccache from ci for now Signed-off-by: June Clementine Strawberry --- .github/workflows/ci.yml | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/.github/workflows/ci.yml 
b/.github/workflows/ci.yml index 3fd834e0..5043f23b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -21,16 +21,6 @@ concurrency: cancel-in-progress: true env: - # sccache only on main repo - SCCACHE_GHA_ENABLED: "${{ !startsWith(github.ref, 'refs/tags/') && (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'true' || 'false' }}" - RUSTC_WRAPPER: "${{ !startsWith(github.ref, 'refs/tags/') && (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'sccache' || '' }}" - SCCACHE_BUCKET: "${{ (github.event.pull_request.draft != true) && (vars.DOCKER_USERNAME != '') && (vars.GITLAB_USERNAME != '') && (vars.SCCACHE_ENDPOINT != '') && (github.event.pull_request.user.login != 'renovate[bot]') && 'sccache' || '' }}" - SCCACHE_S3_USE_SSL: ${{ vars.SCCACHE_S3_USE_SSL }} - SCCACHE_REGION: ${{ vars.SCCACHE_REGION }} - SCCACHE_ENDPOINT: ${{ vars.SCCACHE_ENDPOINT }} - SCCACHE_CACHE_MULTIARCH: ${{ vars.SCCACHE_CACHE_MULTIARCH }} - AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} # Required to make some things output color TERM: ansi # Publishing to my nix binary cache @@ -123,13 +113,6 @@ jobs: bin/nix-build-and-cache just '.#devShells.x86_64-linux.all-features' bin/nix-build-and-cache just '.#devShells.x86_64-linux.dynamic' - # use sccache for Rust - - name: Run sccache-cache - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - #if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} - uses: mozilla-actions/sccache-action@main - # use rust-cache - uses: Swatinem/rust-cache@v2 # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting @@ -247,13 +230,6 @@ jobs: direnv allow nix develop .#all-features --command true --impure - # use sccache for Rust - - name: Run sccache-cache - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - #if: ${{ (env.SCCACHE_GHA_ENABLED == 'true') && !startsWith(github.ref, 'refs/tags/') }} - uses: mozilla-actions/sccache-action@main - # use rust-cache - uses: Swatinem/rust-cache@v2 # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting From 75b6daa67f31d29035113d217accc80505119e63 Mon Sep 17 00:00:00 2001 From: Ginger <75683114+gingershaped@users.noreply.github.com> Date: Fri, 28 Mar 2025 12:22:23 -0400 Subject: [PATCH 158/310] Fix off-by-one error when fetching room hierarchy --- src/api/client/space.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/client/space.rs b/src/api/client/space.rs index a667f852..a55c927d 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -159,7 +159,7 @@ where break; } - if parents.len() >= max_depth { + if parents.len() > max_depth { continue; } From 3e57b7d35d5bd6cfed5900b377f7c68970213518 Mon Sep 17 00:00:00 2001 From: Ginger <75683114+gingershaped@users.noreply.github.com> Date: Fri, 28 Mar 2025 12:58:18 -0400 Subject: [PATCH 159/310] Update expected test results --- tests/test_results/complement/test_results.jsonl | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index ac2733f8..c0e28750 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -73,7 +73,7 @@ {"Action":"pass","Test":"TestChangePasswordPushers/Pushers_created_with_a_different_access_token_are_deleted_on_password_change"} {"Action":"pass","Test":"TestChangePasswordPushers/Pushers_created_with_the_same_access_token_are_not_deleted_on_password_change"} {"Action":"fail","Test":"TestClientSpacesSummary"} -{"Action":"fail","Test":"TestClientSpacesSummary/max_depth"} +{"Action":"pass","Test":"TestClientSpacesSummary/max_depth"} {"Action":"fail","Test":"TestClientSpacesSummary/pagination"} {"Action":"fail","Test":"TestClientSpacesSummary/query_whole_graph"} {"Action":"fail","Test":"TestClientSpacesSummary/redact_link"} From 0e2009dbf5c3dfe1cfd1fd37078c74e871ffa5c6 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 28 Mar 2025 22:47:00 +0000 Subject: [PATCH 160/310] fix client hierarchy loop condition Signed-off-by: Jason Volk --- src/api/client/space.rs | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/api/client/space.rs b/src/api/client/space.rs index a55c927d..567ac62f 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -155,10 +155,6 @@ where break; } - if children.is_empty() { - break; - } - if parents.len() > max_depth { continue; } From d0132706cd9b5bd0c6df5507cb42bcbade86f28b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 28 Mar 2025 23:34:42 +0000 Subject: [PATCH 161/310] add --read-only and --maintenance program option Signed-off-by: Jason Volk --- Cargo.toml | 1 + src/main/clap.rs | 17 +++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index db55b9b8..8b49c3b8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -892,6 +892,7 @@ needless_continue = { level = "allow", priority = 1 } no_effect_underscore_binding = { level = "allow", priority = 1 } similar_names = { level = "allow", priority = 1 } single_match_else = { level = "allow", priority = 1 } +struct_excessive_bools = { level = "allow", priority = 1 } struct_field_names = { level = "allow", priority = 1 } unnecessary_wraps = { level = "allow", priority = 1 } unused_async = { level = "allow", priority = 1 } diff --git a/src/main/clap.rs b/src/main/clap.rs index 35a7ea41..707a1c76 100644 --- a/src/main/clap.rs +++ b/src/main/clap.rs @@ -27,6 +27,14 @@ pub(crate) struct Args { #[arg(long, short('O'))] pub(crate) option: Vec, + /// Run in a stricter read-only --maintenance mode. + #[arg(long)] + pub(crate) read_only: bool, + + /// Run in maintenance mode while refusing connections. + #[arg(long)] + pub(crate) maintenance: bool, + #[cfg(feature = "console")] /// Activate admin command console automatically after startup. #[arg(long, num_args(0))] @@ -121,6 +129,15 @@ pub(super) fn parse() -> Args { Args::parse() } /// Synthesize any command line options with configuration file options. pub(crate) fn update(mut config: Figment, args: &Args) -> Result { + if args.read_only { + config = config.join(("rocksdb_read_only", true)); + } + + if args.maintenance || args.read_only { + config = config.join(("startup_netburst", false)); + config = config.join(("listening", false)); + } + #[cfg(feature = "console")] // Indicate the admin console should be spawned automatically if the // configuration file hasn't already. 
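For context on the two flags introduced in the patch above, here is a minimal usage sketch. The binary name `conduwuit` is an assumption; the flag names and the configuration keys they set are taken from the clap definitions and the `update()` function shown in the diff.

    # Assumed invocation: open the database read-only. Per update() above,
    # --read-only sets rocksdb_read_only=true and also implies maintenance
    # behaviour, so startup_netburst and listening are set to false.
    conduwuit --read-only

    # Assumed invocation: maintenance mode alone leaves the database writable
    # but still sets startup_netburst=false and listening=false, refusing
    # connections while the server runs.
    conduwuit --maintenance

Both flags are applied by `update()` joining these options into the figment configuration alongside the configuration file.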
From b03c493bf994449c8c5dd5b1122ab9c87a289df5 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 28 Mar 2025 20:33:38 +0000 Subject: [PATCH 162/310] add stub for database benches Signed-off-by: Jason Volk --- src/database/benches.rs | 17 +++++++++++++++++ src/database/mod.rs | 2 ++ 2 files changed, 19 insertions(+) create mode 100644 src/database/benches.rs diff --git a/src/database/benches.rs b/src/database/benches.rs new file mode 100644 index 00000000..56d1411c --- /dev/null +++ b/src/database/benches.rs @@ -0,0 +1,17 @@ +#[cfg(conduwuit_bench)] +extern crate test; + +#[cfg(conduwuit_bench)] +#[cfg_attr(conduwuit_bench, bench)] +fn ser_str(b: &mut test::Bencher) { + use conduwuit::ruma::{RoomId, UserId}; + + use crate::ser::serialize_to_vec; + + let user_id: &UserId = "@user:example.com".try_into().unwrap(); + let room_id: &RoomId = "!room:example.com".try_into().unwrap(); + b.iter(|| { + let key = (user_id, room_id); + let _s = serialize_to_vec(key).expect("failed to serialize user_id"); + }); +} diff --git a/src/database/mod.rs b/src/database/mod.rs index 0481d1bd..1262a79a 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -5,6 +5,8 @@ conduwuit::mod_ctor! {} conduwuit::mod_dtor! {} conduwuit::rustc_flags_capture! {} +#[cfg(test)] +mod benches; mod cork; mod de; mod deserialized; From a93cb34dd6e10038d6504af209c78e4967467bcb Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 30 Mar 2025 02:48:08 +0000 Subject: [PATCH 163/310] disambiguate UInt/u64 type related in client/api/directory; use err macros. Signed-off-by: Jason Volk --- src/api/client/directory.rs | 86 +++++++++++++------------------------ 1 file changed, 30 insertions(+), 56 deletions(-) diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 80b314b9..f2f668c8 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -1,12 +1,13 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - Err, Error, Result, info, + Err, Result, err, info, utils::{ TryFutureExtExt, + math::Expected, + result::FlatOk, stream::{ReadyExt, WidebandExt}, }, - warn, }; use futures::{ FutureExt, StreamExt, TryFutureExt, @@ -20,7 +21,6 @@ use ruma::{ get_public_rooms, get_public_rooms_filtered, get_room_visibility, set_room_visibility, }, - error::ErrorKind, room, }, federation, @@ -71,11 +71,7 @@ pub(crate) async fn get_public_rooms_filtered_route( ) .await .map_err(|e| { - warn!(?body.server, "Failed to return /publicRooms: {e}"); - Error::BadRequest( - ErrorKind::Unknown, - "Failed to return the requested server's public room list.", - ) + err!(Request(Unknown(warn!(?body.server, "Failed to return /publicRooms: {e}")))) })?; Ok(response) @@ -113,11 +109,7 @@ pub(crate) async fn get_public_rooms_route( ) .await .map_err(|e| { - warn!(?body.server, "Failed to return /publicRooms: {e}"); - Error::BadRequest( - ErrorKind::Unknown, - "Failed to return the requested server's public room list.", - ) + err!(Request(Unknown(warn!(?body.server, "Failed to return /publicRooms: {e}")))) })?; Ok(get_public_rooms::v3::Response { @@ -137,7 +129,7 @@ pub(crate) async fn set_room_visibility_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if !services.rooms.metadata.exists(&body.room_id).await { // Return 404 if the room doesn't exist @@ -181,10 +173,9 @@ pub(crate) async fn set_room_visibility_route( .await; } - return 
Err(Error::BadRequest( - ErrorKind::forbidden(), + return Err!(Request(Forbidden( "Publishing rooms to the room directory is not allowed", - )); + ))); } services.rooms.directory.set_public(&body.room_id); @@ -202,10 +193,7 @@ pub(crate) async fn set_room_visibility_route( }, | room::Visibility::Private => services.rooms.directory.set_not_public(&body.room_id), | _ => { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Room visibility type is not supported.", - )); + return Err!(Request(InvalidParam("Room visibility type is not supported.",))); }, } @@ -221,7 +209,7 @@ pub(crate) async fn get_room_visibility_route( ) -> Result { if !services.rooms.metadata.exists(&body.room_id).await { // Return 404 if the room doesn't exist - return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found")); + return Err!(Request(NotFound("Room not found"))); } Ok(get_room_visibility::v3::Response { @@ -269,8 +257,8 @@ pub(crate) async fn get_public_rooms_filtered_helper( } // Use limit or else 10, with maximum 100 - let limit = limit.map_or(10, u64::from); - let mut num_since: u64 = 0; + let limit: usize = limit.map_or(10_u64, u64::from).try_into()?; + let mut num_since: usize = 0; if let Some(s) = &since { let mut characters = s.chars(); @@ -278,14 +266,14 @@ pub(crate) async fn get_public_rooms_filtered_helper( | Some('n') => false, | Some('p') => true, | _ => { - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token")); + return Err!(Request(InvalidParam("Invalid `since` token"))); }, }; num_since = characters .collect::() .parse() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `since` token."))?; + .map_err(|_| err!(Request(InvalidParam("Invalid `since` token."))))?; if backwards { num_since = num_since.saturating_sub(limit); @@ -302,6 +290,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( if !filter.room_types.is_empty() && !filter.room_types.contains(&RoomTypeFilter::from(chunk.room_type.clone())) { return None; } + if let Some(query) = filter.generic_search_term.as_ref().map(|q| q.to_lowercase()) { if let Some(name) = &chunk.name { if name.as_str().to_lowercase().contains(&query) { @@ -333,40 +322,24 @@ pub(crate) async fn get_public_rooms_filtered_helper( all_rooms.sort_by(|l, r| r.num_joined_members.cmp(&l.num_joined_members)); - let total_room_count_estimate = UInt::try_from(all_rooms.len()).unwrap_or_else(|_| uint!(0)); + let total_room_count_estimate = UInt::try_from(all_rooms.len()) + .unwrap_or_else(|_| uint!(0)) + .into(); - let chunk: Vec<_> = all_rooms - .into_iter() - .skip( - num_since - .try_into() - .expect("num_since should not be this high"), - ) - .take(limit.try_into().expect("limit should not be this high")) - .collect(); + let chunk: Vec<_> = all_rooms.into_iter().skip(num_since).take(limit).collect(); - let prev_batch = if num_since == 0 { - None - } else { - Some(format!("p{num_since}")) - }; + let prev_batch = num_since.ne(&0).then_some(format!("p{num_since}")); - let next_batch = if chunk.len() < limit.try_into().unwrap() { - None - } else { - Some(format!( - "n{}", - num_since - .checked_add(limit) - .expect("num_since and limit should not be that large") - )) - }; + let next_batch = chunk + .len() + .ge(&limit) + .then_some(format!("n{}", num_since.expected_add(limit))); Ok(get_public_rooms_filtered::v3::Response { chunk, prev_batch, next_batch, - total_room_count_estimate: Some(total_room_count_estimate), + total_room_count_estimate, }) } @@ -384,7 +357,7 @@ async fn user_can_publish_room( .await { | 
Ok(event) => serde_json::from_str(event.content.get()) - .map_err(|_| Error::bad_database("Invalid event content for m.room.power_levels")) + .map_err(|_| err!(Database("Invalid event content for m.room.power_levels"))) .map(|content: RoomPowerLevelsEventContent| { RoomPowerLevels::from(content) .user_can_send_state(user_id, StateEventType::RoomHistoryVisibility) @@ -452,9 +425,10 @@ async fn public_rooms_chunk(services: &Services, room_id: OwnedRoomId) -> Public join_rule: join_rule.unwrap_or_default(), name, num_joined_members: num_joined_members - .unwrap_or(0) - .try_into() - .expect("joined count overflows ruma UInt"), + .map(TryInto::try_into) + .map(Result::ok) + .flat_ok() + .unwrap_or_else(|| uint!(0)), room_id, room_type, topic, From 095734a8e7835abf793911ff24ddf0f55c89012f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 30 Mar 2025 03:01:58 +0000 Subject: [PATCH 164/310] bump tokio to 1.44.1 Signed-off-by: Jason Volk --- Cargo.lock | 559 +++++++++++++++++++++++++++-------------------------- Cargo.toml | 2 +- 2 files changed, 291 insertions(+), 270 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c28f4eab..8c0e797b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -55,9 +55,9 @@ checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" [[package]] name = "anyhow" -version = "1.0.96" +version = "1.0.97" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b964d184e89d9b6b67dd2715bc8e74cf3107fb2b529990c90cf517326150bf4" +checksum = "dcfed56ad506cb2c684a14971b8861fdc3baaaae314b9e5f9bb532cbe3ba7a4f" [[package]] name = "arbitrary" @@ -79,7 +79,7 @@ checksum = "0ae92a5119aa49cdbcf6b9f893fe4e1d98b04ccbf82ee0584ad948a44a734dea" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -105,9 +105,9 @@ dependencies = [ [[package]] name = "as_variant" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f38fa22307249f86fb7fad906fcae77f2564caeb56d7209103c551cd1cf4798f" +checksum = "9dbc3a507a82b17ba0d98f6ce8fd6954ea0c8152e98009d36a40d8dcc8ce078a" [[package]] name = "assign" @@ -128,9 +128,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.20" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "310c9bcae737a48ef5cdee3174184e6d548b292739ede61a1f955ef76a738861" +checksum = "59a194f9d963d8099596278594b3107448656ba73831c9d8c783e613ce86da64" dependencies = [ "brotli", "flate2", @@ -161,18 +161,18 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] name = "async-trait" -version = "0.1.86" +version = "0.1.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "644dd749086bf3771a2fbc5f256fdb982d53f011c7d5d560304eafeecebce79d" +checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -221,27 +221,25 @@ dependencies = [ [[package]] name = "aws-lc-rs" -version = "1.12.5" +version = "1.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e4e8200b9a4a5801a769d50eeabc05670fec7e959a8cb7a63a93e4e519942ae" +checksum = "dabb68eb3a7aa08b46fddfd59a3d55c978243557a90ab804769f7e20e67d2b01" dependencies = [ "aws-lc-sys", - "paste", "zeroize", ] [[package]] name = "aws-lc-sys" -version = "0.26.0" +version = "0.27.1" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f9dd2e03ee80ca2822dd6ea431163d2ef259f2066a4d6ccaca6d9dcb386aa43" +checksum = "77926887776171ced7d662120a75998e444d3750c951abfe07f90da130514b1f" dependencies = [ "bindgen 0.69.5", "cc", "cmake", "dunce", "fs_extra", - "paste", ] [[package]] @@ -334,16 +332,15 @@ dependencies = [ [[package]] name = "axum-server" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56bac90848f6a9393ac03c63c640925c4b7c8ca21654de40d53f55964667c7d8" +checksum = "495c05f60d6df0093e8fb6e74aa5846a0ad06abaf96d76166283720bf740f8ab" dependencies = [ "arc-swap", "bytes", - "futures-util", + "fs-err", "http", "http-body", - "http-body-util", "hyper", "hyper-util", "pin-project-lite", @@ -352,7 +349,6 @@ dependencies = [ "rustls-pki-types", "tokio", "tokio-rustls", - "tower 0.4.13", "tower-service", ] @@ -404,9 +400,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.6.0" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +checksum = "89e25b6adfb930f02d1981565a6e5d9c547ac15a96606256d3b59040e5cd4ca3" [[package]] name = "bindgen" @@ -427,7 +423,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.98", + "syn 2.0.100", "which", ] @@ -446,7 +442,7 @@ dependencies = [ "regex", "rustc-hash 2.1.1", "shlex", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -535,9 +531,9 @@ checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" [[package]] name = "bytemuck" -version = "1.21.0" +version = "1.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef657dfab802224e671f5818e9a4935f9b1957ed18e58292690cc39e7a4092a3" +checksum = "b6b1fc10dbac614ebc03540c9dbd60e83887fda27794998c6528f1782047d540" [[package]] name = "byteorder" @@ -553,9 +549,9 @@ checksum = "8f1fe948ff07f4bd06c30984e69f5b4899c516a3ef74f34df92a2df2ab535495" [[package]] name = "bytes" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f61dac84819c6588b558454b194026eb1f09c293b9036ae9b159e74e73ab6cf9" +checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "bytesize" @@ -585,9 +581,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.16" +version = "1.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be714c154be609ec7f5dad223a33bf1482fff90472de28f7362806e6d4832b8c" +checksum = "1fcb57c740ae1daf453ae85f16e37396f672b039e00d9d866e07ddb24e328e3a" dependencies = [ "jobserver", "libc", @@ -656,9 +652,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.31" +version = "4.5.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "027bb0d98429ae334a8698531da7077bdf906419543a35a55c2cb1b66437d767" +checksum = "e958897981290da2a852763fe9cdb89cd36977a5d729023127095fa94d95e2ff" dependencies = [ "clap_builder", "clap_derive", @@ -666,9 +662,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.31" +version = "4.5.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5589e0cba072e0f3d23791efac0fd8627b49c829c196a492e88168e6a669d863" +checksum = "83b0f35019843db2160b5bb19ae09b4e6411ac33fc6a712003c33e03090e2489" dependencies = [ "anstyle", "clap_lex", @@ -676,14 +672,14 @@ 
dependencies = [ [[package]] name = "clap_derive" -version = "4.5.28" +version = "4.5.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4ced95c6f4a675af3da73304b9ac4ed991640c36374e4b46795c49e17cf1ed" +checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -787,7 +783,7 @@ dependencies = [ "ipaddress", "itertools 0.13.0", "log", - "rand", + "rand 0.8.5", "reqwest", "ruma", "serde", @@ -830,7 +826,7 @@ dependencies = [ "maplit", "nix", "num-traits", - "rand", + "rand 0.8.5", "regex", "reqwest", "ring", @@ -842,7 +838,7 @@ dependencies = [ "serde_yaml", "smallstr", "smallvec", - "thiserror 2.0.11", + "thiserror 2.0.12", "tikv-jemalloc-ctl", "tikv-jemalloc-sys", "tikv-jemallocator", @@ -880,7 +876,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -937,7 +933,7 @@ dependencies = [ "log", "loole", "lru-cache", - "rand", + "rand 0.8.5", "regex", "reqwest", "ruma", @@ -1194,7 +1190,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501" dependencies = [ "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -1221,7 +1217,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -1264,9 +1260,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.11" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +checksum = "28cfac68e08048ae1883171632c2aef3ebc555621ae56fbccce1cbf22dd7f058" dependencies = [ "powerfmt", ] @@ -1290,7 +1286,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -1317,7 +1313,7 @@ checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ "curve25519-dalek", "ed25519", - "rand_core", + "rand_core 0.6.4", "serde", "sha2", "subtle", @@ -1326,9 +1322,9 @@ dependencies = [ [[package]] name = "either" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7914353092ddf589ad78f25c5c1c21b7f80b0ff8621e7c814c3485b5306da9d" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" dependencies = [ "serde", ] @@ -1342,7 +1338,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -1373,9 +1369,9 @@ dependencies = [ [[package]] name = "event-listener-strategy" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3e4e0dd3673c1139bf041f3008816d9cf2946bbfac2945c09e523b8d7b05b2" +checksum = "8be9f3dfaaffdae2972880079a491a1a8bb7cbed0b8dd7a347f668b4150a3b93" dependencies = [ "event-listener", "pin-project-lite", @@ -1472,6 +1468,16 @@ dependencies = [ "thiserror 1.0.69", ] +[[package]] +name = "fs-err" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f89bda4c2a21204059a977ed3bfe746677dfd137b83c339e702b0ac91d482aa" +dependencies = [ + "autocfg", + "tokio", +] + [[package]] name = "fs_extra" version = "1.3.0" @@ -1543,7 +1549,7 @@ checksum = 
"162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -1601,14 +1607,16 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a49c392881ce6d5c3b8cb70f98717b7c07aabbdff06687b9030dbfbe2725f8" +checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" dependencies = [ "cfg-if", + "js-sys", "libc", - "wasi 0.13.3+wasi-0.2.2", - "windows-targets 0.52.6", + "r-efi", + "wasi 0.14.2+wasi-0.2.4", + "wasm-bindgen", ] [[package]] @@ -1645,7 +1653,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.7.1", + "indexmap 2.8.0", "slab", "tokio", "tokio-util", @@ -1654,9 +1662,9 @@ dependencies = [ [[package]] name = "half" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +checksum = "7db2ff139bba50379da6aa0766b52fdcb62cb5b263009b09ed58ba604e14bbd1" dependencies = [ "cfg-if", "crunchy", @@ -1751,7 +1759,7 @@ dependencies = [ "idna", "ipnet", "once_cell", - "rand", + "rand 0.8.5", "thiserror 1.0.69", "tinyvec", "tokio", @@ -1772,7 +1780,7 @@ dependencies = [ "lru-cache", "once_cell", "parking_lot", - "rand", + "rand 0.8.5", "resolv-conf", "smallvec", "thiserror 1.0.69", @@ -1798,17 +1806,6 @@ dependencies = [ "windows-sys 0.59.0", ] -[[package]] -name = "hostname" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c731c3e10504cc8ed35cfe2f1db4c9274c3d35fa486e3b31df46f068ef3e867" -dependencies = [ - "libc", - "match_cfg", - "winapi", -] - [[package]] name = "hostname" version = "0.4.0" @@ -1831,14 +1828,14 @@ dependencies = [ "markup5ever", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] name = "http" -version = "1.2.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f16ca2af56261c99fba8bac40a10251ce8188205a4c448fbb745a2e4daa76fea" +checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" dependencies = [ "bytes", "fnv", @@ -1866,12 +1863,12 @@ dependencies = [ [[package]] name = "http-body-util" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793429d76616a256bcb62c2a2ec2bed781c8307e797e2598c50010f2bee2544f" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", - "futures-util", + "futures-core", "http", "http-body", "pin-project-lite", @@ -1879,9 +1876,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.10.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2d708df4e7140240a16cd6ab0ab65c972d7433ab77819ea693fde9c43811e2a" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" [[package]] name = "httpdate" @@ -1891,9 +1888,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "humantime" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f" [[package]] name = "hyper" @@ -2009,9 +2006,9 @@ dependencies = [ [[package]] name = 
"icu_locid_transform_data" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc8ff3388f852bede6b579ad4e978ab004f139284d7b28715f773507b946f6e" +checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" [[package]] name = "icu_normalizer" @@ -2033,9 +2030,9 @@ dependencies = [ [[package]] name = "icu_normalizer_data" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8cafbf7aa791e9b22bec55a167906f9e1215fd475cd22adfcf660e03e989516" +checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" [[package]] name = "icu_properties" @@ -2054,9 +2051,9 @@ dependencies = [ [[package]] name = "icu_properties_data" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67a8effbc3dd3e4ba1afa8ad918d5684b8868b3b26500753effea8d2eed19569" +checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" [[package]] name = "icu_provider" @@ -2083,7 +2080,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -2109,9 +2106,9 @@ dependencies = [ [[package]] name = "image" -version = "0.25.5" +version = "0.25.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd6f44aed642f18953a158afeb30206f4d50da59fbc66ecb53c66488de73563b" +checksum = "db35664ce6b9810857a38a906215e75a9c879f0696556a39f59c62829710251a" dependencies = [ "bytemuck", "byteorder-lite", @@ -2137,7 +2134,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b77d01e822461baa8409e156015a1d91735549f0f2c17691bd2d996bef238f7f" dependencies = [ "byteorder-lite", - "quick-error 2.0.1", + "quick-error", ] [[package]] @@ -2158,9 +2155,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.7.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c9c992b02b5b4c94ea26e32fe5bccb7aa7d9f390ab5c1221ff895bc7ea8b652" +checksum = "3954d50fe15b02142bf25d3b8bdadb634ec3948f103d04ffe3031bc8fe9d7058" dependencies = [ "equivalent", "hashbrown 0.15.2", @@ -2187,7 +2184,7 @@ checksum = "c34819042dc3d3971c46c2190835914dfbe0c3c13f61449b2997f4e9722dfa60" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -2251,9 +2248,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" +checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jobserver" @@ -2338,7 +2335,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -2361,9 +2358,9 @@ checksum = "03087c2bad5e1034e8cace5926dec053fb3790248370865f5117a7d0213354c8" [[package]] name = "libc" -version = "0.2.170" +version = "0.2.171" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "875b3680cb2f8f71bdcf9a30f38d48282f5d3c95cbf9b3fa57269bb5d5c06828" +checksum = "c19937216e9d3aa9956d9bb8dfc0b0c8beb6058fc4f7a4dc4d850edf86a237d6" [[package]] name = "libfuzzer-sys" @@ -2387,9 +2384,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.21" +version = "1.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"df9b68e50e6e0b26f672573834882eb57759f6db9b3be2ea3c35c91188bb4eaa" +checksum = "8b70e7a7df205e92a1a4cd9aaae7898dac0aa555503cc0a649494d0d60e7651d" dependencies = [ "cc", "pkg-config", @@ -2426,9 +2423,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.26" +version = "0.4.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30bde2b3dc3671ae49d8e2e9f044c7c005836e7a023ee57cffa25ab82764bb9e" +checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" [[package]] name = "loole" @@ -2506,12 +2503,6 @@ dependencies = [ "xml5ever", ] -[[package]] -name = "match_cfg" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" - [[package]] name = "matchers" version = "0.1.0" @@ -2566,7 +2557,7 @@ checksum = "bd2209fff77f705b00c737016a48e73733d7fbccb8b007194db148f03561fb70" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -2713,7 +2704,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -2777,9 +2768,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.20.3" +version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "945462a4b81e43c4e3ba96bd7b49d834c6f61198356aa858733bc4acf3cbe62e" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "openssl-probe" @@ -2795,7 +2786,7 @@ checksum = "1e32339a5dc40459130b3bd269e9892439f55b33e772d2a9d402a789baaf4e8a" dependencies = [ "futures-core", "futures-sink", - "indexmap 2.7.1", + "indexmap 2.8.0", "js-sys", "once_cell", "pin-project-lite", @@ -2844,7 +2835,7 @@ dependencies = [ "opentelemetry", "ordered-float 4.6.0", "percent-encoding", - "rand", + "rand 0.8.5", "thiserror 1.0.69", "tokio", "tokio-stream", @@ -2921,7 +2912,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "346f04948ba92c43e8469c1ee6736c7563d71012b17d40745260fe106aac2166" dependencies = [ "base64ct", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -2951,7 +2942,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -2986,7 +2977,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" dependencies = [ "phf_shared", - "rand", + "rand 0.8.5", ] [[package]] @@ -3000,22 +2991,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.9" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfe2e71e1471fe07709406bf725f710b02927c9c54b2b5b2ec0e8087d97c327d" +checksum = "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.9" +version = "1.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6e859e6e5bd50440ab63c47e3ebabc90f26251f7c73c3d3e837b74a1cc3fa67" +checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -3042,9 +3033,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "png" @@ -3067,9 +3058,9 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.20" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ "zerocopy", ] @@ -3082,28 +3073,28 @@ checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" [[package]] name = "prettyplease" -version = "0.2.29" +version = "0.2.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6924ced06e1f7dfe3fa48d57b9f74f55d8915f5036121bef647ef4b204895fac" +checksum = "5316f57387668042f561aae71480de936257848f9c43ce528e311d89a07cadeb" dependencies = [ "proc-macro2", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] name = "proc-macro-crate" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" +checksum = "edce586971a4dfaa28950c6f18ed55e0406c1ab88bbce2c6f6293a7aaba73d35" dependencies = [ "toml_edit", ] [[package]] name = "proc-macro2" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" +checksum = "a31971752e70b8b2686d7e46ec17fb38dad4051d94024c88df49b667caea9c84" dependencies = [ "unicode-ident", ] @@ -3116,7 +3107,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", "version_check", "yansi", ] @@ -3137,7 +3128,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a65f2e60fbf1063868558d69c6beacf412dc755f9fc020f514b7955fc914fe30" dependencies = [ "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -3160,7 +3151,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -3199,12 +3190,6 @@ dependencies = [ "bytemuck", ] -[[package]] -name = "quick-error" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" - [[package]] name = "quick-error" version = "2.0.1" @@ -3213,37 +3198,39 @@ checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" [[package]] name = "quinn" -version = "0.11.6" +version = "0.11.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62e96808277ec6f97351a2380e6c25114bc9e67037775464979f3037c92d05ef" +checksum = "c3bd15a6f2967aef83887dcb9fec0014580467e33720d073560cf015a5683012" dependencies = [ "bytes", + "cfg_aliases", "pin-project-lite", "quinn-proto", "quinn-udp", "rustc-hash 2.1.1", "rustls", "socket2", - "thiserror 2.0.11", + "thiserror 2.0.12", "tokio", "tracing", + "web-time 1.1.0", ] [[package]] name = "quinn-proto" -version = "0.11.9" +version = "0.11.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2fe5ef3495d7d2e377ff17b1a8ce2ee2ec2a18cde8b6ad6619d65d0701c135d" +checksum = "b820744eb4dc9b57a3398183639c511b5a26d2ed702cedd3febaa1393caa22cc" dependencies = [ "bytes", - 
"getrandom 0.2.15", - "rand", + "getrandom 0.3.2", + "rand 0.9.0", "ring", "rustc-hash 2.1.1", "rustls", "rustls-pki-types", "slab", - "thiserror 2.0.11", + "thiserror 2.0.12", "tinyvec", "tracing", "web-time 1.1.0", @@ -3251,9 +3238,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.10" +version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e46f3055866785f6b92bc6164b76be02ca8f2eb4b002c0354b28cf4c119e5944" +checksum = "541d0f57c6ec747a90738a52741d3221f7960e8ac2f0ff4b1a63680e033b4ab5" dependencies = [ "cfg_aliases", "libc", @@ -3265,13 +3252,19 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.38" +version = "1.0.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" dependencies = [ "proc-macro2", ] +[[package]] +name = "r-efi" +version = "5.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" + [[package]] name = "rand" version = "0.8.5" @@ -3279,8 +3272,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha", - "rand_core", + "rand_chacha 0.3.1", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3779b94aeb87e8bd4e834cee3650289ee9e0d5677f976ecdb6d219e5f4f6cd94" +dependencies = [ + "rand_chacha 0.9.0", + "rand_core 0.9.3", + "zerocopy", ] [[package]] @@ -3290,7 +3294,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core 0.9.3", ] [[package]] @@ -3302,6 +3316,15 @@ dependencies = [ "getrandom 0.2.15", ] +[[package]] +name = "rand_core" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" +dependencies = [ + "getrandom 0.3.2", +] + [[package]] name = "rav1e" version = "0.7.1" @@ -3328,8 +3351,8 @@ dependencies = [ "once_cell", "paste", "profiling", - "rand", - "rand_chacha", + "rand 0.8.5", + "rand_chacha 0.3.1", "simd_helpers", "system-deps", "thiserror 1.0.69", @@ -3346,7 +3369,7 @@ dependencies = [ "avif-serialize", "imgref", "loop9", - "quick-error 2.0.1", + "quick-error", "rav1e", "rayon", "rgb", @@ -3374,9 +3397,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.9" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82b568323e98e49e2a0899dcee453dd679fae22d69adf9b11dd508d1549b7e2f" +checksum = "0b8c0c260b63a8219631167be35e6a988e9554dbd323f8bd08439c8ed1302bd1" dependencies = [ "bitflags 2.9.0", ] @@ -3476,12 +3499,11 @@ dependencies = [ [[package]] name = "resolv-conf" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" +checksum = "48375394603e3dd4b2d64371f7148fd8c7baa2680e28741f2cb8d23b59e3d4c4" dependencies = [ - "hostname 0.3.1", - "quick-error 1.2.3", + "hostname", ] [[package]] @@ -3492,9 +3514,9 @@ checksum = "57397d16646700483b67d2dd6511d79318f9d057fdbd21a4066aeac8b41d310a" [[package]] name = "ring" -version = "0.17.12" +version = "0.17.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed9b823fa29b721a59671b41d6b06e66b29e0628e207e8b1c3ceeda701ec928d" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", @@ -3554,7 +3576,7 @@ dependencies = [ "serde", "serde_html_form", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.12", "url", "web-time 1.1.0", ] @@ -3570,11 +3592,11 @@ dependencies = [ "form_urlencoded", "getrandom 0.2.15", "http", - "indexmap 2.7.1", + "indexmap 2.8.0", "js_int", "konst", "percent-encoding", - "rand", + "rand 0.8.5", "regex", "ruma-identifiers-validation", "ruma-macros", @@ -3582,7 +3604,7 @@ dependencies = [ "serde_html_form", "serde_json", "smallvec", - "thiserror 2.0.11", + "thiserror 2.0.12", "time", "tracing", "url", @@ -3597,7 +3619,7 @@ version = "0.28.1" source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "as_variant", - "indexmap 2.7.1", + "indexmap 2.8.0", "js_int", "js_option", "percent-encoding", @@ -3609,7 +3631,7 @@ dependencies = [ "serde", "serde_json", "smallvec", - "thiserror 2.0.11", + "thiserror 2.0.12", "tracing", "url", "web-time 1.1.0", @@ -3629,12 +3651,12 @@ dependencies = [ "js_int", "memchr", "mime", - "rand", + "rand 0.8.5", "ruma-common", "ruma-events", "serde", "serde_json", - "thiserror 2.0.11", + "thiserror 2.0.12", "tracing", ] @@ -3644,7 +3666,7 @@ version = "0.9.5" source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" dependencies = [ "js_int", - "thiserror 2.0.11", + "thiserror 2.0.12", ] [[package]] @@ -3668,7 +3690,7 @@ dependencies = [ "quote", "ruma-identifiers-validation", "serde", - "syn 2.0.98", + "syn 2.0.100", "toml", ] @@ -3692,12 +3714,12 @@ dependencies = [ "base64 0.22.1", "ed25519-dalek", "pkcs8", - "rand", + "rand 0.8.5", "ruma-common", "serde_json", "sha2", "subslice", - "thiserror 2.0.11", + "thiserror 2.0.12", ] [[package]] @@ -3768,9 +3790,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.23" +version = "0.23.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47796c98c480fce5406ef69d1c76378375492c3b0a0de587be0c1d9feb12f395" +checksum = "822ee9188ac4ec04a2f0531e55d035fb2de73f18b41a63c70c2712503b6fb13c" dependencies = [ "aws-lc-rs", "log", @@ -3814,9 +3836,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.8" +version = "0.103.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" +checksum = "fef8b8769aaccf73098557a87cd1816b4f9c7c16811c9c77142aa695c16f2c03" dependencies = [ "aws-lc-rs", "ring", @@ -3826,9 +3848,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" +checksum = 
"eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" [[package]] name = "rustyline-async" @@ -3840,16 +3862,16 @@ dependencies = [ "futures-util", "pin-project", "thingbuf", - "thiserror 2.0.11", + "thiserror 2.0.12", "unicode-segmentation", "unicode-width 0.2.0", ] [[package]] name = "ryu" -version = "1.0.19" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea1a2d0a644769cc99faa24c3ad26b379b786fe7c36fd3c546254801650e6dd" +checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" [[package]] name = "sanitize-filename" @@ -3909,9 +3931,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.25" +version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f79dfe2d285b0488816f30e700a7438c5a73d816b5b7d3ac72fbc48b0d185e03" +checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" [[package]] name = "sentry" @@ -3953,7 +3975,7 @@ version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eba8754ec3b9279e00aa6d64916f211d44202370a1699afde1db2c16cbada089" dependencies = [ - "hostname 0.4.0", + "hostname", "libc", "os_info", "rustc_version", @@ -3968,7 +3990,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9f8b6dcd4fbae1e3e22b447f32670360b27e31b62ab040f7fb04e0f80c04d92" dependencies = [ "once_cell", - "rand", + "rand 0.8.5", "sentry-types", "serde", "serde_json", @@ -4039,7 +4061,7 @@ checksum = "a71ed3a389948a6a6d92b98e997a2723ca22f09660c5a7b7388ecd509a70a527" dependencies = [ "debugid", "hex", - "rand", + "rand 0.8.5", "serde", "serde_json", "thiserror 1.0.69", @@ -4050,22 +4072,22 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.218" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8dfc9d19bdbf6d17e22319da49161d5d0108e4188e8b680aef6299eed22df60" +checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.218" +version = "1.0.219" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f09503e191f4e797cb8aac08e9a4a4695c5edf6a2e70e376d961ddd5c969f82b" +checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -4075,7 +4097,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d2de91cf02bbc07cde38891769ccd5d4f073d22a40683aa4bc7a95781aaa2c4" dependencies = [ "form_urlencoded", - "indexmap 2.7.1", + "indexmap 2.8.0", "itoa", "ryu", "serde", @@ -4083,9 +4105,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.139" +version = "1.0.140" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44f86c3acccc9c65b153fe1b85a3be07fe5515274ec9f0653b4a0875731c72a6" +checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" dependencies = [ "itoa", "memchr", @@ -4095,9 +4117,9 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.16" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" +checksum = "59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a" dependencies = [ "itoa", "serde", @@ -4140,7 +4162,7 @@ version = "0.9.34+deprecated" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.7.1", + "indexmap 2.8.0", "itoa", "ryu", "serde", @@ -4220,7 +4242,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -4306,9 +4328,9 @@ checksum = "f42444fea5b87a39db4218d9422087e66a85d0e7a0963a439b07bcdf91804006" [[package]] name = "string_cache" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "938d512196766101d333398efde81bc1f37b00cb42c2f8350e5df639f040bbbe" +checksum = "bf776ba3fa74f83bf4b63c3dcbbf82173db2632ed8452cb2d891d33f459de70f" dependencies = [ "new_debug_unreachable", "parking_lot", @@ -4357,9 +4379,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.98" +version = "2.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36147f1a48ae0ec2b5b3bc5b537d267457555a10dc06f3dbc8cb11ba3006d3b1" +checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" dependencies = [ "proc-macro2", "quote", @@ -4383,7 +4405,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -4453,11 +4475,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d452f284b73e6d76dd36758a0c8684b1d5be31f92b89d07fd5822175732206fc" +checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" dependencies = [ - "thiserror-impl 2.0.11", + "thiserror-impl 2.0.12", ] [[package]] @@ -4468,18 +4490,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] name = "thiserror-impl" -version = "2.0.11" +version = "2.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26afc1baea8a989337eeb52b6e72a039780ce45c3edfcc9c5b9d112feeb173c2" +checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -4555,9 +4577,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.37" +version = "0.3.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" +checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" dependencies = [ "deranged", "itoa", @@ -4570,15 +4592,15 @@ dependencies = [ [[package]] name = "time-core" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" +checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" [[package]] name = "time-macros" -version = "0.2.19" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" +checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" dependencies = [ "num-conv", "time-core", @@ -4596,9 +4618,9 @@ dependencies = [ [[package]] name = 
"tinyvec" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "022db8904dfa342efe721985167e9fcd16c29b226db4397ed752a761cfce81e8" +checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" dependencies = [ "tinyvec_macros", ] @@ -4611,9 +4633,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.43.0" +version = "1.44.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" +checksum = "f382da615b842244d4b8738c82ed1275e6c5dd90c459a30941cd07080b06c91a" dependencies = [ "backtrace", "bytes", @@ -4635,7 +4657,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -4685,9 +4707,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.13" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078" +checksum = "6b9590b93e6fcc1739458317cccd391ad3955e2bde8913edf6f95f9e65a8f034" dependencies = [ "bytes", "futures-core", @@ -4723,7 +4745,7 @@ version = "0.22.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17b4795ff5edd201c7cd6dca065ae59972ce77d1b80fa0a84d94950ece7d1474" dependencies = [ - "indexmap 2.7.1", + "indexmap 2.8.0", "serde", "serde_spanned", "toml_datetime", @@ -4771,7 +4793,7 @@ dependencies = [ "indexmap 1.9.3", "pin-project", "pin-project-lite", - "rand", + "rand 0.8.5", "slab", "tokio", "tokio-util", @@ -4835,7 +4857,6 @@ name = "tracing" version = "0.1.41" source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ - "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -4848,7 +4869,7 @@ source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -4969,9 +4990,9 @@ checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" [[package]] name = "unicode-ident" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00e2473a93778eb0bad35909dff6a10d28e63f792f16ed15e404fca9d5eeedbe" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" [[package]] name = "unicode-segmentation" @@ -5056,11 +5077,11 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "uuid" -version = "1.15.1" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0f540e3240398cce6128b64ba83fdbdd86129c16a3aa1a3a252efd66eb3d587" +checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" dependencies = [ - "getrandom 0.3.1", + "getrandom 0.3.2", "serde", ] @@ -5116,9 +5137,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasi" -version = "0.13.3+wasi-0.2.2" +version = "0.14.2+wasi-0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26816d2e1a4a36a2940b96c5296ce403917633dff8f3440e9b236ed6f6bacad2" +checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" dependencies = [ "wit-bindgen-rt", 
] @@ -5145,7 +5166,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", "wasm-bindgen-shared", ] @@ -5180,7 +5201,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5265,9 +5286,9 @@ dependencies = [ [[package]] name = "widestring" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" +checksum = "dd7cf3379ca1aac9eea11fba24fd7e315d621f8dfe35c8d7d2be8b793726e07d" [[package]] name = "wildmatch" @@ -5496,9 +5517,9 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7f4ea97f6f78012141bcdb6a216b2609f0979ada50b20ca5b52dde2eac2bb1" +checksum = "0e97b544156e9bebe1a0ffbc03484fc1ffe3100cbce3ffb17eac35f7cdd7ab36" dependencies = [ "memchr", ] @@ -5515,9 +5536,9 @@ dependencies = [ [[package]] name = "wit-bindgen-rt" -version = "0.33.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3268f3d866458b787f390cf61f4bbb563b922d091359f9608842999eaee3943c" +checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ "bitflags 2.9.0", ] @@ -5571,29 +5592,28 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", "synstructure", ] [[package]] name = "zerocopy" -version = "0.7.35" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879" dependencies = [ - "byteorder", "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.35" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -5613,7 +5633,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", "synstructure", ] @@ -5642,7 +5662,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.98", + "syn 2.0.100", ] [[package]] @@ -5656,19 +5676,20 @@ dependencies = [ [[package]] name = "zstd-safe" -version = "7.2.3" +version = "7.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3051792fbdc2e1e143244dc28c60f73d8470e93f3f9cbd0ead44da5ed802722" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.14+zstd.1.5.7" +version = "2.0.15+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fb060d4926e4ac3a3ad15d864e99ceb5f343c6b34f5bd6d81ae6ed417311be5" +checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" dependencies = [ + "bindgen 0.71.1", "cc", "pkg-config", ] 
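For context on the workspace tokio bump to 1.44.1 in the Cargo.toml hunk just below: the next patch in this series registers before/after task-poll hooks on the runtime builder. The stand-alone sketch here shows that builder API in isolation; it assumes a binary compiled with RUSTFLAGS="--cfg tokio_unstable" and tokio's "rt-multi-thread" feature, and the println! messages, worker count, and placeholder workload are illustrative rather than taken from the patches.

use tokio::runtime::{Builder, Runtime};

fn main() -> std::io::Result<()> {
	let mut builder = Builder::new_multi_thread();
	builder.worker_threads(2);

	// The task hooks only exist under `--cfg tokio_unstable`, mirroring the
	// cfg-gating used by the runtime patch further below.
	#[cfg(tokio_unstable)]
	builder
		.on_task_spawn(|meta| println!("spawn {}", meta.id()))
		.on_before_task_poll(|meta| println!("enter {}", meta.id()))
		.on_after_task_poll(|meta| println!("leave {}", meta.id()))
		.on_task_terminate(|meta| println!("terminate {}", meta.id()));

	let runtime: Runtime = builder.build()?;
	runtime.block_on(async {
		tokio::spawn(async { /* placeholder workload */ }).await.ok();
	});

	Ok(())
}

In the patch itself the equivalent hooks are thin functions annotated with #[tracing::instrument] at trace level, so each task poll is reported as a span rather than printed.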
diff --git a/Cargo.toml b/Cargo.toml index 8b49c3b8..ab7a935c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -242,7 +242,7 @@ default-features = false features = ["std", "async-await"] [workspace.dependencies.tokio] -version = "1.42.0" +version = "1.44.1" default-features = false features = [ "fs", From 5bf5afaec83d4e68cbfd5220cd760a7940e7dda5 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 30 Mar 2025 01:54:55 +0000 Subject: [PATCH 165/310] instrument tokio before/after poll hooks Signed-off-by: Jason Volk --- src/main/runtime.rs | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/main/runtime.rs b/src/main/runtime.rs index b1657289..920476db 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -61,6 +61,8 @@ pub(super) fn new(args: &Args) -> Result { #[cfg(tokio_unstable)] builder .on_task_spawn(task_spawn) + .on_before_task_poll(task_enter) + .on_after_task_poll(task_leave) .on_task_terminate(task_terminate); #[cfg(tokio_unstable)] @@ -215,3 +217,25 @@ fn task_spawn(meta: &tokio::runtime::TaskMeta<'_>) {} ), )] fn task_terminate(meta: &tokio::runtime::TaskMeta<'_>) {} + +#[cfg(tokio_unstable)] +#[tracing::instrument( + name = "enter", + level = "trace", + skip_all, + fields( + id = %meta.id() + ), +)] +fn task_enter(meta: &tokio::runtime::TaskMeta<'_>) {} + +#[cfg(tokio_unstable)] +#[tracing::instrument( + name = "leave", + level = "trace", + skip_all, + fields( + id = %meta.id() + ), +)] +fn task_leave(meta: &tokio::runtime::TaskMeta<'_>) {} From dc6e9e74d9e9fb0bbdddb35c6b00d16544860095 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 30 Mar 2025 01:56:00 +0000 Subject: [PATCH 166/310] add spans for for jemalloc mallctl points Signed-off-by: Jason Volk --- src/core/alloc/je.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index 51caf3a3..2424e99c 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -335,6 +335,12 @@ where Ok(res) } +#[tracing::instrument( + name = "get", + level = "trace" + skip_all, + fields(?key) +)] fn get(key: &Key) -> Result where T: Copy + Debug, @@ -346,6 +352,12 @@ where unsafe { mallctl::raw::read_mib(key.as_slice()) }.map_err(map_err) } +#[tracing::instrument( + name = "xchg", + level = "trace" + skip_all, + fields(?key, ?val) +)] fn xchg(key: &Key, val: T) -> Result where T: Copy + Debug, From bee4c6255a815a9c7bc577d7afa66f69e26ea735 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 30 Mar 2025 21:19:47 +0000 Subject: [PATCH 167/310] reorg PduEvent strip tools and callsites Signed-off-by: Jason Volk --- src/api/client/context.rs | 6 +- src/api/client/message.rs | 2 +- src/api/client/room/event.rs | 2 +- src/api/client/room/initial_sync.rs | 2 +- src/api/client/search.rs | 2 +- src/api/client/sync/v3.rs | 8 +- src/api/client/sync/v4.rs | 4 +- src/api/client/sync/v5.rs | 4 +- src/api/client/threads.rs | 2 +- src/core/pdu/strip.rs | 182 ++++++++++++++++++++-------- src/service/rooms/spaces/mod.rs | 5 +- src/service/rooms/state/mod.rs | 2 +- src/service/sending/sender.rs | 2 +- 13 files changed, 152 insertions(+), 71 deletions(-) diff --git a/src/api/client/context.rs b/src/api/client/context.rs index b109711e..1dda7b53 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -182,7 +182,7 @@ pub(crate) async fn get_context_route( .await; Ok(get_context::v3::Response { - event: base_event.map(at!(1)).as_ref().map(PduEvent::to_room_event), + event: base_event.map(at!(1)).map(PduEvent::into_room_event), start: events_before 
.last() @@ -201,13 +201,13 @@ pub(crate) async fn get_context_route( events_before: events_before .into_iter() .map(at!(1)) - .map(|pdu| pdu.to_room_event()) + .map(PduEvent::into_room_event) .collect(), events_after: events_after .into_iter() .map(at!(1)) - .map(|pdu| pdu.to_room_event()) + .map(PduEvent::into_room_event) .collect(), state, diff --git a/src/api/client/message.rs b/src/api/client/message.rs index c755cc47..03c7335a 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -157,7 +157,7 @@ pub(crate) async fn get_message_events_route( let chunk = events .into_iter() .map(at!(1)) - .map(|pdu| pdu.to_room_event()) + .map(PduEvent::into_room_event) .collect(); Ok(get_message_events::v3::Response { diff --git a/src/api/client/room/event.rs b/src/api/client/room/event.rs index 84b591cd..2b115b5c 100644 --- a/src/api/client/room/event.rs +++ b/src/api/client/room/event.rs @@ -40,5 +40,5 @@ pub(crate) async fn get_room_event_route( event.add_age().ok(); - Ok(get_room_event::v3::Response { event: event.to_room_event() }) + Ok(get_room_event::v3::Response { event: event.into_room_event() }) } diff --git a/src/api/client/room/initial_sync.rs b/src/api/client/room/initial_sync.rs index e4c76ae0..ca63610b 100644 --- a/src/api/client/room/initial_sync.rs +++ b/src/api/client/room/initial_sync.rs @@ -55,7 +55,7 @@ pub(crate) async fn room_initial_sync_route( chunk: events .into_iter() .map(at!(1)) - .map(|pdu| pdu.to_room_event()) + .map(PduEvent::into_room_event) .collect(), }; diff --git a/src/api/client/search.rs b/src/api/client/search.rs index f3366843..d66df881 100644 --- a/src/api/client/search.rs +++ b/src/api/client/search.rs @@ -143,7 +143,7 @@ async fn category_room_events( .map(at!(2)) .flatten() .stream() - .map(|pdu| pdu.to_room_event()) + .map(PduEvent::into_room_event) .map(|result| SearchResult { rank: None, result: Some(result), diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 70c4c6a7..a8248f95 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -461,7 +461,7 @@ async fn handle_left_room( events: Vec::new(), }, state: RoomState { - events: vec![event.to_sync_state_event()], + events: vec![event.into_sync_state_event()], }, })); } @@ -546,7 +546,7 @@ async fn handle_left_room( continue; } - left_state_events.push(pdu.to_sync_state_event()); + left_state_events.push(pdu.into_sync_state_event()); } } @@ -865,8 +865,8 @@ async fn load_joined_room( }, state: RoomState { events: state_events - .iter() - .map(PduEvent::to_sync_state_event) + .into_iter() + .map(PduEvent::into_sync_state_event) .collect(), }, ephemeral: Ephemeral { events: edus }, diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 5fdcbab8..7e902973 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -6,7 +6,7 @@ use std::{ use axum::extract::State; use conduwuit::{ - Error, PduCount, Result, debug, error, extract_variant, + Error, PduCount, PduEvent, Result, debug, error, extract_variant, utils::{ BoolExt, IterStream, ReadyExt, TryFutureExtExt, math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, @@ -634,7 +634,7 @@ pub(crate) async fn sync_events_v4_route( .state_accessor .room_state_get(room_id, &state.0, &state.1) .await - .map(|s| s.to_sync_state_event()) + .map(PduEvent::into_sync_state_event) .ok() }) .collect() diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index b4c1b815..48b41b21 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -6,7 +6,7 
@@ use std::{ use axum::extract::State; use conduwuit::{ - Error, Result, TypeStateKey, debug, error, extract_variant, trace, + Error, PduEvent, Result, TypeStateKey, debug, error, extract_variant, trace, utils::{ BoolExt, IterStream, ReadyExt, TryFutureExtExt, math::{ruma_from_usize, usize_from_ruma}, @@ -507,7 +507,7 @@ async fn process_rooms( .state_accessor .room_state_get(room_id, &state.0, &state.1) .await - .map(|s| s.to_sync_state_event()) + .map(PduEvent::into_sync_state_event) .ok() }) .collect() diff --git a/src/api/client/threads.rs b/src/api/client/threads.rs index d25e52c0..00bfe553 100644 --- a/src/api/client/threads.rs +++ b/src/api/client/threads.rs @@ -53,7 +53,7 @@ pub(crate) async fn get_threads_route( chunk: threads .into_iter() .map(at!(1)) - .map(|pdu| pdu.to_room_event()) + .map(PduEvent::into_room_event) .collect(), }) } diff --git a/src/core/pdu/strip.rs b/src/core/pdu/strip.rs index 4e7c5b83..3683caaa 100644 --- a/src/core/pdu/strip.rs +++ b/src/core/pdu/strip.rs @@ -10,35 +10,18 @@ use serde_json::{json, value::Value as JsonValue}; use crate::implement; -#[must_use] -#[implement(super::Pdu)] -pub fn to_sync_room_event(&self) -> Raw { - let (redacts, content) = self.copy_redacts(); - let mut json = json!({ - "content": content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - if let Some(state_key) = &self.state_key { - json["state_key"] = json!(state_key); - } - if let Some(redacts) = &redacts { - json["redacts"] = json!(redacts); - } - - serde_json::from_value(json).expect("Raw::from_value always works") -} - /// This only works for events that are also AnyRoomEvents. #[must_use] #[implement(super::Pdu)] -pub fn to_any_event(&self) -> Raw { +pub fn into_any_event(self) -> Raw { + serde_json::from_value(self.into_any_event_value()).expect("Raw::from_value always works") +} + +/// This only works for events that are also AnyRoomEvents. 
+#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_any_event_value(self) -> JsonValue { let (redacts, content) = self.copy_redacts(); let mut json = json!({ "content": content, @@ -59,12 +42,24 @@ pub fn to_any_event(&self) -> Raw { json["redacts"] = json!(redacts); } - serde_json::from_value(json).expect("Raw::from_value always works") + json } -#[must_use] #[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_room_event(self) -> Raw { self.to_room_event() } + +#[implement(super::Pdu)] +#[must_use] pub fn to_room_event(&self) -> Raw { + serde_json::from_value(self.to_room_event_value()).expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn to_room_event_value(&self) -> JsonValue { let (redacts, content) = self.copy_redacts(); let mut json = json!({ "content": content, @@ -85,12 +80,25 @@ pub fn to_room_event(&self) -> Raw { json["redacts"] = json!(redacts); } - serde_json::from_value(json).expect("Raw::from_value always works") + json } -#[must_use] #[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_message_like_event(self) -> Raw { self.to_message_like_event() } + +#[implement(super::Pdu)] +#[must_use] pub fn to_message_like_event(&self) -> Raw { + serde_json::from_value(self.to_message_like_event_value()) + .expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn to_message_like_event_value(&self) -> JsonValue { let (redacts, content) = self.copy_redacts(); let mut json = json!({ "content": content, @@ -111,11 +119,55 @@ pub fn to_message_like_event(&self) -> Raw { json["redacts"] = json!(redacts); } - serde_json::from_value(json).expect("Raw::from_value always works") + json } -#[must_use] #[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_sync_room_event(self) -> Raw { self.to_sync_room_event() } + +#[implement(super::Pdu)] +#[must_use] +pub fn to_sync_room_event(&self) -> Raw { + serde_json::from_value(self.to_sync_room_event_value()).expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn to_sync_room_event_value(&self) -> JsonValue { + let (redacts, content) = self.copy_redacts(); + let mut json = json!({ + "content": content, + "type": self.kind, + "event_id": self.event_id, + "sender": self.sender, + "origin_server_ts": self.origin_server_ts, + }); + + if let Some(unsigned) = &self.unsigned { + json["unsigned"] = json!(unsigned); + } + if let Some(state_key) = &self.state_key { + json["state_key"] = json!(state_key); + } + if let Some(redacts) = &redacts { + json["redacts"] = json!(redacts); + } + + json +} + +#[implement(super::Pdu)] +#[must_use] +pub fn into_state_event(self) -> Raw { + serde_json::from_value(self.into_state_event_value()).expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] pub fn into_state_event_value(self) -> JsonValue { let mut json = json!({ "content": self.content, @@ -134,15 +186,17 @@ pub fn into_state_event_value(self) -> JsonValue { json } -#[must_use] #[implement(super::Pdu)] -pub fn into_state_event(self) -> Raw { - serde_json::from_value(self.into_state_event_value()).expect("Raw::from_value always works") +#[must_use] +pub fn into_sync_state_event(self) -> Raw { + serde_json::from_value(self.into_sync_state_event_value()) + .expect("Raw::from_value always works") } -#[must_use] #[implement(super::Pdu)] -pub fn to_sync_state_event(&self) -> Raw { +#[must_use] +#[inline] +pub fn into_sync_state_event_value(self) -> JsonValue { 
let mut json = json!({ "content": self.content, "type": self.kind, @@ -156,39 +210,65 @@ pub fn to_sync_state_event(&self) -> Raw { json["unsigned"] = json!(unsigned); } - serde_json::from_value(json).expect("Raw::from_value always works") + json } -#[must_use] #[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_stripped_state_event(self) -> Raw { + self.to_stripped_state_event() +} + +#[implement(super::Pdu)] +#[must_use] pub fn to_stripped_state_event(&self) -> Raw { - let json = json!({ + serde_json::from_value(self.to_stripped_state_event_value()) + .expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn to_stripped_state_event_value(&self) -> JsonValue { + json!({ "content": self.content, "type": self.kind, "sender": self.sender, "state_key": self.state_key, - }); - - serde_json::from_value(json).expect("Raw::from_value always works") + }) } -#[must_use] #[implement(super::Pdu)] -pub fn to_stripped_spacechild_state_event(&self) -> Raw { - let json = json!({ +#[must_use] +pub fn into_stripped_spacechild_state_event(self) -> Raw { + serde_json::from_value(self.into_stripped_spacechild_state_event_value()) + .expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_stripped_spacechild_state_event_value(self) -> JsonValue { + json!({ "content": self.content, "type": self.kind, "sender": self.sender, "state_key": self.state_key, "origin_server_ts": self.origin_server_ts, - }); - - serde_json::from_value(json).expect("Raw::from_value always works") + }) } -#[must_use] #[implement(super::Pdu)] +#[must_use] pub fn into_member_event(self) -> Raw> { + serde_json::from_value(self.into_member_event_value()).expect("Raw::from_value always works") +} + +#[implement(super::Pdu)] +#[must_use] +#[inline] +pub fn into_member_event_value(self) -> JsonValue { let mut json = json!({ "content": self.content, "type": self.kind, @@ -204,5 +284,5 @@ pub fn into_member_event(self) -> Raw> { json["unsigned"] = json!(unsigned); } - serde_json::from_value(json).expect("Raw::from_value always works") + json } diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index af597445..a10fe7fc 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -6,7 +6,7 @@ use std::{fmt::Write, sync::Arc}; use async_trait::async_trait; use conduwuit::{ - Err, Error, Result, implement, + Err, Error, PduEvent, Result, implement, utils::{ IterStream, future::BoolExt, @@ -267,11 +267,12 @@ fn get_stripped_space_child_events<'a>( } if RoomId::parse(&state_key).is_ok() { - return Some(pdu.to_stripped_spacechild_state_event()); + return Some(pdu); } None }) + .map(PduEvent::into_stripped_spacechild_state_event) } /// Gets the summary of a space using either local or remote (federation) diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 56955497..803ba9d7 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -341,7 +341,7 @@ impl Service { .await .into_iter() .filter_map(Result::ok) - .map(|e| e.to_stripped_state_event()) + .map(PduEvent::into_stripped_state_event) .chain(once(event.to_stripped_state_event())) .collect() } diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 616f0846..fab02f6b 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -697,7 +697,7 @@ impl Service { match event { | SendingEvent::Pdu(pdu_id) => { if let Ok(pdu) = 
self.services.timeline.get_pdu_from_id(pdu_id).await { - pdu_jsons.push(pdu.to_room_event()); + pdu_jsons.push(pdu.into_room_event()); } }, | SendingEvent::Edu(edu) => From db99d3a001841db61bb79544912099b7346456b4 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 30 Mar 2025 01:58:14 +0000 Subject: [PATCH 168/310] remove recently-made-unnecessary unsafe block Signed-off-by: Jason Volk --- src/core/utils/sys/storage.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/core/utils/sys/storage.rs b/src/core/utils/sys/storage.rs index b11df7bb..452b04b2 100644 --- a/src/core/utils/sys/storage.rs +++ b/src/core/utils/sys/storage.rs @@ -123,10 +123,7 @@ pub fn dev_from_path(path: &Path) -> Result<(dev_t, dev_t)> { let stat = fs::metadata(path)?; let dev_id = stat.dev().try_into()?; - - // SAFETY: These functions may not need to be marked as unsafe. - // see: https://github.com/rust-lang/libc/issues/3759 - let (major, minor) = unsafe { (libc::major(dev_id), libc::minor(dev_id)) }; + let (major, minor) = (libc::major(dev_id), libc::minor(dev_id)); Ok((major.try_into()?, minor.try_into()?)) } From d60920c72890b7ebf70d47bfc37f4477fa9716aa Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 30 Mar 2025 22:59:29 +0000 Subject: [PATCH 169/310] workaround some large type name length issues Signed-off-by: Jason Volk --- src/api/mod.rs | 1 + src/core/error/err.rs | 1 + src/core/mod.rs | 2 ++ src/core/state_res/mod.rs | 4 ---- src/core/utils/mod.rs | 4 ++++ src/database/mod.rs | 2 ++ src/main/main.rs | 2 ++ src/router/mod.rs | 2 ++ src/service/mod.rs | 1 + src/service/rooms/event_handler/mod.rs | 15 +++--------- .../rooms/event_handler/resolve_state.rs | 6 +++-- src/service/rooms/spaces/mod.rs | 24 ++++++++++--------- .../rooms/state_accessor/room_state.rs | 4 ++-- src/service/rooms/state_accessor/state.rs | 5 +++- 14 files changed, 41 insertions(+), 32 deletions(-) diff --git a/src/api/mod.rs b/src/api/mod.rs index 8df17a59..090cf897 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -1,3 +1,4 @@ +#![type_length_limit = "16384"] //TODO: reduce me #![allow(clippy::toplevel_ref_arg)] pub mod client; diff --git a/src/core/error/err.rs b/src/core/error/err.rs index 0962c4ee..9c24d3b4 100644 --- a/src/core/error/err.rs +++ b/src/core/error/err.rs @@ -136,6 +136,7 @@ macro_rules! err_log { } #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! 
err_lev { (debug_warn) => { if $crate::debug::logging() { diff --git a/src/core/mod.rs b/src/core/mod.rs index cd56774a..80ebbdcb 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -1,3 +1,5 @@ +#![type_length_limit = "12288"] + pub mod alloc; pub mod config; pub mod debug; diff --git a/src/core/state_res/mod.rs b/src/core/state_res/mod.rs index 2020d65c..1db92e59 100644 --- a/src/core/state_res/mod.rs +++ b/src/core/state_res/mod.rs @@ -149,7 +149,6 @@ where &event_fetch, parallel_fetches, ) - .boxed() .await?; debug!(count = sorted_control_levels.len(), "power events"); @@ -164,7 +163,6 @@ where &event_fetch, parallel_fetches, ) - .boxed() .await?; debug!(count = resolved_control.len(), "resolved power events"); @@ -192,7 +190,6 @@ where let sorted_left_events = mainline_sort(&events_to_resolve, power_event.cloned(), &event_fetch, parallel_fetches) - .boxed() .await?; trace!(list = ?sorted_left_events, "events left, sorted"); @@ -204,7 +201,6 @@ where &event_fetch, parallel_fetches, ) - .boxed() .await?; // Add unconflicted state to the resolved state diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 53460c59..7593990c 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -173,6 +173,7 @@ macro_rules! is_equal { /// Functor for |x| *x.$i #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! deref_at { ($idx:tt) => { |t| *t.$idx @@ -181,6 +182,7 @@ macro_rules! deref_at { /// Functor for |ref x| x.$i #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! ref_at { ($idx:tt) => { |ref t| &t.$idx @@ -189,6 +191,7 @@ macro_rules! ref_at { /// Functor for |&x| x.$i #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! val_at { ($idx:tt) => { |&t| t.$idx @@ -197,6 +200,7 @@ macro_rules! val_at { /// Functor for |x| x.$i #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! 
at { ($idx:tt) => { |t| t.$idx diff --git a/src/database/mod.rs b/src/database/mod.rs index 1262a79a..ffcefee9 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -1,3 +1,5 @@ +#![type_length_limit = "3072"] + extern crate conduwuit_core as conduwuit; extern crate rust_rocksdb as rocksdb; diff --git a/src/main/main.rs b/src/main/main.rs index fbc63b17..52f40384 100644 --- a/src/main/main.rs +++ b/src/main/main.rs @@ -1,3 +1,5 @@ +#![type_length_limit = "49152"] //TODO: reduce me + pub(crate) mod clap; mod logging; mod mods; diff --git a/src/router/mod.rs b/src/router/mod.rs index f64dcb67..7038c5df 100644 --- a/src/router/mod.rs +++ b/src/router/mod.rs @@ -1,3 +1,5 @@ +#![type_length_limit = "32768"] //TODO: reduce me + mod layers; mod request; mod router; diff --git a/src/service/mod.rs b/src/service/mod.rs index 0bde0255..8f4a84b0 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,3 +1,4 @@ +#![type_length_limit = "2048"] #![allow(refining_impl_trait)] mod manager; diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 4944f3ec..45675da8 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -18,11 +18,7 @@ use std::{ }; use async_trait::async_trait; -use conduwuit::{ - Err, PduEvent, Result, RoomVersion, Server, - utils::{MutexMap, TryFutureExtExt}, -}; -use futures::TryFutureExt; +use conduwuit::{Err, PduEvent, Result, RoomVersion, Server, utils::MutexMap}; use ruma::{ OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, events::room::create::RoomCreateEventContent, @@ -103,13 +99,8 @@ impl Service { self.services.timeline.pdu_exists(&event_id).await } - async fn event_fetch(&self, event_id: OwnedEventId) -> Option> { - self.services - .timeline - .get_pdu(&event_id) - .map_ok(Arc::new) - .ok() - .await + async fn event_fetch(&self, event_id: OwnedEventId) -> Option { + self.services.timeline.get_pdu(&event_id).await.ok() } } diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index 9033c3a8..b3a7a71b 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -110,12 +110,14 @@ pub async fn state_resolution<'a, StateSets>( where StateSets: Iterator> + Clone + Send, { + let event_fetch = |event_id| self.event_fetch(event_id); + let event_exists = |event_id| self.event_exists(event_id); state_res::resolve( room_version, state_sets, auth_chain_sets, - &|event_id| self.event_fetch(event_id), - &|event_id| self.event_exists(event_id), + &event_fetch, + &event_exists, automatic_width(), ) .map_err(|e| err!(error!("State resolution failed: {e:?}"))) diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index a10fe7fc..da52e095 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -9,7 +9,7 @@ use conduwuit::{ Err, Error, PduEvent, Result, implement, utils::{ IterStream, - future::BoolExt, + future::{BoolExt, TryExtExt}, math::usize_from_f64, stream::{BroadbandExt, ReadyExt}, }, @@ -36,7 +36,7 @@ use ruma::{ use tokio::sync::{Mutex, MutexGuard}; pub use self::pagination_token::PaginationToken; -use crate::{Dep, conduwuit::utils::TryFutureExtExt, rooms, sending}; +use crate::{Dep, rooms, sending}; pub struct Service { services: Services, @@ -141,7 +141,8 @@ pub async fn get_summary_and_children_local( } let children_pdus: Vec<_> = self - .get_stripped_space_child_events(current_room) + 
.get_space_child_events(current_room) + .map(PduEvent::into_stripped_spacechild_state_event) .collect() .await; @@ -235,10 +236,10 @@ async fn get_summary_and_children_federation( /// Simply returns the stripped m.space.child events of a room #[implement(Service)] -fn get_stripped_space_child_events<'a>( +fn get_space_child_events<'a>( &'a self, room_id: &'a RoomId, -) -> impl Stream> + Send + 'a { +) -> impl Stream + Send + 'a { self.services .state .get_room_shortstatehash(room_id) @@ -246,6 +247,7 @@ fn get_stripped_space_child_events<'a>( self.services .state_accessor .state_keys_with_ids(current_shortstatehash, &StateEventType::SpaceChild) + .boxed() }) .map(Result::into_iter) .map(IterStream::stream) @@ -256,8 +258,8 @@ fn get_stripped_space_child_events<'a>( .timeline .get_pdu(&event_id) .map_ok(move |pdu| (state_key, pdu)) - .await .ok() + .await }) .ready_filter_map(move |(state_key, pdu)| { if let Ok(content) = pdu.get_content::() { @@ -266,13 +268,12 @@ fn get_stripped_space_child_events<'a>( } } - if RoomId::parse(&state_key).is_ok() { - return Some(pdu); + if RoomId::parse(&state_key).is_err() { + return None; } - None + Some(pdu) }) - .map(PduEvent::into_stripped_spacechild_state_event) } /// Gets the summary of a space using either local or remote (federation) @@ -501,7 +502,8 @@ async fn cache_insert( allowed_room_ids, room_id: room_id.clone(), children_state: self - .get_stripped_space_child_events(&room_id) + .get_space_child_events(&room_id) + .map(PduEvent::into_stripped_spacechild_state_event) .collect() .await, }; diff --git a/src/service/rooms/state_accessor/room_state.rs b/src/service/rooms/state_accessor/room_state.rs index ff26b33a..642cd5d2 100644 --- a/src/service/rooms/state_accessor/room_state.rs +++ b/src/service/rooms/state_accessor/room_state.rs @@ -31,7 +31,7 @@ pub fn room_state_full<'a>( self.services .state .get_room_shortstatehash(room_id) - .map_ok(|shortstatehash| self.state_full(shortstatehash).map(Ok)) + .map_ok(|shortstatehash| self.state_full(shortstatehash).map(Ok).boxed()) .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) .try_flatten_stream() } @@ -46,7 +46,7 @@ pub fn room_state_full_pdus<'a>( self.services .state .get_room_shortstatehash(room_id) - .map_ok(|shortstatehash| self.state_full_pdus(shortstatehash).map(Ok)) + .map_ok(|shortstatehash| self.state_full_pdus(shortstatehash).map(Ok).boxed()) .map_err(move |e| err!(Database("Missing state for {room_id:?}: {e:?}"))) .try_flatten_stream() } diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs index 02a6194e..8f2dd76f 100644 --- a/src/service/rooms/state_accessor/state.rs +++ b/src/service/rooms/state_accessor/state.rs @@ -235,6 +235,7 @@ pub fn state_keys_with_shortids<'a>( .ignore_err() .unzip() .map(|(ssks, sids): (Vec, Vec)| (ssks, sids)) + .boxed() .shared(); let shortstatekeys = short_ids @@ -390,8 +391,10 @@ pub fn state_full_shortids( .map(parse_compressed_state_event) .collect() }) - .map_ok(|vec: Vec<_>| vec.into_iter().try_stream()) + .map_ok(Vec::into_iter) + .map_ok(IterStream::try_stream) .try_flatten_stream() + .boxed() } #[implement(super::Service)] From d3b65af6163baed6e6f55922235ccc9e9f5a4e98 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 31 Mar 2025 02:28:01 +0000 Subject: [PATCH 170/310] remove several services.globals config wrappers Signed-off-by: Jason Volk --- src/api/client/account.rs | 10 ++++----- src/api/client/membership.rs | 4 ++-- src/api/client/presence.rs | 42 
++++++++++------------------------- src/api/client/profile.rs | 4 ++-- src/api/client/read_marker.rs | 4 ++-- src/api/client/room/create.rs | 2 +- src/api/client/send.rs | 3 +-- src/api/client/sync/v3.rs | 6 ++--- src/api/client/typing.rs | 2 +- src/api/client/unstable.rs | 8 +++---- src/api/server/invite.rs | 3 +-- src/service/globals/mod.rs | 30 ------------------------- 12 files changed, 34 insertions(+), 84 deletions(-) diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 5dd622d7..efa8b142 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -146,7 +146,7 @@ pub(crate) async fn register_route( let is_guest = body.kind == RegistrationKind::Guest; let emergency_mode_enabled = services.config.emergency_password.is_some(); - if !services.globals.allow_registration() && body.appservice_info.is_none() { + if !services.config.allow_registration && body.appservice_info.is_none() { match (body.username.as_ref(), body.initial_device_display_name.as_ref()) { | (Some(username), Some(device_display_name)) => { info!(%is_guest, user = %username, device_name = %device_display_name, "Rejecting registration attempt as registration is disabled"); @@ -166,8 +166,8 @@ pub(crate) async fn register_route( } if is_guest - && (!services.globals.allow_guest_registration() - || (services.globals.allow_registration() + && (!services.config.allow_guest_registration + || (services.config.allow_registration && services.globals.registration_token.is_some())) { info!( @@ -441,7 +441,7 @@ pub(crate) async fn register_route( } // log in conduit admin channel if a guest registered - if body.appservice_info.is_none() && is_guest && services.globals.log_guest_registrations() { + if body.appservice_info.is_none() && is_guest && services.config.log_guest_registrations { debug_info!("New guest user \"{user_id}\" registered on this server."); if !device_display_name.is_empty() { @@ -490,7 +490,7 @@ pub(crate) async fn register_route( if body.appservice_info.is_none() && !services.server.config.auto_join_rooms.is_empty() - && (services.globals.allow_guests_auto_join_rooms() || !is_guest) + && (services.config.allow_guests_auto_join_rooms || !is_guest) { for room in &services.server.config.auto_join_rooms { let Ok(room_id) = services.rooms.alias.resolve(room).await else { diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 11395e83..315a363c 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -491,7 +491,7 @@ pub(crate) async fn invite_user_route( ) -> Result { let sender_user = body.sender_user(); - if !services.users.is_admin(sender_user).await && services.globals.block_non_admin_invites() { + if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites { info!( "User {sender_user} is not an admin and attempted to send an invite to room {}", &body.room_id @@ -1628,7 +1628,7 @@ pub(crate) async fn invite_helper( reason: Option, is_direct: bool, ) -> Result { - if !services.users.is_admin(sender_user).await && services.globals.block_non_admin_invites() { + if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites { info!( "User {sender_user} is not an admin and attempted to send an invite to room \ {room_id}" diff --git a/src/api/client/presence.rs b/src/api/client/presence.rs index 9b41a721..548e5cce 100644 --- a/src/api/client/presence.rs +++ b/src/api/client/presence.rs @@ -1,12 +1,10 @@ use std::time::Duration; use axum::extract::State; -use ruma::api::client::{ - 
error::ErrorKind, - presence::{get_presence, set_presence}, -}; +use conduwuit::{Err, Result}; +use ruma::api::client::presence::{get_presence, set_presence}; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `PUT /_matrix/client/r0/presence/{userId}/status` /// @@ -15,24 +13,17 @@ pub(crate) async fn set_presence_route( State(services): State, body: Ruma, ) -> Result { - if !services.globals.allow_local_presence() { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Presence is disabled on this server", - )); + if !services.config.allow_local_presence { + return Err!(Request(Forbidden("Presence is disabled on this server"))); } - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if sender_user != &body.user_id && body.appservice_info.is_none() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Not allowed to set presence of other users", - )); + if body.sender_user() != body.user_id && body.appservice_info.is_none() { + return Err!(Request(InvalidParam("Not allowed to set presence of other users"))); } services .presence - .set_presence(sender_user, &body.presence, None, None, body.status_msg.clone()) + .set_presence(body.sender_user(), &body.presence, None, None, body.status_msg.clone()) .await?; Ok(set_presence::v3::Response {}) @@ -47,21 +38,15 @@ pub(crate) async fn get_presence_route( State(services): State, body: Ruma, ) -> Result { - if !services.globals.allow_local_presence() { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Presence is disabled on this server", - )); + if !services.config.allow_local_presence { + return Err!(Request(Forbidden("Presence is disabled on this server",))); } - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut presence_event = None; - let has_shared_rooms = services .rooms .state_cache - .user_sees_user(sender_user, &body.user_id) + .user_sees_user(body.sender_user(), &body.user_id) .await; if has_shared_rooms { @@ -99,9 +84,6 @@ pub(crate) async fn get_presence_route( presence: presence.content.presence, }) }, - | _ => Err(Error::BadRequest( - ErrorKind::NotFound, - "Presence state for this user was not found", - )), + | _ => Err!(Request(NotFound("Presence state for this user was not found"))), } } diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index 12e5ebcc..5abe5b23 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -52,7 +52,7 @@ pub(crate) async fn set_displayname_route( update_displayname(&services, &body.user_id, body.displayname.clone(), &all_joined_rooms) .await; - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence @@ -147,7 +147,7 @@ pub(crate) async fn set_avatar_url_route( ) .await; - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs index 187616b4..b334e356 100644 --- a/src/api/client/read_marker.rs +++ b/src/api/client/read_marker.rs @@ -50,7 +50,7 @@ pub(crate) async fn set_read_marker_route( } // ping presence - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { services .presence .ping_presence(sender_user, &ruma::presence::PresenceState::Online) @@ -126,7 +126,7 @@ pub(crate) async fn create_receipt_route( } // ping presence - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { 
services .presence .ping_presence(sender_user, &ruma::presence::PresenceState::Online) diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index bb06e966..bdc5d5a5 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -372,7 +372,7 @@ pub(crate) async fn create_room_route( // Silently skip encryption events if they are not allowed if pdu_builder.event_type == TimelineEventType::RoomEncryption - && !services.globals.allow_encryption() + && !services.config.allow_encryption { continue; } diff --git a/src/api/client/send.rs b/src/api/client/send.rs index b01d1ed6..1af74f57 100644 --- a/src/api/client/send.rs +++ b/src/api/client/send.rs @@ -25,8 +25,7 @@ pub(crate) async fn send_message_event_route( let appservice_info = body.appservice_info.as_ref(); // Forbid m.room.encrypted if encryption is disabled - if MessageLikeEventType::RoomEncrypted == body.event_type - && !services.globals.allow_encryption() + if MessageLikeEventType::RoomEncrypted == body.event_type && !services.config.allow_encryption { return Err!(Request(Forbidden("Encryption has been disabled"))); } diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index a8248f95..530c1278 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -118,7 +118,7 @@ pub(crate) async fn sync_events_route( let (sender_user, sender_device) = body.sender(); // Presence update - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { services .presence .ping_presence(sender_user, &body.body.set_presence) @@ -279,8 +279,8 @@ pub(crate) async fn build_sync_events( }); let presence_updates: OptionFuture<_> = services - .globals - .allow_local_presence() + .config + .allow_local_presence .then(|| process_presence_updates(services, since, sender_user)) .into(); diff --git a/src/api/client/typing.rs b/src/api/client/typing.rs index ccfa7340..b02cc473 100644 --- a/src/api/client/typing.rs +++ b/src/api/client/typing.rs @@ -64,7 +64,7 @@ pub(crate) async fn create_typing_event_route( } // ping presence - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { services .presence .ping_presence(&body.user_id, &ruma::presence::PresenceState::Online) diff --git a/src/api/client/unstable.rs b/src/api/client/unstable.rs index 08da5a37..45ad103e 100644 --- a/src/api/client/unstable.rs +++ b/src/api/client/unstable.rs @@ -205,7 +205,7 @@ pub(crate) async fn delete_timezone_key_route( services.users.set_timezone(&body.user_id, None); - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence @@ -233,7 +233,7 @@ pub(crate) async fn set_timezone_key_route( services.users.set_timezone(&body.user_id, body.tz.clone()); - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence @@ -326,7 +326,7 @@ pub(crate) async fn set_profile_key_route( ); } - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence @@ -385,7 +385,7 @@ pub(crate) async fn delete_profile_key_route( .set_profile_key(&body.user_id, &body.key_name, None); } - if services.globals.allow_local_presence() { + if services.config.allow_local_presence { // Presence update services .presence diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index 463cb9ab..f4cc6eb2 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -103,8 
+103,7 @@ pub(crate) async fn create_invite_route( return Err!(Request(Forbidden("This room is banned on this homeserver."))); } - if services.globals.block_non_admin_invites() && !services.users.is_admin(&invited_user).await - { + if services.config.block_non_admin_invites && !services.users.is_admin(&invited_user).await { return Err!(Request(Forbidden("This server does not allow room invites."))); } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 1dd7db8e..a7a9be9d 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -111,20 +111,6 @@ impl Service { #[inline] pub fn server_name(&self) -> &ServerName { self.server.name.as_ref() } - pub fn allow_registration(&self) -> bool { self.server.config.allow_registration } - - pub fn allow_guest_registration(&self) -> bool { self.server.config.allow_guest_registration } - - pub fn allow_guests_auto_join_rooms(&self) -> bool { - self.server.config.allow_guests_auto_join_rooms - } - - pub fn log_guest_registrations(&self) -> bool { self.server.config.log_guest_registrations } - - pub fn allow_encryption(&self) -> bool { self.server.config.allow_encryption } - - pub fn allow_federation(&self) -> bool { self.server.config.allow_federation } - pub fn allow_public_room_directory_over_federation(&self) -> bool { self.server .config @@ -183,22 +169,6 @@ impl Service { pub fn forbidden_usernames(&self) -> &RegexSet { &self.server.config.forbidden_usernames } - pub fn allow_local_presence(&self) -> bool { self.server.config.allow_local_presence } - - pub fn allow_incoming_presence(&self) -> bool { self.server.config.allow_incoming_presence } - - pub fn allow_outgoing_presence(&self) -> bool { self.server.config.allow_outgoing_presence } - - pub fn allow_incoming_read_receipts(&self) -> bool { - self.server.config.allow_incoming_read_receipts - } - - pub fn allow_outgoing_read_receipts(&self) -> bool { - self.server.config.allow_outgoing_read_receipts - } - - pub fn block_non_admin_invites(&self) -> bool { self.server.config.block_non_admin_invites } - /// checks if `user_id` is local to us via server_name comparison #[inline] pub fn user_is_local(&self, user_id: &UserId) -> bool { From 3f0f89cddb28041ddeec94d8c80410a04153235b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 31 Mar 2025 04:25:48 +0000 Subject: [PATCH 171/310] use async_trait without axum re-export Signed-off-by: Jason Volk --- Cargo.lock | 1 + src/api/Cargo.toml | 1 + src/api/router/args.rs | 3 ++- 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 8c0e797b..aa639b30 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -766,6 +766,7 @@ dependencies = [ name = "conduwuit_api" version = "0.5.0" dependencies = [ + "async-trait", "axum", "axum-client-ip", "axum-extra", diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index 385e786f..7890561c 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -35,6 +35,7 @@ brotli_compression = [ ] [dependencies] +async-trait.workspace = true axum-client-ip.workspace = true axum-extra.workspace = true axum.workspace = true diff --git a/src/api/router/args.rs b/src/api/router/args.rs index 65a68fa4..26713dcc 100644 --- a/src/api/router/args.rs +++ b/src/api/router/args.rs @@ -1,6 +1,7 @@ use std::{mem, ops::Deref}; -use axum::{async_trait, body::Body, extract::FromRequest}; +use async_trait::async_trait; +use axum::{body::Body, extract::FromRequest}; use bytes::{BufMut, Bytes, BytesMut}; use conduwuit::{Error, Result, debug, debug_warn, err, trace, 
utils::string::EMPTY}; use ruma::{ From 5768ca844295d892cfdcc9c80c8a57ef71c0e30c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 31 Mar 2025 04:23:47 +0000 Subject: [PATCH 172/310] upgrade dependency ByteSize Signed-off-by: Jason Volk --- Cargo.lock | 103 ++++++++++++++++++---------------------- Cargo.toml | 2 +- src/core/utils/bytes.rs | 6 +-- 3 files changed, 49 insertions(+), 62 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index aa639b30..ab9af9e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -79,7 +79,7 @@ checksum = "0ae92a5119aa49cdbcf6b9f893fe4e1d98b04ccbf82ee0584ad948a44a734dea" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -161,7 +161,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -172,7 +172,7 @@ checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -423,7 +423,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.100", + "syn", "which", ] @@ -442,7 +442,7 @@ dependencies = [ "regex", "rustc-hash 2.1.1", "shlex", - "syn 2.0.100", + "syn", ] [[package]] @@ -555,9 +555,9 @@ checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a" [[package]] name = "bytesize" -version = "1.3.2" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d2c12f985c78475a6b8d629afd0c360260ef34cfef52efccdcfd31972f81c2e" +checksum = "a3c8f83209414aacf0eeae3cf730b18d6981697fba62f200fcfb92b9f082acba" [[package]] name = "bzip2-sys" @@ -679,7 +679,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -877,7 +877,7 @@ dependencies = [ "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -1062,9 +1062,9 @@ dependencies = [ [[package]] name = "crokey" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "520e83558f4c008ac06fa6a86e5c1d4357be6f994cce7434463ebcdaadf47bb1" +checksum = "c5ff945e42bb93d29b10ba509970066a269903a932f0ea07d99d8621f97e90d7" dependencies = [ "crokey-proc_macros", "crossterm", @@ -1075,15 +1075,15 @@ dependencies = [ [[package]] name = "crokey-proc_macros" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "370956e708a1ce65fe4ac5bb7185791e0ece7485087f17736d54a23a0895049f" +checksum = "665f2180fd82d0ba2bf3deb45fafabb18f23451024ff71ee47f6bfdfb4bbe09e" dependencies = [ "crossterm", "proc-macro2", "quote", "strict", - "syn 1.0.109", + "syn", ] [[package]] @@ -1191,7 +1191,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a2785755761f3ddc1492979ce1e48d2c00d09311c39e4466429188f3dd6501" dependencies = [ "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -1218,7 +1218,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -1287,7 +1287,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -1339,7 +1339,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -1550,7 +1550,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -1829,7 +1829,7 @@ dependencies = [ "markup5ever", "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -2081,7 +2081,7 @@ checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -2185,7 +2185,7 @@ checksum = "c34819042dc3d3971c46c2190835914dfbe0c3c13f61449b2997f4e9722dfa60" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -2336,7 +2336,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.100", + "syn", ] [[package]] @@ -2558,7 +2558,7 @@ checksum = "bd2209fff77f705b00c737016a48e73733d7fbccb8b007194db148f03561fb70" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -2705,7 +2705,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -2943,7 +2943,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -3007,7 +3007,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -3079,7 +3079,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5316f57387668042f561aae71480de936257848f9c43ce528e311d89a07cadeb" dependencies = [ "proc-macro2", - "syn 2.0.100", + "syn", ] [[package]] @@ -3108,7 +3108,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", "version_check", "yansi", ] @@ -3129,7 +3129,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a65f2e60fbf1063868558d69c6beacf412dc755f9fc020f514b7955fc914fe30" dependencies = [ "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -3152,7 +3152,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -3691,7 +3691,7 @@ dependencies = [ "quote", "ruma-identifiers-validation", "serde", - "syn 2.0.100", + "syn", "toml", ] @@ -4088,7 +4088,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -4297,9 +4297,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c970269d99b64e60ec3bd6ad27270092a5394c4e309314b18ae3fe575695fbe8" +checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" dependencies = [ "libc", "windows-sys 0.52.0", @@ -4367,17 +4367,6 @@ version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - [[package]] name = "syn" version = "2.0.100" @@ -4406,7 +4395,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -4491,7 +4480,7 @@ checksum = 
"4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -4502,7 +4491,7 @@ checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -4658,7 +4647,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -4870,7 +4859,7 @@ source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -5167,7 +5156,7 @@ dependencies = [ "log", "proc-macro2", "quote", - "syn 2.0.100", + "syn", "wasm-bindgen-shared", ] @@ -5202,7 +5191,7 @@ checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -5593,7 +5582,7 @@ checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", "synstructure", ] @@ -5614,7 +5603,7 @@ checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] @@ -5634,7 +5623,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", "synstructure", ] @@ -5663,7 +5652,7 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index ab7a935c..e6751acf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -510,7 +510,7 @@ version = "1.0.37" version = "1.0.89" [workspace.dependencies.bytesize] -version = "1.3.2" +version = "2.0.1" [workspace.dependencies.core_affinity] version = "0.8.1" diff --git a/src/core/utils/bytes.rs b/src/core/utils/bytes.rs index 04101be4..507b9b9a 100644 --- a/src/core/utils/bytes.rs +++ b/src/core/utils/bytes.rs @@ -17,15 +17,13 @@ pub fn from_str(str: &str) -> Result { Ok(bytes) } -/// Output a human-readable size string w/ si-unit suffix +/// Output a human-readable size string w/ iec-unit suffix #[inline] #[must_use] pub fn pretty(bytes: usize) -> String { - const SI_UNITS: bool = true; - let bytes: u64 = bytes.try_into().expect("failed to convert usize to u64"); - bytesize::to_string(bytes, SI_UNITS) + ByteSize::b(bytes).display().iec().to_string() } #[inline] From bee1f896243f9fafc588b98f43412637f6a5dd90 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 31 Mar 2025 05:03:15 +0000 Subject: [PATCH 173/310] bump dependencies Signed-off-by: Jason Volk --- Cargo.lock | 136 +++++++++++++++++++++++++++++------------- Cargo.toml | 48 +++++++-------- src/core/Cargo.toml | 1 + src/core/error/mod.rs | 2 + 4 files changed, 121 insertions(+), 66 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ab9af9e8..fb19dfdb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -731,8 +731,8 @@ dependencies = [ "opentelemetry-jaeger", "opentelemetry_sdk", "sentry", - "sentry-tower", - "sentry-tracing", + "sentry-tower 0.35.0", + "sentry-tracing 0.35.0", "tokio", "tokio-metrics", "tracing", @@ -782,7 +782,7 @@ dependencies = [ "http-body-util", "hyper", "ipaddress", - "itertools 0.13.0", + "itertools 0.14.0", "log", "rand 0.8.5", "reqwest", @@ -802,6 
+802,7 @@ dependencies = [ "argon2", "arrayvec", "axum", + "axum-extra", "bytes", "bytesize", "cargo_toml", @@ -820,7 +821,7 @@ dependencies = [ "http", "http-body-util", "ipaddress", - "itertools 0.13.0", + "itertools 0.14.0", "libc", "libloading", "log", @@ -874,7 +875,7 @@ dependencies = [ name = "conduwuit_macros" version = "0.5.0" dependencies = [ - "itertools 0.13.0", + "itertools 0.14.0", "proc-macro2", "quote", "syn", @@ -904,8 +905,8 @@ dependencies = [ "rustls", "sd-notify", "sentry", - "sentry-tower", - "sentry-tracing", + "sentry-tower 0.35.0", + "sentry-tracing 0.35.0", "serde_json", "tokio", "tower 0.5.2", @@ -930,7 +931,7 @@ dependencies = [ "http", "image", "ipaddress", - "itertools 0.13.0", + "itertools 0.14.0", "log", "loole", "lru-cache", @@ -997,9 +998,9 @@ checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const-str" -version = "0.5.7" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3618cccc083bb987a415d85c02ca6c9994ea5b44731ec28b9ecf09658655fba9" +checksum = "9e991226a70654b49d34de5ed064885f0bef0348a8e70018b8ff1ac80aa984a2" [[package]] name = "const_panic" @@ -1948,9 +1949,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.8" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" +checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" dependencies = [ "bytes", "futures-channel", @@ -1961,7 +1962,6 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", - "tower 0.4.13", "tower-service", "tracing", ] @@ -2543,18 +2543,18 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "minicbor" -version = "0.25.1" +version = "0.26.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0452a60c1863c1f50b5f77cd295e8d2786849f35883f0b9e18e7e6e1b5691b0" +checksum = "1936e27fffe7d8557c060eb82cb71668608cd1a5fb56b63e66d22ae8d7564321" dependencies = [ "minicbor-derive", ] [[package]] name = "minicbor-derive" -version = "0.15.3" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd2209fff77f705b00c737016a48e73733d7fbccb8b007194db148f03561fb70" +checksum = "a9882ef5c56df184b8ffc107fc6c61e33ee3a654b021961d790a78571bb9d67a" dependencies = [ "proc-macro2", "quote", @@ -2563,9 +2563,9 @@ dependencies = [ [[package]] name = "minicbor-serde" -version = "0.3.2" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "becf18ac384ecf6f53b2db3b1549eebff664c67ecf259ae99be5912193291686" +checksum = "54e45e8beeefea1b8b6f52fa188a5b6ea3746c2885606af8d4d8bf31cee633fb" dependencies = [ "minicbor", "serde", @@ -3938,21 +3938,21 @@ checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" [[package]] name = "sentry" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "016958f51b96861dead7c1e02290f138411d05e94fad175c8636a835dee6e51e" +checksum = "3a7332159e544e34db06b251b1eda5e546bd90285c3f58d9c8ff8450b484e0da" dependencies = [ "httpdate", "reqwest", "rustls", "sentry-backtrace", "sentry-contexts", - "sentry-core", + "sentry-core 0.36.0", "sentry-debug-images", "sentry-log", "sentry-panic", - "sentry-tower", - "sentry-tracing", + "sentry-tower 0.36.0", + "sentry-tracing 0.36.0", "tokio", "ureq", "webpki-roots", @@ -3960,27 
+3960,27 @@ dependencies = [ [[package]] name = "sentry-backtrace" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e57712c24e99252ef175b4b06c485294f10ad6bc5b5e1567ff3803ee7a0b7d3f" +checksum = "565ec31ad37bab8e6d9f289f34913ed8768347b133706192f10606dabd5c6bc4" dependencies = [ "backtrace", "once_cell", "regex", - "sentry-core", + "sentry-core 0.36.0", ] [[package]] name = "sentry-contexts" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eba8754ec3b9279e00aa6d64916f211d44202370a1699afde1db2c16cbada089" +checksum = "e860275f25f27e8c0c7726ce116c7d5c928c5bba2ee73306e52b20a752298ea6" dependencies = [ "hostname", "libc", "os_info", "rustc_version", - "sentry-core", + "sentry-core 0.36.0", "uname", ] @@ -3992,40 +3992,53 @@ checksum = "f9f8b6dcd4fbae1e3e22b447f32670360b27e31b62ab040f7fb04e0f80c04d92" dependencies = [ "once_cell", "rand 0.8.5", - "sentry-types", + "sentry-types 0.35.0", + "serde", + "serde_json", +] + +[[package]] +name = "sentry-core" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "653942e6141f16651273159f4b8b1eaeedf37a7554c00cd798953e64b8a9bf72" +dependencies = [ + "once_cell", + "rand 0.8.5", + "sentry-types 0.36.0", "serde", "serde_json", ] [[package]] name = "sentry-debug-images" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8982a69133d3f5e4efdbfa0776937fca43c3a2e275a8fe184f50b1b0aa92e07c" +checksum = "2a60bc2154e6df59beed0ac13d58f8dfaf5ad20a88548a53e29e4d92e8e835c2" dependencies = [ "findshlibs", "once_cell", - "sentry-core", + "sentry-core 0.36.0", ] [[package]] name = "sentry-log" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efcbfbb74628eaef033c1154d4bb082437c7592ce2282c7c5ccb455c4c97a06d" +checksum = "1c96d796cba1b3a0793e7f53edc420c61f9419fba8fb34ad5519f5c7d01af6b2" dependencies = [ "log", - "sentry-core", + "sentry-core 0.36.0", ] [[package]] name = "sentry-panic" -version = "0.35.0" +version = "0.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de296dae6f01e931b65071ee5fe28d66a27909857f744018f107ed15fd1f6b25" +checksum = "105e3a956c8aa9dab1e4087b1657b03271bfc49d838c6ae9bfc7c58c802fd0ef" dependencies = [ "sentry-backtrace", - "sentry-core", + "sentry-core 0.36.0", ] [[package]] @@ -4033,10 +4046,21 @@ name = "sentry-tower" version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcdaf9b1939589476bd57751d12a9653bbfe356610fc476d03d7683189183ab7" +dependencies = [ + "sentry-core 0.35.0", + "tower-layer", + "tower-service", +] + +[[package]] +name = "sentry-tower" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "082f781dfc504d984e16d99f8dbf94d6ee4762dd0fc28de25713d0f900a8164d" dependencies = [ "http", "pin-project", - "sentry-core", + "sentry-core 0.36.0", "tower-layer", "tower-service", "url", @@ -4047,9 +4071,20 @@ name = "sentry-tracing" version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "263f73c757ed7915d3e1e34625eae18cad498a95b4261603d4ce3f87b159a6f0" +dependencies = [ + "sentry-core 0.35.0", + "tracing-core", + "tracing-subscriber", +] + +[[package]] +name = "sentry-tracing" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"64e75c831b4d8b34a5aec1f65f67c5d46a26c7c5d3c7abd8b5ef430796900cf8" dependencies = [ "sentry-backtrace", - "sentry-core", + "sentry-core 0.36.0", "tracing-core", "tracing-subscriber", ] @@ -4071,6 +4106,23 @@ dependencies = [ "uuid", ] +[[package]] +name = "sentry-types" +version = "0.36.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d4203359e60724aa05cf2385aaf5d4f147e837185d7dd2b9ccf1ee77f4420c8" +dependencies = [ + "debugid", + "hex", + "rand 0.8.5", + "serde", + "serde_json", + "thiserror 1.0.69", + "time", + "url", + "uuid", +] + [[package]] name = "serde" version = "1.0.219" diff --git a/Cargo.toml b/Cargo.toml index e6751acf..ba706656 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -27,11 +27,11 @@ version = "0.5.0" name = "conduwuit" [workspace.dependencies.arrayvec] -version = "0.7.4" +version = "0.7.6" features = ["serde"] [workspace.dependencies.smallvec] -version = "1.13.2" +version = "1.14.0" features = [ "const_generics", "const_new", @@ -45,7 +45,7 @@ version = "0.3" features = ["ffi", "std", "union"] [workspace.dependencies.const-str] -version = "0.5.7" +version = "0.6.2" [workspace.dependencies.ctor] version = "0.2.9" @@ -81,13 +81,13 @@ version = "0.8.5" # Used for the http request / response body type for Ruma endpoints used with reqwest [workspace.dependencies.bytes] -version = "1.9.0" +version = "1.10.1" [workspace.dependencies.http-body-util] -version = "0.1.2" +version = "0.1.3" [workspace.dependencies.http] -version = "1.2.0" +version = "1.3.1" [workspace.dependencies.regex] version = "1.11.1" @@ -111,7 +111,7 @@ default-features = false features = ["typed-header", "tracing"] [workspace.dependencies.axum-server] -version = "0.7.1" +version = "0.7.2" default-features = false # to listen on both HTTP and HTTPS if listening on TLS dierctly from conduwuit for complement or sytest @@ -122,7 +122,7 @@ version = "0.7" version = "0.6.1" [workspace.dependencies.tower] -version = "0.5.1" +version = "0.5.2" default-features = false features = ["util"] @@ -156,12 +156,12 @@ features = [ ] [workspace.dependencies.serde] -version = "1.0.216" +version = "1.0.219" default-features = false features = ["rc"] [workspace.dependencies.serde_json] -version = "1.0.133" +version = "1.0.140" default-features = false features = ["raw_value"] @@ -237,7 +237,7 @@ features = [ ] [workspace.dependencies.futures] -version = "0.3.30" +version = "0.3.31" default-features = false features = ["std", "async-await"] @@ -275,7 +275,7 @@ features = ["alloc", "std"] default-features = false [workspace.dependencies.hyper] -version = "1.5.1" +version = "1.6.0" default-features = false features = [ "server", @@ -285,7 +285,7 @@ features = [ [workspace.dependencies.hyper-util] # hyper-util >=0.1.9 seems to have DNS issues -version = "=0.1.8" +version = "0.1.10" default-features = false features = [ "server-auto", @@ -295,7 +295,7 @@ features = [ # to support multiple variations of setting a config option [workspace.dependencies.either] -version = "1.13.0" +version = "1.15.0" default-features = false features = ["serde"] @@ -311,7 +311,7 @@ default-features = false # Used for conduwuit::Error type [workspace.dependencies.thiserror] -version = "2.0.7" +version = "2.0.12" default-features = false # Used when hashing the state @@ -321,7 +321,7 @@ default-features = false # Used to make working with iterators easier, was already a transitive depdendency [workspace.dependencies.itertools] -version = "0.13.0" +version = "0.14.0" # to parse user-friendly time durations in admin commands 
#TODO: overlaps chrono? @@ -337,7 +337,7 @@ version = "0.4.0" version = "2.3.1" [workspace.dependencies.async-trait] -version = "0.1.83" +version = "0.1.88" [workspace.dependencies.lru-cache] version = "0.1.2" @@ -423,7 +423,7 @@ features = ["rt-tokio"] # optional sentry metrics for crash/panic reporting [workspace.dependencies.sentry] -version = "0.35.0" +version = "0.36.0" default-features = false features = [ "backtrace", @@ -499,18 +499,18 @@ default-features = false version = "0.1" [workspace.dependencies.syn] -version = "2.0.90" +version = "2.0" default-features = false features = ["full", "extra-traits"] [workspace.dependencies.quote] -version = "1.0.37" +version = "1.0" [workspace.dependencies.proc-macro2] -version = "1.0.89" +version = "1.0" [workspace.dependencies.bytesize] -version = "2.0.1" +version = "2.0" [workspace.dependencies.core_affinity] version = "0.8.1" @@ -522,11 +522,11 @@ version = "0.2" version = "0.2" [workspace.dependencies.minicbor] -version = "0.25.1" +version = "0.26.3" features = ["std"] [workspace.dependencies.minicbor-serde] -version = "0.3.2" +version = "0.4.1" features = ["std"] [workspace.dependencies.maplit] diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index b40dd3ad..4848e742 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -59,6 +59,7 @@ conduwuit_mods = [ argon2.workspace = true arrayvec.workspace = true axum.workspace = true +axum-extra.workspace = true bytes.workspace = true bytesize.workspace = true cargo_toml.workspace = true diff --git a/src/core/error/mod.rs b/src/core/error/mod.rs index 02ab6fa3..e46edf09 100644 --- a/src/core/error/mod.rs +++ b/src/core/error/mod.rs @@ -81,6 +81,8 @@ pub enum Error { #[error("Tracing reload error: {0}")] TracingReload(#[from] tracing_subscriber::reload::Error), #[error(transparent)] + TypedHeader(#[from] axum_extra::typed_header::TypedHeaderRejection), + #[error(transparent)] Yaml(#[from] serde_yaml::Error), // ruma/conduwuit From 0f81c1e1ccdcb0c5c6d5a27e82f16eb37b1e61c8 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 1 Apr 2025 02:14:51 +0000 Subject: [PATCH 174/310] revert hyper-util upgrade due to continued DNS issues Signed-off-by: Jason Volk --- Cargo.lock | 5 +++-- Cargo.toml | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fb19dfdb..77d03506 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1949,9 +1949,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.10" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df2dcfbe0677734ab2f3ffa7fa7bfd4706bfdc1ef393f2ee30184aed67e631b4" +checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" dependencies = [ "bytes", "futures-channel", @@ -1962,6 +1962,7 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", + "tower 0.4.13", "tower-service", "tracing", ] diff --git a/Cargo.toml b/Cargo.toml index ba706656..62bbaf16 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -285,7 +285,7 @@ features = [ [workspace.dependencies.hyper-util] # hyper-util >=0.1.9 seems to have DNS issues -version = "0.1.10" +version = "=0.1.8" default-features = false features = [ "server-auto", From 1b71b99c514f69bdd2fbcdb7996dcc00860d2057 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 2 Apr 2025 10:49:38 -0400 Subject: [PATCH 175/310] fix weird issue with acl c2s check Signed-off-by: June Clementine Strawberry --- src/api/client/state.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git 
a/src/api/client/state.rs b/src/api/client/state.rs
index 9563c26d..23583356 100644
--- a/src/api/client/state.rs
+++ b/src/api/client/state.rs
@@ -229,6 +229,9 @@ async fn allowed_to_send_state_event(
 
 	if acl_content.deny.contains(&String::from("*"))
 		&& !acl_content.is_allowed(services.globals.server_name())
+		&& !acl_content
+			.allow
+			.contains(&services.globals.server_name().to_string())
 	{
 		return Err!(Request(BadJson(debug_warn!(
 			?room_id,
@@ -240,6 +243,9 @@
 
 	if !acl_content.allow.contains(&String::from("*"))
 		&& !acl_content.is_allowed(services.globals.server_name())
+		&& !acl_content
+			.allow
+			.contains(&services.globals.server_name().to_string())
 	{
 		return Err!(Request(BadJson(debug_warn!(
 			?room_id,

From ea246d91d975a89a947c35260a4d50684fd2913b Mon Sep 17 00:00:00 2001
From: June Clementine Strawberry
Date: Wed, 2 Apr 2025 22:38:47 -0400
Subject: [PATCH 176/310] remove pointless and buggy *_visibility in-memory caches

Signed-off-by: June Clementine Strawberry
---
 conduwuit-example.toml                       |  8 ---
 src/core/config/mod.rs                       | 12 ----
 src/service/rooms/state_accessor/mod.rs      | 68 ++-----------------
 .../rooms/state_accessor/server_can.rs       | 22 +-----
 src/service/rooms/state_accessor/user_can.rs | 22 +-----
 5 files changed, 10 insertions(+), 122 deletions(-)

diff --git a/conduwuit-example.toml b/conduwuit-example.toml
index 15e6dd37..75ecddab 100644
--- a/conduwuit-example.toml
+++ b/conduwuit-example.toml
@@ -195,14 +195,6 @@
 #
 #servernameevent_data_cache_capacity = varies by system
 
-# This item is undocumented. Please contribute documentation for it.
-#
-#server_visibility_cache_capacity = varies by system
-
-# This item is undocumented. Please contribute documentation for it.
-#
-#user_visibility_cache_capacity = varies by system
-
 # This item is undocumented. Please contribute documentation for it.
 #
 #stateinfo_cache_capacity = varies by system
diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs
index 52df19ac..7be140a5 100644
--- a/src/core/config/mod.rs
+++ b/src/core/config/mod.rs
@@ -252,14 +252,6 @@ pub struct Config {
 	#[serde(default = "default_servernameevent_data_cache_capacity")]
 	pub servernameevent_data_cache_capacity: u32,
 
-	/// default: varies by system
-	#[serde(default = "default_server_visibility_cache_capacity")]
-	pub server_visibility_cache_capacity: u32,
-
-	/// default: varies by system
-	#[serde(default = "default_user_visibility_cache_capacity")]
-	pub user_visibility_cache_capacity: u32,
-
 	/// default: varies by system
 	#[serde(default = "default_stateinfo_cache_capacity")]
 	pub stateinfo_cache_capacity: u32,
@@ -2035,10 +2027,6 @@ fn default_servernameevent_data_cache_capacity() -> u32 {
 	parallelism_scaled_u32(100_000).saturating_add(500_000)
 }
 
-fn default_server_visibility_cache_capacity() -> u32 { parallelism_scaled_u32(500) }
-
-fn default_user_visibility_cache_capacity() -> u32 { parallelism_scaled_u32(1000) }
-
 fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(100) }
 
 fn default_roomid_spacehierarchy_cache_capacity() -> u32 { parallelism_scaled_u32(1000) }
diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs
index 652fdbd7..b57465ce 100644
--- a/src/service/rooms/state_accessor/mod.rs
+++ b/src/service/rooms/state_accessor/mod.rs
@@ -3,21 +3,13 @@
 mod server_can;
 mod state;
 mod user_can;
 
-use std::{
-	fmt::Write,
-	sync::{Arc, Mutex as StdMutex, Mutex},
-};
+use std::sync::Arc;
 use async_trait::async_trait;
-use conduwuit::{
-	Result, err, utils,
-	utils::math::{Expected, usize_from_f64},
-};
+use conduwuit::{Result, err};
 use database::Map;
-use lru_cache::LruCache;
 use ruma::{
-	EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, OwnedRoomId, OwnedServerName,
-	OwnedUserId, RoomId, UserId,
+	EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, OwnedRoomId, RoomId, UserId,
 	events::{
 		StateEventType,
 		room::{
@@ -37,11 +29,9 @@ use ruma::{
 	space::SpaceRoomJoinRule,
 };
 
-use crate::{Dep, rooms, rooms::short::ShortStateHash};
+use crate::{Dep, rooms};
 
 pub struct Service {
-	pub server_visibility_cache: Mutex>,
-	pub user_visibility_cache: Mutex>,
 	services: Services,
 	db: Data,
 }
@@ -61,19 +51,7 @@ struct Data {
 #[async_trait]
 impl crate::Service for Service {
 	fn build(args: crate::Args<'_>) -> Result> {
-		let config = &args.server.config;
-		let server_visibility_cache_capacity =
-			f64::from(config.server_visibility_cache_capacity) * config.cache_capacity_modifier;
-		let user_visibility_cache_capacity =
-			f64::from(config.user_visibility_cache_capacity) * config.cache_capacity_modifier;
-
 		Ok(Arc::new(Self {
-			server_visibility_cache: StdMutex::new(LruCache::new(usize_from_f64(
-				server_visibility_cache_capacity,
-			)?)),
-			user_visibility_cache: StdMutex::new(LruCache::new(usize_from_f64(
-				user_visibility_cache_capacity,
-			)?)),
 			services: Services {
 				state_cache: args.depend::("rooms::state_cache"),
 				timeline: args.depend::("rooms::timeline"),
@@ -88,44 +66,6 @@ impl crate::Service for Service {
 		}))
 	}
 
-	async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result {
-		use utils::bytes::pretty;
-
-		let (svc_count, svc_bytes) = self.server_visibility_cache.lock()?.iter().fold(
-			(0_usize, 0_usize),
-			|(count, bytes), (key, _)| {
-				(
-					count.expected_add(1),
-					bytes
-						.expected_add(key.0.capacity())
-						.expected_add(size_of_val(&key.1)),
-				)
-			},
-		);
-
-		let (uvc_count, uvc_bytes) =
self.user_visibility_cache.lock()?.iter().fold( - (0_usize, 0_usize), - |(count, bytes), (key, _)| { - ( - count.expected_add(1), - bytes - .expected_add(key.0.capacity()) - .expected_add(size_of_val(&key.1)), - ) - }, - ); - - writeln!(out, "server_visibility_cache: {svc_count} ({})", pretty(svc_bytes))?; - writeln!(out, "user_visibility_cache: {uvc_count} ({})", pretty(uvc_bytes))?; - - Ok(()) - } - - async fn clear_cache(&self) { - self.server_visibility_cache.lock().expect("locked").clear(); - self.user_visibility_cache.lock().expect("locked").clear(); - } - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } diff --git a/src/service/rooms/state_accessor/server_can.rs b/src/service/rooms/state_accessor/server_can.rs index 2e8f3325..7d1b197f 100644 --- a/src/service/rooms/state_accessor/server_can.rs +++ b/src/service/rooms/state_accessor/server_can.rs @@ -1,4 +1,4 @@ -use conduwuit::{error, implement, utils::stream::ReadyExt}; +use conduwuit::{debug_info, implement, utils::stream::ReadyExt}; use futures::StreamExt; use ruma::{ EventId, RoomId, ServerName, @@ -22,15 +22,6 @@ pub async fn server_can_see_event( return true; }; - if let Some(visibility) = self - .server_visibility_cache - .lock() - .expect("locked") - .get_mut(&(origin.to_owned(), shortstatehash)) - { - return *visibility; - } - let history_visibility = self .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "") .await @@ -44,7 +35,7 @@ pub async fn server_can_see_event( .room_members(room_id) .ready_filter(|member| member.server_name() == origin); - let visibility = match history_visibility { + match history_visibility { | HistoryVisibility::WorldReadable | HistoryVisibility::Shared => true, | HistoryVisibility::Invited => { // Allow if any member on requesting server was AT LEAST invited, else deny @@ -62,12 +53,5 @@ pub async fn server_can_see_event( error!("Unknown history visibility {history_visibility}"); false }, - }; - - self.server_visibility_cache - .lock() - .expect("locked") - .insert((origin.to_owned(), shortstatehash), visibility); - - visibility + } } diff --git a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs index c30e1da8..32a766a8 100644 --- a/src/service/rooms/state_accessor/user_can.rs +++ b/src/service/rooms/state_accessor/user_can.rs @@ -1,4 +1,4 @@ -use conduwuit::{Err, Error, Result, error, implement, pdu::PduBuilder}; +use conduwuit::{Err, Error, Result, debug_info, implement, pdu::PduBuilder}; use ruma::{ EventId, RoomId, UserId, events::{ @@ -98,15 +98,6 @@ pub async fn user_can_see_event( return true; }; - if let Some(visibility) = self - .user_visibility_cache - .lock() - .expect("locked") - .get_mut(&(user_id.to_owned(), shortstatehash)) - { - return *visibility; - } - let currently_member = self.services.state_cache.is_joined(user_id, room_id).await; let history_visibility = self @@ -116,7 +107,7 @@ pub async fn user_can_see_event( c.history_visibility }); - let visibility = match history_visibility { + match history_visibility { | HistoryVisibility::WorldReadable => true, | HistoryVisibility::Shared => currently_member, | HistoryVisibility::Invited => { @@ -131,14 +122,7 @@ pub async fn user_can_see_event( error!("Unknown history visibility {history_visibility}"); false }, - }; - - self.user_visibility_cache - .lock() - .expect("locked") - .insert((user_id.to_owned(), shortstatehash), visibility); - - visibility + } } /// Whether a user is allowed to see an event, based on From 
74012c5289831c16976fc283a4233bfb6b49ce8b Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 2 Apr 2025 22:44:44 -0400 Subject: [PATCH 177/310] significantly improve get_missing_events fed code Signed-off-by: June Clementine Strawberry --- src/api/server/backfill.rs | 12 ++- src/api/server/get_missing_events.rs | 111 ++++++++++++++------------- 2 files changed, 65 insertions(+), 58 deletions(-) diff --git a/src/api/server/backfill.rs b/src/api/server/backfill.rs index 5c875807..3cfbcedc 100644 --- a/src/api/server/backfill.rs +++ b/src/api/server/backfill.rs @@ -6,11 +6,17 @@ use conduwuit::{ utils::{IterStream, ReadyExt, stream::TryTools}, }; use futures::{FutureExt, StreamExt, TryStreamExt}; -use ruma::{MilliSecondsSinceUnixEpoch, api::federation::backfill::get_backfill, uint}; +use ruma::{MilliSecondsSinceUnixEpoch, api::federation::backfill::get_backfill}; use super::AccessCheck; use crate::Ruma; +/// arbitrary number but synapse's is 100 and we can handle lots of these +/// anyways +const LIMIT_MAX: usize = 150; +/// no spec defined number but we can handle a lot of these +const LIMIT_DEFAULT: usize = 50; + /// # `GET /_matrix/federation/v1/backfill/` /// /// Retrieves events from before the sender joined the room, if the room's @@ -30,9 +36,9 @@ pub(crate) async fn get_backfill_route( let limit = body .limit - .min(uint!(100)) .try_into() - .expect("UInt could not be converted to usize"); + .unwrap_or(LIMIT_DEFAULT) + .min(LIMIT_MAX); let from = body .v diff --git a/src/api/server/get_missing_events.rs b/src/api/server/get_missing_events.rs index 3d0bbb07..d72918fa 100644 --- a/src/api/server/get_missing_events.rs +++ b/src/api/server/get_missing_events.rs @@ -1,13 +1,19 @@ use axum::extract::State; -use conduwuit::{Error, Result}; -use ruma::{ - CanonicalJsonValue, EventId, RoomId, - api::{client::error::ErrorKind, federation::event::get_missing_events}, +use conduwuit::{ + Result, debug, debug_info, debug_warn, + utils::{self}, + warn, }; +use ruma::api::federation::event::get_missing_events; use super::AccessCheck; use crate::Ruma; +/// arbitrary number but synapse's is 20 and we can handle lots of these anyways +const LIMIT_MAX: usize = 50; +/// spec says default is 10 +const LIMIT_DEFAULT: usize = 10; + /// # `POST /_matrix/federation/v1/get_missing_events/{roomId}` /// /// Retrieves events that the sender is missing. @@ -24,7 +30,11 @@ pub(crate) async fn get_missing_events_route( .check() .await?; - let limit = body.limit.try_into()?; + let limit = body + .limit + .try_into() + .unwrap_or(LIMIT_DEFAULT) + .min(LIMIT_MAX); let mut queued_events = body.latest_events.clone(); // the vec will never have more entries the limit @@ -32,60 +42,51 @@ pub(crate) async fn get_missing_events_route( let mut i: usize = 0; while i < queued_events.len() && events.len() < limit { - if let Ok(pdu) = services + let Ok(pdu) = services.rooms.timeline.get_pdu(&queued_events[i]).await else { + debug_info!(?body.origin, "Event {} does not exist locally, skipping", &queued_events[i]); + i = i.saturating_add(1); + continue; + }; + + if pdu.room_id != body.room_id { + warn!(?body.origin, + "Got an event for the wrong room in database. Found {:?} in {:?}, server requested events in {:?}. 
Skipping.", + pdu.event_id, pdu.room_id, body.room_id + ); + i = i.saturating_add(1); + continue; + } + + if body.earliest_events.contains(&queued_events[i]) { + i = i.saturating_add(1); + continue; + } + + if !services .rooms - .timeline - .get_pdu_json(&queued_events[i]) + .state_accessor + .server_can_see_event(body.origin(), &body.room_id, &queued_events[i]) .await { - let room_id_str = pdu - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database."))?; - - let event_room_id = <&RoomId>::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room_id in event in database."))?; - - if event_room_id != body.room_id { - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event from wrong room.")); - } - - if body.earliest_events.contains(&queued_events[i]) { - i = i.saturating_add(1); - continue; - } - - if !services - .rooms - .state_accessor - .server_can_see_event(body.origin(), &body.room_id, &queued_events[i]) - .await - { - i = i.saturating_add(1); - continue; - } - - let prev_events = pdu - .get("prev_events") - .and_then(CanonicalJsonValue::as_array) - .unwrap_or_default(); - - queued_events.extend( - prev_events - .iter() - .map(<&EventId>::try_from) - .filter_map(Result::ok) - .map(ToOwned::to_owned), - ); - - events.push( - services - .sending - .convert_to_outgoing_federation_event(pdu) - .await, - ); + debug!(?body.origin, "Server cannot see {:?} in {:?}, skipping", pdu.event_id, pdu.room_id); + i = i.saturating_add(1); + continue; } - i = i.saturating_add(1); + + let Ok(pdu_json) = utils::to_canonical_object(&pdu) else { + debug_warn!(?body.origin, "Failed to convert PDU in database to canonical JSON: {pdu:?}"); + i = i.saturating_add(1); + continue; + }; + + queued_events.extend(pdu.prev_events.iter().map(ToOwned::to_owned)); + + events.push( + services + .sending + .convert_to_outgoing_federation_event(pdu_json) + .await, + ); } Ok(get_missing_events::v1::Response { events }) From 1036f8dfa8fabb9642b9638b54381e00016eef9c Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Wed, 2 Apr 2025 22:46:01 -0400 Subject: [PATCH 178/310] default shared history vis on unknown visibilities, drop needless error log Signed-off-by: June Clementine Strawberry --- src/service/rooms/state_accessor/server_can.rs | 4 ++-- src/service/rooms/state_accessor/user_can.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/service/rooms/state_accessor/server_can.rs b/src/service/rooms/state_accessor/server_can.rs index 7d1b197f..c946fbfd 100644 --- a/src/service/rooms/state_accessor/server_can.rs +++ b/src/service/rooms/state_accessor/server_can.rs @@ -50,8 +50,8 @@ pub async fn server_can_see_event( .await }, | _ => { - error!("Unknown history visibility {history_visibility}"); - false + debug_info!(%room_id, "Unknown history visibility, defaulting to shared: {history_visibility:?}"); + true }, } } diff --git a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs index 32a766a8..aa54407b 100644 --- a/src/service/rooms/state_accessor/user_can.rs +++ b/src/service/rooms/state_accessor/user_can.rs @@ -119,8 +119,8 @@ pub async fn user_can_see_event( self.user_was_joined(shortstatehash, user_id).await }, | _ => { - error!("Unknown history visibility {history_visibility}"); - false + debug_info!(%room_id, "Unknown history visibility, defaulting to shared: {history_visibility:?}"); + currently_member }, } } From 0e0b8cc4032732378966f07b38b97af89788e399 Mon Sep 17 
00:00:00 2001 From: June Clementine Strawberry Date: Wed, 2 Apr 2025 22:51:17 -0400 Subject: [PATCH 179/310] fixup+update msc3266, add fed support, parallelise IO Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +- Cargo.toml | 2 +- src/api/client/room/mod.rs | 9 +- src/api/client/room/summary.rs | 308 ++++++++++++++++++++++++ src/api/client/room/upgrade.rs | 2 +- src/api/client/unstable.rs | 138 +---------- src/service/rooms/spaces/mod.rs | 54 +++-- src/service/rooms/state_accessor/mod.rs | 28 ++- 8 files changed, 389 insertions(+), 174 deletions(-) create mode 100644 src/api/client/room/summary.rs diff --git a/Cargo.lock b/Cargo.lock index 77d03506..a53258bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3531,7 +3531,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "assign", "js_int", @@ -3551,7 +3551,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "js_int", "ruma-common", @@ -3563,7 +3563,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "as_variant", "assign", @@ -3586,7 +3586,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "as_variant", "base64 0.22.1", @@ -3618,7 +3618,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "as_variant", "indexmap 2.8.0", @@ -3643,7 +3643,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "bytes", "headers", @@ -3665,7 +3665,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3674,7 +3674,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "js_int", "ruma-common", @@ -3684,7 +3684,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3699,7 +3699,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "js_int", "ruma-common", @@ -3711,7 +3711,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=d197318a2507d38ffe6ee524d0d52728ca72538a#d197318a2507d38ffe6ee524d0d52728ca72538a" +source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index 62bbaf16..940ece86 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "d197318a2507d38ffe6ee524d0d52728ca72538a" +rev = "ea1278657125e9414caada074e8c172bc252fb1c" features = [ "compat", "rand", diff --git a/src/api/client/room/mod.rs b/src/api/client/room/mod.rs index 16fcadab..86d68f7e 100644 --- a/src/api/client/room/mod.rs +++ b/src/api/client/room/mod.rs @@ -2,9 +2,14 @@ mod aliases; mod create; mod event; mod initial_sync; +mod summary; mod upgrade; pub(crate) use self::{ - aliases::get_room_aliases_route, create::create_room_route, event::get_room_event_route, - initial_sync::room_initial_sync_route, upgrade::upgrade_room_route, + aliases::get_room_aliases_route, + create::create_room_route, + event::get_room_event_route, + initial_sync::room_initial_sync_route, + summary::{get_room_summary, get_room_summary_legacy}, + upgrade::upgrade_room_route, }; diff --git a/src/api/client/room/summary.rs b/src/api/client/room/summary.rs new file mode 100644 index 00000000..34820e83 --- /dev/null +++ b/src/api/client/room/summary.rs @@ -0,0 +1,308 @@ +use axum::extract::State; +use axum_client_ip::InsecureClientIp; +use conduwuit::{ + Err, Result, debug_warn, + utils::{IterStream, future::TryExtExt}, +}; +use futures::{FutureExt, StreamExt, future::join3, stream::FuturesUnordered}; +use ruma::{ + OwnedRoomId, OwnedServerName, RoomId, UserId, + api::{ + client::room::get_summary, + federation::space::{SpaceHierarchyParentSummary, get_hierarchy}, + }, + 
events::room::member::MembershipState, + space::SpaceRoomJoinRule::{self, *}, +}; +use service::Services; + +use crate::{Ruma, RumaResponse}; + +/// # `GET /_matrix/client/unstable/im.nheko.summary/rooms/{roomIdOrAlias}/summary` +/// +/// Returns a short description of the state of a room. +/// +/// This is the "wrong" endpoint that some implementations/clients may use +/// according to the MSC. Request and response bodies are the same as +/// `get_room_summary`. +/// +/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) +pub(crate) async fn get_room_summary_legacy( + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, +) -> Result> { + get_room_summary(State(services), InsecureClientIp(client), body) + .boxed() + .await + .map(RumaResponse) +} + +/// # `GET /_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}` +/// +/// Returns a short description of the state of a room. +/// +/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) +#[tracing::instrument(skip_all, fields(%client), name = "room_summary")] +pub(crate) async fn get_room_summary( + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, +) -> Result { + let (room_id, servers) = services + .rooms + .alias + .resolve_with_servers(&body.room_id_or_alias, Some(body.via.clone())) + .await?; + + if services.rooms.metadata.is_banned(&room_id).await { + return Err!(Request(Forbidden("This room is banned on this homeserver."))); + } + + room_summary_response(&services, &room_id, &servers, body.sender_user.as_deref()) + .boxed() + .await +} + +async fn room_summary_response( + services: &Services, + room_id: &RoomId, + servers: &[OwnedServerName], + sender_user: Option<&UserId>, +) -> Result { + if services.rooms.metadata.exists(room_id).await { + return local_room_summary_response(services, room_id, sender_user) + .boxed() + .await; + } + + let room = + remote_room_summary_hierarchy_response(services, room_id, servers, sender_user).await?; + + Ok(get_summary::msc3266::Response { + room_id: room_id.to_owned(), + canonical_alias: room.canonical_alias, + avatar_url: room.avatar_url, + guest_can_join: room.guest_can_join, + name: room.name, + num_joined_members: room.num_joined_members, + topic: room.topic, + world_readable: room.world_readable, + join_rule: room.join_rule, + room_type: room.room_type, + room_version: room.room_version, + membership: if sender_user.is_none() { + None + } else { + Some(MembershipState::Leave) + }, + encryption: room.encryption, + allowed_room_ids: room.allowed_room_ids, + }) +} + +async fn local_room_summary_response( + services: &Services, + room_id: &RoomId, + sender_user: Option<&UserId>, +) -> Result { + let join_rule = services.rooms.state_accessor.get_space_join_rule(room_id); + let world_readable = services.rooms.state_accessor.is_world_readable(room_id); + let guest_can_join = services.rooms.state_accessor.guest_can_join(room_id); + + let ((join_rule, allowed_room_ids), world_readable, guest_can_join) = + join3(join_rule, world_readable, guest_can_join).await; + + user_can_see_summary( + services, + room_id, + &join_rule, + guest_can_join, + world_readable, + &allowed_room_ids, + sender_user, + ) + .await?; + + let canonical_alias = services + .rooms + .state_accessor + .get_canonical_alias(room_id) + .ok(); + let name = services.rooms.state_accessor.get_name(room_id).ok(); + let topic = 
services.rooms.state_accessor.get_room_topic(room_id).ok(); + let room_type = services.rooms.state_accessor.get_room_type(room_id).ok(); + let avatar_url = services + .rooms + .state_accessor + .get_avatar(room_id) + .map(|res| res.into_option().unwrap_or_default().url); + let room_version = services.rooms.state.get_room_version(room_id).ok(); + let encryption = services + .rooms + .state_accessor + .get_room_encryption(room_id) + .ok(); + let num_joined_members = services + .rooms + .state_cache + .room_joined_count(room_id) + .unwrap_or(0); + + let ( + canonical_alias, + name, + num_joined_members, + topic, + avatar_url, + room_type, + room_version, + encryption, + ) = futures::join!( + canonical_alias, + name, + num_joined_members, + topic, + avatar_url, + room_type, + room_version, + encryption, + ); + + Ok(get_summary::msc3266::Response { + room_id: room_id.to_owned(), + canonical_alias, + avatar_url, + guest_can_join, + name, + num_joined_members: num_joined_members.try_into().unwrap_or_default(), + topic, + world_readable, + join_rule, + room_type, + room_version, + membership: if let Some(sender_user) = sender_user { + services + .rooms + .state_accessor + .get_member(room_id, sender_user) + .await + .map_or(Some(MembershipState::Leave), |content| Some(content.membership)) + } else { + None + }, + encryption, + allowed_room_ids, + }) +} + +/// used by MSC3266 to fetch a room's info if we do not know about it +async fn remote_room_summary_hierarchy_response( + services: &Services, + room_id: &RoomId, + servers: &[OwnedServerName], + sender_user: Option<&UserId>, +) -> Result { + if !services.config.allow_federation { + return Err!(Request(Forbidden("Federation is disabled."))); + } + + if services.rooms.metadata.is_disabled(room_id).await { + return Err!(Request(Forbidden( + "Federaton of room {room_id} is currently disabled on this server." 
+ ))); + } + + let request = get_hierarchy::v1::Request::new(room_id.to_owned()); + + let mut requests: FuturesUnordered<_> = servers + .iter() + .map(|server| { + services + .sending + .send_federation_request(server, request.clone()) + }) + .collect(); + + while let Some(Ok(response)) = requests.next().await { + let room = response.room.clone(); + if room.room_id != room_id { + debug_warn!( + "Room ID {} returned does not belong to the requested room ID {}", + room.room_id, + room_id + ); + continue; + } + + return user_can_see_summary( + services, + room_id, + &room.join_rule, + room.guest_can_join, + room.world_readable, + &room.allowed_room_ids, + sender_user, + ) + .await + .map(|()| room); + } + + Err!(Request(NotFound( + "Room is unknown to this server and was unable to fetch over federation with the \ + provided servers available" + ))) +} + +async fn user_can_see_summary( + services: &Services, + room_id: &RoomId, + join_rule: &SpaceRoomJoinRule, + guest_can_join: bool, + world_readable: bool, + allowed_room_ids: &[OwnedRoomId], + sender_user: Option<&UserId>, +) -> Result { + match sender_user { + | Some(sender_user) => { + let user_can_see_state_events = services + .rooms + .state_accessor + .user_can_see_state_events(sender_user, room_id); + let is_guest = services.users.is_deactivated(sender_user).unwrap_or(false); + let user_in_allowed_restricted_room = allowed_room_ids + .iter() + .stream() + .any(|room| services.rooms.state_cache.is_joined(sender_user, room)); + + let (user_can_see_state_events, is_guest, user_in_allowed_restricted_room) = + join3(user_can_see_state_events, is_guest, user_in_allowed_restricted_room) + .boxed() + .await; + + if user_can_see_state_events + || (is_guest && guest_can_join) + || matches!(&join_rule, &Public | &Knock | &KnockRestricted) + || user_in_allowed_restricted_room + { + return Ok(()); + } + + Err!(Request(Forbidden( + "Room is not world readable, not publicly accessible/joinable, restricted room \ + conditions not met, and guest access is forbidden. Not allowed to see details \ + of this room." 
+ ))) + }, + | None => { + if matches!(join_rule, Public | Knock | KnockRestricted) || world_readable { + return Ok(()); + } + + Err!(Request(Forbidden( + "Room is not world readable or publicly accessible/joinable, authentication is \ + required" + ))) + }, + } +} diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs index 4ac341a9..3cfb3c28 100644 --- a/src/api/client/room/upgrade.rs +++ b/src/api/client/room/upgrade.rs @@ -103,7 +103,7 @@ pub(crate) async fn upgrade_room_route( // Use the m.room.tombstone event as the predecessor let predecessor = Some(ruma::events::room::create::PreviousRoom::new( body.room_id.clone(), - (*tombstone_event_id).to_owned(), + Some(tombstone_event_id), )); // Send a m.room.create event containing a predecessor field and the applicable diff --git a/src/api/client/unstable.rs b/src/api/client/unstable.rs index 45ad103e..e21eaf21 100644 --- a/src/api/client/unstable.rs +++ b/src/api/client/unstable.rs @@ -2,7 +2,7 @@ use std::collections::BTreeMap; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::Err; +use conduwuit::{Err, Error, Result}; use futures::StreamExt; use ruma::{ OwnedRoomId, @@ -14,16 +14,14 @@ use ruma::{ delete_profile_key, delete_timezone_key, get_profile_key, get_timezone_key, set_profile_key, set_timezone_key, }, - room::get_summary, }, federation, }, - events::room::member::MembershipState, presence::PresenceState, }; use super::{update_avatar_url, update_displayname}; -use crate::{Error, Result, Ruma, RumaResponse}; +use crate::Ruma; /// # `GET /_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms` /// @@ -38,13 +36,10 @@ pub(crate) async fn get_mutual_rooms_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); - if sender_user == &body.user_id { - return Err(Error::BadRequest( - ErrorKind::Unknown, - "You cannot request rooms in common with yourself.", - )); + if sender_user == body.user_id { + return Err!(Request(Unknown("You cannot request rooms in common with yourself."))); } if !services.users.exists(&body.user_id).await { @@ -65,129 +60,6 @@ pub(crate) async fn get_mutual_rooms_route( }) } -/// # `GET /_matrix/client/unstable/im.nheko.summary/rooms/{roomIdOrAlias}/summary` -/// -/// Returns a short description of the state of a room. -/// -/// This is the "wrong" endpoint that some implementations/clients may use -/// according to the MSC. Request and response bodies are the same as -/// `get_room_summary`. -/// -/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) -pub(crate) async fn get_room_summary_legacy( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result> { - get_room_summary(State(services), InsecureClientIp(client), body) - .await - .map(RumaResponse) -} - -/// # `GET /_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}` -/// -/// Returns a short description of the state of a room. 
-/// -/// TODO: support fetching remote room info if we don't know the room -/// -/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) -#[tracing::instrument(skip_all, fields(%client), name = "room_summary")] -pub(crate) async fn get_room_summary( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let sender_user = body.sender_user.as_ref(); - - let room_id = services.rooms.alias.resolve(&body.room_id_or_alias).await?; - - if !services.rooms.metadata.exists(&room_id).await { - return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server")); - } - - if sender_user.is_none() - && !services - .rooms - .state_accessor - .is_world_readable(&room_id) - .await - { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Room is not world readable, authentication is required", - )); - } - - Ok(get_summary::msc3266::Response { - room_id: room_id.clone(), - canonical_alias: services - .rooms - .state_accessor - .get_canonical_alias(&room_id) - .await - .ok(), - avatar_url: services - .rooms - .state_accessor - .get_avatar(&room_id) - .await - .into_option() - .unwrap_or_default() - .url, - guest_can_join: services.rooms.state_accessor.guest_can_join(&room_id).await, - name: services.rooms.state_accessor.get_name(&room_id).await.ok(), - num_joined_members: services - .rooms - .state_cache - .room_joined_count(&room_id) - .await - .unwrap_or(0) - .try_into()?, - topic: services - .rooms - .state_accessor - .get_room_topic(&room_id) - .await - .ok(), - world_readable: services - .rooms - .state_accessor - .is_world_readable(&room_id) - .await, - join_rule: services - .rooms - .state_accessor - .get_join_rule(&room_id) - .await - .unwrap_or_default() - .0, - room_type: services - .rooms - .state_accessor - .get_room_type(&room_id) - .await - .ok(), - room_version: services.rooms.state.get_room_version(&room_id).await.ok(), - membership: if let Some(sender_user) = sender_user { - services - .rooms - .state_accessor - .get_member(&room_id, sender_user) - .await - .map_or_else(|_| MembershipState::Leave, |content| content.membership) - .into() - } else { - None - }, - encryption: services - .rooms - .state_accessor - .get_room_encryption(&room_id) - .await - .ok(), - }) -} - /// # `DELETE /_matrix/client/unstable/uk.tcpip.msc4133/profile/:user_id/us.cloke.msc4175.tz` /// /// Deletes the `tz` (timezone) of a user, as per MSC4133 and MSC4175. 
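
For reference, the access check introduced in summary.rs above reduces to a small decision rule. The following condensed sketch is illustrative only and not part of the patch; the booleans stand in for the futures joined in the real code, and note that the patch computes `is_guest` from `users.is_deactivated(..)` as shown above:

// Illustrative sketch only, assuming the SpaceRoomJoinRule variants used in summary.rs.
fn summary_visible(
	sender_known: bool,             // Some(sender_user) vs None
	can_see_state_events: bool,     // state_accessor.user_can_see_state_events(..)
	is_guest: bool,                 // users.is_deactivated(..) in the patch above
	guest_can_join: bool,
	world_readable: bool,
	publicly_joinable: bool,        // join rule is Public | Knock | KnockRestricted
	in_allowed_restricted_room: bool,
) -> bool {
	if sender_known {
		can_see_state_events
			|| (is_guest && guest_can_join)
			|| publicly_joinable
			|| in_allowed_restricted_room
	} else {
		// unauthenticated requesters only see publicly joinable or world-readable rooms
		publicly_joinable || world_readable
	}
}
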
diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index da52e095..f51a5e3a 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -27,7 +27,6 @@ use ruma::{ }, events::{ StateEventType, - room::join_rules::{JoinRule, RoomJoinRulesEventContent}, space::child::{HierarchySpaceChildEvent, SpaceChildEventContent}, }, serde::Raw, @@ -306,25 +305,18 @@ async fn get_room_summary( children_state: Vec>, identifier: &Identifier<'_>, ) -> Result { - let join_rule = self + let (join_rule, allowed_room_ids) = self .services .state_accessor - .room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") - .await - .map_or(JoinRule::Invite, |c: RoomJoinRulesEventContent| c.join_rule); + .get_space_join_rule(room_id) + .await; - let allowed_room_ids = self - .services - .state_accessor - .allowed_room_ids(join_rule.clone()); - - let join_rule = join_rule.clone().into(); let is_accessible_child = self .is_accessible_child(room_id, &join_rule, identifier, &allowed_room_ids) .await; if !is_accessible_child { - return Err!(Request(Forbidden("User is not allowed to see the room",))); + return Err!(Request(Forbidden("User is not allowed to see the room"))); } let name = self.services.state_accessor.get_name(room_id).ok(); @@ -355,6 +347,14 @@ async fn get_room_summary( .get_avatar(room_id) .map(|res| res.into_option().unwrap_or_default().url); + let room_version = self.services.state.get_room_version(room_id).ok(); + + let encryption = self + .services + .state_accessor + .get_room_encryption(room_id) + .ok(); + let ( canonical_alias, name, @@ -364,6 +364,8 @@ async fn get_room_summary( guest_can_join, avatar_url, room_type, + room_version, + encryption, ) = futures::join!( canonical_alias, name, @@ -372,7 +374,9 @@ async fn get_room_summary( world_readable, guest_can_join, avatar_url, - room_type + room_type, + room_version, + encryption, ); Ok(SpaceHierarchyParentSummary { @@ -387,9 +391,9 @@ async fn get_room_summary( allowed_room_ids, join_rule, room_id: room_id.to_owned(), - num_joined_members: num_joined_members - .try_into() - .expect("user count should not be that big"), + num_joined_members: num_joined_members.try_into().unwrap_or_default(), + encryption, + room_version, }) } @@ -487,6 +491,8 @@ async fn cache_insert( join_rule, room_type, allowed_room_ids, + encryption, + room_version, } = child; let summary = SpaceHierarchyParentSummary { @@ -506,6 +512,8 @@ async fn cache_insert( .map(PduEvent::into_stripped_spacechild_state_event) .collect() .await, + encryption, + room_version, }; cache.insert(current_room.to_owned(), Some(CachedSpaceHierarchySummary { summary })); @@ -527,7 +535,9 @@ impl From for SpaceHierarchyRoomsChunk { join_rule, room_type, children_state, - .. + allowed_room_ids, + encryption, + room_version, } = value.summary; Self { @@ -542,6 +552,9 @@ impl From for SpaceHierarchyRoomsChunk { join_rule, room_type, children_state, + encryption, + room_version, + allowed_room_ids, } } } @@ -562,7 +575,9 @@ pub fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyR join_rule, room_type, children_state, - .. 
+ allowed_room_ids, + encryption, + room_version, } = summary; SpaceHierarchyRoomsChunk { @@ -577,5 +592,8 @@ pub fn summary_to_chunk(summary: SpaceHierarchyParentSummary) -> SpaceHierarchyR join_rule, room_type, children_state, + encryption, + room_version, + allowed_room_ids, } } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index b57465ce..7fff5935 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -129,22 +129,34 @@ impl Service { .map(|c: RoomTopicEventContent| c.topic) } - /// Returns the join rule (`SpaceRoomJoinRule`) for a given room - pub async fn get_join_rule( + /// Returns the space join rule (`SpaceRoomJoinRule`) for a given room and + /// any allowed room IDs if available. Will default to Invite and empty vec + /// if doesnt exist or invalid, + pub async fn get_space_join_rule( &self, room_id: &RoomId, - ) -> Result<(SpaceRoomJoinRule, Vec)> { + ) -> (SpaceRoomJoinRule, Vec) { self.room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") .await - .map(|c: RoomJoinRulesEventContent| { - (c.join_rule.clone().into(), self.allowed_room_ids(c.join_rule)) - }) - .or_else(|_| Ok((SpaceRoomJoinRule::Invite, vec![]))) + .map_or_else( + |_| (SpaceRoomJoinRule::Invite, vec![]), + |c: RoomJoinRulesEventContent| { + (c.join_rule.clone().into(), self.allowed_room_ids(c.join_rule)) + }, + ) + } + + /// Returns the join rules for a given room (`JoinRule` type). Will default + /// to Invite if doesnt exist or invalid + pub async fn get_join_rules(&self, room_id: &RoomId) -> JoinRule { + self.room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") + .await + .map_or_else(|_| JoinRule::Invite, |c: RoomJoinRulesEventContent| (c.join_rule)) } /// Returns an empty vec if not a restricted room pub fn allowed_room_ids(&self, join_rule: JoinRule) -> Vec { - let mut room_ids = Vec::with_capacity(1); + let mut room_ids = Vec::with_capacity(1); // restricted rooms generally only have 1 allowed room ID if let JoinRule::Restricted(r) | JoinRule::KnockRestricted(r) = join_rule { for rule in r.allow { if let AllowRule::RoomMembership(RoomMembership { room_id: membership }) = rule { From 24be5794774b7585b6ec1e3dbaa901967d241972 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 3 Apr 2025 12:20:10 -0400 Subject: [PATCH 180/310] add appservice MSC4190 support Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +++--- Cargo.toml | 2 +- src/api/client/account.rs | 12 ++-- src/api/client/appservice.rs | 8 ++- src/api/client/device.rs | 112 +++++++++++++++++++++++------- src/service/sending/appservice.rs | 18 +++-- src/service/users/mod.rs | 1 - 7 files changed, 125 insertions(+), 50 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a53258bc..2bcfcee4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3531,7 +3531,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "assign", "js_int", @@ -3551,7 +3551,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "js_int", "ruma-common", @@ -3563,7 +3563,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "as_variant", "assign", @@ -3586,7 +3586,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "as_variant", "base64 0.22.1", @@ -3618,7 +3618,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "as_variant", "indexmap 2.8.0", @@ -3643,7 +3643,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "bytes", "headers", @@ -3665,7 +3665,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3674,7 +3674,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "js_int", "ruma-common", @@ -3684,7 +3684,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3699,7 +3699,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "js_int", "ruma-common", @@ -3711,7 
+3711,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=ea1278657125e9414caada074e8c172bc252fb1c#ea1278657125e9414caada074e8c172bc252fb1c" +source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index 940ece86..0abaa2f9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -346,7 +346,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "ea1278657125e9414caada074e8c172bc252fb1c" +rev = "0701341a2fd5a6ea74beada18d5974cc401a4fc1" features = [ "compat", "rand", diff --git a/src/api/client/account.rs b/src/api/client/account.rs index efa8b142..e5894d47 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -318,14 +318,14 @@ pub(crate) async fn register_route( // Success! }, | _ => match body.json_body { - | Some(json) => { + | Some(ref json) => { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services.uiaa.create( &UserId::parse_with_server_name("", services.globals.server_name()) .unwrap(), "".into(), &uiaainfo, - &json, + json, ); return Err(Error::Uiaa(uiaainfo)); }, @@ -373,8 +373,12 @@ pub(crate) async fn register_route( ) .await?; - // Inhibit login does not work for guests - if !is_guest && body.inhibit_login { + if (!is_guest && body.inhibit_login) + || body + .appservice_info + .as_ref() + .is_some_and(|appservice| appservice.registration.device_management) + { return Ok(register::v3::Response { access_token: None, user_id, diff --git a/src/api/client/appservice.rs b/src/api/client/appservice.rs index 84955309..eb6b3312 100644 --- a/src/api/client/appservice.rs +++ b/src/api/client/appservice.rs @@ -22,7 +22,13 @@ pub(crate) async fn appservice_ping( ))); } - if appservice_info.registration.url.is_none() { + if appservice_info.registration.url.is_none() + || appservice_info + .registration + .url + .as_ref() + .is_some_and(|url| url.is_empty() || url == "null") + { return Err!(Request(UrlNotSet( "Appservice does not have a URL set, there is nothing to ping." 
))); diff --git a/src/api/client/device.rs b/src/api/client/device.rs index 6a845aed..7603c866 100644 --- a/src/api/client/device.rs +++ b/src/api/client/device.rs @@ -1,9 +1,9 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{Err, err}; +use conduwuit::{Err, debug, err}; use futures::StreamExt; use ruma::{ - MilliSecondsSinceUnixEpoch, + MilliSecondsSinceUnixEpoch, OwnedDeviceId, api::client::{ device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, error::ErrorKind, @@ -12,7 +12,7 @@ use ruma::{ }; use super::SESSION_ID_LENGTH; -use crate::{Error, Result, Ruma, utils}; +use crate::{Error, Result, Ruma, client::DEVICE_ID_LENGTH, utils}; /// # `GET /_matrix/client/r0/devices` /// @@ -59,26 +59,58 @@ pub(crate) async fn update_device_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); + let appservice = body.appservice_info.as_ref(); - let mut device = services + match services .users .get_device_metadata(sender_user, &body.device_id) .await - .map_err(|_| err!(Request(NotFound("Device not found."))))?; + { + | Ok(mut device) => { + device.display_name.clone_from(&body.display_name); + device.last_seen_ip.clone_from(&Some(client.to_string())); + device + .last_seen_ts + .clone_from(&Some(MilliSecondsSinceUnixEpoch::now())); - device.display_name.clone_from(&body.display_name); - device.last_seen_ip.clone_from(&Some(client.to_string())); - device - .last_seen_ts - .clone_from(&Some(MilliSecondsSinceUnixEpoch::now())); + services + .users + .update_device_metadata(sender_user, &body.device_id, &device) + .await?; - services - .users - .update_device_metadata(sender_user, &body.device_id, &device) - .await?; + Ok(update_device::v3::Response {}) + }, + | Err(_) => { + let Some(appservice) = appservice else { + return Err!(Request(NotFound("Device not found."))); + }; + if !appservice.registration.device_management { + return Err!(Request(NotFound("Device not found."))); + } - Ok(update_device::v3::Response {}) + debug!( + "Creating new device for {sender_user} from appservice {} as MSC4190 is enabled \ + and device ID does not exist", + appservice.registration.id + ); + + let device_id = OwnedDeviceId::from(utils::random_string(DEVICE_ID_LENGTH)); + + services + .users + .create_device( + sender_user, + &device_id, + &appservice.registration.as_token, + None, + Some(client.to_string()), + ) + .await?; + + return Ok(update_device::v3::Response {}); + }, + } } /// # `DELETE /_matrix/client/r0/devices/{deviceId}` @@ -95,8 +127,21 @@ pub(crate) async fn delete_device_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let (sender_user, sender_device) = body.sender(); + let appservice = body.appservice_info.as_ref(); + + if appservice.is_some_and(|appservice| appservice.registration.device_management) { + debug!( + "Skipping UIAA for {sender_user} as this is from an appservice and MSC4190 is \ + enabled" + ); + services + .users + .remove_device(sender_user, &body.device_id) + .await; + + return Ok(delete_device::v3::Response {}); + } // UIAA let mut uiaainfo = UiaaInfo { @@ -120,11 +165,11 @@ pub(crate) async fn delete_device_route( // Success! 
}, | _ => match body.json_body { - | Some(json) => { + | Some(ref json) => { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + .create(sender_user, sender_device, &uiaainfo, json); return Err!(Uiaa(uiaainfo)); }, @@ -142,11 +187,12 @@ pub(crate) async fn delete_device_route( Ok(delete_device::v3::Response {}) } -/// # `PUT /_matrix/client/r0/devices/{deviceId}` +/// # `POST /_matrix/client/v3/delete_devices` /// -/// Deletes the given device. +/// Deletes the given list of devices. /// -/// - Requires UIAA to verify user password +/// - Requires UIAA to verify user password unless from an appservice with +/// MSC4190 enabled. /// /// For each device: /// - Invalidates access token @@ -158,8 +204,20 @@ pub(crate) async fn delete_devices_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let (sender_user, sender_device) = body.sender(); + let appservice = body.appservice_info.as_ref(); + + if appservice.is_some_and(|appservice| appservice.registration.device_management) { + debug!( + "Skipping UIAA for {sender_user} as this is from an appservice and MSC4190 is \ + enabled" + ); + for device_id in &body.devices { + services.users.remove_device(sender_user, device_id).await; + } + + return Ok(delete_devices::v3::Response {}); + } // UIAA let mut uiaainfo = UiaaInfo { @@ -183,11 +241,11 @@ pub(crate) async fn delete_devices_route( // Success! }, | _ => match body.json_body { - | Some(json) => { + | Some(ref json) => { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + .create(sender_user, sender_device, &uiaainfo, json); return Err(Error::Uiaa(uiaainfo)); }, diff --git a/src/service/sending/appservice.rs b/src/service/sending/appservice.rs index 7fa0be9a..c7fae11f 100644 --- a/src/service/sending/appservice.rs +++ b/src/service/sending/appservice.rs @@ -25,6 +25,10 @@ where return Ok(None); }; + if dest == *"null" || dest.is_empty() { + return Ok(None); + } + trace!("Appservice URL \"{dest}\", Appservice ID: {}", registration.id); let hs_token = registration.hs_token.as_str(); @@ -34,7 +38,11 @@ where SendAccessToken::IfRequired(hs_token), &VERSIONS, ) - .map_err(|e| err!(BadServerResponse(warn!("Failed to find destination {dest}: {e}"))))? + .map_err(|e| { + err!(BadServerResponse( + warn!(appservice = %registration.id, "Failed to find destination {dest}: {e:?}") + )) + })? 
.map(BytesMut::freeze); let mut parts = http_request.uri().clone().into_parts(); @@ -51,7 +59,7 @@ where let reqwest_request = reqwest::Request::try_from(http_request)?; let mut response = client.execute(reqwest_request).await.map_err(|e| { - warn!("Could not send request to appservice \"{}\" at {dest}: {e}", registration.id); + warn!("Could not send request to appservice \"{}\" at {dest}: {e:?}", registration.id); e })?; @@ -71,7 +79,7 @@ where if !status.is_success() { debug_error!("Appservice response bytes: {:?}", utils::string_from_bytes(&body)); - return Err!(BadServerResponse(error!( + return Err!(BadServerResponse(warn!( "Appservice \"{}\" returned unsuccessful HTTP response {status} at {dest}", registration.id ))); @@ -84,8 +92,8 @@ where ); response.map(Some).map_err(|e| { - err!(BadServerResponse(error!( - "Appservice \"{}\" returned invalid response bytes {dest}: {e}", + err!(BadServerResponse(warn!( + "Appservice \"{}\" returned invalid/malformed response bytes {dest}: {e}", registration.id ))) }) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 5265e64b..87a8b93b 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -350,7 +350,6 @@ impl Service { token: &str, ) -> Result<()> { let key = (user_id, device_id); - // should not be None, but we shouldn't assert either lol... if self.db.userdeviceid_metadata.qry(&key).await.is_err() { return Err!(Database(error!( ?user_id, From f14756fb767abda97dc966ad842c958d970d77b9 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 3 Apr 2025 12:20:53 -0400 Subject: [PATCH 181/310] leave room locally if room is banned, rescind knocks on deactivation too Signed-off-by: June Clementine Strawberry --- src/api/client/membership.rs | 87 +++++++++++++++++++++------- src/api/client/sync/v3.rs | 12 ++-- src/api/client/sync/v4.rs | 5 +- src/api/client/sync/v5.rs | 5 +- src/service/rooms/state_cache/mod.rs | 6 +- 5 files changed, 87 insertions(+), 28 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 315a363c..ef40e972 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -475,9 +475,9 @@ pub(crate) async fn leave_room_route( State(services): State, body: Ruma, ) -> Result { - leave_room(&services, body.sender_user(), &body.room_id, body.reason.clone()).await?; - - Ok(leave_room::v3::Response::new()) + leave_room(&services, body.sender_user(), &body.room_id, body.reason.clone()) + .await + .map(|()| leave_room::v3::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/invite` @@ -1763,8 +1763,8 @@ pub(crate) async fn invite_helper( Ok(()) } -// Make a user leave all their joined rooms, forgets all rooms, and ignores -// errors +// Make a user leave all their joined rooms, rescinds knocks, forgets all rooms, +// and ignores errors pub async fn leave_all_rooms(services: &Services, user_id: &UserId) { let rooms_joined = services .rooms @@ -1778,7 +1778,17 @@ pub async fn leave_all_rooms(services: &Services, user_id: &UserId) { .rooms_invited(user_id) .map(|(r, _)| r); - let all_rooms: Vec<_> = rooms_joined.chain(rooms_invited).collect().await; + let rooms_knocked = services + .rooms + .state_cache + .rooms_knocked(user_id) + .map(|(r, _)| r); + + let all_rooms: Vec<_> = rooms_joined + .chain(rooms_invited) + .chain(rooms_knocked) + .collect() + .await; for room_id in all_rooms { // ignore errors @@ -1795,7 +1805,40 @@ pub async fn leave_room( user_id: &UserId, room_id: &RoomId, reason: Option, -) -> Result<()> { +) -> 
Result { + let default_member_content = RoomMemberEventContent { + membership: MembershipState::Leave, + reason: reason.clone(), + join_authorized_via_users_server: None, + is_direct: None, + avatar_url: None, + displayname: None, + third_party_invite: None, + blurhash: None, + }; + + if services.rooms.metadata.is_banned(room_id).await + || services.rooms.metadata.is_disabled(room_id).await + { + // the room is banned/disabled, the room must be rejected locally since we + // cant/dont want to federate with this server + services + .rooms + .state_cache + .update_membership( + room_id, + user_id, + default_member_content, + user_id, + None, + None, + true, + ) + .await?; + + return Ok(()); + } + // Ask a remote server if we don't have this room and are not knocking on it if !services .rooms @@ -1828,7 +1871,7 @@ pub async fn leave_room( .update_membership( room_id, user_id, - RoomMemberEventContent::new(MembershipState::Leave), + default_member_content, user_id, last_state, None, @@ -1848,26 +1891,23 @@ pub async fn leave_room( ) .await else { - // Fix for broken rooms - warn!( + debug_warn!( "Trying to leave a room you are not a member of, marking room as left locally." ); - services + return services .rooms .state_cache .update_membership( room_id, user_id, - RoomMemberEventContent::new(MembershipState::Leave), + default_member_content, user_id, None, None, true, ) - .await?; - - return Ok(()); + .await; }; services @@ -1897,7 +1937,7 @@ async fn remote_leave_room( room_id: &RoomId, ) -> Result<()> { let mut make_leave_response_and_server = - Err!(BadServerResponse("No server available to assist in leaving.")); + Err!(BadServerResponse("No remote server available to assist in leaving {room_id}.")); let mut servers: HashSet = services .rooms @@ -1977,20 +2017,25 @@ async fn remote_leave_room( let (make_leave_response, remote_server) = make_leave_response_and_server?; let Some(room_version_id) = make_leave_response.room_version else { - return Err!(BadServerResponse("Remote room version is not supported by conduwuit")); + return Err!(BadServerResponse(warn!( + "No room version was returned by {remote_server} for {room_id}, room version is \ + likely not supported by conduwuit" + ))); }; if !services.server.supported_room_version(&room_version_id) { - return Err!(BadServerResponse( - "Remote room version {room_version_id} is not supported by conduwuit" - )); + return Err!(BadServerResponse(warn!( + "Remote room version {room_version_id} for {room_id} is not supported by conduwuit", + ))); } let mut leave_event_stub = serde_json::from_str::( make_leave_response.event.get(), ) .map_err(|e| { - err!(BadServerResponse("Invalid make_leave event json received from server: {e:?}")) + err!(BadServerResponse(warn!( + "Invalid make_leave event json received from {remote_server} for {room_id}: {e:?}" + ))) })?; // TODO: Is origin needed? 
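
For reference, the leave_room change above adds an early local-only path: when the room is banned or disabled on this server, the membership is rewritten to leave locally and no federation traffic is attempted. A condensed, illustrative sketch of the resulting decision flow (not part of the patch; names are invented for the example, and the real function operates on &Services with a RoomId):

// Illustrative sketch only of the branch order in leave_room after this patch.
enum LeavePath {
	LocalRejection,  // room banned/disabled here: update membership locally, skip federation
	LocalLeaveEvent, // we are in the room (or knocking): build and append a leave PDU
	RemoteAssisted,  // room unknown to us: try make_leave/send_leave via remote servers
}

fn classify_leave(banned_or_disabled: bool, server_in_room: bool, knocking: bool) -> LeavePath {
	if banned_or_disabled {
		LeavePath::LocalRejection
	} else if !server_in_room && !knocking {
		LeavePath::RemoteAssisted
	} else {
		LeavePath::LocalLeaveEvent
	}
}
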
diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 530c1278..83ffa55a 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -15,6 +15,7 @@ use conduwuit::{ math::ruma_from_u64, stream::{BroadbandExt, Tools, TryExpect, WidebandExt}, }, + warn, }; use conduwuit_service::{ Services, @@ -428,9 +429,12 @@ async fn handle_left_room( return Ok(None); } - if !services.rooms.metadata.exists(room_id).await { + if !services.rooms.metadata.exists(room_id).await + || services.rooms.metadata.is_disabled(room_id).await + || services.rooms.metadata.is_banned(room_id).await + { // This is just a rejected invite, not a room we know - // Insert a leave event anyways + // Insert a leave event anyways for the client let event = PduEvent { event_id: EventId::new(services.globals.server_name()), sender: sender_user.to_owned(), @@ -489,7 +493,7 @@ async fn handle_left_room( .room_state_get_id(room_id, &StateEventType::RoomMember, sender_user.as_str()) .await else { - error!("Left room but no left state event"); + warn!("Left {room_id} but no left state event"); return Ok(None); }; @@ -499,7 +503,7 @@ async fn handle_left_room( .pdu_shortstatehash(&left_event_id) .await else { - error!(event_id = %left_event_id, "Leave event has no state"); + warn!(event_id = %left_event_id, "Leave event has no state in {room_id}"); return Ok(None); }; diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 7e902973..f7edb8c0 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -438,7 +438,10 @@ pub(crate) async fn sync_events_v4_route( let mut known_subscription_rooms = BTreeSet::new(); for (room_id, room) in &body.room_subscriptions { - if !services.rooms.metadata.exists(room_id).await { + if !services.rooms.metadata.exists(room_id).await + || services.rooms.metadata.is_disabled(room_id).await + || services.rooms.metadata.is_banned(room_id).await + { continue; } let todo_room = diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index 48b41b21..c4e71d88 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -214,7 +214,10 @@ async fn fetch_subscriptions( ) { let mut known_subscription_rooms = BTreeSet::new(); for (room_id, room) in &body.room_subscriptions { - if !services.rooms.metadata.exists(room_id).await { + if !services.rooms.metadata.exists(room_id).await + || services.rooms.metadata.is_disabled(room_id).await + || services.rooms.metadata.is_banned(room_id).await + { continue; } let todo_room = diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 23ba0520..d3dbc143 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -40,6 +40,7 @@ struct Services { account_data: Dep, config: Dep, globals: Dep, + metadata: Dep, state_accessor: Dep, users: Dep, } @@ -73,6 +74,7 @@ impl crate::Service for Service { account_data: args.depend::("account_data"), config: args.depend::("config"), globals: args.depend::("globals"), + metadata: args.depend::("rooms::metadata"), state_accessor: args .depend::("rooms::state_accessor"), users: args.depend::("users"), @@ -271,7 +273,9 @@ impl Service { self.mark_as_left(user_id, room_id); if self.services.globals.user_is_local(user_id) - && self.services.config.forget_forced_upon_leave + && (self.services.config.forget_forced_upon_leave + || self.services.metadata.is_banned(room_id).await + || self.services.metadata.is_disabled(room_id).await) { self.forget(room_id, user_id); } From 
5d1404e9dfff9bc0e5bed4bab6d75c9c94b38183 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Apr 2025 02:52:42 +0000 Subject: [PATCH 182/310] fix well-known using the hooked resolver Signed-off-by: Jason Volk --- src/service/client/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/client/mod.rs b/src/service/client/mod.rs index d51e5721..1aeeb492 100644 --- a/src/service/client/mod.rs +++ b/src/service/client/mod.rs @@ -56,7 +56,7 @@ impl crate::Service for Service { .build()?, well_known: base(config)? - .dns_resolver(resolver.resolver.hooked.clone()) + .dns_resolver(resolver.resolver.clone()) .connect_timeout(Duration::from_secs(config.well_known_conn_timeout)) .read_timeout(Duration::from_secs(config.well_known_timeout)) .timeout(Duration::from_secs(config.well_known_timeout)) From 58adb6fead27c863849c63184f145be209e40e1b Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Apr 2025 04:05:42 +0000 Subject: [PATCH 183/310] upgrade hickory and hyper-util dependencies Signed-off-by: Jason Volk --- Cargo.lock | 195 +++++++++++++++++++++++++++++++-- Cargo.toml | 10 +- src/service/resolver/actual.rs | 39 ++++--- src/service/resolver/dns.rs | 24 ++-- 4 files changed, 229 insertions(+), 39 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2bcfcee4..545f0f0d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -142,6 +142,17 @@ dependencies = [ "zstd-safe", ] +[[package]] +name = "async-recursion" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "async-stream" version = "0.3.6" @@ -927,7 +938,7 @@ dependencies = [ "const-str", "either", "futures", - "hickory-resolver", + "hickory-resolver 0.25.1", "http", "image", "ipaddress", @@ -1061,6 +1072,12 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "critical-section" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" + [[package]] name = "crokey" version = "1.1.1" @@ -1584,6 +1601,19 @@ dependencies = [ "slab", ] +[[package]] +name = "generator" +version = "0.8.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc6bd114ceda131d3b1d665eba35788690ad37f5916457286b32ab6fd3c438dd" +dependencies = [ + "cfg-if", + "libc", + "log", + "rustversion", + "windows 0.58.0", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -1769,6 +1799,34 @@ dependencies = [ "url", ] +[[package]] +name = "hickory-proto" +version = "0.25.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d844af74f7b799e41c78221be863bade11c430d46042c3b49ca8ae0c6d27287" +dependencies = [ + "async-recursion", + "async-trait", + "cfg-if", + "critical-section", + "data-encoding", + "enum-as-inner", + "futures-channel", + "futures-io", + "futures-util", + "idna", + "ipnet", + "once_cell", + "rand 0.9.0", + "ring", + "serde", + "thiserror 2.0.12", + "tinyvec", + "tokio", + "tracing", + "url", +] + [[package]] name = "hickory-resolver" version = "0.24.4" @@ -1777,7 +1835,7 @@ checksum = "cbb117a1ca520e111743ab2f6688eddee69db4e0ea242545a604dce8a66fd22e" dependencies = [ "cfg-if", "futures-util", - "hickory-proto", + "hickory-proto 0.24.4", "ipconfig", "lru-cache", "once_cell", @@ -1790,6 +1848,28 @@ dependencies = [ "tracing", ] +[[package]] +name = "hickory-resolver" +version = "0.25.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a128410b38d6f931fcc6ca5c107a3b02cabd6c05967841269a4ad65d23c44331" +dependencies = [ + "cfg-if", + "futures-util", + "hickory-proto 0.25.1", + "ipconfig", + "moka", + "once_cell", + "parking_lot", + "rand 0.9.0", + "resolv-conf", + "serde", + "smallvec", + "thiserror 2.0.12", + "tokio", + "tracing", +] + [[package]] name = "hmac" version = "0.12.1" @@ -1816,7 +1896,7 @@ checksum = "f9c7c7c8ac16c798734b8a24560c1362120597c40d5e1459f09498f8f6c8f2ba" dependencies = [ "cfg-if", "libc", - "windows", + "windows 0.52.0", ] [[package]] @@ -1949,9 +2029,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.8" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da62f120a8a37763efb0cf8fdf264b884c7b8b9ac8660b900c8661030c00e6ba" +checksum = "497bbc33a26fdd4af9ed9c70d63f61cf56a938375fbb32df34db9b1cd6d643f2" dependencies = [ "bytes", "futures-channel", @@ -1959,10 +2039,10 @@ dependencies = [ "http", "http-body", "hyper", + "libc", "pin-project-lite", "socket2", "tokio", - "tower 0.4.13", "tower-service", "tracing", ] @@ -2439,6 +2519,19 @@ dependencies = [ "futures-sink", ] +[[package]] +name = "loom" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" +dependencies = [ + "cfg-if", + "generator", + "scoped-tls", + "tracing", + "tracing-subscriber", +] + [[package]] name = "loop9" version = "0.1.5" @@ -2609,6 +2702,25 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "moka" +version = "0.12.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926" +dependencies = [ + "crossbeam-channel", + "crossbeam-epoch", + "crossbeam-utils", + "loom", + "parking_lot", + "portable-atomic", + "rustc_version", + "smallvec", + "tagptr", + "thiserror 1.0.69", + "uuid", +] + [[package]] name = "new_debug_unreachable" version = "1.0.6" @@ -2773,6 +2885,10 @@ name = "once_cell" version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +dependencies = [ + "critical-section", + "portable-atomic", +] [[package]] name = "openssl-probe" @@ -3052,6 +3168,12 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "portable-atomic" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" + [[package]] name = "powerfmt" version = "0.2.0" @@ -3463,7 +3585,7 @@ dependencies = [ "futures-core", "futures-util", "h2", - "hickory-resolver", + "hickory-resolver 0.24.4", "http", "http-body", "http-body-util", @@ -3893,6 +4015,12 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "scoped-tls" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + [[package]] name = "scopeguard" version = "1.2.0" @@ -4464,6 +4592,12 @@ dependencies = [ "version-compare", ] +[[package]] +name = "tagptr" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417" + [[package]] name = "target-lexicon" version = "0.12.16" @@ -5367,7 +5501,17 @@ version = "0.52.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be" dependencies = [ - "windows-core", + "windows-core 0.52.0", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" +dependencies = [ + "windows-core 0.58.0", "windows-targets 0.52.6", ] @@ -5380,6 +5524,41 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-core" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-implement" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "windows-interface" +version = "0.58.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "windows-registry" version = "0.2.0" diff --git a/Cargo.toml b/Cargo.toml index 0abaa2f9..6c5c291f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -284,8 +284,7 @@ features = [ ] [workspace.dependencies.hyper-util] -# hyper-util >=0.1.9 seems to have DNS issues -version = "=0.1.8" +version = "0.1.11" default-features = false features = [ "server-auto", @@ -306,8 +305,13 @@ default-features = false features = ["env", "toml"] [workspace.dependencies.hickory-resolver] -version = "0.24.2" +version = "0.25.1" default-features = false +features = [ + "serde", + "system-config", + "tokio", +] # Used for conduwuit::Error type [workspace.dependencies.thiserror] diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 1ad76f66..0151c4d7 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -5,7 +5,7 @@ use std::{ use conduwuit::{Err, Result, debug, debug_info, err, error, trace}; use futures::{FutureExt, TryFutureExt}; -use hickory_resolver::error::ResolveError; +use hickory_resolver::ResolveError; use ipaddress::IPAddress; use ruma::ServerName; @@ -334,25 +334,28 @@ impl super::Service { } fn handle_resolve_error(e: &ResolveError, host: &'_ str) -> Result<()> { - use hickory_resolver::error::ResolveErrorKind; + use hickory_resolver::{ResolveErrorKind::Proto, proto::ProtoErrorKind}; - match *e.kind() { - | ResolveErrorKind::NoRecordsFound { .. } => { - // Raise to debug_warn if we can find out the result wasn't from cache - debug!(%host, "No DNS records found: {e}"); - Ok(()) - }, - | ResolveErrorKind::Timeout => { - Err!(warn!(%host, "DNS {e}")) - }, - | ResolveErrorKind::NoConnections => { - error!( - "Your DNS server is overloaded and has ran out of connections. It is \ - strongly recommended you remediate this issue to ensure proper federation \ - connectivity." - ); + match e.kind() { + | Proto(e) => match e.kind() { + | ProtoErrorKind::NoRecordsFound { .. 
} => { + // Raise to debug_warn if we can find out the result wasn't from cache + debug!(%host, "No DNS records found: {e}"); + Ok(()) + }, + | ProtoErrorKind::Timeout => { + Err!(warn!(%host, "DNS {e}")) + }, + | ProtoErrorKind::NoConnections => { + error!( + "Your DNS server is overloaded and has ran out of connections. It is \ + strongly recommended you remediate this issue to ensure proper \ + federation connectivity." + ); - Err!(error!(%host, "DNS error: {e}")) + Err!(error!(%host, "DNS error: {e}")) + }, + | _ => Err!(error!(%host, "DNS error: {e}")), }, | _ => Err!(error!(%host, "DNS error: {e}")), } diff --git a/src/service/resolver/dns.rs b/src/service/resolver/dns.rs index e4245a5b..3a0b2551 100644 --- a/src/service/resolver/dns.rs +++ b/src/service/resolver/dns.rs @@ -2,19 +2,19 @@ use std::{net::SocketAddr, sync::Arc, time::Duration}; use conduwuit::{Result, Server, err}; use futures::FutureExt; -use hickory_resolver::{TokioAsyncResolver, lookup_ip::LookupIp}; +use hickory_resolver::{TokioResolver, lookup_ip::LookupIp}; use reqwest::dns::{Addrs, Name, Resolve, Resolving}; use super::cache::{Cache, CachedOverride}; pub struct Resolver { - pub(crate) resolver: Arc, + pub(crate) resolver: Arc, pub(crate) hooked: Arc, server: Arc, } pub(crate) struct Hooked { - resolver: Arc, + resolver: Arc, cache: Arc, server: Arc, } @@ -42,7 +42,7 @@ impl Resolver { let mut ns = sys_conf.clone(); if config.query_over_tcp_only { - ns.protocol = hickory_resolver::config::Protocol::Tcp; + ns.protocol = hickory_resolver::proto::xfer::Protocol::Tcp; } ns.trust_negative_responses = !config.query_all_nameservers; @@ -51,6 +51,7 @@ impl Resolver { } opts.cache_size = config.dns_cache_entries as usize; + opts.preserve_intermediates = true; opts.negative_min_ttl = Some(Duration::from_secs(config.dns_min_ttl_nxdomain)); opts.negative_max_ttl = Some(Duration::from_secs(60 * 60 * 24 * 30)); opts.positive_min_ttl = Some(Duration::from_secs(config.dns_min_ttl)); @@ -60,8 +61,7 @@ impl Resolver { opts.try_tcp_on_error = config.dns_tcp_fallback; opts.num_concurrent_reqs = 1; opts.edns0 = true; - opts.shuffle_dns_servers = true; - opts.rotate = true; + opts.case_randomization = true; opts.ip_strategy = match config.ip_lookup_strategy { | 1 => hickory_resolver::config::LookupIpStrategy::Ipv4Only, | 2 => hickory_resolver::config::LookupIpStrategy::Ipv6Only, @@ -69,9 +69,13 @@ impl Resolver { | 4 => hickory_resolver::config::LookupIpStrategy::Ipv6thenIpv4, | _ => hickory_resolver::config::LookupIpStrategy::Ipv4thenIpv6, }; - opts.authentic_data = false; - let resolver = Arc::new(TokioAsyncResolver::tokio(conf, opts)); + let rt_prov = hickory_resolver::proto::runtime::TokioRuntimeProvider::new(); + let conn_prov = hickory_resolver::name_server::TokioConnectionProvider::new(rt_prov); + let mut builder = TokioResolver::builder_with_config(conf, conn_prov); + *builder.options_mut() = opts; + let resolver = Arc::new(builder.build()); + Ok(Arc::new(Self { resolver: resolver.clone(), hooked: Arc::new(Hooked { resolver, cache, server: server.clone() }), @@ -105,7 +109,7 @@ impl Resolve for Hooked { async fn hooked_resolve( cache: Arc, server: Arc, - resolver: Arc, + resolver: Arc, name: Name, ) -> Result> { match cache.get_override(name.as_str()).await { @@ -129,7 +133,7 @@ async fn hooked_resolve( async fn resolve_to_reqwest( server: Arc, - resolver: Arc, + resolver: Arc, name: Name, ) -> ResolvingResult { use std::{io, io::ErrorKind::Interrupted}; From 0b56204f89d37470346c1940e70354deebfd1a3a Mon Sep 17 00:00:00 2001 
From: Jason Volk Date: Thu, 3 Apr 2025 04:34:11 +0000 Subject: [PATCH 184/310] bump additional dependencies Signed-off-by: Jason Volk --- Cargo.lock | 264 ++++++++++++++++++++++++++++++----------------------- Cargo.toml | 20 ++-- 2 files changed, 161 insertions(+), 123 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 545f0f0d..da33af05 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -232,9 +232,9 @@ dependencies = [ [[package]] name = "aws-lc-rs" -version = "1.12.6" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dabb68eb3a7aa08b46fddfd59a3d55c978243557a90ab804769f7e20e67d2b01" +checksum = "19b756939cb2f8dc900aa6dcd505e6e2428e9cae7ff7b028c49e3946efa70878" dependencies = [ "aws-lc-sys", "zeroize", @@ -242,9 +242,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.27.1" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77926887776171ced7d662120a75998e444d3750c951abfe07f90da130514b1f" +checksum = "b9f7720b74ed28ca77f90769a71fd8c637a0137f6fae4ae947e1050229cff57f" dependencies = [ "bindgen 0.69.5", "cc", @@ -663,9 +663,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.34" +version = "4.5.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e958897981290da2a852763fe9cdb89cd36977a5d729023127095fa94d95e2ff" +checksum = "d8aa86934b44c19c50f87cc2790e19f54f7a67aedb64101c2e1a2e5ecfb73944" dependencies = [ "clap_builder", "clap_derive", @@ -673,9 +673,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.34" +version = "4.5.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83b0f35019843db2160b5bb19ae09b4e6411ac33fc6a712003c33e03090e2489" +checksum = "2414dbb2dd0695280da6ea9261e327479e9d37b0630f6b53ba2a11c60c679fd9" dependencies = [ "anstyle", "clap_lex", @@ -742,8 +742,8 @@ dependencies = [ "opentelemetry-jaeger", "opentelemetry_sdk", "sentry", - "sentry-tower 0.35.0", - "sentry-tracing 0.35.0", + "sentry-tower", + "sentry-tracing", "tokio", "tokio-metrics", "tracing", @@ -916,8 +916,8 @@ dependencies = [ "rustls", "sd-notify", "sentry", - "sentry-tower 0.35.0", - "sentry-tracing 0.35.0", + "sentry-tower", + "sentry-tracing", "serde_json", "tokio", "tower 0.5.2", @@ -1454,9 +1454,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11faaf5a5236997af9848be0bef4db95824b1d534ebc64d0f0c6cf3e67bd38dc" +checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" dependencies = [ "crc32fast", "miniz_oxide", @@ -2016,9 +2016,9 @@ dependencies = [ [[package]] name = "hyper-timeout" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ "hyper", "hyper-util", @@ -2336,10 +2336,11 @@ checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jobserver" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" dependencies = [ + "getrandom 0.3.2", "libc", ] @@ -3574,9 +3575,9 @@ checksum = 
"2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" -version = "0.12.9" +version = "0.12.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a77c62af46e79de0a562e1a9849205ffcb7fc1238876e9bd743357570e04046f" +checksum = "d19c46a6fdd48bc4dab94b6103fccc55d34c67cc0ad04653aad4ea2a07cd7bbb" dependencies = [ "async-compression", "base64 0.22.1", @@ -3612,6 +3613,7 @@ dependencies = [ "tokio-rustls", "tokio-socks", "tokio-util", + "tower 0.5.2", "tower-service", "url", "wasm-bindgen", @@ -4067,21 +4069,21 @@ checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" [[package]] name = "sentry" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a7332159e544e34db06b251b1eda5e546bd90285c3f58d9c8ff8450b484e0da" +checksum = "255914a8e53822abd946e2ce8baa41d4cded6b8e938913b7f7b9da5b7ab44335" dependencies = [ "httpdate", "reqwest", "rustls", "sentry-backtrace", "sentry-contexts", - "sentry-core 0.36.0", + "sentry-core", "sentry-debug-images", "sentry-log", "sentry-panic", - "sentry-tower 0.36.0", - "sentry-tracing 0.36.0", + "sentry-tower", + "sentry-tracing", "tokio", "ureq", "webpki-roots", @@ -4089,107 +4091,83 @@ dependencies = [ [[package]] name = "sentry-backtrace" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "565ec31ad37bab8e6d9f289f34913ed8768347b133706192f10606dabd5c6bc4" +checksum = "00293cd332a859961f24fd69258f7e92af736feaeb91020cff84dac4188a4302" dependencies = [ "backtrace", "once_cell", "regex", - "sentry-core 0.36.0", + "sentry-core", ] [[package]] name = "sentry-contexts" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e860275f25f27e8c0c7726ce116c7d5c928c5bba2ee73306e52b20a752298ea6" +checksum = "961990f9caa76476c481de130ada05614cd7f5aa70fb57c2142f0e09ad3fb2aa" dependencies = [ "hostname", "libc", "os_info", "rustc_version", - "sentry-core 0.36.0", + "sentry-core", "uname", ] [[package]] name = "sentry-core" -version = "0.35.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9f8b6dcd4fbae1e3e22b447f32670360b27e31b62ab040f7fb04e0f80c04d92" +checksum = "1a6409d845707d82415c800290a5d63be5e3df3c2e417b0997c60531dfbd35ef" dependencies = [ "once_cell", "rand 0.8.5", - "sentry-types 0.35.0", - "serde", - "serde_json", -] - -[[package]] -name = "sentry-core" -version = "0.36.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "653942e6141f16651273159f4b8b1eaeedf37a7554c00cd798953e64b8a9bf72" -dependencies = [ - "once_cell", - "rand 0.8.5", - "sentry-types 0.36.0", + "sentry-types", "serde", "serde_json", ] [[package]] name = "sentry-debug-images" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a60bc2154e6df59beed0ac13d58f8dfaf5ad20a88548a53e29e4d92e8e835c2" +checksum = "71ab5df4f3b64760508edfe0ba4290feab5acbbda7566a79d72673065888e5cc" dependencies = [ "findshlibs", "once_cell", - "sentry-core 0.36.0", + "sentry-core", ] [[package]] name = "sentry-log" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c96d796cba1b3a0793e7f53edc420c61f9419fba8fb34ad5519f5c7d01af6b2" +checksum = "693841da8dfb693af29105edfbea1d91348a13d23dd0a5d03761eedb9e450c46" dependencies = [ "log", - "sentry-core 
0.36.0", + "sentry-core", ] [[package]] name = "sentry-panic" -version = "0.36.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "105e3a956c8aa9dab1e4087b1657b03271bfc49d838c6ae9bfc7c58c802fd0ef" +checksum = "609b1a12340495ce17baeec9e08ff8ed423c337c1a84dffae36a178c783623f3" dependencies = [ "sentry-backtrace", - "sentry-core 0.36.0", + "sentry-core", ] [[package]] name = "sentry-tower" -version = "0.35.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcdaf9b1939589476bd57751d12a9653bbfe356610fc476d03d7683189183ab7" -dependencies = [ - "sentry-core 0.35.0", - "tower-layer", - "tower-service", -] - -[[package]] -name = "sentry-tower" -version = "0.36.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "082f781dfc504d984e16d99f8dbf94d6ee4762dd0fc28de25713d0f900a8164d" +checksum = "4b98005537e38ee3bc10e7d36e7febe9b8e573d03f2ddd85fcdf05d21f9abd6d" dependencies = [ "http", "pin-project", - "sentry-core 0.36.0", + "sentry-core", "tower-layer", "tower-service", "url", @@ -4197,49 +4175,21 @@ dependencies = [ [[package]] name = "sentry-tracing" -version = "0.35.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "263f73c757ed7915d3e1e34625eae18cad498a95b4261603d4ce3f87b159a6f0" -dependencies = [ - "sentry-core 0.35.0", - "tracing-core", - "tracing-subscriber", -] - -[[package]] -name = "sentry-tracing" -version = "0.36.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64e75c831b4d8b34a5aec1f65f67c5d46a26c7c5d3c7abd8b5ef430796900cf8" +checksum = "49f4e86402d5c50239dc7d8fd3f6d5e048221d5fcb4e026d8d50ab57fe4644cb" dependencies = [ "sentry-backtrace", - "sentry-core 0.36.0", + "sentry-core", "tracing-core", "tracing-subscriber", ] [[package]] name = "sentry-types" -version = "0.35.0" +version = "0.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a71ed3a389948a6a6d92b98e997a2723ca22f09660c5a7b7388ecd509a70a527" -dependencies = [ - "debugid", - "hex", - "rand 0.8.5", - "serde", - "serde_json", - "thiserror 1.0.69", - "time", - "url", - "uuid", -] - -[[package]] -name = "sentry-types" -version = "0.36.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d4203359e60724aa05cf2385aaf5d4f147e837185d7dd2b9ccf1ee77f4420c8" +checksum = "3d3f117b8755dbede8260952de2aeb029e20f432e72634e8969af34324591631" dependencies = [ "debugid", "hex", @@ -5532,8 +5482,8 @@ checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" dependencies = [ "windows-implement", "windows-interface", - "windows-result", - "windows-strings", + "windows-result 0.2.0", + "windows-strings 0.1.0", "windows-targets 0.52.6", ] @@ -5560,14 +5510,20 @@ dependencies = [ ] [[package]] -name = "windows-registry" -version = "0.2.0" +name = "windows-link" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" + +[[package]] +name = "windows-registry" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" dependencies = [ - "windows-result", - "windows-strings", - "windows-targets 0.52.6", + "windows-result 0.3.2", + "windows-strings 0.3.1", + "windows-targets 0.53.0", ] 
[[package]] @@ -5579,16 +5535,34 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-result" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-strings" version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" dependencies = [ - "windows-result", + "windows-result 0.2.0", "windows-targets 0.52.6", ] +[[package]] +name = "windows-strings" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87fa48cc5d406560701792be122a10132491cff9d0aeb23583cc2dcafc847319" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-sys" version = "0.48.0" @@ -5640,13 +5614,29 @@ dependencies = [ "windows_aarch64_gnullvm 0.52.6", "windows_aarch64_msvc 0.52.6", "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm", + "windows_i686_gnullvm 0.52.6", "windows_i686_msvc 0.52.6", "windows_x86_64_gnu 0.52.6", "windows_x86_64_gnullvm 0.52.6", "windows_x86_64_msvc 0.52.6", ] +[[package]] +name = "windows-targets" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" +dependencies = [ + "windows_aarch64_gnullvm 0.53.0", + "windows_aarch64_msvc 0.53.0", + "windows_i686_gnu 0.53.0", + "windows_i686_gnullvm 0.53.0", + "windows_i686_msvc 0.53.0", + "windows_x86_64_gnu 0.53.0", + "windows_x86_64_gnullvm 0.53.0", + "windows_x86_64_msvc 0.53.0", +] + [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" @@ -5659,6 +5649,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" + [[package]] name = "windows_aarch64_msvc" version = "0.48.5" @@ -5671,6 +5667,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +[[package]] +name = "windows_aarch64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" + [[package]] name = "windows_i686_gnu" version = "0.48.5" @@ -5683,12 +5685,24 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +[[package]] +name = "windows_i686_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" + [[package]] name = "windows_i686_gnullvm" version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +[[package]] +name = "windows_i686_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" + [[package]] name = "windows_i686_msvc" version = "0.48.5" @@ 
-5701,6 +5715,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +[[package]] +name = "windows_i686_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" + [[package]] name = "windows_x86_64_gnu" version = "0.48.5" @@ -5713,6 +5733,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +[[package]] +name = "windows_x86_64_gnu" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" + [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" @@ -5725,6 +5751,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" + [[package]] name = "windows_x86_64_msvc" version = "0.48.5" @@ -5737,6 +5769,12 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "windows_x86_64_msvc" +version = "0.53.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" + [[package]] name = "winnow" version = "0.7.4" diff --git a/Cargo.toml b/Cargo.toml index 6c5c291f..3ffa9e44 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -141,12 +141,12 @@ features = [ ] [workspace.dependencies.rustls] -version = "0.23.19" +version = "0.23.25" default-features = false features = ["aws_lc_rs"] [workspace.dependencies.reqwest] -version = "0.12.9" +version = "0.12.15" default-features = false features = [ "rustls-tls-native-roots", @@ -204,7 +204,7 @@ features = [ # logging [workspace.dependencies.log] -version = "0.4.22" +version = "0.4.27" default-features = false [workspace.dependencies.tracing] version = "0.1.41" @@ -224,7 +224,7 @@ default-features = false # used for conduwuit's CLI and admin room command parsing [workspace.dependencies.clap] -version = "4.5.23" +version = "4.5.35" default-features = false features = [ "derive", @@ -320,7 +320,7 @@ default-features = false # Used when hashing the state [workspace.dependencies.ring] -version = "0.17.8" +version = "0.17.14" default-features = false # Used to make working with iterators easier, was already a transitive depdendency @@ -427,7 +427,7 @@ features = ["rt-tokio"] # optional sentry metrics for crash/panic reporting [workspace.dependencies.sentry] -version = "0.36.0" +version = "0.37.0" default-features = false features = [ "backtrace", @@ -443,9 +443,9 @@ features = [ ] [workspace.dependencies.sentry-tracing] -version = "0.35.0" +version = "0.37.0" [workspace.dependencies.sentry-tower] -version = "0.35.0" +version = "0.37.0" # jemalloc usage [workspace.dependencies.tikv-jemalloc-sys] @@ -479,7 +479,7 @@ default-features = false features = ["resource"] [workspace.dependencies.sd-notify] -version = "0.4.3" +version = "0.4.5" default-features = false [workspace.dependencies.hardened_malloc-rs] 
@@ -496,7 +496,7 @@ version = "0.4.3" default-features = false [workspace.dependencies.termimad] -version = "0.31.1" +version = "0.31.2" default-features = false [workspace.dependencies.checked_ops] From f9529937ce9a8dacf186fb4f60ef0c3315bb02a0 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Apr 2025 19:36:24 +0000 Subject: [PATCH 185/310] patch hyper-util due to conflicts with federation resolver hooks Signed-off-by: Jason Volk --- Cargo.lock | 3 +-- Cargo.toml | 6 ++++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index da33af05..8918a631 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2030,8 +2030,7 @@ dependencies = [ [[package]] name = "hyper-util" version = "0.1.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "497bbc33a26fdd4af9ed9c70d63f61cf56a938375fbb32df34db9b1cd6d643f2" +source = "git+https://github.com/girlbossceo/hyper-util?rev=e4ae7628fe4fcdacef9788c4c8415317a4489941#e4ae7628fe4fcdacef9788c4c8415317a4489941" dependencies = [ "bytes", "futures-channel", diff --git a/Cargo.toml b/Cargo.toml index 3ffa9e44..bf7ec2bb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -570,10 +570,16 @@ rev = "fe4aebeeaae435af60087ddd56b573a2e0be671d" git = "https://github.com/girlbossceo/async-channel" rev = "92e5e74063bf2a3b10414bcc8a0d68b235644280" +# adds affinity masks for selecting more than one core at a time [patch.crates-io.core_affinity] git = "https://github.com/girlbossceo/core_affinity_rs" rev = "9c8e51510c35077df888ee72a36b4b05637147da" +# reverts hyperium#148 conflicting with our delicate federation resolver hooks +[patch.crates-io.hyper-util] +git = "https://github.com/girlbossceo/hyper-util" +rev = "e4ae7628fe4fcdacef9788c4c8415317a4489941" + # # Our crates # From 45fd3875c8932e56d1ab092004065b0800861201 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Apr 2025 00:59:23 +0000 Subject: [PATCH 186/310] move runtime shutdown out of main; gather final stats Signed-off-by: Jason Volk --- src/main/main.rs | 7 +++---- src/main/runtime.rs | 43 ++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 43 insertions(+), 7 deletions(-) diff --git a/src/main/main.rs b/src/main/main.rs index 52f40384..1a9d3fe4 100644 --- a/src/main/main.rs +++ b/src/main/main.rs @@ -16,15 +16,14 @@ use server::Server; rustc_flags_capture! 
{} -fn main() -> Result<(), Error> { +fn main() -> Result { let args = clap::parse(); let runtime = runtime::new(&args)?; let server = Server::new(&args, Some(runtime.handle()))?; + runtime.spawn(signal::signal(server.clone())); runtime.block_on(async_main(&server))?; - - // explicit drop here to trace thread and tls dtors - drop(runtime); + runtime::shutdown(&server, runtime); #[cfg(unix)] if server.server.restarting.load(Ordering::Acquire) { diff --git a/src/main/runtime.rs b/src/main/runtime.rs index 920476db..1c58ea81 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -1,7 +1,7 @@ use std::{ iter::once, sync::{ - OnceLock, + Arc, OnceLock, atomic::{AtomicUsize, Ordering}, }, thread, @@ -11,17 +11,18 @@ use std::{ #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] use conduwuit_core::result::LogDebugErr; use conduwuit_core::{ - Result, is_true, + Result, debug, is_true, utils::sys::compute::{nth_core_available, set_affinity}, }; use tokio::runtime::Builder; -use crate::clap::Args; +use crate::{clap::Args, server::Server}; const WORKER_NAME: &str = "conduwuit:worker"; const WORKER_MIN: usize = 2; const WORKER_KEEPALIVE: u64 = 36; const MAX_BLOCKING_THREADS: usize = 1024; +const SHUTDOWN_TIMEOUT: Duration = Duration::from_millis(10000); #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] const DISABLE_MUZZY_THRESHOLD: usize = 4; @@ -83,6 +84,42 @@ fn enable_histogram(builder: &mut Builder, args: &Args) { .metrics_poll_time_histogram_configuration(linear); } +#[cfg(tokio_unstable)] +#[tracing::instrument(name = "stop", level = "info", skip_all)] +pub(super) fn shutdown(server: &Arc, runtime: tokio::runtime::Runtime) { + use conduwuit_core::event; + use tracing::Level; + + // The final metrics output is promoted to INFO when tokio_unstable is active in + // a release/bench mode and DEBUG is likely optimized out + const LEVEL: Level = if cfg!(debug_assertions) { + Level::DEBUG + } else { + Level::INFO + }; + + debug!( + timeout = ?SHUTDOWN_TIMEOUT, + "Waiting for runtime..." + ); + + runtime.shutdown_timeout(SHUTDOWN_TIMEOUT); + let runtime_metrics = server.server.metrics.runtime_interval().unwrap_or_default(); + + event!(LEVEL, ?runtime_metrics, "Final runtime metrics"); +} + +#[cfg(not(tokio_unstable))] +#[tracing::instrument(name = "stop", level = "info", skip_all)] +pub(super) fn shutdown(_server: &Arc, runtime: tokio::runtime::Runtime) { + debug!( + timeout = ?SHUTDOWN_TIMEOUT, + "Waiting for runtime..." 
+ ); + + runtime.shutdown_timeout(SHUTDOWN_TIMEOUT); +} + #[tracing::instrument( name = "fork", level = "debug", From 29d55b80366e17737094d3ad9a8031fe20c6286e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 2 Apr 2025 04:12:24 +0000 Subject: [PATCH 187/310] move systemd stopping notification point Signed-off-by: Jason Volk --- src/core/server.rs | 19 ++++++++++--------- src/router/run.rs | 4 ++++ 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/src/core/server.rs b/src/core/server.rs index b67759d6..4b673f32 100644 --- a/src/core/server.rs +++ b/src/core/server.rs @@ -69,10 +69,6 @@ impl Server { return Err!("Reloading not enabled"); } - #[cfg(all(feature = "systemd", target_os = "linux"))] - sd_notify::notify(true, &[sd_notify::NotifyState::Reloading]) - .expect("failed to notify systemd of reloading state"); - if self.reloading.swap(true, Ordering::AcqRel) { return Err!("Reloading already in progress"); } @@ -98,10 +94,6 @@ impl Server { } pub fn shutdown(&self) -> Result { - #[cfg(all(feature = "systemd", target_os = "linux"))] - sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]) - .expect("failed to notify systemd of stopping state"); - if self.stopping.swap(true, Ordering::AcqRel) { return Err!("Shutdown already in progress"); } @@ -144,7 +136,16 @@ impl Server { } #[inline] - pub fn running(&self) -> bool { !self.stopping.load(Ordering::Acquire) } + pub fn running(&self) -> bool { !self.is_stopping() } + + #[inline] + pub fn is_stopping(&self) -> bool { self.stopping.load(Ordering::Relaxed) } + + #[inline] + pub fn is_reloading(&self) -> bool { self.reloading.load(Ordering::Relaxed) } + + #[inline] + pub fn is_restarting(&self) -> bool { self.restarting.load(Ordering::Relaxed) } #[inline] pub fn is_ours(&self, name: &str) -> bool { name == self.config.server_name } diff --git a/src/router/run.rs b/src/router/run.rs index 31789626..ff54594f 100644 --- a/src/router/run.rs +++ b/src/router/run.rs @@ -77,6 +77,10 @@ pub(crate) async fn start(server: Arc) -> Result> { pub(crate) async fn stop(services: Arc) -> Result<()> { debug!("Shutting down..."); + #[cfg(all(feature = "systemd", target_os = "linux"))] + sd_notify::notify(true, &[sd_notify::NotifyState::Stopping]) + .expect("failed to notify systemd of stopping state"); + // Wait for all completions before dropping or we'll lose them to the module // unload and explode. 
services.stop().await; From 94b107b42b722aff9518f64ad603ce01665b25f3 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Thu, 3 Apr 2025 16:08:02 -0400 Subject: [PATCH 188/310] add some debug logging and misc cleanup to keys/signatures/upload Signed-off-by: June Clementine Strawberry --- Cargo.lock | 22 +++++----- Cargo.toml | 2 +- src/api/client/keys.rs | 95 ++++++++++++++++++++++++++-------------- src/service/users/mod.rs | 18 +++++--- 4 files changed, 86 insertions(+), 51 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8918a631..0753f81d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3654,7 +3654,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "assign", "js_int", @@ -3674,7 +3674,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "js_int", "ruma-common", @@ -3686,7 +3686,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "as_variant", "assign", @@ -3709,7 +3709,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "as_variant", "base64 0.22.1", @@ -3741,7 +3741,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "as_variant", "indexmap 2.8.0", @@ -3766,7 +3766,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "bytes", "headers", @@ -3788,7 +3788,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3797,7 +3797,7 @@ 
dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "js_int", "ruma-common", @@ -3807,7 +3807,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3822,7 +3822,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "js_int", "ruma-common", @@ -3834,7 +3834,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=0701341a2fd5a6ea74beada18d5974cc401a4fc1#0701341a2fd5a6ea74beada18d5974cc401a4fc1" +source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index bf7ec2bb..a44fc0f0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -350,7 +350,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "0701341a2fd5a6ea74beada18d5974cc401a4fc1" +rev = "edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" features = [ "compat", "rand", diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index f50d7afa..f6224343 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -9,7 +9,8 @@ use ruma::{ client::{ error::ErrorKind, keys::{ - claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures, + claim_keys, get_key_changes, get_keys, upload_keys, + upload_signatures::{self, v3::Failure}, upload_signing_keys, }, uiaa::{AuthFlow, AuthType, UiaaInfo}, @@ -308,53 +309,81 @@ async fn check_for_new_keys( /// # `POST /_matrix/client/r0/keys/signatures/upload` /// /// Uploads end-to-end key signatures from the sender user. +/// +/// TODO: clean this timo-code up more. tried to improve it a bit to stop +/// exploding the entire request on bad sigs, but needs way more work. 
pub(crate) async fn upload_signatures_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + use upload_signatures::v3::FailureErrorCode::*; + + if body.signed_keys.is_empty() { + debug!("Empty signed_keys sent in key signature upload"); + return Ok(upload_signatures::v3::Response::new()); + } + + let sender_user = body.sender_user(); + let mut failures: BTreeMap> = BTreeMap::new(); + let mut failure_reasons: BTreeMap = BTreeMap::new(); + let failure = Failure { + errcode: InvalidSignature, + error: String::new(), + }; for (user_id, keys) in &body.signed_keys { for (key_id, key) in keys { - let key = serde_json::to_value(key) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid key JSON"))?; + let Ok(key) = serde_json::to_value(key) + .inspect_err(|e| debug_warn!(?key_id, "Invalid \"key\" JSON: {e}")) + else { + let mut failure = failure.clone(); + failure.error = String::from("Invalid \"key\" JSON"); + failure_reasons.insert(key_id.to_owned(), failure); + continue; + }; - for signature in key - .get("signatures") - .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Missing signatures field."))? - .get(sender_user.to_string()) - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid user in signatures field.", - ))? - .as_object() - .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Invalid signature."))? - .clone() - { - // Signature validation? - let signature = ( - signature.0, - signature - .1 - .as_str() - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid signature value.", - ))? - .to_owned(), - ); + let Some(signatures) = key.get("signatures") else { + let mut failure = failure.clone(); + failure.error = String::from("Missing \"signatures\" field"); + failure_reasons.insert(key_id.to_owned(), failure); + continue; + }; - services + let Some(sender_user_val) = signatures.get(sender_user.to_string()) else { + let mut failure = failure.clone(); + failure.error = String::from("Invalid user in signatures field"); + failure_reasons.insert(key_id.to_owned(), failure); + continue; + }; + + let Some(sender_user_object) = sender_user_val.as_object() else { + let mut failure = failure.clone(); + failure.error = String::from("signatures field is not a JSON object"); + failure_reasons.insert(key_id.to_owned(), failure); + continue; + }; + + for (signature, val) in sender_user_object.clone() { + let signature = (signature, val.to_string()); + + if let Err(e) = services .users .sign_key(user_id, key_id, signature, sender_user) - .await?; + .await + .inspect_err(|e| debug_warn!("{e}")) + { + let mut failure = failure.clone(); + failure.error = format!("Error signing key: {e}"); + failure_reasons.insert(key_id.to_owned(), failure); + continue; + } } } + + failures.insert(user_id.to_owned(), failure_reasons.clone()); } - Ok(upload_signatures::v3::Response { - failures: BTreeMap::new(), // TODO: integrate - }) + Ok(upload_signatures::v3::Response { failures }) } /// # `POST /_matrix/client/r0/keys/changes` diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 87a8b93b..1eb289fc 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -593,7 +593,7 @@ impl Service { key_id: &str, signature: (String, String), sender_id: &UserId, - ) -> Result<()> { + ) -> Result { let key = (target_id, key_id); let mut cross_signing_key: serde_json::Value = self @@ -601,21 +601,27 @@ impl Service { .keyid_key .qry(&key) .await - .map_err(|_| 
err!(Request(InvalidParam("Tried to sign nonexistent key."))))? + .map_err(|_| err!(Request(InvalidParam("Tried to sign nonexistent key"))))? .deserialized() - .map_err(|e| err!(Database("key in keyid_key is invalid. {e:?}")))?; + .map_err(|e| err!(Database(debug_warn!("key in keyid_key is invalid: {e:?}"))))?; let signatures = cross_signing_key .get_mut("signatures") - .ok_or_else(|| err!(Database("key in keyid_key has no signatures field.")))? + .ok_or_else(|| { + err!(Database(debug_warn!("key in keyid_key has no signatures field"))) + })? .as_object_mut() - .ok_or_else(|| err!(Database("key in keyid_key has invalid signatures field.")))? + .ok_or_else(|| { + err!(Database(debug_warn!("key in keyid_key has invalid signatures field."))) + })? .entry(sender_id.to_string()) .or_insert_with(|| serde_json::Map::new().into()); signatures .as_object_mut() - .ok_or_else(|| err!(Database("signatures in keyid_key for a user is invalid.")))? + .ok_or_else(|| { + err!(Database(debug_warn!("signatures in keyid_key for a user is invalid."))) + })? .insert(signature.0, signature.1.into()); let key = (target_id, key_id); From b7109131e29804ac6b4e30aaaa40f213d092a63a Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Apr 2025 22:06:51 +0000 Subject: [PATCH 189/310] further simplify get_missing_events; various log calls Signed-off-by: Jason Volk --- src/api/server/get_missing_events.rs | 47 +++++++++---------- .../rooms/state_accessor/server_can.rs | 8 +--- src/service/rooms/state_accessor/user_can.rs | 14 ++---- 3 files changed, 29 insertions(+), 40 deletions(-) diff --git a/src/api/server/get_missing_events.rs b/src/api/server/get_missing_events.rs index d72918fa..04dc30ed 100644 --- a/src/api/server/get_missing_events.rs +++ b/src/api/server/get_missing_events.rs @@ -1,9 +1,5 @@ use axum::extract::State; -use conduwuit::{ - Result, debug, debug_info, debug_warn, - utils::{self}, - warn, -}; +use conduwuit::{Result, debug, debug_error, utils::to_canonical_object}; use ruma::api::federation::event::get_missing_events; use super::AccessCheck; @@ -43,19 +39,13 @@ pub(crate) async fn get_missing_events_route( let mut i: usize = 0; while i < queued_events.len() && events.len() < limit { let Ok(pdu) = services.rooms.timeline.get_pdu(&queued_events[i]).await else { - debug_info!(?body.origin, "Event {} does not exist locally, skipping", &queued_events[i]); - i = i.saturating_add(1); - continue; - }; - - if pdu.room_id != body.room_id { - warn!(?body.origin, - "Got an event for the wrong room in database. Found {:?} in {:?}, server requested events in {:?}. 
Skipping.", - pdu.event_id, pdu.room_id, body.room_id + debug!( + ?body.origin, + "Event {} does not exist locally, skipping", &queued_events[i] ); i = i.saturating_add(1); continue; - } + }; if body.earliest_events.contains(&queued_events[i]) { i = i.saturating_add(1); @@ -68,25 +58,32 @@ pub(crate) async fn get_missing_events_route( .server_can_see_event(body.origin(), &body.room_id, &queued_events[i]) .await { - debug!(?body.origin, "Server cannot see {:?} in {:?}, skipping", pdu.event_id, pdu.room_id); + debug!( + ?body.origin, + "Server cannot see {:?} in {:?}, skipping", pdu.event_id, pdu.room_id + ); i = i.saturating_add(1); continue; } - let Ok(pdu_json) = utils::to_canonical_object(&pdu) else { - debug_warn!(?body.origin, "Failed to convert PDU in database to canonical JSON: {pdu:?}"); + let Ok(event) = to_canonical_object(&pdu) else { + debug_error!( + ?body.origin, + "Failed to convert PDU in database to canonical JSON: {pdu:?}" + ); i = i.saturating_add(1); continue; }; - queued_events.extend(pdu.prev_events.iter().map(ToOwned::to_owned)); + let prev_events = pdu.prev_events.iter().map(ToOwned::to_owned); - events.push( - services - .sending - .convert_to_outgoing_federation_event(pdu_json) - .await, - ); + let event = services + .sending + .convert_to_outgoing_federation_event(event) + .await; + + queued_events.extend(prev_events); + events.push(event); } Ok(get_missing_events::v1::Response { events }) diff --git a/src/service/rooms/state_accessor/server_can.rs b/src/service/rooms/state_accessor/server_can.rs index c946fbfd..2befec22 100644 --- a/src/service/rooms/state_accessor/server_can.rs +++ b/src/service/rooms/state_accessor/server_can.rs @@ -1,4 +1,4 @@ -use conduwuit::{debug_info, implement, utils::stream::ReadyExt}; +use conduwuit::{implement, utils::stream::ReadyExt}; use futures::StreamExt; use ruma::{ EventId, RoomId, ServerName, @@ -36,7 +36,6 @@ pub async fn server_can_see_event( .ready_filter(|member| member.server_name() == origin); match history_visibility { - | HistoryVisibility::WorldReadable | HistoryVisibility::Shared => true, | HistoryVisibility::Invited => { // Allow if any member on requesting server was AT LEAST invited, else deny current_server_members @@ -49,9 +48,6 @@ pub async fn server_can_see_event( .any(|member| self.user_was_joined(shortstatehash, member)) .await }, - | _ => { - debug_info!(%room_id, "Unknown history visibility, defaulting to shared: {history_visibility:?}"); - true - }, + | HistoryVisibility::WorldReadable | HistoryVisibility::Shared | _ => true, } } diff --git a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs index aa54407b..67e0b52b 100644 --- a/src/service/rooms/state_accessor/user_can.rs +++ b/src/service/rooms/state_accessor/user_can.rs @@ -1,4 +1,4 @@ -use conduwuit::{Err, Error, Result, debug_info, implement, pdu::PduBuilder}; +use conduwuit::{Err, Result, implement, pdu::PduBuilder}; use ruma::{ EventId, RoomId, UserId, events::{ @@ -76,8 +76,8 @@ pub async fn user_can_redact( || redacting_event .as_ref() .is_ok_and(|redacting_event| redacting_event.sender == sender)), - | _ => Err(Error::bad_database( - "No m.room.power_levels or m.room.create events in database for room", + | _ => Err!(Database( + "No m.room.power_levels or m.room.create events in database for room" )), } }, @@ -108,8 +108,6 @@ pub async fn user_can_see_event( }); match history_visibility { - | HistoryVisibility::WorldReadable => true, - | HistoryVisibility::Shared => currently_member, | 
HistoryVisibility::Invited => { // Allow if any member on requesting server was AT LEAST invited, else deny self.user_was_invited(shortstatehash, user_id).await @@ -118,10 +116,8 @@ pub async fn user_can_see_event( // Allow if any member on requested server was joined, else deny self.user_was_joined(shortstatehash, user_id).await }, - | _ => { - debug_info!(%room_id, "Unknown history visibility, defaulting to shared: {history_visibility:?}"); - currently_member - }, + | HistoryVisibility::WorldReadable => true, + | HistoryVisibility::Shared | _ => currently_member, } } From 6a073b4fa4c728b15f94de88ac37d136c97982bf Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 2 Apr 2025 06:28:34 +0000 Subject: [PATCH 190/310] remove additional unnecessary Arc Signed-off-by: Jason Volk --- .../fetch_and_handle_outliers.rs | 6 ++--- src/service/rooms/event_handler/fetch_prev.rs | 7 ++---- .../rooms/event_handler/handle_outlier_pdu.rs | 22 ++++++------------- .../rooms/event_handler/handle_prev_pdu.rs | 6 +---- .../rooms/event_handler/state_at_incoming.rs | 5 ++--- .../event_handler/upgrade_outlier_pdu.rs | 2 +- 6 files changed, 15 insertions(+), 33 deletions(-) diff --git a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs index 80e91eff..b0a7d827 100644 --- a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs +++ b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs @@ -1,6 +1,5 @@ use std::{ collections::{BTreeMap, HashSet, VecDeque, hash_map}, - sync::Arc, time::Instant, }; @@ -8,7 +7,6 @@ use conduwuit::{ PduEvent, debug, debug_error, debug_warn, implement, pdu, trace, utils::continue_exponential_backoff_secs, warn, }; -use futures::TryFutureExt; use ruma::{ CanonicalJsonValue, OwnedEventId, RoomId, ServerName, api::federation::event::get_event, }; @@ -31,7 +29,7 @@ pub(super) async fn fetch_and_handle_outliers<'a>( events: &'a [OwnedEventId], create_event: &'a PduEvent, room_id: &'a RoomId, -) -> Vec<(Arc, Option>)> { +) -> Vec<(PduEvent, Option>)> { let back_off = |id| match self .services .globals @@ -53,7 +51,7 @@ pub(super) async fn fetch_and_handle_outliers<'a>( // a. Look in the main timeline (pduid_pdu tree) // b. 
Look at outlier pdu tree // (get_pdu_json checks both) - if let Ok(local_pdu) = self.services.timeline.get_pdu(id).map_ok(Arc::new).await { + if let Ok(local_pdu) = self.services.timeline.get_pdu(id).await { trace!("Found {id} in db"); events_with_auth_events.push((id, Some(local_pdu), vec![])); continue; diff --git a/src/service/rooms/event_handler/fetch_prev.rs b/src/service/rooms/event_handler/fetch_prev.rs index e817430b..0f92d6e6 100644 --- a/src/service/rooms/event_handler/fetch_prev.rs +++ b/src/service/rooms/event_handler/fetch_prev.rs @@ -1,7 +1,4 @@ -use std::{ - collections::{BTreeMap, HashMap, HashSet, VecDeque}, - sync::Arc, -}; +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; use conduwuit::{ PduEvent, Result, debug_warn, err, implement, @@ -31,7 +28,7 @@ pub(super) async fn fetch_prev( initial_set: Vec, ) -> Result<( Vec, - HashMap, BTreeMap)>, + HashMap)>, )> { let mut graph: HashMap = HashMap::with_capacity(initial_set.len()); let mut eventid_info = HashMap::new(); diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index 99e90a50..5339249d 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -1,12 +1,9 @@ -use std::{ - collections::{BTreeMap, HashMap, hash_map}, - sync::Arc, -}; +use std::collections::{BTreeMap, HashMap, hash_map}; use conduwuit::{ Err, Error, PduEvent, Result, debug, debug_info, err, implement, state_res, trace, warn, }; -use futures::{TryFutureExt, future::ready}; +use futures::future::ready; use ruma::{ CanonicalJsonObject, CanonicalJsonValue, EventId, RoomId, ServerName, api::client::error::ErrorKind, events::StateEventType, @@ -24,7 +21,7 @@ pub(super) async fn handle_outlier_pdu<'a>( room_id: &'a RoomId, mut value: CanonicalJsonObject, auth_events_known: bool, -) -> Result<(Arc, BTreeMap)> { +) -> Result<(PduEvent, BTreeMap)> { // 1. 
Remove unsigned field value.remove("unsigned"); @@ -95,7 +92,7 @@ pub(super) async fn handle_outlier_pdu<'a>( // Build map of auth events let mut auth_events = HashMap::with_capacity(incoming_pdu.auth_events.len()); for id in &incoming_pdu.auth_events { - let Ok(auth_event) = self.services.timeline.get_pdu(id).map_ok(Arc::new).await else { + let Ok(auth_event) = self.services.timeline.get_pdu(id).await else { warn!("Could not find auth event {id}"); continue; }; @@ -123,15 +120,10 @@ pub(super) async fn handle_outlier_pdu<'a>( // The original create event must be in the auth events if !matches!( - auth_events - .get(&(StateEventType::RoomCreate, String::new().into())) - .map(AsRef::as_ref), + auth_events.get(&(StateEventType::RoomCreate, String::new().into())), Some(_) | None ) { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Incoming event refers to wrong create event.", - )); + return Err!(Request(InvalidParam("Incoming event refers to wrong create event."))); } let state_fetch = |ty: &StateEventType, sk: &str| { @@ -161,5 +153,5 @@ pub(super) async fn handle_outlier_pdu<'a>( trace!("Added pdu as outlier."); - Ok((Arc::new(incoming_pdu), val)) + Ok((incoming_pdu, val)) } diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index cf69a515..85e0a6b9 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -1,6 +1,5 @@ use std::{ collections::{BTreeMap, HashMap}, - sync::Arc, time::Instant, }; @@ -24,10 +23,7 @@ pub(super) async fn handle_prev_pdu<'a>( origin: &'a ServerName, event_id: &'a EventId, room_id: &'a RoomId, - eventid_info: &mut HashMap< - OwnedEventId, - (Arc, BTreeMap), - >, + eventid_info: &mut HashMap)>, create_event: &PduEvent, first_ts_in_room: UInt, prev_id: &EventId, diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 8326f9da..0402ff14 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -2,7 +2,6 @@ use std::{ borrow::Borrow, collections::{HashMap, HashSet}, iter::Iterator, - sync::Arc, }; use conduwuit::{ @@ -20,7 +19,7 @@ use crate::rooms::short::ShortStateHash; #[tracing::instrument(name = "state", level = "debug", skip_all)] pub(super) async fn state_at_incoming_degree_one( &self, - incoming_pdu: &Arc, + incoming_pdu: &PduEvent, ) -> Result>> { let prev_event = &incoming_pdu.prev_events[0]; let Ok(prev_event_sstatehash) = self @@ -67,7 +66,7 @@ pub(super) async fn state_at_incoming_degree_one( #[tracing::instrument(name = "state", level = "debug", skip_all)] pub(super) async fn state_at_incoming_resolved( &self, - incoming_pdu: &Arc, + incoming_pdu: &PduEvent, room_id: &RoomId, room_version_id: &RoomVersionId, ) -> Result>> { diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index c1a1c3eb..086dc6bd 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -18,7 +18,7 @@ use crate::rooms::{ #[implement(super::Service)] pub(super) async fn upgrade_outlier_to_timeline_pdu( &self, - incoming_pdu: Arc, + incoming_pdu: PduEvent, val: BTreeMap, create_event: &PduEvent, origin: &ServerName, From d036394ec79cf94aee484e6bea41421396dcd749 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 2 Apr 2025 09:53:42 +0000 Subject: [PATCH 
191/310] refactor incoming prev events loop; mitigate large future Signed-off-by: Jason Volk --- .../event_handler/handle_incoming_pdu.rs | 102 ++++++++++-------- .../rooms/event_handler/handle_prev_pdu.rs | 65 +++++------ 2 files changed, 88 insertions(+), 79 deletions(-) diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index b437bf2e..77cae41d 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -3,9 +3,12 @@ use std::{ time::Instant, }; -use conduwuit::{Err, Result, debug, debug::INFO_SPAN_LEVEL, err, implement, warn}; +use conduwuit::{ + Err, Result, debug, debug::INFO_SPAN_LEVEL, defer, err, implement, utils::stream::IterStream, + warn, +}; use futures::{ - FutureExt, + FutureExt, TryFutureExt, TryStreamExt, future::{OptionFuture, try_join5}, }; use ruma::{CanonicalJsonValue, EventId, RoomId, ServerName, UserId, events::StateEventType}; @@ -86,7 +89,7 @@ pub async fn handle_incoming_pdu<'a>( .state_accessor .room_state_get(room_id, &StateEventType::RoomCreate, ""); - let (meta_exists, is_disabled, (), (), create_event) = try_join5( + let (meta_exists, is_disabled, (), (), ref create_event) = try_join5( meta_exists, is_disabled, origin_acl_check, @@ -104,7 +107,7 @@ pub async fn handle_incoming_pdu<'a>( } let (incoming_pdu, val) = self - .handle_outlier_pdu(origin, &create_event, event_id, room_id, value, false) + .handle_outlier_pdu(origin, create_event, event_id, room_id, value, false) .await?; // 8. if not timeline event: stop @@ -129,66 +132,71 @@ pub async fn handle_incoming_pdu<'a>( let (sorted_prev_events, mut eventid_info) = self .fetch_prev( origin, - &create_event, + create_event, room_id, first_ts_in_room, incoming_pdu.prev_events.clone(), ) .await?; - debug!(events = ?sorted_prev_events, "Got previous events"); - for prev_id in sorted_prev_events { - self.services.server.check_running()?; - if let Err(e) = self - .handle_prev_pdu( + debug!( + events = ?sorted_prev_events, + "Handling previous events" + ); + + sorted_prev_events + .iter() + .try_stream() + .map_ok(AsRef::as_ref) + .try_for_each(|prev_id| { + self.handle_prev_pdu( origin, event_id, room_id, - &mut eventid_info, - &create_event, + eventid_info.remove(prev_id), + create_event, first_ts_in_room, - &prev_id, + prev_id, ) - .await - { - use hash_map::Entry; - - let now = Instant::now(); - warn!("Prev event {prev_id} failed: {e}"); - - match self - .services - .globals - .bad_event_ratelimiter - .write() - .expect("locked") - .entry(prev_id) - { - | Entry::Vacant(e) => { - e.insert((now, 1)); - }, - | Entry::Occupied(mut e) => { - *e.get_mut() = (now, e.get().1.saturating_add(1)); - }, - } - } - } + .inspect_err(move |e| { + warn!("Prev {prev_id} failed: {e}"); + match self + .services + .globals + .bad_event_ratelimiter + .write() + .expect("locked") + .entry(prev_id.into()) + { + | hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + }, + | hash_map::Entry::Occupied(mut e) => { + let tries = e.get().1.saturating_add(1); + *e.get_mut() = (Instant::now(), tries); + }, + } + }) + .map(|_| self.services.server.check_running()) + }) + .boxed() + .await?; // Done with prev events, now handling the incoming event let start_time = Instant::now(); self.federation_handletime .write() .expect("locked") - .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); + .insert(room_id.into(), (event_id.to_owned(), start_time)); - let r = self - 
.upgrade_outlier_to_timeline_pdu(incoming_pdu, val, &create_event, origin, room_id) - .await; + defer! {{ + self.federation_handletime + .write() + .expect("locked") + .remove(room_id); + }}; - self.federation_handletime - .write() - .expect("locked") - .remove(&room_id.to_owned()); - - r + self.upgrade_outlier_to_timeline_pdu(incoming_pdu, val, create_event, origin, room_id) + .boxed() + .await } diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index 85e0a6b9..d612b2bf 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -1,13 +1,10 @@ -use std::{ - collections::{BTreeMap, HashMap}, - time::Instant, -}; +use std::{collections::BTreeMap, time::Instant}; use conduwuit::{ - Err, PduEvent, Result, debug, debug::INFO_SPAN_LEVEL, implement, + Err, PduEvent, Result, debug, debug::INFO_SPAN_LEVEL, defer, implement, utils::continue_exponential_backoff_secs, }; -use ruma::{CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName, UInt}; +use ruma::{CanonicalJsonValue, EventId, RoomId, ServerName, UInt}; #[implement(super::Service)] #[allow(clippy::type_complexity)] @@ -23,10 +20,10 @@ pub(super) async fn handle_prev_pdu<'a>( origin: &'a ServerName, event_id: &'a EventId, room_id: &'a RoomId, - eventid_info: &mut HashMap)>, - create_event: &PduEvent, + eventid_info: Option<(PduEvent, BTreeMap)>, + create_event: &'a PduEvent, first_ts_in_room: UInt, - prev_id: &EventId, + prev_id: &'a EventId, ) -> Result { // Check for disabled again because it might have changed if self.services.metadata.is_disabled(room_id).await { @@ -57,31 +54,35 @@ pub(super) async fn handle_prev_pdu<'a>( } } - if let Some((pdu, json)) = eventid_info.remove(prev_id) { - // Skip old events - if pdu.origin_server_ts < first_ts_in_room { - return Ok(()); - } + let Some((pdu, json)) = eventid_info else { + return Ok(()); + }; - let start_time = Instant::now(); - self.federation_handletime - .write() - .expect("locked") - .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); - - self.upgrade_outlier_to_timeline_pdu(pdu, json, create_event, origin, room_id) - .await?; - - self.federation_handletime - .write() - .expect("locked") - .remove(&room_id.to_owned()); - - debug!( - elapsed = ?start_time.elapsed(), - "Handled prev_event", - ); + // Skip old events + if pdu.origin_server_ts < first_ts_in_room { + return Ok(()); } + let start_time = Instant::now(); + self.federation_handletime + .write() + .expect("locked") + .insert(room_id.into(), ((*prev_id).to_owned(), start_time)); + + defer! 
{{ + self.federation_handletime + .write() + .expect("locked") + .remove(room_id); + }}; + + self.upgrade_outlier_to_timeline_pdu(pdu, json, create_event, origin, room_id) + .await?; + + debug!( + elapsed = ?start_time.elapsed(), + "Handled prev_event", + ); + Ok(()) } From 00f7745ec4ebcea5f892376c5de5db1299f71696 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 4 Apr 2025 02:56:54 +0000 Subject: [PATCH 192/310] remove the db pool queue full warning Signed-off-by: Jason Volk --- src/database/pool.rs | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/src/database/pool.rs b/src/database/pool.rs index 47e61c30..0fa742d1 100644 --- a/src/database/pool.rs +++ b/src/database/pool.rs @@ -12,7 +12,7 @@ use std::{ use async_channel::{QueueStrategy, Receiver, RecvError, Sender}; use conduwuit::{ - Error, Result, Server, debug, debug_warn, err, error, implement, + Error, Result, Server, debug, err, error, implement, result::DebugInspect, smallvec::SmallVec, trace, @@ -245,13 +245,6 @@ async fn execute(&self, queue: &Sender, cmd: Cmd) -> Result { self.queued_max.fetch_max(queue.len(), Ordering::Relaxed); } - if queue.is_full() { - debug_warn!( - capacity = ?queue.capacity(), - "pool queue is full" - ); - } - queue .send(cmd) .await From 4e5b87d0cd16f3d015f4b61285b369d027bb909d Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Fri, 4 Apr 2025 11:34:31 -0400 Subject: [PATCH 193/310] add missing condition for signatures upload failures Signed-off-by: June Clementine Strawberry --- src/api/client/keys.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index f6224343..2fdfc0bc 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -380,7 +380,9 @@ pub(crate) async fn upload_signatures_route( } } - failures.insert(user_id.to_owned(), failure_reasons.clone()); + if !failure_reasons.is_empty() { + failures.insert(user_id.to_owned(), failure_reasons.clone()); + } } Ok(upload_signatures::v3::Response { failures }) From 532dfd004dbc020baa74a4d4413d9ad8139f851e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 4 Apr 2025 03:30:13 +0000 Subject: [PATCH 194/310] move core::pdu and core::state_res into core::matrix:: Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 4 +- src/admin/user/commands.rs | 3 +- src/api/client/account.rs | 6 ++- src/api/client/account_data.rs | 5 +- src/api/client/alias.rs | 2 +- src/api/client/backup.rs | 4 +- src/api/client/context.rs | 6 ++- src/api/client/device.rs | 4 +- src/api/client/directory.rs | 2 +- src/api/client/filter.rs | 4 +- src/api/client/keys.rs | 6 +-- src/api/client/membership.rs | 27 ++++++----- src/api/client/message.rs | 22 +++++---- src/api/client/openid.rs | 4 +- src/api/client/profile.rs | 4 +- src/api/client/push.rs | 6 +-- src/api/client/read_marker.rs | 4 +- src/api/client/redact.rs | 3 +- src/api/client/relations.rs | 5 +- src/api/client/report.rs | 8 ++-- src/api/client/room/create.rs | 6 ++- src/api/client/room/upgrade.rs | 5 +- src/api/client/search.rs | 5 +- src/api/client/send.rs | 4 +- src/api/client/session.rs | 9 ++-- src/api/client/space.rs | 10 ++-- src/api/client/state.rs | 8 +++- src/api/client/sync/mod.rs | 5 +- src/api/client/sync/v3.rs | 9 ++-- src/api/client/sync/v5.rs | 9 +++- src/api/client/tag.rs | 3 +- src/api/client/thirdparty.rs | 3 +- src/api/client/threads.rs | 7 ++- src/api/client/to_device.rs | 2 +- src/api/client/typing.rs | 4 +- src/api/client/unversioned.rs | 3 +- src/api/client/user_directory.rs | 
4 +- src/api/client/voip.rs | 4 +- src/api/client/well_known.rs | 3 +- src/api/mod.rs | 2 - src/api/server/hierarchy.rs | 4 +- src/api/server/invite.rs | 5 +- src/api/server/make_join.rs | 10 ++-- src/api/server/make_knock.rs | 5 +- src/api/server/make_leave.rs | 4 +- src/api/server/openid.rs | 3 +- src/api/server/publicrooms.rs | 3 +- src/api/server/send.rs | 16 +++---- src/api/server/send_join.rs | 2 +- src/api/server/send_knock.rs | 6 ++- src/api/server/send_leave.rs | 8 ++-- src/api/server/version.rs | 3 +- src/api/server/well_known.rs | 3 +- .../state_event.rs => matrix/event.rs} | 0 src/core/matrix/mod.rs | 9 ++++ src/core/{pdu/mod.rs => matrix/pdu.rs} | 47 +++++++++++++++---- src/core/{ => matrix}/pdu/builder.rs | 0 src/core/{ => matrix}/pdu/content.rs | 0 src/core/{ => matrix}/pdu/count.rs | 0 src/core/{ => matrix}/pdu/event_id.rs | 0 src/core/{ => matrix}/pdu/filter.rs | 0 src/core/{ => matrix}/pdu/id.rs | 0 src/core/{ => matrix}/pdu/raw_id.rs | 0 src/core/{ => matrix}/pdu/redact.rs | 0 src/core/{ => matrix}/pdu/relation.rs | 0 src/core/{ => matrix}/pdu/state_key.rs | 0 src/core/{ => matrix}/pdu/strip.rs | 0 src/core/{ => matrix}/pdu/tests.rs | 0 src/core/{ => matrix}/pdu/unsigned.rs | 0 src/core/{ => matrix}/state_res/LICENSE | 0 src/core/{ => matrix}/state_res/benches.rs | 0 src/core/{ => matrix}/state_res/error.rs | 0 src/core/{ => matrix}/state_res/event_auth.rs | 0 src/core/{ => matrix}/state_res/mod.rs | 8 ++-- src/core/{ => matrix}/state_res/outcomes.txt | 0 .../{ => matrix}/state_res/power_levels.rs | 2 +- .../{ => matrix}/state_res/room_version.rs | 0 src/core/{ => matrix}/state_res/test_utils.rs | 5 +- src/core/mod.rs | 6 +-- src/core/pdu/event.rs | 35 -------------- src/service/admin/grant.rs | 4 +- src/service/mod.rs | 1 - .../rooms/event_handler/state_at_incoming.rs | 4 +- .../event_handler/upgrade_outlier_pdu.rs | 3 +- src/service/rooms/outlier/mod.rs | 6 +-- src/service/rooms/read_receipt/mod.rs | 6 ++- src/service/rooms/short/mod.rs | 4 +- .../rooms/state_accessor/room_state.rs | 5 +- src/service/rooms/state_accessor/state.rs | 6 ++- src/service/rooms/threads/mod.rs | 5 +- src/service/rooms/timeline/mod.rs | 9 ++-- 91 files changed, 266 insertions(+), 205 deletions(-) rename src/core/{state_res/state_event.rs => matrix/event.rs} (100%) create mode 100644 src/core/matrix/mod.rs rename src/core/{pdu/mod.rs => matrix/pdu.rs} (72%) rename src/core/{ => matrix}/pdu/builder.rs (100%) rename src/core/{ => matrix}/pdu/content.rs (100%) rename src/core/{ => matrix}/pdu/count.rs (100%) rename src/core/{ => matrix}/pdu/event_id.rs (100%) rename src/core/{ => matrix}/pdu/filter.rs (100%) rename src/core/{ => matrix}/pdu/id.rs (100%) rename src/core/{ => matrix}/pdu/raw_id.rs (100%) rename src/core/{ => matrix}/pdu/redact.rs (100%) rename src/core/{ => matrix}/pdu/relation.rs (100%) rename src/core/{ => matrix}/pdu/state_key.rs (100%) rename src/core/{ => matrix}/pdu/strip.rs (100%) rename src/core/{ => matrix}/pdu/tests.rs (100%) rename src/core/{ => matrix}/pdu/unsigned.rs (100%) rename src/core/{ => matrix}/state_res/LICENSE (100%) rename src/core/{ => matrix}/state_res/benches.rs (100%) rename src/core/{ => matrix}/state_res/error.rs (100%) rename src/core/{ => matrix}/state_res/event_auth.rs (100%) rename src/core/{ => matrix}/state_res/mod.rs (99%) rename src/core/{ => matrix}/state_res/outcomes.txt (100%) rename src/core/{ => matrix}/state_res/power_levels.rs (99%) rename src/core/{ => matrix}/state_res/room_version.rs (100%) rename src/core/{ => 
matrix}/state_res/test_utils.rs (99%) delete mode 100644 src/core/pdu/event.rs diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index c6f6a170..87ca03a0 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -6,7 +6,9 @@ use std::{ }; use conduwuit::{ - Error, PduEvent, PduId, RawPduId, Result, debug_error, err, info, trace, utils, + Error, Result, debug_error, err, info, + matrix::pdu::{PduEvent, PduId, RawPduId}, + trace, utils, utils::{ stream::{IterStream, ReadyExt}, string::EMPTY, diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 35067304..45e550be 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -2,7 +2,8 @@ use std::{collections::BTreeMap, fmt::Write as _}; use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room}; use conduwuit::{ - PduBuilder, Result, debug, debug_warn, error, info, is_equal_to, + Result, debug, debug_warn, error, info, is_equal_to, + matrix::pdu::PduBuilder, utils::{self, ReadyExt}, warn, }; diff --git a/src/api/client/account.rs b/src/api/client/account.rs index e5894d47..32f2530c 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -3,10 +3,13 @@ use std::fmt::Write; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - Err, Error, PduBuilder, Result, debug_info, err, error, info, is_equal_to, utils, + Err, Error, Result, debug_info, err, error, info, is_equal_to, + matrix::pdu::PduBuilder, + utils, utils::{ReadyExt, stream::BroadbandExt}, warn, }; +use conduwuit_service::Services; use futures::{FutureExt, StreamExt}; use register::RegistrationKind; use ruma::{ @@ -30,7 +33,6 @@ use ruma::{ }, push, }; -use service::Services; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH, join_room_by_id_helper}; use crate::Ruma; diff --git a/src/api/client/account_data.rs b/src/api/client/account_data.rs index 60c18b37..e44ce4e7 100644 --- a/src/api/client/account_data.rs +++ b/src/api/client/account_data.rs @@ -1,5 +1,6 @@ use axum::extract::State; -use conduwuit::{Err, err}; +use conduwuit::{Err, Result, err}; +use conduwuit_service::Services; use ruma::{ RoomId, UserId, api::client::config::{ @@ -15,7 +16,7 @@ use ruma::{ use serde::Deserialize; use serde_json::{json, value::RawValue as RawJsonValue}; -use crate::{Result, Ruma, service::Services}; +use crate::Ruma; /// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}` /// diff --git a/src/api/client/alias.rs b/src/api/client/alias.rs index 319e5141..9f1b05f8 100644 --- a/src/api/client/alias.rs +++ b/src/api/client/alias.rs @@ -1,12 +1,12 @@ use axum::extract::State; use conduwuit::{Err, Result, debug}; +use conduwuit_service::Services; use futures::StreamExt; use rand::seq::SliceRandom; use ruma::{ OwnedServerName, RoomAliasId, RoomId, api::client::alias::{create_alias, delete_alias, get_alias}, }; -use service::Services; use crate::Ruma; diff --git a/src/api/client/backup.rs b/src/api/client/backup.rs index 83955fea..2ad37cf3 100644 --- a/src/api/client/backup.rs +++ b/src/api/client/backup.rs @@ -1,7 +1,7 @@ use std::cmp::Ordering; use axum::extract::State; -use conduwuit::{Err, err}; +use conduwuit::{Err, Result, err}; use ruma::{ UInt, api::client::backup::{ @@ -13,7 +13,7 @@ use ruma::{ }, }; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `POST /_matrix/client/r0/room_keys/version` /// diff --git a/src/api/client/context.rs b/src/api/client/context.rs index 1dda7b53..dbc2a22f 100644 --- a/src/api/client/context.rs +++ 
b/src/api/client/context.rs @@ -1,18 +1,20 @@ use axum::extract::State; use conduwuit::{ - Err, PduEvent, Result, at, debug_warn, err, ref_at, + Err, Result, at, debug_warn, err, + matrix::pdu::PduEvent, + ref_at, utils::{ IterStream, future::TryExtExt, stream::{BroadbandExt, ReadyExt, TryIgnore, WidebandExt}, }, }; +use conduwuit_service::rooms::{lazy_loading, lazy_loading::Options, short::ShortStateKey}; use futures::{ FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::{OptionFuture, join, join3, try_join3}, }; use ruma::{OwnedEventId, UserId, api::client::context::get_context, events::StateEventType}; -use service::rooms::{lazy_loading, lazy_loading::Options, short::ShortStateKey}; use crate::{ Ruma, diff --git a/src/api/client/device.rs b/src/api/client/device.rs index 7603c866..5519a1a5 100644 --- a/src/api/client/device.rs +++ b/src/api/client/device.rs @@ -1,6 +1,6 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{Err, debug, err}; +use conduwuit::{Err, Error, Result, debug, err, utils}; use futures::StreamExt; use ruma::{ MilliSecondsSinceUnixEpoch, OwnedDeviceId, @@ -12,7 +12,7 @@ use ruma::{ }; use super::SESSION_ID_LENGTH; -use crate::{Error, Result, Ruma, client::DEVICE_ID_LENGTH, utils}; +use crate::{Ruma, client::DEVICE_ID_LENGTH}; /// # `GET /_matrix/client/r0/devices` /// diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index f2f668c8..9ca35537 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -9,6 +9,7 @@ use conduwuit::{ stream::{ReadyExt, WidebandExt}, }, }; +use conduwuit_service::Services; use futures::{ FutureExt, StreamExt, TryFutureExt, future::{join, join4, join5}, @@ -35,7 +36,6 @@ use ruma::{ }, uint, }; -use service::Services; use crate::Ruma; diff --git a/src/api/client/filter.rs b/src/api/client/filter.rs index 84086452..97044ffc 100644 --- a/src/api/client/filter.rs +++ b/src/api/client/filter.rs @@ -1,8 +1,8 @@ use axum::extract::State; -use conduwuit::err; +use conduwuit::{Result, err}; use ruma::api::client::filter::{create_filter, get_filter}; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}` /// diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 2fdfc0bc..6865c2a4 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -2,6 +2,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use axum::extract::State; use conduwuit::{Err, Error, Result, debug, debug_warn, err, info, result::NotFound, utils}; +use conduwuit_service::{Services, users::parse_master_key}; use futures::{StreamExt, stream::FuturesUnordered}; use ruma::{ OneTimeKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, @@ -23,10 +24,7 @@ use ruma::{ use serde_json::json; use super::SESSION_ID_LENGTH; -use crate::{ - Ruma, - service::{Services, users::parse_master_key}, -}; +use crate::Ruma; /// # `POST /_matrix/client/r0/keys/upload` /// diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index ef40e972..d0345c8e 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -9,13 +9,25 @@ use std::{ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - Err, PduEvent, Result, StateKey, at, debug, debug_info, debug_warn, err, error, info, - pdu::{PduBuilder, gen_event_id_canonical_json}, + Err, Result, at, debug, debug_info, debug_warn, err, error, info, + matrix::{ + StateKey, + pdu::{PduBuilder, PduEvent, gen_event_id, gen_event_id_canonical_json}, + 
state_res, + }, result::{FlatOk, NotFound}, - state_res, trace, + trace, utils::{self, IterStream, ReadyExt, shuffle}, warn, }; +use conduwuit_service::{ + Services, + appservice::RegistrationInfo, + rooms::{ + state::RoomMutexGuard, + state_compressor::{CompressedState, HashSetCompressStateEvent}, + }, +}; use futures::{FutureExt, StreamExt, TryFutureExt, future::join4, join}; use ruma::{ CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, @@ -44,15 +56,6 @@ use ruma::{ }, }, }; -use service::{ - Services, - appservice::RegistrationInfo, - pdu::gen_event_id, - rooms::{ - state::RoomMutexGuard, - state_compressor::{CompressedState, HashSetCompressStateEvent}, - }, -}; use crate::{Ruma, client::full_user_deactivate}; diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 03c7335a..3e784a4a 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -1,12 +1,24 @@ use axum::extract::State; use conduwuit::{ - Err, Event, PduCount, PduEvent, Result, at, + Err, Result, at, + matrix::{ + Event, + pdu::{PduCount, PduEvent}, + }, utils::{ IterStream, ReadyExt, result::{FlatOk, LogErr}, stream::{BroadbandExt, TryIgnore, WidebandExt}, }, }; +use conduwuit_service::{ + Services, + rooms::{ + lazy_loading, + lazy_loading::{Options, Witness}, + timeline::PdusIterItem, + }, +}; use futures::{FutureExt, StreamExt, TryFutureExt, future::OptionFuture, pin_mut}; use ruma::{ RoomId, UserId, @@ -17,14 +29,6 @@ use ruma::{ events::{AnyStateEvent, StateEventType, TimelineEventType, TimelineEventType::*}, serde::Raw, }; -use service::{ - Services, - rooms::{ - lazy_loading, - lazy_loading::{Options, Witness}, - timeline::PdusIterItem, - }, -}; use crate::Ruma; diff --git a/src/api/client/openid.rs b/src/api/client/openid.rs index 671d0c6d..8d2de68d 100644 --- a/src/api/client/openid.rs +++ b/src/api/client/openid.rs @@ -1,14 +1,14 @@ use std::time::Duration; use axum::extract::State; -use conduwuit::utils; +use conduwuit::{Error, Result, utils}; use ruma::{ api::client::{account, error::ErrorKind}, authentication::TokenType, }; use super::TOKEN_LENGTH; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `POST /_matrix/client/v3/user/{userId}/openid/request_token` /// diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index 5abe5b23..3699b590 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -3,10 +3,11 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ Err, Error, Result, - pdu::PduBuilder, + matrix::pdu::PduBuilder, utils::{IterStream, stream::TryIgnore}, warn, }; +use conduwuit_service::Services; use futures::{StreamExt, TryStreamExt, future::join3}; use ruma::{ OwnedMxcUri, OwnedRoomId, UserId, @@ -22,7 +23,6 @@ use ruma::{ events::room::member::{MembershipState, RoomMemberEventContent}, presence::PresenceState, }; -use service::Services; use crate::Ruma; diff --git a/src/api/client/push.rs b/src/api/client/push.rs index cc1d3be2..81020ffa 100644 --- a/src/api/client/push.rs +++ b/src/api/client/push.rs @@ -1,5 +1,6 @@ use axum::extract::State; -use conduwuit::{Err, err}; +use conduwuit::{Err, Error, Result, err}; +use conduwuit_service::Services; use ruma::{ CanonicalJsonObject, CanonicalJsonValue, api::client::{ @@ -19,9 +20,8 @@ use ruma::{ RemovePushRuleError, Ruleset, }, }; -use service::Services; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/client/r0/pushrules/` /// diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs 
index b334e356..fbfc8fea 100644 --- a/src/api/client/read_marker.rs +++ b/src/api/client/read_marker.rs @@ -1,7 +1,7 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduwuit::{Err, PduCount, err}; +use conduwuit::{Err, PduCount, Result, err}; use ruma::{ MilliSecondsSinceUnixEpoch, api::client::{read_marker::set_read_marker, receipt::create_receipt}, @@ -11,7 +11,7 @@ use ruma::{ }, }; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers` /// diff --git a/src/api/client/redact.rs b/src/api/client/redact.rs index 7b512d06..8dbe47a6 100644 --- a/src/api/client/redact.rs +++ b/src/api/client/redact.rs @@ -1,9 +1,10 @@ use axum::extract::State; +use conduwuit::{Result, matrix::pdu::PduBuilder}; use ruma::{ api::client::redact::redact_event, events::room::redaction::RoomRedactionEventContent, }; -use crate::{Result, Ruma, service::pdu::PduBuilder}; +use crate::Ruma; /// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}` /// diff --git a/src/api/client/relations.rs b/src/api/client/relations.rs index 7ed40f14..b8c2dd4d 100644 --- a/src/api/client/relations.rs +++ b/src/api/client/relations.rs @@ -1,8 +1,10 @@ use axum::extract::State; use conduwuit::{ - PduCount, Result, at, + Result, at, + matrix::pdu::PduCount, utils::{IterStream, ReadyExt, result::FlatOk, stream::WidebandExt}, }; +use conduwuit_service::{Services, rooms::timeline::PdusIterItem}; use futures::StreamExt; use ruma::{ EventId, RoomId, UInt, UserId, @@ -15,7 +17,6 @@ use ruma::{ }, events::{TimelineEventType, relation::RelationType}, }; -use service::{Services, rooms::timeline::PdusIterItem}; use crate::Ruma; diff --git a/src/api/client/report.rs b/src/api/client/report.rs index 7922caca..4ee8ebe5 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -2,7 +2,8 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{Err, info, utils::ReadyExt}; +use conduwuit::{Err, Error, Result, debug_info, info, matrix::pdu::PduEvent, utils::ReadyExt}; +use conduwuit_service::Services; use rand::Rng; use ruma::{ EventId, RoomId, UserId, @@ -15,10 +16,7 @@ use ruma::{ }; use tokio::time::sleep; -use crate::{ - Error, Result, Ruma, debug_info, - service::{Services, pdu::PduEvent}, -}; +use crate::Ruma; /// # `POST /_matrix/client/v3/rooms/{roomId}/report` /// diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index bdc5d5a5..4ce53f15 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -2,8 +2,11 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ - Err, Error, Result, StateKey, debug_info, debug_warn, err, error, info, pdu::PduBuilder, warn, + Err, Error, Result, debug_info, debug_warn, err, error, info, + matrix::{StateKey, pdu::PduBuilder}, + warn, }; +use conduwuit_service::{Services, appservice::RegistrationInfo}; use futures::FutureExt; use ruma::{ CanonicalJsonObject, Int, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId, @@ -29,7 +32,6 @@ use ruma::{ serde::{JsonObject, Raw}, }; use serde_json::{json, value::to_raw_value}; -use service::{Services, appservice::RegistrationInfo}; use crate::{Ruma, client::invite_helper}; diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs index 3cfb3c28..9ec0b3bb 100644 --- a/src/api/client/room/upgrade.rs +++ b/src/api/client/room/upgrade.rs @@ -1,7 +1,10 @@ use std::cmp::max; use axum::extract::State; -use conduwuit::{Error, Result, 
StateKey, err, info, pdu::PduBuilder}; +use conduwuit::{ + Error, Result, err, info, + matrix::{StateKey, pdu::PduBuilder}, +}; use futures::StreamExt; use ruma::{ CanonicalJsonObject, RoomId, RoomVersionId, diff --git a/src/api/client/search.rs b/src/api/client/search.rs index d66df881..d4dcde57 100644 --- a/src/api/client/search.rs +++ b/src/api/client/search.rs @@ -2,10 +2,12 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ - Err, PduEvent, Result, at, is_true, + Err, Result, at, is_true, + matrix::pdu::PduEvent, result::FlatOk, utils::{IterStream, stream::ReadyExt}, }; +use conduwuit_service::{Services, rooms::search::RoomQuery}; use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::OptionFuture}; use ruma::{ OwnedRoomId, RoomId, UInt, UserId, @@ -17,7 +19,6 @@ use ruma::{ serde::Raw, }; use search_events::v3::{Request, Response}; -use service::{Services, rooms::search::RoomQuery}; use crate::Ruma; diff --git a/src/api/client/send.rs b/src/api/client/send.rs index 1af74f57..f753fa65 100644 --- a/src/api/client/send.rs +++ b/src/api/client/send.rs @@ -1,11 +1,11 @@ use std::collections::BTreeMap; use axum::extract::State; -use conduwuit::{Err, err}; +use conduwuit::{Err, Result, err, matrix::pdu::PduBuilder, utils}; use ruma::{api::client::message::send_message_event, events::MessageLikeEventType}; use serde_json::from_str; -use crate::{Result, Ruma, service::pdu::PduBuilder, utils}; +use crate::Ruma; /// # `PUT /_matrix/client/v3/rooms/{roomId}/send/{eventType}/{txnId}` /// diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 3de625e4..2499a43d 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -2,7 +2,11 @@ use std::time::Duration; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{Err, debug, err, info, utils::ReadyExt}; +use conduwuit::{ + Err, Error, Result, debug, err, info, utils, + utils::{ReadyExt, hash}, +}; +use conduwuit_service::uiaa::SESSION_ID_LENGTH; use futures::StreamExt; use ruma::{ UserId, @@ -22,10 +26,9 @@ use ruma::{ uiaa, }, }; -use service::uiaa::SESSION_ID_LENGTH; use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; -use crate::{Error, Result, Ruma, utils, utils::hash}; +use crate::Ruma; /// # `GET /_matrix/client/v3/login` /// diff --git a/src/api/client/space.rs b/src/api/client/space.rs index 567ac62f..4eee9d76 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -8,16 +8,16 @@ use conduwuit::{ Err, Result, utils::{future::TryExtExt, stream::IterStream}, }; -use futures::{StreamExt, TryFutureExt, future::OptionFuture}; -use ruma::{ - OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, api::client::space::get_hierarchy, -}; -use service::{ +use conduwuit_service::{ Services, rooms::spaces::{ PaginationToken, SummaryAccessibility, get_parent_children_via, summary_to_chunk, }, }; +use futures::{StreamExt, TryFutureExt, future::OptionFuture}; +use ruma::{ + OwnedRoomId, OwnedServerName, RoomId, UInt, UserId, api::client::space::get_hierarchy, +}; use crate::Ruma; diff --git a/src/api/client/state.rs b/src/api/client/state.rs index 23583356..5c5c71f2 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -1,5 +1,10 @@ use axum::extract::State; -use conduwuit::{Err, PduEvent, Result, err, pdu::PduBuilder, utils::BoolExt}; +use conduwuit::{ + Err, Result, err, + matrix::pdu::{PduBuilder, PduEvent}, + utils::BoolExt, +}; +use conduwuit_service::Services; use futures::TryStreamExt; use ruma::{ OwnedEventId, RoomId, UserId, @@ 
-16,7 +21,6 @@ use ruma::{ }, serde::Raw, }; -use service::Services; use crate::{Ruma, RumaResponse}; diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index 3eab76cc..14459acf 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -3,12 +3,14 @@ mod v4; mod v5; use conduwuit::{ - PduCount, + Error, PduCount, Result, + matrix::pdu::PduEvent, utils::{ IterStream, stream::{BroadbandExt, ReadyExt, TryIgnore}, }, }; +use conduwuit_service::Services; use futures::{StreamExt, pin_mut}; use ruma::{ RoomId, UserId, @@ -21,7 +23,6 @@ use ruma::{ pub(crate) use self::{ v3::sync_events_route, v4::sync_events_v4_route, v5::sync_events_v5_route, }; -use crate::{Error, PduEvent, Result, service::Services}; pub(crate) const DEFAULT_BUMP_TYPES: &[TimelineEventType; 6] = &[CallInvite, PollStart, Beacon, RoomEncrypted, RoomMessage, Sticker]; diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 83ffa55a..12731ff6 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -6,9 +6,12 @@ use std::{ use axum::extract::State; use conduwuit::{ - PduCount, PduEvent, Result, at, err, error, extract_variant, is_equal_to, pair_of, - pdu::{Event, EventHash}, - ref_at, + Result, at, err, error, extract_variant, is_equal_to, + matrix::{ + Event, + pdu::{EventHash, PduCount, PduEvent}, + }, + pair_of, ref_at, result::FlatOk, utils::{ self, BoolExt, IterStream, ReadyExt, TryFutureExtExt, diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index c4e71d88..684752ec 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -6,13 +6,19 @@ use std::{ use axum::extract::State; use conduwuit::{ - Error, PduEvent, Result, TypeStateKey, debug, error, extract_variant, trace, + Error, Result, debug, error, extract_variant, + matrix::{ + TypeStateKey, + pdu::{PduCount, PduEvent}, + }, + trace, utils::{ BoolExt, IterStream, ReadyExt, TryFutureExtExt, math::{ruma_from_usize, usize_from_ruma}, }, warn, }; +use conduwuit_service::rooms::read_receipt::pack_receipts; use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::{ DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, @@ -27,7 +33,6 @@ use ruma::{ serde::Raw, uint, }; -use service::{PduCount, rooms::read_receipt::pack_receipts}; use super::{filter_rooms, share_encrypted_room}; use crate::{ diff --git a/src/api/client/tag.rs b/src/api/client/tag.rs index 3b3b40d4..caafe10d 100644 --- a/src/api/client/tag.rs +++ b/src/api/client/tag.rs @@ -1,6 +1,7 @@ use std::collections::BTreeMap; use axum::extract::State; +use conduwuit::Result; use ruma::{ api::client::tag::{create_tag, delete_tag, get_tags}, events::{ @@ -9,7 +10,7 @@ use ruma::{ }, }; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` /// diff --git a/src/api/client/thirdparty.rs b/src/api/client/thirdparty.rs index 790b27d3..0713a882 100644 --- a/src/api/client/thirdparty.rs +++ b/src/api/client/thirdparty.rs @@ -1,8 +1,9 @@ use std::collections::BTreeMap; +use conduwuit::Result; use ruma::api::client::thirdparty::get_protocols; -use crate::{Result, Ruma, RumaResponse}; +use crate::{Ruma, RumaResponse}; /// # `GET /_matrix/client/r0/thirdparty/protocols` /// diff --git a/src/api/client/threads.rs b/src/api/client/threads.rs index 00bfe553..5b838bef 100644 --- a/src/api/client/threads.rs +++ b/src/api/client/threads.rs @@ -1,9 +1,12 @@ use axum::extract::State; -use conduwuit::{PduCount, PduEvent, at}; +use conduwuit::{ + Result, at, + 
matrix::pdu::{PduCount, PduEvent}, +}; use futures::StreamExt; use ruma::{api::client::threads::get_threads, uint}; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/client/r0/rooms/{roomId}/threads` pub(crate) async fn get_threads_route( diff --git a/src/api/client/to_device.rs b/src/api/client/to_device.rs index 1b942fba..8ad9dc99 100644 --- a/src/api/client/to_device.rs +++ b/src/api/client/to_device.rs @@ -2,6 +2,7 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{Error, Result}; +use conduwuit_service::sending::EduBuf; use futures::StreamExt; use ruma::{ api::{ @@ -10,7 +11,6 @@ use ruma::{ }, to_device::DeviceIdOrAllDevices, }; -use service::sending::EduBuf; use crate::Ruma; diff --git a/src/api/client/typing.rs b/src/api/client/typing.rs index b02cc473..1d8d02fd 100644 --- a/src/api/client/typing.rs +++ b/src/api/client/typing.rs @@ -1,8 +1,8 @@ use axum::extract::State; -use conduwuit::{Err, utils::math::Tried}; +use conduwuit::{Err, Result, utils, utils::math::Tried}; use ruma::api::client::typing::create_typing_event; -use crate::{Result, Ruma, utils}; +use crate::Ruma; /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` /// diff --git a/src/api/client/unversioned.rs b/src/api/client/unversioned.rs index 4e2b7d9d..232d5b28 100644 --- a/src/api/client/unversioned.rs +++ b/src/api/client/unversioned.rs @@ -1,10 +1,11 @@ use std::collections::BTreeMap; use axum::{Json, extract::State, response::IntoResponse}; +use conduwuit::Result; use futures::StreamExt; use ruma::api::client::discovery::get_supported_versions; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/client/versions` /// diff --git a/src/api/client/user_directory.rs b/src/api/client/user_directory.rs index c5d79a56..8f564eed 100644 --- a/src/api/client/user_directory.rs +++ b/src/api/client/user_directory.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduwuit::utils::TryFutureExtExt; +use conduwuit::{Result, utils::TryFutureExtExt}; use futures::{StreamExt, pin_mut}; use ruma::{ api::client::user_directory::search_users, @@ -9,7 +9,7 @@ use ruma::{ }, }; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `POST /_matrix/client/r0/user_directory/search` /// diff --git a/src/api/client/voip.rs b/src/api/client/voip.rs index 37e67984..91991d24 100644 --- a/src/api/client/voip.rs +++ b/src/api/client/voip.rs @@ -2,12 +2,12 @@ use std::time::{Duration, SystemTime}; use axum::extract::State; use base64::{Engine as _, engine::general_purpose}; -use conduwuit::{Err, utils}; +use conduwuit::{Err, Result, utils}; use hmac::{Hmac, Mac}; use ruma::{SecondsSinceUnixEpoch, UserId, api::client::voip::get_turn_server_info}; use sha1::Sha1; -use crate::{Result, Ruma}; +use crate::Ruma; const RANDOM_USER_ID_LENGTH: usize = 10; diff --git a/src/api/client/well_known.rs b/src/api/client/well_known.rs index abda61b0..eedab981 100644 --- a/src/api/client/well_known.rs +++ b/src/api/client/well_known.rs @@ -1,4 +1,5 @@ use axum::{Json, extract::State, response::IntoResponse}; +use conduwuit::{Error, Result}; use ruma::api::client::{ discovery::{ discover_homeserver::{self, HomeserverInfo, SlidingSyncProxyInfo}, @@ -7,7 +8,7 @@ use ruma::api::client::{ error::ErrorKind, }; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `GET /.well-known/matrix/client` /// diff --git a/src/api/mod.rs b/src/api/mod.rs index 090cf897..9ca24e72 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -8,8 +8,6 @@ pub mod server; extern crate conduwuit_core as conduwuit; extern crate 
conduwuit_service as service; -pub(crate) use conduwuit::{Error, Result, debug_info, pdu::PduEvent, utils}; - pub(crate) use self::router::{Ruma, RumaResponse, State}; conduwuit::mod_ctor! {} diff --git a/src/api/server/hierarchy.rs b/src/api/server/hierarchy.rs index c759c8ea..42c348f9 100644 --- a/src/api/server/hierarchy.rs +++ b/src/api/server/hierarchy.rs @@ -3,9 +3,11 @@ use conduwuit::{ Err, Result, utils::stream::{BroadbandExt, IterStream}, }; +use conduwuit_service::rooms::spaces::{ + Identifier, SummaryAccessibility, get_parent_children_via, +}; use futures::{FutureExt, StreamExt}; use ruma::api::federation::space::get_hierarchy; -use service::rooms::spaces::{Identifier, SummaryAccessibility, get_parent_children_via}; use crate::Ruma; diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index f4cc6eb2..cda34fb5 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -1,14 +1,15 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; use base64::{Engine as _, engine::general_purpose}; -use conduwuit::{Err, Error, PduEvent, Result, err, utils, utils::hash::sha256, warn}; +use conduwuit::{ + Err, Error, PduEvent, Result, err, pdu::gen_event_id, utils, utils::hash::sha256, warn, +}; use ruma::{ CanonicalJsonValue, OwnedUserId, UserId, api::{client::error::ErrorKind, federation::membership::create_invite}, events::room::member::{MembershipState, RoomMemberEventContent}, serde::JsonObject, }; -use service::pdu::gen_event_id; use crate::Ruma; diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs index f18d1304..4664b904 100644 --- a/src/api/server/make_join.rs +++ b/src/api/server/make_join.rs @@ -1,5 +1,8 @@ use axum::extract::State; -use conduwuit::{Err, debug_info, utils::IterStream, warn}; +use conduwuit::{ + Err, Error, Result, debug_info, matrix::pdu::PduBuilder, utils::IterStream, warn, +}; +use conduwuit_service::Services; use futures::StreamExt; use ruma::{ CanonicalJsonObject, OwnedUserId, RoomId, RoomVersionId, UserId, @@ -14,10 +17,7 @@ use ruma::{ }; use serde_json::value::to_raw_value; -use crate::{ - Error, Result, Ruma, - service::{Services, pdu::PduBuilder}, -}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` /// diff --git a/src/api/server/make_knock.rs b/src/api/server/make_knock.rs index 71536439..6d71ab2a 100644 --- a/src/api/server/make_knock.rs +++ b/src/api/server/make_knock.rs @@ -1,15 +1,14 @@ use RoomVersionId::*; use axum::extract::State; -use conduwuit::{Err, debug_warn}; +use conduwuit::{Err, Error, Result, debug_warn, matrix::pdu::PduBuilder, warn}; use ruma::{ RoomVersionId, api::{client::error::ErrorKind, federation::knock::create_knock_event_template}, events::room::member::{MembershipState, RoomMemberEventContent}, }; use serde_json::value::to_raw_value; -use tracing::warn; -use crate::{Error, Result, Ruma, service::pdu::PduBuilder}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/make_knock/{roomId}/{userId}` /// diff --git a/src/api/server/make_leave.rs b/src/api/server/make_leave.rs index 1ed02785..cb6bd2fa 100644 --- a/src/api/server/make_leave.rs +++ b/src/api/server/make_leave.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduwuit::{Err, Result}; +use conduwuit::{Err, Result, matrix::pdu::PduBuilder}; use ruma::{ api::federation::membership::prepare_leave_event, events::room::member::{MembershipState, RoomMemberEventContent}, @@ -7,7 +7,7 @@ use ruma::{ use serde_json::value::to_raw_value; use super::make_join::maybe_strip_event_id; -use 
crate::{Ruma, service::pdu::PduBuilder}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/make_leave/{roomId}/{eventId}` /// diff --git a/src/api/server/openid.rs b/src/api/server/openid.rs index 4833fbe1..a09cd7ad 100644 --- a/src/api/server/openid.rs +++ b/src/api/server/openid.rs @@ -1,7 +1,8 @@ use axum::extract::State; +use conduwuit::Result; use ruma::api::federation::openid::get_openid_userinfo; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/openid/userinfo` /// diff --git a/src/api/server/publicrooms.rs b/src/api/server/publicrooms.rs index ff74574a..cf66ea71 100644 --- a/src/api/server/publicrooms.rs +++ b/src/api/server/publicrooms.rs @@ -1,5 +1,6 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; +use conduwuit::{Error, Result}; use ruma::{ api::{ client::error::ErrorKind, @@ -8,7 +9,7 @@ use ruma::{ directory::Filter, }; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `POST /_matrix/federation/v1/publicRooms` /// diff --git a/src/api/server/send.rs b/src/api/server/send.rs index 1f467dac..9c5bfd2b 100644 --- a/src/api/server/send.rs +++ b/src/api/server/send.rs @@ -9,11 +9,15 @@ use conduwuit::{ result::LogErr, trace, utils::{ - IterStream, ReadyExt, + IterStream, ReadyExt, millis_since_unix_epoch, stream::{BroadbandExt, TryBroadbandExt, automatic_width}, }, warn, }; +use conduwuit_service::{ + Services, + sending::{EDU_LIMIT, PDU_LIMIT}, +}; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt}; use itertools::Itertools; use ruma::{ @@ -33,16 +37,8 @@ use ruma::{ serde::Raw, to_device::DeviceIdOrAllDevices, }; -use service::{ - Services, - sending::{EDU_LIMIT, PDU_LIMIT}, -}; -use utils::millis_since_unix_epoch; -use crate::{ - Ruma, - utils::{self}, -}; +use crate::Ruma; type ResolvedMap = BTreeMap; type Pdu = (OwnedRoomId, OwnedEventId, CanonicalJsonObject); diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index c1749835..2e2e89ee 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -9,6 +9,7 @@ use conduwuit::{ utils::stream::{IterStream, TryBroadbandExt}, warn, }; +use conduwuit_service::Services; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, @@ -20,7 +21,6 @@ use ruma::{ }, }; use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; -use service::Services; use crate::Ruma; diff --git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs index f7bb0735..c5ab0306 100644 --- a/src/api/server/send_knock.rs +++ b/src/api/server/send_knock.rs @@ -1,5 +1,9 @@ use axum::extract::State; -use conduwuit::{Err, PduEvent, Result, err, pdu::gen_event_id_canonical_json, warn}; +use conduwuit::{ + Err, Result, err, + matrix::pdu::{PduEvent, gen_event_id_canonical_json}, + warn, +}; use futures::FutureExt; use ruma::{ OwnedServerName, OwnedUserId, diff --git a/src/api/server/send_leave.rs b/src/api/server/send_leave.rs index 71516553..d3dc994c 100644 --- a/src/api/server/send_leave.rs +++ b/src/api/server/send_leave.rs @@ -1,7 +1,8 @@ #![allow(deprecated)] use axum::extract::State; -use conduwuit::{Err, Result, err}; +use conduwuit::{Err, Result, err, matrix::pdu::gen_event_id_canonical_json}; +use conduwuit_service::Services; use futures::FutureExt; use ruma::{ OwnedRoomId, OwnedUserId, RoomId, ServerName, @@ -13,10 +14,7 @@ use ruma::{ }; use serde_json::value::RawValue as RawJsonValue; -use crate::{ - Ruma, - service::{Services, 
pdu::gen_event_id_canonical_json}, -}; +use crate::Ruma; /// # `PUT /_matrix/federation/v1/send_leave/{roomId}/{eventId}` /// diff --git a/src/api/server/version.rs b/src/api/server/version.rs index 036b61f7..b08ff77a 100644 --- a/src/api/server/version.rs +++ b/src/api/server/version.rs @@ -1,6 +1,7 @@ +use conduwuit::Result; use ruma::api::federation::discovery::get_server_version; -use crate::{Result, Ruma}; +use crate::Ruma; /// # `GET /_matrix/federation/v1/version` /// diff --git a/src/api/server/well_known.rs b/src/api/server/well_known.rs index 48caa7d6..75c7cf5d 100644 --- a/src/api/server/well_known.rs +++ b/src/api/server/well_known.rs @@ -1,7 +1,8 @@ use axum::extract::State; +use conduwuit::{Error, Result}; use ruma::api::{client::error::ErrorKind, federation::discovery::discover_homeserver}; -use crate::{Error, Result, Ruma}; +use crate::Ruma; /// # `GET /.well-known/matrix/server` /// diff --git a/src/core/state_res/state_event.rs b/src/core/matrix/event.rs similarity index 100% rename from src/core/state_res/state_event.rs rename to src/core/matrix/event.rs diff --git a/src/core/matrix/mod.rs b/src/core/matrix/mod.rs new file mode 100644 index 00000000..8c978173 --- /dev/null +++ b/src/core/matrix/mod.rs @@ -0,0 +1,9 @@ +//! Core Matrix Library + +pub mod event; +pub mod pdu; +pub mod state_res; + +pub use event::Event; +pub use pdu::{PduBuilder, PduCount, PduEvent, PduId, RawPduId, StateKey}; +pub use state_res::{EventTypeExt, RoomVersion, StateMap, TypeStateKey}; diff --git a/src/core/pdu/mod.rs b/src/core/matrix/pdu.rs similarity index 72% rename from src/core/pdu/mod.rs rename to src/core/matrix/pdu.rs index 9fb2a3da..7e1ecfa8 100644 --- a/src/core/pdu/mod.rs +++ b/src/core/matrix/pdu.rs @@ -1,7 +1,6 @@ mod builder; mod content; mod count; -mod event; mod event_id; mod filter; mod id; @@ -17,8 +16,8 @@ mod unsigned; use std::cmp::Ordering; use ruma::{ - CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, - OwnedUserId, UInt, events::TimelineEventType, + CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, + OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, UInt, UserId, events::TimelineEventType, }; use serde::{Deserialize, Serialize}; use serde_json::value::RawValue as RawJsonValue; @@ -27,12 +26,12 @@ pub use self::{ Count as PduCount, Id as PduId, Pdu as PduEvent, RawId as RawPduId, builder::{Builder, Builder as PduBuilder}, count::Count, - event::Event, event_id::*, id::*, raw_id::*, state_key::{ShortStateKey, StateKey}, }; +use super::Event; use crate::Result; /// Persistent Data Unit (Event) @@ -79,6 +78,36 @@ impl Pdu { } } +impl Event for Pdu { + type Id = OwnedEventId; + + fn event_id(&self) -> &Self::Id { &self.event_id } + + fn room_id(&self) -> &RoomId { &self.room_id } + + fn sender(&self) -> &UserId { &self.sender } + + fn event_type(&self) -> &TimelineEventType { &self.kind } + + fn content(&self) -> &RawJsonValue { &self.content } + + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { + MilliSecondsSinceUnixEpoch(self.origin_server_ts) + } + + fn state_key(&self) -> Option<&str> { self.state_key.as_deref() } + + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { + self.prev_events.iter() + } + + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { + self.auth_events.iter() + } + + fn redacts(&self) -> Option<&Self::Id> { self.redacts.as_ref() } +} + /// Prevent derived equality which wouldn't limit itself to event_id impl Eq for Pdu {} @@ 
-87,12 +116,12 @@ impl PartialEq for Pdu { fn eq(&self, other: &Self) -> bool { self.event_id == other.event_id } } -/// Ordering determined by the Pdu's ID, not the memory representations. -impl PartialOrd for Pdu { - fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } -} - /// Ordering determined by the Pdu's ID, not the memory representations. impl Ord for Pdu { fn cmp(&self, other: &Self) -> Ordering { self.event_id.cmp(&other.event_id) } } + +/// Ordering determined by the Pdu's ID, not the memory representations. +impl PartialOrd for Pdu { + fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) } +} diff --git a/src/core/pdu/builder.rs b/src/core/matrix/pdu/builder.rs similarity index 100% rename from src/core/pdu/builder.rs rename to src/core/matrix/pdu/builder.rs diff --git a/src/core/pdu/content.rs b/src/core/matrix/pdu/content.rs similarity index 100% rename from src/core/pdu/content.rs rename to src/core/matrix/pdu/content.rs diff --git a/src/core/pdu/count.rs b/src/core/matrix/pdu/count.rs similarity index 100% rename from src/core/pdu/count.rs rename to src/core/matrix/pdu/count.rs diff --git a/src/core/pdu/event_id.rs b/src/core/matrix/pdu/event_id.rs similarity index 100% rename from src/core/pdu/event_id.rs rename to src/core/matrix/pdu/event_id.rs diff --git a/src/core/pdu/filter.rs b/src/core/matrix/pdu/filter.rs similarity index 100% rename from src/core/pdu/filter.rs rename to src/core/matrix/pdu/filter.rs diff --git a/src/core/pdu/id.rs b/src/core/matrix/pdu/id.rs similarity index 100% rename from src/core/pdu/id.rs rename to src/core/matrix/pdu/id.rs diff --git a/src/core/pdu/raw_id.rs b/src/core/matrix/pdu/raw_id.rs similarity index 100% rename from src/core/pdu/raw_id.rs rename to src/core/matrix/pdu/raw_id.rs diff --git a/src/core/pdu/redact.rs b/src/core/matrix/pdu/redact.rs similarity index 100% rename from src/core/pdu/redact.rs rename to src/core/matrix/pdu/redact.rs diff --git a/src/core/pdu/relation.rs b/src/core/matrix/pdu/relation.rs similarity index 100% rename from src/core/pdu/relation.rs rename to src/core/matrix/pdu/relation.rs diff --git a/src/core/pdu/state_key.rs b/src/core/matrix/pdu/state_key.rs similarity index 100% rename from src/core/pdu/state_key.rs rename to src/core/matrix/pdu/state_key.rs diff --git a/src/core/pdu/strip.rs b/src/core/matrix/pdu/strip.rs similarity index 100% rename from src/core/pdu/strip.rs rename to src/core/matrix/pdu/strip.rs diff --git a/src/core/pdu/tests.rs b/src/core/matrix/pdu/tests.rs similarity index 100% rename from src/core/pdu/tests.rs rename to src/core/matrix/pdu/tests.rs diff --git a/src/core/pdu/unsigned.rs b/src/core/matrix/pdu/unsigned.rs similarity index 100% rename from src/core/pdu/unsigned.rs rename to src/core/matrix/pdu/unsigned.rs diff --git a/src/core/state_res/LICENSE b/src/core/matrix/state_res/LICENSE similarity index 100% rename from src/core/state_res/LICENSE rename to src/core/matrix/state_res/LICENSE diff --git a/src/core/state_res/benches.rs b/src/core/matrix/state_res/benches.rs similarity index 100% rename from src/core/state_res/benches.rs rename to src/core/matrix/state_res/benches.rs diff --git a/src/core/state_res/error.rs b/src/core/matrix/state_res/error.rs similarity index 100% rename from src/core/state_res/error.rs rename to src/core/matrix/state_res/error.rs diff --git a/src/core/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs similarity index 100% rename from src/core/state_res/event_auth.rs rename to 
src/core/matrix/state_res/event_auth.rs diff --git a/src/core/state_res/mod.rs b/src/core/matrix/state_res/mod.rs similarity index 99% rename from src/core/state_res/mod.rs rename to src/core/matrix/state_res/mod.rs index 1db92e59..93c00d15 100644 --- a/src/core/state_res/mod.rs +++ b/src/core/matrix/state_res/mod.rs @@ -4,7 +4,6 @@ pub(crate) mod error; pub mod event_auth; mod power_levels; mod room_version; -mod state_event; #[cfg(test)] mod test_utils; @@ -36,9 +35,12 @@ use self::power_levels::PowerLevelsContentFields; pub use self::{ event_auth::{auth_check, auth_types_for_event}, room_version::RoomVersion, - state_event::Event, }; -use crate::{debug, pdu::StateKey, trace, warn}; +use crate::{ + debug, + matrix::{event::Event, pdu::StateKey}, + trace, warn, +}; /// A mapping of event type and state_key to some value `T`, usually an /// `EventId`. diff --git a/src/core/state_res/outcomes.txt b/src/core/matrix/state_res/outcomes.txt similarity index 100% rename from src/core/state_res/outcomes.txt rename to src/core/matrix/state_res/outcomes.txt diff --git a/src/core/state_res/power_levels.rs b/src/core/matrix/state_res/power_levels.rs similarity index 99% rename from src/core/state_res/power_levels.rs rename to src/core/matrix/state_res/power_levels.rs index 045b1666..19ba8fb9 100644 --- a/src/core/state_res/power_levels.rs +++ b/src/core/matrix/state_res/power_levels.rs @@ -11,9 +11,9 @@ use ruma::{ }; use serde::Deserialize; use serde_json::{Error, from_str as from_json_str}; -use tracing::error; use super::{Result, RoomVersion}; +use crate::error; #[derive(Deserialize)] struct IntRoomPowerLevelsEventContent { diff --git a/src/core/state_res/room_version.rs b/src/core/matrix/state_res/room_version.rs similarity index 100% rename from src/core/state_res/room_version.rs rename to src/core/matrix/state_res/room_version.rs diff --git a/src/core/state_res/test_utils.rs b/src/core/matrix/state_res/test_utils.rs similarity index 99% rename from src/core/state_res/test_utils.rs rename to src/core/matrix/state_res/test_utils.rs index d96ee927..f2ee4238 100644 --- a/src/core/state_res/test_utils.rs +++ b/src/core/matrix/state_res/test_utils.rs @@ -28,7 +28,10 @@ use serde_json::{ pub(crate) use self::event::PduEvent; use super::auth_types_for_event; -use crate::{Event, EventTypeExt, Result, StateMap, info}; +use crate::{ + Result, info, + matrix::{Event, EventTypeExt, StateMap}, +}; static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); diff --git a/src/core/mod.rs b/src/core/mod.rs index 80ebbdcb..b91cdf0b 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -6,11 +6,10 @@ pub mod debug; pub mod error; pub mod info; pub mod log; +pub mod matrix; pub mod metrics; pub mod mods; -pub mod pdu; pub mod server; -pub mod state_res; pub mod utils; pub use ::arrayvec; @@ -23,9 +22,8 @@ pub use ::tracing; pub use config::Config; pub use error::Error; pub use info::{rustc_flags_capture, version, version::version}; -pub use pdu::{Event, PduBuilder, PduCount, PduEvent, PduId, RawPduId, StateKey}; +pub use matrix::{Event, EventTypeExt, PduCount, PduEvent, PduId, RoomVersion, pdu, state_res}; pub use server::Server; -pub use state_res::{EventTypeExt, RoomVersion, StateMap, TypeStateKey}; pub use utils::{ctor, dtor, implement, result, result::Result}; pub use crate as conduwuit_core; diff --git a/src/core/pdu/event.rs b/src/core/pdu/event.rs deleted file mode 100644 index 09ad1666..00000000 --- a/src/core/pdu/event.rs +++ /dev/null @@ -1,35 +0,0 @@ -use ruma::{MilliSecondsSinceUnixEpoch, OwnedEventId, 
RoomId, UserId, events::TimelineEventType}; -use serde_json::value::RawValue as RawJsonValue; - -use super::Pdu; -pub use crate::state_res::Event; - -impl Event for Pdu { - type Id = OwnedEventId; - - fn event_id(&self) -> &Self::Id { &self.event_id } - - fn room_id(&self) -> &RoomId { &self.room_id } - - fn sender(&self) -> &UserId { &self.sender } - - fn event_type(&self) -> &TimelineEventType { &self.kind } - - fn content(&self) -> &RawJsonValue { &self.content } - - fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { - MilliSecondsSinceUnixEpoch(self.origin_server_ts) - } - - fn state_key(&self) -> Option<&str> { self.state_key.as_deref() } - - fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { - self.prev_events.iter() - } - - fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { - self.auth_events.iter() - } - - fn redacts(&self) -> Option<&Self::Id> { self.redacts.as_ref() } -} diff --git a/src/service/admin/grant.rs b/src/service/admin/grant.rs index 5173987a..6780b7ae 100644 --- a/src/service/admin/grant.rs +++ b/src/service/admin/grant.rs @@ -1,6 +1,6 @@ use std::collections::BTreeMap; -use conduwuit::{Err, Result, debug_info, debug_warn, error, implement}; +use conduwuit::{Err, Result, debug_info, debug_warn, error, implement, matrix::pdu::PduBuilder}; use ruma::{ RoomId, UserId, events::{ @@ -14,8 +14,6 @@ use ruma::{ }, }; -use crate::pdu::PduBuilder; - /// Invite the user to the conduwuit admin room. /// /// This is equivalent to granting server admin privileges. diff --git a/src/service/mod.rs b/src/service/mod.rs index 8f4a84b0..63a51213 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -31,7 +31,6 @@ pub mod users; extern crate conduwuit_core as conduwuit; extern crate conduwuit_database as database; -pub use conduwuit::{PduBuilder, PduCount, PduEvent, pdu}; pub(crate) use service::{Args, Dep, Service}; pub use crate::services::Services; diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index 0402ff14..eb38c2c3 100644 --- a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -5,7 +5,9 @@ use std::{ }; use conduwuit::{ - PduEvent, Result, StateMap, debug, err, implement, trace, + Result, debug, err, implement, + matrix::{PduEvent, StateMap}, + trace, utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryWidebandExt}, }; use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::try_join}; diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index 086dc6bd..97d3df97 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -1,7 +1,8 @@ use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::Instant}; use conduwuit::{ - Err, EventTypeExt, PduEvent, Result, StateKey, debug, debug_info, err, implement, state_res, + Err, Result, debug, debug_info, err, implement, + matrix::{EventTypeExt, PduEvent, StateKey, state_res}, trace, utils::stream::{BroadbandExt, ReadyExt}, warn, diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index a1b0263a..12b56935 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,11 +1,9 @@ use std::sync::Arc; -use conduwuit::{Result, implement}; -use database::{Deserialized, Json, Map}; +use conduwuit::{Result, implement, 
matrix::pdu::PduEvent}; +use conduwuit_database::{Deserialized, Json, Map}; use ruma::{CanonicalJsonObject, EventId}; -use crate::PduEvent; - pub struct Service { db: Data, } diff --git a/src/service/rooms/read_receipt/mod.rs b/src/service/rooms/read_receipt/mod.rs index d6239aee..69e859c4 100644 --- a/src/service/rooms/read_receipt/mod.rs +++ b/src/service/rooms/read_receipt/mod.rs @@ -2,7 +2,11 @@ mod data; use std::{collections::BTreeMap, sync::Arc}; -use conduwuit::{PduCount, PduId, RawPduId, Result, debug, err, warn}; +use conduwuit::{ + Result, debug, err, + matrix::pdu::{PduCount, PduId, RawPduId}, + warn, +}; use futures::{Stream, TryFutureExt, try_join}; use ruma::{ OwnedEventId, OwnedUserId, RoomId, UserId, diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 3980617e..06ff6493 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -1,7 +1,7 @@ use std::{borrow::Borrow, fmt::Debug, mem::size_of_val, sync::Arc}; -pub use conduwuit::pdu::{ShortEventId, ShortId, ShortRoomId, ShortStateKey}; -use conduwuit::{Result, StateKey, err, implement, utils, utils::IterStream}; +pub use conduwuit::matrix::pdu::{ShortEventId, ShortId, ShortRoomId, ShortStateKey}; +use conduwuit::{Result, err, implement, matrix::StateKey, utils, utils::IterStream}; use database::{Deserialized, Get, Map, Qry}; use futures::{Stream, StreamExt}; use ruma::{EventId, RoomId, events::StateEventType}; diff --git a/src/service/rooms/state_accessor/room_state.rs b/src/service/rooms/state_accessor/room_state.rs index 642cd5d2..89fa2a83 100644 --- a/src/service/rooms/state_accessor/room_state.rs +++ b/src/service/rooms/state_accessor/room_state.rs @@ -1,6 +1,9 @@ use std::borrow::Borrow; -use conduwuit::{PduEvent, Result, StateKey, err, implement}; +use conduwuit::{ + Result, err, implement, + matrix::{PduEvent, StateKey}, +}; use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{EventId, RoomId, events::StateEventType}; use serde::Deserialize; diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs index 8f2dd76f..169e69e9 100644 --- a/src/service/rooms/state_accessor/state.rs +++ b/src/service/rooms/state_accessor/state.rs @@ -1,13 +1,15 @@ use std::{borrow::Borrow, ops::Deref, sync::Arc}; use conduwuit::{ - PduEvent, Result, StateKey, at, err, implement, pair_of, + Result, at, err, implement, + matrix::{PduEvent, StateKey}, + pair_of, utils::{ result::FlatOk, stream::{BroadbandExt, IterStream, ReadyExt, TryIgnore}, }, }; -use database::Deserialized; +use conduwuit_database::Deserialized; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, future::try_join, pin_mut}; use ruma::{ EventId, OwnedEventId, UserId, diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs index 7f9a7515..a680df55 100644 --- a/src/service/rooms/threads/mod.rs +++ b/src/service/rooms/threads/mod.rs @@ -1,13 +1,14 @@ use std::{collections::BTreeMap, sync::Arc}; use conduwuit::{ - PduCount, PduEvent, PduId, RawPduId, Result, err, + Result, err, + matrix::pdu::{PduCount, PduEvent, PduId, RawPduId}, utils::{ ReadyExt, stream::{TryIgnore, WidebandExt}, }, }; -use database::{Deserialized, Map}; +use conduwuit_database::{Deserialized, Map}; use futures::{Stream, StreamExt}; use ruma::{ CanonicalJsonValue, EventId, OwnedUserId, RoomId, UserId, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index dc359d22..947e1c38 100644 --- a/src/service/rooms/timeline/mod.rs +++ 
b/src/service/rooms/timeline/mod.rs @@ -10,16 +10,19 @@ use std::{ }; use async_trait::async_trait; +pub use conduwuit::matrix::pdu::{PduId, RawPduId}; use conduwuit::{ Err, Error, Result, Server, at, debug, debug_warn, err, error, implement, info, - pdu::{EventHash, PduBuilder, PduCount, PduEvent, gen_event_id}, - state_res::{self, Event, RoomVersion}, + matrix::{ + Event, + pdu::{EventHash, PduBuilder, PduCount, PduEvent, gen_event_id}, + state_res::{self, RoomVersion}, + }, utils::{ self, IterStream, MutexMap, MutexMapGuard, ReadyExt, future::TryExtExt, stream::TryIgnore, }, validated, warn, }; -pub use conduwuit::{PduId, RawPduId}; use futures::{ Future, FutureExt, Stream, StreamExt, TryStreamExt, future, future::ready, pin_mut, }; From bb8320a691eda03c202bc428e75a616b0021fe03 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 4 Apr 2025 02:39:40 +0000 Subject: [PATCH 195/310] abstract and encapsulate the awkward OptionFuture into Stream pattern Signed-off-by: Jason Volk --- src/api/client/sync/v3.rs | 45 +++----------------------- src/core/utils/future/mod.rs | 2 ++ src/core/utils/future/option_ext.rs | 3 ++ src/core/utils/future/option_stream.rs | 25 ++++++++++++++ 4 files changed, 35 insertions(+), 40 deletions(-) create mode 100644 src/core/utils/future/option_stream.rs diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 12731ff6..24930941 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -15,6 +15,7 @@ use conduwuit::{ result::FlatOk, utils::{ self, BoolExt, IterStream, ReadyExt, TryFutureExtExt, + future::OptionStream, math::ruma_from_u64, stream::{BroadbandExt, Tools, TryExpect, WidebandExt}, }, @@ -1036,7 +1037,7 @@ async fn calculate_state_incremental<'a>( }) .into(); - let state_diff: OptionFuture<_> = (!full_state && state_changed) + let state_diff_ids: OptionFuture<_> = (!full_state && state_changed) .then(|| { StreamExt::into_future( services @@ -1061,45 +1062,9 @@ async fn calculate_state_incremental<'a>( }) .into(); - let lazy_state_ids = lazy_state_ids - .map(|opt| { - opt.map(|(curr, next)| { - let opt = curr; - let iter = Option::into_iter(opt); - IterStream::stream(iter).chain(next) - }) - }) - .map(Option::into_iter) - .map(IterStream::stream) - .flatten_stream() - .flatten(); - - let state_diff_ids = state_diff - .map(|opt| { - opt.map(|(curr, next)| { - let opt = curr; - let iter = Option::into_iter(opt); - IterStream::stream(iter).chain(next) - }) - }) - .map(Option::into_iter) - .map(IterStream::stream) - .flatten_stream() - .flatten(); - let state_events = current_state_ids - .map(|opt| { - opt.map(|(curr, next)| { - let opt = curr; - let iter = Option::into_iter(opt); - IterStream::stream(iter).chain(next) - }) - }) - .map(Option::into_iter) - .map(IterStream::stream) - .flatten_stream() - .flatten() - .chain(state_diff_ids) + .stream() + .chain(state_diff_ids.stream()) .broad_filter_map(|(shortstatekey, shorteventid)| async move { if witness.is_none() || encrypted_room { return Some(shorteventid); @@ -1107,7 +1072,7 @@ async fn calculate_state_incremental<'a>( lazy_filter(services, sender_user, shortstatekey, shorteventid).await }) - .chain(lazy_state_ids) + .chain(lazy_state_ids.stream()) .broad_filter_map(|shorteventid| { services .rooms diff --git a/src/core/utils/future/mod.rs b/src/core/utils/future/mod.rs index e1d96941..4edd0102 100644 --- a/src/core/utils/future/mod.rs +++ b/src/core/utils/future/mod.rs @@ -1,9 +1,11 @@ mod bool_ext; mod ext_ext; mod option_ext; +mod option_stream; mod try_ext_ext; pub 
use bool_ext::{BoolExt, and, or}; pub use ext_ext::ExtExt; pub use option_ext::OptionExt; +pub use option_stream::OptionStream; pub use try_ext_ext::TryExtExt; diff --git a/src/core/utils/future/option_ext.rs b/src/core/utils/future/option_ext.rs index d553e5dc..920dd044 100644 --- a/src/core/utils/future/option_ext.rs +++ b/src/core/utils/future/option_ext.rs @@ -11,11 +11,14 @@ pub trait OptionExt { impl OptionExt for OptionFuture where Fut: Future + Send, + T: Send, { + #[inline] fn is_none_or(self, f: impl FnOnce(&T) -> bool + Send) -> impl Future + Send { self.map(|o| o.as_ref().is_none_or(f)) } + #[inline] fn is_some_and(self, f: impl FnOnce(&T) -> bool + Send) -> impl Future + Send { self.map(|o| o.as_ref().is_some_and(f)) } diff --git a/src/core/utils/future/option_stream.rs b/src/core/utils/future/option_stream.rs new file mode 100644 index 00000000..81130c87 --- /dev/null +++ b/src/core/utils/future/option_stream.rs @@ -0,0 +1,25 @@ +use futures::{Future, FutureExt, Stream, StreamExt, future::OptionFuture}; + +use super::super::IterStream; + +pub trait OptionStream { + fn stream(self) -> impl Stream + Send; +} + +impl OptionStream for OptionFuture +where + Fut: Future + Send, + S: Stream + Send, + O: IntoIterator + Send, + ::IntoIter: Send, + T: Send, +{ + #[inline] + fn stream(self) -> impl Stream + Send { + self.map(|opt| opt.map(|(curr, next)| curr.into_iter().stream().chain(next))) + .map(Option::into_iter) + .map(IterStream::stream) + .flatten_stream() + .flatten() + } +} From 58b8c7516a755c0300be1fe0d36b819ebda36ffb Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Apr 2025 09:02:12 +0000 Subject: [PATCH 196/310] extend extract_variant to multiple variants Signed-off-by: Jason Volk --- src/core/utils/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 7593990c..117fb739 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -49,10 +49,10 @@ pub fn exchange(state: &mut T, source: T) -> T { std::mem::replace(state, sou #[macro_export] macro_rules! extract_variant { - ($e:expr_2021, $variant:path) => { + ( $e:expr_2021, $( $variant:path )|* ) => { match $e { - | $variant(value) => Some(value), - | _ => None, + $( $variant(value) => Some(value), )* + _ => None, } }; } From a212bf7cfca7a6547681f46a438ecc278a905aab Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 5 Apr 2025 14:00:40 -0400 Subject: [PATCH 197/310] update default room version to v11 Signed-off-by: June Clementine Strawberry --- conduwuit-example.toml | 4 ++-- src/core/config/mod.rs | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 75ecddab..46459547 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -527,9 +527,9 @@ # Default room version conduwuit will create rooms with. # -# Per spec, room version 10 is the default. +# Per spec, room version 11 is the default. # -#default_room_version = 10 +#default_room_version = 11 # This item is undocumented. Please contribute documentation for it. # diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 7be140a5..bb509a0d 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -640,9 +640,9 @@ pub struct Config { /// Default room version conduwuit will create rooms with. /// - /// Per spec, room version 10 is the default. + /// Per spec, room version 11 is the default. 
/// - /// default: 10 + /// default: 11 #[serde(default = "default_default_room_version")] pub default_room_version: RoomVersionId, @@ -2170,7 +2170,7 @@ fn default_rocksdb_stats_level() -> u8 { 1 } // I know, it's a great name #[must_use] #[inline] -pub fn default_default_room_version() -> RoomVersionId { RoomVersionId::V10 } +pub fn default_default_room_version() -> RoomVersionId { RoomVersionId::V11 } fn default_ip_range_denylist() -> Vec { vec![ From c7246662f4b2c892667b253aff1560523d8e2cff Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 5 Apr 2025 14:07:37 -0400 Subject: [PATCH 198/310] try partially reverting 94b107b42b722aff9518f64ad603ce01665b25f3 Signed-off-by: June Clementine Strawberry --- src/api/client/keys.rs | 43 ++++++++++-------------------------------- 1 file changed, 10 insertions(+), 33 deletions(-) diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index 6865c2a4..adbdd715 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -11,7 +11,7 @@ use ruma::{ error::ErrorKind, keys::{ claim_keys, get_key_changes, get_keys, upload_keys, - upload_signatures::{self, v3::Failure}, + upload_signatures::{self}, upload_signing_keys, }, uiaa::{AuthFlow, AuthType, UiaaInfo}, @@ -308,82 +308,59 @@ async fn check_for_new_keys( /// /// Uploads end-to-end key signatures from the sender user. /// -/// TODO: clean this timo-code up more. tried to improve it a bit to stop -/// exploding the entire request on bad sigs, but needs way more work. +/// TODO: clean this timo-code up more and integrate failures. tried to improve +/// it a bit to stop exploding the entire request on bad sigs, but needs way +/// more work. pub(crate) async fn upload_signatures_route( State(services): State, body: Ruma, ) -> Result { - use upload_signatures::v3::FailureErrorCode::*; - if body.signed_keys.is_empty() { debug!("Empty signed_keys sent in key signature upload"); return Ok(upload_signatures::v3::Response::new()); } let sender_user = body.sender_user(); - let mut failures: BTreeMap> = BTreeMap::new(); - let mut failure_reasons: BTreeMap = BTreeMap::new(); - let failure = Failure { - errcode: InvalidSignature, - error: String::new(), - }; for (user_id, keys) in &body.signed_keys { for (key_id, key) in keys { let Ok(key) = serde_json::to_value(key) .inspect_err(|e| debug_warn!(?key_id, "Invalid \"key\" JSON: {e}")) else { - let mut failure = failure.clone(); - failure.error = String::from("Invalid \"key\" JSON"); - failure_reasons.insert(key_id.to_owned(), failure); continue; }; let Some(signatures) = key.get("signatures") else { - let mut failure = failure.clone(); - failure.error = String::from("Missing \"signatures\" field"); - failure_reasons.insert(key_id.to_owned(), failure); continue; }; let Some(sender_user_val) = signatures.get(sender_user.to_string()) else { - let mut failure = failure.clone(); - failure.error = String::from("Invalid user in signatures field"); - failure_reasons.insert(key_id.to_owned(), failure); continue; }; let Some(sender_user_object) = sender_user_val.as_object() else { - let mut failure = failure.clone(); - failure.error = String::from("signatures field is not a JSON object"); - failure_reasons.insert(key_id.to_owned(), failure); continue; }; for (signature, val) in sender_user_object.clone() { - let signature = (signature, val.to_string()); + let Some(val) = val.as_str().map(ToOwned::to_owned) else { + continue; + }; + let signature = (signature, val); - if let Err(e) = services + if let Err(_e) = services .users .sign_key(user_id, 
key_id, signature, sender_user) .await .inspect_err(|e| debug_warn!("{e}")) { - let mut failure = failure.clone(); - failure.error = format!("Error signing key: {e}"); - failure_reasons.insert(key_id.to_owned(), failure); continue; } } } - - if !failure_reasons.is_empty() { - failures.insert(user_id.to_owned(), failure_reasons.clone()); - } } - Ok(upload_signatures::v3::Response { failures }) + Ok(upload_signatures::v3::Response { failures: BTreeMap::new() }) } /// # `POST /_matrix/client/r0/keys/changes` From e28ae8fb4d442cba0eb52728a129372289c85ccd Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 5 Apr 2025 14:26:00 -0400 Subject: [PATCH 199/310] downgrade `deranged` crate Signed-off-by: June Clementine Strawberry --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0753f81d..86833adb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1279,9 +1279,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.4.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28cfac68e08048ae1883171632c2aef3ebc555621ae56fbccce1cbf22dd7f058" +checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" dependencies = [ "powerfmt", ] From d6cc447add272f9eff0b2c77fb751dcf055d3208 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 3 Apr 2025 21:26:53 +0000 Subject: [PATCH 200/310] simplify acl brick-check conditions Signed-off-by: Jason Volk --- src/api/client/state.rs | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/src/api/client/state.rs b/src/api/client/state.rs index 5c5c71f2..2ddc8f14 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -211,7 +211,7 @@ async fn allowed_to_send_state_event( // irreversible mistakes match json.deserialize_as::() { | Ok(acl_content) => { - if acl_content.allow.is_empty() { + if acl_content.allow_is_empty() { return Err!(Request(BadJson(debug_warn!( ?room_id, "Sending an ACL event with an empty allow key will permanently \ @@ -220,9 +220,7 @@ async fn allowed_to_send_state_event( )))); } - if acl_content.deny.contains(&String::from("*")) - && acl_content.allow.contains(&String::from("*")) - { + if acl_content.deny_contains("*") && acl_content.allow_contains("*") { return Err!(Request(BadJson(debug_warn!( ?room_id, "Sending an ACL event with a deny and allow key value of \"*\" will \ @@ -231,11 +229,9 @@ async fn allowed_to_send_state_event( )))); } - if acl_content.deny.contains(&String::from("*")) + if acl_content.deny_contains("*") && !acl_content.is_allowed(services.globals.server_name()) - && !acl_content - .allow - .contains(&services.globals.server_name().to_string()) + && !acl_content.allow_contains(services.globals.server_name().as_str()) { return Err!(Request(BadJson(debug_warn!( ?room_id, @@ -245,11 +241,9 @@ async fn allowed_to_send_state_event( )))); } - if !acl_content.allow.contains(&String::from("*")) + if !acl_content.allow_contains("*") && !acl_content.is_allowed(services.globals.server_name()) - && !acl_content - .allow - .contains(&services.globals.server_name().to_string()) + && !acl_content.allow_contains(services.globals.server_name().as_str()) { return Err!(Request(BadJson(debug_warn!( ?room_id, From 500faa8d7fcefab2f5bee867bf268f87fc0643fa Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 4 Apr 2025 01:05:43 +0000 Subject: [PATCH 201/310] simplify space join rules related Signed-off-by: Jason Volk --- Cargo.lock | 22 ++--- Cargo.toml | 2 +- 
src/api/client/room/summary.rs | 70 +++++++++------ src/service/rooms/spaces/mod.rs | 110 ++++++++++++------------ src/service/rooms/state_accessor/mod.rs | 37 +------- 5 files changed, 113 insertions(+), 128 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 86833adb..c2c5182f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3654,7 +3654,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "assign", "js_int", @@ -3674,7 +3674,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "js_int", "ruma-common", @@ -3686,7 +3686,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "as_variant", "assign", @@ -3709,7 +3709,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "as_variant", "base64 0.22.1", @@ -3741,7 +3741,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "as_variant", "indexmap 2.8.0", @@ -3766,7 +3766,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "bytes", "headers", @@ -3788,7 +3788,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3797,7 +3797,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = 
"git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "js_int", "ruma-common", @@ -3807,7 +3807,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3822,7 +3822,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "js_int", "ruma-common", @@ -3834,7 +3834,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef#edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index a44fc0f0..b1c5acb5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -350,7 +350,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://github.com/girlbossceo/ruwuma" #branch = "conduwuit-changes" -rev = "edbdc79e560d01d9e4a76f7421e70ea4fd4c54ef" +rev = "920148dca1076454ca0ca5d43b5ce1aa708381d4" features = [ "compat", "rand", diff --git a/src/api/client/room/summary.rs b/src/api/client/room/summary.rs index 34820e83..2fa81bd2 100644 --- a/src/api/client/room/summary.rs +++ b/src/api/client/room/summary.rs @@ -4,9 +4,13 @@ use conduwuit::{ Err, Result, debug_warn, utils::{IterStream, future::TryExtExt}, }; -use futures::{FutureExt, StreamExt, future::join3, stream::FuturesUnordered}; +use futures::{ + FutureExt, StreamExt, + future::{OptionFuture, join3}, + stream::FuturesUnordered, +}; use ruma::{ - OwnedRoomId, OwnedServerName, RoomId, UserId, + OwnedServerName, RoomId, UserId, api::{ client::room::get_summary, federation::space::{SpaceHierarchyParentSummary, get_hierarchy}, @@ -91,13 +95,9 @@ async fn room_summary_response( join_rule: room.join_rule, room_type: room.room_type, room_version: room.room_version, - membership: if sender_user.is_none() { - None - } else { - Some(MembershipState::Leave) - }, encryption: room.encryption, allowed_room_ids: room.allowed_room_ids, + membership: sender_user.is_some().then_some(MembershipState::Leave), }) } @@ -106,20 +106,22 @@ async fn local_room_summary_response( room_id: &RoomId, sender_user: Option<&UserId>, ) -> Result { - let join_rule = services.rooms.state_accessor.get_space_join_rule(room_id); + let join_rule = services.rooms.state_accessor.get_join_rules(room_id); + let world_readable = services.rooms.state_accessor.is_world_readable(room_id); + let guest_can_join = services.rooms.state_accessor.guest_can_join(room_id); - let ((join_rule, allowed_room_ids), world_readable, guest_can_join) = + let (join_rule, world_readable, guest_can_join) = join3(join_rule, world_readable, guest_can_join).await; user_can_see_summary( services, room_id, - &join_rule, + &join_rule.clone().into(), 
guest_can_join, world_readable, - &allowed_room_ids, + join_rule.allowed_rooms(), sender_user, ) .await?; @@ -129,26 +131,43 @@ async fn local_room_summary_response( .state_accessor .get_canonical_alias(room_id) .ok(); + let name = services.rooms.state_accessor.get_name(room_id).ok(); + let topic = services.rooms.state_accessor.get_room_topic(room_id).ok(); + let room_type = services.rooms.state_accessor.get_room_type(room_id).ok(); + let avatar_url = services .rooms .state_accessor .get_avatar(room_id) .map(|res| res.into_option().unwrap_or_default().url); + let room_version = services.rooms.state.get_room_version(room_id).ok(); + let encryption = services .rooms .state_accessor .get_room_encryption(room_id) .ok(); + let num_joined_members = services .rooms .state_cache .room_joined_count(room_id) .unwrap_or(0); + let membership: OptionFuture<_> = sender_user + .map(|sender_user| { + services + .rooms + .state_accessor + .get_member(room_id, sender_user) + .map_ok_or(MembershipState::Leave, |content| content.membership) + }) + .into(); + let ( canonical_alias, name, @@ -158,6 +177,7 @@ async fn local_room_summary_response( room_type, room_version, encryption, + membership, ) = futures::join!( canonical_alias, name, @@ -167,6 +187,7 @@ async fn local_room_summary_response( room_type, room_version, encryption, + membership, ); Ok(get_summary::msc3266::Response { @@ -178,21 +199,12 @@ async fn local_room_summary_response( num_joined_members: num_joined_members.try_into().unwrap_or_default(), topic, world_readable, - join_rule, room_type, room_version, - membership: if let Some(sender_user) = sender_user { - services - .rooms - .state_accessor - .get_member(room_id, sender_user) - .await - .map_or(Some(MembershipState::Leave), |content| Some(content.membership)) - } else { - None - }, encryption, - allowed_room_ids, + membership, + allowed_room_ids: join_rule.allowed_rooms().map(Into::into).collect(), + join_rule: join_rule.into(), }) } @@ -241,7 +253,7 @@ async fn remote_room_summary_hierarchy_response( &room.join_rule, room.guest_can_join, room.world_readable, - &room.allowed_room_ids, + room.allowed_room_ids.iter().map(AsRef::as_ref), sender_user, ) .await @@ -254,15 +266,18 @@ async fn remote_room_summary_hierarchy_response( ))) } -async fn user_can_see_summary( +async fn user_can_see_summary<'a, I>( services: &Services, room_id: &RoomId, join_rule: &SpaceRoomJoinRule, guest_can_join: bool, world_readable: bool, - allowed_room_ids: &[OwnedRoomId], + allowed_room_ids: I, sender_user: Option<&UserId>, -) -> Result { +) -> Result +where + I: Iterator + Send, +{ match sender_user { | Some(sender_user) => { let user_can_see_state_events = services @@ -271,7 +286,6 @@ async fn user_can_see_summary( .user_can_see_state_events(sender_user, room_id); let is_guest = services.users.is_deactivated(sender_user).unwrap_or(false); let user_in_allowed_restricted_room = allowed_room_ids - .iter() .stream() .any(|room| services.rooms.state_cache.is_joined(sender_user, room)); diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index f51a5e3a..ea9756ba 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -121,21 +121,22 @@ pub async fn get_summary_and_children_local( | None => (), // cache miss | Some(None) => return Ok(None), | Some(Some(cached)) => { - return Ok(Some( - if self - .is_accessible_child( - current_room, - &cached.summary.join_rule, - identifier, - &cached.summary.allowed_room_ids, - ) - .await - { - 
SummaryAccessibility::Accessible(cached.summary.clone()) - } else { - SummaryAccessibility::Inaccessible - }, - )); + let allowed_rooms = cached.summary.allowed_room_ids.iter().map(AsRef::as_ref); + + let is_accessible_child = self.is_accessible_child( + current_room, + &cached.summary.join_rule, + identifier, + allowed_rooms, + ); + + let accessibility = if is_accessible_child.await { + SummaryAccessibility::Accessible(cached.summary.clone()) + } else { + SummaryAccessibility::Inaccessible + }; + + return Ok(Some(accessibility)); }, } @@ -145,12 +146,11 @@ pub async fn get_summary_and_children_local( .collect() .await; - let summary = self + let Ok(summary) = self .get_room_summary(current_room, children_pdus, identifier) .boxed() - .await; - - let Ok(summary) = summary else { + .await + else { return Ok(None); }; @@ -217,20 +217,19 @@ async fn get_summary_and_children_federation( .await; let identifier = Identifier::UserId(user_id); + let allowed_room_ids = summary.allowed_room_ids.iter().map(AsRef::as_ref); + let is_accessible_child = self - .is_accessible_child( - current_room, - &summary.join_rule, - &identifier, - &summary.allowed_room_ids, - ) + .is_accessible_child(current_room, &summary.join_rule, &identifier, allowed_room_ids) .await; - if is_accessible_child { - return Ok(Some(SummaryAccessibility::Accessible(summary))); - } + let accessibility = if is_accessible_child { + SummaryAccessibility::Accessible(summary) + } else { + SummaryAccessibility::Inaccessible + }; - Ok(Some(SummaryAccessibility::Inaccessible)) + Ok(Some(accessibility)) } /// Simply returns the stripped m.space.child events of a room @@ -305,14 +304,15 @@ async fn get_room_summary( children_state: Vec>, identifier: &Identifier<'_>, ) -> Result { - let (join_rule, allowed_room_ids) = self - .services - .state_accessor - .get_space_join_rule(room_id) - .await; + let join_rule = self.services.state_accessor.get_join_rules(room_id).await; let is_accessible_child = self - .is_accessible_child(room_id, &join_rule, identifier, &allowed_room_ids) + .is_accessible_child( + room_id, + &join_rule.clone().into(), + identifier, + join_rule.allowed_rooms(), + ) .await; if !is_accessible_child { @@ -379,7 +379,7 @@ async fn get_room_summary( encryption, ); - Ok(SpaceHierarchyParentSummary { + let summary = SpaceHierarchyParentSummary { canonical_alias, name, topic, @@ -388,24 +388,29 @@ async fn get_room_summary( avatar_url, room_type, children_state, - allowed_room_ids, - join_rule, - room_id: room_id.to_owned(), - num_joined_members: num_joined_members.try_into().unwrap_or_default(), encryption, room_version, - }) + room_id: room_id.to_owned(), + num_joined_members: num_joined_members.try_into().unwrap_or_default(), + allowed_room_ids: join_rule.allowed_rooms().map(Into::into).collect(), + join_rule: join_rule.clone().into(), + }; + + Ok(summary) } /// With the given identifier, checks if a room is accessable #[implement(Service)] -async fn is_accessible_child( +async fn is_accessible_child<'a, I>( &self, current_room: &RoomId, join_rule: &SpaceRoomJoinRule, identifier: &Identifier<'_>, - allowed_room_ids: &[OwnedRoomId], -) -> bool { + allowed_rooms: I, +) -> bool +where + I: Iterator + Send, +{ if let Identifier::ServerName(server_name) = identifier { // Checks if ACLs allow for the server to participate if self @@ -430,21 +435,18 @@ async fn is_accessible_child( } } - match join_rule { + match *join_rule { | SpaceRoomJoinRule::Public | SpaceRoomJoinRule::Knock | SpaceRoomJoinRule::KnockRestricted => true, | 
SpaceRoomJoinRule::Restricted => - allowed_room_ids - .iter() + allowed_rooms .stream() - .any(|room| async { - match identifier { - | Identifier::UserId(user) => - self.services.state_cache.is_joined(user, room).await, - | Identifier::ServerName(server) => - self.services.state_cache.server_in_room(server, room).await, - } + .any(async |room| match identifier { + | Identifier::UserId(user) => + self.services.state_cache.is_joined(user, room).await, + | Identifier::ServerName(server) => + self.services.state_cache.server_in_room(server, room).await, }) .await, diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 7fff5935..f719fc7b 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -9,7 +9,7 @@ use async_trait::async_trait; use conduwuit::{Result, err}; use database::Map; use ruma::{ - EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, OwnedRoomId, RoomId, UserId, + EventEncryptionAlgorithm, JsOption, OwnedRoomAliasId, RoomId, UserId, events::{ StateEventType, room::{ @@ -19,14 +19,13 @@ use ruma::{ encryption::RoomEncryptionEventContent, guest_access::{GuestAccess, RoomGuestAccessEventContent}, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent, RoomMembership}, + join_rules::{JoinRule, RoomJoinRulesEventContent}, member::RoomMemberEventContent, name::RoomNameEventContent, topic::RoomTopicEventContent, }, }, room::RoomType, - space::SpaceRoomJoinRule, }; use crate::{Dep, rooms}; @@ -129,42 +128,12 @@ impl Service { .map(|c: RoomTopicEventContent| c.topic) } - /// Returns the space join rule (`SpaceRoomJoinRule`) for a given room and - /// any allowed room IDs if available. Will default to Invite and empty vec - /// if doesnt exist or invalid, - pub async fn get_space_join_rule( - &self, - room_id: &RoomId, - ) -> (SpaceRoomJoinRule, Vec) { - self.room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") - .await - .map_or_else( - |_| (SpaceRoomJoinRule::Invite, vec![]), - |c: RoomJoinRulesEventContent| { - (c.join_rule.clone().into(), self.allowed_room_ids(c.join_rule)) - }, - ) - } - /// Returns the join rules for a given room (`JoinRule` type). 
Will default /// to Invite if doesnt exist or invalid pub async fn get_join_rules(&self, room_id: &RoomId) -> JoinRule { self.room_state_get_content(room_id, &StateEventType::RoomJoinRules, "") .await - .map_or_else(|_| JoinRule::Invite, |c: RoomJoinRulesEventContent| (c.join_rule)) - } - - /// Returns an empty vec if not a restricted room - pub fn allowed_room_ids(&self, join_rule: JoinRule) -> Vec { - let mut room_ids = Vec::with_capacity(1); // restricted rooms generally only have 1 allowed room ID - if let JoinRule::Restricted(r) | JoinRule::KnockRestricted(r) = join_rule { - for rule in r.allow { - if let AllowRule::RoomMembership(RoomMembership { room_id: membership }) = rule { - room_ids.push(membership.clone()); - } - } - } - room_ids + .map_or(JoinRule::Invite, |c: RoomJoinRulesEventContent| c.join_rule) } pub async fn get_room_type(&self, room_id: &RoomId) -> Result { From 9678948daf76b64368a6865d359ab162de1c5855 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 5 Apr 2025 18:31:14 -0400 Subject: [PATCH 202/310] use patch of resolv-conf crate to allow no-aaaa resolv.conf option Signed-off-by: June Clementine Strawberry --- Cargo.lock | 3 +-- Cargo.toml | 9 ++++++++- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c2c5182f..8817af1a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3625,8 +3625,7 @@ dependencies = [ [[package]] name = "resolv-conf" version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48375394603e3dd4b2d64371f7148fd8c7baa2680e28741f2cb8d23b59e3d4c4" +source = "git+https://github.com/girlbossceo/resolv-conf?rev=200e958941d522a70c5877e3d846f55b5586c68d#200e958941d522a70c5877e3d846f55b5586c68d" dependencies = [ "hostname", ] diff --git a/Cargo.toml b/Cargo.toml index b1c5acb5..62350dee 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,7 @@ license = "Apache-2.0" # See also `rust-toolchain.toml` readme = "README.md" repository = "https://github.com/girlbossceo/conduwuit" -rust-version = "1.85.0" +rust-version = "1.86.0" version = "0.5.0" [workspace.metadata.crane] @@ -580,6 +580,13 @@ rev = "9c8e51510c35077df888ee72a36b4b05637147da" git = "https://github.com/girlbossceo/hyper-util" rev = "e4ae7628fe4fcdacef9788c4c8415317a4489941" +# allows no-aaaa option in resolv.conf +# bumps rust edition and toolchain to 1.86.0 and 2024 +# use sat_add on line number errors +[patch.crates-io.resolv-conf] +git = "https://github.com/girlbossceo/resolv-conf" +rev = "200e958941d522a70c5877e3d846f55b5586c68d" + # # Our crates # From 3cc92b32ec97667bbabfb44edc305a972a7d3437 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 5 Apr 2025 18:37:13 -0400 Subject: [PATCH 203/310] bump rust toolchain to 1.86.0 Signed-off-by: June Clementine Strawberry --- flake.nix | 2 +- rust-toolchain.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/flake.nix b/flake.nix index 9db2e90a..49e860ed 100644 --- a/flake.nix +++ b/flake.nix @@ -26,7 +26,7 @@ file = ./rust-toolchain.toml; # See also `rust-toolchain.toml` - sha256 = "sha256-AJ6LX/Q/Er9kS15bn9iflkUwcgYqRQxiOIL2ToVAXaU="; + sha256 = "sha256-X/4ZBHO3iW0fOenQ3foEvscgAPJYl2abspaBThDOukI="; }; mkScope = pkgs: pkgs.lib.makeScope pkgs.newScope (self: { diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 97b4a789..aadc8f99 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -9,7 +9,7 @@ # If you're having trouble making the relevant changes, bug a maintainer. 
[toolchain] -channel = "1.85.0" +channel = "1.86.0" profile = "minimal" components = [ # For rust-analyzer From 6578b83bce71e9a232ff8531e80ab7d6d12a731c Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sat, 5 Apr 2025 20:09:22 -0400 Subject: [PATCH 204/310] parallelise IO of user searching, improve perf, raise max limit to 500 Signed-off-by: June Clementine Strawberry --- src/api/client/user_directory.rs | 121 ++++++++++++++----------------- 1 file changed, 55 insertions(+), 66 deletions(-) diff --git a/src/api/client/user_directory.rs b/src/api/client/user_directory.rs index 8f564eed..99b3bb67 100644 --- a/src/api/client/user_directory.rs +++ b/src/api/client/user_directory.rs @@ -1,16 +1,20 @@ use axum::extract::State; -use conduwuit::{Result, utils::TryFutureExtExt}; -use futures::{StreamExt, pin_mut}; +use conduwuit::{ + Result, + utils::{future::BoolExt, stream::BroadbandExt}, +}; +use futures::{FutureExt, StreamExt, pin_mut}; use ruma::{ - api::client::user_directory::search_users, - events::{ - StateEventType, - room::join_rules::{JoinRule, RoomJoinRulesEventContent}, - }, + api::client::user_directory::search_users::{self}, + events::room::join_rules::JoinRule, }; use crate::Ruma; +// conduwuit can handle a lot more results than synapse +const LIMIT_MAX: usize = 500; +const LIMIT_DEFAULT: usize = 10; + /// # `POST /_matrix/client/r0/user_directory/search` /// /// Searches all known users for a match. @@ -21,78 +25,63 @@ pub(crate) async fn search_users_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let limit = usize::try_from(body.limit).map_or(10, usize::from).min(100); // default limit is 10 + let sender_user = body.sender_user(); + let limit = usize::try_from(body.limit) + .map_or(LIMIT_DEFAULT, usize::from) + .min(LIMIT_MAX); - let users = services.users.stream().filter_map(|user_id| async { - // Filter out buggy users (they should not exist, but you never know...) - let user = search_users::v3::User { - user_id: user_id.to_owned(), - display_name: services.users.displayname(user_id).await.ok(), - avatar_url: services.users.avatar_url(user_id).await.ok(), - }; + let mut users = services + .users + .stream() + .map(ToOwned::to_owned) + .broad_filter_map(async |user_id| { + let user = search_users::v3::User { + user_id: user_id.clone(), + display_name: services.users.displayname(&user_id).await.ok(), + avatar_url: services.users.avatar_url(&user_id).await.ok(), + }; - let user_id_matches = user - .user_id - .to_string() - .to_lowercase() - .contains(&body.search_term.to_lowercase()); + let user_id_matches = user + .user_id + .as_str() + .to_lowercase() + .contains(&body.search_term.to_lowercase()); - let user_displayname_matches = user - .display_name - .as_ref() - .filter(|name| { + let user_displayname_matches = user.display_name.as_ref().is_some_and(|name| { name.to_lowercase() .contains(&body.search_term.to_lowercase()) - }) - .is_some(); + }); - if !user_id_matches && !user_displayname_matches { - return None; - } + if !user_id_matches && !user_displayname_matches { + return None; + } - // It's a matching user, but is the sender allowed to see them? 
- let mut user_visible = false; - - let user_is_in_public_rooms = services - .rooms - .state_cache - .rooms_joined(&user.user_id) - .any(|room| { - services - .rooms - .state_accessor - .room_state_get_content::( - room, - &StateEventType::RoomJoinRules, - "", - ) - .map_ok_or(false, |content| content.join_rule == JoinRule::Public) - }) - .await; - - if user_is_in_public_rooms { - user_visible = true; - } else { - let user_is_in_shared_rooms = services + let user_in_public_room = services .rooms .state_cache - .user_sees_user(sender_user, &user.user_id) - .await; + .rooms_joined(&user_id) + .map(ToOwned::to_owned) + .any(|room| async move { + services + .rooms + .state_accessor + .get_join_rules(&room) + .map(|rule| matches!(rule, JoinRule::Public)) + .await + }); - if user_is_in_shared_rooms { - user_visible = true; - } - } + let user_sees_user = services + .rooms + .state_cache + .user_sees_user(sender_user, &user_id); - user_visible.then_some(user) - }); + pin_mut!(user_in_public_room, user_sees_user); - pin_mut!(users); + user_in_public_room.or(user_sees_user).await.then_some(user) + }); - let limited = users.by_ref().next().await.is_some(); - - let results = users.take(limit).collect().await; + let results = users.by_ref().take(limit).collect().await; + let limited = users.next().await.is_some(); Ok(search_users::v3::Response { results, limited }) } From 5f8c68ab842d66ecda70726e2f9726824d51b815 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 6 Apr 2025 13:17:13 -0400 Subject: [PATCH 205/310] add trace logging for room summaries, use server_in_room instead of exists Signed-off-by: June Clementine Strawberry --- src/api/client/room/summary.rs | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/src/api/client/room/summary.rs b/src/api/client/room/summary.rs index 2fa81bd2..67d2e2ad 100644 --- a/src/api/client/room/summary.rs +++ b/src/api/client/room/summary.rs @@ -1,7 +1,7 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - Err, Result, debug_warn, + Err, Result, debug_warn, trace, utils::{IterStream, future::TryExtExt}, }; use futures::{ @@ -74,7 +74,12 @@ async fn room_summary_response( servers: &[OwnedServerName], sender_user: Option<&UserId>, ) -> Result { - if services.rooms.metadata.exists(room_id).await { + if services + .rooms + .state_cache + .server_in_room(services.globals.server_name(), room_id) + .await + { return local_room_summary_response(services, room_id, sender_user) .boxed() .await; @@ -106,14 +111,14 @@ async fn local_room_summary_response( room_id: &RoomId, sender_user: Option<&UserId>, ) -> Result { + trace!(?sender_user, "Sending local room summary response for {room_id:?}"); let join_rule = services.rooms.state_accessor.get_join_rules(room_id); - let world_readable = services.rooms.state_accessor.is_world_readable(room_id); - let guest_can_join = services.rooms.state_accessor.guest_can_join(room_id); let (join_rule, world_readable, guest_can_join) = join3(join_rule, world_readable, guest_can_join).await; + trace!("{join_rule:?}, {world_readable:?}, {guest_can_join:?}"); user_can_see_summary( services, @@ -215,6 +220,7 @@ async fn remote_room_summary_hierarchy_response( servers: &[OwnedServerName], sender_user: Option<&UserId>, ) -> Result { + trace!(?sender_user, ?servers, "Sending remote room summary response for {room_id:?}"); if !services.config.allow_federation { return Err!(Request(Forbidden("Federation is disabled."))); } @@ -237,6 +243,7 @@ async fn 
remote_room_summary_hierarchy_response( .collect(); while let Some(Ok(response)) = requests.next().await { + trace!("{response:?}"); let room = response.room.clone(); if room.room_id != room_id { debug_warn!( @@ -278,6 +285,7 @@ async fn user_can_see_summary<'a, I>( where I: Iterator + Send, { + let is_public_room = matches!(join_rule, Public | Knock | KnockRestricted); match sender_user { | Some(sender_user) => { let user_can_see_state_events = services @@ -296,7 +304,7 @@ where if user_can_see_state_events || (is_guest && guest_can_join) - || matches!(&join_rule, &Public | &Knock | &KnockRestricted) + || is_public_room || user_in_allowed_restricted_room { return Ok(()); @@ -309,7 +317,7 @@ where ))) }, | None => { - if matches!(join_rule, Public | Knock | KnockRestricted) || world_readable { + if is_public_room || world_readable { return Ok(()); } From ff276a42a36cfe565ff541ce064db25bbb1946c8 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 6 Apr 2025 13:19:09 -0400 Subject: [PATCH 206/310] drop unnecessary info log to debug Signed-off-by: June Clementine Strawberry --- src/api/client/keys.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/api/client/keys.rs b/src/api/client/keys.rs index adbdd715..650c573f 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -1,7 +1,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use axum::extract::State; -use conduwuit::{Err, Error, Result, debug, debug_warn, err, info, result::NotFound, utils}; +use conduwuit::{Err, Error, Result, debug, debug_warn, err, result::NotFound, utils}; use conduwuit_service::{Services, users::parse_master_key}; use futures::{StreamExt, stream::FuturesUnordered}; use ruma::{ @@ -177,7 +177,7 @@ pub(crate) async fn upload_signing_keys_route( body.master_key.as_ref(), ) .await - .inspect_err(|e| info!(?e)) + .inspect_err(|e| debug!(?e)) { | Ok(exists) => { if let Some(result) = exists { From d5ad973464168c567c3f9615380ced9e0067da4f Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 6 Apr 2025 15:25:11 -0400 Subject: [PATCH 207/310] change forbidden_server_names and etc to allow regex patterns for wildcards Signed-off-by: June Clementine Strawberry --- conduwuit-example.toml | 27 ++++++++++------ src/api/client/directory.rs | 14 ++++++--- src/api/client/membership.rs | 6 ++-- src/api/client/message.rs | 3 +- src/api/router/auth.rs | 3 +- src/api/server/invite.rs | 6 ++-- src/api/server/make_join.rs | 6 ++-- src/api/server/make_knock.rs | 6 ++-- src/api/server/send_join.rs | 12 +++----- src/api/server/send_knock.rs | 6 ++-- src/core/config/mod.rs | 51 +++++++++++++++++-------------- src/service/federation/execute.rs | 2 +- src/service/media/remote.rs | 8 ++++- 13 files changed, 79 insertions(+), 71 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 46459547..118bc57d 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -594,7 +594,7 @@ # Currently, conduwuit doesn't support inbound batched key requests, so # this list should only contain other Synapse servers. # -# example: ["matrix.org", "envs.net", "tchncs.de"] +# example: ["matrix.org", "tchncs.de"] # #trusted_servers = ["matrix.org"] @@ -1186,13 +1186,16 @@ # #prune_missing_media = false -# Vector list of servers that conduwuit will refuse to download remote -# media from. +# Vector list of regex patterns of server names that conduwuit will refuse +# to download remote media from. 
+# +# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] # #prevent_media_downloads_from = [] -# List of forbidden server names that we will block incoming AND outgoing -# federation with, and block client room joins / remote user invites. +# List of forbidden server names via regex patterns that we will block +# incoming AND outgoing federation with, and block client room joins / +# remote user invites. # # This check is applied on the room ID, room alias, sender server name, # sender user's server name, inbound federation X-Matrix origin, and @@ -1200,11 +1203,15 @@ # # Basically "global" ACLs. # +# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] +# #forbidden_remote_server_names = [] -# List of forbidden server names that we will block all outgoing federated -# room directory requests for. Useful for preventing our users from -# wandering into bad servers or spaces. +# List of forbidden server names via regex patterns that we will block all +# outgoing federated room directory requests for. Useful for preventing +# our users from wandering into bad servers or spaces. +# +# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] # #forbidden_remote_room_directory_server_names = [] @@ -1315,7 +1322,7 @@ # used, and startup as warnings if any room aliases in your database have # a forbidden room alias/ID. # -# example: ["19dollarfortnitecards", "b[4a]droom"] +# example: ["19dollarfortnitecards", "b[4a]droom", "badphrase"] # #forbidden_alias_names = [] @@ -1328,7 +1335,7 @@ # startup as warnings if any local users in your database have a forbidden # username. # -# example: ["administrator", "b[a4]dusernam[3e]"] +# example: ["administrator", "b[a4]dusernam[3e]", "badphrase"] # #forbidden_usernames = [] diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 9ca35537..b44b9f64 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -52,10 +52,13 @@ pub(crate) async fn get_public_rooms_filtered_route( ) -> Result { if let Some(server) = &body.server { if services - .server .config .forbidden_remote_room_directory_server_names - .contains(server) + .is_match(server.host()) + || services + .config + .forbidden_remote_server_names + .is_match(server.host()) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } @@ -90,10 +93,13 @@ pub(crate) async fn get_public_rooms_route( ) -> Result { if let Some(server) = &body.server { if services - .server .config .forbidden_remote_room_directory_server_names - .contains(server) + .is_match(server.host()) + || services + .config + .forbidden_remote_server_names + .is_match(server.host()) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index d0345c8e..1eeacf83 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -79,10 +79,9 @@ async fn banned_room_check( if let Some(room_id) = room_id { if services.rooms.metadata.is_banned(room_id).await || services - .server .config .forbidden_remote_server_names - .contains(&room_id.server_name().unwrap().to_owned()) + .is_match(room_id.server_name().unwrap().host()) { warn!( "User {user_id} who is not an admin attempted to send an invite for or \ @@ -120,10 +119,9 @@ async fn banned_room_check( } } else if let Some(server_name) = server_name { if services - .server .config .forbidden_remote_server_names - .contains(&server_name.to_owned()) + .is_match(server_name.host()) { warn!( "User 
{user_id} who is not an admin tried joining a room which has the server \ diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 3e784a4a..db11ef4a 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -261,10 +261,9 @@ pub(crate) async fn is_ignored_pdu( let ignored_type = IGNORED_MESSAGE_TYPES.binary_search(&pdu.kind).is_ok(); let ignored_server = services - .server .config .forbidden_remote_server_names - .contains(pdu.sender().server_name()); + .is_match(pdu.sender().server_name().host()); if ignored_type && (ignored_server || services.users.user_is_ignored(&pdu.sender, user_id).await) diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index 5cd7b831..0eb61ca6 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -317,10 +317,9 @@ fn auth_server_checks(services: &Services, x_matrix: &XMatrix) -> Result<()> { let origin = &x_matrix.origin; if services - .server .config .forbidden_remote_server_names - .contains(origin) + .is_match(origin.host()) { return Err!(Request(Forbidden(debug_warn!( "Federation requests from {origin} denied." diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index cda34fb5..edd6ac16 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -38,20 +38,18 @@ pub(crate) async fn create_invite_route( if let Some(server) = body.room_id.server_name() { if services - .server .config .forbidden_remote_server_names - .contains(&server.to_owned()) + .is_match(server.host()) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } } if services - .server .config .forbidden_remote_server_names - .contains(body.origin()) + .is_match(body.origin().host()) { warn!( "Received federated/remote invite from banned server {} for room ID {}. Rejecting.", diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs index 4664b904..ac2c5485 100644 --- a/src/api/server/make_join.rs +++ b/src/api/server/make_join.rs @@ -42,10 +42,9 @@ pub(crate) async fn create_join_event_template_route( .await?; if services - .server .config .forbidden_remote_server_names - .contains(body.origin()) + .is_match(body.origin().host()) { warn!( "Server {} for remote user {} tried joining room ID {} which has a server name that \ @@ -59,10 +58,9 @@ pub(crate) async fn create_join_event_template_route( if let Some(server) = body.room_id.server_name() { if services - .server .config .forbidden_remote_server_names - .contains(&server.to_owned()) + .is_match(server.host()) { return Err!(Request(Forbidden(warn!( "Room ID server name {server} is banned on this homeserver." 
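All of the `.is_match(...)` calls in this patch run against the host portion of a server name, using the `RegexSet` now held in the config. A standalone sketch of that behaviour, with invented patterns and host names and assuming only the plain `regex` crate API:

    // Sketch only: mirrors the `.is_match(server.host())` checks above.
    use regex::RegexSet;

    fn main() {
        let forbidden = RegexSet::new([r"badserver\.tld$", "badphrase"])
            .expect("patterns compile");

        assert!(forbidden.is_match("matrix.badserver.tld"));   // suffix pattern
        assert!(forbidden.is_match("badphrase.example.com"));  // substring pattern
        assert!(!forbidden.is_match("matrix.org"));            // unrelated host passes
    }

This is also why the updated config comments show regex-style examples such as `badserver\.tld$` rather than literal host names.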
diff --git a/src/api/server/make_knock.rs b/src/api/server/make_knock.rs index 6d71ab2a..511c13b2 100644 --- a/src/api/server/make_knock.rs +++ b/src/api/server/make_knock.rs @@ -33,10 +33,9 @@ pub(crate) async fn create_knock_event_template_route( .await?; if services - .server .config .forbidden_remote_server_names - .contains(body.origin()) + .is_match(body.origin().host()) { warn!( "Server {} for remote user {} tried knocking room ID {} which has a server name \ @@ -50,10 +49,9 @@ pub(crate) async fn create_knock_event_template_route( if let Some(server) = body.room_id.server_name() { if services - .server .config .forbidden_remote_server_names - .contains(&server.to_owned()) + .is_match(server.host()) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index 2e2e89ee..a66d8890 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -268,10 +268,9 @@ pub(crate) async fn create_join_event_v1_route( body: Ruma, ) -> Result { if services - .server .config .forbidden_remote_server_names - .contains(body.origin()) + .is_match(body.origin().host()) { warn!( "Server {} tried joining room ID {} through us who has a server name that is \ @@ -284,10 +283,9 @@ pub(crate) async fn create_join_event_v1_route( if let Some(server) = body.room_id.server_name() { if services - .server .config .forbidden_remote_server_names - .contains(&server.to_owned()) + .is_match(server.host()) { warn!( "Server {} tried joining room ID {} through us which has a server name that is \ @@ -316,20 +314,18 @@ pub(crate) async fn create_join_event_v2_route( body: Ruma, ) -> Result { if services - .server .config .forbidden_remote_server_names - .contains(body.origin()) + .is_match(body.origin().host()) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } if let Some(server) = body.room_id.server_name() { if services - .server .config .forbidden_remote_server_names - .contains(&server.to_owned()) + .is_match(server.host()) { warn!( "Server {} tried joining room ID {} through us which has a server name that is \ diff --git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs index c5ab0306..ee7b6cba 100644 --- a/src/api/server/send_knock.rs +++ b/src/api/server/send_knock.rs @@ -26,10 +26,9 @@ pub(crate) async fn create_knock_event_v1_route( body: Ruma, ) -> Result { if services - .server .config .forbidden_remote_server_names - .contains(body.origin()) + .is_match(body.origin().host()) { warn!( "Server {} tried knocking room ID {} who has a server name that is globally \ @@ -42,10 +41,9 @@ pub(crate) async fn create_knock_event_v1_route( if let Some(server) = body.room_id.server_name() { if services - .server .config .forbidden_remote_server_names - .contains(&server.to_owned()) + .is_match(server.host()) { warn!( "Server {} tried knocking room ID {} which has a server name that is globally \ diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index bb509a0d..0ca6bbaf 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -3,7 +3,7 @@ pub mod manager; pub mod proxy; use std::{ - collections::{BTreeMap, BTreeSet, HashSet}, + collections::{BTreeMap, BTreeSet}, net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}, path::{Path, PathBuf}, }; @@ -715,7 +715,7 @@ pub struct Config { /// Currently, conduwuit doesn't support inbound batched key requests, so /// this list should only contain other Synapse servers. 
/// - /// example: ["matrix.org", "envs.net", "tchncs.de"] + /// example: ["matrix.org", "tchncs.de"] /// /// default: ["matrix.org"] #[serde(default = "default_trusted_servers")] @@ -1361,15 +1361,18 @@ pub struct Config { #[serde(default)] pub prune_missing_media: bool, - /// Vector list of servers that conduwuit will refuse to download remote - /// media from. + /// Vector list of regex patterns of server names that conduwuit will refuse + /// to download remote media from. + /// + /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] /// /// default: [] - #[serde(default)] - pub prevent_media_downloads_from: HashSet, + #[serde(default, with = "serde_regex")] + pub prevent_media_downloads_from: RegexSet, - /// List of forbidden server names that we will block incoming AND outgoing - /// federation with, and block client room joins / remote user invites. + /// List of forbidden server names via regex patterns that we will block + /// incoming AND outgoing federation with, and block client room joins / + /// remote user invites. /// /// This check is applied on the room ID, room alias, sender server name, /// sender user's server name, inbound federation X-Matrix origin, and @@ -1377,17 +1380,21 @@ pub struct Config { /// /// Basically "global" ACLs. /// - /// default: [] - #[serde(default)] - pub forbidden_remote_server_names: HashSet, - - /// List of forbidden server names that we will block all outgoing federated - /// room directory requests for. Useful for preventing our users from - /// wandering into bad servers or spaces. + /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] /// /// default: [] - #[serde(default = "HashSet::new")] - pub forbidden_remote_room_directory_server_names: HashSet, + #[serde(default, with = "serde_regex")] + pub forbidden_remote_server_names: RegexSet, + + /// List of forbidden server names via regex patterns that we will block all + /// outgoing federated room directory requests for. Useful for preventing + /// our users from wandering into bad servers or spaces. + /// + /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] + /// + /// default: [] + #[serde(default, with = "serde_regex")] + pub forbidden_remote_room_directory_server_names: RegexSet, /// Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you /// do not want conduwuit to send outbound requests to. Defaults to @@ -1508,11 +1515,10 @@ pub struct Config { /// used, and startup as warnings if any room aliases in your database have /// a forbidden room alias/ID. /// - /// example: ["19dollarfortnitecards", "b[4a]droom"] + /// example: ["19dollarfortnitecards", "b[4a]droom", "badphrase"] /// /// default: [] - #[serde(default)] - #[serde(with = "serde_regex")] + #[serde(default, with = "serde_regex")] pub forbidden_alias_names: RegexSet, /// List of forbidden username patterns/strings. @@ -1524,11 +1530,10 @@ pub struct Config { /// startup as warnings if any local users in your database have a forbidden /// username. 
/// - /// example: ["administrator", "b[a4]dusernam[3e]"] + /// example: ["administrator", "b[a4]dusernam[3e]", "badphrase"] /// /// default: [] - #[serde(default)] - #[serde(with = "serde_regex")] + #[serde(default, with = "serde_regex")] pub forbidden_usernames: RegexSet, /// Retry failed and incomplete messages to remote servers immediately upon diff --git a/src/service/federation/execute.rs b/src/service/federation/execute.rs index 63f2ccfb..97314ffb 100644 --- a/src/service/federation/execute.rs +++ b/src/service/federation/execute.rs @@ -69,7 +69,7 @@ where .server .config .forbidden_remote_server_names - .contains(dest) + .is_match(dest.host()) { return Err!(Request(Forbidden(debug_warn!("Federation with {dest} is not allowed.")))); } diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs index b6c853d2..cdcb429e 100644 --- a/src/service/media/remote.rs +++ b/src/service/media/remote.rs @@ -426,7 +426,13 @@ fn check_fetch_authorized(&self, mxc: &Mxc<'_>) -> Result<()> { .server .config .prevent_media_downloads_from - .contains(mxc.server_name) + .is_match(mxc.server_name.host()) + || self + .services + .server + .config + .forbidden_remote_server_names + .is_match(mxc.server_name.host()) { // we'll lie to the client and say the blocked server's media was not found and // log. the client has no way of telling anyways so this is a security bonus. From 99868b166173d7bd510a7f2dd3a1b1e415a99682 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Sun, 6 Apr 2025 15:30:01 -0400 Subject: [PATCH 208/310] update new complement flakes Signed-off-by: June Clementine Strawberry --- .../complement/test_results.jsonl | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/tests/test_results/complement/test_results.jsonl b/tests/test_results/complement/test_results.jsonl index c0e28750..97c2e1b1 100644 --- a/tests/test_results/complement/test_results.jsonl +++ b/tests/test_results/complement/test_results.jsonl @@ -491,7 +491,7 @@ {"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself"} {"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel"} {"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Joining_room_twice_is_idempotent"} -{"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Room_creation_reports_m.room.create_to_myself"} +{"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel/Room_creation_reports_m.room.create_to_myself"} {"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Room_creation_reports_m.room.member_to_myself"} {"Action":"pass","Test":"TestRoomCreationReportsEventsToMyself/parallel/Setting_room_topic_reports_m.room.topic_to_myself"} {"Action":"fail","Test":"TestRoomCreationReportsEventsToMyself/parallel/Setting_state_twice_is_idempotent"} @@ -527,17 +527,17 @@ {"Action":"pass","Test":"TestRoomMessagesLazyLoadingLocalUser"} {"Action":"pass","Test":"TestRoomReadMarkers"} {"Action":"pass","Test":"TestRoomReceipts"} -{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin"} -{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Bob_can_find_Alice_by_mxid"} -{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Bob_can_find_Alice_by_profile_display_name"} -{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Eve_can_find_Alice_by_mxid"} -{"Action":"fail","Test":"TestRoomSpecificUsernameAtJoin/Eve_can_find_Alice_by_profile_display_name"} +{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin"} 
+{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Bob_can_find_Alice_by_mxid"} +{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Bob_can_find_Alice_by_profile_display_name"} +{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Eve_can_find_Alice_by_mxid"} +{"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Eve_can_find_Alice_by_profile_display_name"} {"Action":"pass","Test":"TestRoomSpecificUsernameAtJoin/Eve_cannot_find_Alice_by_room-specific_name_that_Eve_is_not_privy_to"} -{"Action":"fail","Test":"TestRoomSpecificUsernameChange"} -{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Bob_can_find_Alice_by_mxid"} -{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Bob_can_find_Alice_by_profile_display_name"} -{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Eve_can_find_Alice_by_mxid"} -{"Action":"fail","Test":"TestRoomSpecificUsernameChange/Eve_can_find_Alice_by_profile_display_name"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Bob_can_find_Alice_by_mxid"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Bob_can_find_Alice_by_profile_display_name"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Eve_can_find_Alice_by_mxid"} +{"Action":"pass","Test":"TestRoomSpecificUsernameChange/Eve_can_find_Alice_by_profile_display_name"} {"Action":"pass","Test":"TestRoomSpecificUsernameChange/Eve_cannot_find_Alice_by_room-specific_name_that_Eve_is_not_privy_to"} {"Action":"fail","Test":"TestRoomState"} {"Action":"fail","Test":"TestRoomState/Parallel"} @@ -589,7 +589,7 @@ {"Action":"fail","Test":"TestSync/parallel/Newly_joined_room_has_correct_timeline_in_incremental_sync"} {"Action":"fail","Test":"TestSync/parallel/Newly_joined_room_includes_presence_in_incremental_sync"} {"Action":"pass","Test":"TestSync/parallel/Newly_joined_room_is_included_in_an_incremental_sync"} -{"Action":"fail","Test":"TestSync/parallel/sync_should_succeed_even_if_the_sync_token_points_to_a_redaction_of_an_unknown_event"} +{"Action":"pass","Test":"TestSync/parallel/sync_should_succeed_even_if_the_sync_token_points_to_a_redaction_of_an_unknown_event"} {"Action":"pass","Test":"TestSyncFilter"} {"Action":"pass","Test":"TestSyncFilter/Can_create_filter"} {"Action":"pass","Test":"TestSyncFilter/Can_download_filter"} From 47f83454570a1d4338137708b4b042e8c49b7cb7 Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 8 Apr 2025 09:05:49 -0400 Subject: [PATCH 209/310] bump tokio because of RUSTSEC-2025-0023 Signed-off-by: June Clementine Strawberry --- Cargo.lock | 4 ++-- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8817af1a..c724e31e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4758,9 +4758,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.44.1" +version = "1.44.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f382da615b842244d4b8738c82ed1275e6c5dd90c459a30941cd07080b06c91a" +checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48" dependencies = [ "backtrace", "bytes", diff --git a/Cargo.toml b/Cargo.toml index 62350dee..f5ee3f0f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -242,7 +242,7 @@ default-features = false features = ["std", "async-await"] [workspace.dependencies.tokio] -version = "1.44.1" +version = "1.44.2" default-features = false features = [ "fs", From d8311a5ff672fdc4729d956af5e3af8646b0670d 
Mon Sep 17 00:00:00 2001 From: June Clementine Strawberry Date: Tue, 8 Apr 2025 23:38:54 -0400 Subject: [PATCH 210/310] bump crossbeam-channel bc yanked crate with potential double free Signed-off-by: June Clementine Strawberry --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c724e31e..d81fdbc0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1119,9 +1119,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.14" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" dependencies = [ "crossbeam-utils", ] From e054a56b3286a6fb3091bedd5261089435ed26d1 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Mon, 14 Apr 2025 22:34:22 +0100 Subject: [PATCH 211/310] docs: New readme It's a continuwuation! --- README.md | 214 +++++++++++++++++++++--------------------------------- 1 file changed, 82 insertions(+), 132 deletions(-) diff --git a/README.md b/README.md index d8f99d45..89e1a299 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,6 @@ -# conduwuit +# continuwuity + -### a very cool [Matrix](https://matrix.org/) chat homeserver written in Rust +## A community-driven [Matrix](https://matrix.org/) homeserver in Rust -Visit the [conduwuit documentation](https://conduwuit.puppyirl.gay/) for more -information and how to deploy/setup conduwuit. +[continuwuity] is a Matrix homeserver written in Rust. +It's a community continuation of the [conduwuit](https://github.com/girlbossceo/conduwuit) homeserver. -#### What is Matrix? + +### Why does this exist? + +The original conduwuit project has been archived and is no longer maintained. Rather than letting this Rust-based Matrix homeserver disappear, a group of community contributors have forked the project to continue its development, fix outstanding issues, and add new features. + +Our aim is to provide a stable, well-maintained alternative for current conduwuit users and to welcome new users looking for a lightweight, efficient Matrix homeserver. + + +### Who are we? + +We are a group of Matrix enthusiasts, developers and system administrators who have used conduwuit and believe in its potential. Our team includes both previous +contributors to the original project and new developers who want to help maintain and improve this important piece of Matrix infrastructure. + +We operate as an open community project, welcoming contributions from anyone interested in improving continuwuity. + +### What is Matrix? [Matrix](https://matrix.org) is an open, federated, and extensible network for -decentralised communication. Users from any Matrix homeserver can chat with users from all +decentralized communication. Users from any Matrix homeserver can chat with users from all other homeservers over federation. Matrix is designed to be extensible and built on top of. You can even use bridges such as Matrix Appservices to communicate with users outside of Matrix, like a community on Discord. -#### What is the goal? +### What are the project's goals? -A high-performance, efficient, low-cost, and featureful Matrix homeserver that's -easy to set up and just works with minimal configuration needed. +Continuwuity aims to: -#### Can I try it out? 
+- Maintain a stable, reliable Matrix homeserver implementation in Rust +- Improve compatibility and specification compliance with the Matrix protocol +- Fix bugs and performance issues from the original conduwuit +- Add missing features needed by homeserver administrators +- Provide comprehensive documentation and easy deployment options +- Create a sustainable development model for long-term maintenance +- Keep a lightweight, efficient codebase that can run on modest hardware -An official conduwuit server ran by me is available at transfem.dev -([element.transfem.dev](https://element.transfem.dev) / -[cinny.transfem.dev](https://cinny.transfem.dev)) +### Can I try it out? -transfem.dev is a public homeserver that can be used, it is not a "test only -homeserver". This means there are rules, so please read the rules: -[https://transfem.dev/homeserver_rules.txt](https://transfem.dev/homeserver_rules.txt) +Not right now. We've still got work to do! -transfem.dev is also listed at -[servers.joinmatrix.org](https://servers.joinmatrix.org/), which is a list of -popular public Matrix homeservers, including some others that run conduwuit. -#### What is the current status? +### What are we working on? -conduwuit is technically a hard fork of [Conduit](https://conduit.rs/), which is in beta. -The beta status initially was inherited from Conduit, however the huge amount of -codebase divergance, changes, fixes, and improvements have effectively made this -beta status not entirely applicable to us anymore. +We're working our way through all of the issues in the [Forgejo project](https://forgejo.ellis.link/continuwuation/continuwuity/issues). -conduwuit is very stable based on our rapidly growing userbase, has lots of features that users -expect, and very usable as a daily driver for small, medium, and upper-end medium sized homeservers. +- [Replacing old conduwuit links with working continuwuity links](https://forgejo.ellis.link/continuwuation/continuwuity/issues/742) +- [Getting CI and docs deployment working on the new Forgejo project](https://forgejo.ellis.link/continuwuation/continuwuity/issues/740) +- [Packaging & availability in more places](https://forgejo.ellis.link/continuwuation/continuwuity/issues/747) +- [Appservices bugs & features](https://forgejo.ellis.link/continuwuation/continuwuity/issues?q=&type=all&state=open&labels=178&milestone=0&assignee=0&poster=0) +- [Improving compatibility and spec compliance](https://forgejo.ellis.link/continuwuation/continuwuity/issues?labels=119) +- Automated testing +- [Admin API](https://forgejo.ellis.link/continuwuation/continuwuity/issues/748) +- [Policy-list controlled moderation](https://forgejo.ellis.link/continuwuation/continuwuity/issues/750) -A lot of critical stability and performance issues have been fixed, and a lot of -necessary groundwork has finished; making this project way better than it was -back in the start at ~early 2024. +### Can I migrate my data from x? -#### Where is the differences page? +- Conduwuit: Yes +- Conduit: No, database is now incompatible +- Grapevine: No, database is now incompatible +- Dendrite: No +- Synapse: No -conduwuit historically had a "differences" page that listed each and every single -different thing about conduwuit from Conduit, as a way to promote and advertise -conduwuit by showing significant amounts of work done. 
While this was feasible to
-maintain back when the project was new in early-2024, this became impossible
-very quickly and has unfortunately became heavily outdated, missing tons of things, etc
-
-It's difficult to list out what we do differently, what are our notable features, etc
-when there's so many things and features and bug fixes and performance optimisations,
-the list goes on. We simply recommend folks to just try out conduwuit, or ask us
-what features you are looking for and if they're implemented in conduwuit.
-
-#### How is conduwuit funded? Is conduwuit sustainable?
-
-conduwuit has no external funding. This is made possible purely in my freetime with
-contributors, also in their free time, and only by user-curated donations.
-
-conduwuit has existed since around November 2023, but [only became more publicly known
-in March/April 2024](https://matrix.org/blog/2024/04/26/this-week-in-matrix-2024-04-26/#conduwuit-website)
-and we have no plans in stopping or slowing down any time soon!
-
-#### Can I migrate or switch from Conduit?
-
-conduwuit had drop-in migration/replacement support for Conduit for about 12 months before
-bugs somewhere along the line broke it. Maintaining this has been difficult and
-the majority of Conduit users have already migrated, additionally debugging Conduit
-is not one of our interests, and so Conduit migration no longer works. We also
-feel that 12 months has been plenty of time for people to seamlessly migrate.
-
-If you are a Conduit user looking to migrate, you will have to wipe and reset
-your database. We may fix seamless migration support at some point, but it's not an interest
-from us.
-
-#### Can I migrate from Synapse or Dendrite?
-
-Currently there is no known way to seamlessly migrate all user data from the old
-homeserver to conduwuit. However it is perfectly acceptable to replace the old
-homeserver software with conduwuit using the same server name and there will not
-be any issues with federation.
-
-There is an interest in developing a built-in seamless user data migration
-method into conduwuit, however there is no concrete ETA or timeline for this.
+Although you can't migrate your data from other homeservers, it is perfectly acceptable to set up continuwuity on the same domain as a previous homeserver.
+## Contribution
+
+### Development flow
+
+- Features / changes must be developed in a separate branch
+- For each change, create a descriptive PR
+- Your code will be reviewed by one or more of the continuwuity developers
+- The branch will be deployed live on multiple testers' Matrix servers to shake out bugs
+- Once all testers and reviewers have agreed, the PR will be merged to the main branch
+- The main branch will have nightly builds deployed to users on the cutting edge
+- Every week or two, a new release is cut.
+
+The main branch is always green!
+
+
+### Policy on pulling from other forks
+
+We welcome contributions from other forks of conduwuit, subject to our review process.
+When incorporating code from other forks: + +- All external contributions must go through our standard PR process +- Code must meet our quality standards and pass tests +- Code changes will require testing on multiple test servers before merging +- Attribution will be given to original authors and forks +- We prioritize stability and compatibility when evaluating external contributions +- Features that align with our project goals will be given priority consideration + #### Contact -[`#conduwuit:puppygock.gay`](https://matrix.to/#/#conduwuit:puppygock.gay) -is the official project Matrix room. You can get support here, ask questions or -concerns, get assistance setting up conduwuit, etc. - -This room should stay relevant and focused on conduwuit. An offtopic general -chatter room can be found in the room topic there as well. - -Please keep the issue trackers focused on *actual* bug reports and enhancement requests. - -General support is extremely difficult to be offered over an issue tracker, and -simple questions should be asked directly in an interactive platform like our -Matrix room above as they can turn into a relevant discussion and/or may not be -simple to answer. If you're not sure, just ask in the Matrix room. - -If you have a bug or feature to request: [Open an issue on GitHub](https://github.com/girlbossceo/conduwuit/issues/new) - -If you need to contact the primary maintainer, my contact methods are on my website: https://girlboss.ceo - -#### Donate - -conduwuit development is purely made possible by myself and contributors. I do -not get paid to work on this, and I work on it in my free time. Donations are -heavily appreciated! 💜🥺 - -- Liberapay: -- GitHub Sponsors: -- Ko-fi: - -I do not and will not accept cryptocurrency donations, including things related. - -Note that donations will NOT guarantee you or give you any kind of tangible product, -feature prioritisation, etc. By donating, you are agreeing that conduwuit is NOT -going to provide you any goods or services as part of your donation, and this -donation is purely a generous donation. We will not provide things like paid -personal/direct support, feature request priority, merchandise, etc. - -#### Logo - -Original repo and Matrix room picture was from bran (<3). Current banner image -and logo is directly from [this cohost -post](https://web.archive.org/web/20241126004041/https://cohost.org/RatBaby/post/1028290-finally-a-flag-for). - -An SVG logo made by [@nktnet1](https://github.com/nktnet1) is available here: - -#### Is it conduwuit or Conduwuit? - -Both, but I prefer conduwuit. 
- -#### Mirrors of conduwuit - -If GitHub is unavailable in your country, or has poor connectivity, conduwuit's -source code is mirrored onto the following additional platforms I maintain: - -- GitHub: -- GitLab: -- git.girlcock.ceo: -- git.gay: -- mau.dev: -- Codeberg: -- sourcehut: + + + +[continuwuity]: https://forgejo.ellis.link/continuwuation/continuwuity From 57d26dae0d35f8be9e66054479261ca33a1ea42c Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Mon, 14 Apr 2025 22:58:30 +0100 Subject: [PATCH 212/310] docs: Remove hidden conduwuit badges --- README.md | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/README.md b/README.md index 89e1a299..f61f6a87 100644 --- a/README.md +++ b/README.md @@ -1,18 +1,5 @@ # continuwuity - - - ## A community-driven [Matrix](https://matrix.org/) homeserver in Rust From 3e54c7e69163ebfcc24414dad4888b4d8a4380b8 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Mon, 14 Apr 2025 23:01:22 +0100 Subject: [PATCH 213/310] docs: Phrasing --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index f61f6a87..24a34d18 100644 --- a/README.md +++ b/README.md @@ -16,8 +16,7 @@ It's a community continuation of the [conduwuit](https://github.com/girlbossceo/ The original conduwuit project has been archived and is no longer maintained. Rather than letting this Rust-based Matrix homeserver disappear, a group of community contributors have forked the project to continue its development, fix outstanding issues, and add new features. -Our aim is to provide a stable, well-maintained alternative for current conduwuit users and to welcome new users looking for a lightweight, efficient Matrix homeserver. - +We aim to provide a stable, well-maintained alternative for current Conduit users and welcome newcomers seeking a lightweight, efficient Matrix homeserver. ### Who are we? From 4f9e9174e2aaabcb45a81ec33ec9284159bc59fd Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Mon, 14 Apr 2025 23:06:39 +0100 Subject: [PATCH 214/310] docs: Mention future migration guide --- README.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/README.md b/README.md index 24a34d18..deaed364 100644 --- a/README.md +++ b/README.md @@ -70,8 +70,7 @@ We're working our way through all of the issues in the [Forgejo project](https:/ - Dendrite: No - Synapse: No -Although you can't migrate your data from other homeservers, it is perfectly acceptable to set up continuwuity on the same domain as a previous homeserver. - +We haven't written up a guide on migrating from incompatible homeservers yet. Reach out to us if you need to do this! 
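The README changes above note that continuwuity can be deployed on the same domain as a previous homeserver even though user data cannot be migrated. As a rough sketch of how that usually works (not part of these patches; the hostnames below are placeholders, not values from this series), the base domain only needs to serve the standard Matrix delegation documents pointing at the new deployment:

```
# /.well-known/matrix/server - federation delegation (placeholder hostname)
{"m.server": "matrix.example.com:443"}

# /.well-known/matrix/client - client discovery (placeholder base URL)
{"m.homeserver": {"base_url": "https://matrix.example.com"}}
```

A later patch in this series ("Add Matrix .well-known files") adds the continuwuity.org equivalents of these documents under docs/static/.
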
From 35bffa5970e311a2b2ffa7c23ecd77121c36dfbe Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 15 Apr 2025 10:25:49 +0100 Subject: [PATCH 215/310] ci: Delete all old CI files Part of #753 --- .github/workflows/ci.yml | 717 ------------------- .github/workflows/docker-hub-description.yml | 41 -- .github/workflows/documentation.yml | 104 --- .github/workflows/release.yml | 118 --- .gitlab-ci.yml | 152 ---- .gitlab/merge_request_templates/MR.md | 8 - .gitlab/route-map.yml | 3 - 7 files changed, 1143 deletions(-) delete mode 100644 .github/workflows/ci.yml delete mode 100644 .github/workflows/docker-hub-description.yml delete mode 100644 .github/workflows/documentation.yml delete mode 100644 .github/workflows/release.yml delete mode 100644 .gitlab-ci.yml delete mode 100644 .gitlab/merge_request_templates/MR.md delete mode 100644 .gitlab/route-map.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml deleted file mode 100644 index 5043f23b..00000000 --- a/.github/workflows/ci.yml +++ /dev/null @@ -1,717 +0,0 @@ -name: CI and Artifacts - -on: - pull_request: - push: - paths-ignore: - - '.gitlab-ci.yml' - - '.gitignore' - - 'renovate.json' - - 'debian/**' - - 'docker/**' - branches: - - main - tags: - - '*' - # Allows you to run this workflow manually from the Actions tab - workflow_dispatch: - -concurrency: - group: ${{ github.head_ref || github.ref_name }} - cancel-in-progress: true - -env: - # Required to make some things output color - TERM: ansi - # Publishing to my nix binary cache - ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }} - # conduwuit.cachix.org - CACHIX_AUTH_TOKEN: ${{ secrets.CACHIX_AUTH_TOKEN }} - # Just in case incremental is still being set to true, speeds up CI - CARGO_INCREMENTAL: 0 - # Custom nix binary cache if fork is being used - ATTIC_ENDPOINT: ${{ vars.ATTIC_ENDPOINT }} - ATTIC_PUBLIC_KEY: ${{ vars.ATTIC_PUBLIC_KEY }} - # Get error output from nix that we can actually use, and use our binary caches for the earlier CI steps - NIX_CONFIG: | - show-trace = true - extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net https://nix-community.cachix.org https://crane.cachix.org - extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs= crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk= - experimental-features = nix-command flakes - extra-experimental-features = nix-command flakes - accept-flake-config = true - WEB_UPLOAD_SSH_USERNAME: ${{ secrets.WEB_UPLOAD_SSH_USERNAME }} - GH_REF_NAME: ${{ github.ref_name }} - WEBSERVER_DIR_NAME: ${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }} - -permissions: {} - -jobs: - tests: - name: Test - runs-on: self-hosted - steps: - - name: Setup SSH web publish - env: - web_upload_ssh_private_key: ${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }} - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' - run: | - mkdir -p -v ~/.ssh - - echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts - echo "${{ 
secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519 - - chmod 600 ~/.ssh/id_ed25519 - - cat >>~/.ssh/config <> "$GITHUB_ENV" - - - name: Sync repository - uses: actions/checkout@v4 - with: - persist-credentials: false - - - name: Tag comparison check - if: ${{ startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') }} - run: | - # Tag mismatch with latest repo tag check to prevent potential downgrades - LATEST_TAG=$(git describe --tags `git rev-list --tags --max-count=1`) - - if [ ${LATEST_TAG} != ${GH_REF_NAME} ]; then - echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.' - echo '# WARNING: Attempting to run this workflow for a tag that is not the latest repo tag. Aborting.' >> $GITHUB_STEP_SUMMARY - exit 1 - fi - - - name: Prepare build environment - run: | - echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc" - direnv allow - nix develop .#all-features --command true - - - name: Cache CI dependencies - run: | - bin/nix-build-and-cache ci - bin/nix-build-and-cache just '.#devShells.x86_64-linux.default' - bin/nix-build-and-cache just '.#devShells.x86_64-linux.all-features' - bin/nix-build-and-cache just '.#devShells.x86_64-linux.dynamic' - - # use rust-cache - - uses: Swatinem/rust-cache@v2 - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - #if: ${{ !startsWith(github.ref, 'refs/tags/') }} - with: - cache-all-crates: "true" - cache-on-failure: "true" - cache-targets: "true" - - - name: Run CI tests - env: - CARGO_PROFILE: "test" - run: | - direnv exec . engage > >(tee -a test_output.log) - - - name: Run Complement tests - env: - CARGO_PROFILE: "test" - run: | - # the nix devshell sets $COMPLEMENT_SRC, so "/dev/null" is no-op - direnv exec . 
bin/complement "/dev/null" complement_test_logs.jsonl complement_test_results.jsonl > >(tee -a test_output.log) - cp -v -f result complement_oci_image.tar.gz - - - name: Upload Complement OCI image - uses: actions/upload-artifact@v4 - with: - name: complement_oci_image.tar.gz - path: complement_oci_image.tar.gz - if-no-files-found: error - compression-level: 0 - - - name: Upload Complement logs - uses: actions/upload-artifact@v4 - with: - name: complement_test_logs.jsonl - path: complement_test_logs.jsonl - if-no-files-found: error - - - name: Upload Complement results - uses: actions/upload-artifact@v4 - with: - name: complement_test_results.jsonl - path: complement_test_results.jsonl - if-no-files-found: error - - - name: Diff Complement results with checked-in repo results - run: | - diff -u --color=always tests/test_results/complement/test_results.jsonl complement_test_results.jsonl > >(tee -a complement_diff_output.log) - - - name: Update Job Summary - env: - GH_JOB_STATUS: ${{ job.status }} - if: success() || failure() - run: | - if [ ${GH_JOB_STATUS} == 'success' ]; then - echo '# ✅ CI completed suwuccessfully' >> $GITHUB_STEP_SUMMARY - else - echo '# ❌ CI failed (last 100 lines of output)' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - tail -n 100 test_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - - echo '# Complement diff results (last 100 lines)' >> $GITHUB_STEP_SUMMARY - echo '```diff' >> $GITHUB_STEP_SUMMARY - tail -n 100 complement_diff_output.log | sed 's/\x1b\[[0-9;]*m//g' >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - fi - - build: - name: Build - runs-on: self-hosted - strategy: - matrix: - include: - - target: aarch64-linux-musl - - target: x86_64-linux-musl - steps: - - name: Sync repository - uses: actions/checkout@v4 - with: - persist-credentials: false - - - name: Setup SSH web publish - env: - web_upload_ssh_private_key: ${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }} - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && (env.web_upload_ssh_private_key != '') && github.event.pull_request.user.login != 'renovate[bot]' - run: | - mkdir -p -v ~/.ssh - - echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts - echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519 - - chmod 600 ~/.ssh/id_ed25519 - - cat >>~/.ssh/config <> "$GITHUB_ENV" - - - name: Prepare build environment - run: | - echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc" - direnv allow - nix develop .#all-features --command true --impure - - # use rust-cache - - uses: Swatinem/rust-cache@v2 - # we want a fresh-state when we do releases/tags to avoid potential cache poisoning attacks impacting - # releases and tags - #if: ${{ !startsWith(github.ref, 'refs/tags/') }} - with: - cache-all-crates: "true" - cache-on-failure: "true" - cache-targets: "true" - - - name: Build static ${{ matrix.target }}-all-features - run: | - if [[ ${{ matrix.target }} == "x86_64-linux-musl" ]] - then - CARGO_DEB_TARGET_TUPLE="x86_64-unknown-linux-musl" - elif [[ ${{ matrix.target }} == "aarch64-linux-musl" ]] - then - CARGO_DEB_TARGET_TUPLE="aarch64-unknown-linux-musl" - fi - - SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct) - - bin/nix-build-and-cache just .#static-${{ matrix.target }}-all-features - - mkdir -v -p target/release/ - mkdir -v -p target/$CARGO_DEB_TARGET_TUPLE/release/ - cp -v -f result/bin/conduwuit 
target/release/conduwuit - cp -v -f result/bin/conduwuit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit - direnv exec . cargo deb --verbose --no-build --no-strip -p conduwuit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/${{ matrix.target }}.deb - mv -v target/release/conduwuit static-${{ matrix.target }} - mv -v target/release/${{ matrix.target }}.deb ${{ matrix.target }}.deb - - - name: Build static x86_64-linux-musl-all-features-x86_64-haswell-optimised - if: ${{ matrix.target == 'x86_64-linux-musl' }} - run: | - CARGO_DEB_TARGET_TUPLE="x86_64-unknown-linux-musl" - SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct) - - bin/nix-build-and-cache just .#static-x86_64-linux-musl-all-features-x86_64-haswell-optimised - - mkdir -v -p target/release/ - mkdir -v -p target/$CARGO_DEB_TARGET_TUPLE/release/ - cp -v -f result/bin/conduwuit target/release/conduwuit - cp -v -f result/bin/conduwuit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit - direnv exec . cargo deb --verbose --no-build --no-strip -p conduwuit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/x86_64-linux-musl-x86_64-haswell-optimised.deb - mv -v target/release/conduwuit static-x86_64-linux-musl-x86_64-haswell-optimised - mv -v target/release/x86_64-linux-musl-x86_64-haswell-optimised.deb x86_64-linux-musl-x86_64-haswell-optimised.deb - - # quick smoke test of the x86_64 static release binary - - name: Quick smoke test the x86_64 static release binary - if: ${{ matrix.target == 'x86_64-linux-musl' }} - run: | - # GH actions default runners are x86_64 only - if file result/bin/conduwuit | grep x86-64; then - result/bin/conduwuit --version - result/bin/conduwuit --help - result/bin/conduwuit -Oserver_name="'$(date -u +%s).local'" -Odatabase_path="'/tmp/$(date -u +%s)'" --execute "server admin-notice awawawawawawawawawawa" --execute "server memory-usage" --execute "server shutdown" - fi - - - name: Build static debug ${{ matrix.target }}-all-features - run: | - if [[ ${{ matrix.target }} == "x86_64-linux-musl" ]] - then - CARGO_DEB_TARGET_TUPLE="x86_64-unknown-linux-musl" - elif [[ ${{ matrix.target }} == "aarch64-linux-musl" ]] - then - CARGO_DEB_TARGET_TUPLE="aarch64-unknown-linux-musl" - fi - - SOURCE_DATE_EPOCH=$(git log -1 --pretty=%ct) - - bin/nix-build-and-cache just .#static-${{ matrix.target }}-all-features-debug - - # > warning: dev profile is not supported and will be a hard error in the future. cargo-deb is for making releases, and it doesn't make sense to use it with dev profiles. - # so we need to coerce cargo-deb into thinking this is a release binary - mkdir -v -p target/release/ - mkdir -v -p target/$CARGO_DEB_TARGET_TUPLE/release/ - cp -v -f result/bin/conduwuit target/release/conduwuit - cp -v -f result/bin/conduwuit target/$CARGO_DEB_TARGET_TUPLE/release/conduwuit - direnv exec . 
cargo deb --verbose --no-build --no-strip -p conduwuit --target=$CARGO_DEB_TARGET_TUPLE --output target/release/${{ matrix.target }}-debug.deb - mv -v target/release/conduwuit static-${{ matrix.target }}-debug - mv -v target/release/${{ matrix.target }}-debug.deb ${{ matrix.target }}-debug.deb - - # quick smoke test of the x86_64 static debug binary - - name: Run x86_64 static debug binary - run: | - # GH actions default runners are x86_64 only - if file result/bin/conduwuit | grep x86-64; then - result/bin/conduwuit --version - fi - - # check validity of produced deb package, invalid debs will error on these commands - - name: Validate produced deb package - run: | - # List contents - dpkg-deb --contents ${{ matrix.target }}.deb - dpkg-deb --contents ${{ matrix.target }}-debug.deb - # List info - dpkg-deb --info ${{ matrix.target }}.deb - dpkg-deb --info ${{ matrix.target }}-debug.deb - - - name: Upload static-x86_64-linux-musl-all-features-x86_64-haswell-optimised to GitHub - uses: actions/upload-artifact@v4 - if: ${{ matrix.target == 'x86_64-linux-musl' }} - with: - name: static-x86_64-linux-musl-x86_64-haswell-optimised - path: static-x86_64-linux-musl-x86_64-haswell-optimised - if-no-files-found: error - - - name: Upload static-${{ matrix.target }}-all-features to GitHub - uses: actions/upload-artifact@v4 - with: - name: static-${{ matrix.target }} - path: static-${{ matrix.target }} - if-no-files-found: error - - - name: Upload static deb ${{ matrix.target }}-all-features to GitHub - uses: actions/upload-artifact@v4 - with: - name: deb-${{ matrix.target }} - path: ${{ matrix.target }}.deb - if-no-files-found: error - compression-level: 0 - - - name: Upload static-x86_64-linux-musl-all-features-x86_64-haswell-optimised to webserver - if: ${{ matrix.target == 'x86_64-linux-musl' }} - run: | - if [ ! -z $SSH_WEBSITE ]; then - chmod +x static-x86_64-linux-musl-x86_64-haswell-optimised - scp static-x86_64-linux-musl-x86_64-haswell-optimised website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-x86_64-linux-musl-x86_64-haswell-optimised - fi - - - name: Upload static-${{ matrix.target }}-all-features to webserver - run: | - if [ ! -z $SSH_WEBSITE ]; then - chmod +x static-${{ matrix.target }} - scp static-${{ matrix.target }} website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-${{ matrix.target }} - fi - - - name: Upload static deb x86_64-linux-musl-all-features-x86_64-haswell-optimised to webserver - if: ${{ matrix.target == 'x86_64-linux-musl' }} - run: | - if [ ! -z $SSH_WEBSITE ]; then - scp x86_64-linux-musl-x86_64-haswell-optimised.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/x86_64-linux-musl-x86_64-haswell-optimised.deb - fi - - - name: Upload static deb ${{ matrix.target }}-all-features to webserver - run: | - if [ ! 
-z $SSH_WEBSITE ]; then - scp ${{ matrix.target }}.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/${{ matrix.target }}.deb - fi - - - name: Upload static-${{ matrix.target }}-debug-all-features to GitHub - uses: actions/upload-artifact@v4 - with: - name: static-${{ matrix.target }}-debug - path: static-${{ matrix.target }}-debug - if-no-files-found: error - - - name: Upload static deb ${{ matrix.target }}-debug-all-features to GitHub - uses: actions/upload-artifact@v4 - with: - name: deb-${{ matrix.target }}-debug - path: ${{ matrix.target }}-debug.deb - if-no-files-found: error - compression-level: 0 - - - name: Upload static-${{ matrix.target }}-debug-all-features to webserver - run: | - if [ ! -z $SSH_WEBSITE ]; then - scp static-${{ matrix.target }}-debug website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/static-${{ matrix.target }}-debug - fi - - - name: Upload static deb ${{ matrix.target }}-debug-all-features to webserver - run: | - if [ ! -z $SSH_WEBSITE ]; then - scp ${{ matrix.target }}-debug.deb website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/${{ matrix.target }}-debug.deb - fi - - - name: Build OCI image ${{ matrix.target }}-all-features - run: | - bin/nix-build-and-cache just .#oci-image-${{ matrix.target }}-all-features - - cp -v -f result oci-image-${{ matrix.target }}.tar.gz - - - name: Build OCI image x86_64-linux-musl-all-features-x86_64-haswell-optimised - if: ${{ matrix.target == 'x86_64-linux-musl' }} - run: | - bin/nix-build-and-cache just .#oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised - - cp -v -f result oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz - - - name: Build debug OCI image ${{ matrix.target }}-all-features - run: | - bin/nix-build-and-cache just .#oci-image-${{ matrix.target }}-all-features-debug - - cp -v -f result oci-image-${{ matrix.target }}-debug.tar.gz - - - name: Upload OCI image x86_64-linux-musl-all-features-x86_64-haswell-optimised to GitHub - if: ${{ matrix.target == 'x86_64-linux-musl' }} - uses: actions/upload-artifact@v4 - with: - name: oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised - path: oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz - if-no-files-found: error - compression-level: 0 - - name: Upload OCI image ${{ matrix.target }}-all-features to GitHub - uses: actions/upload-artifact@v4 - with: - name: oci-image-${{ matrix.target }} - path: oci-image-${{ matrix.target }}.tar.gz - if-no-files-found: error - compression-level: 0 - - - name: Upload OCI image ${{ matrix.target }}-debug-all-features to GitHub - uses: actions/upload-artifact@v4 - with: - name: oci-image-${{ matrix.target }}-debug - path: oci-image-${{ matrix.target }}-debug.tar.gz - if-no-files-found: error - compression-level: 0 - - - name: Upload OCI image x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz to webserver - if: ${{ matrix.target == 'x86_64-linux-musl' }} - run: | - if [ ! -z $SSH_WEBSITE ]; then - scp oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised.tar.gz - fi - - - name: Upload OCI image ${{ matrix.target }}-all-features to webserver - run: | - if [ ! 
-z $SSH_WEBSITE ]; then - scp oci-image-${{ matrix.target }}.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-${{ matrix.target }}.tar.gz - fi - - - name: Upload OCI image ${{ matrix.target }}-debug-all-features to webserver - run: | - if [ ! -z $SSH_WEBSITE ]; then - scp oci-image-${{ matrix.target }}-debug.tar.gz website:/var/www/girlboss.ceo/~strawberry/conduwuit/ci-bins/${WEBSERVER_DIR_NAME}/oci-image-${{ matrix.target }}-debug.tar.gz - fi - - variables: - outputs: - github_repository: ${{ steps.var.outputs.github_repository }} - runs-on: self-hosted - steps: - - name: Setting global variables - uses: actions/github-script@v7 - id: var - with: - script: | - core.setOutput('github_repository', '${{ github.repository }}'.toLowerCase()) - docker: - name: Docker publish - runs-on: self-hosted - needs: [build, variables, tests] - permissions: - packages: write - contents: read - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && github.event.pull_request.user.login != 'renovate[bot]' - env: - DOCKER_HUB_REPO: docker.io/${{ needs.variables.outputs.github_repository }} - GHCR_REPO: ghcr.io/${{ needs.variables.outputs.github_repository }} - GLCR_REPO: registry.gitlab.com/conduwuit/conduwuit - UNIQUE_TAG: ${{ (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }}-${{ github.sha }} - BRANCH_TAG: ${{ (startsWith(github.ref, 'refs/tags/v') && !endsWith(github.ref, '-rc') && 'latest') || (github.head_ref != '' && format('merge-{0}-{1}', github.event.number, github.event.pull_request.user.login)) || github.ref_name }} - - DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} - GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }} - GHCR_ENABLED: "${{ (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false) && 'true' || 'false' }}" - steps: - - name: Login to GitHub Container Registry - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Login to Docker Hub - if: ${{ (vars.DOCKER_USERNAME != '') && (env.DOCKERHUB_TOKEN != '') }} - uses: docker/login-action@v3 - with: - registry: docker.io - username: ${{ vars.DOCKER_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - - - name: Login to GitLab Container Registry - if: ${{ (vars.GITLAB_USERNAME != '') && (env.GITLAB_TOKEN != '') }} - uses: docker/login-action@v3 - with: - registry: registry.gitlab.com - username: ${{ vars.GITLAB_USERNAME }} - password: ${{ secrets.GITLAB_TOKEN }} - - - name: Download artifacts - uses: actions/download-artifact@v4 - with: - pattern: "oci*" - - - name: Move OCI images into position - run: | - mv -v oci-image-x86_64-linux-musl-all-features-x86_64-haswell-optimised/*.tar.gz oci-image-amd64-haswell-optimised.tar.gz - mv -v oci-image-x86_64-linux-musl/*.tar.gz oci-image-amd64.tar.gz - mv -v oci-image-aarch64-linux-musl/*.tar.gz oci-image-arm64v8.tar.gz - mv -v oci-image-x86_64-linux-musl-debug/*.tar.gz oci-image-amd64-debug.tar.gz - mv -v oci-image-aarch64-linux-musl-debug/*.tar.gz oci-image-arm64v8-debug.tar.gz - - - name: Load and push amd64 haswell image - run: | - docker load -i oci-image-amd64-haswell-optimised.tar.gz - if [ ! 
-z $DOCKERHUB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell - docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell - fi - if [ $GHCR_ENABLED = "true" ]; then - docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-haswell - docker push ${GHCR_REPO}:${UNIQUE_TAG}-haswell - fi - if [ ! -z $GITLAB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-haswell - docker push ${GLCR_REPO}:${UNIQUE_TAG}-haswell - fi - - - name: Load and push amd64 image - run: | - docker load -i oci-image-amd64.tar.gz - if [ ! -z $DOCKERHUB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64 - docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64 - fi - if [ $GHCR_ENABLED = "true" ]; then - docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-amd64 - docker push ${GHCR_REPO}:${UNIQUE_TAG}-amd64 - fi - if [ ! -z $GITLAB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-amd64 - docker push ${GLCR_REPO}:${UNIQUE_TAG}-amd64 - fi - - - name: Load and push arm64 image - run: | - docker load -i oci-image-arm64v8.tar.gz - if [ ! -z $DOCKERHUB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 - docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 - fi - if [ $GHCR_ENABLED = "true" ]; then - docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 - docker push ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 - fi - if [ ! -z $GITLAB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 - docker push ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 - fi - - - name: Load and push amd64 debug image - run: | - docker load -i oci-image-amd64-debug.tar.gz - if [ ! -z $DOCKERHUB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug - docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug - fi - if [ $GHCR_ENABLED = "true" ]; then - docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug - docker push ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug - fi - if [ ! -z $GITLAB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug - docker push ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug - fi - - - name: Load and push arm64 debug image - run: | - docker load -i oci-image-arm64v8-debug.tar.gz - if [ ! -z $DOCKERHUB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug - docker push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug - fi - if [ $GHCR_ENABLED = "true" ]; then - docker tag $(docker images -q conduwuit:main) ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug - docker push ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug - fi - if [ ! -z $GITLAB_TOKEN ]; then - docker tag $(docker images -q conduwuit:main) ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug - docker push ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug - fi - - - name: Create Docker haswell manifests - run: | - # Dockerhub Container Registry - if [ ! 
-z $DOCKERHUB_TOKEN ]; then - docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell - docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell - fi - # GitHub Container Registry - if [ $GHCR_ENABLED = "true" ]; then - docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell - docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-haswell --amend ${GHCR_REPO}:${UNIQUE_TAG}-haswell - fi - # GitLab Container Registry - if [ ! -z $GITLAB_TOKEN ]; then - docker manifest create ${GLCR_REPO}:${UNIQUE_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell - docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-haswell --amend ${GLCR_REPO}:${UNIQUE_TAG}-haswell - fi - - - name: Create Docker combined manifests - run: | - # Dockerhub Container Registry - if [ ! -z $DOCKERHUB_TOKEN ]; then - docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG} --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64 - docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG} --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64 - fi - # GitHub Container Registry - if [ $GHCR_ENABLED = "true" ]; then - docker manifest create ${GHCR_REPO}:${UNIQUE_TAG} --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64 - docker manifest create ${GHCR_REPO}:${BRANCH_TAG} --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64 - fi - # GitLab Container Registry - if [ ! -z $GITLAB_TOKEN ]; then - docker manifest create ${GLCR_REPO}:${UNIQUE_TAG} --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64 - docker manifest create ${GLCR_REPO}:${BRANCH_TAG} --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8 --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64 - fi - - - name: Create Docker combined debug manifests - run: | - # Dockerhub Container Registry - if [ ! -z $DOCKERHUB_TOKEN ]; then - docker manifest create ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug - docker manifest create ${DOCKER_HUB_REPO}:${BRANCH_TAG}-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-amd64-debug - fi - # GitHub Container Registry - if [ $GHCR_ENABLED = "true" ]; then - docker manifest create ${GHCR_REPO}:${UNIQUE_TAG}-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug - docker manifest create ${GHCR_REPO}:${BRANCH_TAG}-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GHCR_REPO}:${UNIQUE_TAG}-amd64-debug - fi - # GitLab Container Registry - if [ ! -z $GITLAB_TOKEN ]; then - docker manifest create ${GLCR_REPO}:${UNIQUE_TAG}-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug - docker manifest create ${GLCR_REPO}:${BRANCH_TAG}-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-arm64v8-debug --amend ${GLCR_REPO}:${UNIQUE_TAG}-amd64-debug - fi - - - name: Push manifests to Docker registries - run: | - if [ ! 
-z $DOCKERHUB_TOKEN ]; then - docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG} - docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG} - docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug - docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG}-debug - docker manifest push ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell - docker manifest push ${DOCKER_HUB_REPO}:${BRANCH_TAG}-haswell - fi - if [ $GHCR_ENABLED = "true" ]; then - docker manifest push ${GHCR_REPO}:${UNIQUE_TAG} - docker manifest push ${GHCR_REPO}:${BRANCH_TAG} - docker manifest push ${GHCR_REPO}:${UNIQUE_TAG}-debug - docker manifest push ${GHCR_REPO}:${BRANCH_TAG}-debug - docker manifest push ${GHCR_REPO}:${UNIQUE_TAG}-haswell - docker manifest push ${GHCR_REPO}:${BRANCH_TAG}-haswell - fi - if [ ! -z $GITLAB_TOKEN ]; then - docker manifest push ${GLCR_REPO}:${UNIQUE_TAG} - docker manifest push ${GLCR_REPO}:${BRANCH_TAG} - docker manifest push ${GLCR_REPO}:${UNIQUE_TAG}-debug - docker manifest push ${GLCR_REPO}:${BRANCH_TAG}-debug - docker manifest push ${GLCR_REPO}:${UNIQUE_TAG}-haswell - docker manifest push ${GLCR_REPO}:${BRANCH_TAG}-haswell - fi - - - name: Add Image Links to Job Summary - run: | - if [ ! -z $DOCKERHUB_TOKEN ]; then - echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${DOCKER_HUB_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY - fi - if [ $GHCR_ENABLED = "true" ]; then - echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${GHCR_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY - fi - if [ ! 
-z $GITLAB_TOKEN ]; then - echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}-debug\`" >> $GITHUB_STEP_SUMMARY - echo "- \`docker pull ${GLCR_REPO}:${UNIQUE_TAG}-haswell\`" >> $GITHUB_STEP_SUMMARY - fi diff --git a/.github/workflows/docker-hub-description.yml b/.github/workflows/docker-hub-description.yml deleted file mode 100644 index b4f142db..00000000 --- a/.github/workflows/docker-hub-description.yml +++ /dev/null @@ -1,41 +0,0 @@ -name: Update Docker Hub Description - -on: - push: - branches: - - main - paths: - - README.md - - .github/workflows/docker-hub-description.yml - - workflow_dispatch: - -jobs: - dockerHubDescription: - runs-on: ubuntu-latest - if: ${{ (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main' || (github.event.pull_request.draft != true)) && github.event.pull_request.user.login != 'renovate[bot]' && (vars.DOCKER_USERNAME != '') }} - steps: - - uses: actions/checkout@v4 - with: - persist-credentials: false - - - name: Setting variables - uses: actions/github-script@v7 - id: var - with: - script: | - const githubRepo = '${{ github.repository }}'.toLowerCase() - const repoId = githubRepo.split('/')[1] - - core.setOutput('github_repository', githubRepo) - const dockerRepo = '${{ vars.DOCKER_USERNAME }}'.toLowerCase() + '/' + repoId - core.setOutput('docker_repo', dockerRepo) - - - name: Docker Hub Description - uses: peter-evans/dockerhub-description@v4 - with: - username: ${{ vars.DOCKER_USERNAME }} - password: ${{ secrets.DOCKERHUB_TOKEN }} - repository: ${{ steps.var.outputs.docker_repo }} - short-description: ${{ github.event.repository.description }} - enable-url-completion: true diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml deleted file mode 100644 index b5b4ff46..00000000 --- a/.github/workflows/documentation.yml +++ /dev/null @@ -1,104 +0,0 @@ -name: Documentation and GitHub Pages - -on: - pull_request: - push: - branches: - - main - tags: - - '*' - - # Allows you to run this workflow manually from the Actions tab - workflow_dispatch: - -env: - # Required to make some things output color - TERM: ansi - # Publishing to my nix binary cache - ATTIC_TOKEN: ${{ secrets.ATTIC_TOKEN }} - # conduwuit.cachix.org - CACHIX_AUTH_TOKEN: ${{ secrets.CACHIX_AUTH_TOKEN }} - # Custom nix binary cache if fork is being used - ATTIC_ENDPOINT: ${{ vars.ATTIC_ENDPOINT }} - ATTIC_PUBLIC_KEY: ${{ vars.ATTIC_PUBLIC_KEY }} - # Get error output from nix that we can actually use, and use our binary caches for the earlier CI steps - NIX_CONFIG: | - show-trace = true - extra-substituters = https://attic.kennel.juneis.dog/conduwuit https://attic.kennel.juneis.dog/conduit https://conduwuit.cachix.org https://aseipp-nix-cache.freetls.fastly.net https://nix-community.cachix.org https://crane.cachix.org - extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= cache.lix.systems:aBnZUw8zA7H35Cz2RyKFVs3H4PlGTLawyY5KRbvJR8o= conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs= crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk= - experimental-features = nix-command flakes - extra-experimental-features = nix-command flakes - accept-flake-config = true - -# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued. 
-# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete. -concurrency: - group: "pages" - cancel-in-progress: false - -permissions: {} - -jobs: - docs: - name: Documentation and GitHub Pages - runs-on: self-hosted - - permissions: - pages: write - id-token: write - - environment: - name: github-pages - url: ${{ steps.deployment.outputs.page_url }} - - steps: - - name: Sync repository - uses: actions/checkout@v4 - with: - persist-credentials: false - - - name: Setup GitHub Pages - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request') - uses: actions/configure-pages@v5 - - - name: Prepare build environment - run: | - echo 'source $HOME/.nix-profile/share/nix-direnv/direnvrc' > "$HOME/.direnvrc" - direnv allow - nix develop --command true - - - name: Cache CI dependencies - run: | - bin/nix-build-and-cache ci - - - name: Run lychee and markdownlint - run: | - direnv exec . engage just lints lychee - direnv exec . engage just lints markdownlint - - - name: Build documentation (book) - run: | - bin/nix-build-and-cache just .#book - - cp -r --dereference result public - chmod u+w -R public - - - name: Upload generated documentation (book) as normal artifact - uses: actions/upload-artifact@v4 - with: - name: public - path: public - if-no-files-found: error - # don't compress again - compression-level: 0 - - - name: Upload generated documentation (book) as GitHub Pages artifact - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request') - uses: actions/upload-pages-artifact@v3 - with: - path: public - - - name: Deploy to GitHub Pages - if: (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main') && (github.event_name != 'pull_request') - id: deployment - uses: actions/deploy-pages@v4 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml deleted file mode 100644 index cfe72d2a..00000000 --- a/.github/workflows/release.yml +++ /dev/null @@ -1,118 +0,0 @@ -name: Upload Release Assets - -on: - release: - types: [published] - workflow_dispatch: - inputs: - tag: - description: 'Tag to release' - required: true - type: string - action_id: - description: 'Action ID of the CI run' - required: true - type: string - -permissions: {} - -jobs: - publish: - runs-on: ubuntu-latest - permissions: - contents: write - env: - GH_EVENT_NAME: ${{ github.event_name }} - GH_EVENT_INPUTS_ACTION_ID: ${{ github.event.inputs.action_id }} - GH_EVENT_INPUTS_TAG: ${{ github.event.inputs.tag }} - GH_REPOSITORY: ${{ github.repository }} - GH_SHA: ${{ github.sha }} - GH_TAG: ${{ github.event.release.tag_name }} - - steps: - - name: get latest ci id - id: get_ci_id - env: - GH_TOKEN: ${{ github.token }} - run: | - if [ "${GH_EVENT_NAME}" == "workflow_dispatch" ]; then - id="${GH_EVENT_INPUTS_ACTION_ID}" - tag="${GH_EVENT_INPUTS_TAG}" - else - # get all runs of the ci workflow - json=$(gh api "repos/${GH_REPOSITORY}/actions/workflows/ci.yml/runs") - - # find first run that is github sha and status is completed - id=$(echo "$json" | jq ".workflow_runs[] | select(.head_sha == \"${GH_SHA}\" and .status == \"completed\") | .id" | head -n 1) - - if [ ! 
"$id" ]; then - echo "No completed runs found" - echo "ci_id=0" >> "$GITHUB_OUTPUT" - exit 0 - fi - - tag="${GH_TAG}" - fi - - echo "ci_id=$id" >> "$GITHUB_OUTPUT" - echo "tag=$tag" >> "$GITHUB_OUTPUT" - - - name: get latest ci artifacts - if: steps.get_ci_id.outputs.ci_id != 0 - uses: actions/download-artifact@v4 - env: - GH_TOKEN: ${{ github.token }} - with: - merge-multiple: true - run-id: ${{ steps.get_ci_id.outputs.ci_id }} - github-token: ${{ github.token }} - - - run: | - ls - - - name: upload release assets - if: steps.get_ci_id.outputs.ci_id != 0 - env: - GH_TOKEN: ${{ github.token }} - TAG: ${{ steps.get_ci_id.outputs.tag }} - run: | - for file in $(find . -type f); do - case "$file" in - *json*) echo "Skipping $file...";; - *) echo "Uploading $file..."; gh release upload $TAG "$file" --clobber --repo="${GH_REPOSITORY}" || echo "Something went wrong, skipping.";; - esac - done - - - name: upload release assets to website - if: steps.get_ci_id.outputs.ci_id != 0 - env: - TAG: ${{ steps.get_ci_id.outputs.tag }} - run: | - mkdir -p -v ~/.ssh - - echo "${{ secrets.WEB_UPLOAD_SSH_KNOWN_HOSTS }}" >> ~/.ssh/known_hosts - echo "${{ secrets.WEB_UPLOAD_SSH_PRIVATE_KEY }}" >> ~/.ssh/id_ed25519 - - chmod 600 ~/.ssh/id_ed25519 - - cat >>~/.ssh/config < /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null; then echo "extra-experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi - # Accept flake config from "untrusted" users - - if command -v nix > /dev/null; then echo "accept-flake-config = true" >> /etc/nix/nix.conf; fi - - # Add conduwuit binary cache - - if command -v nix > /dev/null; then echo "extra-substituters = https://attic.kennel.juneis.dog/conduwuit" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE=" >> /etc/nix/nix.conf; fi - - - if command -v nix > /dev/null; then echo "extra-substituters = https://attic.kennel.juneis.dog/conduit" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk=" >> /etc/nix/nix.conf; fi - - # Add alternate binary cache - - if command -v nix > /dev/null && [ -n "$ATTIC_ENDPOINT" ]; then echo "extra-substituters = $ATTIC_ENDPOINT" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null && [ -n "$ATTIC_PUBLIC_KEY" ]; then echo "extra-trusted-public-keys = $ATTIC_PUBLIC_KEY" >> /etc/nix/nix.conf; fi - - # Add crane binary cache - - if command -v nix > /dev/null; then echo "extra-substituters = https://crane.cachix.org" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = crane.cachix.org-1:8Scfpmn9w+hGdXH/Q9tTLiYAE/2dnJYRJP7kl80GuRk=" >> /etc/nix/nix.conf; fi - - # Add nix-community binary cache - - if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi - - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi - - - if command -v nix > /dev/null; then echo "extra-substituters = https://aseipp-nix-cache.freetls.fastly.net" >> /etc/nix/nix.conf; fi - - # Install direnv and nix-direnv - - if command -v nix > /dev/null; then nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv; fi - - # Allow .envrc - - if command -v nix > /dev/null; then direnv allow; fi - - # Set 
CARGO_HOME to a cacheable path - - export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo" - -ci: - stage: ci - image: nixos/nix:2.24.9 - script: - # Cache CI dependencies - - ./bin/nix-build-and-cache ci - - - direnv exec . engage - cache: - key: nix - paths: - - target - - .gitlab-ci.d - rules: - # CI on upstream runners (only available for maintainers) - - if: $CI_PIPELINE_SOURCE == "merge_request_event" && $IS_UPSTREAM_CI == "true" - # Manual CI on unprotected branches that are not MRs - - if: $CI_PIPELINE_SOURCE != "merge_request_event" && $CI_COMMIT_REF_PROTECTED == "false" - when: manual - # Manual CI on forks - - if: $IS_UPSTREAM_CI != "true" - when: manual - - if: $CI - interruptible: true - -artifacts: - stage: artifacts - image: nixos/nix:2.24.9 - script: - - ./bin/nix-build-and-cache just .#static-x86_64-linux-musl - - cp result/bin/conduit x86_64-linux-musl - - - mkdir -p target/release - - cp result/bin/conduit target/release - - direnv exec . cargo deb --no-build --no-strip - - mv target/debian/*.deb x86_64-linux-musl.deb - - # Since the OCI image package is based on the binary package, this has the - # fun side effect of uploading the normal binary too. Conduit users who are - # deploying with Nix can leverage this fact by adding our binary cache to - # their systems. - # - # Note that although we have an `oci-image-x86_64-linux-musl` - # output, we don't build it because it would be largely redundant to this - # one since it's all containerized anyway. - - ./bin/nix-build-and-cache just .#oci-image - - cp result oci-image-amd64.tar.gz - - - ./bin/nix-build-and-cache just .#static-aarch64-linux-musl - - cp result/bin/conduit aarch64-linux-musl - - - ./bin/nix-build-and-cache just .#oci-image-aarch64-linux-musl - - cp result oci-image-arm64v8.tar.gz - - - ./bin/nix-build-and-cache just .#book - # We can't just copy the symlink, we need to dereference it https://gitlab.com/gitlab-org/gitlab/-/issues/19746 - - cp -r --dereference result public - artifacts: - paths: - - x86_64-linux-musl - - aarch64-linux-musl - - x86_64-linux-musl.deb - - oci-image-amd64.tar.gz - - oci-image-arm64v8.tar.gz - - public - rules: - # CI required for all MRs - - if: $CI_PIPELINE_SOURCE == "merge_request_event" - # Optional CI on forks - - if: $IS_UPSTREAM_CI != "true" - when: manual - allow_failure: true - - if: $CI - interruptible: true - -pages: - stage: publish - dependencies: - - artifacts - only: - - next - script: - - "true" - artifacts: - paths: - - public diff --git a/.gitlab/merge_request_templates/MR.md b/.gitlab/merge_request_templates/MR.md deleted file mode 100644 index 4210554b..00000000 --- a/.gitlab/merge_request_templates/MR.md +++ /dev/null @@ -1,8 +0,0 @@ - - - ------------------------------------------------------------------------------ - -- [ ] I ran `cargo fmt`, `cargo clippy`, and `cargo test` -- [ ] I agree to release my code and all other changes of this MR under the Apache-2.0 license - diff --git a/.gitlab/route-map.yml b/.gitlab/route-map.yml deleted file mode 100644 index cf31bd18..00000000 --- a/.gitlab/route-map.yml +++ /dev/null @@ -1,3 +0,0 @@ -# Docs: Map markdown to html files -- source: /docs/(.+)\.md/ - public: '\1.html' From 57779df66a5fa6891894d7e96acac3a50b9dfecc Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 15 Apr 2025 12:05:04 +0100 Subject: [PATCH 216/310] chore: Add mailmap --- .mailmap | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 .mailmap diff --git a/.mailmap b/.mailmap new file mode 100644 index 
00000000..1909b3f5 --- /dev/null +++ b/.mailmap @@ -0,0 +1,13 @@ +AlexPewMaster <68469103+AlexPewMaster@users.noreply.github.com> +Daniel Wiesenberg +Devin Ragotzy +Devin Ragotzy +Jonas Platte +Jonas Zohren +Jonathan de Jong +June Clementine Strawberry +June Clementine Strawberry +June Clementine Strawberry +Rudi Floren +Tamara Schmitz <15906939+tamara-schmitz@users.noreply.github.com> +x4u <14617923-x4u@users.noreply.gitlab.com> From 6e5392c2f50d110b40f724725843dd74513ad8f4 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 15 Apr 2025 12:15:09 +0100 Subject: [PATCH 217/310] =?UTF-8?q?chore:=20Add=20Timo=20K=C3=B6sters=20to?= =?UTF-8?q?=20the=20mailmap?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .mailmap | 1 + 1 file changed, 1 insertion(+) diff --git a/.mailmap b/.mailmap index 1909b3f5..0cd8d7ec 100644 --- a/.mailmap +++ b/.mailmap @@ -10,4 +10,5 @@ June Clementine Strawberry June Clementine Strawberry Rudi Floren Tamara Schmitz <15906939+tamara-schmitz@users.noreply.github.com> +Timo Kösters x4u <14617923-x4u@users.noreply.gitlab.com> From 17a04940fca6ab39731c5f8815d5029bbf30762d Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 15 Apr 2025 21:58:39 +0100 Subject: [PATCH 218/310] chore: Update Olivia Lee in mailmap --- .mailmap | 1 + 1 file changed, 1 insertion(+) diff --git a/.mailmap b/.mailmap index 0cd8d7ec..fa267e13 100644 --- a/.mailmap +++ b/.mailmap @@ -8,6 +8,7 @@ Jonathan de Jong June Clementine Strawberry June Clementine Strawberry June Clementine Strawberry +Olivia Lee Rudi Floren Tamara Schmitz <15906939+tamara-schmitz@users.noreply.github.com> Timo Kösters From dad407fb22a991238926ab65b685ec55d2272ab9 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 15 Apr 2025 12:09:14 +0100 Subject: [PATCH 219/310] chore: Add words to cspell dictionary --- .vscode/settings.json | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 .vscode/settings.json diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000..a4fad964 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,11 @@ +{ + "cSpell.words": [ + "Forgejo", + "appservice", + "appservices", + "conduwuit", + "continuwuity", + "homeserver", + "homeservers" + ] +} From f76f669d163778a1ec768773da5cdbefc78539ec Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 15 Apr 2025 23:26:25 +0100 Subject: [PATCH 220/310] chore: Remove the default sentry endpoint --- conduwuit-example.toml | 2 +- src/core/config/mod.rs | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 118bc57d..af8da6bb 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1428,7 +1428,7 @@ # Sentry reporting URL, if a custom one is desired. # -#sentry_endpoint = "https://fe2eb4536aa04949e28eff3128d64757@o4506996327251968.ingest.us.sentry.io/4506996334657536" +#sentry_endpoint = "" # Report your conduwuit server_name in Sentry.io crash reports and # metrics. diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 0ca6bbaf..a7205423 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1636,7 +1636,7 @@ pub struct Config { /// Sentry reporting URL, if a custom one is desired. 
/// /// display: sensitive - /// default: "https://fe2eb4536aa04949e28eff3128d64757@o4506996327251968.ingest.us.sentry.io/4506996334657536" + /// default: "" #[serde(default = "default_sentry_endpoint")] pub sentry_endpoint: Option, @@ -2207,9 +2207,7 @@ fn default_url_preview_max_spider_size() -> usize { fn default_new_user_displayname_suffix() -> String { "🏳️‍⚧️".to_owned() } -fn default_sentry_endpoint() -> Option { - Url::parse("https://fe2eb4536aa04949e28eff3128d64757@o4506996327251968.ingest.us.sentry.io/4506996334657536").ok() -} +fn default_sentry_endpoint() -> Option { None } fn default_sentry_traces_sample_rate() -> f32 { 0.15 } From 90880e268966718c39b9970b0d564b0bad7f823d Mon Sep 17 00:00:00 2001 From: Tom Foster Date: Wed, 16 Apr 2025 02:56:22 +0100 Subject: [PATCH 221/310] Update mdBook config for continuwuity --- book.toml | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/book.toml b/book.toml index 7eb1983b..46d3a7b0 100644 --- a/book.toml +++ b/book.toml @@ -1,8 +1,8 @@ [book] -title = "conduwuit 🏳️‍⚧️ 💜 🦴" -description = "conduwuit, which is a well-maintained fork of Conduit, is a simple, fast and reliable chat server for the Matrix protocol" +title = "continuwuity" +description = "continuwuity is a community continuation of the conduwuit Matrix homeserver, written in Rust." language = "en" -authors = ["strawberry (June)"] +authors = ["The continuwuity Community"] text-direction = "ltr" multilingual = false src = "docs" @@ -16,12 +16,9 @@ extra-watch-dirs = ["debian", "docs"] edition = "2024" [output.html] -git-repository-url = "https://github.com/girlbossceo/conduwuit" -edit-url-template = "https://github.com/girlbossceo/conduwuit/edit/main/{path}" -git-repository-icon = "fa-github-square" - -[output.html.redirect] -"/differences.html" = "https://conduwuit.puppyirl.gay/#where-is-the-differences-page" +edit-url-template = "https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/{path}" +git-repository-url = "https://forgejo.ellis.link/continuwuation/continuwuity" +git-repository-icon = "fa-git-alt" [output.html.search] limit-results = 15 From 538347204fab75e9965ee4a60f00690f8a354ddf Mon Sep 17 00:00:00 2001 From: Tom Foster Date: Wed, 16 Apr 2025 02:56:47 +0100 Subject: [PATCH 222/310] Add Matrix .well-known files --- docs/static/_headers | 3 +++ docs/static/client | 1 + docs/static/server | 1 + 3 files changed, 5 insertions(+) create mode 100644 docs/static/_headers create mode 100644 docs/static/client create mode 100644 docs/static/server diff --git a/docs/static/_headers b/docs/static/_headers new file mode 100644 index 00000000..5e960241 --- /dev/null +++ b/docs/static/_headers @@ -0,0 +1,3 @@ +/.well-known/matrix/* + Access-Control-Allow-Origin: * + Content-Type: application/json diff --git a/docs/static/client b/docs/static/client new file mode 100644 index 00000000..c2b70a14 --- /dev/null +++ b/docs/static/client @@ -0,0 +1 @@ +{"m.homeserver":{"base_url": "https://matrix.continuwuity.org"},"org.matrix.msc3575.proxy":{"url": "https://matrix.continuwuity.org"}} diff --git a/docs/static/server b/docs/static/server new file mode 100644 index 00000000..a3099f6e --- /dev/null +++ b/docs/static/server @@ -0,0 +1 @@ +{"m.server":"matrix.continuwuity.org:443"} From b91af70e0b909ce7486bb88768d65f573bcfd9e8 Mon Sep 17 00:00:00 2001 From: Tom Foster Date: Wed, 16 Apr 2025 15:47:12 +0100 Subject: [PATCH 223/310] Add Forgejo CI workflow for Cloudflare Pages --- .forgejo/workflows/documentation.yml | 68 ++++++++++++++++++++++++++++ 
1 file changed, 68 insertions(+) create mode 100644 .forgejo/workflows/documentation.yml diff --git a/.forgejo/workflows/documentation.yml b/.forgejo/workflows/documentation.yml new file mode 100644 index 00000000..c08c1abb --- /dev/null +++ b/.forgejo/workflows/documentation.yml @@ -0,0 +1,68 @@ +name: Documentation + +on: + pull_request: + push: + branches: + - main + tags: + - "v*" + workflow_dispatch: + +concurrency: + group: "pages-${{ github.ref }}" + cancel-in-progress: true + +jobs: + docs: + name: Build and Deploy Documentation + runs-on: not-nexy + + steps: + - name: Sync repository + uses: https://github.com/actions/checkout@v4 + with: + persist-credentials: false + fetch-depth: 0 + + - name: Setup mdBook + uses: https://github.com/peaceiris/actions-mdbook@v2 + with: + mdbook-version: "latest" + + - name: Build mdbook + run: mdbook build + + - name: Prepare static files for deployment + run: | + mkdir -p ./public/.well-known/matrix + # Copy the Matrix .well-known files + cp ./docs/static/server ./public/.well-known/matrix/server + cp ./docs/static/client ./public/.well-known/matrix/client + # Copy the custom headers file + cp ./docs/static/_headers ./public/_headers + echo "Copied .well-known files and _headers to ./public" + + - name: Setup Node.js + uses: https://github.com/actions/setup-node@v4 + with: + node-version: 20 + + - name: Install dependencies + run: npm install --save-dev wrangler@latest + + - name: Deploy to Cloudflare Pages (Production) + if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} + uses: https://github.com/cloudflare/wrangler-action@v3 + with: + accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} + apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} + command: pages deploy ./public --branch=main --commit-dirty=true --project-name=${{ vars.CLOUDFLARE_PROJECT_NAME }}" + + - name: Deploy to Cloudflare Pages (Preview) + if: ${{ github.event_name != 'push' || github.ref != 'refs/heads/main' }} + uses: https://github.com/cloudflare/wrangler-action@v3 + with: + accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} + apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} + command: pages deploy ./public --branch=${{ github.head_ref }} --commit-dirty=true --project-name=${{ vars.CLOUDFLARE_PROJECT_NAME }}" From 773c3d457b214d60376780b0baaa4031e4352685 Mon Sep 17 00:00:00 2001 From: Jacob Taylor Date: Thu, 17 Apr 2025 07:48:54 -0700 Subject: [PATCH 224/310] fix space hierarchy pagination not respecting client-specified limit. --- src/api/client/space.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/client/space.rs b/src/api/client/space.rs index 4eee9d76..92768926 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -179,7 +179,7 @@ where (next_short_room_ids.iter().ne(short_room_ids) && !next_short_room_ids.is_empty()) .then_some(PaginationToken { short_room_ids: next_short_room_ids, - limit: max_depth.try_into().ok()?, + limit: limit.try_into().ok()?, max_depth: max_depth.try_into().ok()?, suggested_only, }) From 68d68a0645d52e4e5fe5a0c27d1c1b31f87f35aa Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Tue, 15 Apr 2025 17:35:33 +0100 Subject: [PATCH 225/310] fix: Do not panic when sender_device is None in `/messages` route The device ID is not always present when the appservice is the client. This was causing 500 errors for some users, as appservices can lazy load from `/messages`. 
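A minimal sketch of the pattern applied below (the helper name here is illustrative and not part of the patch; `sender_device`, `DeviceId`, and the empty-string fallback are taken from the diff itself): treat the device as optional and fall back to a placeholder instead of unwrapping it.

    use ruma::DeviceId;

    // Sketch only: on appservice requests `sender_device` is `None`,
    // so substitute an empty DeviceId rather than panicking.
    fn effective_device_id(sender_device: Option<&DeviceId>) -> &DeviceId {
        match sender_device {
            | Some(device_id) => device_id,
            | None => <&DeviceId>::from(""),
        }
    }
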
Fixes #738 Co-authored-by: Jade Ellis --- src/api/client/message.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/api/client/message.rs b/src/api/client/message.rs index db11ef4a..f85611ca 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -21,7 +21,7 @@ use conduwuit_service::{ }; use futures::{FutureExt, StreamExt, TryFutureExt, future::OptionFuture, pin_mut}; use ruma::{ - RoomId, UserId, + DeviceId, RoomId, UserId, api::{ Direction, client::{filter::RoomEventFilter, message::get_message_events}, @@ -67,8 +67,8 @@ pub(crate) async fn get_message_events_route( body: Ruma, ) -> Result { debug_assert!(IGNORED_MESSAGE_TYPES.is_sorted(), "IGNORED_MESSAGE_TYPES is not sorted"); - let sender = body.sender(); - let (sender_user, sender_device) = sender; + let sender_user = body.sender_user(); + let sender_device = body.sender_device.as_ref(); let room_id = &body.room_id; let filter = &body.filter; @@ -132,7 +132,7 @@ pub(crate) async fn get_message_events_route( let lazy_loading_context = lazy_loading::Context { user_id: sender_user, - device_id: sender_device, + device_id: sender_device.map_or_else(|| <&DeviceId>::from(""), AsRef::as_ref), room_id, token: Some(from.into_unsigned()), options: Some(&filter.lazy_load_options), From 3a95585f0edb529154a5a8e3f181f4b5e929b698 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Wed, 16 Apr 2025 13:47:35 +0100 Subject: [PATCH 226/310] fix: Disambiguate appservices in lazy loading context In the previous commit, app services would all appear to be the same device when accessing the same user. This sets the device ID to be the appservice ID when available to avoid possible clobbering. --- src/api/client/message.rs | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/src/api/client/message.rs b/src/api/client/message.rs index f85611ca..9c2c4057 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -26,7 +26,10 @@ use ruma::{ Direction, client::{filter::RoomEventFilter, message::get_message_events}, }, - events::{AnyStateEvent, StateEventType, TimelineEventType, TimelineEventType::*}, + events::{ + AnyStateEvent, StateEventType, + TimelineEventType::{self, *}, + }, serde::Raw, }; @@ -129,10 +132,20 @@ pub(crate) async fn get_message_events_route( .take(limit) .collect() .await; + // let appservice_id = body.appservice_info.map(|appservice| + // appservice.registration.id); let lazy_loading_context = lazy_loading::Context { user_id: sender_user, - device_id: sender_device.map_or_else(|| <&DeviceId>::from(""), AsRef::as_ref), + device_id: match sender_device { + | Some(device_id) => device_id, + | None => + if let Some(registration) = body.appservice_info.as_ref() { + <&DeviceId>::from(registration.registration.id.as_str()) + } else { + <&DeviceId>::from("") + }, + }, room_id, token: Some(from.into_unsigned()), options: Some(&filter.lazy_load_options), From dc599db19c48ad3cbae15fc419c4a531d217ed05 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Wed, 16 Apr 2025 13:52:28 +0100 Subject: [PATCH 227/310] chore: Change branding string to continuwuity --- src/core/info/version.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/core/info/version.rs b/src/core/info/version.rs index 37580210..6abb6e13 100644 --- a/src/core/info/version.rs +++ b/src/core/info/version.rs @@ -7,7 +7,7 @@ use std::sync::OnceLock; -static BRANDING: &str = "conduwuit"; +static BRANDING: &str = "continuwuity"; static SEMANTIC: &str = env!("CARGO_PKG_VERSION"); static 
VERSION: OnceLock = OnceLock::new(); From 6b92e965824924ea7f78399758cb4c4a1057e2fb Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Wed, 16 Apr 2025 18:54:36 +0100 Subject: [PATCH 228/310] feat: Docker images built with Forgejo Actions --- .dockerignore | 4 +- .forgejo/workflows/release-image.yml | 223 +++++++++++++++++++++++++++ docker/Dockerfile | 216 ++++++++++++++++++++++++++ 3 files changed, 441 insertions(+), 2 deletions(-) create mode 100644 .forgejo/workflows/release-image.yml create mode 100644 docker/Dockerfile diff --git a/.dockerignore b/.dockerignore index 35d35e1b..453634df 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,9 +1,9 @@ # Local build and dev artifacts -target -tests +target/ # Docker files Dockerfile* +docker/ # IDE files .vscode diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml new file mode 100644 index 00000000..3a6c93eb --- /dev/null +++ b/.forgejo/workflows/release-image.yml @@ -0,0 +1,223 @@ +name: Release Docker Image + +on: + pull_request: + push: + paths-ignore: + - '.gitlab-ci.yml' + - '.gitignore' + - 'renovate.json' + - 'debian/**' + - 'docker/**' + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +env: + BUILTIN_REGISTRY: forgejo.ellis.link + BUILTIN_REGISTRY_ENABLED: "${{ (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false) && 'true' || 'false' }}" + +jobs: + define-variables: + runs-on: ubuntu-latest + + outputs: + images: ${{ steps.var.outputs.images }} + images_list: ${{ steps.var.outputs.images_list }} + build_matrix: ${{ steps.var.outputs.build_matrix }} + + steps: + - name: Setting variables + uses: https://github.com/actions/github-script@v7 + id: var + with: + script: | + const githubRepo = '${{ github.repository }}'.toLowerCase() + const repoId = githubRepo.split('/')[1] + + core.setOutput('github_repository', githubRepo) + const builtinImage = '${{ env.BUILTIN_REGISTRY }}/' + githubRepo + let images = [] + if (process.env.BUILTIN_REGISTRY_ENABLED === "true") { + images.push(builtinImage) + } + core.setOutput('images', images.join("\n")) + core.setOutput('images_list', images.join(",")) + const platforms = ['linux/amd64', 'linux/arm64'] + core.setOutput('build_matrix', JSON.stringify({ + platform: platforms, + include: platforms.map(platform => { return { + platform, + slug: platform.replace('/', '-') + }}) + })) + + build-image: + runs-on: not-nexy + container: ghcr.io/catthehacker/ubuntu:act-latest + needs: define-variables + permissions: + contents: read + packages: write + attestations: write + id-token: write + strategy: + matrix: ${{ fromJSON(needs.define-variables.outputs.build_matrix) }} + steps: + - name: Echo strategy + run: echo '${{ toJSON(fromJSON(needs.define-variables.outputs.build_matrix)) }}' + - name: Echo matrix + run: echo '${{ toJSON(matrix) }}' + - name: Checkout repository + uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here. 
+ - name: Login to builtin registry + uses: docker/login-action@v3 + with: + registry: ${{ env.BUILTIN_REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + # This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels. + - name: Extract metadata (labels, annotations) for Docker + id: meta + uses: docker/metadata-action@v5 + with: + images: ${{needs.define-variables.outputs.images}} + # default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509 + env: + DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,index + + # This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages. + # It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository. + # It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step. + # It will not push images generated from a pull request + - name: Get short git commit SHA + id: sha + run: | + calculatedSha=$(git rev-parse --short ${{ github.sha }}) + echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV + - name: Get Git commit timestamps + run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV + - uses: https://github.com/Swatinem/rust-cache@v2 + with: + prefix-key: v0-rust-linux/${{ matrix.slug }} + id: rust-cache + - name: Inject cache into Docker + uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.1.2 + with: + cache-map: | + { + "/home/runner/.cargo/registry": "/usr/local/cargo/registry", + "/home/runner/.cargo/git/db": "/usr/local/cargo/git/db", + "./target": "/app/target", + "./timelord": "/timelord" + } + - name: Cache timelord state + uses: actions/cache@v4 + with: + path: "./timelord" + key: ${{ runner.os }}-${{ matrix.slug }} + - name: Build and push Docker image by digest + id: build + uses: docker/build-push-action@v6 + with: + context: . 
+ file: "docker/Dockerfile" + build-args: | + CONDUWUIT_VERSION_EXTRA=${{ env.COMMIT_SHORT_SHA }} + platforms: ${{ matrix.platform }} + labels: ${{ steps.meta.outputs.labels }} + annotations: ${{ steps.meta.outputs.annotations }} + cache-from: type=gha + cache-to: type=gha,mode=max + sbom: true + outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true + env: + SOURCE_DATE_EPOCH: ${{ env.TIMESTAMP }} + + # For publishing multi-platform manifests + - name: Export digest + run: | + mkdir -p /tmp/digests + digest="${{ steps.build.outputs.digest }}" + touch "/tmp/digests/${digest#sha256:}" + + - name: Upload digest + uses: actions/upload-artifact@v4 + with: + name: digests-${{ matrix.slug }} + path: /tmp/digests/* + if-no-files-found: error + retention-days: 1 + + merge: + runs-on: not-nexy + container: ghcr.io/catthehacker/ubuntu:act-latest + needs: [define-variables, build-image] + steps: + - name: Download digests + uses: actions/download-artifact@v4 + with: + path: /tmp/digests + pattern: digests-* + merge-multiple: true + # Uses the `docker/login-action` action to log in to the Container registry registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here. + - name: Login to builtin registry + uses: docker/login-action@v3 + with: + registry: ${{ env.BUILTIN_REGISTRY }} + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Extract metadata (tags) for Docker + id: meta + uses: docker/metadata-action@v5 + with: + tags: | + type=semver,pattern=v{{version}} + type=semver,pattern=v{{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }} + type=semver,pattern=v{{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }} + type=ref,event=branch + type=ref,event=pr + type=sha,format=long + images: ${{needs.define-variables.outputs.images}} + # default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509 + env: + DOCKER_METADATA_ANNOTATIONS_LEVELS: index + + - name: Create manifest list and push + working-directory: /tmp/digests + env: + IMAGES: ${{needs.define-variables.outputs.images}} + shell: bash + run: | + IFS=$'\n' + IMAGES_LIST=($IMAGES) + ANNOTATIONS_LIST=($DOCKER_METADATA_OUTPUT_ANNOTATIONS) + TAGS_LIST=($DOCKER_METADATA_OUTPUT_TAGS) + for REPO in "${IMAGES_LIST[@]}"; do + docker buildx imagetools create \ + $(for tag in "${TAGS_LIST[@]}"; do echo "--tag"; echo "$tag"; done) \ + $(for annotation in "${ANNOTATIONS_LIST[@]}"; do echo "--annotation"; echo "$annotation"; done) \ + $(for reference in *; do printf "$REPO@sha256:%s\n" $reference; done) + done + + - name: Inspect image + env: + IMAGES: ${{needs.define-variables.outputs.images}} + shell: bash + run: | + IMAGES_LIST=($IMAGES) + for REPO in "${IMAGES_LIST[@]}"; do + docker buildx imagetools inspect $REPO:${{ steps.meta.outputs.version }} + done diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 00000000..10f54d94 --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,216 @@ +ARG RUST_VERSION=1 + +FROM --platform=$BUILDPLATFORM docker.io/tonistiigi/xx AS xx +FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-slim-bookworm AS base +FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-slim-bookworm AS toolchain + +# Prevent deletion of apt cache +RUN rm -f /etc/apt/apt.conf.d/docker-clean + +# Match Rustc 
version as close as possible +# rustc -vV +ARG LLVM_VERSION=19 +# ENV RUSTUP_TOOLCHAIN=${RUST_VERSION} + +# Install repo tools +# Line one: compiler tools +# Line two: curl, for downloading binaries +# Line three: for xx-verify +RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ +apt-get update && apt-get install -y \ + clang-${LLVM_VERSION} lld-${LLVM_VERSION} pkg-config make jq \ + curl git \ + file + +# Create symlinks for LLVM tools +RUN <> /etc/environment + +# Configure pkg-config +RUN <> /etc/environment + echo "PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /etc/environment + echo "PKG_CONFIG_ALLOW_CROSS=true" >> /etc/environment +EOF + +# Configure cc to use clang version +RUN <> /etc/environment + echo "CXX=clang++" >> /etc/environment +EOF + +# Cross-language LTO +RUN <> /etc/environment + echo "CXXFLAGS=-flto" >> /etc/environment + # Linker is set to target-compatible clang by xx + echo "RUSTFLAGS='-Clinker-plugin-lto -Clink-arg=-fuse-ld=lld'" >> /etc/environment +EOF + +# Apply CPU-specific optimizations if TARGET_CPU is provided +ARG TARGET_CPU= +RUN <> /etc/environment + echo "CXXFLAGS='${CXXFLAGS} -march=${TARGET_CPU}'" >> /etc/environment + echo "RUSTFLAGS='${RUSTFLAGS} -C target-cpu=${TARGET_CPU}'" >> /etc/environment + fi +EOF + +# Prepare output directories +RUN mkdir /out + +FROM toolchain AS builder + +# Conduwuit version info +ARG COMMIT_SHA= +ARG CONDUWUIT_VERSION_EXTRA= +ENV CONDUWUIT_VERSION_EXTRA=$CONDUWUIT_VERSION_EXTRA +RUN <> /etc/environment +fi +EOF + +ARG TARGETPLATFORM + +# Verify environment configuration +RUN cat /etc/environment +RUN xx-cargo --print-target-triple + +# Get source +COPY . . + +# Timelord sync +RUN --mount=type=cache,target=/timelord/ \ + timelord sync --source-dir . --cache-dir /timelord/ + +# Build the binary +RUN --mount=type=cache,target=/usr/local/cargo/registry \ + --mount=type=cache,target=/usr/local/cargo/git/db \ + --mount=type=cache,target=/app/target \ + bash <<'EOF' + set -o allexport + . /etc/environment + TARGET_DIR=($(cargo metadata --no-deps --format-version 1 | \ + jq -r ".target_directory")) + mkdir /out/sbin + PACKAGE=conduwuit + xx-cargo build --locked --release \ + -p $PACKAGE; + BINARIES=($(cargo metadata --no-deps --format-version 1 | \ + jq -r ".packages[] | select(.name == \"$PACKAGE\") | .targets[] | select( .kind | map(. == \"bin\") | any ) | .name")) + for BINARY in "${BINARIES[@]}"; do + echo $BINARY + xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY + cp $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY /out/sbin/$BINARY + done +EOF + +# Generate Software Bill of Materials (SBOM) +RUN --mount=type=cache,target=/usr/local/cargo/registry \ + --mount=type=cache,target=/usr/local/cargo/git/db \ + bash <<'EOF' + mkdir /out/sbom + typeset -A PACKAGES + for BINARY in /out/sbin/*; do + BINARY_BASE=$(basename ${BINARY}) + package=$(cargo metadata --no-deps --format-version 1 | jq -r ".packages[] | select(.targets[] | select( .kind | map(. 
== \"bin\") | any ) | .name == \"$BINARY_BASE\") | .name") + if [ -z "$package" ]; then + continue + fi + PACKAGES[$package]=1 + done + for PACKAGE in $(echo ${!PACKAGES[@]}); do + echo $PACKAGE + cargo sbom --cargo-package $PACKAGE > /out/sbom/$PACKAGE.spdx.json + done +EOF + +# Extract dynamically linked dependencies +RUN < Date: Thu, 17 Apr 2025 10:33:19 +0100 Subject: [PATCH 229/310] build: Use hacks for a cached actions build - Use cache dance for github actions caching - Use timelord hack to avoid bad cache invalidation --- .forgejo/workflows/release-image.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 3a6c93eb..fddb493c 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -107,7 +107,7 @@ jobs: run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV - uses: https://github.com/Swatinem/rust-cache@v2 with: - prefix-key: v0-rust-linux/${{ matrix.slug }} + prefix-key: v0-rust-linux/${{ matrix.platform.slug }} id: rust-cache - name: Inject cache into Docker uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.1.2 @@ -123,7 +123,7 @@ jobs: uses: actions/cache@v4 with: path: "./timelord" - key: ${{ runner.os }}-${{ matrix.slug }} + key: ${{ runner.os }}-${{ matrix.platform.slug }} - name: Build and push Docker image by digest id: build uses: docker/build-push-action@v6 From 70cee36041913c2d077ff427e89a341d5e5ac2e1 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 17 Apr 2025 12:02:59 +0100 Subject: [PATCH 230/310] fix: Allow specifying user & password for builtin registry --- .forgejo/workflows/release-image.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index fddb493c..65dfb43c 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -81,8 +81,8 @@ jobs: uses: docker/login-action@v3 with: registry: ${{ env.BUILTIN_REGISTRY }} - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} + username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }} + password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }} # This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels. 
- name: Extract metadata (labels, annotations) for Docker @@ -173,8 +173,8 @@ jobs: uses: docker/login-action@v3 with: registry: ${{ env.BUILTIN_REGISTRY }} - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} + username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }} + password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }} - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 From 3ced2e2f9064200ffdcad18c49866bc96cf5c2a1 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 17 Apr 2025 12:06:45 +0100 Subject: [PATCH 231/310] fix: Use forgejo patched artifact actions --- .forgejo/workflows/release-image.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 65dfb43c..8a178d36 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -150,7 +150,7 @@ jobs: touch "/tmp/digests/${digest#sha256:}" - name: Upload digest - uses: actions/upload-artifact@v4 + uses: forgejo/upload-artifact@v4 with: name: digests-${{ matrix.slug }} path: /tmp/digests/* @@ -163,7 +163,7 @@ jobs: needs: [define-variables, build-image] steps: - name: Download digests - uses: actions/download-artifact@v4 + uses: forgejo/download-artifact@v4 with: path: /tmp/digests pattern: digests-* From 0ac1ce9996952b96a865c36a6cbda85df0db60da Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 17 Apr 2025 12:13:19 +0100 Subject: [PATCH 232/310] fix: Hardcode matrix --- .forgejo/workflows/release-image.yml | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 8a178d36..9fc50441 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -61,7 +61,22 @@ jobs: attestations: write id-token: write strategy: - matrix: ${{ fromJSON(needs.define-variables.outputs.build_matrix) }} + matrix: { + "include": [ + { + "platform": "linux/amd64", + "slug": "linux-amd64" + }, + { + "platform": "linux/arm64", + "slug": "linux-arm64" + } + ], + "platform": [ + "linux/amd64", + "linux/arm64" + ] + } steps: - name: Echo strategy run: echo '${{ toJSON(fromJSON(needs.define-variables.outputs.build_matrix)) }}' From 93253237e938984cdaf7a947f8b3a3d4c54c63e4 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 17 Apr 2025 12:44:18 +0100 Subject: [PATCH 233/310] ci: Prefix branch builds with branch- --- .forgejo/workflows/release-image.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 9fc50441..3970ff9d 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -202,7 +202,7 @@ jobs: type=semver,pattern=v{{version}} type=semver,pattern=v{{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }} type=semver,pattern=v{{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }} - type=ref,event=branch + type=ref,event=branch,prefix=branch- type=ref,event=pr type=sha,format=long images: ${{needs.define-variables.outputs.images}} From 10947f6f1a268b03f7a35b30e82679b98e7b9337 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 17 Apr 2025 12:51:18 +0100 Subject: [PATCH 234/310] fix: Replace rust cache with direct cache use, as Rust is not installed on CI image --- .forgejo/workflows/release-image.yml | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) 
diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 3970ff9d..7c72082a 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -120,10 +120,20 @@ jobs: echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV - name: Get Git commit timestamps run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV - - uses: https://github.com/Swatinem/rust-cache@v2 + - name: Rust cache + uses: actions/cache@v4 with: - prefix-key: v0-rust-linux/${{ matrix.platform.slug }} + key: v0-rust-linux/${{ runner.os }}-${{ matrix.slug }} + path: | + ~/.cargo/registry + ~/.cargo/git/db + ./target id: rust-cache + - name: Cache timelord state + uses: actions/cache@v4 + with: + path: "./timelord" + key: ${{ runner.os }}-${{ matrix.slug }} - name: Inject cache into Docker uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.1.2 with: @@ -134,11 +144,6 @@ jobs: "./target": "/app/target", "./timelord": "/timelord" } - - name: Cache timelord state - uses: actions/cache@v4 - with: - path: "./timelord" - key: ${{ runner.os }}-${{ matrix.platform.slug }} - name: Build and push Docker image by digest id: build uses: docker/build-push-action@v6 From fb793e8315bd0acb222b1ac53aef07f247afff38 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 17 Apr 2025 14:35:16 +0100 Subject: [PATCH 235/310] ci: Limit concurrency Mainly to prevent runners from getting bogged down --- .forgejo/workflows/release-image.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 7c72082a..0eaf945a 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -1,4 +1,6 @@ name: Release Docker Image +concurrency: + group: "release-image-${{ github.ref }}" on: pull_request: From 71d2421f55a97aee5b2b31dc1889f3f688d5315c Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 17 Apr 2025 15:44:56 +0100 Subject: [PATCH 236/310] ci: Only prefix non-default branches AKA, tag image:main as the latest commit --- .forgejo/workflows/release-image.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 0eaf945a..f03341a7 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -209,7 +209,7 @@ jobs: type=semver,pattern=v{{version}} type=semver,pattern=v{{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }} type=semver,pattern=v{{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }} - type=ref,event=branch,prefix=branch- + type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) == github.ref && '' || 'branch-' }} type=ref,event=pr type=sha,format=long images: ${{needs.define-variables.outputs.images}} From d85aaabe9ee239e22bff1be5decbaeac51880ec1 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 17 Apr 2025 15:59:40 +0100 Subject: [PATCH 237/310] fix: Disable buildkit caching This is for tom's runners, whilst they're having network issues --- .forgejo/workflows/release-image.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index f03341a7..194fa93a 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -157,8 +157,8 @@ jobs: platforms: ${{ matrix.platform }} labels: ${{ steps.meta.outputs.labels }} 
annotations: ${{ steps.meta.outputs.annotations }} - cache-from: type=gha - cache-to: type=gha,mode=max + # cache-from: type=gha + # cache-to: type=gha,mode=max sbom: true outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true env: From 9e0530839d54f8ae8e58e8ff01c57e0deca72af8 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Fri, 18 Apr 2025 14:08:21 +0100 Subject: [PATCH 238/310] ci: Remove non-functional cache steps --- .forgejo/workflows/release-image.yml | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 194fa93a..6bde932d 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -122,30 +122,6 @@ jobs: echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV - name: Get Git commit timestamps run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV - - name: Rust cache - uses: actions/cache@v4 - with: - key: v0-rust-linux/${{ runner.os }}-${{ matrix.slug }} - path: | - ~/.cargo/registry - ~/.cargo/git/db - ./target - id: rust-cache - - name: Cache timelord state - uses: actions/cache@v4 - with: - path: "./timelord" - key: ${{ runner.os }}-${{ matrix.slug }} - - name: Inject cache into Docker - uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.1.2 - with: - cache-map: | - { - "/home/runner/.cargo/registry": "/usr/local/cargo/registry", - "/home/runner/.cargo/git/db": "/usr/local/cargo/git/db", - "./target": "/app/target", - "./timelord": "/timelord" - } - name: Build and push Docker image by digest id: build uses: docker/build-push-action@v6 From b16e26952af74da3cdcbfbb72ec6786a55a37133 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Fri, 18 Apr 2025 14:09:09 +0100 Subject: [PATCH 239/310] ci: Use dind label --- .forgejo/workflows/release-image.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 6bde932d..142529ae 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -54,7 +54,7 @@ jobs: })) build-image: - runs-on: not-nexy + runs-on: dind container: ghcr.io/catthehacker/ubuntu:act-latest needs: define-variables permissions: @@ -156,7 +156,7 @@ jobs: retention-days: 1 merge: - runs-on: not-nexy + runs-on: dind container: ghcr.io/catthehacker/ubuntu:act-latest needs: [define-variables, build-image] steps: From 2e6ec2f89cd218f89489d7ca86997bce94d25064 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Fri, 18 Apr 2025 17:38:42 +0100 Subject: [PATCH 240/310] chore: Update git links --- Cargo.lock | 54 +++++++++++++++++++++++++++--------------------------- Cargo.toml | 42 +++++++++++++++++++++--------------------- 2 files changed, 48 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d81fdbc0..def41f68 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -118,7 +118,7 @@ checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] name = "async-channel" version = "2.3.1" -source = "git+https://github.com/girlbossceo/async-channel?rev=92e5e74063bf2a3b10414bcc8a0d68b235644280#92e5e74063bf2a3b10414bcc8a0d68b235644280" +source = "git+https://forgejo.ellis.link/continuwuation/async-channel?rev=92e5e74063bf2a3b10414bcc8a0d68b235644280#92e5e74063bf2a3b10414bcc8a0d68b235644280" dependencies = [ "concurrent-queue", "event-listener-strategy", @@ -1047,7 +1047,7 @@ checksum = 
"773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "core_affinity" version = "0.8.1" -source = "git+https://github.com/girlbossceo/core_affinity_rs?rev=9c8e51510c35077df888ee72a36b4b05637147da#9c8e51510c35077df888ee72a36b4b05637147da" +source = "git+https://forgejo.ellis.link/continuwuation/core_affinity_rs?rev=9c8e51510c35077df888ee72a36b4b05637147da#9c8e51510c35077df888ee72a36b4b05637147da" dependencies = [ "libc", "num_cpus", @@ -1379,7 +1379,7 @@ dependencies = [ [[package]] name = "event-listener" version = "5.3.1" -source = "git+https://github.com/girlbossceo/event-listener?rev=fe4aebeeaae435af60087ddd56b573a2e0be671d#fe4aebeeaae435af60087ddd56b573a2e0be671d" +source = "git+https://forgejo.ellis.link/continuwuation/event-listener?rev=fe4aebeeaae435af60087ddd56b573a2e0be671d#fe4aebeeaae435af60087ddd56b573a2e0be671d" dependencies = [ "concurrent-queue", "parking", @@ -2030,7 +2030,7 @@ dependencies = [ [[package]] name = "hyper-util" version = "0.1.11" -source = "git+https://github.com/girlbossceo/hyper-util?rev=e4ae7628fe4fcdacef9788c4c8415317a4489941#e4ae7628fe4fcdacef9788c4c8415317a4489941" +source = "git+https://forgejo.ellis.link/continuwuation/hyper-util?rev=e4ae7628fe4fcdacef9788c4c8415317a4489941#e4ae7628fe4fcdacef9788c4c8415317a4489941" dependencies = [ "bytes", "futures-channel", @@ -3625,7 +3625,7 @@ dependencies = [ [[package]] name = "resolv-conf" version = "0.7.1" -source = "git+https://github.com/girlbossceo/resolv-conf?rev=200e958941d522a70c5877e3d846f55b5586c68d#200e958941d522a70c5877e3d846f55b5586c68d" +source = "git+https://forgejo.ellis.link/continuwuation/resolv-conf?rev=200e958941d522a70c5877e3d846f55b5586c68d#200e958941d522a70c5877e3d846f55b5586c68d" dependencies = [ "hostname", ] @@ -3653,7 +3653,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "assign", "js_int", @@ -3673,7 +3673,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "js_int", "ruma-common", @@ -3685,7 +3685,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "as_variant", "assign", @@ -3708,7 +3708,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "as_variant", "base64 0.22.1", @@ -3740,7 +3740,7 @@ dependencies = [ [[package]] name = "ruma-events" version = 
"0.28.1" -source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "as_variant", "indexmap 2.8.0", @@ -3765,7 +3765,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "bytes", "headers", @@ -3787,7 +3787,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3796,7 +3796,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "js_int", "ruma-common", @@ -3806,7 +3806,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3821,7 +3821,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "js_int", "ruma-common", @@ -3833,7 +3833,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://github.com/girlbossceo/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" dependencies = [ "base64 0.22.1", "ed25519-dalek", @@ -3849,7 +3849,7 @@ dependencies = [ [[package]] name = "rust-librocksdb-sys" version = "0.33.0+9.11.1" -source = "git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1c267e0bf0cc7b7702e9a329deccd89de79ef4c3#1c267e0bf0cc7b7702e9a329deccd89de79ef4c3" +source = "git+https://forgejo.ellis.link/continuwuation/rust-rocksdb-zaidoon1?rev=fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd#fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd" dependencies = [ "bindgen 0.71.1", "bzip2-sys", @@ -3866,7 +3866,7 @@ dependencies = [ [[package]] name = "rust-rocksdb" version = "0.37.0" -source = 
"git+https://github.com/girlbossceo/rust-rocksdb-zaidoon1?rev=1c267e0bf0cc7b7702e9a329deccd89de79ef4c3#1c267e0bf0cc7b7702e9a329deccd89de79ef4c3" +source = "git+https://forgejo.ellis.link/continuwuation/rust-rocksdb-zaidoon1?rev=fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd#fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd" dependencies = [ "libc", "rust-librocksdb-sys", @@ -3979,7 +3979,7 @@ checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" [[package]] name = "rustyline-async" version = "0.4.3" -source = "git+https://github.com/girlbossceo/rustyline-async?rev=deaeb0694e2083f53d363b648da06e10fc13900c#deaeb0694e2083f53d363b648da06e10fc13900c" +source = "git+https://forgejo.ellis.link/continuwuation/rustyline-async?rev=deaeb0694e2083f53d363b648da06e10fc13900c#deaeb0694e2083f53d363b648da06e10fc13900c" dependencies = [ "crossterm", "futures-channel", @@ -4675,7 +4675,7 @@ dependencies = [ [[package]] name = "tikv-jemalloc-ctl" version = "0.6.0" -source = "git+https://github.com/girlbossceo/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" +source = "git+https://forgejo.ellis.link/continuwuation/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" dependencies = [ "libc", "paste", @@ -4685,7 +4685,7 @@ dependencies = [ [[package]] name = "tikv-jemalloc-sys" version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7" -source = "git+https://github.com/girlbossceo/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" +source = "git+https://forgejo.ellis.link/continuwuation/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" dependencies = [ "cc", "libc", @@ -4694,7 +4694,7 @@ dependencies = [ [[package]] name = "tikv-jemallocator" version = "0.6.0" -source = "git+https://github.com/girlbossceo/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" +source = "git+https://forgejo.ellis.link/continuwuation/jemallocator?rev=82af58d6a13ddd5dcdc7d4e91eae3b63292995b8#82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" dependencies = [ "libc", "tikv-jemalloc-sys", @@ -4980,7 +4980,7 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" version = "0.1.41" -source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" +source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "pin-project-lite", "tracing-attributes", @@ -4990,7 +4990,7 @@ dependencies = [ [[package]] name = "tracing-attributes" version = "0.1.28" -source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" +source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "proc-macro2", "quote", @@ -5000,7 +5000,7 @@ dependencies = [ [[package]] name = "tracing-core" version = "0.1.33" -source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" +source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" 
dependencies = [ "once_cell", "valuable", @@ -5020,7 +5020,7 @@ dependencies = [ [[package]] name = "tracing-log" version = "0.2.0" -source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" +source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "log", "once_cell", @@ -5048,7 +5048,7 @@ dependencies = [ [[package]] name = "tracing-subscriber" version = "0.3.19" -source = "git+https://github.com/girlbossceo/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" +source = "git+https://forgejo.ellis.link/continuwuation/tracing?rev=1e64095a8051a1adf0d1faa307f9f030889ec2aa#1e64095a8051a1adf0d1faa307f9f030889ec2aa" dependencies = [ "matchers", "nu-ansi-term", diff --git a/Cargo.toml b/Cargo.toml index f5ee3f0f..e9ae0007 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,12 +14,12 @@ authors = [ categories = ["network-programming"] description = "a very cool Matrix chat homeserver written in Rust" edition = "2024" -homepage = "https://conduwuit.puppyirl.gay/" +homepage = "https://continuwuity.org/" keywords = ["chat", "matrix", "networking", "server", "uwu"] license = "Apache-2.0" # See also `rust-toolchain.toml` readme = "README.md" -repository = "https://github.com/girlbossceo/conduwuit" +repository = "https://forgejo.ellis.link/continuwuation/continuwuity" rust-version = "1.86.0" version = "0.5.0" @@ -348,7 +348,7 @@ version = "0.1.2" # Used for matrix spec type definitions and helpers [workspace.dependencies.ruma] -git = "https://github.com/girlbossceo/ruwuma" +git = "https://forgejo.ellis.link/continuwuation/ruwuma" #branch = "conduwuit-changes" rev = "920148dca1076454ca0ca5d43b5ce1aa708381d4" features = [ @@ -388,8 +388,8 @@ features = [ ] [workspace.dependencies.rust-rocksdb] -git = "https://github.com/girlbossceo/rust-rocksdb-zaidoon1" -rev = "1c267e0bf0cc7b7702e9a329deccd89de79ef4c3" +git = "https://forgejo.ellis.link/continuwuation/rust-rocksdb-zaidoon1" +rev = "fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd" default-features = false features = [ "multi-threaded-cf", @@ -449,7 +449,7 @@ version = "0.37.0" # jemalloc usage [workspace.dependencies.tikv-jemalloc-sys] -git = "https://github.com/girlbossceo/jemallocator" +git = "https://forgejo.ellis.link/continuwuation/jemallocator" rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" default-features = false features = [ @@ -457,7 +457,7 @@ features = [ "unprefixed_malloc_on_supported_platforms", ] [workspace.dependencies.tikv-jemallocator] -git = "https://github.com/girlbossceo/jemallocator" +git = "https://forgejo.ellis.link/continuwuation/jemallocator" rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" default-features = false features = [ @@ -465,7 +465,7 @@ features = [ "unprefixed_malloc_on_supported_platforms", ] [workspace.dependencies.tikv-jemalloc-ctl] -git = "https://github.com/girlbossceo/jemallocator" +git = "https://forgejo.ellis.link/continuwuation/jemallocator" rev = "82af58d6a13ddd5dcdc7d4e91eae3b63292995b8" default-features = false features = ["use_std"] @@ -542,49 +542,49 @@ version = "1.0.2" # backport of [https://github.com/tokio-rs/tracing/pull/2956] to the 0.1.x branch of tracing. # we can switch back to upstream if #2956 is merged and backported in the upstream repo. 
-# https://github.com/girlbossceo/tracing/commit/b348dca742af641c47bc390261f60711c2af573c +# https://forgejo.ellis.link/continuwuation/tracing/commit/b348dca742af641c47bc390261f60711c2af573c [patch.crates-io.tracing-subscriber] -git = "https://github.com/girlbossceo/tracing" +git = "https://forgejo.ellis.link/continuwuation/tracing" rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" [patch.crates-io.tracing] -git = "https://github.com/girlbossceo/tracing" +git = "https://forgejo.ellis.link/continuwuation/tracing" rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" [patch.crates-io.tracing-core] -git = "https://github.com/girlbossceo/tracing" +git = "https://forgejo.ellis.link/continuwuation/tracing" rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" [patch.crates-io.tracing-log] -git = "https://github.com/girlbossceo/tracing" +git = "https://forgejo.ellis.link/continuwuation/tracing" rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" -# adds a tab completion callback: https://github.com/girlbossceo/rustyline-async/commit/de26100b0db03e419a3d8e1dd26895d170d1fe50 -# adds event for CTRL+\: https://github.com/girlbossceo/rustyline-async/commit/67d8c49aeac03a5ef4e818f663eaa94dd7bf339b +# adds a tab completion callback: https://forgejo.ellis.link/continuwuation/rustyline-async/commit/de26100b0db03e419a3d8e1dd26895d170d1fe50 +# adds event for CTRL+\: https://forgejo.ellis.link/continuwuation/rustyline-async/commit/67d8c49aeac03a5ef4e818f663eaa94dd7bf339b [patch.crates-io.rustyline-async] -git = "https://github.com/girlbossceo/rustyline-async" +git = "https://forgejo.ellis.link/continuwuation/rustyline-async" rev = "deaeb0694e2083f53d363b648da06e10fc13900c" # adds LIFO queue scheduling; this should be updated with PR progress. [patch.crates-io.event-listener] -git = "https://github.com/girlbossceo/event-listener" +git = "https://forgejo.ellis.link/continuwuation/event-listener" rev = "fe4aebeeaae435af60087ddd56b573a2e0be671d" [patch.crates-io.async-channel] -git = "https://github.com/girlbossceo/async-channel" +git = "https://forgejo.ellis.link/continuwuation/async-channel" rev = "92e5e74063bf2a3b10414bcc8a0d68b235644280" # adds affinity masks for selecting more than one core at a time [patch.crates-io.core_affinity] -git = "https://github.com/girlbossceo/core_affinity_rs" +git = "https://forgejo.ellis.link/continuwuation/core_affinity_rs" rev = "9c8e51510c35077df888ee72a36b4b05637147da" # reverts hyperium#148 conflicting with our delicate federation resolver hooks [patch.crates-io.hyper-util] -git = "https://github.com/girlbossceo/hyper-util" +git = "https://forgejo.ellis.link/continuwuation/hyper-util" rev = "e4ae7628fe4fcdacef9788c4c8415317a4489941" # allows no-aaaa option in resolv.conf # bumps rust edition and toolchain to 1.86.0 and 2024 # use sat_add on line number errors [patch.crates-io.resolv-conf] -git = "https://github.com/girlbossceo/resolv-conf" +git = "https://forgejo.ellis.link/continuwuation/resolv-conf" rev = "200e958941d522a70c5877e3d846f55b5586c68d" # From c5b99fbccda7140d7232cca1ca456ff4b1124109 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Fri, 18 Apr 2025 21:05:17 +0100 Subject: [PATCH 241/310] ci: Enable buildx caching --- .forgejo/workflows/release-image.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 142529ae..ba94acc3 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -133,8 +133,8 @@ jobs: platforms: ${{ matrix.platform }} 
labels: ${{ steps.meta.outputs.labels }}
annotations: ${{ steps.meta.outputs.annotations }}
-          # cache-from: type=gha
-          # cache-to: type=gha,mode=max
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
sbom: true
outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true
env:

From 298e2af3d7aed47d5f070c5259f23a2a761673b3 Mon Sep 17 00:00:00 2001
From: Jade Ellis
Date: Fri, 18 Apr 2025 22:24:35 +0100
Subject: [PATCH 242/310] ci: Try invert condition for branch prefix

---
 .forgejo/workflows/release-image.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml
index ba94acc3..1411db61 100644
--- a/.forgejo/workflows/release-image.yml
+++ b/.forgejo/workflows/release-image.yml
@@ -185,7 +185,7 @@ jobs:
 type=semver,pattern=v{{version}}
 type=semver,pattern=v{{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }}
 type=semver,pattern=v{{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }}
- type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) == github.ref && '' || 'branch-' }}
+ type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) != github.ref && 'branch-' || '' }}
 type=ref,event=pr
 type=sha,format=long
 images: ${{needs.define-variables.outputs.images}}

From d4561e950b25f1120d15167f62431177c466202b Mon Sep 17 00:00:00 2001
From: Jade Ellis
Date: Fri, 18 Apr 2025 22:25:10 +0100
Subject: [PATCH 243/310] ci: Run builtin registry whenever secret is available

---
 .forgejo/workflows/release-image.yml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml
index 1411db61..0b764110 100644
--- a/.forgejo/workflows/release-image.yml
+++ b/.forgejo/workflows/release-image.yml
@@ -16,7 +16,8 @@ on:
 env:
 BUILTIN_REGISTRY: forgejo.ellis.link
- BUILTIN_REGISTRY_ENABLED: "${{ (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false) && 'true' || 'false' }}"
+ BUILTIN_REGISTRY_ENABLED: "${{ ((vars.BUILTIN_REGISTRY_USER && secrets.BUILTIN_REGISTRY_PASSWORD) || (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)) && 'true' || 'false' }}"
+

 jobs:
 define-variables:

From 1b1198771f577f9dad4e34a00aff0e85ecc5879a Mon Sep 17 00:00:00 2001
From: Jade Ellis
Date: Fri, 18 Apr 2025 22:38:17 +0100
Subject: [PATCH 244/310] ci: Move timelord to actions to avoid bad cache invalidations from cargo

---
 .forgejo/workflows/release-image.yml | 20 +++++++++++++++++++-
 docker/Dockerfile | 8 --------
 2 files changed, 19 insertions(+), 9 deletions(-)

diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml
index 0b764110..adf70594 100644
--- a/.forgejo/workflows/release-image.yml
+++ b/.forgejo/workflows/release-image.yml
@@ -89,7 +89,13 @@ jobs:
 uses: actions/checkout@v4
 with:
 persist-credentials: false
-
+ - run: |
 if !
command -v rustup &> /dev/null ; then + curl --proto '=https' --tlsv1.2 --retry 10 --retry-connrefused -fsSL "https://sh.rustup.rs" | sh -s -- --default-toolchain none -y + echo "${CARGO_HOME:-$HOME/.cargo}/bin" >> $GITHUB_PATH + fi + - uses: https://github.com/cargo-bins/cargo-binstall@main + - run: cargo binstall timelord-cli@3.0.1 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Set up QEMU @@ -123,6 +129,18 @@ jobs: echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV - name: Get Git commit timestamps run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV + - name: Set up timelord + uses: actions/cache/restore@v3 + with: + path: /timelord/ + key: timelord-v0 # Cache is already split per runner + - name: Run timelord to set timestamps + run: timelord sync --source-dir . --cache-dir /timelord/ + - name: Save timelord + uses: actions/cache/save@v3 + with: + path: /timelord/ + key: timelord-v0 - name: Build and push Docker image by digest id: build uses: docker/build-push-action@v6 diff --git a/docker/Dockerfile b/docker/Dockerfile index 10f54d94..536af632 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -44,15 +44,11 @@ ENV CARGO_SBOM_VERSION=0.9.1 # renovate: datasource=crate depName=lddtree ENV LDDTREE_VERSION=0.3.7 -# renovate: datasource=crate depName=timelord-cli -ENV TIMELORD_VERSION=3.0.1 - # Install unpackaged tools RUN < Date: Sun, 6 Apr 2025 05:52:48 +0000 Subject: [PATCH 245/310] relax Send requirement on some drier stream extensions Signed-off-by: Jason Volk --- src/core/utils/stream/expect.rs | 2 +- src/core/utils/stream/ready.rs | 4 ++-- src/core/utils/stream/try_ready.rs | 8 ++++---- src/core/utils/stream/try_tools.rs | 8 ++++---- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/src/core/utils/stream/expect.rs b/src/core/utils/stream/expect.rs index 3509bb83..ec572714 100644 --- a/src/core/utils/stream/expect.rs +++ b/src/core/utils/stream/expect.rs @@ -10,7 +10,7 @@ pub trait TryExpect<'a, Item> { impl<'a, T, Item> TryExpect<'a, Item> for T where - T: Stream> + TryStream + Send + 'a, + T: Stream> + Send + TryStream + 'a, Item: 'a, { #[inline] diff --git a/src/core/utils/stream/ready.rs b/src/core/utils/stream/ready.rs index dce7d378..38feaf64 100644 --- a/src/core/utils/stream/ready.rs +++ b/src/core/utils/stream/ready.rs @@ -16,7 +16,7 @@ use futures::{ /// This interface is not necessarily complete; feel free to add as-needed. pub trait ReadyExt where - Self: Stream + Send + Sized, + Self: Stream + Sized, { fn ready_all(self, f: F) -> All, impl FnMut(Item) -> Ready> where @@ -93,7 +93,7 @@ where impl ReadyExt for S where - S: Stream + Send + Sized, + S: Stream + Sized, { #[inline] fn ready_all(self, f: F) -> All, impl FnMut(Item) -> Ready> diff --git a/src/core/utils/stream/try_ready.rs b/src/core/utils/stream/try_ready.rs index 611c177f..287fa1e1 100644 --- a/src/core/utils/stream/try_ready.rs +++ b/src/core/utils/stream/try_ready.rs @@ -13,8 +13,8 @@ use crate::Result; /// This interface is not necessarily complete; feel free to add as-needed. 
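
The relaxed bounds above mean the ready_* combinators no longer force the underlying stream to be Send. A minimal sketch of what this permits, assuming the crate's ReadyExt trait is in scope; the stream below is deliberately !Send because it yields Rc values, and the helper name is purely illustrative:

    use std::rc::Rc;
    use futures::{StreamExt, stream};

    // Sums the positive values of a !Send stream using the
    // synchronous-predicate filter from ReadyExt.
    async fn sum_positive(values: Vec<Rc<i64>>) -> i64 {
        stream::iter(values)
            .ready_filter(|v| **v > 0)
            .fold(0, |acc, v| async move { acc + *v })
            .await
    }
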
pub trait TryReadyExt where - S: TryStream> + Send + ?Sized, - Self: TryStream + Send + Sized, + S: TryStream> + ?Sized, + Self: TryStream + Sized, { fn ready_and_then( self, @@ -67,8 +67,8 @@ where impl TryReadyExt for S where - S: TryStream> + Send + ?Sized, - Self: TryStream + Send + Sized, + S: TryStream> + ?Sized, + Self: TryStream + Sized, { #[inline] fn ready_and_then( diff --git a/src/core/utils/stream/try_tools.rs b/src/core/utils/stream/try_tools.rs index ea3b50fc..417806fc 100644 --- a/src/core/utils/stream/try_tools.rs +++ b/src/core/utils/stream/try_tools.rs @@ -8,8 +8,8 @@ use crate::Result; /// TryStreamTools pub trait TryTools where - S: TryStream> + Send + ?Sized, - Self: TryStream + Send + Sized, + S: TryStream> + ?Sized, + Self: TryStream + Sized, { fn try_take( self, @@ -23,8 +23,8 @@ where impl TryTools for S where - S: TryStream> + Send + ?Sized, - Self: TryStream + Send + Sized, + S: TryStream> + ?Sized, + Self: TryStream + Sized, { #[inline] fn try_take( From 75fb19a5cacf853740fb2cfd016c435c319e5e5f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 6 Apr 2025 05:42:27 +0000 Subject: [PATCH 246/310] add ready_find() stream extension Signed-off-by: Jason Volk --- src/core/utils/stream/ready.rs | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/src/core/utils/stream/ready.rs b/src/core/utils/stream/ready.rs index 38feaf64..be4d1b25 100644 --- a/src/core/utils/stream/ready.rs +++ b/src/core/utils/stream/ready.rs @@ -2,7 +2,7 @@ #![allow(clippy::type_complexity)] use futures::{ - future::{Ready, ready}, + future::{FutureExt, Ready, ready}, stream::{ All, Any, Filter, FilterMap, Fold, ForEach, Scan, SkipWhile, Stream, StreamExt, TakeWhile, }, @@ -26,6 +26,12 @@ where where F: Fn(Item) -> bool; + fn ready_find<'a, F>(self, f: F) -> impl Future> + Send + where + Self: Send + Unpin + 'a, + F: Fn(&Item) -> bool + Send + 'a, + Item: Send; + fn ready_filter<'a, F>( self, f: F, @@ -111,6 +117,19 @@ where self.any(move |t| ready(f(t))) } + #[inline] + fn ready_find<'a, F>(self, f: F) -> impl Future> + Send + where + Self: Send + Unpin + 'a, + F: Fn(&Item) -> bool + Send + 'a, + Item: Send, + { + self.ready_filter(f) + .take(1) + .into_future() + .map(|(curr, _next)| curr) + } + #[inline] fn ready_filter<'a, F>( self, From d8b56c9c35a953aed756f4c6374109219dfdaf77 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 6 Apr 2025 19:09:07 +0000 Subject: [PATCH 247/310] add ReadyEq future extension Signed-off-by: Jason Volk --- src/core/utils/future/bool_ext.rs | 48 +++++++++++++-------------- src/core/utils/future/mod.rs | 2 ++ src/core/utils/future/ready_eq_ext.rs | 25 ++++++++++++++ src/core/utils/mod.rs | 2 +- 4 files changed, 52 insertions(+), 25 deletions(-) create mode 100644 src/core/utils/future/ready_eq_ext.rs diff --git a/src/core/utils/future/bool_ext.rs b/src/core/utils/future/bool_ext.rs index c93c7dbc..24f239ff 100644 --- a/src/core/utils/future/bool_ext.rs +++ b/src/core/utils/future/bool_ext.rs @@ -22,30 +22,6 @@ where Self: Sized + Unpin; } -pub async fn and(args: I) -> impl Future + Send -where - I: Iterator + Send, - F: Future + Send, -{ - type Result = crate::Result<(), ()>; - - let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(())))); - - try_join_all(args).map(|result| result.is_ok()) -} - -pub async fn or(args: I) -> impl Future + Send -where - I: Iterator + Send, - F: Future + Send + Unpin, -{ - type Result = crate::Result<(), ()>; - - let args = args.map(|a| a.map(|a| 
a.then_some(()).ok_or(Result::Err(())))); - - select_ok(args).map(|result| result.is_ok()) -} - impl BoolExt for Fut where Fut: Future + Send, @@ -80,3 +56,27 @@ where try_select(a, b).map(|result| result.is_ok()) } } + +pub async fn and(args: I) -> impl Future + Send +where + I: Iterator + Send, + F: Future + Send, +{ + type Result = crate::Result<(), ()>; + + let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(())))); + + try_join_all(args).map(|result| result.is_ok()) +} + +pub async fn or(args: I) -> impl Future + Send +where + I: Iterator + Send, + F: Future + Send + Unpin, +{ + type Result = crate::Result<(), ()>; + + let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(())))); + + select_ok(args).map(|result| result.is_ok()) +} diff --git a/src/core/utils/future/mod.rs b/src/core/utils/future/mod.rs index 4edd0102..d896e66d 100644 --- a/src/core/utils/future/mod.rs +++ b/src/core/utils/future/mod.rs @@ -2,10 +2,12 @@ mod bool_ext; mod ext_ext; mod option_ext; mod option_stream; +mod ready_eq_ext; mod try_ext_ext; pub use bool_ext::{BoolExt, and, or}; pub use ext_ext::ExtExt; pub use option_ext::OptionExt; pub use option_stream::OptionStream; +pub use ready_eq_ext::ReadyEqExt; pub use try_ext_ext::TryExtExt; diff --git a/src/core/utils/future/ready_eq_ext.rs b/src/core/utils/future/ready_eq_ext.rs new file mode 100644 index 00000000..1625adae --- /dev/null +++ b/src/core/utils/future/ready_eq_ext.rs @@ -0,0 +1,25 @@ +//! Future extension for Partial Equality against present value + +use futures::{Future, FutureExt}; + +pub trait ReadyEqExt +where + Self: Future + Send + Sized, + T: PartialEq + Send + Sync, +{ + fn eq(self, t: &T) -> impl Future + Send; + + fn ne(self, t: &T) -> impl Future + Send; +} + +impl ReadyEqExt for Fut +where + Fut: Future + Send + Sized, + T: PartialEq + Send + Sync, +{ + #[inline] + fn eq(self, t: &T) -> impl Future + Send { self.map(move |r| r.eq(t)) } + + #[inline] + fn ne(self, t: &T) -> impl Future + Send { self.map(move |r| r.ne(t)) } +} diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 117fb739..5e6f2868 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -28,7 +28,7 @@ pub use self::{ bool::BoolExt, bytes::{increment, u64_from_bytes, u64_from_u8, u64_from_u8x8}, debug::slice_truncated as debug_slice_truncated, - future::TryExtExt as TryFutureExtExt, + future::{BoolExt as FutureBoolExt, OptionStream, TryExtExt as TryFutureExtExt}, hash::sha256::delimited as calculate_hash, html::Escape as HtmlEscape, json::{deserialize_from_str, to_canonical_object}, From e7c3f783775cde5b606005e24220c008f657b970 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 6 Apr 2025 06:39:45 +0000 Subject: [PATCH 248/310] modernize state_res w/ stream extensions Signed-off-by: Jason Volk --- src/core/matrix/state_res/mod.rs | 220 ++++++++++++++++--------------- 1 file changed, 111 insertions(+), 109 deletions(-) diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs index 93c00d15..ce6b7e89 100644 --- a/src/core/matrix/state_res/mod.rs +++ b/src/core/matrix/state_res/mod.rs @@ -15,11 +15,10 @@ use std::{ borrow::Borrow, cmp::{Ordering, Reverse}, collections::{BinaryHeap, HashMap, HashSet}, - fmt::Debug, hash::{BuildHasher, Hash}, }; -use futures::{Future, FutureExt, StreamExt, TryFutureExt, TryStreamExt, future, stream}; +use futures::{Future, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, future}; use ruma::{ EventId, Int, MilliSecondsSinceUnixEpoch, RoomVersionId, events::{ @@ 
-37,9 +36,13 @@ pub use self::{ room_version::RoomVersion, }; use crate::{ - debug, + debug, debug_error, matrix::{event::Event, pdu::StateKey}, - trace, warn, + trace, + utils::stream::{ + BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryReadyExt, WidebandExt, + }, + warn, }; /// A mapping of event type and state_key to some value `T`, usually an @@ -112,20 +115,16 @@ where debug!(count = conflicting.len(), "conflicting events"); trace!(map = ?conflicting, "conflicting events"); - let auth_chain_diff = - get_auth_chain_diff(auth_chain_sets).chain(conflicting.into_values().flatten()); + let conflicting_values = conflicting.into_values().flatten().stream(); // `all_conflicted` contains unique items // synapse says `full_set = {eid for eid in full_conflicted_set if eid in // event_map}` - let all_conflicted: HashSet<_> = stream::iter(auth_chain_diff) - // Don't honor events we cannot "verify" - .map(|id| event_exists(id.clone()).map(move |exists| (id, exists))) - .buffer_unordered(parallel_fetches) - .filter_map(|(id, exists)| future::ready(exists.then_some(id))) - .collect() - .boxed() - .await; + let all_conflicted: HashSet<_> = get_auth_chain_diff(auth_chain_sets) + .chain(conflicting_values) + .broad_filter_map(async |id| event_exists(id.clone()).await.then_some(id)) + .collect() + .await; debug!(count = all_conflicted.len(), "full conflicted set"); trace!(set = ?all_conflicted, "full conflicted set"); @@ -135,12 +134,15 @@ where // Get only the control events with a state_key: "" or ban/kick event (sender != // state_key) - let control_events: Vec<_> = stream::iter(all_conflicted.iter()) - .map(|id| is_power_event_id(id, &event_fetch).map(move |is| (id, is))) - .buffer_unordered(parallel_fetches) - .filter_map(|(id, is)| future::ready(is.then_some(id.clone()))) + let control_events: Vec<_> = all_conflicted + .iter() + .stream() + .wide_filter_map(async |id| { + is_power_event_id(id, &event_fetch) + .await + .then_some(id.clone()) + }) .collect() - .boxed() .await; // Sort the control events based on power_level/clock/event_id and @@ -160,10 +162,9 @@ where // Sequentially auth check each control event. let resolved_control = iterative_auth_check( &room_version, - sorted_control_levels.iter(), + sorted_control_levels.iter().stream(), clean.clone(), &event_fetch, - parallel_fetches, ) .await?; @@ -172,36 +173,35 @@ where // At this point the control_events have been resolved we now have to // sort the remaining events using the mainline of the resolved power level. 
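
The broad_*/wide_* helpers used in the hunks above come from the crate's stream extension traits. As a rough equivalence inferred from the call sites they replace (not their exact implementation), broad_filter_map(f) stands for the buffered filter_map pattern sketched here with plain futures combinators; the concurrency width of 32 is an assumed placeholder:

    use futures::{StreamExt, future, stream};

    // Run one async lookup per item with bounded concurrency and keep
    // only the Some(..) results.
    async fn concurrent_filter_map<T, U, F, Fut>(items: Vec<T>, f: F) -> Vec<U>
    where
        F: Fn(T) -> Fut,
        Fut: std::future::Future<Output = Option<U>>,
    {
        stream::iter(items)
            .map(f)
            .buffer_unordered(32)
            .filter_map(future::ready)
            .collect()
            .await
    }
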
- let deduped_power_ev = sorted_control_levels.into_iter().collect::>(); + let deduped_power_ev: HashSet<_> = sorted_control_levels.into_iter().collect(); // This removes the control events that passed auth and more importantly those // that failed auth - let events_to_resolve = all_conflicted + let events_to_resolve: Vec<_> = all_conflicted .iter() .filter(|&id| !deduped_power_ev.contains(id.borrow())) .cloned() - .collect::>(); + .collect(); debug!(count = events_to_resolve.len(), "events left to resolve"); trace!(list = ?events_to_resolve, "events left to resolve"); // This "epochs" power level event - let power_event = resolved_control.get(&(StateEventType::RoomPowerLevels, StateKey::new())); + let power_levels_ty_sk = (StateEventType::RoomPowerLevels, StateKey::new()); + let power_event = resolved_control.get(&power_levels_ty_sk); debug!(event_id = ?power_event, "power event"); let sorted_left_events = - mainline_sort(&events_to_resolve, power_event.cloned(), &event_fetch, parallel_fetches) - .await?; + mainline_sort(&events_to_resolve, power_event.cloned(), &event_fetch).await?; trace!(list = ?sorted_left_events, "events left, sorted"); let mut resolved_state = iterative_auth_check( &room_version, - sorted_left_events.iter(), + sorted_left_events.iter().stream(), resolved_control, // The control events are added to the final resolved state &event_fetch, - parallel_fetches, ) .await?; @@ -265,7 +265,7 @@ where #[allow(clippy::arithmetic_side_effects)] fn get_auth_chain_diff( auth_chain_sets: &[HashSet], -) -> impl Iterator + Send + use +) -> impl Stream + Send + use where Id: Clone + Eq + Hash + Send, Hasher: BuildHasher + Send + Sync, @@ -279,6 +279,7 @@ where id_counts .into_iter() .filter_map(move |(id, count)| (count < num_sets).then_some(id)) + .stream() } /// Events are sorted from "earliest" to "latest". 
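
For reference, the membership rule implemented by get_auth_chain_diff above can be illustrated with a small synchronous sketch; u64 stands in for real event ids, and this mirrors only the count-based filter, not the service code itself:

    use std::collections::{HashMap, HashSet};

    // An id belongs to the difference iff it is missing from at least one
    // of the input sets, i.e. its count is lower than the number of sets.
    fn auth_chain_diff(sets: &[HashSet<u64>]) -> HashSet<u64> {
        let num_sets = sets.len();
        let mut counts: HashMap<u64, usize> = HashMap::new();
        for id in sets.iter().flatten() {
            *counts.entry(*id).or_default() += 1;
        }

        counts
            .into_iter()
            .filter_map(|(id, count)| (count < num_sets).then_some(id))
            .collect()
    }

    // e.g. sets {1, 2} and {2, 3} yield {1, 3}; 2 appears in every set and is dropped.
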
@@ -310,13 +311,15 @@ where } // This is used in the `key_fn` passed to the lexico_topo_sort fn - let event_to_pl = stream::iter(graph.keys()) + let event_to_pl = graph + .keys() + .stream() .map(|event_id| { - get_power_level_for_sender(event_id.clone(), fetch_event, parallel_fetches) + get_power_level_for_sender(event_id.clone(), fetch_event) .map(move |res| res.map(|pl| (event_id, pl))) }) .buffer_unordered(parallel_fetches) - .try_fold(HashMap::new(), |mut event_to_pl, (event_id, pl)| { + .ready_try_fold(HashMap::new(), |mut event_to_pl, (event_id, pl)| { debug!( event_id = event_id.borrow().as_str(), power_level = i64::from(pl), @@ -324,7 +327,7 @@ where ); event_to_pl.insert(event_id.clone(), pl); - future::ok(event_to_pl) + Ok(event_to_pl) }) .boxed() .await?; @@ -475,7 +478,6 @@ where async fn get_power_level_for_sender( event_id: E::Id, fetch_event: &F, - parallel_fetches: usize, ) -> serde_json::Result where F: Fn(E::Id) -> Fut + Sync, @@ -485,19 +487,17 @@ where { debug!("fetch event ({event_id}) senders power level"); - let event = fetch_event(event_id.clone()).await; + let event = fetch_event(event_id).await; - let auth_events = event.as_ref().map(Event::auth_events).into_iter().flatten(); + let auth_events = event.as_ref().map(Event::auth_events); - let pl = stream::iter(auth_events) - .map(|aid| fetch_event(aid.clone())) - .buffer_unordered(parallel_fetches.min(5)) - .filter_map(future::ready) - .collect::>() - .boxed() - .await + let pl = auth_events .into_iter() - .find(|aev| is_type_and_key(aev, &TimelineEventType::RoomPowerLevels, "")); + .flatten() + .stream() + .broadn_filter_map(5, |aid| fetch_event(aid.clone())) + .ready_find(|aev| is_type_and_key(aev, &TimelineEventType::RoomPowerLevels, "")) + .await; let content: PowerLevelsContentFields = match pl { | None => return Ok(int!(0)), @@ -525,34 +525,28 @@ where /// For each `events_to_check` event we gather the events needed to auth it from /// the the `fetch_event` closure and verify each event using the /// `event_auth::auth_check` function. 
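
The ready_find call above (the extension added a few patches earlier) resolves to the first stream item matching a synchronous predicate — here, the m.room.power_levels auth event. A minimal standalone usage sketch, assuming the crate's ReadyExt trait is in scope; the predicate and function name are illustrative only:

    use futures::stream;

    // Returns the first power of two in the input, or None.
    async fn first_power_of_two(values: Vec<u64>) -> Option<u64> {
        stream::iter(values)
            .ready_find(|n| n.is_power_of_two())
            .await
    }
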
-async fn iterative_auth_check<'a, E, F, Fut, I>( +async fn iterative_auth_check<'a, E, F, Fut, S>( room_version: &RoomVersion, - events_to_check: I, + events_to_check: S, unconflicted_state: StateMap, fetch_event: &F, - parallel_fetches: usize, ) -> Result> where F: Fn(E::Id) -> Fut + Sync, Fut: Future> + Send, E::Id: Borrow + Clone + Eq + Ord + Send + Sync + 'a, - I: Iterator + Debug + Send + 'a, + S: Stream + Send + 'a, E: Event + Clone + Send + Sync, { debug!("starting iterative auth check"); - trace!( - list = ?events_to_check, - "events to check" - ); - let events_to_check: Vec<_> = stream::iter(events_to_check) + let events_to_check: Vec<_> = events_to_check .map(Result::Ok) - .map_ok(|event_id| { - fetch_event(event_id.clone()).map(move |result| { - result.ok_or_else(|| Error::NotFound(format!("Failed to find {event_id}"))) - }) + .broad_and_then(async |event_id| { + fetch_event(event_id.clone()) + .await + .ok_or_else(|| Error::NotFound(format!("Failed to find {event_id}"))) }) - .try_buffer_unordered(parallel_fetches) .try_collect() .boxed() .await?; @@ -562,10 +556,10 @@ where .flat_map(|event: &E| event.auth_events().map(Clone::clone)) .collect(); - let auth_events: HashMap = stream::iter(auth_event_ids.into_iter()) - .map(fetch_event) - .buffer_unordered(parallel_fetches) - .filter_map(future::ready) + let auth_events: HashMap = auth_event_ids + .into_iter() + .stream() + .broad_filter_map(fetch_event) .map(|auth_event| (auth_event.event_id().clone(), auth_event)) .collect() .boxed() @@ -574,7 +568,6 @@ where let auth_events = &auth_events; let mut resolved_state = unconflicted_state; for event in &events_to_check { - let event_id = event.event_id(); let state_key = event .state_key() .ok_or_else(|| Error::InvalidPdu("State event had no state key".to_owned()))?; @@ -603,24 +596,22 @@ where } } - stream::iter( - auth_types - .iter() - .filter_map(|key| Some((key, resolved_state.get(key)?))), - ) - .filter_map(|(key, ev_id)| async move { - if let Some(event) = auth_events.get(ev_id.borrow()) { - Some((key, event.clone())) - } else { - Some((key, fetch_event(ev_id.clone()).await?)) - } - }) - .for_each(|(key, event)| { - //TODO: synapse checks "rejected_reason" is None here - auth_state.insert(key.to_owned(), event); - future::ready(()) - }) - .await; + auth_types + .iter() + .stream() + .ready_filter_map(|key| Some((key, resolved_state.get(key)?))) + .filter_map(|(key, ev_id)| async move { + if let Some(event) = auth_events.get(ev_id.borrow()) { + Some((key, event.clone())) + } else { + Some((key, fetch_event(ev_id.clone()).await?)) + } + }) + .ready_for_each(|(key, event)| { + //TODO: synapse checks "rejected_reason" is None here + auth_state.insert(key.to_owned(), event); + }) + .await; debug!("event to check {:?}", event.event_id()); @@ -634,12 +625,25 @@ where future::ready(auth_state.get(&ty.with_state_key(key))) }; - if auth_check(room_version, &event, current_third_party.as_ref(), fetch_state).await? { - // add event to resolved state map - resolved_state.insert(event.event_type().with_state_key(state_key), event_id.clone()); - } else { - // synapse passes here on AuthError. We do not add this event to resolved_state. 
- warn!("event {event_id} failed the authentication check"); + let auth_result = + auth_check(room_version, &event, current_third_party.as_ref(), fetch_state).await; + + match auth_result { + | Ok(true) => { + // add event to resolved state map + resolved_state.insert( + event.event_type().with_state_key(state_key), + event.event_id().clone(), + ); + }, + | Ok(false) => { + // synapse passes here on AuthError. We do not add this event to resolved_state. + warn!("event {} failed the authentication check", event.event_id()); + }, + | Err(e) => { + debug_error!("event {} failed the authentication check: {e}", event.event_id()); + return Err(e); + }, } } @@ -659,7 +663,6 @@ async fn mainline_sort( to_sort: &[E::Id], resolved_power_level: Option, fetch_event: &F, - parallel_fetches: usize, ) -> Result> where F: Fn(E::Id) -> Fut + Sync, @@ -682,11 +685,13 @@ where let event = fetch_event(p.clone()) .await .ok_or_else(|| Error::NotFound(format!("Failed to find {p}")))?; + pl = None; for aid in event.auth_events() { let ev = fetch_event(aid.clone()) .await .ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?; + if is_type_and_key(&ev, &TimelineEventType::RoomPowerLevels, "") { pl = Some(aid.to_owned()); break; @@ -694,36 +699,32 @@ where } } - let mainline_map = mainline + let mainline_map: HashMap<_, _> = mainline .iter() .rev() .enumerate() .map(|(idx, eid)| ((*eid).clone(), idx)) - .collect::>(); + .collect(); - let order_map = stream::iter(to_sort.iter()) - .map(|ev_id| { - fetch_event(ev_id.clone()).map(move |event| event.map(|event| (event, ev_id))) + let order_map: HashMap<_, _> = to_sort + .iter() + .stream() + .broad_filter_map(async |ev_id| { + fetch_event(ev_id.clone()).await.map(|event| (event, ev_id)) }) - .buffer_unordered(parallel_fetches) - .filter_map(future::ready) - .map(|(event, ev_id)| { + .broad_filter_map(|(event, ev_id)| { get_mainline_depth(Some(event.clone()), &mainline_map, fetch_event) - .map_ok(move |depth| (depth, event, ev_id)) + .map_ok(move |depth| (ev_id, (depth, event.origin_server_ts(), ev_id))) .map(Result::ok) }) - .buffer_unordered(parallel_fetches) - .filter_map(future::ready) - .fold(HashMap::new(), |mut order_map, (depth, event, ev_id)| { - order_map.insert(ev_id, (depth, event.origin_server_ts(), ev_id)); - future::ready(order_map) - }) + .collect() .boxed() .await; // Sort the event_ids by their depth, timestamp and EventId // unwrap is OK order map and sort_event_ids are from to_sort (the same Vec) - let mut sort_event_ids = order_map.keys().map(|&k| k.clone()).collect::>(); + let mut sort_event_ids: Vec<_> = order_map.keys().map(|&k| k.clone()).collect(); + sort_event_ids.sort_by_key(|sort_id| &order_map[sort_id]); Ok(sort_event_ids) @@ -744,6 +745,7 @@ where { while let Some(sort_ev) = event { debug!(event_id = sort_ev.event_id().borrow().as_str(), "mainline"); + let id = sort_ev.event_id(); if let Some(depth) = mainline_map.get(id.borrow()) { return Ok(*depth); @@ -754,6 +756,7 @@ where let aev = fetch_event(aid.clone()) .await .ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?; + if is_type_and_key(&aev, &TimelineEventType::RoomPowerLevels, "") { event = Some(aev); break; @@ -884,7 +887,7 @@ mod tests { zara, }, }; - use crate::debug; + use crate::{debug, utils::stream::IterStream}; async fn test_event_sort() { use futures::future::ready; @@ -915,10 +918,9 @@ mod tests { let resolved_power = super::iterative_auth_check( &RoomVersion::V6, - sorted_power_events.iter(), + sorted_power_events.iter().stream(), HashMap::new(), 
// unconflicted events &fetcher, - 1, ) .await .expect("iterative auth check failed on resolved events"); @@ -932,7 +934,7 @@ mod tests { .get(&(StateEventType::RoomPowerLevels, "".into())) .cloned(); - let sorted_event_ids = super::mainline_sort(&events_to_sort, power_level, &fetcher, 1) + let sorted_event_ids = super::mainline_sort(&events_to_sort, power_level, &fetcher) .await .unwrap(); From 05e65936fa8a522667035b12774ef788944303e7 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 6 Apr 2025 20:30:15 +0000 Subject: [PATCH 249/310] modest cleanup of snake sync service related Signed-off-by: Jason Volk --- src/api/client/sync/v4.rs | 83 +++++++-------- src/api/client/sync/v5.rs | 55 ++++------ src/service/sync/mod.rs | 205 +++++++++++++++++--------------------- 3 files changed, 143 insertions(+), 200 deletions(-) diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index f7edb8c0..55faf420 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -6,22 +6,23 @@ use std::{ use axum::extract::State; use conduwuit::{ - Error, PduCount, PduEvent, Result, debug, error, extract_variant, + Err, Error, PduCount, PduEvent, Result, debug, error, extract_variant, utils::{ BoolExt, IterStream, ReadyExt, TryFutureExtExt, math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, }, warn, }; +use conduwuit_service::{ + rooms::read_receipt::pack_receipts, + sync::{into_db_key, into_snake_key}, +}; use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::{ MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, - api::client::{ - error::ErrorKind, - sync::sync_events::{ - self, DeviceLists, UnreadNotificationsCount, - v4::{SlidingOp, SlidingSyncRoomHero}, - }, + api::client::sync::sync_events::{ + self, DeviceLists, UnreadNotificationsCount, + v4::{SlidingOp, SlidingSyncRoomHero}, }, events::{ AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, @@ -31,7 +32,6 @@ use ruma::{ serde::Raw, uint, }; -use service::rooms::read_receipt::pack_receipts; use super::{load_timeline, share_encrypted_room}; use crate::{ @@ -50,10 +50,11 @@ pub(crate) async fn sync_events_v4_route( ) -> Result { debug_assert!(DEFAULT_BUMP_TYPES.is_sorted(), "DEFAULT_BUMP_TYPES is not sorted"); let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); let mut body = body.body; + // Setup watchers, so if there's no response, we can wait for them - let watcher = services.sync.watch(sender_user, &sender_device); + let watcher = services.sync.watch(sender_user, sender_device); let next_batch = services.globals.next_count()?; @@ -68,33 +69,21 @@ pub(crate) async fn sync_events_v4_route( .and_then(|string| string.parse().ok()) .unwrap_or(0); - if globalsince != 0 - && !services - .sync - .remembered(sender_user.clone(), sender_device.clone(), conn_id.clone()) - { + let db_key = into_db_key(sender_user, sender_device, conn_id.clone()); + if globalsince != 0 && !services.sync.remembered(&db_key) { debug!("Restarting sync stream because it was gone from the database"); - return Err(Error::Request( - ErrorKind::UnknownPos, - "Connection data lost since last time".into(), - http::StatusCode::BAD_REQUEST, - )); + return Err!(Request(UnknownPos("Connection data lost since last time"))); } if globalsince == 0 { - services.sync.forget_sync_request_connection( - sender_user.clone(), - 
sender_device.clone(), - conn_id.clone(), - ); + services.sync.forget_sync_request_connection(&db_key); } // Get sticky parameters from cache - let known_rooms = services.sync.update_sync_request_with_cache( - sender_user.clone(), - sender_device.clone(), - &mut body, - ); + let snake_key = into_snake_key(sender_user, sender_device, conn_id.clone()); + let known_rooms = services + .sync + .update_sync_request_with_cache(&snake_key, &mut body); let all_joined_rooms: Vec<_> = services .rooms @@ -136,7 +125,7 @@ pub(crate) async fn sync_events_v4_route( if body.extensions.to_device.enabled.unwrap_or(false) { services .users - .remove_to_device_events(sender_user, &sender_device, globalsince) + .remove_to_device_events(sender_user, sender_device, globalsince) .await; } @@ -261,7 +250,7 @@ pub(crate) async fn sync_events_v4_route( if let Some(Ok(user_id)) = pdu.state_key.as_deref().map(UserId::parse) { - if user_id == *sender_user { + if user_id == sender_user { continue; } @@ -299,7 +288,7 @@ pub(crate) async fn sync_events_v4_route( .state_cache .room_members(room_id) // Don't send key updates from the sender to the sender - .ready_filter(|user_id| sender_user != user_id) + .ready_filter(|&user_id| sender_user != user_id) // Only send keys if the sender doesn't share an encrypted room with the target // already .filter_map(|user_id| { @@ -425,10 +414,9 @@ pub(crate) async fn sync_events_v4_route( }); if let Some(conn_id) = &body.conn_id { + let db_key = into_db_key(sender_user, sender_device, conn_id); services.sync.update_sync_known_rooms( - sender_user, - &sender_device, - conn_id.clone(), + &db_key, list_id.clone(), new_known_rooms, globalsince, @@ -478,23 +466,20 @@ pub(crate) async fn sync_events_v4_route( } if let Some(conn_id) = &body.conn_id { + let db_key = into_db_key(sender_user, sender_device, conn_id); services.sync.update_sync_known_rooms( - sender_user, - &sender_device, - conn_id.clone(), + &db_key, "subscriptions".to_owned(), known_subscription_rooms, globalsince, ); } - if let Some(conn_id) = &body.conn_id { - services.sync.update_sync_subscriptions( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - body.room_subscriptions, - ); + if let Some(conn_id) = body.conn_id.clone() { + let db_key = into_db_key(sender_user, sender_device, conn_id); + services + .sync + .update_sync_subscriptions(&db_key, body.room_subscriptions); } let mut rooms = BTreeMap::new(); @@ -648,7 +633,7 @@ pub(crate) async fn sync_events_v4_route( .rooms .state_cache .room_members(room_id) - .ready_filter(|member| member != sender_user) + .ready_filter(|&member| member != sender_user) .filter_map(|user_id| { services .rooms @@ -787,7 +772,7 @@ pub(crate) async fn sync_events_v4_route( .users .get_to_device_events( sender_user, - &sender_device, + sender_device, Some(globalsince), Some(next_batch), ) @@ -805,7 +790,7 @@ pub(crate) async fn sync_events_v4_route( }, device_one_time_keys_count: services .users - .count_one_time_keys(sender_user, &sender_device) + .count_one_time_keys(sender_user, sender_device) .await, // Fallback keys are not yet supported device_unused_fallback_key_types: None, diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index 684752ec..00a2d18d 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -6,7 +6,7 @@ use std::{ use axum::extract::State; use conduwuit::{ - Error, Result, debug, error, extract_variant, + Err, Error, Result, error, extract_variant, matrix::{ TypeStateKey, pdu::{PduCount, PduEvent}, @@ -18,14 +18,11 @@ use 
conduwuit::{ }, warn, }; -use conduwuit_service::rooms::read_receipt::pack_receipts; +use conduwuit_service::{rooms::read_receipt::pack_receipts, sync::into_snake_key}; use futures::{FutureExt, StreamExt, TryFutureExt}; use ruma::{ DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, - api::client::{ - error::ErrorKind, - sync::sync_events::{self, DeviceLists, UnreadNotificationsCount}, - }, + api::client::sync::sync_events::{self, DeviceLists, UnreadNotificationsCount}, events::{ AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType, room::member::{MembershipState, RoomMemberEventContent}, @@ -74,35 +71,23 @@ pub(crate) async fn sync_events_v5_route( .and_then(|string| string.parse().ok()) .unwrap_or(0); - if globalsince != 0 - && !services.sync.snake_connection_cached( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - ) { - debug!("Restarting sync stream because it was gone from the database"); - return Err(Error::Request( - ErrorKind::UnknownPos, - "Connection data lost since last time".into(), - http::StatusCode::BAD_REQUEST, - )); + let snake_key = into_snake_key(sender_user, sender_device, conn_id); + + if globalsince != 0 && !services.sync.snake_connection_cached(&snake_key) { + return Err!(Request(UnknownPos( + "Connection data unknown to server; restarting sync stream." + ))); } // Client / User requested an initial sync if globalsince == 0 { - services.sync.forget_snake_sync_connection( - sender_user.clone(), - sender_device.clone(), - conn_id.clone(), - ); + services.sync.forget_snake_sync_connection(&snake_key); } // Get sticky parameters from cache - let known_rooms = services.sync.update_snake_sync_request_with_cache( - sender_user.clone(), - sender_device.clone(), - &mut body, - ); + let known_rooms = services + .sync + .update_snake_sync_request_with_cache(&snake_key, &mut body); let all_joined_rooms: Vec<_> = services .rooms @@ -254,11 +239,10 @@ async fn fetch_subscriptions( // body.room_subscriptions.remove(&r); //} - if let Some(conn_id) = &body.conn_id { + if let Some(conn_id) = body.conn_id.clone() { + let snake_key = into_snake_key(sender_user, sender_device, conn_id); services.sync.update_snake_sync_known_rooms( - sender_user, - sender_device, - conn_id.clone(), + &snake_key, "subscriptions".to_owned(), known_subscription_rooms, globalsince, @@ -340,11 +324,10 @@ async fn handle_lists<'a>( count: ruma_from_usize(active_rooms.len()), }); - if let Some(conn_id) = &body.conn_id { + if let Some(conn_id) = body.conn_id.clone() { + let snake_key = into_snake_key(sender_user, sender_device, conn_id); services.sync.update_snake_sync_known_rooms( - sender_user, - sender_device, - conn_id.clone(), + &snake_key, list_id.clone(), new_known_rooms, globalsince, diff --git a/src/service/sync/mod.rs b/src/service/sync/mod.rs index bf2bc142..b095d2c1 100644 --- a/src/service/sync/mod.rs +++ b/src/service/sync/mod.rs @@ -8,7 +8,7 @@ use std::{ use conduwuit::{Result, Server}; use database::Map; use ruma::{ - DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, UserId, + OwnedDeviceId, OwnedRoomId, OwnedUserId, api::client::sync::sync_events::{ self, v4::{ExtensionsConfig, SyncRequestList}, @@ -49,8 +49,8 @@ struct Services { struct SlidingSyncCache { lists: BTreeMap, subscriptions: BTreeMap, - known_rooms: BTreeMap>, /* For every room, the - * roomsince number */ + // For every room, the roomsince number + known_rooms: BTreeMap>, extensions: ExtensionsConfig, } @@ -98,79 +98,35 @@ impl crate::Service for Service { fn name(&self) 
-> &str { crate::service::make_name(std::module_path!()) } } -/// load params from cache if body doesn't contain it, as long as it's allowed -/// in some cases we may need to allow an empty list as an actual value -fn list_or_sticky(target: &mut Vec, cached: &Vec) { - if target.is_empty() { - target.clone_from(cached); - } -} -fn some_or_sticky(target: &mut Option, cached: Option) { - if target.is_none() { - *target = cached; - } -} - impl Service { - pub fn snake_connection_cached( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: Option, - ) -> bool { - self.snake_connections - .lock() - .unwrap() - .contains_key(&(user_id, device_id, conn_id)) - } - - pub fn forget_snake_sync_connection( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: Option, - ) { + pub fn snake_connection_cached(&self, key: &SnakeConnectionsKey) -> bool { self.snake_connections .lock() .expect("locked") - .remove(&(user_id, device_id, conn_id)); + .contains_key(key) } - pub fn remembered( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: String, - ) -> bool { - self.connections - .lock() - .unwrap() - .contains_key(&(user_id, device_id, conn_id)) + pub fn forget_snake_sync_connection(&self, key: &SnakeConnectionsKey) { + self.snake_connections.lock().expect("locked").remove(key); } - pub fn forget_sync_request_connection( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: String, - ) { - self.connections - .lock() - .expect("locked") - .remove(&(user_id, device_id, conn_id)); + pub fn remembered(&self, key: &DbConnectionsKey) -> bool { + self.connections.lock().expect("locked").contains_key(key) + } + + pub fn forget_sync_request_connection(&self, key: &DbConnectionsKey) { + self.connections.lock().expect("locked").remove(key); } pub fn update_snake_sync_request_with_cache( &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, + snake_key: &SnakeConnectionsKey, request: &mut v5::Request, ) -> BTreeMap> { - let conn_id = request.conn_id.clone(); let mut cache = self.snake_connections.lock().expect("locked"); let cached = Arc::clone( cache - .entry((user_id, device_id, conn_id)) + .entry(snake_key.clone()) .or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))), ); let cached = &mut cached.lock().expect("locked"); @@ -268,25 +224,23 @@ impl Service { pub fn update_sync_request_with_cache( &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, + key: &SnakeConnectionsKey, request: &mut sync_events::v4::Request, ) -> BTreeMap> { let Some(conn_id) = request.conn_id.clone() else { return BTreeMap::new(); }; + let key = into_db_key(key.0.clone(), key.1.clone(), conn_id); let mut cache = self.connections.lock().expect("locked"); - let cached = Arc::clone(cache.entry((user_id, device_id, conn_id)).or_insert_with( - || { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }, - )); + let cached = Arc::clone(cache.entry(key).or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + })); let cached = &mut cached.lock().expect("locked"); drop(cache); @@ -371,22 +325,18 @@ impl Service { pub fn update_sync_subscriptions( &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: String, + key: &DbConnectionsKey, subscriptions: BTreeMap, ) { 
let mut cache = self.connections.lock().expect("locked"); - let cached = Arc::clone(cache.entry((user_id, device_id, conn_id)).or_insert_with( - || { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }, - )); + let cached = Arc::clone(cache.entry(key.clone()).or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + })); let cached = &mut cached.lock().expect("locked"); drop(cache); @@ -395,90 +345,81 @@ impl Service { pub fn update_sync_known_rooms( &self, - user_id: &UserId, - device_id: &DeviceId, - conn_id: String, + key: &DbConnectionsKey, list_id: String, new_cached_rooms: BTreeSet, globalsince: u64, ) { let mut cache = self.connections.lock().expect("locked"); - let cached = Arc::clone( - cache - .entry((user_id.to_owned(), device_id.to_owned(), conn_id)) - .or_insert_with(|| { - Arc::new(Mutex::new(SlidingSyncCache { - lists: BTreeMap::new(), - subscriptions: BTreeMap::new(), - known_rooms: BTreeMap::new(), - extensions: ExtensionsConfig::default(), - })) - }), - ); + let cached = Arc::clone(cache.entry(key.clone()).or_insert_with(|| { + Arc::new(Mutex::new(SlidingSyncCache { + lists: BTreeMap::new(), + subscriptions: BTreeMap::new(), + known_rooms: BTreeMap::new(), + extensions: ExtensionsConfig::default(), + })) + })); let cached = &mut cached.lock().expect("locked"); drop(cache); - for (roomid, lastsince) in cached + for (room_id, lastsince) in cached .known_rooms .entry(list_id.clone()) .or_default() .iter_mut() { - if !new_cached_rooms.contains(roomid) { + if !new_cached_rooms.contains(room_id) { *lastsince = 0; } } let list = cached.known_rooms.entry(list_id).or_default(); - for roomid in new_cached_rooms { - list.insert(roomid, globalsince); + for room_id in new_cached_rooms { + list.insert(room_id, globalsince); } } pub fn update_snake_sync_known_rooms( &self, - user_id: &UserId, - device_id: &DeviceId, - conn_id: String, + key: &SnakeConnectionsKey, list_id: String, new_cached_rooms: BTreeSet, globalsince: u64, ) { + assert!(key.2.is_some(), "Some(conn_id) required for this call"); let mut cache = self.snake_connections.lock().expect("locked"); let cached = Arc::clone( cache - .entry((user_id.to_owned(), device_id.to_owned(), Some(conn_id))) + .entry(key.clone()) .or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))), ); let cached = &mut cached.lock().expect("locked"); drop(cache); - for (roomid, lastsince) in cached + for (room_id, lastsince) in cached .known_rooms .entry(list_id.clone()) .or_default() .iter_mut() { - if !new_cached_rooms.contains(roomid) { + if !new_cached_rooms.contains(room_id) { *lastsince = 0; } } let list = cached.known_rooms.entry(list_id).or_default(); - for roomid in new_cached_rooms { - list.insert(roomid, globalsince); + for room_id in new_cached_rooms { + list.insert(room_id, globalsince); } } pub fn update_snake_sync_subscriptions( &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, - conn_id: Option, + key: &SnakeConnectionsKey, subscriptions: BTreeMap, ) { let mut cache = self.snake_connections.lock().expect("locked"); let cached = Arc::clone( cache - .entry((user_id, device_id, conn_id)) + .entry(key.clone()) .or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))), ); let cached = &mut cached.lock().expect("locked"); @@ -487,3 
+428,37 @@ impl Service { cached.subscriptions = subscriptions; } } + +#[inline] +pub fn into_snake_key(user_id: U, device_id: D, conn_id: C) -> SnakeConnectionsKey +where + U: Into, + D: Into, + C: Into>, +{ + (user_id.into(), device_id.into(), conn_id.into()) +} + +#[inline] +pub fn into_db_key(user_id: U, device_id: D, conn_id: C) -> DbConnectionsKey +where + U: Into, + D: Into, + C: Into, +{ + (user_id.into(), device_id.into(), conn_id.into()) +} + +/// load params from cache if body doesn't contain it, as long as it's allowed +/// in some cases we may need to allow an empty list as an actual value +fn list_or_sticky(target: &mut Vec, cached: &Vec) { + if target.is_empty() { + target.clone_from(cached); + } +} + +fn some_or_sticky(target: &mut Option, cached: Option) { + if target.is_none() { + *target = cached; + } +} From e8a64bb59d13aad7588a1ca28422f463e0f1a302 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 6 Apr 2025 21:59:18 +0000 Subject: [PATCH 250/310] increase snake sync asynchronicity Signed-off-by: Jason Volk --- src/api/client/sync/mod.rs | 36 +----- src/api/client/sync/v3.rs | 17 ++- src/api/client/sync/v4.rs | 38 +++++- src/api/client/sync/v5.rs | 256 ++++++++++++++++++++++++------------- 4 files changed, 212 insertions(+), 135 deletions(-) diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index 14459acf..40370160 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -5,16 +5,12 @@ mod v5; use conduwuit::{ Error, PduCount, Result, matrix::pdu::PduEvent, - utils::{ - IterStream, - stream::{BroadbandExt, ReadyExt, TryIgnore}, - }, + utils::stream::{BroadbandExt, ReadyExt, TryIgnore}, }; use conduwuit_service::Services; use futures::{StreamExt, pin_mut}; use ruma::{ RoomId, UserId, - directory::RoomTypeFilter, events::TimelineEventType::{ self, Beacon, CallInvite, PollStart, RoomEncrypted, RoomMessage, Sticker, }, @@ -87,33 +83,3 @@ async fn share_encrypted_room( }) .await } - -pub(crate) async fn filter_rooms<'a>( - services: &Services, - rooms: &[&'a RoomId], - filter: &[RoomTypeFilter], - negate: bool, -) -> Vec<&'a RoomId> { - rooms - .iter() - .stream() - .filter_map(|r| async move { - let room_type = services.rooms.state_accessor.get_room_type(r).await; - - if room_type.as_ref().is_err_and(|e| !e.is_not_found()) { - return None; - } - - let room_type_filter = RoomTypeFilter::from(room_type.ok()); - - let include = if negate { - !filter.contains(&room_type_filter) - } else { - filter.is_empty() || filter.contains(&room_type_filter) - }; - - include.then_some(r) - }) - .collect() - .await -} diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 24930941..8eac6b66 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -14,8 +14,8 @@ use conduwuit::{ pair_of, ref_at, result::FlatOk, utils::{ - self, BoolExt, IterStream, ReadyExt, TryFutureExtExt, - future::OptionStream, + self, BoolExt, FutureBoolExt, IterStream, ReadyExt, TryFutureExtExt, + future::{OptionStream, ReadyEqExt}, math::ruma_from_u64, stream::{BroadbandExt, Tools, TryExpect, WidebandExt}, }, @@ -32,6 +32,7 @@ use conduwuit_service::{ use futures::{ FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::{OptionFuture, join, join3, join4, join5, try_join, try_join4}, + pin_mut, }; use ruma::{ DeviceId, EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId, @@ -433,10 +434,14 @@ async fn handle_left_room( return Ok(None); } - if !services.rooms.metadata.exists(room_id).await - || 
services.rooms.metadata.is_disabled(room_id).await - || services.rooms.metadata.is_banned(room_id).await - { + let is_not_found = services.rooms.metadata.exists(room_id).eq(&false); + + let is_disabled = services.rooms.metadata.is_disabled(room_id); + + let is_banned = services.rooms.metadata.is_banned(room_id); + + pin_mut!(is_not_found, is_disabled, is_banned); + if is_not_found.or(is_disabled).or(is_banned).await { // This is just a rejected invite, not a room we know // Insert a leave event anyways for the client let event = PduEvent { diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index 55faf420..f153b2da 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -7,6 +7,7 @@ use std::{ use axum::extract::State; use conduwuit::{ Err, Error, PduCount, PduEvent, Result, debug, error, extract_variant, + matrix::TypeStateKey, utils::{ BoolExt, IterStream, ReadyExt, TryFutureExtExt, math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated}, @@ -14,6 +15,7 @@ use conduwuit::{ warn, }; use conduwuit_service::{ + Services, rooms::read_receipt::pack_receipts, sync::{into_db_key, into_snake_key}, }; @@ -24,6 +26,7 @@ use ruma::{ self, DeviceLists, UnreadNotificationsCount, v4::{SlidingOp, SlidingSyncRoomHero}, }, + directory::RoomTypeFilter, events::{ AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType::*, @@ -36,10 +39,11 @@ use ruma::{ use super::{load_timeline, share_encrypted_room}; use crate::{ Ruma, - client::{DEFAULT_BUMP_TYPES, filter_rooms, ignored_filter, sync::v5::TodoRooms}, + client::{DEFAULT_BUMP_TYPES, ignored_filter}, }; -pub(crate) const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync"; +type TodoRooms = BTreeMap, usize, u64)>; +const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync"; /// POST `/_matrix/client/unstable/org.matrix.msc3575/sync` /// @@ -802,3 +806,33 @@ pub(crate) async fn sync_events_v4_route( delta_token: None, }) } + +async fn filter_rooms<'a>( + services: &Services, + rooms: &[&'a RoomId], + filter: &[RoomTypeFilter], + negate: bool, +) -> Vec<&'a RoomId> { + rooms + .iter() + .stream() + .filter_map(|r| async move { + let room_type = services.rooms.state_accessor.get_room_type(r).await; + + if room_type.as_ref().is_err_and(|e| !e.is_not_found()) { + return None; + } + + let room_type_filter = RoomTypeFilter::from(room_type.ok()); + + let include = if negate { + !filter.contains(&room_type_filter) + } else { + filter.is_empty() || filter.contains(&room_type_filter) + }; + + include.then_some(r) + }) + .collect() + .await +} diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index 00a2d18d..f3fc0f44 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -1,28 +1,35 @@ use std::{ cmp::{self, Ordering}, collections::{BTreeMap, BTreeSet, HashMap, HashSet}, + ops::Deref, time::Duration, }; use axum::extract::State; use conduwuit::{ - Err, Error, Result, error, extract_variant, + Err, Error, Result, error, extract_variant, is_equal_to, matrix::{ TypeStateKey, pdu::{PduCount, PduEvent}, }, trace, utils::{ - BoolExt, IterStream, ReadyExt, TryFutureExtExt, + BoolExt, FutureBoolExt, IterStream, ReadyExt, TryFutureExtExt, + future::ReadyEqExt, math::{ruma_from_usize, usize_from_ruma}, }, warn, }; -use conduwuit_service::{rooms::read_receipt::pack_receipts, sync::into_snake_key}; -use futures::{FutureExt, StreamExt, TryFutureExt}; +use conduwuit_service::{Services, rooms::read_receipt::pack_receipts, sync::into_snake_key}; +use futures::{ + 
FutureExt, Stream, StreamExt, TryFutureExt, + future::{OptionFuture, join3, try_join4}, + pin_mut, +}; use ruma::{ DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId, api::client::sync::sync_events::{self, DeviceLists, UnreadNotificationsCount}, + directory::RoomTypeFilter, events::{ AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType, room::member::{MembershipState, RoomMemberEventContent}, @@ -31,13 +38,15 @@ use ruma::{ uint, }; -use super::{filter_rooms, share_encrypted_room}; +use super::share_encrypted_room; use crate::{ Ruma, client::{DEFAULT_BUMP_TYPES, ignored_filter, sync::load_timeline}, }; type SyncInfo<'a> = (&'a UserId, &'a DeviceId, u64, &'a sync_events::v5::Request); +type TodoRooms = BTreeMap, usize, u64)>; +type KnownRooms = BTreeMap>; /// `POST /_matrix/client/unstable/org.matrix.simplified_msc3575/sync` /// ([MSC4186]) @@ -50,7 +59,7 @@ type SyncInfo<'a> = (&'a UserId, &'a DeviceId, u64, &'a sync_events::v5::Request /// [MSC3575]: https://github.com/matrix-org/matrix-spec-proposals/pull/3575 /// [MSC4186]: https://github.com/matrix-org/matrix-spec-proposals/pull/4186 pub(crate) async fn sync_events_v5_route( - State(services): State, + State(ref services): State, body: Ruma, ) -> Result { debug_assert!(DEFAULT_BUMP_TYPES.is_sorted(), "DEFAULT_BUMP_TYPES is not sorted"); @@ -89,65 +98,77 @@ pub(crate) async fn sync_events_v5_route( .sync .update_snake_sync_request_with_cache(&snake_key, &mut body); - let all_joined_rooms: Vec<_> = services + let all_joined_rooms = services .rooms .state_cache .rooms_joined(sender_user) .map(ToOwned::to_owned) - .collect() - .await; + .collect::>(); - let all_invited_rooms: Vec<_> = services + let all_invited_rooms = services .rooms .state_cache .rooms_invited(sender_user) .map(|r| r.0) - .collect() - .await; + .collect::>(); - let all_knocked_rooms: Vec<_> = services + let all_knocked_rooms = services .rooms .state_cache .rooms_knocked(sender_user) .map(|r| r.0) - .collect() - .await; + .collect::>(); - let all_rooms: Vec<&RoomId> = all_joined_rooms - .iter() - .map(AsRef::as_ref) - .chain(all_invited_rooms.iter().map(AsRef::as_ref)) - .chain(all_knocked_rooms.iter().map(AsRef::as_ref)) - .collect(); + let (all_joined_rooms, all_invited_rooms, all_knocked_rooms) = + join3(all_joined_rooms, all_invited_rooms, all_knocked_rooms).await; - let all_joined_rooms = all_joined_rooms.iter().map(AsRef::as_ref).collect(); - let all_invited_rooms = all_invited_rooms.iter().map(AsRef::as_ref).collect(); + let all_joined_rooms = all_joined_rooms.iter().map(AsRef::as_ref); + let all_invited_rooms = all_invited_rooms.iter().map(AsRef::as_ref); + let all_knocked_rooms = all_knocked_rooms.iter().map(AsRef::as_ref); + let all_rooms = all_joined_rooms + .clone() + .chain(all_invited_rooms.clone()) + .chain(all_knocked_rooms.clone()); let pos = next_batch.clone().to_string(); let mut todo_rooms: TodoRooms = BTreeMap::new(); let sync_info: SyncInfo<'_> = (sender_user, sender_device, globalsince, &body); + + let account_data = collect_account_data(services, sync_info).map(Ok); + + let e2ee = collect_e2ee(services, sync_info, all_joined_rooms.clone()); + + let to_device = collect_to_device(services, sync_info, next_batch).map(Ok); + + let receipts = collect_receipts(services).map(Ok); + + let (account_data, e2ee, to_device, receipts) = + try_join4(account_data, e2ee, to_device, receipts).await?; + + let extensions = sync_events::v5::response::Extensions { + account_data, + e2ee, + to_device, + receipts, + typing: 
sync_events::v5::response::Typing::default(), + }; + let mut response = sync_events::v5::Response { txn_id: body.txn_id.clone(), pos, lists: BTreeMap::new(), rooms: BTreeMap::new(), - extensions: sync_events::v5::response::Extensions { - account_data: collect_account_data(services, sync_info).await, - e2ee: collect_e2ee(services, sync_info, &all_joined_rooms).await?, - to_device: collect_to_device(services, sync_info, next_batch).await, - receipts: collect_receipts(services).await, - typing: sync_events::v5::response::Typing::default(), - }, + extensions, }; handle_lists( services, sync_info, - &all_invited_rooms, - &all_joined_rooms, - &all_rooms, + all_invited_rooms.clone(), + all_joined_rooms.clone(), + all_rooms, &mut todo_rooms, &known_rooms, &mut response, @@ -160,7 +181,7 @@ pub(crate) async fn sync_events_v5_route( services, sender_user, next_batch, - &all_invited_rooms, + all_invited_rooms.clone(), &todo_rooms, &mut response, &body, @@ -185,31 +206,33 @@ pub(crate) async fn sync_events_v5_route( } trace!( - rooms=?response.rooms.len(), - account_data=?response.extensions.account_data.rooms.len(), - receipts=?response.extensions.receipts.rooms.len(), + rooms = ?response.rooms.len(), + account_data = ?response.extensions.account_data.rooms.len(), + receipts = ?response.extensions.receipts.rooms.len(), "responding to request with" ); Ok(response) } -type KnownRooms = BTreeMap>; -pub(crate) type TodoRooms = BTreeMap, usize, u64)>; - async fn fetch_subscriptions( - services: crate::State, + services: &Services, (sender_user, sender_device, globalsince, body): SyncInfo<'_>, known_rooms: &KnownRooms, todo_rooms: &mut TodoRooms, ) { let mut known_subscription_rooms = BTreeSet::new(); for (room_id, room) in &body.room_subscriptions { - if !services.rooms.metadata.exists(room_id).await - || services.rooms.metadata.is_disabled(room_id).await - || services.rooms.metadata.is_banned(room_id).await - { + let not_exists = services.rooms.metadata.exists(room_id).eq(&false); + + let is_disabled = services.rooms.metadata.is_disabled(room_id); + + let is_banned = services.rooms.metadata.is_banned(room_id); + + pin_mut!(not_exists, is_disabled, is_banned); + if not_exists.or(is_disabled).or(is_banned).await { continue; } + let todo_room = todo_rooms .entry(room_id.clone()) @@ -251,27 +274,39 @@ async fn fetch_subscriptions( } #[allow(clippy::too_many_arguments)] -async fn handle_lists<'a>( - services: crate::State, +async fn handle_lists<'a, Rooms, AllRooms>( + services: &Services, (sender_user, sender_device, globalsince, body): SyncInfo<'_>, - all_invited_rooms: &Vec<&'a RoomId>, - all_joined_rooms: &Vec<&'a RoomId>, - all_rooms: &Vec<&'a RoomId>, + all_invited_rooms: Rooms, + all_joined_rooms: Rooms, + all_rooms: AllRooms, todo_rooms: &'a mut TodoRooms, known_rooms: &'a KnownRooms, response: &'_ mut sync_events::v5::Response, -) -> KnownRooms { +) -> KnownRooms +where + Rooms: Iterator + Clone + Send + 'a, + AllRooms: Iterator + Clone + Send + 'a, +{ for (list_id, list) in &body.lists { - let active_rooms = match list.filters.clone().and_then(|f| f.is_invite) { - | Some(true) => all_invited_rooms, - | Some(false) => all_joined_rooms, - | None => all_rooms, + let active_rooms: Vec<_> = match list.filters.as_ref().and_then(|f| f.is_invite) { + | None => all_rooms.clone().collect(), + | Some(true) => all_invited_rooms.clone().collect(), + | Some(false) => all_joined_rooms.clone().collect(), }; - let active_rooms = match list.filters.clone().map(|f| f.not_room_types) { - | Some(filter) if 
filter.is_empty() => active_rooms, - | Some(value) => &filter_rooms(&services, active_rooms, &value, true).await, + let active_rooms = match list.filters.as_ref().map(|f| &f.not_room_types) { | None => active_rooms, + | Some(filter) if filter.is_empty() => active_rooms, + | Some(value) => + filter_rooms( + services, + value, + &true, + active_rooms.iter().stream().map(Deref::deref), + ) + .collect() + .await, }; let mut new_known_rooms: BTreeSet = BTreeSet::new(); @@ -289,6 +324,7 @@ async fn handle_lists<'a>( let new_rooms: BTreeSet = room_ids.clone().into_iter().map(From::from).collect(); + new_known_rooms.extend(new_rooms); //new_known_rooms.extend(room_ids..cloned()); for room_id in room_ids { @@ -334,18 +370,22 @@ async fn handle_lists<'a>( ); } } + BTreeMap::default() } -async fn process_rooms( - services: crate::State, +async fn process_rooms<'a, Rooms>( + services: &Services, sender_user: &UserId, next_batch: u64, - all_invited_rooms: &[&RoomId], + all_invited_rooms: Rooms, todo_rooms: &TodoRooms, response: &mut sync_events::v5::Response, body: &sync_events::v5::Request, -) -> Result> { +) -> Result> +where + Rooms: Iterator + Clone + Send + 'a, +{ let mut rooms = BTreeMap::new(); for (room_id, (required_state_request, timeline_limit, roomsince)) in todo_rooms { let roomsincecount = PduCount::Normal(*roomsince); @@ -354,7 +394,7 @@ async fn process_rooms( let mut invite_state = None; let (timeline_pdus, limited); let new_room_id: &RoomId = (*room_id).as_ref(); - if all_invited_rooms.contains(&new_room_id) { + if all_invited_rooms.clone().any(is_equal_to!(new_room_id)) { // TODO: figure out a timestamp we can use for remote invites invite_state = services .rooms @@ -366,7 +406,7 @@ async fn process_rooms( (timeline_pdus, limited) = (Vec::new(), true); } else { (timeline_pdus, limited) = match load_timeline( - &services, + services, sender_user, room_id, roomsincecount, @@ -399,18 +439,17 @@ async fn process_rooms( .rooms .read_receipt .last_privateread_update(sender_user, room_id) - .await > *roomsince; + .await; - let private_read_event = if last_privateread_update { - services - .rooms - .read_receipt - .private_read_get(room_id, sender_user) - .await - .ok() - } else { - None - }; + let private_read_event: OptionFuture<_> = (last_privateread_update > *roomsince) + .then(|| { + services + .rooms + .read_receipt + .private_read_get(room_id, sender_user) + .ok() + }) + .into(); let mut receipts: Vec> = services .rooms @@ -426,7 +465,7 @@ async fn process_rooms( .collect() .await; - if let Some(private_read_event) = private_read_event { + if let Some(private_read_event) = private_read_event.await.flatten() { receipts.push(private_read_event); } @@ -475,7 +514,7 @@ async fn process_rooms( let room_events: Vec<_> = timeline_pdus .iter() .stream() - .filter_map(|item| ignored_filter(&services, item.clone(), sender_user)) + .filter_map(|item| ignored_filter(services, item.clone(), sender_user)) .map(|(_, pdu)| pdu.to_sync_room_event()) .collect() .await; @@ -627,7 +666,7 @@ async fn process_rooms( Ok(rooms) } async fn collect_account_data( - services: crate::State, + services: &Services, (sender_user, _, globalsince, body): (&UserId, &DeviceId, u64, &sync_events::v5::Request), ) -> sync_events::v5::response::AccountData { let mut account_data = sync_events::v5::response::AccountData { @@ -663,16 +702,19 @@ async fn collect_account_data( account_data } -async fn collect_e2ee<'a>( - services: crate::State, +async fn collect_e2ee<'a, Rooms>( + services: &Services, (sender_user, 
sender_device, globalsince, body): ( &UserId, &DeviceId, u64, &sync_events::v5::Request, ), - all_joined_rooms: &'a Vec<&'a RoomId>, -) -> Result { + all_joined_rooms: Rooms, +) -> Result +where + Rooms: Iterator + Send + 'a, +{ if !body.extensions.e2ee.enabled.unwrap_or(false) { return Ok(sync_events::v5::response::E2EE::default()); } @@ -773,7 +815,7 @@ async fn collect_e2ee<'a>( | MembershipState::Join => { // A new user joined an encrypted room if !share_encrypted_room( - &services, + services, sender_user, user_id, Some(room_id), @@ -806,7 +848,7 @@ async fn collect_e2ee<'a>( // Only send keys if the sender doesn't share an encrypted room with the target // already .filter_map(|user_id| { - share_encrypted_room(&services, sender_user, user_id, Some(room_id)) + share_encrypted_room(services, sender_user, user_id, Some(room_id)) .map(|res| res.or_some(user_id.to_owned())) }) .collect::>() @@ -829,7 +871,7 @@ async fn collect_e2ee<'a>( for user_id in left_encrypted_users { let dont_share_encrypted_room = - !share_encrypted_room(&services, sender_user, &user_id, None).await; + !share_encrypted_room(services, sender_user, &user_id, None).await; // If the user doesn't share an encrypted room with the target anymore, we need // to tell them @@ -839,20 +881,22 @@ async fn collect_e2ee<'a>( } Ok(sync_events::v5::response::E2EE { - device_lists: DeviceLists { - changed: device_list_changes.into_iter().collect(), - left: device_list_left.into_iter().collect(), - }, + device_unused_fallback_key_types: None, + device_one_time_keys_count: services .users .count_one_time_keys(sender_user, sender_device) .await, - device_unused_fallback_key_types: None, + + device_lists: DeviceLists { + changed: device_list_changes.into_iter().collect(), + left: device_list_left.into_iter().collect(), + }, }) } async fn collect_to_device( - services: crate::State, + services: &Services, (sender_user, sender_device, globalsince, body): SyncInfo<'_>, next_batch: u64, ) -> Option { @@ -875,7 +919,35 @@ async fn collect_to_device( }) } -async fn collect_receipts(_services: crate::State) -> sync_events::v5::response::Receipts { +async fn collect_receipts(_services: &Services) -> sync_events::v5::response::Receipts { sync_events::v5::response::Receipts { rooms: BTreeMap::new() } // TODO: get explicitly requested read receipts } + +fn filter_rooms<'a, Rooms>( + services: &'a Services, + filter: &'a [RoomTypeFilter], + negate: &'a bool, + rooms: Rooms, +) -> impl Stream + Send + 'a +where + Rooms: Stream + Send + 'a, +{ + rooms.filter_map(async |room_id| { + let room_type = services.rooms.state_accessor.get_room_type(room_id).await; + + if room_type.as_ref().is_err_and(|e| !e.is_not_found()) { + return None; + } + + let room_type_filter = RoomTypeFilter::from(room_type.ok()); + + let include = if *negate { + !filter.contains(&room_type_filter) + } else { + filter.is_empty() || filter.contains(&room_type_filter) + }; + + include.then_some(room_id) + }) +} From eac713a2a91569ed0736aa8c88cfe543b44ec4fb Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 6 Apr 2025 22:50:01 +0000 Subject: [PATCH 251/310] slightly optimize user directory search loop Signed-off-by: Jason Volk --- src/api/client/user_directory.rs | 43 ++++++++++++++++---------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/src/api/client/user_directory.rs b/src/api/client/user_directory.rs index 99b3bb67..748fc049 100644 --- a/src/api/client/user_directory.rs +++ b/src/api/client/user_directory.rs @@ -1,7 +1,10 @@ use axum::extract::State; 
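// The hunk below hoists the `to_lowercase()` of the search term out of the
// per-user closure and rejects non-matching user IDs with a cheap synchronous
// `ready_filter` before any per-user database lookups run. A minimal,
// self-contained sketch of that ordering using plain `futures` combinators
// (the names `search` and `expensive_lookup` are illustrative, not project API):

use futures::{StreamExt, stream};

async fn search(names: Vec<String>, term: &str) -> Vec<String> {
    // Lowercase once, outside the loop, instead of once per candidate.
    let term = term.to_lowercase();

    stream::iter(names)
        // Cheap synchronous pre-filter: nothing is awaited here.
        .filter(|name| std::future::ready(name.to_lowercase().contains(&term)))
        // Only surviving candidates pay for the asynchronous lookup.
        .filter_map(|name| async move { expensive_lookup(&name).await })
        .collect()
        .await
}

async fn expensive_lookup(name: &str) -> Option<String> {
    // Stand-in for a per-user database fetch (displayname, avatar, ...).
    Some(name.to_owned())
}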
use conduwuit::{ Result, - utils::{future::BoolExt, stream::BroadbandExt}, + utils::{ + future::BoolExt, + stream::{BroadbandExt, ReadyExt}, + }, }; use futures::{FutureExt, StreamExt, pin_mut}; use ruma::{ @@ -30,29 +33,21 @@ pub(crate) async fn search_users_route( .map_or(LIMIT_DEFAULT, usize::from) .min(LIMIT_MAX); + let search_term = body.search_term.to_lowercase(); let mut users = services .users .stream() + .ready_filter(|user_id| user_id.as_str().to_lowercase().contains(&search_term)) .map(ToOwned::to_owned) .broad_filter_map(async |user_id| { - let user = search_users::v3::User { - user_id: user_id.clone(), - display_name: services.users.displayname(&user_id).await.ok(), - avatar_url: services.users.avatar_url(&user_id).await.ok(), - }; + let display_name = services.users.displayname(&user_id).await.ok(); - let user_id_matches = user - .user_id - .as_str() - .to_lowercase() - .contains(&body.search_term.to_lowercase()); + let display_name_matches = display_name + .as_deref() + .map(str::to_lowercase) + .is_some_and(|display_name| display_name.contains(&search_term)); - let user_displayname_matches = user.display_name.as_ref().is_some_and(|name| { - name.to_lowercase() - .contains(&body.search_term.to_lowercase()) - }); - - if !user_id_matches && !user_displayname_matches { + if !display_name_matches { return None; } @@ -61,11 +56,11 @@ pub(crate) async fn search_users_route( .state_cache .rooms_joined(&user_id) .map(ToOwned::to_owned) - .any(|room| async move { + .broad_any(async |room_id| { services .rooms .state_accessor - .get_join_rules(&room) + .get_join_rules(&room_id) .map(|rule| matches!(rule, JoinRule::Public)) .await }); @@ -76,8 +71,14 @@ pub(crate) async fn search_users_route( .user_sees_user(sender_user, &user_id); pin_mut!(user_in_public_room, user_sees_user); - - user_in_public_room.or(user_sees_user).await.then_some(user) + user_in_public_room + .or(user_sees_user) + .await + .then_some(search_users::v3::User { + user_id: user_id.clone(), + display_name, + avatar_url: services.users.avatar_url(&user_id).await.ok(), + }) }); let results = users.by_ref().take(limit).collect().await; From 83126cc66765541c88168e0ada63c41ed9f07058 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 9 Apr 2025 03:35:40 +0000 Subject: [PATCH 252/310] propagate better message from RustlsConfig load error. (#734) Signed-off-by: Jason Volk --- src/router/serve/tls.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/router/serve/tls.rs b/src/router/serve/tls.rs index dd46ab53..20b58601 100644 --- a/src/router/serve/tls.rs +++ b/src/router/serve/tls.rs @@ -31,12 +31,14 @@ pub(super) async fn serve( .install_default() .expect("failed to initialise aws-lc-rs rustls crypto provider"); - debug!("Using direct TLS. Certificate path {certs} and certificate private key path {key}",); info!( "Note: It is strongly recommended that you use a reverse proxy instead of running \ conduwuit directly with TLS." ); - let conf = RustlsConfig::from_pem_file(certs, key).await?; + debug!("Using direct TLS. 
Certificate path {certs} and certificate private key path {key}",); + let conf = RustlsConfig::from_pem_file(certs, key) + .await + .map_err(|e| err!(Config("tls", "Failed to load certificates or key: {e}")))?; let mut join_set = JoinSet::new(); let app = app.into_make_service_with_connect_info::(); From b3e5d2f683d633c948cc66b6cec77bbca6952f91 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 8 Apr 2025 04:39:01 +0000 Subject: [PATCH 253/310] remove box ids from admin room command arguments Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 33 +++++++++++----------- src/admin/debug/mod.rs | 32 ++++++++++----------- src/admin/federation/commands.rs | 10 +++---- src/admin/federation/mod.rs | 10 +++---- src/admin/media/commands.rs | 8 +++--- src/admin/media/mod.rs | 8 +++--- src/admin/query/account_data.rs | 18 ++++++------ src/admin/query/appservice.rs | 2 +- src/admin/query/globals.rs | 4 +-- src/admin/query/presence.rs | 4 +-- src/admin/query/pusher.rs | 4 +-- src/admin/query/room_alias.rs | 6 ++-- src/admin/query/room_state_cache.rs | 44 +++++++++++++++-------------- src/admin/query/sending.rs | 20 ++++++------- src/admin/room/alias.rs | 8 ++---- src/admin/room/directory.rs | 6 ++-- src/admin/room/info.rs | 10 +++---- src/admin/room/moderation.rs | 10 +++---- src/admin/user/commands.rs | 10 +++---- src/admin/user/mod.rs | 10 +++---- 20 files changed, 128 insertions(+), 129 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 87ca03a0..6d0e375a 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -17,10 +17,9 @@ use conduwuit::{ }; use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ - CanonicalJsonObject, EventId, OwnedEventId, OwnedRoomOrAliasId, RoomId, RoomVersionId, - ServerName, - api::{client::error::ErrorKind, federation::event::get_room_state}, - events::room::message::RoomMessageEventContent, + CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, + OwnedRoomOrAliasId, OwnedServerName, RoomId, RoomVersionId, + api::federation::event::get_room_state, events::room::message::RoomMessageEventContent, }; use service::rooms::{ short::{ShortEventId, ShortRoomId}, @@ -40,7 +39,7 @@ pub(super) async fn echo(&self, message: Vec) -> Result, + event_id: OwnedEventId, ) -> Result { let Ok(event) = self.services.rooms.timeline.get_pdu_json(&event_id).await else { return Ok(RoomMessageEventContent::notice_plain("Event not found.")); @@ -109,7 +108,7 @@ pub(super) async fn parse_pdu(&self) -> Result { } #[admin_command] -pub(super) async fn get_pdu(&self, event_id: Box) -> Result { +pub(super) async fn get_pdu(&self, event_id: OwnedEventId) -> Result { let mut outlier = false; let mut pdu_json = self .services @@ -173,7 +172,7 @@ pub(super) async fn get_short_pdu( #[admin_command] pub(super) async fn get_remote_pdu_list( &self, - server: Box, + server: OwnedServerName, force: bool, ) -> Result { if !self.services.server.config.allow_federation { @@ -359,7 +358,7 @@ pub(super) async fn get_room_state( } #[admin_command] -pub(super) async fn ping(&self, server: Box) -> Result { +pub(super) async fn ping(&self, server: OwnedServerName) -> Result { if server == self.services.globals.server_name() { return Ok(RoomMessageEventContent::text_plain( "Not allowed to send federation requests to ourselves.", @@ -538,7 +537,7 @@ pub(super) async fn verify_json(&self) -> Result { } #[admin_command] -pub(super) async fn verify_pdu(&self, event_id: Box) -> Result { +pub(super) async fn verify_pdu(&self, 
event_id: OwnedEventId) -> Result { let mut event = self.services.rooms.timeline.get_pdu_json(&event_id).await?; event.remove("event_id"); @@ -556,7 +555,7 @@ pub(super) async fn verify_pdu(&self, event_id: Box) -> Result, + room_id: OwnedRoomId, ) -> Result { if !self .services @@ -585,7 +584,7 @@ pub(super) async fn first_pdu_in_room( #[tracing::instrument(skip(self))] pub(super) async fn latest_pdu_in_room( &self, - room_id: Box, + room_id: OwnedRoomId, ) -> Result { if !self .services @@ -614,8 +613,8 @@ pub(super) async fn latest_pdu_in_room( #[tracing::instrument(skip(self))] pub(super) async fn force_set_room_state_from_server( &self, - room_id: Box, - server_name: Box, + room_id: OwnedRoomId, + server_name: OwnedServerName, ) -> Result { if !self .services @@ -763,8 +762,8 @@ pub(super) async fn force_set_room_state_from_server( #[admin_command] pub(super) async fn get_signing_keys( &self, - server_name: Option>, - notary: Option>, + server_name: Option, + notary: Option, query: bool, ) -> Result { let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into()); @@ -801,7 +800,7 @@ pub(super) async fn get_signing_keys( #[admin_command] pub(super) async fn get_verify_keys( &self, - server_name: Option>, + server_name: Option, ) -> Result { let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into()); @@ -824,7 +823,7 @@ pub(super) async fn get_verify_keys( #[admin_command] pub(super) async fn resolve_true_destination( &self, - server_name: Box, + server_name: OwnedServerName, no_cache: bool, ) -> Result { if !self.services.server.config.allow_federation { diff --git a/src/admin/debug/mod.rs b/src/admin/debug/mod.rs index db04ccf4..9b86f18c 100644 --- a/src/admin/debug/mod.rs +++ b/src/admin/debug/mod.rs @@ -3,7 +3,7 @@ pub(crate) mod tester; use clap::Subcommand; use conduwuit::Result; -use ruma::{EventId, OwnedRoomOrAliasId, RoomId, ServerName}; +use ruma::{OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedServerName}; use service::rooms::short::{ShortEventId, ShortRoomId}; use self::tester::TesterCommand; @@ -20,7 +20,7 @@ pub(super) enum DebugCommand { /// - Get the auth_chain of a PDU GetAuthChain { /// An event ID (the $ character followed by the base64 reference hash) - event_id: Box, + event_id: OwnedEventId, }, /// - Parse and print a PDU from a JSON @@ -35,7 +35,7 @@ pub(super) enum DebugCommand { /// - Retrieve and print a PDU by EventID from the conduwuit database GetPdu { /// An event ID (a $ followed by the base64 reference hash) - event_id: Box, + event_id: OwnedEventId, }, /// - Retrieve and print a PDU by PduId from the conduwuit database @@ -52,11 +52,11 @@ pub(super) enum DebugCommand { /// (following normal event auth rules, handles it as an incoming PDU). GetRemotePdu { /// An event ID (a $ followed by the base64 reference hash) - event_id: Box, + event_id: OwnedEventId, /// Argument for us to attempt to fetch the event from the /// specified remote server. - server: Box, + server: OwnedServerName, }, /// - Same as `get-remote-pdu` but accepts a codeblock newline delimited @@ -64,7 +64,7 @@ pub(super) enum DebugCommand { GetRemotePduList { /// Argument for us to attempt to fetch all the events from the /// specified remote server. - server: Box, + server: OwnedServerName, /// If set, ignores errors, else stops at the first error/failure. #[arg(short, long)] @@ -88,10 +88,10 @@ pub(super) enum DebugCommand { /// - Get and display signing keys from local cache or remote server. 
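// The variants in this enum now take ruma's `Owned*` identifier types directly
// (they implement `FromStr`, so clap can parse them from the command line),
// rather than `Box<EventId>` and friends. A commented, hypothetical sketch of
// the same pattern outside this file — `ExampleCommand` and its fields are
// illustrative only, not part of this patch:
//
//     use clap::Subcommand;
//     use ruma::{OwnedRoomId, OwnedServerName};
//
//     #[derive(Debug, Subcommand)]
//     enum ExampleCommand {
//         /// Fetch a room's state from a remote server.
//         LookupRoom {
//             /// Parsed straight into the owned ID type, no `Box` indirection.
//             room_id: OwnedRoomId,
//             server_name: OwnedServerName,
//         },
//     }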
GetSigningKeys { - server_name: Option>, + server_name: Option, #[arg(long)] - notary: Option>, + notary: Option, #[arg(short, long)] query: bool, @@ -99,14 +99,14 @@ pub(super) enum DebugCommand { /// - Get and display signing keys from local cache or remote server. GetVerifyKeys { - server_name: Option>, + server_name: Option, }, /// - Sends a federation request to the remote server's /// `/_matrix/federation/v1/version` endpoint and measures the latency it /// took for the server to respond Ping { - server: Box, + server: OwnedServerName, }, /// - Forces device lists for all local and remote users to be updated (as @@ -141,21 +141,21 @@ pub(super) enum DebugCommand { /// /// This re-verifies a PDU existing in the database found by ID. VerifyPdu { - event_id: Box, + event_id: OwnedEventId, }, /// - Prints the very first PDU in the specified room (typically /// m.room.create) FirstPduInRoom { /// The room ID - room_id: Box, + room_id: OwnedRoomId, }, /// - Prints the latest ("last") PDU in the specified room (typically a /// message) LatestPduInRoom { /// The room ID - room_id: Box, + room_id: OwnedRoomId, }, /// - Forcefully replaces the room state of our local copy of the specified @@ -174,9 +174,9 @@ pub(super) enum DebugCommand { /// `/_matrix/federation/v1/state/{roomId}`. ForceSetRoomStateFromServer { /// The impacted room ID - room_id: Box, + room_id: OwnedRoomId, /// The server we will use to query the room state for - server_name: Box, + server_name: OwnedServerName, }, /// - Runs a server name through conduwuit's true destination resolution @@ -184,7 +184,7 @@ pub(super) enum DebugCommand { /// /// Useful for debugging well-known issues ResolveTrueDestination { - server_name: Box, + server_name: OwnedServerName, #[arg(short, long)] no_cache: bool, diff --git a/src/admin/federation/commands.rs b/src/admin/federation/commands.rs index 240ffa6a..12ed9c25 100644 --- a/src/admin/federation/commands.rs +++ b/src/admin/federation/commands.rs @@ -3,19 +3,19 @@ use std::fmt::Write; use conduwuit::Result; use futures::StreamExt; use ruma::{ - OwnedRoomId, RoomId, ServerName, UserId, events::room::message::RoomMessageEventContent, + OwnedRoomId, OwnedServerName, OwnedUserId, events::room::message::RoomMessageEventContent, }; use crate::{admin_command, get_room_info}; #[admin_command] -pub(super) async fn disable_room(&self, room_id: Box) -> Result { +pub(super) async fn disable_room(&self, room_id: OwnedRoomId) -> Result { self.services.rooms.metadata.disable_room(&room_id, true); Ok(RoomMessageEventContent::text_plain("Room disabled.")) } #[admin_command] -pub(super) async fn enable_room(&self, room_id: Box) -> Result { +pub(super) async fn enable_room(&self, room_id: OwnedRoomId) -> Result { self.services.rooms.metadata.disable_room(&room_id, false); Ok(RoomMessageEventContent::text_plain("Room enabled.")) } @@ -42,7 +42,7 @@ pub(super) async fn incoming_federation(&self) -> Result, + server_name: OwnedServerName, ) -> Result { let response = self .services @@ -90,7 +90,7 @@ pub(super) async fn fetch_support_well_known( #[admin_command] pub(super) async fn remote_user_in_rooms( &self, - user_id: Box, + user_id: OwnedUserId, ) -> Result { if user_id.server_name() == self.services.server.name { return Ok(RoomMessageEventContent::text_plain( diff --git a/src/admin/federation/mod.rs b/src/admin/federation/mod.rs index 3adfd459..2c539adc 100644 --- a/src/admin/federation/mod.rs +++ b/src/admin/federation/mod.rs @@ -2,7 +2,7 @@ mod commands; use clap::Subcommand; use conduwuit::Result; -use 
ruma::{RoomId, ServerName, UserId}; +use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId}; use crate::admin_command_dispatch; @@ -14,12 +14,12 @@ pub(super) enum FederationCommand { /// - Disables incoming federation handling for a room. DisableRoom { - room_id: Box, + room_id: OwnedRoomId, }, /// - Enables incoming federation handling for a room again. EnableRoom { - room_id: Box, + room_id: OwnedRoomId, }, /// - Fetch `/.well-known/matrix/support` from the specified server @@ -32,11 +32,11 @@ pub(super) enum FederationCommand { /// moderation, and security inquiries. This command provides a way to /// easily fetch that information. FetchSupportWellKnown { - server_name: Box, + server_name: OwnedServerName, }, /// - Lists all the rooms we share/track with the specified *remote* user RemoteUserInRooms { - user_id: Box, + user_id: OwnedUserId, }, } diff --git a/src/admin/media/commands.rs b/src/admin/media/commands.rs index aeefa9f2..c8364969 100644 --- a/src/admin/media/commands.rs +++ b/src/admin/media/commands.rs @@ -5,7 +5,7 @@ use conduwuit::{ }; use conduwuit_service::media::Dim; use ruma::{ - EventId, Mxc, MxcUri, OwnedMxcUri, OwnedServerName, ServerName, + Mxc, OwnedEventId, OwnedMxcUri, OwnedServerName, events::room::message::RoomMessageEventContent, }; @@ -14,8 +14,8 @@ use crate::{admin_command, utils::parse_local_user_id}; #[admin_command] pub(super) async fn delete( &self, - mxc: Option>, - event_id: Option>, + mxc: Option, + event_id: Option, ) -> Result { if event_id.is_some() && mxc.is_some() { return Ok(RoomMessageEventContent::text_plain( @@ -282,7 +282,7 @@ pub(super) async fn delete_all_from_user( #[admin_command] pub(super) async fn delete_all_from_server( &self, - server_name: Box, + server_name: OwnedServerName, yes_i_want_to_delete_local_media: bool, ) -> Result { if server_name == self.services.globals.server_name() && !yes_i_want_to_delete_local_media { diff --git a/src/admin/media/mod.rs b/src/admin/media/mod.rs index 641834b2..d1e6cd3a 100644 --- a/src/admin/media/mod.rs +++ b/src/admin/media/mod.rs @@ -3,7 +3,7 @@ mod commands; use clap::Subcommand; use conduwuit::Result; -use ruma::{EventId, MxcUri, OwnedMxcUri, OwnedServerName, ServerName}; +use ruma::{OwnedEventId, OwnedMxcUri, OwnedServerName}; use crate::admin_command_dispatch; @@ -15,12 +15,12 @@ pub(super) enum MediaCommand { Delete { /// The MXC URL to delete #[arg(long)] - mxc: Option>, + mxc: Option, /// - The message event ID which contains the media and thumbnail MXC /// URLs #[arg(long)] - event_id: Option>, + event_id: Option, }, /// - Deletes a codeblock list of MXC URLs from our database and on the @@ -57,7 +57,7 @@ pub(super) enum MediaCommand { /// - Deletes all remote media from the specified remote server. This will /// always ignore errors by default. DeleteAllFromServer { - server_name: Box, + server_name: OwnedServerName, /// Long argument to delete local media #[arg(long)] diff --git a/src/admin/query/account_data.rs b/src/admin/query/account_data.rs index b2bf5e6d..879aed16 100644 --- a/src/admin/query/account_data.rs +++ b/src/admin/query/account_data.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{RoomId, UserId, events::room::message::RoomMessageEventContent}; +use ruma::{OwnedRoomId, OwnedUserId, events::room::message::RoomMessageEventContent}; use crate::{admin_command, admin_command_dispatch}; @@ -12,30 +12,30 @@ pub(crate) enum AccountDataCommand { /// - Returns all changes to the account data that happened after `since`. 
ChangesSince { /// Full user ID - user_id: Box, + user_id: OwnedUserId, /// UNIX timestamp since (u64) since: u64, /// Optional room ID of the account data - room_id: Option>, + room_id: Option, }, /// - Searches the account data for a specific kind. AccountDataGet { /// Full user ID - user_id: Box, + user_id: OwnedUserId, /// Account data event type kind: String, /// Optional room ID of the account data - room_id: Option>, + room_id: Option, }, } #[admin_command] async fn changes_since( &self, - user_id: Box, + user_id: OwnedUserId, since: u64, - room_id: Option>, + room_id: Option, ) -> Result { let timer = tokio::time::Instant::now(); let results: Vec<_> = self @@ -54,9 +54,9 @@ async fn changes_since( #[admin_command] async fn account_data_get( &self, - user_id: Box, + user_id: OwnedUserId, kind: String, - room_id: Option>, + room_id: Option, ) -> Result { let timer = tokio::time::Instant::now(); let results = self diff --git a/src/admin/query/appservice.rs b/src/admin/query/appservice.rs index f9e1fd2c..93c76a7e 100644 --- a/src/admin/query/appservice.rs +++ b/src/admin/query/appservice.rs @@ -9,7 +9,7 @@ pub(crate) enum AppserviceCommand { /// - Gets the appservice registration info/details from the ID as a string GetRegistration { /// Appservice registration ID - appservice_id: Box, + appservice_id: String, }, /// - Gets all appservice registrations with their ID and registration info diff --git a/src/admin/query/globals.rs b/src/admin/query/globals.rs index 1642f7cd..33810704 100644 --- a/src/admin/query/globals.rs +++ b/src/admin/query/globals.rs @@ -1,6 +1,6 @@ use clap::Subcommand; use conduwuit::Result; -use ruma::ServerName; +use ruma::OwnedServerName; use crate::Command; @@ -16,7 +16,7 @@ pub(crate) enum GlobalsCommand { /// - This returns an empty `Ok(BTreeMap<..>)` when there are no keys found /// for the server. SigningKeysFor { - origin: Box, + origin: OwnedServerName, }, } diff --git a/src/admin/query/presence.rs b/src/admin/query/presence.rs index 38272749..65164802 100644 --- a/src/admin/query/presence.rs +++ b/src/admin/query/presence.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::UserId; +use ruma::OwnedUserId; use crate::Command; @@ -11,7 +11,7 @@ pub(crate) enum PresenceCommand { /// - Returns the latest presence event for the given user. GetPresence { /// Full user ID - user_id: Box, + user_id: OwnedUserId, }, /// - Iterator of the most recent presence updates that happened after the diff --git a/src/admin/query/pusher.rs b/src/admin/query/pusher.rs index 34edf4db..583c4999 100644 --- a/src/admin/query/pusher.rs +++ b/src/admin/query/pusher.rs @@ -1,6 +1,6 @@ use clap::Subcommand; use conduwuit::Result; -use ruma::UserId; +use ruma::OwnedUserId; use crate::Command; @@ -9,7 +9,7 @@ pub(crate) enum PusherCommand { /// - Returns all the pushers for the user. 
GetPushers { /// Full user ID - user_id: Box, + user_id: OwnedUserId, }, } diff --git a/src/admin/query/room_alias.rs b/src/admin/query/room_alias.rs index 2d4d8104..f58f6717 100644 --- a/src/admin/query/room_alias.rs +++ b/src/admin/query/room_alias.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{RoomAliasId, RoomId}; +use ruma::{OwnedRoomAliasId, OwnedRoomId}; use crate::Command; @@ -10,13 +10,13 @@ use crate::Command; pub(crate) enum RoomAliasCommand { ResolveLocalAlias { /// Full room alias - alias: Box, + alias: OwnedRoomAliasId, }, /// - Iterator of all our local room aliases for the room ID LocalAliasesForRoom { /// Full room ID - room_id: Box, + room_id: OwnedRoomId, }, /// - Iterator of all our local aliases in our database with their room IDs diff --git a/src/admin/query/room_state_cache.rs b/src/admin/query/room_state_cache.rs index 1de5c02d..7f5e2536 100644 --- a/src/admin/query/room_state_cache.rs +++ b/src/admin/query/room_state_cache.rs @@ -1,78 +1,80 @@ use clap::Subcommand; use conduwuit::{Error, Result}; use futures::StreamExt; -use ruma::{RoomId, ServerName, UserId, events::room::message::RoomMessageEventContent}; +use ruma::{ + OwnedRoomId, OwnedServerName, OwnedUserId, events::room::message::RoomMessageEventContent, +}; use crate::Command; #[derive(Debug, Subcommand)] pub(crate) enum RoomStateCacheCommand { ServerInRoom { - server: Box, - room_id: Box, + server: OwnedServerName, + room_id: OwnedRoomId, }, RoomServers { - room_id: Box, + room_id: OwnedRoomId, }, ServerRooms { - server: Box, + server: OwnedServerName, }, RoomMembers { - room_id: Box, + room_id: OwnedRoomId, }, LocalUsersInRoom { - room_id: Box, + room_id: OwnedRoomId, }, ActiveLocalUsersInRoom { - room_id: Box, + room_id: OwnedRoomId, }, RoomJoinedCount { - room_id: Box, + room_id: OwnedRoomId, }, RoomInvitedCount { - room_id: Box, + room_id: OwnedRoomId, }, RoomUserOnceJoined { - room_id: Box, + room_id: OwnedRoomId, }, RoomMembersInvited { - room_id: Box, + room_id: OwnedRoomId, }, GetInviteCount { - room_id: Box, - user_id: Box, + room_id: OwnedRoomId, + user_id: OwnedUserId, }, GetLeftCount { - room_id: Box, - user_id: Box, + room_id: OwnedRoomId, + user_id: OwnedUserId, }, RoomsJoined { - user_id: Box, + user_id: OwnedUserId, }, RoomsLeft { - user_id: Box, + user_id: OwnedUserId, }, RoomsInvited { - user_id: Box, + user_id: OwnedUserId, }, InviteState { - user_id: Box, - room_id: Box, + user_id: OwnedUserId, + room_id: OwnedRoomId, }, } diff --git a/src/admin/query/sending.rs b/src/admin/query/sending.rs index a148f718..860bca4a 100644 --- a/src/admin/query/sending.rs +++ b/src/admin/query/sending.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{ServerName, UserId, events::room::message::RoomMessageEventContent}; +use ruma::{OwnedServerName, OwnedUserId, events::room::message::RoomMessageEventContent}; use service::sending::Destination; use crate::Command; @@ -27,9 +27,9 @@ pub(crate) enum SendingCommand { #[arg(short, long)] appservice_id: Option, #[arg(short, long)] - server_name: Option>, + server_name: Option, #[arg(short, long)] - user_id: Option>, + user_id: Option, #[arg(short, long)] push_key: Option, }, @@ -49,15 +49,15 @@ pub(crate) enum SendingCommand { #[arg(short, long)] appservice_id: Option, #[arg(short, long)] - server_name: Option>, + server_name: Option, #[arg(short, long)] - user_id: Option>, + user_id: Option, #[arg(short, long)] push_key: Option, }, GetLatestEduCount { - 
server_name: Box, + server_name: OwnedServerName, }, } @@ -120,7 +120,7 @@ pub(super) async fn reprocess( | (None, Some(server_name), None, None) => services .sending .db - .queued_requests(&Destination::Federation(server_name.into())), + .queued_requests(&Destination::Federation(server_name)), | (None, None, Some(user_id), Some(push_key)) => { if push_key.is_empty() { return Ok(RoomMessageEventContent::text_plain( @@ -132,7 +132,7 @@ pub(super) async fn reprocess( services .sending .db - .queued_requests(&Destination::Push(user_id.into(), push_key)) + .queued_requests(&Destination::Push(user_id, push_key)) }, | (Some(_), Some(_), Some(_), Some(_)) => { return Ok(RoomMessageEventContent::text_plain( @@ -190,7 +190,7 @@ pub(super) async fn reprocess( | (None, Some(server_name), None, None) => services .sending .db - .active_requests_for(&Destination::Federation(server_name.into())), + .active_requests_for(&Destination::Federation(server_name)), | (None, None, Some(user_id), Some(push_key)) => { if push_key.is_empty() { return Ok(RoomMessageEventContent::text_plain( @@ -202,7 +202,7 @@ pub(super) async fn reprocess( services .sending .db - .active_requests_for(&Destination::Push(user_id.into(), push_key)) + .active_requests_for(&Destination::Push(user_id, push_key)) }, | (Some(_), Some(_), Some(_), Some(_)) => { return Ok(RoomMessageEventContent::text_plain( diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs index ab21170c..4cfff2e5 100644 --- a/src/admin/room/alias.rs +++ b/src/admin/room/alias.rs @@ -3,9 +3,7 @@ use std::fmt::Write; use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{ - OwnedRoomAliasId, OwnedRoomId, RoomId, events::room::message::RoomMessageEventContent, -}; +use ruma::{OwnedRoomAliasId, OwnedRoomId, events::room::message::RoomMessageEventContent}; use crate::{Command, escape_html}; @@ -18,7 +16,7 @@ pub(crate) enum RoomAliasCommand { force: bool, /// The room id to set the alias on - room_id: Box, + room_id: OwnedRoomId, /// The alias localpart to use (`alias`, not `#alias:servername.tld`) room_alias_localpart: String, @@ -40,7 +38,7 @@ pub(crate) enum RoomAliasCommand { /// - List aliases currently being used List { /// If set, only list the aliases for this room - room_id: Option>, + room_id: Option, }, } diff --git a/src/admin/room/directory.rs b/src/admin/room/directory.rs index ca036825..179131e4 100644 --- a/src/admin/room/directory.rs +++ b/src/admin/room/directory.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{RoomId, events::room::message::RoomMessageEventContent}; +use ruma::{OwnedRoomId, events::room::message::RoomMessageEventContent}; use crate::{Command, PAGE_SIZE, get_room_info}; @@ -10,13 +10,13 @@ pub(crate) enum RoomDirectoryCommand { /// - Publish a room to the room directory Publish { /// The room id of the room to publish - room_id: Box, + room_id: OwnedRoomId, }, /// - Unpublish a room to the room directory Unpublish { /// The room id of the room to unpublish - room_id: Box, + room_id: OwnedRoomId, }, /// - List rooms that are published diff --git a/src/admin/room/info.rs b/src/admin/room/info.rs index a39728fe..35a92b6a 100644 --- a/src/admin/room/info.rs +++ b/src/admin/room/info.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::{Result, utils::ReadyExt}; use futures::StreamExt; -use ruma::{RoomId, events::room::message::RoomMessageEventContent}; +use ruma::{OwnedRoomId, events::room::message::RoomMessageEventContent}; use crate::{admin_command, 
admin_command_dispatch}; @@ -10,7 +10,7 @@ use crate::{admin_command, admin_command_dispatch}; pub(crate) enum RoomInfoCommand { /// - List joined members in a room ListJoinedMembers { - room_id: Box, + room_id: OwnedRoomId, /// Lists only our local users in the specified room #[arg(long)] @@ -22,14 +22,14 @@ pub(crate) enum RoomInfoCommand { /// Room topics can be huge, so this is in its /// own separate command ViewRoomTopic { - room_id: Box, + room_id: OwnedRoomId, }, } #[admin_command] async fn list_joined_members( &self, - room_id: Box, + room_id: OwnedRoomId, local_only: bool, ) -> Result { let room_name = self @@ -79,7 +79,7 @@ async fn list_joined_members( } #[admin_command] -async fn view_room_topic(&self, room_id: Box) -> Result { +async fn view_room_topic(&self, room_id: OwnedRoomId) -> Result { let Ok(room_topic) = self .services .rooms diff --git a/src/admin/room/moderation.rs b/src/admin/room/moderation.rs index dd5ea627..41458622 100644 --- a/src/admin/room/moderation.rs +++ b/src/admin/room/moderation.rs @@ -7,7 +7,7 @@ use conduwuit::{ }; use futures::StreamExt; use ruma::{ - OwnedRoomId, RoomAliasId, RoomId, RoomOrAliasId, + OwnedRoomId, OwnedRoomOrAliasId, RoomAliasId, RoomId, RoomOrAliasId, events::room::message::RoomMessageEventContent, }; @@ -24,7 +24,7 @@ pub(crate) enum RoomModerationCommand { BanRoom { /// The room in the format of `!roomid:example.com` or a room alias in /// the format of `#roomalias:example.com` - room: Box, + room: OwnedRoomOrAliasId, }, /// - Bans a list of rooms (room IDs and room aliases) from a newline @@ -36,7 +36,7 @@ pub(crate) enum RoomModerationCommand { UnbanRoom { /// The room in the format of `!roomid:example.com` or a room alias in /// the format of `#roomalias:example.com` - room: Box, + room: OwnedRoomOrAliasId, }, /// - List of all rooms we have banned @@ -49,7 +49,7 @@ pub(crate) enum RoomModerationCommand { } #[admin_command] -async fn ban_room(&self, room: Box) -> Result { +async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result { debug!("Got room alias or ID: {}", room); let admin_room_alias = &self.services.globals.admin_alias; @@ -363,7 +363,7 @@ async fn ban_list_of_rooms(&self) -> Result { } #[admin_command] -async fn unban_room(&self, room: Box) -> Result { +async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result { let room_id = if room.is_room_id() { let room_id = match RoomId::parse(&room) { | Ok(room_id) => room_id, diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 45e550be..84795f9b 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -10,7 +10,7 @@ use conduwuit::{ use conduwuit_api::client::{leave_all_rooms, update_avatar_url, update_displayname}; use futures::StreamExt; use ruma::{ - EventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, RoomId, UserId, + OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, UserId, events::{ RoomAccountDataEventType, StateEventType, room::{ @@ -802,7 +802,7 @@ pub(super) async fn make_user_admin(&self, user_id: String) -> Result, + room_id: OwnedRoomId, tag: String, ) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; @@ -840,7 +840,7 @@ pub(super) async fn put_room_tag( pub(super) async fn delete_room_tag( &self, user_id: String, - room_id: Box, + room_id: OwnedRoomId, tag: String, ) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; @@ -876,7 +876,7 @@ pub(super) async fn delete_room_tag( pub(super) async fn get_room_tags( &self, user_id: 
String, - room_id: Box, + room_id: OwnedRoomId, ) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; @@ -898,7 +898,7 @@ pub(super) async fn get_room_tags( #[admin_command] pub(super) async fn redact_event( &self, - event_id: Box, + event_id: OwnedEventId, ) -> Result { let Ok(event) = self .services diff --git a/src/admin/user/mod.rs b/src/admin/user/mod.rs index 1494ea8f..e789376a 100644 --- a/src/admin/user/mod.rs +++ b/src/admin/user/mod.rs @@ -2,7 +2,7 @@ mod commands; use clap::Subcommand; use conduwuit::Result; -use ruma::{EventId, OwnedRoomOrAliasId, RoomId}; +use ruma::{OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId}; use crate::admin_command_dispatch; @@ -102,21 +102,21 @@ pub(super) enum UserCommand { /// room's internal ID, and the tag name `m.server_notice`. PutRoomTag { user_id: String, - room_id: Box, + room_id: OwnedRoomId, tag: String, }, /// - Deletes the room tag for the specified user and room ID DeleteRoomTag { user_id: String, - room_id: Box, + room_id: OwnedRoomId, tag: String, }, /// - Gets all the room tags for the specified user and room ID GetRoomTags { user_id: String, - room_id: Box, + room_id: OwnedRoomId, }, /// - Attempts to forcefully redact the specified event ID from the sender @@ -124,7 +124,7 @@ pub(super) enum UserCommand { /// /// This is only valid for local users RedactEvent { - event_id: Box, + event_id: OwnedEventId, }, /// - Force joins a specified list of local users to join the specified From ecf20f7ebbcaeff2e5adedf4b7c45b6d9cbb49b7 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 7 Apr 2025 03:28:51 +0000 Subject: [PATCH 254/310] improve appservice service async interfaces Signed-off-by: Jason Volk --- src/admin/query/appservice.rs | 3 +- src/service/appservice/mod.rs | 78 +++++++++++++++-------------------- 2 files changed, 36 insertions(+), 45 deletions(-) diff --git a/src/admin/query/appservice.rs b/src/admin/query/appservice.rs index 93c76a7e..0359261a 100644 --- a/src/admin/query/appservice.rs +++ b/src/admin/query/appservice.rs @@ -1,5 +1,6 @@ use clap::Subcommand; use conduwuit::Result; +use futures::TryStreamExt; use crate::Command; @@ -31,7 +32,7 @@ pub(super) async fn process(subcommand: AppserviceCommand, context: &Command<'_> }, | AppserviceCommand::All => { let timer = tokio::time::Instant::now(); - let results = services.appservice.all().await; + let results: Vec<_> = services.appservice.iter_db_ids().try_collect().await?; let query_time = timer.elapsed(); write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 50a60033..7be8a471 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -1,20 +1,20 @@ mod namespace_regex; mod registration_info; -use std::{collections::BTreeMap, sync::Arc}; +use std::{collections::BTreeMap, iter::IntoIterator, sync::Arc}; use async_trait::async_trait; -use conduwuit::{Result, err, utils::stream::TryIgnore}; +use conduwuit::{Result, err, utils::stream::IterStream}; use database::Map; -use futures::{Future, StreamExt, TryStreamExt}; +use futures::{Future, FutureExt, Stream, TryStreamExt}; use ruma::{RoomAliasId, RoomId, UserId, api::appservice::Registration}; -use tokio::sync::RwLock; +use tokio::sync::{RwLock, RwLockReadGuard}; pub use self::{namespace_regex::NamespaceRegex, registration_info::RegistrationInfo}; use crate::{Dep, sending}; pub struct Service { - registration_info: RwLock>, + registration_info: RwLock, services: 
Services, db: Data, } @@ -27,6 +27,8 @@ struct Data { id_appserviceregistrations: Arc, } +type Registrations = BTreeMap; + #[async_trait] impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { @@ -41,19 +43,18 @@ impl crate::Service for Service { })) } - async fn worker(self: Arc) -> Result<()> { + async fn worker(self: Arc) -> Result { // Inserting registrations into cache - for appservice in self.iter_db_ids().await? { - self.registration_info.write().await.insert( - appservice.0, - appservice - .1 - .try_into() - .expect("Should be validated on registration"), - ); - } + self.iter_db_ids() + .try_for_each(async |appservice| { + self.registration_info + .write() + .await + .insert(appservice.0, appservice.1.try_into()?); - Ok(()) + Ok(()) + }) + .await } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } @@ -84,7 +85,7 @@ impl Service { /// # Arguments /// /// * `service_name` - the registration ID of the appservice - pub async fn unregister_appservice(&self, appservice_id: &str) -> Result<()> { + pub async fn unregister_appservice(&self, appservice_id: &str) -> Result { // removes the appservice registration info self.registration_info .write() @@ -112,15 +113,6 @@ impl Service { .map(|info| info.registration) } - pub async fn iter_ids(&self) -> Vec { - self.registration_info - .read() - .await - .keys() - .cloned() - .collect() - } - pub async fn find_from_token(&self, token: &str) -> Option { self.read() .await @@ -156,15 +148,22 @@ impl Service { .any(|info| info.rooms.is_exclusive_match(room_id.as_str())) } - pub fn read( - &self, - ) -> impl Future>> - { - self.registration_info.read() + pub fn iter_ids(&self) -> impl Stream + Send { + self.read() + .map(|info| info.keys().cloned().collect::>()) + .map(IntoIterator::into_iter) + .map(IterStream::stream) + .flatten_stream() } - #[inline] - pub async fn all(&self) -> Result> { self.iter_db_ids().await } + pub fn iter_db_ids(&self) -> impl Stream> + Send { + self.db + .id_appserviceregistrations + .keys() + .and_then(move |id: &str| async move { + Ok((id.to_owned(), self.get_db_registration(id).await?)) + }) + } pub async fn get_db_registration(&self, id: &str) -> Result { self.db @@ -175,16 +174,7 @@ impl Service { .map_err(|e| err!(Database("Invalid appservice {id:?} registration: {e:?}"))) } - async fn iter_db_ids(&self) -> Result> { - self.db - .id_appserviceregistrations - .keys() - .ignore_err() - .then(|id: String| async move { - let reg = self.get_db_registration(&id).await?; - Ok((id, reg)) - }) - .try_collect() - .await + pub fn read(&self) -> impl Future> + Send { + self.registration_info.read() } } From fb3020d8da5b221cba042053a65ce67034622973 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 9 Apr 2025 01:50:13 +0000 Subject: [PATCH 255/310] misc async optimizations; macro reformatting Signed-off-by: Jason Volk --- src/api/client/membership.rs | 139 ++++++++++++++++++----------------- 1 file changed, 72 insertions(+), 67 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 1eeacf83..18a1c741 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -1,6 +1,6 @@ use std::{ borrow::Borrow, - collections::{BTreeMap, HashMap, HashSet}, + collections::{HashMap, HashSet}, iter::once, net::IpAddr, sync::Arc, @@ -9,7 +9,7 @@ use std::{ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - Err, Result, at, debug, debug_info, debug_warn, err, error, info, + Err, Result, at, debug, debug_error, 
debug_info, debug_warn, err, error, info, is_matching, matrix::{ StateKey, pdu::{PduBuilder, PduEvent, gen_event_id, gen_event_id_canonical_json}, @@ -17,7 +17,12 @@ use conduwuit::{ }, result::{FlatOk, NotFound}, trace, - utils::{self, IterStream, ReadyExt, shuffle}, + utils::{ + self, FutureBoolExt, + future::ReadyEqExt, + shuffle, + stream::{BroadbandExt, IterStream, ReadyExt}, + }, warn, }; use conduwuit_service::{ @@ -28,7 +33,7 @@ use conduwuit_service::{ state_compressor::{CompressedState, HashSetCompressStateEvent}, }, }; -use futures::{FutureExt, StreamExt, TryFutureExt, future::join4, join}; +use futures::{FutureExt, StreamExt, TryFutureExt, join, pin_mut}; use ruma::{ CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, @@ -52,7 +57,6 @@ use ruma::{ room::{ join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, - message::RoomMessageEventContent, }, }, }; @@ -81,7 +85,7 @@ async fn banned_room_check( || services .config .forbidden_remote_server_names - .is_match(room_id.server_name().unwrap().host()) + .is_match(room_id.server_name().expect("legacy room mxid").host()) { warn!( "User {user_id} who is not an admin attempted to send an invite for or \ @@ -96,12 +100,11 @@ async fn banned_room_check( if services.server.config.admin_room_notices { services .admin - .send_message(RoomMessageEventContent::text_plain(format!( + .send_text(&format!( "Automatically deactivating user {user_id} due to attempted banned \ room join from IP {client_ip}" - ))) - .await - .ok(); + )) + .await; } let all_joined_rooms: Vec = services @@ -136,12 +139,11 @@ async fn banned_room_check( if services.server.config.admin_room_notices { services .admin - .send_message(RoomMessageEventContent::text_plain(format!( + .send_text(&format!( "Automatically deactivating user {user_id} due to attempted banned \ room join from IP {client_ip}" - ))) - .await - .ok(); + )) + .await; } let all_joined_rooms: Vec = services @@ -366,10 +368,10 @@ pub(crate) async fn knock_room_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let body = body.body; + let sender_user = body.sender_user(); + let body = &body.body; - let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) { + let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias.clone()) { | Ok(room_id) => { banned_room_check( &services, @@ -493,7 +495,7 @@ pub(crate) async fn invite_user_route( let sender_user = body.sender_user(); if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites { - info!( + debug_error!( "User {sender_user} is not an admin and attempted to send an invite to room {}", &body.room_id ); @@ -722,12 +724,10 @@ pub(crate) async fn forget_room_route( let joined = services.rooms.state_cache.is_joined(user_id, room_id); let knocked = services.rooms.state_cache.is_knocked(user_id, room_id); - let left = services.rooms.state_cache.is_left(user_id, room_id); let invited = services.rooms.state_cache.is_invited(user_id, room_id); - let (joined, knocked, left, invited) = join4(joined, knocked, left, invited).await; - - if joined || knocked || invited { + pin_mut!(joined, knocked, invited); + if joined.or(knocked).or(invited).await { return Err!(Request(Unknown("You must leave the room before forgetting it"))); } @@ -741,11 +741,11 @@ 
pub(crate) async fn forget_room_route( return Err!(Request(Unknown("No membership event was found, room was never joined"))); } - if left - || membership.is_ok_and(|member| { - member.membership == MembershipState::Leave - || member.membership == MembershipState::Ban - }) { + let non_membership = membership + .map(|member| member.membership) + .is_ok_and(is_matching!(MembershipState::Leave | MembershipState::Ban)); + + if non_membership || services.rooms.state_cache.is_left(user_id, room_id).await { services.rooms.state_cache.forget(room_id, user_id); } @@ -866,32 +866,32 @@ pub(crate) async fn joined_members_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user(); - if !services .rooms .state_accessor - .user_can_see_state_events(sender_user, &body.room_id) + .user_can_see_state_events(body.sender_user(), &body.room_id) .await { return Err!(Request(Forbidden("You don't have permission to view this room."))); } - let joined: BTreeMap = services - .rooms - .state_cache - .room_members(&body.room_id) - .map(ToOwned::to_owned) - .then(|user| async move { - (user.clone(), RoomMember { - display_name: services.users.displayname(&user).await.ok(), - avatar_url: services.users.avatar_url(&user).await.ok(), - }) - }) - .collect() - .await; + Ok(joined_members::v3::Response { + joined: services + .rooms + .state_cache + .room_members(&body.room_id) + .map(ToOwned::to_owned) + .broad_then(|user_id| async move { + let member = RoomMember { + display_name: services.users.displayname(&user_id).await.ok(), + avatar_url: services.users.avatar_url(&user_id).await.ok(), + }; - Ok(joined_members::v3::Response { joined }) + (user_id, member) + }) + .collect() + .await, + }) } pub async fn join_room_by_id_helper( @@ -1118,9 +1118,10 @@ async fn join_room_by_id_helper_remote( })?; if signed_event_id != event_id { - return Err!(Request(BadJson( - warn!(%signed_event_id, %event_id, "Server {remote_server} sent event with wrong event ID") - ))); + return Err!(Request(BadJson(warn!( + %signed_event_id, %event_id, + "Server {remote_server} sent event with wrong event ID" + )))); } match signed_value["signatures"] @@ -1696,19 +1697,18 @@ pub(crate) async fn invite_helper( })?; if pdu.event_id != event_id { - return Err!(Request(BadJson( - warn!(%pdu.event_id, %event_id, "Server {} sent event with wrong event ID", user_id.server_name()) - ))); + return Err!(Request(BadJson(warn!( + %pdu.event_id, %event_id, + "Server {} sent event with wrong event ID", + user_id.server_name() + )))); } - let origin: OwnedServerName = serde_json::from_value( - serde_json::to_value( - value - .get("origin") - .ok_or_else(|| err!(Request(BadJson("Event missing origin field."))))?, - ) - .expect("CanonicalJson is valid json value"), - ) + let origin: OwnedServerName = serde_json::from_value(serde_json::to_value( + value + .get("origin") + .ok_or_else(|| err!(Request(BadJson("Event missing origin field."))))?, + )?) 
.map_err(|e| { err!(Request(BadJson(warn!("Origin field in event is not a valid server name: {e}")))) })?; @@ -1818,9 +1818,11 @@ pub async fn leave_room( blurhash: None, }; - if services.rooms.metadata.is_banned(room_id).await - || services.rooms.metadata.is_disabled(room_id).await - { + let is_banned = services.rooms.metadata.is_banned(room_id); + let is_disabled = services.rooms.metadata.is_disabled(room_id); + + pin_mut!(is_banned, is_disabled); + if is_banned.or(is_disabled).await { // the room is banned/disabled, the room must be rejected locally since we // cant/dont want to federate with this server services @@ -1840,18 +1842,21 @@ pub async fn leave_room( return Ok(()); } - // Ask a remote server if we don't have this room and are not knocking on it - if !services + let dont_have_room = services .rooms .state_cache .server_in_room(services.globals.server_name(), room_id) - .await && !services + .eq(&false); + + let not_knocked = services .rooms .state_cache .is_knocked(user_id, room_id) - .await - { - if let Err(e) = remote_leave_room(services, user_id, room_id).await { + .eq(&false); + + // Ask a remote server if we don't have this room and are not knocking on it + if dont_have_room.and(not_knocked).await { + if let Err(e) = remote_leave_room(services, user_id, room_id).boxed().await { warn!(%user_id, "Failed to leave room {room_id} remotely: {e}"); // Don't tell the client about this error } From 4f8fec7e5a5631ef7b679a00838219e7926040dd Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 6 Apr 2025 23:41:58 +0000 Subject: [PATCH 256/310] replace admin command branches returning RoomMessageEventContent rename admin Command back to Context Signed-off-by: Jason Volk --- src/admin/admin.rs | 24 +- src/admin/appservice/commands.rs | 82 +++-- src/admin/check/commands.rs | 14 +- src/admin/{command.rs => context.rs} | 14 +- src/admin/debug/commands.rs | 481 +++++++++++---------------- src/admin/debug/tester.rs | 17 +- src/admin/federation/commands.rs | 103 +++--- src/admin/media/commands.rs | 119 +++---- src/admin/mod.rs | 8 +- src/admin/processor.rs | 8 +- src/admin/query/account_data.rs | 16 +- src/admin/query/appservice.rs | 4 +- src/admin/query/globals.rs | 4 +- src/admin/query/presence.rs | 4 +- src/admin/query/pusher.rs | 4 +- src/admin/query/raw.rs | 120 ++----- src/admin/query/resolver.rs | 13 +- src/admin/query/room_alias.rs | 4 +- src/admin/query/room_state_cache.rs | 146 ++++---- src/admin/query/room_timeline.rs | 10 +- src/admin/query/sending.rs | 90 +++-- src/admin/query/short.rs | 16 +- src/admin/query/users.rs | 175 ++++------ src/admin/room/alias.rs | 92 ++--- src/admin/room/commands.rs | 33 +- src/admin/room/directory.rs | 43 +-- src/admin/room/info.rs | 38 +-- src/admin/room/moderation.rs | 91 +++-- src/admin/server/commands.rs | 73 ++-- src/admin/user/commands.rs | 344 ++++++++----------- src/admin/utils.rs | 2 + src/macros/admin.rs | 17 +- 32 files changed, 903 insertions(+), 1306 deletions(-) rename src/admin/{command.rs => context.rs} (67%) diff --git a/src/admin/admin.rs b/src/admin/admin.rs index 9e010a59..0d636c72 100644 --- a/src/admin/admin.rs +++ b/src/admin/admin.rs @@ -2,7 +2,7 @@ use clap::Parser; use conduwuit::Result; use crate::{ - appservice, appservice::AppserviceCommand, check, check::CheckCommand, command::Command, + appservice, appservice::AppserviceCommand, check, check::CheckCommand, context::Context, debug, debug::DebugCommand, federation, federation::FederationCommand, media, media::MediaCommand, query, query::QueryCommand, room, 
room::RoomCommand, server, server::ServerCommand, user, user::UserCommand, @@ -49,20 +49,18 @@ pub(super) enum AdminCommand { } #[tracing::instrument(skip_all, name = "command")] -pub(super) async fn process(command: AdminCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(command: AdminCommand, context: &Context<'_>) -> Result { use AdminCommand::*; match command { - | Appservices(command) => appservice::process(command, context).await?, - | Media(command) => media::process(command, context).await?, - | Users(command) => user::process(command, context).await?, - | Rooms(command) => room::process(command, context).await?, - | Federation(command) => federation::process(command, context).await?, - | Server(command) => server::process(command, context).await?, - | Debug(command) => debug::process(command, context).await?, - | Query(command) => query::process(command, context).await?, - | Check(command) => check::process(command, context).await?, + | Appservices(command) => appservice::process(command, context).await, + | Media(command) => media::process(command, context).await, + | Users(command) => user::process(command, context).await, + | Rooms(command) => room::process(command, context).await, + | Federation(command) => federation::process(command, context).await, + | Server(command) => server::process(command, context).await, + | Debug(command) => debug::process(command, context).await, + | Query(command) => query::process(command, context).await, + | Check(command) => check::process(command, context).await, } - - Ok(()) } diff --git a/src/admin/appservice/commands.rs b/src/admin/appservice/commands.rs index 88f28431..3575e067 100644 --- a/src/admin/appservice/commands.rs +++ b/src/admin/appservice/commands.rs @@ -1,84 +1,80 @@ -use ruma::{api::appservice::Registration, events::room::message::RoomMessageEventContent}; +use conduwuit::{Err, Result, checked}; +use futures::{FutureExt, StreamExt, TryFutureExt}; -use crate::{Result, admin_command}; +use crate::admin_command; #[admin_command] -pub(super) async fn register(&self) -> Result { - if self.body.len() < 2 - || !self.body[0].trim().starts_with("```") - || self.body.last().unwrap_or(&"").trim() != "```" +pub(super) async fn register(&self) -> Result { + let body = &self.body; + let body_len = self.body.len(); + if body_len < 2 + || !body[0].trim().starts_with("```") + || body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. 
Add --help for details."); } - let appservice_config_body = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n"); - let parsed_config = serde_yaml::from_str::(&appservice_config_body); + let range = 1..checked!(body_len - 1)?; + let appservice_config_body = body[range].join("\n"); + let parsed_config = serde_yaml::from_str(&appservice_config_body); match parsed_config { + | Err(e) => return Err!("Could not parse appservice config as YAML: {e}"), | Ok(registration) => match self .services .appservice .register_appservice(®istration, &appservice_config_body) .await + .map(|()| registration.id) { - | Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( - "Appservice registered with ID: {}", - registration.id - ))), - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Failed to register appservice: {e}" - ))), + | Err(e) => return Err!("Failed to register appservice: {e}"), + | Ok(id) => write!(self, "Appservice registered with ID: {id}"), }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Could not parse appservice config as YAML: {e}" - ))), } + .await } #[admin_command] -pub(super) async fn unregister( - &self, - appservice_identifier: String, -) -> Result { +pub(super) async fn unregister(&self, appservice_identifier: String) -> Result { match self .services .appservice .unregister_appservice(&appservice_identifier) .await { - | Ok(()) => Ok(RoomMessageEventContent::text_plain("Appservice unregistered.")), - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Failed to unregister appservice: {e}" - ))), + | Err(e) => return Err!("Failed to unregister appservice: {e}"), + | Ok(()) => write!(self, "Appservice unregistered."), } + .await } #[admin_command] -pub(super) async fn show_appservice_config( - &self, - appservice_identifier: String, -) -> Result { +pub(super) async fn show_appservice_config(&self, appservice_identifier: String) -> Result { match self .services .appservice .get_registration(&appservice_identifier) .await { + | None => return Err!("Appservice does not exist."), | Some(config) => { - let config_str = serde_yaml::to_string(&config) - .expect("config should've been validated on register"); - let output = - format!("Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```",); - Ok(RoomMessageEventContent::notice_markdown(output)) + let config_str = serde_yaml::to_string(&config)?; + write!(self, "Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```") }, - | None => Ok(RoomMessageEventContent::text_plain("Appservice does not exist.")), } + .await } #[admin_command] -pub(super) async fn list_registered(&self) -> Result { - let appservices = self.services.appservice.iter_ids().await; - let output = format!("Appservices ({}): {}", appservices.len(), appservices.join(", ")); - Ok(RoomMessageEventContent::text_plain(output)) +pub(super) async fn list_registered(&self) -> Result { + self.services + .appservice + .iter_ids() + .collect() + .map(Ok) + .and_then(|appservices: Vec<_>| { + let len = appservices.len(); + let list = appservices.join(", "); + write!(self, "Appservices ({len}): {list}") + }) + .await } diff --git a/src/admin/check/commands.rs b/src/admin/check/commands.rs index 7e27362f..1ffc3ae5 100644 --- a/src/admin/check/commands.rs +++ b/src/admin/check/commands.rs @@ -1,15 +1,14 @@ use conduwuit::Result; use conduwuit_macros::implement; use futures::StreamExt; -use ruma::events::room::message::RoomMessageEventContent; -use crate::Command; +use crate::Context; /// Uses the iterator in 
`src/database/key_value/users.rs` to iterator over /// every user in our database (remote and local). Reports total count, any /// errors if there were any, etc -#[implement(Command, params = "<'_>")] -pub(super) async fn check_all_users(&self) -> Result { +#[implement(Context, params = "<'_>")] +pub(super) async fn check_all_users(&self) -> Result { let timer = tokio::time::Instant::now(); let users = self.services.users.iter().collect::>().await; let query_time = timer.elapsed(); @@ -18,11 +17,10 @@ pub(super) async fn check_all_users(&self) -> Result { let err_count = users.iter().filter(|_user| false).count(); let ok_count = users.iter().filter(|_user| true).count(); - let message = format!( + self.write_str(&format!( "Database query completed in {query_time:?}:\n\n```\nTotal entries: \ {total:?}\nFailure/Invalid user count: {err_count:?}\nSuccess/Valid user count: \ {ok_count:?}\n```" - ); - - Ok(RoomMessageEventContent::notice_markdown(message)) + )) + .await } diff --git a/src/admin/command.rs b/src/admin/context.rs similarity index 67% rename from src/admin/command.rs rename to src/admin/context.rs index 5df980d6..270537be 100644 --- a/src/admin/command.rs +++ b/src/admin/context.rs @@ -3,13 +3,13 @@ use std::{fmt, time::SystemTime}; use conduwuit::Result; use conduwuit_service::Services; use futures::{ - Future, FutureExt, + Future, FutureExt, TryFutureExt, io::{AsyncWriteExt, BufWriter}, lock::Mutex, }; use ruma::EventId; -pub(crate) struct Command<'a> { +pub(crate) struct Context<'a> { pub(crate) services: &'a Services, pub(crate) body: &'a [&'a str], pub(crate) timer: SystemTime, @@ -17,14 +17,14 @@ pub(crate) struct Command<'a> { pub(crate) output: Mutex>>, } -impl Command<'_> { +impl Context<'_> { pub(crate) fn write_fmt( &self, arguments: fmt::Arguments<'_>, ) -> impl Future + Send + '_ + use<'_> { let buf = format!("{arguments}"); - self.output.lock().then(|mut output| async move { - output.write_all(buf.as_bytes()).await.map_err(Into::into) + self.output.lock().then(async move |mut output| { + output.write_all(buf.as_bytes()).map_err(Into::into).await }) } @@ -32,8 +32,8 @@ impl Command<'_> { &'a self, s: &'a str, ) -> impl Future + Send + 'a { - self.output.lock().then(move |mut output| async move { - output.write_all(s.as_bytes()).await.map_err(Into::into) + self.output.lock().then(async move |mut output| { + output.write_all(s.as_bytes()).map_err(Into::into).await }) } } diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 6d0e375a..d0debc2a 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -6,7 +6,7 @@ use std::{ }; use conduwuit::{ - Error, Result, debug_error, err, info, + Err, Result, debug_error, err, info, matrix::pdu::{PduEvent, PduId, RawPduId}, trace, utils, utils::{ @@ -19,7 +19,7 @@ use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedServerName, RoomId, RoomVersionId, - api::federation::event::get_room_state, events::room::message::RoomMessageEventContent, + api::federation::event::get_room_state, }; use service::rooms::{ short::{ShortEventId, ShortRoomId}, @@ -30,28 +30,24 @@ use tracing_subscriber::EnvFilter; use crate::admin_command; #[admin_command] -pub(super) async fn echo(&self, message: Vec) -> Result { +pub(super) async fn echo(&self, message: Vec) -> Result { let message = message.join(" "); - - Ok(RoomMessageEventContent::notice_plain(message)) + self.write_str(&message).await } 
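Every hunk in this patch applies the same mechanical rewrite: admin handlers stop building a `RoomMessageEventContent` reply and instead stream their output through the renamed `Context` (the `write_str`/`write_fmt` helpers shown above), returning the crate's bare `Result`, while error branches collapse into `return Err!(...)`. A minimal before/after sketch of that shape, assuming conduwuit's defaulted `Result` alias and the `#[admin_command]`/`Err!` macros from this tree (the handler name `status` is hypothetical; the config check and messages are borrowed from handlers elsewhere in this patch):

```rust
// Before: success and failure both had to be wrapped in a room message.
#[admin_command]
async fn status(&self) -> Result<RoomMessageEventContent> {
	if !self.services.server.config.allow_federation {
		return Ok(RoomMessageEventContent::text_plain(
			"Federation is disabled on this homeserver.",
		));
	}

	Ok(RoomMessageEventContent::notice_plain(format!(
		"Server: {}",
		self.services.server.name
	)))
}

// After: output goes through the Context; failure becomes a real error.
#[admin_command]
async fn status(&self) -> Result {
	if !self.services.server.config.allow_federation {
		return Err!("Federation is disabled on this homeserver.");
	}

	self.write_str(&format!("Server: {}", self.services.server.name))
		.await
}
```
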
#[admin_command] -pub(super) async fn get_auth_chain( - &self, - event_id: OwnedEventId, -) -> Result { +pub(super) async fn get_auth_chain(&self, event_id: OwnedEventId) -> Result { let Ok(event) = self.services.rooms.timeline.get_pdu_json(&event_id).await else { - return Ok(RoomMessageEventContent::notice_plain("Event not found.")); + return Err!("Event not found."); }; let room_id_str = event .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + .and_then(CanonicalJsonValue::as_str) + .ok_or_else(|| err!(Database("Invalid event in database")))?; let room_id = <&RoomId>::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + .map_err(|_| err!(Database("Invalid room id field in event in database")))?; let start = Instant::now(); let count = self @@ -64,51 +60,39 @@ pub(super) async fn get_auth_chain( .await; let elapsed = start.elapsed(); - Ok(RoomMessageEventContent::text_plain(format!( - "Loaded auth chain with length {count} in {elapsed:?}" - ))) + let out = format!("Loaded auth chain with length {count} in {elapsed:?}"); + + self.write_str(&out).await } #[admin_command] -pub(super) async fn parse_pdu(&self) -> Result { +pub(super) async fn parse_pdu(&self) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&EMPTY).trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. Add --help for details."); } let string = self.body[1..self.body.len().saturating_sub(1)].join("\n"); match serde_json::from_str(&string) { + | Err(e) => return Err!("Invalid json in command body: {e}"), | Ok(value) => match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) { + | Err(e) => return Err!("Could not parse PDU JSON: {e:?}"), | Ok(hash) => { let event_id = OwnedEventId::parse(format!("${hash}")); - - match serde_json::from_value::( - serde_json::to_value(value).expect("value is json"), - ) { - | Ok(pdu) => Ok(RoomMessageEventContent::text_plain(format!( - "EventId: {event_id:?}\n{pdu:#?}" - ))), - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "EventId: {event_id:?}\nCould not parse event: {e}" - ))), + match serde_json::from_value::(serde_json::to_value(value)?) 
{ + | Err(e) => return Err!("EventId: {event_id:?}\nCould not parse event: {e}"), + | Ok(pdu) => write!(self, "EventId: {event_id:?}\n{pdu:#?}"), } }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Could not parse PDU JSON: {e:?}" - ))), }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Invalid json in command body: {e}" - ))), } + .await } #[admin_command] -pub(super) async fn get_pdu(&self, event_id: OwnedEventId) -> Result { +pub(super) async fn get_pdu(&self, event_id: OwnedEventId) -> Result { let mut outlier = false; let mut pdu_json = self .services @@ -123,21 +107,18 @@ pub(super) async fn get_pdu(&self, event_id: OwnedEventId) -> Result return Err!("PDU not found locally."), | Ok(json) => { - let json_text = - serde_json::to_string_pretty(&json).expect("canonical json is valid json"); - Ok(RoomMessageEventContent::notice_markdown(format!( - "{}\n```json\n{}\n```", - if outlier { - "Outlier (Rejected / Soft Failed) PDU found in our database" - } else { - "PDU found in our database" - }, - json_text - ))) + let text = serde_json::to_string_pretty(&json)?; + let msg = if outlier { + "Outlier (Rejected / Soft Failed) PDU found in our database" + } else { + "PDU found in our database" + }; + write!(self, "{msg}\n```json\n{text}\n```",) }, - | Err(_) => Ok(RoomMessageEventContent::text_plain("PDU not found locally.")), } + .await } #[admin_command] @@ -145,7 +126,7 @@ pub(super) async fn get_short_pdu( &self, shortroomid: ShortRoomId, shorteventid: ShortEventId, -) -> Result { +) -> Result { let pdu_id: RawPduId = PduId { shortroomid, shorteventid: shorteventid.into(), @@ -160,41 +141,33 @@ pub(super) async fn get_short_pdu( .await; match pdu_json { + | Err(_) => return Err!("PDU not found locally."), | Ok(json) => { - let json_text = - serde_json::to_string_pretty(&json).expect("canonical json is valid json"); - Ok(RoomMessageEventContent::notice_markdown(format!("```json\n{json_text}\n```",))) + let json_text = serde_json::to_string_pretty(&json)?; + write!(self, "```json\n{json_text}\n```") }, - | Err(_) => Ok(RoomMessageEventContent::text_plain("PDU not found locally.")), } + .await } #[admin_command] -pub(super) async fn get_remote_pdu_list( - &self, - server: OwnedServerName, - force: bool, -) -> Result { +pub(super) async fn get_remote_pdu_list(&self, server: OwnedServerName, force: bool) -> Result { if !self.services.server.config.allow_federation { - return Ok(RoomMessageEventContent::text_plain( - "Federation is disabled on this homeserver.", - )); + return Err!("Federation is disabled on this homeserver.",); } if server == self.services.globals.server_name() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Not allowed to send federation requests to ourselves. Please use `get-pdu` for \ fetching local PDUs from the database.", - )); + ); } if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&EMPTY).trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. 
Add --help for details.",); } let list = self @@ -208,18 +181,19 @@ pub(super) async fn get_remote_pdu_list( let mut failed_count: usize = 0; let mut success_count: usize = 0; - for pdu in list { + for event_id in list { if force { - match self.get_remote_pdu(Box::from(pdu), server.clone()).await { + match self + .get_remote_pdu(event_id.to_owned(), server.clone()) + .await + { | Err(e) => { failed_count = failed_count.saturating_add(1); self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "Failed to get remote PDU, ignoring error: {e}" - ))) - .await - .ok(); + .send_text(&format!("Failed to get remote PDU, ignoring error: {e}")) + .await; + warn!("Failed to get remote PDU, ignoring error: {e}"); }, | _ => { @@ -227,44 +201,48 @@ pub(super) async fn get_remote_pdu_list( }, } } else { - self.get_remote_pdu(Box::from(pdu), server.clone()).await?; + self.get_remote_pdu(event_id.to_owned(), server.clone()) + .await?; success_count = success_count.saturating_add(1); } } - Ok(RoomMessageEventContent::text_plain(format!( - "Fetched {success_count} remote PDUs successfully with {failed_count} failures" - ))) + let out = + format!("Fetched {success_count} remote PDUs successfully with {failed_count} failures"); + + self.write_str(&out).await } #[admin_command] pub(super) async fn get_remote_pdu( &self, - event_id: Box, - server: Box, -) -> Result { + event_id: OwnedEventId, + server: OwnedServerName, +) -> Result { if !self.services.server.config.allow_federation { - return Ok(RoomMessageEventContent::text_plain( - "Federation is disabled on this homeserver.", - )); + return Err!("Federation is disabled on this homeserver."); } if server == self.services.globals.server_name() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Not allowed to send federation requests to ourselves. 
Please use `get-pdu` for \ fetching local PDUs.", - )); + ); } match self .services .sending .send_federation_request(&server, ruma::api::federation::event::get_event::v1::Request { - event_id: event_id.clone().into(), + event_id: event_id.clone(), include_unredacted_content: None, }) .await { + | Err(e) => + return Err!( + "Remote server did not have PDU or failed sending request to remote server: {e}" + ), | Ok(response) => { let json: CanonicalJsonObject = serde_json::from_str(response.pdu.get()).map_err(|e| { @@ -272,10 +250,9 @@ pub(super) async fn get_remote_pdu( "Requested event ID {event_id} from server but failed to convert from \ RawValue to CanonicalJsonObject (malformed event/response?): {e}" ); - Error::BadRequest( - ErrorKind::Unknown, - "Received response from server but failed to parse PDU", - ) + err!(Request(Unknown( + "Received response from server but failed to parse PDU" + ))) })?; trace!("Attempting to parse PDU: {:?}", &response.pdu); @@ -285,6 +262,7 @@ pub(super) async fn get_remote_pdu( .rooms .event_handler .parse_incoming_pdu(&response.pdu) + .boxed() .await; let (event_id, value, room_id) = match parsed_result { @@ -292,9 +270,7 @@ pub(super) async fn get_remote_pdu( | Err(e) => { warn!("Failed to parse PDU: {e}"); info!("Full PDU: {:?}", &response.pdu); - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to parse PDU remote server {server} sent us: {e}" - ))); + return Err!("Failed to parse PDU remote server {server} sent us: {e}"); }, }; @@ -306,30 +282,18 @@ pub(super) async fn get_remote_pdu( .rooms .timeline .backfill_pdu(&server, response.pdu) - .boxed() .await?; - let json_text = - serde_json::to_string_pretty(&json).expect("canonical json is valid json"); - - Ok(RoomMessageEventContent::notice_markdown(format!( - "{}\n```json\n{}\n```", - "Got PDU from specified server and handled as backfilled PDU successfully. \ - Event body:", - json_text - ))) + let text = serde_json::to_string_pretty(&json)?; + let msg = "Got PDU from specified server and handled as backfilled"; + write!(self, "{msg}. 
Event body:\n```json\n{text}\n```") }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Remote server did not have PDU or failed sending request to remote server: {e}" - ))), } + .await } #[admin_command] -pub(super) async fn get_room_state( - &self, - room: OwnedRoomOrAliasId, -) -> Result { +pub(super) async fn get_room_state(&self, room: OwnedRoomOrAliasId) -> Result { let room_id = self.services.rooms.alias.resolve(&room).await?; let room_state: Vec<_> = self .services @@ -341,28 +305,24 @@ pub(super) async fn get_room_state( .await?; if room_state.is_empty() { - return Ok(RoomMessageEventContent::text_plain( - "Unable to find room state in our database (vector is empty)", - )); + return Err!("Unable to find room state in our database (vector is empty)",); } let json = serde_json::to_string_pretty(&room_state).map_err(|e| { - warn!("Failed converting room state vector in our database to pretty JSON: {e}"); - Error::bad_database( + err!(Database( "Failed to convert room state events to pretty JSON, possible invalid room state \ - events in our database", - ) + events in our database {e}", + )) })?; - Ok(RoomMessageEventContent::notice_markdown(format!("```json\n{json}\n```"))) + let out = format!("```json\n{json}\n```"); + self.write_str(&out).await } #[admin_command] -pub(super) async fn ping(&self, server: OwnedServerName) -> Result { +pub(super) async fn ping(&self, server: OwnedServerName) -> Result { if server == self.services.globals.server_name() { - return Ok(RoomMessageEventContent::text_plain( - "Not allowed to send federation requests to ourselves.", - )); + return Err!("Not allowed to send federation requests to ourselves."); } let timer = tokio::time::Instant::now(); @@ -376,35 +336,27 @@ pub(super) async fn ping(&self, server: OwnedServerName) -> Result { + return Err!("Failed sending federation request to specified server:\n\n{e}"); + }, | Ok(response) => { let ping_time = timer.elapsed(); - let json_text_res = serde_json::to_string_pretty(&response.server); - if let Ok(json) = json_text_res { - return Ok(RoomMessageEventContent::notice_markdown(format!( - "Got response which took {ping_time:?} time:\n```json\n{json}\n```" - ))); - } + let out = if let Ok(json) = json_text_res { + format!("Got response which took {ping_time:?} time:\n```json\n{json}\n```") + } else { + format!("Got non-JSON response which took {ping_time:?} time:\n{response:?}") + }; - Ok(RoomMessageEventContent::text_plain(format!( - "Got non-JSON response which took {ping_time:?} time:\n{response:?}" - ))) - }, - | Err(e) => { - warn!( - "Failed sending federation request to specified server from ping debug command: \ - {e}" - ); - Ok(RoomMessageEventContent::text_plain(format!( - "Failed sending federation request to specified server:\n\n{e}", - ))) + write!(self, "{out}") }, } + .await } #[admin_command] -pub(super) async fn force_device_list_updates(&self) -> Result { +pub(super) async fn force_device_list_updates(&self) -> Result { // Force E2EE device list updates for all users self.services .users @@ -412,27 +364,17 @@ pub(super) async fn force_device_list_updates(&self) -> Result, - reset: bool, -) -> Result { +pub(super) async fn change_log_level(&self, filter: Option, reset: bool) -> Result { let handles = &["console"]; if reset { let old_filter_layer = match EnvFilter::try_new(&self.services.server.config.log) { | Ok(s) => s, - | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Log level from config appears to be invalid now: {e}" - ))); - }, + | Err(e) 
=> return Err!("Log level from config appears to be invalid now: {e}"), }; match self @@ -442,16 +384,12 @@ pub(super) async fn change_log_level( .reload .reload(&old_filter_layer, Some(handles)) { + | Err(e) => + return Err!("Failed to modify and reload the global tracing log level: {e}"), | Ok(()) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Successfully changed log level back to config value {}", - self.services.server.config.log - ))); - }, - | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to modify and reload the global tracing log level: {e}" - ))); + let value = &self.services.server.config.log; + let out = format!("Successfully changed log level back to config value {value}"); + return self.write_str(&out).await; }, } } @@ -459,11 +397,7 @@ pub(super) async fn change_log_level( if let Some(filter) = filter { let new_filter_layer = match EnvFilter::try_new(filter) { | Ok(s) => s, - | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Invalid log level filter specified: {e}" - ))); - }, + | Err(e) => return Err!("Invalid log level filter specified: {e}"), }; match self @@ -473,90 +407,75 @@ pub(super) async fn change_log_level( .reload .reload(&new_filter_layer, Some(handles)) { - | Ok(()) => { - return Ok(RoomMessageEventContent::text_plain("Successfully changed log level")); - }, - | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to modify and reload the global tracing log level: {e}" - ))); - }, + | Ok(()) => return self.write_str("Successfully changed log level").await, + | Err(e) => + return Err!("Failed to modify and reload the global tracing log level: {e}"), } } - Ok(RoomMessageEventContent::text_plain("No log level was specified.")) + Err!("No log level was specified.") } #[admin_command] -pub(super) async fn sign_json(&self) -> Result { +pub(super) async fn sign_json(&self) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. Add --help for details."); } let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n"); match serde_json::from_str(&string) { + | Err(e) => return Err!("Invalid json: {e}"), | Ok(mut value) => { - self.services - .server_keys - .sign_json(&mut value) - .expect("our request json is what ruma expects"); - let json_text = - serde_json::to_string_pretty(&value).expect("canonical json is valid json"); - Ok(RoomMessageEventContent::text_plain(json_text)) + self.services.server_keys.sign_json(&mut value)?; + let json_text = serde_json::to_string_pretty(&value)?; + write!(self, "{json_text}") }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))), } + .await } #[admin_command] -pub(super) async fn verify_json(&self) -> Result { +pub(super) async fn verify_json(&self) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. 
Add --help for details."); } let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n"); match serde_json::from_str::(&string) { + | Err(e) => return Err!("Invalid json: {e}"), | Ok(value) => match self.services.server_keys.verify_json(&value, None).await { - | Ok(()) => Ok(RoomMessageEventContent::text_plain("Signature correct")), - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Signature verification failed: {e}" - ))), + | Err(e) => return Err!("Signature verification failed: {e}"), + | Ok(()) => write!(self, "Signature correct"), }, - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))), } + .await } #[admin_command] -pub(super) async fn verify_pdu(&self, event_id: OwnedEventId) -> Result { +pub(super) async fn verify_pdu(&self, event_id: OwnedEventId) -> Result { + use ruma::signatures::Verified; + let mut event = self.services.rooms.timeline.get_pdu_json(&event_id).await?; event.remove("event_id"); let msg = match self.services.server_keys.verify_event(&event, None).await { - | Ok(ruma::signatures::Verified::Signatures) => - "signatures OK, but content hash failed (redaction).", - | Ok(ruma::signatures::Verified::All) => "signatures and hashes OK.", | Err(e) => return Err(e), + | Ok(Verified::Signatures) => "signatures OK, but content hash failed (redaction).", + | Ok(Verified::All) => "signatures and hashes OK.", }; - Ok(RoomMessageEventContent::notice_plain(msg)) + self.write_str(msg).await } #[admin_command] #[tracing::instrument(skip(self))] -pub(super) async fn first_pdu_in_room( - &self, - room_id: OwnedRoomId, -) -> Result { +pub(super) async fn first_pdu_in_room(&self, room_id: OwnedRoomId) -> Result { if !self .services .rooms @@ -564,9 +483,7 @@ pub(super) async fn first_pdu_in_room( .server_in_room(&self.services.server.name, &room_id) .await { - return Ok(RoomMessageEventContent::text_plain( - "We are not participating in the room / we don't know about the room ID.", - )); + return Err!("We are not participating in the room / we don't know about the room ID.",); } let first_pdu = self @@ -575,17 +492,15 @@ pub(super) async fn first_pdu_in_room( .timeline .first_pdu_in_room(&room_id) .await - .map_err(|_| Error::bad_database("Failed to find the first PDU in database"))?; + .map_err(|_| err!(Database("Failed to find the first PDU in database")))?; - Ok(RoomMessageEventContent::text_plain(format!("{first_pdu:?}"))) + let out = format!("{first_pdu:?}"); + self.write_str(&out).await } #[admin_command] #[tracing::instrument(skip(self))] -pub(super) async fn latest_pdu_in_room( - &self, - room_id: OwnedRoomId, -) -> Result { +pub(super) async fn latest_pdu_in_room(&self, room_id: OwnedRoomId) -> Result { if !self .services .rooms @@ -593,9 +508,7 @@ pub(super) async fn latest_pdu_in_room( .server_in_room(&self.services.server.name, &room_id) .await { - return Ok(RoomMessageEventContent::text_plain( - "We are not participating in the room / we don't know about the room ID.", - )); + return Err!("We are not participating in the room / we don't know about the room ID."); } let latest_pdu = self @@ -604,9 +517,10 @@ pub(super) async fn latest_pdu_in_room( .timeline .latest_pdu_in_room(&room_id) .await - .map_err(|_| Error::bad_database("Failed to find the latest PDU in database"))?; + .map_err(|_| err!(Database("Failed to find the latest PDU in database")))?; - Ok(RoomMessageEventContent::text_plain(format!("{latest_pdu:?}"))) + let out = format!("{latest_pdu:?}"); + self.write_str(&out).await } #[admin_command] @@ 
-615,7 +529,7 @@ pub(super) async fn force_set_room_state_from_server( &self, room_id: OwnedRoomId, server_name: OwnedServerName, -) -> Result { +) -> Result { if !self .services .rooms @@ -623,9 +537,7 @@ pub(super) async fn force_set_room_state_from_server( .server_in_room(&self.services.server.name, &room_id) .await { - return Ok(RoomMessageEventContent::text_plain( - "We are not participating in the room / we don't know about the room ID.", - )); + return Err!("We are not participating in the room / we don't know about the room ID."); } let first_pdu = self @@ -634,7 +546,7 @@ pub(super) async fn force_set_room_state_from_server( .timeline .latest_pdu_in_room(&room_id) .await - .map_err(|_| Error::bad_database("Failed to find the latest PDU in database"))?; + .map_err(|_| err!(Database("Failed to find the latest PDU in database")))?; let room_version = self.services.rooms.state.get_room_version(&room_id).await?; @@ -644,10 +556,9 @@ pub(super) async fn force_set_room_state_from_server( .services .sending .send_federation_request(&server_name, get_room_state::v1::Request { - room_id: room_id.clone().into(), + room_id: room_id.clone(), event_id: first_pdu.event_id.clone(), }) - .boxed() .await?; for pdu in remote_state_response.pdus.clone() { @@ -656,7 +567,6 @@ pub(super) async fn force_set_room_state_from_server( .rooms .event_handler .parse_incoming_pdu(&pdu) - .boxed() .await { | Ok(t) => t, @@ -720,7 +630,6 @@ pub(super) async fn force_set_room_state_from_server( .rooms .event_handler .resolve_state(&room_id, &room_version, state) - .boxed() .await?; info!("Forcing new room state"); @@ -736,6 +645,7 @@ pub(super) async fn force_set_room_state_from_server( .await?; let state_lock = self.services.rooms.state.mutex.lock(&*room_id).await; + self.services .rooms .state @@ -752,11 +662,8 @@ pub(super) async fn force_set_room_state_from_server( .update_joined_count(&room_id) .await; - drop(state_lock); - - Ok(RoomMessageEventContent::text_plain( - "Successfully forced the room state from the requested remote server.", - )) + self.write_str("Successfully forced the room state from the requested remote server.") + .await } #[admin_command] @@ -765,8 +672,8 @@ pub(super) async fn get_signing_keys( server_name: Option, notary: Option, query: bool, -) -> Result { - let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into()); +) -> Result { + let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone()); if let Some(notary) = notary { let signing_keys = self @@ -775,9 +682,8 @@ pub(super) async fn get_signing_keys( .notary_request(¬ary, &server_name) .await?; - return Ok(RoomMessageEventContent::notice_markdown(format!( - "```rs\n{signing_keys:#?}\n```" - ))); + let out = format!("```rs\n{signing_keys:#?}\n```"); + return self.write_str(&out).await; } let signing_keys = if query { @@ -792,17 +698,13 @@ pub(super) async fn get_signing_keys( .await? 
}; - Ok(RoomMessageEventContent::notice_markdown(format!( - "```rs\n{signing_keys:#?}\n```" - ))) + let out = format!("```rs\n{signing_keys:#?}\n```"); + self.write_str(&out).await } #[admin_command] -pub(super) async fn get_verify_keys( - &self, - server_name: Option, -) -> Result { - let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into()); +pub(super) async fn get_verify_keys(&self, server_name: Option) -> Result { + let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone()); let keys = self .services @@ -817,7 +719,7 @@ pub(super) async fn get_verify_keys( writeln!(out, "| {key_id} | {key:?} |")?; } - Ok(RoomMessageEventContent::notice_markdown(out)) + self.write_str(&out).await } #[admin_command] @@ -825,18 +727,16 @@ pub(super) async fn resolve_true_destination( &self, server_name: OwnedServerName, no_cache: bool, -) -> Result { +) -> Result { if !self.services.server.config.allow_federation { - return Ok(RoomMessageEventContent::text_plain( - "Federation is disabled on this homeserver.", - )); + return Err!("Federation is disabled on this homeserver.",); } if server_name == self.services.server.name { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Not allowed to send federation requests to ourselves. Please use `get-pdu` for \ fetching local PDUs.", - )); + ); } let actual = self @@ -845,13 +745,12 @@ pub(super) async fn resolve_true_destination( .resolve_actual_dest(&server_name, !no_cache) .await?; - let msg = format!("Destination: {}\nHostname URI: {}", actual.dest, actual.host,); - - Ok(RoomMessageEventContent::text_markdown(msg)) + let msg = format!("Destination: {}\nHostname URI: {}", actual.dest, actual.host); + self.write_str(&msg).await } #[admin_command] -pub(super) async fn memory_stats(&self, opts: Option) -> Result { +pub(super) async fn memory_stats(&self, opts: Option) -> Result { const OPTS: &str = "abcdefghijklmnopqrstuvwxyz"; let opts: String = OPTS @@ -870,13 +769,12 @@ pub(super) async fn memory_stats(&self, opts: Option) -> Result Result { +pub(super) async fn runtime_metrics(&self) -> Result { let out = self.services.server.metrics.runtime_metrics().map_or_else( || "Runtime metrics are not available.".to_owned(), |metrics| { @@ -889,51 +787,51 @@ pub(super) async fn runtime_metrics(&self) -> Result { }, ); - Ok(RoomMessageEventContent::text_markdown(out)) + self.write_str(&out).await } #[cfg(not(tokio_unstable))] #[admin_command] -pub(super) async fn runtime_metrics(&self) -> Result { - Ok(RoomMessageEventContent::text_markdown( - "Runtime metrics require building with `tokio_unstable`.", - )) +pub(super) async fn runtime_metrics(&self) -> Result { + self.write_str("Runtime metrics require building with `tokio_unstable`.") + .await } #[cfg(tokio_unstable)] #[admin_command] -pub(super) async fn runtime_interval(&self) -> Result { +pub(super) async fn runtime_interval(&self) -> Result { let out = self.services.server.metrics.runtime_interval().map_or_else( || "Runtime metrics are not available.".to_owned(), |metrics| format!("```rs\n{metrics:#?}\n```"), ); - Ok(RoomMessageEventContent::text_markdown(out)) + self.write_str(&out).await } #[cfg(not(tokio_unstable))] #[admin_command] -pub(super) async fn runtime_interval(&self) -> Result { - Ok(RoomMessageEventContent::text_markdown( - "Runtime metrics require building with `tokio_unstable`.", - )) +pub(super) async fn runtime_interval(&self) -> Result { + self.write_str("Runtime metrics require building with `tokio_unstable`.") + .await } 
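Several of the rewritten handlers above and below lean on the fact that `write!(self, ...)` expands to a call to `Context::write_fmt`, which returns a future instead of writing eagerly: each successful `match` arm therefore yields an un-awaited future, error arms `return Err!(...)` early, and the whole `match` expression is awaited exactly once at the end. A condensed sketch of that shape, adapted from the appservice handler earlier in this patch (the handler name is shortened and the output is simplified):

```rust
#[admin_command]
async fn show(&self, id: String) -> Result {
	match self.services.appservice.get_registration(&id).await {
		// Diverging arm: returns from the function, never produces a future.
		| None => return Err!("Appservice does not exist."),
		// Successful arm: the value of the block is the future from write!().
		| Some(config) => {
			let yaml = serde_yaml::to_string(&config)?;
			write!(self, "Config for {id}:\n\n{yaml}")
		},
	}
	// The single await drives whichever future the match produced.
	.await
}
```
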
#[admin_command] -pub(super) async fn time(&self) -> Result { +pub(super) async fn time(&self) -> Result { let now = SystemTime::now(); - Ok(RoomMessageEventContent::text_markdown(utils::time::format(now, "%+"))) + let now = utils::time::format(now, "%+"); + + self.write_str(&now).await } #[admin_command] -pub(super) async fn list_dependencies(&self, names: bool) -> Result { +pub(super) async fn list_dependencies(&self, names: bool) -> Result { if names { let out = info::cargo::dependencies_names().join(" "); - return Ok(RoomMessageEventContent::notice_markdown(out)); + return self.write_str(&out).await; } - let deps = info::cargo::dependencies(); let mut out = String::new(); + let deps = info::cargo::dependencies(); writeln!(out, "| name | version | features |")?; writeln!(out, "| ---- | ------- | -------- |")?; for (name, dep) in deps { @@ -944,10 +842,11 @@ pub(super) async fn list_dependencies(&self, names: bool) -> Result, map: Option, -) -> Result { +) -> Result { let map_name = map.as_ref().map_or(EMPTY, String::as_str); let property = property.unwrap_or_else(|| "rocksdb.stats".to_owned()); self.services @@ -967,17 +866,11 @@ pub(super) async fn database_stats( let res = map.property(&property).expect("invalid property"); writeln!(self, "##### {name}:\n```\n{}\n```", res.trim()) }) - .await?; - - Ok(RoomMessageEventContent::notice_plain("")) + .await } #[admin_command] -pub(super) async fn database_files( - &self, - map: Option, - level: Option, -) -> Result { +pub(super) async fn database_files(&self, map: Option, level: Option) -> Result { let mut files: Vec<_> = self.services.db.db.file_list().collect::>()?; files.sort_by_key(|f| f.name.clone()); @@ -1004,16 +897,12 @@ pub(super) async fn database_files( file.column_family_name, ) }) - .await?; - - Ok(RoomMessageEventContent::notice_plain("")) + .await } #[admin_command] -pub(super) async fn trim_memory(&self) -> Result { +pub(super) async fn trim_memory(&self) -> Result { conduwuit::alloc::trim(None)?; - writeln!(self, "done").await?; - - Ok(RoomMessageEventContent::notice_plain("")) + writeln!(self, "done").await } diff --git a/src/admin/debug/tester.rs b/src/admin/debug/tester.rs index 005ee775..0a2b1516 100644 --- a/src/admin/debug/tester.rs +++ b/src/admin/debug/tester.rs @@ -1,7 +1,6 @@ -use conduwuit::Err; -use ruma::events::room::message::RoomMessageEventContent; +use conduwuit::{Err, Result}; -use crate::{Result, admin_command, admin_command_dispatch}; +use crate::{admin_command, admin_command_dispatch}; #[admin_command_dispatch] #[derive(Debug, clap::Subcommand)] @@ -14,14 +13,14 @@ pub(crate) enum TesterCommand { #[rustfmt::skip] #[admin_command] -async fn panic(&self) -> Result { +async fn panic(&self) -> Result { panic!("panicked") } #[rustfmt::skip] #[admin_command] -async fn failure(&self) -> Result { +async fn failure(&self) -> Result { Err!("failed") } @@ -29,20 +28,20 @@ async fn failure(&self) -> Result { #[inline(never)] #[rustfmt::skip] #[admin_command] -async fn tester(&self) -> Result { +async fn tester(&self) -> Result { - Ok(RoomMessageEventContent::notice_plain("legacy")) + self.write_str("Ok").await } #[inline(never)] #[rustfmt::skip] #[admin_command] -async fn timer(&self) -> Result { +async fn timer(&self) -> Result { let started = std::time::Instant::now(); timed(self.body); let elapsed = started.elapsed(); - Ok(RoomMessageEventContent::notice_plain(format!("completed in {elapsed:#?}"))) + self.write_str(&format!("completed in {elapsed:#?}")).await } #[inline(never)] diff --git 
a/src/admin/federation/commands.rs b/src/admin/federation/commands.rs index 12ed9c25..545dcbca 100644 --- a/src/admin/federation/commands.rs +++ b/src/admin/federation/commands.rs @@ -1,49 +1,48 @@ use std::fmt::Write; -use conduwuit::Result; +use conduwuit::{Err, Result}; use futures::StreamExt; -use ruma::{ - OwnedRoomId, OwnedServerName, OwnedUserId, events::room::message::RoomMessageEventContent, -}; +use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId}; use crate::{admin_command, get_room_info}; #[admin_command] -pub(super) async fn disable_room(&self, room_id: OwnedRoomId) -> Result { +pub(super) async fn disable_room(&self, room_id: OwnedRoomId) -> Result { self.services.rooms.metadata.disable_room(&room_id, true); - Ok(RoomMessageEventContent::text_plain("Room disabled.")) + self.write_str("Room disabled.").await } #[admin_command] -pub(super) async fn enable_room(&self, room_id: OwnedRoomId) -> Result { +pub(super) async fn enable_room(&self, room_id: OwnedRoomId) -> Result { self.services.rooms.metadata.disable_room(&room_id, false); - Ok(RoomMessageEventContent::text_plain("Room enabled.")) + self.write_str("Room enabled.").await } #[admin_command] -pub(super) async fn incoming_federation(&self) -> Result { - let map = self - .services - .rooms - .event_handler - .federation_handletime - .read() - .expect("locked"); - let mut msg = format!("Handling {} incoming pdus:\n", map.len()); +pub(super) async fn incoming_federation(&self) -> Result { + let msg = { + let map = self + .services + .rooms + .event_handler + .federation_handletime + .read() + .expect("locked"); - for (r, (e, i)) in map.iter() { - let elapsed = i.elapsed(); - writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60)?; - } + let mut msg = format!("Handling {} incoming pdus:\n", map.len()); + for (r, (e, i)) in map.iter() { + let elapsed = i.elapsed(); + writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60)?; + } - Ok(RoomMessageEventContent::text_plain(&msg)) + msg + }; + + self.write_str(&msg).await } #[admin_command] -pub(super) async fn fetch_support_well_known( - &self, - server_name: OwnedServerName, -) -> Result { +pub(super) async fn fetch_support_well_known(&self, server_name: OwnedServerName) -> Result { let response = self .services .client @@ -55,54 +54,44 @@ pub(super) async fn fetch_support_well_known( let text = response.text().await?; if text.is_empty() { - return Ok(RoomMessageEventContent::text_plain("Response text/body is empty.")); + return Err!("Response text/body is empty."); } if text.len() > 1500 { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Response text/body is over 1500 characters, assuming no support well-known.", - )); + ); } let json: serde_json::Value = match serde_json::from_str(&text) { | Ok(json) => json, | Err(_) => { - return Ok(RoomMessageEventContent::text_plain( - "Response text/body is not valid JSON.", - )); + return Err!("Response text/body is not valid JSON.",); }, }; let pretty_json: String = match serde_json::to_string_pretty(&json) { | Ok(json) => json, | Err(_) => { - return Ok(RoomMessageEventContent::text_plain( - "Response text/body is not valid JSON.", - )); + return Err!("Response text/body is not valid JSON.",); }, }; - Ok(RoomMessageEventContent::notice_markdown(format!( - "Got JSON response:\n\n```json\n{pretty_json}\n```" - ))) + self.write_str(&format!("Got JSON response:\n\n```json\n{pretty_json}\n```")) + .await } #[admin_command] -pub(super) async fn remote_user_in_rooms( - 
&self, - user_id: OwnedUserId, -) -> Result { +pub(super) async fn remote_user_in_rooms(&self, user_id: OwnedUserId) -> Result { if user_id.server_name() == self.services.server.name { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "User belongs to our server, please use `list-joined-rooms` user admin command \ instead.", - )); + ); } if !self.services.users.exists(&user_id).await { - return Ok(RoomMessageEventContent::text_plain( - "Remote user does not exist in our database.", - )); + return Err!("Remote user does not exist in our database.",); } let mut rooms: Vec<(OwnedRoomId, u64, String)> = self @@ -115,21 +104,19 @@ pub(super) async fn remote_user_in_rooms( .await; if rooms.is_empty() { - return Ok(RoomMessageEventContent::text_plain("User is not in any rooms.")); + return Err!("User is not in any rooms."); } rooms.sort_by_key(|r| r.1); rooms.reverse(); - let output = format!( - "Rooms {user_id} shares with us ({}):\n```\n{}\n```", - rooms.len(), - rooms - .iter() - .map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}")) - .collect::>() - .join("\n") - ); + let num = rooms.len(); + let body = rooms + .iter() + .map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}")) + .collect::>() + .join("\n"); - Ok(RoomMessageEventContent::text_markdown(output)) + self.write_str(&format!("Rooms {user_id} shares with us ({num}):\n```\n{body}\n```",)) + .await } diff --git a/src/admin/media/commands.rs b/src/admin/media/commands.rs index c8364969..7aed28db 100644 --- a/src/admin/media/commands.rs +++ b/src/admin/media/commands.rs @@ -1,13 +1,11 @@ use std::time::Duration; use conduwuit::{ - Result, debug, debug_info, debug_warn, error, info, trace, utils::time::parse_timepoint_ago, + Err, Result, debug, debug_info, debug_warn, error, info, trace, + utils::time::parse_timepoint_ago, warn, }; use conduwuit_service::media::Dim; -use ruma::{ - Mxc, OwnedEventId, OwnedMxcUri, OwnedServerName, - events::room::message::RoomMessageEventContent, -}; +use ruma::{Mxc, OwnedEventId, OwnedMxcUri, OwnedServerName}; use crate::{admin_command, utils::parse_local_user_id}; @@ -16,11 +14,9 @@ pub(super) async fn delete( &self, mxc: Option, event_id: Option, -) -> Result { +) -> Result { if event_id.is_some() && mxc.is_some() { - return Ok(RoomMessageEventContent::text_plain( - "Please specify either an MXC or an event ID, not both.", - )); + return Err!("Please specify either an MXC or an event ID, not both.",); } if let Some(mxc) = mxc { @@ -30,9 +26,7 @@ pub(super) async fn delete( .delete(&mxc.as_str().try_into()?) 
.await?; - return Ok(RoomMessageEventContent::text_plain( - "Deleted the MXC from our database and on our filesystem.", - )); + return Err!("Deleted the MXC from our database and on our filesystem.",); } if let Some(event_id) = event_id { @@ -113,41 +107,36 @@ pub(super) async fn delete( let final_url = url.to_string().replace('"', ""); mxc_urls.push(final_url); } else { - info!( + warn!( "Found a URL in the event ID {event_id} but did not \ start with mxc://, ignoring" ); } } else { - info!("No \"url\" key in \"file\" key."); + error!("No \"url\" key in \"file\" key."); } } } } else { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Event ID does not have a \"content\" key or failed parsing the \ event ID JSON.", - )); + ); } } else { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Event ID does not have a \"content\" key, this is not a message or an \ event type that contains media.", - )); + ); } }, | _ => { - return Ok(RoomMessageEventContent::text_plain( - "Event ID does not exist or is not known to us.", - )); + return Err!("Event ID does not exist or is not known to us.",); }, } if mxc_urls.is_empty() { - info!("Parsed event ID {event_id} but did not contain any MXC URLs."); - return Ok(RoomMessageEventContent::text_plain( - "Parsed event ID but found no MXC URLs.", - )); + return Err!("Parsed event ID but found no MXC URLs.",); } let mut mxc_deletion_count: usize = 0; @@ -170,27 +159,27 @@ pub(super) async fn delete( } } - return Ok(RoomMessageEventContent::text_plain(format!( - "Deleted {mxc_deletion_count} total MXCs from our database and the filesystem from \ - event ID {event_id}." - ))); + return self + .write_str(&format!( + "Deleted {mxc_deletion_count} total MXCs from our database and the filesystem \ + from event ID {event_id}." + )) + .await; } - Ok(RoomMessageEventContent::text_plain( + Err!( "Please specify either an MXC using --mxc or an event ID using --event-id of the \ - message containing an image. See --help for details.", - )) + message containing an image. See --help for details." + ) } #[admin_command] -pub(super) async fn delete_list(&self) -> Result { +pub(super) async fn delete_list(&self) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. Add --help for details.",); } let mut failed_parsed_mxcs: usize = 0; @@ -204,7 +193,6 @@ pub(super) async fn delete_list(&self) -> Result { .try_into() .inspect_err(|e| { debug_warn!("Failed to parse user-provided MXC URI: {e}"); - failed_parsed_mxcs = failed_parsed_mxcs.saturating_add(1); }) .ok() @@ -227,10 +215,11 @@ pub(super) async fn delete_list(&self) -> Result { } } - Ok(RoomMessageEventContent::text_plain(format!( + self.write_str(&format!( "Finished bulk MXC deletion, deleted {mxc_deletion_count} total MXCs from our database \ and the filesystem. 
{failed_parsed_mxcs} MXCs failed to be parsed from the database.", - ))) + )) + .await } #[admin_command] @@ -240,11 +229,9 @@ pub(super) async fn delete_past_remote_media( before: bool, after: bool, yes_i_want_to_delete_local_media: bool, -) -> Result { +) -> Result { if before && after { - return Ok(RoomMessageEventContent::text_plain( - "Please only pick one argument, --before or --after.", - )); + return Err!("Please only pick one argument, --before or --after.",); } assert!(!(before && after), "--before and --after should not be specified together"); @@ -260,23 +247,18 @@ pub(super) async fn delete_past_remote_media( ) .await?; - Ok(RoomMessageEventContent::text_plain(format!( - "Deleted {deleted_count} total files.", - ))) + self.write_str(&format!("Deleted {deleted_count} total files.",)) + .await } #[admin_command] -pub(super) async fn delete_all_from_user( - &self, - username: String, -) -> Result { +pub(super) async fn delete_all_from_user(&self, username: String) -> Result { let user_id = parse_local_user_id(self.services, &username)?; let deleted_count = self.services.media.delete_from_user(&user_id).await?; - Ok(RoomMessageEventContent::text_plain(format!( - "Deleted {deleted_count} total files.", - ))) + self.write_str(&format!("Deleted {deleted_count} total files.",)) + .await } #[admin_command] @@ -284,11 +266,9 @@ pub(super) async fn delete_all_from_server( &self, server_name: OwnedServerName, yes_i_want_to_delete_local_media: bool, -) -> Result { +) -> Result { if server_name == self.services.globals.server_name() && !yes_i_want_to_delete_local_media { - return Ok(RoomMessageEventContent::text_plain( - "This command only works for remote media by default.", - )); + return Err!("This command only works for remote media by default.",); } let Ok(all_mxcs) = self @@ -298,9 +278,7 @@ pub(super) async fn delete_all_from_server( .await .inspect_err(|e| error!("Failed to get MXC URIs from our database: {e}")) else { - return Ok(RoomMessageEventContent::text_plain( - "Failed to get MXC URIs from our database", - )); + return Err!("Failed to get MXC URIs from our database",); }; let mut deleted_count: usize = 0; @@ -336,17 +314,16 @@ pub(super) async fn delete_all_from_server( } } - Ok(RoomMessageEventContent::text_plain(format!( - "Deleted {deleted_count} total files.", - ))) + self.write_str(&format!("Deleted {deleted_count} total files.",)) + .await } #[admin_command] -pub(super) async fn get_file_info(&self, mxc: OwnedMxcUri) -> Result { +pub(super) async fn get_file_info(&self, mxc: OwnedMxcUri) -> Result { let mxc: Mxc<'_> = mxc.as_str().try_into()?; let metadata = self.services.media.get_metadata(&mxc).await; - Ok(RoomMessageEventContent::notice_markdown(format!("```\n{metadata:#?}\n```"))) + self.write_str(&format!("```\n{metadata:#?}\n```")).await } #[admin_command] @@ -355,7 +332,7 @@ pub(super) async fn get_remote_file( mxc: OwnedMxcUri, server: Option, timeout: u32, -) -> Result { +) -> Result { let mxc: Mxc<'_> = mxc.as_str().try_into()?; let timeout = Duration::from_millis(timeout.into()); let mut result = self @@ -368,8 +345,8 @@ pub(super) async fn get_remote_file( let len = result.content.as_ref().expect("content").len(); result.content.as_mut().expect("content").clear(); - let out = format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```"); - Ok(RoomMessageEventContent::notice_markdown(out)) + self.write_str(&format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```")) + .await } #[admin_command] @@ -380,7 +357,7 @@ pub(super) async fn 
get_remote_thumbnail( timeout: u32, width: u32, height: u32, -) -> Result { +) -> Result { let mxc: Mxc<'_> = mxc.as_str().try_into()?; let timeout = Duration::from_millis(timeout.into()); let dim = Dim::new(width, height, None); @@ -394,6 +371,6 @@ pub(super) async fn get_remote_thumbnail( let len = result.content.as_ref().expect("content").len(); result.content.as_mut().expect("content").clear(); - let out = format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```"); - Ok(RoomMessageEventContent::notice_markdown(out)) + self.write_str(&format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```")) + .await } diff --git a/src/admin/mod.rs b/src/admin/mod.rs index 695155e8..1f777fa9 100644 --- a/src/admin/mod.rs +++ b/src/admin/mod.rs @@ -4,7 +4,7 @@ #![allow(clippy::too_many_arguments)] pub(crate) mod admin; -pub(crate) mod command; +pub(crate) mod context; pub(crate) mod processor; mod tests; pub(crate) mod utils; @@ -23,13 +23,9 @@ extern crate conduwuit_api as api; extern crate conduwuit_core as conduwuit; extern crate conduwuit_service as service; -pub(crate) use conduwuit::Result; pub(crate) use conduwuit_macros::{admin_command, admin_command_dispatch}; -pub(crate) use crate::{ - command::Command, - utils::{escape_html, get_room_info}, -}; +pub(crate) use crate::{context::Context, utils::get_room_info}; pub(crate) const PAGE_SIZE: usize = 100; diff --git a/src/admin/processor.rs b/src/admin/processor.rs index 53a15098..8282a846 100644 --- a/src/admin/processor.rs +++ b/src/admin/processor.rs @@ -33,7 +33,7 @@ use service::{ use tracing::Level; use tracing_subscriber::{EnvFilter, filter::LevelFilter}; -use crate::{Command, admin, admin::AdminCommand}; +use crate::{admin, admin::AdminCommand, context::Context}; #[must_use] pub(super) fn complete(line: &str) -> String { complete_command(AdminCommand::command(), line) } @@ -58,7 +58,7 @@ async fn process_command(services: Arc, input: &CommandInput) -> Proce | Ok(parsed) => parsed, }; - let context = Command { + let context = Context { services: &services, body: &body, timer: SystemTime::now(), @@ -103,7 +103,7 @@ fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult { /// Parse and process a message from the admin room async fn process( - context: &Command<'_>, + context: &Context<'_>, command: AdminCommand, args: &[String], ) -> (Result, String) { @@ -132,7 +132,7 @@ async fn process( (result, output) } -fn capture_create(context: &Command<'_>) -> (Arc, Arc>) { +fn capture_create(context: &Context<'_>) -> (Arc, Arc>) { let env_config = &context.services.server.config.admin_log_capture; let env_filter = EnvFilter::try_new(env_config).unwrap_or_else(|e| { warn!("admin_log_capture filter invalid: {e:?}"); diff --git a/src/admin/query/account_data.rs b/src/admin/query/account_data.rs index 879aed16..228d2120 100644 --- a/src/admin/query/account_data.rs +++ b/src/admin/query/account_data.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::StreamExt; -use ruma::{OwnedRoomId, OwnedUserId, events::room::message::RoomMessageEventContent}; +use ruma::{OwnedRoomId, OwnedUserId}; use crate::{admin_command, admin_command_dispatch}; @@ -36,7 +36,7 @@ async fn changes_since( user_id: OwnedUserId, since: u64, room_id: Option, -) -> Result { +) -> Result { let timer = tokio::time::Instant::now(); let results: Vec<_> = self .services @@ -46,9 +46,8 @@ async fn changes_since( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query 
completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")) + .await } #[admin_command] @@ -57,7 +56,7 @@ async fn account_data_get( user_id: OwnedUserId, kind: String, room_id: Option, -) -> Result { +) -> Result { let timer = tokio::time::Instant::now(); let results = self .services @@ -66,7 +65,6 @@ async fn account_data_get( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")) + .await } diff --git a/src/admin/query/appservice.rs b/src/admin/query/appservice.rs index 0359261a..28bf6451 100644 --- a/src/admin/query/appservice.rs +++ b/src/admin/query/appservice.rs @@ -2,7 +2,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::TryStreamExt; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/appservice.rs @@ -18,7 +18,7 @@ pub(crate) enum AppserviceCommand { } /// All the getters and iterators from src/database/key_value/appservice.rs -pub(super) async fn process(subcommand: AppserviceCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(subcommand: AppserviceCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { diff --git a/src/admin/query/globals.rs b/src/admin/query/globals.rs index 33810704..3681acfd 100644 --- a/src/admin/query/globals.rs +++ b/src/admin/query/globals.rs @@ -2,7 +2,7 @@ use clap::Subcommand; use conduwuit::Result; use ruma::OwnedServerName; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/globals.rs @@ -21,7 +21,7 @@ pub(crate) enum GlobalsCommand { } /// All the getters and iterators from src/database/key_value/globals.rs -pub(super) async fn process(subcommand: GlobalsCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(subcommand: GlobalsCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { diff --git a/src/admin/query/presence.rs b/src/admin/query/presence.rs index 65164802..5b7ead4b 100644 --- a/src/admin/query/presence.rs +++ b/src/admin/query/presence.rs @@ -3,7 +3,7 @@ use conduwuit::Result; use futures::StreamExt; use ruma::OwnedUserId; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/presence.rs @@ -23,7 +23,7 @@ pub(crate) enum PresenceCommand { } /// All the getters and iterators in key_value/presence.rs -pub(super) async fn process(subcommand: PresenceCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(subcommand: PresenceCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { diff --git a/src/admin/query/pusher.rs b/src/admin/query/pusher.rs index 583c4999..0d0e6cc9 100644 --- a/src/admin/query/pusher.rs +++ b/src/admin/query/pusher.rs @@ -2,7 +2,7 @@ use clap::Subcommand; use conduwuit::Result; use ruma::OwnedUserId; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] pub(crate) enum PusherCommand { @@ -13,7 +13,7 @@ pub(crate) enum PusherCommand { }, } -pub(super) async fn process(subcommand: PusherCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(subcommand: 
PusherCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { diff --git a/src/admin/query/raw.rs b/src/admin/query/raw.rs index c503eee5..0e248c65 100644 --- a/src/admin/query/raw.rs +++ b/src/admin/query/raw.rs @@ -11,7 +11,6 @@ use conduwuit::{ use conduwuit_database::Map; use conduwuit_service::Services; use futures::{FutureExt, Stream, StreamExt, TryStreamExt}; -use ruma::events::room::message::RoomMessageEventContent; use tokio::time::Instant; use crate::{admin_command, admin_command_dispatch}; @@ -170,7 +169,7 @@ pub(super) async fn compact( into: Option, parallelism: Option, exhaustive: bool, -) -> Result { +) -> Result { use conduwuit_database::compact::Options; let default_all_maps: Option<_> = map.is_none().then(|| { @@ -221,17 +220,11 @@ pub(super) async fn compact( let results = results.await; let query_time = timer.elapsed(); self.write_str(&format!("Jobs completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")) - .await?; - - Ok(RoomMessageEventContent::text_plain("")) + .await } #[admin_command] -pub(super) async fn raw_count( - &self, - map: Option, - prefix: Option, -) -> Result { +pub(super) async fn raw_count(&self, map: Option, prefix: Option) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); let timer = Instant::now(); @@ -242,17 +235,11 @@ pub(super) async fn raw_count( let query_time = timer.elapsed(); self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{count:#?}\n```")) - .await?; - - Ok(RoomMessageEventContent::text_plain("")) + .await } #[admin_command] -pub(super) async fn raw_keys( - &self, - map: String, - prefix: Option, -) -> Result { +pub(super) async fn raw_keys(&self, map: String, prefix: Option) -> Result { writeln!(self, "```").boxed().await?; let map = self.services.db.get(map.as_str())?; @@ -266,18 +253,12 @@ pub(super) async fn raw_keys( .await?; let query_time = timer.elapsed(); - let out = format!("\n```\n\nQuery completed in {query_time:?}"); - self.write_str(out.as_str()).await?; - - Ok(RoomMessageEventContent::text_plain("")) + self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}")) + .await } #[admin_command] -pub(super) async fn raw_keys_sizes( - &self, - map: Option, - prefix: Option, -) -> Result { +pub(super) async fn raw_keys_sizes(&self, map: Option, prefix: Option) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); let timer = Instant::now(); @@ -294,18 +275,12 @@ pub(super) async fn raw_keys_sizes( .await; let query_time = timer.elapsed(); - let result = format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}"); - self.write_str(result.as_str()).await?; - - Ok(RoomMessageEventContent::text_plain("")) + self.write_str(&format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}")) + .await } #[admin_command] -pub(super) async fn raw_keys_total( - &self, - map: Option, - prefix: Option, -) -> Result { +pub(super) async fn raw_keys_total(&self, map: Option, prefix: Option) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); let timer = Instant::now(); @@ -318,19 +293,12 @@ pub(super) async fn raw_keys_total( .await; let query_time = timer.elapsed(); - self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}")) - .await?; - - Ok(RoomMessageEventContent::text_plain("")) + .await } #[admin_command] -pub(super) async fn raw_vals_sizes( - &self, - map: Option, - prefix: Option, -) -> Result { +pub(super) async fn raw_vals_sizes(&self, map: Option, prefix: Option) -> Result { let prefix = 
prefix.as_deref().unwrap_or(EMPTY); let timer = Instant::now(); @@ -348,18 +316,12 @@ pub(super) async fn raw_vals_sizes( .await; let query_time = timer.elapsed(); - let result = format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}"); - self.write_str(result.as_str()).await?; - - Ok(RoomMessageEventContent::text_plain("")) + self.write_str(&format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}")) + .await } #[admin_command] -pub(super) async fn raw_vals_total( - &self, - map: Option, - prefix: Option, -) -> Result { +pub(super) async fn raw_vals_total(&self, map: Option, prefix: Option) -> Result { let prefix = prefix.as_deref().unwrap_or(EMPTY); let timer = Instant::now(); @@ -373,19 +335,12 @@ pub(super) async fn raw_vals_total( .await; let query_time = timer.elapsed(); - self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}")) - .await?; - - Ok(RoomMessageEventContent::text_plain("")) + .await } #[admin_command] -pub(super) async fn raw_iter( - &self, - map: String, - prefix: Option, -) -> Result { +pub(super) async fn raw_iter(&self, map: String, prefix: Option) -> Result { writeln!(self, "```").await?; let map = self.services.db.get(&map)?; @@ -401,9 +356,7 @@ pub(super) async fn raw_iter( let query_time = timer.elapsed(); self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}")) - .await?; - - Ok(RoomMessageEventContent::text_plain("")) + .await } #[admin_command] @@ -412,7 +365,7 @@ pub(super) async fn raw_keys_from( map: String, start: String, limit: Option, -) -> Result { +) -> Result { writeln!(self, "```").await?; let map = self.services.db.get(&map)?; @@ -426,9 +379,7 @@ pub(super) async fn raw_keys_from( let query_time = timer.elapsed(); self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}")) - .await?; - - Ok(RoomMessageEventContent::text_plain("")) + .await } #[admin_command] @@ -437,7 +388,7 @@ pub(super) async fn raw_iter_from( map: String, start: String, limit: Option, -) -> Result { +) -> Result { let map = self.services.db.get(&map)?; let timer = Instant::now(); let result = map @@ -449,41 +400,38 @@ pub(super) async fn raw_iter_from( .await?; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -pub(super) async fn raw_del(&self, map: String, key: String) -> Result { +pub(super) async fn raw_del(&self, map: String, key: String) -> Result { let map = self.services.db.get(&map)?; let timer = Instant::now(); map.remove(&key); - let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Operation completed in {query_time:?}" - ))) + let query_time = timer.elapsed(); + self.write_str(&format!("Operation completed in {query_time:?}")) + .await } #[admin_command] -pub(super) async fn raw_get(&self, map: String, key: String) -> Result { +pub(super) async fn raw_get(&self, map: String, key: String) -> Result { let map = self.services.db.get(&map)?; let timer = Instant::now(); let handle = map.get(&key).await?; + let query_time = timer.elapsed(); let result = String::from_utf8_lossy(&handle); - - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```")) + .await } #[admin_command] 
-pub(super) async fn raw_maps(&self) -> Result { +pub(super) async fn raw_maps(&self) -> Result { let list: Vec<_> = self.services.db.iter().map(at!(0)).copied().collect(); - Ok(RoomMessageEventContent::notice_markdown(format!("{list:#?}"))) + self.write_str(&format!("{list:#?}")).await } fn with_maps_or<'a>( diff --git a/src/admin/query/resolver.rs b/src/admin/query/resolver.rs index 10748d88..4a39a40e 100644 --- a/src/admin/query/resolver.rs +++ b/src/admin/query/resolver.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::{Result, utils::time}; use futures::StreamExt; -use ruma::{OwnedServerName, events::room::message::RoomMessageEventContent}; +use ruma::OwnedServerName; use crate::{admin_command, admin_command_dispatch}; @@ -21,10 +21,7 @@ pub(crate) enum ResolverCommand { } #[admin_command] -async fn destinations_cache( - &self, - server_name: Option, -) -> Result { +async fn destinations_cache(&self, server_name: Option) -> Result { use service::resolver::cache::CachedDest; writeln!(self, "| Server Name | Destination | Hostname | Expires |").await?; @@ -44,11 +41,11 @@ async fn destinations_cache( .await?; } - Ok(RoomMessageEventContent::notice_plain("")) + Ok(()) } #[admin_command] -async fn overrides_cache(&self, server_name: Option) -> Result { +async fn overrides_cache(&self, server_name: Option) -> Result { use service::resolver::cache::CachedOverride; writeln!(self, "| Server Name | IP | Port | Expires | Overriding |").await?; @@ -70,5 +67,5 @@ async fn overrides_cache(&self, server_name: Option) -> Result) -> Result { +pub(super) async fn process(subcommand: RoomAliasCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { diff --git a/src/admin/query/room_state_cache.rs b/src/admin/query/room_state_cache.rs index 7f5e2536..c64cd173 100644 --- a/src/admin/query/room_state_cache.rs +++ b/src/admin/query/room_state_cache.rs @@ -1,11 +1,9 @@ use clap::Subcommand; -use conduwuit::{Error, Result}; +use conduwuit::Result; use futures::StreamExt; -use ruma::{ - OwnedRoomId, OwnedServerName, OwnedUserId, events::room::message::RoomMessageEventContent, -}; +use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId}; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] pub(crate) enum RoomStateCacheCommand { @@ -78,10 +76,10 @@ pub(crate) enum RoomStateCacheCommand { }, } -pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command<'_>) -> Result { +pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context<'_>) -> Result { let services = context.services; - let c = match subcommand { + match subcommand { | RoomStateCacheCommand::ServerInRoom { server, room_id } => { let timer = tokio::time::Instant::now(); let result = services @@ -91,9 +89,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomServers { room_id } => { let timer = tokio::time::Instant::now(); @@ -106,9 +106,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in 
{query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::ServerRooms { server } => { let timer = tokio::time::Instant::now(); @@ -121,9 +123,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomMembers { room_id } => { let timer = tokio::time::Instant::now(); @@ -136,9 +140,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::LocalUsersInRoom { room_id } => { let timer = tokio::time::Instant::now(); @@ -151,9 +157,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::ActiveLocalUsersInRoom { room_id } => { let timer = tokio::time::Instant::now(); @@ -166,18 +174,22 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomJoinedCount { room_id } => { let timer = tokio::time::Instant::now(); let results = services.rooms.state_cache.room_joined_count(&room_id).await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomInvitedCount { room_id } => { let timer = tokio::time::Instant::now(); @@ -188,9 +200,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomUserOnceJoined { room_id } => { let timer = tokio::time::Instant::now(); @@ -203,9 +217,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in 
{query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomMembersInvited { room_id } => { let timer = tokio::time::Instant::now(); @@ -218,9 +234,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::GetInviteCount { room_id, user_id } => { let timer = tokio::time::Instant::now(); @@ -231,9 +249,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::GetLeftCount { room_id, user_id } => { let timer = tokio::time::Instant::now(); @@ -244,9 +264,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomsJoined { user_id } => { let timer = tokio::time::Instant::now(); @@ -259,9 +281,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomsInvited { user_id } => { let timer = tokio::time::Instant::now(); @@ -273,9 +297,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::RoomsLeft { user_id } => { let timer = tokio::time::Instant::now(); @@ -287,9 +313,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, | RoomStateCacheCommand::InviteState { user_id, room_id } => { let timer = tokio::time::Instant::now(); @@ -300,13 +328,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command .await; let query_time = timer.elapsed(); - Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!( - "Query 
completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, - }?; - - context.write_str(c.body()).await?; - - Ok(()) + } } diff --git a/src/admin/query/room_timeline.rs b/src/admin/query/room_timeline.rs index 6f08aee9..0fd22ca7 100644 --- a/src/admin/query/room_timeline.rs +++ b/src/admin/query/room_timeline.rs @@ -1,7 +1,7 @@ use clap::Subcommand; use conduwuit::{PduCount, Result, utils::stream::TryTools}; use futures::TryStreamExt; -use ruma::{OwnedRoomOrAliasId, events::room::message::RoomMessageEventContent}; +use ruma::OwnedRoomOrAliasId; use crate::{admin_command, admin_command_dispatch}; @@ -24,7 +24,7 @@ pub(crate) enum RoomTimelineCommand { } #[admin_command] -pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result { +pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result { let room_id = self.services.rooms.alias.resolve(&room_id).await?; let result = self @@ -34,7 +34,7 @@ pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result, limit: Option, -) -> Result { +) -> Result { let room_id = self.services.rooms.alias.resolve(&room_id).await?; let from: Option = from.as_deref().map(str::parse).transpose()?; @@ -57,5 +57,5 @@ pub(super) async fn pdus( .try_collect() .await?; - Ok(RoomMessageEventContent::notice_markdown(format!("{result:#?}"))) + self.write_str(&format!("{result:#?}")).await } diff --git a/src/admin/query/sending.rs b/src/admin/query/sending.rs index 860bca4a..8b1676bc 100644 --- a/src/admin/query/sending.rs +++ b/src/admin/query/sending.rs @@ -1,10 +1,10 @@ use clap::Subcommand; -use conduwuit::Result; +use conduwuit::{Err, Result}; use futures::StreamExt; -use ruma::{OwnedServerName, OwnedUserId, events::room::message::RoomMessageEventContent}; +use ruma::{OwnedServerName, OwnedUserId}; use service::sending::Destination; -use crate::Command; +use crate::Context; #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/sending.rs @@ -62,17 +62,7 @@ pub(crate) enum SendingCommand { } /// All the getters and iterators in key_value/sending.rs -pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) -> Result { - let c = reprocess(subcommand, context).await?; - context.write_str(c.body()).await?; - Ok(()) -} - -/// All the getters and iterators in key_value/sending.rs -pub(super) async fn reprocess( - subcommand: SendingCommand, - context: &Command<'_>, -) -> Result { +pub(super) async fn process(subcommand: SendingCommand, context: &Context<'_>) -> Result { let services = context.services; match subcommand { @@ -82,9 +72,11 @@ pub(super) async fn reprocess( let active_requests = results.collect::>().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```" + )) + .await }, | SendingCommand::QueuedRequests { appservice_id, @@ -97,19 +89,19 @@ pub(super) async fn reprocess( && user_id.is_none() && push_key.is_none() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. 
See --help for more details.", - )); + ); } let timer = tokio::time::Instant::now(); let results = match (appservice_id, server_name, user_id, push_key) { | (Some(appservice_id), None, None, None) => { if appservice_id.is_empty() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); } services @@ -123,10 +115,10 @@ pub(super) async fn reprocess( .queued_requests(&Destination::Federation(server_name)), | (None, None, Some(user_id), Some(push_key)) => { if push_key.is_empty() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); } services @@ -135,25 +127,27 @@ pub(super) async fn reprocess( .queued_requests(&Destination::Push(user_id, push_key)) }, | (Some(_), Some(_), Some(_), Some(_)) => { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. Not all of them See --help for more details.", - )); + ); }, | _ => { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); }, }; let queued_requests = results.collect::>().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{queued_requests:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{queued_requests:#?}\n```" + )) + .await }, | SendingCommand::ActiveRequestsFor { appservice_id, @@ -166,20 +160,20 @@ pub(super) async fn reprocess( && user_id.is_none() && push_key.is_none() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); } let timer = tokio::time::Instant::now(); let results = match (appservice_id, server_name, user_id, push_key) { | (Some(appservice_id), None, None, None) => { if appservice_id.is_empty() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); } services @@ -193,10 +187,10 @@ pub(super) async fn reprocess( .active_requests_for(&Destination::Federation(server_name)), | (None, None, Some(user_id), Some(push_key)) => { if push_key.is_empty() { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. See --help for more details.", - )); + ); } services @@ -205,34 +199,38 @@ pub(super) async fn reprocess( .active_requests_for(&Destination::Push(user_id, push_key)) }, | (Some(_), Some(_), Some(_), Some(_)) => { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. Not all of them See --help for more details.", - )); + ); }, | _ => { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "An appservice ID, server name, or a user ID with push key must be \ specified via arguments. 
See --help for more details.", - )); + ); }, }; let active_requests = results.collect::>().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```" + )) + .await }, | SendingCommand::GetLatestEduCount { server_name } => { let timer = tokio::time::Instant::now(); let results = services.sending.db.get_latest_educount(&server_name).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" - ))) + context + .write_str(&format!( + "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```" + )) + .await }, } } diff --git a/src/admin/query/short.rs b/src/admin/query/short.rs index 0957c15e..aa7c8666 100644 --- a/src/admin/query/short.rs +++ b/src/admin/query/short.rs @@ -1,6 +1,6 @@ use clap::Subcommand; use conduwuit::Result; -use ruma::{OwnedEventId, OwnedRoomOrAliasId, events::room::message::RoomMessageEventContent}; +use ruma::{OwnedEventId, OwnedRoomOrAliasId}; use crate::{admin_command, admin_command_dispatch}; @@ -18,10 +18,7 @@ pub(crate) enum ShortCommand { } #[admin_command] -pub(super) async fn short_event_id( - &self, - event_id: OwnedEventId, -) -> Result { +pub(super) async fn short_event_id(&self, event_id: OwnedEventId) -> Result { let shortid = self .services .rooms @@ -29,17 +26,14 @@ pub(super) async fn short_event_id( .get_shorteventid(&event_id) .await?; - Ok(RoomMessageEventContent::notice_markdown(format!("{shortid:#?}"))) + self.write_str(&format!("{shortid:#?}")).await } #[admin_command] -pub(super) async fn short_room_id( - &self, - room_id: OwnedRoomOrAliasId, -) -> Result { +pub(super) async fn short_room_id(&self, room_id: OwnedRoomOrAliasId) -> Result { let room_id = self.services.rooms.alias.resolve(&room_id).await?; let shortid = self.services.rooms.short.get_shortroomid(&room_id).await?; - Ok(RoomMessageEventContent::notice_markdown(format!("{shortid:#?}"))) + self.write_str(&format!("{shortid:#?}")).await } diff --git a/src/admin/query/users.rs b/src/admin/query/users.rs index 5995bc62..0f34d13f 100644 --- a/src/admin/query/users.rs +++ b/src/admin/query/users.rs @@ -1,9 +1,7 @@ use clap::Subcommand; use conduwuit::Result; use futures::stream::StreamExt; -use ruma::{ - OwnedDeviceId, OwnedRoomId, OwnedUserId, events::room::message::RoomMessageEventContent, -}; +use ruma::{OwnedDeviceId, OwnedRoomId, OwnedUserId}; use crate::{admin_command, admin_command_dispatch}; @@ -99,11 +97,7 @@ pub(crate) enum UsersCommand { } #[admin_command] -async fn get_shared_rooms( - &self, - user_a: OwnedUserId, - user_b: OwnedUserId, -) -> Result { +async fn get_shared_rooms(&self, user_a: OwnedUserId, user_b: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let result: Vec<_> = self .services @@ -115,9 +109,8 @@ async fn get_shared_rooms( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] @@ -127,7 +120,7 @@ async fn get_backup_session( version: String, room_id: OwnedRoomId, session_id: String, -) -> Result { +) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ 
-136,9 +129,8 @@ async fn get_backup_session( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] @@ -147,7 +139,7 @@ async fn get_room_backups( user_id: OwnedUserId, version: String, room_id: OwnedRoomId, -) -> Result { +) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -156,32 +148,22 @@ async fn get_room_backups( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_all_backups( - &self, - user_id: OwnedUserId, - version: String, -) -> Result { +async fn get_all_backups(&self, user_id: OwnedUserId, version: String) -> Result { let timer = tokio::time::Instant::now(); let result = self.services.key_backups.get_all(&user_id, &version).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_backup_algorithm( - &self, - user_id: OwnedUserId, - version: String, -) -> Result { +async fn get_backup_algorithm(&self, user_id: OwnedUserId, version: String) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -190,16 +172,12 @@ async fn get_backup_algorithm( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_latest_backup_version( - &self, - user_id: OwnedUserId, -) -> Result { +async fn get_latest_backup_version(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -208,36 +186,33 @@ async fn get_latest_backup_version( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_latest_backup(&self, user_id: OwnedUserId) -> Result { +async fn get_latest_backup(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let result = self.services.key_backups.get_latest_backup(&user_id).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn iter_users(&self) -> Result { +async fn iter_users(&self) -> Result { let timer = tokio::time::Instant::now(); let result: Vec = self.services.users.stream().map(Into::into).collect().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + 
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn iter_users2(&self) -> Result { +async fn iter_users2(&self) -> Result { let timer = tokio::time::Instant::now(); let result: Vec<_> = self.services.users.stream().collect().await; let result: Vec<_> = result @@ -248,35 +223,32 @@ async fn iter_users2(&self) -> Result { let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```")) + .await } #[admin_command] -async fn count_users(&self) -> Result { +async fn count_users(&self) -> Result { let timer = tokio::time::Instant::now(); let result = self.services.users.count().await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn password_hash(&self, user_id: OwnedUserId) -> Result { +async fn password_hash(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let result = self.services.users.password_hash(&user_id).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn list_devices(&self, user_id: OwnedUserId) -> Result { +async fn list_devices(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let devices = self .services @@ -288,13 +260,12 @@ async fn list_devices(&self, user_id: OwnedUserId) -> Result Result { +async fn list_devices_metadata(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let devices = self .services @@ -304,17 +275,12 @@ async fn list_devices_metadata(&self, user_id: OwnedUserId) -> Result Result { +async fn get_device_metadata(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result { let timer = tokio::time::Instant::now(); let device = self .services @@ -323,28 +289,22 @@ async fn get_device_metadata( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```")) + .await } #[admin_command] -async fn get_devices_version(&self, user_id: OwnedUserId) -> Result { +async fn get_devices_version(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let device = self.services.users.get_devicelist_version(&user_id).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```")) + .await } #[admin_command] -async fn count_one_time_keys( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, -) -> Result { +async fn count_one_time_keys(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -353,17 +313,12 @@ async fn count_one_time_keys( 
.await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_device_keys( - &self, - user_id: OwnedUserId, - device_id: OwnedDeviceId, -) -> Result { +async fn get_device_keys(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -372,24 +327,22 @@ async fn get_device_keys( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_user_signing_key(&self, user_id: OwnedUserId) -> Result { +async fn get_user_signing_key(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let result = self.services.users.get_user_signing_key(&user_id).await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } #[admin_command] -async fn get_master_key(&self, user_id: OwnedUserId) -> Result { +async fn get_master_key(&self, user_id: OwnedUserId) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -398,17 +351,12 @@ async fn get_master_key(&self, user_id: OwnedUserId) -> Result Result { +async fn get_to_device_events(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result { let timer = tokio::time::Instant::now(); let result = self .services @@ -418,7 +366,6 @@ async fn get_to_device_events( .await; let query_time = timer.elapsed(); - Ok(RoomMessageEventContent::notice_markdown(format!( - "Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```" - ))) + self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```")) + .await } diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs index 4cfff2e5..6b37ffe4 100644 --- a/src/admin/room/alias.rs +++ b/src/admin/room/alias.rs @@ -1,11 +1,11 @@ use std::fmt::Write; use clap::Subcommand; -use conduwuit::Result; +use conduwuit::{Err, Result}; use futures::StreamExt; -use ruma::{OwnedRoomAliasId, OwnedRoomId, events::room::message::RoomMessageEventContent}; +use ruma::{OwnedRoomAliasId, OwnedRoomId}; -use crate::{Command, escape_html}; +use crate::Context; #[derive(Debug, Subcommand)] pub(crate) enum RoomAliasCommand { @@ -42,17 +42,7 @@ pub(crate) enum RoomAliasCommand { }, } -pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) -> Result { - let c = reprocess(command, context).await?; - context.write_str(c.body()).await?; - - Ok(()) -} - -pub(super) async fn reprocess( - command: RoomAliasCommand, - context: &Command<'_>, -) -> Result { +pub(super) async fn process(command: RoomAliasCommand, context: &Context<'_>) -> Result { let services = context.services; let server_user = &services.globals.server_user; @@ -65,9 +55,7 @@ pub(super) async fn reprocess( let room_alias = match OwnedRoomAliasId::parse(room_alias_str) { | Ok(alias) => alias, | Err(err) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to parse alias: {err}" - ))); + return 
Err!("Failed to parse alias: {err}"); }, }; match command { @@ -79,60 +67,50 @@ pub(super) async fn reprocess( &room_id, server_user, ) { - | Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( - "Successfully overwrote alias (formerly {id})" - ))), - | Err(err) => Ok(RoomMessageEventContent::text_plain(format!( - "Failed to remove alias: {err}" - ))), + | Err(err) => Err!("Failed to remove alias: {err}"), + | Ok(()) => + context + .write_str(&format!( + "Successfully overwrote alias (formerly {id})" + )) + .await, } }, - | (false, Ok(id)) => Ok(RoomMessageEventContent::text_plain(format!( + | (false, Ok(id)) => Err!( "Refusing to overwrite in use alias for {id}, use -f or --force to \ overwrite" - ))), + ), | (_, Err(_)) => { match services.rooms.alias.set_alias( &room_alias, &room_id, server_user, ) { - | Ok(()) => Ok(RoomMessageEventContent::text_plain( - "Successfully set alias", - )), - | Err(err) => Ok(RoomMessageEventContent::text_plain(format!( - "Failed to remove alias: {err}" - ))), + | Err(err) => Err!("Failed to remove alias: {err}"), + | Ok(()) => context.write_str("Successfully set alias").await, } }, } }, | RoomAliasCommand::Remove { .. } => { match services.rooms.alias.resolve_local_alias(&room_alias).await { + | Err(_) => Err!("Alias isn't in use."), | Ok(id) => match services .rooms .alias .remove_alias(&room_alias, server_user) .await { - | Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( - "Removed alias from {id}" - ))), - | Err(err) => Ok(RoomMessageEventContent::text_plain(format!( - "Failed to remove alias: {err}" - ))), + | Err(err) => Err!("Failed to remove alias: {err}"), + | Ok(()) => + context.write_str(&format!("Removed alias from {id}")).await, }, - | Err(_) => - Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), } }, | RoomAliasCommand::Which { .. } => { match services.rooms.alias.resolve_local_alias(&room_alias).await { - | Ok(id) => Ok(RoomMessageEventContent::text_plain(format!( - "Alias resolves to {id}" - ))), - | Err(_) => - Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")), + | Err(_) => Err!("Alias isn't in use."), + | Ok(id) => context.write_str(&format!("Alias resolves to {id}")).await, } }, | RoomAliasCommand::List { .. } => unreachable!(), @@ -154,15 +132,8 @@ pub(super) async fn reprocess( output }); - let html_list = aliases.iter().fold(String::new(), |mut output, alias| { - writeln!(output, "

<li>{}</li>", escape_html(alias.as_ref()))
-					.expect("should be able to write to string buffer");
-				output
-			});
-
-			let plain = format!("Aliases for {room_id}:\n{plain_list}");
-			let html = format!("Aliases for {room_id}:\n<ul>{html_list}</ul>
    "); - Ok(RoomMessageEventContent::text_html(plain, html)) + context.write_str(&plain).await } else { let aliases = services .rooms @@ -181,23 +152,8 @@ pub(super) async fn reprocess( output }); - let html_list = aliases - .iter() - .fold(String::new(), |mut output, (alias, id)| { - writeln!( - output, - "
<li>{} -> #{}:{}</li>",
-						escape_html(alias.as_ref()),
-						escape_html(id),
-						server_name
-					)
-					.expect("should be able to write to string buffer");
-					output
-				});
-
-			let plain = format!("Aliases:\n{plain_list}");
-			let html = format!("Aliases:\n<ul>{html_list}</ul>
    "); - Ok(RoomMessageEventContent::text_html(plain, html)) + context.write_str(&plain).await }, } } diff --git a/src/admin/room/commands.rs b/src/admin/room/commands.rs index 6dd31b48..81f36f15 100644 --- a/src/admin/room/commands.rs +++ b/src/admin/room/commands.rs @@ -1,6 +1,6 @@ -use conduwuit::Result; +use conduwuit::{Err, Result}; use futures::StreamExt; -use ruma::{OwnedRoomId, events::room::message::RoomMessageEventContent}; +use ruma::OwnedRoomId; use crate::{PAGE_SIZE, admin_command, get_room_info}; @@ -11,7 +11,7 @@ pub(super) async fn list_rooms( exclude_disabled: bool, exclude_banned: bool, no_details: bool, -) -> Result { +) -> Result { // TODO: i know there's a way to do this with clap, but i can't seem to find it let page = page.unwrap_or(1); let mut rooms = self @@ -41,29 +41,28 @@ pub(super) async fn list_rooms( .collect::>(); if rooms.is_empty() { - return Ok(RoomMessageEventContent::text_plain("No more rooms.")); + return Err!("No more rooms."); } - let output_plain = format!( - "Rooms ({}):\n```\n{}\n```", - rooms.len(), - rooms - .iter() - .map(|(id, members, name)| if no_details { + let body = rooms + .iter() + .map(|(id, members, name)| { + if no_details { format!("{id}") } else { format!("{id}\tMembers: {members}\tName: {name}") - }) - .collect::>() - .join("\n") - ); + } + }) + .collect::>() + .join("\n"); - Ok(RoomMessageEventContent::notice_markdown(output_plain)) + self.write_str(&format!("Rooms ({}):\n```\n{body}\n```", rooms.len(),)) + .await } #[admin_command] -pub(super) async fn exists(&self, room_id: OwnedRoomId) -> Result { +pub(super) async fn exists(&self, room_id: OwnedRoomId) -> Result { let result = self.services.rooms.metadata.exists(&room_id).await; - Ok(RoomMessageEventContent::notice_markdown(format!("{result}"))) + self.write_str(&format!("{result}")).await } diff --git a/src/admin/room/directory.rs b/src/admin/room/directory.rs index 179131e4..a6be9a15 100644 --- a/src/admin/room/directory.rs +++ b/src/admin/room/directory.rs @@ -1,9 +1,9 @@ use clap::Subcommand; -use conduwuit::Result; +use conduwuit::{Err, Result}; use futures::StreamExt; -use ruma::{OwnedRoomId, events::room::message::RoomMessageEventContent}; +use ruma::OwnedRoomId; -use crate::{Command, PAGE_SIZE, get_room_info}; +use crate::{Context, PAGE_SIZE, get_room_info}; #[derive(Debug, Subcommand)] pub(crate) enum RoomDirectoryCommand { @@ -25,25 +25,16 @@ pub(crate) enum RoomDirectoryCommand { }, } -pub(super) async fn process(command: RoomDirectoryCommand, context: &Command<'_>) -> Result { - let c = reprocess(command, context).await?; - context.write_str(c.body()).await?; - Ok(()) -} - -pub(super) async fn reprocess( - command: RoomDirectoryCommand, - context: &Command<'_>, -) -> Result { +pub(super) async fn process(command: RoomDirectoryCommand, context: &Context<'_>) -> Result { let services = context.services; match command { | RoomDirectoryCommand::Publish { room_id } => { services.rooms.directory.set_public(&room_id); - Ok(RoomMessageEventContent::notice_plain("Room published")) + context.write_str("Room published").await }, | RoomDirectoryCommand::Unpublish { room_id } => { services.rooms.directory.set_not_public(&room_id); - Ok(RoomMessageEventContent::notice_plain("Room unpublished")) + context.write_str("Room unpublished").await }, | RoomDirectoryCommand::List { page } => { // TODO: i know there's a way to do this with clap, but i can't seem to find it @@ -66,20 +57,18 @@ pub(super) async fn reprocess( .collect(); if rooms.is_empty() { - return 
Ok(RoomMessageEventContent::text_plain("No more rooms.")); + return Err!("No more rooms."); } - let output = format!( - "Rooms (page {page}):\n```\n{}\n```", - rooms - .iter() - .map(|(id, members, name)| format!( - "{id} | Members: {members} | Name: {name}" - )) - .collect::>() - .join("\n") - ); - Ok(RoomMessageEventContent::text_markdown(output)) + let body = rooms + .iter() + .map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}")) + .collect::>() + .join("\n"); + + context + .write_str(&format!("Rooms (page {page}):\n```\n{body}\n```",)) + .await }, } } diff --git a/src/admin/room/info.rs b/src/admin/room/info.rs index 35a92b6a..1278e820 100644 --- a/src/admin/room/info.rs +++ b/src/admin/room/info.rs @@ -1,7 +1,7 @@ use clap::Subcommand; -use conduwuit::{Result, utils::ReadyExt}; +use conduwuit::{Err, Result, utils::ReadyExt}; use futures::StreamExt; -use ruma::{OwnedRoomId, events::room::message::RoomMessageEventContent}; +use ruma::OwnedRoomId; use crate::{admin_command, admin_command_dispatch}; @@ -27,11 +27,7 @@ pub(crate) enum RoomInfoCommand { } #[admin_command] -async fn list_joined_members( - &self, - room_id: OwnedRoomId, - local_only: bool, -) -> Result { +async fn list_joined_members(&self, room_id: OwnedRoomId, local_only: bool) -> Result { let room_name = self .services .rooms @@ -64,22 +60,19 @@ async fn list_joined_members( .collect() .await; - let output_plain = format!( - "{} Members in Room \"{}\":\n```\n{}\n```", - member_info.len(), - room_name, - member_info - .into_iter() - .map(|(displayname, mxid)| format!("{mxid} | {displayname}")) - .collect::>() - .join("\n") - ); + let num = member_info.len(); + let body = member_info + .into_iter() + .map(|(displayname, mxid)| format!("{mxid} | {displayname}")) + .collect::>() + .join("\n"); - Ok(RoomMessageEventContent::notice_markdown(output_plain)) + self.write_str(&format!("{num} Members in Room \"{room_name}\":\n```\n{body}\n```",)) + .await } #[admin_command] -async fn view_room_topic(&self, room_id: OwnedRoomId) -> Result { +async fn view_room_topic(&self, room_id: OwnedRoomId) -> Result { let Ok(room_topic) = self .services .rooms @@ -87,10 +80,9 @@ async fn view_room_topic(&self, room_id: OwnedRoomId) -> Result Result { +async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result { debug!("Got room alias or ID: {}", room); let admin_room_alias = &self.services.globals.admin_alias; if let Ok(admin_room_id) = self.services.admin.get_admin_room().await { if room.to_string().eq(&admin_room_id) || room.to_string().eq(admin_room_alias) { - return Ok(RoomMessageEventContent::text_plain("Not allowed to ban the admin room.")); + return Err!("Not allowed to ban the admin room."); } } @@ -64,11 +61,11 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result room_id, | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( + return Err!( "Failed to parse room ID {room}. Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))); + ); }, }; @@ -80,11 +77,11 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result room_alias, | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( + return Err!( "Failed to parse room ID {room}. 
Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))); + ); }, }; @@ -123,9 +120,9 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result { - return Ok(RoomMessageEventContent::notice_plain(format!( + return Err!( "Failed to resolve room alias {room_alias} to a room ID: {e}" - ))); + ); }, } }, @@ -135,11 +132,11 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result Result Result { +async fn ban_list_of_rooms(&self) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. Add --help for details.",); } let rooms_s = self @@ -356,23 +352,24 @@ async fn ban_list_of_rooms(&self) -> Result { self.services.rooms.metadata.disable_room(&room_id, true); } - Ok(RoomMessageEventContent::text_plain(format!( + self.write_str(&format!( "Finished bulk room ban, banned {room_ban_count} total rooms, evicted all users, and \ disabled incoming federation with the room." - ))) + )) + .await } #[admin_command] -async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result { +async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result { let room_id = if room.is_room_id() { let room_id = match RoomId::parse(&room) { | Ok(room_id) => room_id, | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( + return Err!( "Failed to parse room ID {room}. Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))); + ); }, }; @@ -384,11 +381,11 @@ async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result room_alias, | Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( + return Err!( "Failed to parse room ID {room}. 
Please note that this requires a full room \ ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \ (`#roomalias:example.com`): {e}" - ))); + ); }, }; @@ -427,9 +424,7 @@ async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result { - return Ok(RoomMessageEventContent::text_plain(format!( - "Failed to resolve room alias {room} to a room ID: {e}" - ))); + return Err!("Failed to resolve room alias {room} to a room ID: {e}"); }, } }, @@ -439,19 +434,20 @@ async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result Result { +async fn list_banned_rooms(&self, no_details: bool) -> Result { let room_ids: Vec = self .services .rooms @@ -462,7 +458,7 @@ async fn list_banned_rooms(&self, no_details: bool) -> Result Result>() - .join("\n") - ); + } + }) + .collect::>() + .join("\n"); - Ok(RoomMessageEventContent::notice_markdown(output_plain)) + self.write_str(&format!("Rooms Banned ({num}):\n```\n{body}\n```",)) + .await } diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index 17bf9ec0..b01e9296 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -1,12 +1,11 @@ use std::{fmt::Write, path::PathBuf, sync::Arc}; use conduwuit::{Err, Result, info, utils::time, warn}; -use ruma::events::room::message::RoomMessageEventContent; use crate::admin_command; #[admin_command] -pub(super) async fn uptime(&self) -> Result { +pub(super) async fn uptime(&self) -> Result { let elapsed = self .services .server @@ -15,47 +14,36 @@ pub(super) async fn uptime(&self) -> Result { .expect("standard duration"); let result = time::pretty(elapsed); - Ok(RoomMessageEventContent::notice_plain(format!("{result}."))) + self.write_str(&format!("{result}.")).await } #[admin_command] -pub(super) async fn show_config(&self) -> Result { - // Construct and send the response - Ok(RoomMessageEventContent::text_markdown(format!( - "{}", - *self.services.server.config - ))) +pub(super) async fn show_config(&self) -> Result { + self.write_str(&format!("{}", *self.services.server.config)) + .await } #[admin_command] -pub(super) async fn reload_config( - &self, - path: Option, -) -> Result { +pub(super) async fn reload_config(&self, path: Option) -> Result { let path = path.as_deref().into_iter(); self.services.config.reload(path)?; - Ok(RoomMessageEventContent::text_plain("Successfully reconfigured.")) + self.write_str("Successfully reconfigured.").await } #[admin_command] -pub(super) async fn list_features( - &self, - available: bool, - enabled: bool, - comma: bool, -) -> Result { +pub(super) async fn list_features(&self, available: bool, enabled: bool, comma: bool) -> Result { let delim = if comma { "," } else { " " }; if enabled && !available { let features = info::rustc::features().join(delim); let out = format!("`\n{features}\n`"); - return Ok(RoomMessageEventContent::text_markdown(out)); + return self.write_str(&out).await; } if available && !enabled { let features = info::cargo::features().join(delim); let out = format!("`\n{features}\n`"); - return Ok(RoomMessageEventContent::text_markdown(out)); + return self.write_str(&out).await; } let mut features = String::new(); @@ -68,41 +56,42 @@ pub(super) async fn list_features( writeln!(features, "{emoji} {feature} {remark}")?; } - Ok(RoomMessageEventContent::text_markdown(features)) + self.write_str(&features).await } #[admin_command] -pub(super) async fn memory_usage(&self) -> Result { +pub(super) async fn memory_usage(&self) -> Result { let services_usage = self.services.memory_usage().await?; let database_usage = 
self.services.db.db.memory_usage()?; let allocator_usage = conduwuit::alloc::memory_usage().map_or(String::new(), |s| format!("\nAllocator:\n{s}")); - Ok(RoomMessageEventContent::text_plain(format!( + self.write_str(&format!( "Services:\n{services_usage}\nDatabase:\n{database_usage}{allocator_usage}", - ))) + )) + .await } #[admin_command] -pub(super) async fn clear_caches(&self) -> Result { +pub(super) async fn clear_caches(&self) -> Result { self.services.clear_cache().await; - Ok(RoomMessageEventContent::text_plain("Done.")) + self.write_str("Done.").await } #[admin_command] -pub(super) async fn list_backups(&self) -> Result { +pub(super) async fn list_backups(&self) -> Result { let result = self.services.db.db.backup_list()?; if result.is_empty() { - Ok(RoomMessageEventContent::text_plain("No backups found.")) - } else { - Ok(RoomMessageEventContent::text_plain(result)) + return Err!("No backups found."); } + + self.write_str(&result).await } #[admin_command] -pub(super) async fn backup_database(&self) -> Result { +pub(super) async fn backup_database(&self) -> Result { let db = Arc::clone(&self.services.db); let mut result = self .services @@ -118,27 +107,27 @@ pub(super) async fn backup_database(&self) -> Result { result = self.services.db.db.backup_list()?; } - Ok(RoomMessageEventContent::notice_markdown(result)) + self.write_str(&result).await } #[admin_command] -pub(super) async fn admin_notice(&self, message: Vec) -> Result { +pub(super) async fn admin_notice(&self, message: Vec) -> Result { let message = message.join(" "); self.services.admin.send_text(&message).await; - Ok(RoomMessageEventContent::notice_plain("Notice was sent to #admins")) + self.write_str("Notice was sent to #admins").await } #[admin_command] -pub(super) async fn reload_mods(&self) -> Result { +pub(super) async fn reload_mods(&self) -> Result { self.services.server.reload()?; - Ok(RoomMessageEventContent::notice_plain("Reloading server...")) + self.write_str("Reloading server...").await } #[admin_command] #[cfg(unix)] -pub(super) async fn restart(&self, force: bool) -> Result { +pub(super) async fn restart(&self, force: bool) -> Result { use conduwuit::utils::sys::current_exe_deleted; if !force && current_exe_deleted() { @@ -150,13 +139,13 @@ pub(super) async fn restart(&self, force: bool) -> Result Result { +pub(super) async fn shutdown(&self) -> Result { warn!("shutdown command"); self.services.server.shutdown()?; - Ok(RoomMessageEventContent::notice_plain("Shutting down server...")) + self.write_str("Shutting down server...").await } diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 84795f9b..e5e481e5 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -2,7 +2,7 @@ use std::{collections::BTreeMap, fmt::Write as _}; use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room}; use conduwuit::{ - Result, debug, debug_warn, error, info, is_equal_to, + Err, Result, debug, debug_warn, error, info, is_equal_to, matrix::pdu::PduBuilder, utils::{self, ReadyExt}, warn, @@ -14,7 +14,6 @@ use ruma::{ events::{ RoomAccountDataEventType, StateEventType, room::{ - message::RoomMessageEventContent, power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, redaction::RoomRedactionEventContent, }, @@ -31,7 +30,7 @@ const AUTO_GEN_PASSWORD_LENGTH: usize = 25; const BULK_JOIN_REASON: &str = "Bulk force joining this room as initiated by the server admin."; #[admin_command] -pub(super) async fn list_users(&self) -> Result { +pub(super) async fn 
list_users(&self) -> Result { let users: Vec<_> = self .services .users @@ -44,30 +43,22 @@ pub(super) async fn list_users(&self) -> Result { plain_msg += users.join("\n").as_str(); plain_msg += "\n```"; - self.write_str(plain_msg.as_str()).await?; - - Ok(RoomMessageEventContent::text_plain("")) + self.write_str(&plain_msg).await } #[admin_command] -pub(super) async fn create_user( - &self, - username: String, - password: Option, -) -> Result { +pub(super) async fn create_user(&self, username: String, password: Option) -> Result { // Validate user id let user_id = parse_local_user_id(self.services, &username)?; if let Err(e) = user_id.validate_strict() { if self.services.config.emergency_password.is_none() { - return Ok(RoomMessageEventContent::text_plain(format!( - "Username {user_id} contains disallowed characters or spaces: {e}" - ))); + return Err!("Username {user_id} contains disallowed characters or spaces: {e}"); } } if self.services.users.exists(&user_id).await { - return Ok(RoomMessageEventContent::text_plain(format!("User {user_id} already exists"))); + return Err!("User {user_id} already exists"); } let password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); @@ -89,8 +80,7 @@ pub(super) async fn create_user( .new_user_displayname_suffix .is_empty() { - write!(displayname, " {}", self.services.server.config.new_user_displayname_suffix) - .expect("should be able to write to string buffer"); + write!(displayname, " {}", self.services.server.config.new_user_displayname_suffix)?; } self.services @@ -110,15 +100,17 @@ pub(super) async fn create_user( content: ruma::events::push_rules::PushRulesEventContent { global: ruma::push::Ruleset::server_default(&user_id), }, - }) - .expect("to json value always works"), + })?, ) .await?; if !self.services.server.config.auto_join_rooms.is_empty() { for room in &self.services.server.config.auto_join_rooms { let Ok(room_id) = self.services.rooms.alias.resolve(room).await else { - error!(%user_id, "Failed to resolve room alias to room ID when attempting to auto join {room}, skipping"); + error!( + %user_id, + "Failed to resolve room alias to room ID when attempting to auto join {room}, skipping" + ); continue; }; @@ -154,18 +146,17 @@ pub(super) async fn create_user( info!("Automatically joined room {room} for user {user_id}"); }, | Err(e) => { - self.services - .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "Failed to automatically join room {room} for user {user_id}: \ - {e}" - ))) - .await - .ok(); // don't return this error so we don't fail registrations error!( "Failed to automatically join room {room} for user {user_id}: {e}" ); + self.services + .admin + .send_text(&format!( + "Failed to automatically join room {room} for user {user_id}: \ + {e}" + )) + .await; }, } } @@ -192,25 +183,18 @@ pub(super) async fn create_user( debug!("create_user admin command called without an admin room being available"); } - Ok(RoomMessageEventContent::text_plain(format!( - "Created user with user_id: {user_id} and password: `{password}`" - ))) + self.write_str(&format!("Created user with user_id: {user_id} and password: `{password}`")) + .await } #[admin_command] -pub(super) async fn deactivate( - &self, - no_leave_rooms: bool, - user_id: String, -) -> Result { +pub(super) async fn deactivate(&self, no_leave_rooms: bool, user_id: String) -> Result { // Validate user id let user_id = parse_local_user_id(self.services, &user_id)?; // don't deactivate the server service account if user_id == 
self.services.globals.server_user { - return Ok(RoomMessageEventContent::text_plain( - "Not allowed to deactivate the server service account.", - )); + return Err!("Not allowed to deactivate the server service account.",); } self.services.users.deactivate_account(&user_id).await?; @@ -218,11 +202,8 @@ pub(super) async fn deactivate( if !no_leave_rooms { self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "Making {user_id} leave all rooms after deactivation..." - ))) - .await - .ok(); + .send_text(&format!("Making {user_id} leave all rooms after deactivation...")) + .await; let all_joined_rooms: Vec = self .services @@ -239,24 +220,19 @@ pub(super) async fn deactivate( leave_all_rooms(self.services, &user_id).await; } - Ok(RoomMessageEventContent::text_plain(format!( - "User {user_id} has been deactivated" - ))) + self.write_str(&format!("User {user_id} has been deactivated")) + .await } #[admin_command] -pub(super) async fn reset_password( - &self, - username: String, - password: Option, -) -> Result { +pub(super) async fn reset_password(&self, username: String, password: Option) -> Result { let user_id = parse_local_user_id(self.services, &username)?; if user_id == self.services.globals.server_user { - return Ok(RoomMessageEventContent::text_plain( + return Err!( "Not allowed to set the password for the server account. Please use the emergency \ password config option.", - )); + ); } let new_password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); @@ -266,28 +242,20 @@ pub(super) async fn reset_password( .users .set_password(&user_id, Some(new_password.as_str())) { - | Ok(()) => Ok(RoomMessageEventContent::text_plain(format!( - "Successfully reset the password for user {user_id}: `{new_password}`" - ))), - | Err(e) => Ok(RoomMessageEventContent::text_plain(format!( - "Couldn't reset the password for user {user_id}: {e}" - ))), + | Err(e) => return Err!("Couldn't reset the password for user {user_id}: {e}"), + | Ok(()) => + write!(self, "Successfully reset the password for user {user_id}: `{new_password}`"), } + .await } #[admin_command] -pub(super) async fn deactivate_all( - &self, - no_leave_rooms: bool, - force: bool, -) -> Result { +pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. 
Add --help for details.",); } let usernames = self @@ -301,15 +269,23 @@ pub(super) async fn deactivate_all( for username in usernames { match parse_active_local_user_id(self.services, username).await { + | Err(e) => { + self.services + .admin + .send_text(&format!("{username} is not a valid username, skipping over: {e}")) + .await; + + continue; + }, | Ok(user_id) => { if self.services.users.is_admin(&user_id).await && !force { self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!( + .send_text(&format!( "{username} is an admin and --force is not set, skipping over" - ))) - .await - .ok(); + )) + .await; + admins.push(username); continue; } @@ -318,26 +294,16 @@ pub(super) async fn deactivate_all( if user_id == self.services.globals.server_user { self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!( + .send_text(&format!( "{username} is the server service account, skipping over" - ))) - .await - .ok(); + )) + .await; + continue; } user_ids.push(user_id); }, - | Err(e) => { - self.services - .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "{username} is not a valid username, skipping over: {e}" - ))) - .await - .ok(); - continue; - }, } } @@ -345,6 +311,12 @@ pub(super) async fn deactivate_all( for user_id in user_ids { match self.services.users.deactivate_account(&user_id).await { + | Err(e) => { + self.services + .admin + .send_text(&format!("Failed deactivating user: {e}")) + .await; + }, | Ok(()) => { deactivation_count = deactivation_count.saturating_add(1); if !no_leave_rooms { @@ -365,33 +337,24 @@ pub(super) async fn deactivate_all( leave_all_rooms(self.services, &user_id).await; } }, - | Err(e) => { - self.services - .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "Failed deactivating user: {e}" - ))) - .await - .ok(); - }, } } if admins.is_empty() { - Ok(RoomMessageEventContent::text_plain(format!( - "Deactivated {deactivation_count} accounts." - ))) + write!(self, "Deactivated {deactivation_count} accounts.") } else { - Ok(RoomMessageEventContent::text_plain(format!( + write!( + self, "Deactivated {deactivation_count} accounts.\nSkipped admin accounts: {}. Use \ --force to deactivate admin accounts", admins.join(", ") - ))) + ) } + .await } #[admin_command] -pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result { +pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result { // Validate user id let user_id = parse_local_user_id(self.services, &user_id)?; @@ -405,23 +368,20 @@ pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result>() - .join("\n") - ); + let body = rooms + .iter() + .map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}")) + .collect::>() + .join("\n"); - Ok(RoomMessageEventContent::notice_markdown(output_plain)) + self.write_str(&format!("Rooms {user_id} Joined ({}):\n```\n{body}\n```", rooms.len(),)) + .await } #[admin_command] @@ -429,27 +389,23 @@ pub(super) async fn force_join_list_of_local_users( &self, room_id: OwnedRoomOrAliasId, yes_i_want_to_do_this: bool, -) -> Result { +) -> Result { if self.body.len() < 2 || !self.body[0].trim().starts_with("```") || self.body.last().unwrap_or(&"").trim() != "```" { - return Ok(RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - )); + return Err!("Expected code block in command body. 
Add --help for details.",); } if !yes_i_want_to_do_this { - return Ok(RoomMessageEventContent::notice_markdown( + return Err!( "You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force \ bulk join all specified local users.", - )); + ); } let Ok(admin_room) = self.services.admin.get_admin_room().await else { - return Ok(RoomMessageEventContent::notice_markdown( - "There is not an admin room to check for server admins.", - )); + return Err!("There is not an admin room to check for server admins.",); }; let (room_id, servers) = self @@ -466,7 +422,7 @@ pub(super) async fn force_join_list_of_local_users( .server_in_room(self.services.globals.server_name(), &room_id) .await { - return Ok(RoomMessageEventContent::notice_markdown("We are not joined in this room.")); + return Err!("We are not joined in this room."); } let server_admins: Vec<_> = self @@ -486,9 +442,7 @@ pub(super) async fn force_join_list_of_local_users( .ready_any(|user_id| server_admins.contains(&user_id.to_owned())) .await { - return Ok(RoomMessageEventContent::notice_markdown( - "There is not a single server admin in the room.", - )); + return Err!("There is not a single server admin in the room.",); } let usernames = self @@ -506,11 +460,11 @@ pub(super) async fn force_join_list_of_local_users( if user_id == self.services.globals.server_user { self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!( + .send_text(&format!( "{username} is the server service account, skipping over" - ))) - .await - .ok(); + )) + .await; + continue; } @@ -519,11 +473,9 @@ pub(super) async fn force_join_list_of_local_users( | Err(e) => { self.services .admin - .send_message(RoomMessageEventContent::text_plain(format!( - "{username} is not a valid username, skipping over: {e}" - ))) - .await - .ok(); + .send_text(&format!("{username} is not a valid username, skipping over: {e}")) + .await; + continue; }, } @@ -554,10 +506,11 @@ pub(super) async fn force_join_list_of_local_users( } } - Ok(RoomMessageEventContent::notice_markdown(format!( + self.write_str(&format!( "{successful_joins} local users have been joined to {room_id}. 
{failed_joins} joins \ failed.", - ))) + )) + .await } #[admin_command] @@ -565,18 +518,16 @@ pub(super) async fn force_join_all_local_users( &self, room_id: OwnedRoomOrAliasId, yes_i_want_to_do_this: bool, -) -> Result { +) -> Result { if !yes_i_want_to_do_this { - return Ok(RoomMessageEventContent::notice_markdown( + return Err!( "You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force \ bulk join all local users.", - )); + ); } let Ok(admin_room) = self.services.admin.get_admin_room().await else { - return Ok(RoomMessageEventContent::notice_markdown( - "There is not an admin room to check for server admins.", - )); + return Err!("There is not an admin room to check for server admins.",); }; let (room_id, servers) = self @@ -593,7 +544,7 @@ pub(super) async fn force_join_all_local_users( .server_in_room(self.services.globals.server_name(), &room_id) .await { - return Ok(RoomMessageEventContent::notice_markdown("We are not joined in this room.")); + return Err!("We are not joined in this room."); } let server_admins: Vec<_> = self @@ -613,9 +564,7 @@ pub(super) async fn force_join_all_local_users( .ready_any(|user_id| server_admins.contains(&user_id.to_owned())) .await { - return Ok(RoomMessageEventContent::notice_markdown( - "There is not a single server admin in the room.", - )); + return Err!("There is not a single server admin in the room.",); } let mut failed_joins: usize = 0; @@ -650,10 +599,11 @@ pub(super) async fn force_join_all_local_users( } } - Ok(RoomMessageEventContent::notice_markdown(format!( + self.write_str(&format!( "{successful_joins} local users have been joined to {room_id}. {failed_joins} joins \ failed.", - ))) + )) + .await } #[admin_command] @@ -661,7 +611,7 @@ pub(super) async fn force_join_room( &self, user_id: String, room_id: OwnedRoomOrAliasId, -) -> Result { +) -> Result { let user_id = parse_local_user_id(self.services, &user_id)?; let (room_id, servers) = self .services @@ -677,9 +627,8 @@ pub(super) async fn force_join_room( join_room_by_id_helper(self.services, &user_id, &room_id, None, &servers, None, &None) .await?; - Ok(RoomMessageEventContent::notice_markdown(format!( - "{user_id} has been joined to {room_id}.", - ))) + self.write_str(&format!("{user_id} has been joined to {room_id}.",)) + .await } #[admin_command] @@ -687,7 +636,7 @@ pub(super) async fn force_leave_room( &self, user_id: String, room_id: OwnedRoomOrAliasId, -) -> Result { +) -> Result { let user_id = parse_local_user_id(self.services, &user_id)?; let room_id = self.services.rooms.alias.resolve(&room_id).await?; @@ -703,24 +652,17 @@ pub(super) async fn force_leave_room( .is_joined(&user_id, &room_id) .await { - return Ok(RoomMessageEventContent::notice_markdown(format!( - "{user_id} is not joined in the room" - ))); + return Err!("{user_id} is not joined in the room"); } leave_room(self.services, &user_id, &room_id, None).await?; - Ok(RoomMessageEventContent::notice_markdown(format!( - "{user_id} has left {room_id}.", - ))) + self.write_str(&format!("{user_id} has left {room_id}.",)) + .await } #[admin_command] -pub(super) async fn force_demote( - &self, - user_id: String, - room_id: OwnedRoomOrAliasId, -) -> Result { +pub(super) async fn force_demote(&self, user_id: String, room_id: OwnedRoomOrAliasId) -> Result { let user_id = parse_local_user_id(self.services, &user_id)?; let room_id = self.services.rooms.alias.resolve(&room_id).await?; @@ -731,15 +673,11 @@ pub(super) async fn force_demote( let state_lock = 
self.services.rooms.state.mutex.lock(&room_id).await; - let room_power_levels = self + let room_power_levels: Option = self .services .rooms .state_accessor - .room_state_get_content::( - &room_id, - &StateEventType::RoomPowerLevels, - "", - ) + .room_state_get_content(&room_id, &StateEventType::RoomPowerLevels, "") .await .ok(); @@ -757,9 +695,7 @@ pub(super) async fn force_demote( .is_ok_and(|event| event.sender == user_id); if !user_can_demote_self { - return Ok(RoomMessageEventContent::notice_markdown( - "User is not allowed to modify their own power levels in the room.", - )); + return Err!("User is not allowed to modify their own power levels in the room.",); } let mut power_levels_content = room_power_levels.unwrap_or_default(); @@ -777,25 +713,25 @@ pub(super) async fn force_demote( ) .await?; - Ok(RoomMessageEventContent::notice_markdown(format!( + self.write_str(&format!( "User {user_id} demoted themselves to the room default power level in {room_id} - \ {event_id}" - ))) + )) + .await } #[admin_command] -pub(super) async fn make_user_admin(&self, user_id: String) -> Result { +pub(super) async fn make_user_admin(&self, user_id: String) -> Result { let user_id = parse_local_user_id(self.services, &user_id)?; - assert!( self.services.globals.user_is_local(&user_id), "Parsed user_id must be a local user" ); + self.services.admin.make_user_admin(&user_id).await?; - Ok(RoomMessageEventContent::notice_markdown(format!( - "{user_id} has been granted admin privileges.", - ))) + self.write_str(&format!("{user_id} has been granted admin privileges.",)) + .await } #[admin_command] @@ -804,7 +740,7 @@ pub(super) async fn put_room_tag( user_id: String, room_id: OwnedRoomId, tag: String, -) -> Result { +) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; let mut tags_event = self @@ -831,9 +767,10 @@ pub(super) async fn put_room_tag( ) .await?; - Ok(RoomMessageEventContent::text_plain(format!( + self.write_str(&format!( "Successfully updated room account data for {user_id} and room {room_id} with tag {tag}" - ))) + )) + .await } #[admin_command] @@ -842,7 +779,7 @@ pub(super) async fn delete_room_tag( user_id: String, room_id: OwnedRoomId, tag: String, -) -> Result { +) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; let mut tags_event = self @@ -866,18 +803,15 @@ pub(super) async fn delete_room_tag( ) .await?; - Ok(RoomMessageEventContent::text_plain(format!( + self.write_str(&format!( "Successfully updated room account data for {user_id} and room {room_id}, deleting room \ tag {tag}" - ))) + )) + .await } #[admin_command] -pub(super) async fn get_room_tags( - &self, - user_id: String, - room_id: OwnedRoomId, -) -> Result { +pub(super) async fn get_room_tags(&self, user_id: String, room_id: OwnedRoomId) -> Result { let user_id = parse_active_local_user_id(self.services, &user_id).await?; let tags_event = self @@ -889,17 +823,12 @@ pub(super) async fn get_room_tags( content: TagEventContent { tags: BTreeMap::new() }, }); - Ok(RoomMessageEventContent::notice_markdown(format!( - "```\n{:#?}\n```", - tags_event.content.tags - ))) + self.write_str(&format!("```\n{:#?}\n```", tags_event.content.tags)) + .await } #[admin_command] -pub(super) async fn redact_event( - &self, - event_id: OwnedEventId, -) -> Result { +pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result { let Ok(event) = self .services .rooms @@ -907,20 +836,18 @@ pub(super) async fn redact_event( .get_non_outlier_pdu(&event_id) .await else { 
- return Ok(RoomMessageEventContent::text_plain("Event does not exist in our database.")); + return Err!("Event does not exist in our database."); }; if event.is_redacted() { - return Ok(RoomMessageEventContent::text_plain("Event is already redacted.")); + return Err!("Event is already redacted."); } let room_id = event.room_id; let sender_user = event.sender; if !self.services.globals.user_is_local(&sender_user) { - return Ok(RoomMessageEventContent::text_plain( - "This command only works on local users.", - )); + return Err!("This command only works on local users."); } let reason = format!( @@ -949,9 +876,8 @@ pub(super) async fn redact_event( .await? }; - let out = format!("Successfully redacted event. Redaction event ID: {redaction_event_id}"); - - self.write_str(out.as_str()).await?; - - Ok(RoomMessageEventContent::text_plain("")) + self.write_str(&format!( + "Successfully redacted event. Redaction event ID: {redaction_event_id}" + )) + .await } diff --git a/src/admin/utils.rs b/src/admin/utils.rs index a2696c50..ea9696b2 100644 --- a/src/admin/utils.rs +++ b/src/admin/utils.rs @@ -1,3 +1,5 @@ +#![allow(dead_code)] + use conduwuit_core::{Err, Result, err}; use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; use service::Services; diff --git a/src/macros/admin.rs b/src/macros/admin.rs index bf1586a0..fe227b43 100644 --- a/src/macros/admin.rs +++ b/src/macros/admin.rs @@ -8,7 +8,7 @@ use crate::{Result, utils::camel_to_snake_string}; pub(super) fn command(mut item: ItemFn, _args: &[Meta]) -> Result { let attr: Attribute = parse_quote! { - #[conduwuit_macros::implement(crate::Command, params = "<'_>")] + #[conduwuit_macros::implement(crate::Context, params = "<'_>")] }; item.attrs.push(attr); @@ -19,15 +19,16 @@ pub(super) fn command_dispatch(item: ItemEnum, _args: &[Meta]) -> Result = item.variants.iter().map(dispatch_arm).try_collect()?; let switch = quote! { + #[allow(clippy::large_stack_frames)] //TODO: fixme pub(super) async fn process( command: #name, - context: &crate::Command<'_> + context: &crate::Context<'_> ) -> Result { use #name::*; #[allow(non_snake_case)] - Ok(match command { + match command { #( #arm )* - }) + } } }; @@ -47,8 +48,7 @@ fn dispatch_arm(v: &Variant) -> Result { let arg = field.clone(); quote! { #name { #( #field ),* } => { - let c = Box::pin(context.#handler(#( #arg ),*)).await?; - Box::pin(context.write_str(c.body())).await?; + Box::pin(context.#handler(#( #arg ),*)).await }, } }, @@ -58,15 +58,14 @@ fn dispatch_arm(v: &Variant) -> Result { }; quote! { #name ( #field ) => { - Box::pin(#handler::process(#field, context)).await?; + Box::pin(#handler::process(#field, context)).await } } }, | Fields::Unit => { quote! 
{ #name => { - let c = Box::pin(context.#handler()).await?; - Box::pin(context.write_str(c.body())).await?; + Box::pin(context.#handler()).await }, } }, From 3c5bbd4f0505bb1faf6cc5985f0f43fc76cd94b6 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Thu, 10 Apr 2025 20:55:41 +0000 Subject: [PATCH 257/310] simplify database backup interface related Signed-off-by: Jason Volk --- src/admin/server/commands.rs | 35 ++++++++------- src/database/engine/backup.rs | 80 ++++++++++++++++++++++------------- src/service/globals/data.rs | 6 --- 3 files changed, 69 insertions(+), 52 deletions(-) diff --git a/src/admin/server/commands.rs b/src/admin/server/commands.rs index b01e9296..6027a9eb 100644 --- a/src/admin/server/commands.rs +++ b/src/admin/server/commands.rs @@ -1,6 +1,11 @@ use std::{fmt::Write, path::PathBuf, sync::Arc}; -use conduwuit::{Err, Result, info, utils::time, warn}; +use conduwuit::{ + Err, Result, info, + utils::{stream::IterStream, time}, + warn, +}; +use futures::TryStreamExt; use crate::admin_command; @@ -81,33 +86,31 @@ pub(super) async fn clear_caches(&self) -> Result { #[admin_command] pub(super) async fn list_backups(&self) -> Result { - let result = self.services.db.db.backup_list()?; - - if result.is_empty() { - return Err!("No backups found."); - } - - self.write_str(&result).await + self.services + .db + .db + .backup_list()? + .try_stream() + .try_for_each(|result| write!(self, "{result}")) + .await } #[admin_command] pub(super) async fn backup_database(&self) -> Result { let db = Arc::clone(&self.services.db); - let mut result = self + let result = self .services .server .runtime() .spawn_blocking(move || match db.db.backup() { - | Ok(()) => String::new(), - | Err(e) => e.to_string(), + | Ok(()) => "Done".to_owned(), + | Err(e) => format!("Failed: {e}"), }) .await?; - if result.is_empty() { - result = self.services.db.db.backup_list()?; - } - - self.write_str(&result).await + let count = self.services.db.db.backup_count()?; + self.write_str(&format!("{result}. 
Currently have {count} backups.")) + .await } #[admin_command] diff --git a/src/database/engine/backup.rs b/src/database/engine/backup.rs index bb110630..ac72e6d4 100644 --- a/src/database/engine/backup.rs +++ b/src/database/engine/backup.rs @@ -1,24 +1,16 @@ -use std::fmt::Write; +use std::{ffi::OsString, path::PathBuf}; -use conduwuit::{Result, error, implement, info, utils::time::rfc2822_from_seconds, warn}; +use conduwuit::{Err, Result, error, implement, info, utils::time::rfc2822_from_seconds, warn}; use rocksdb::backup::{BackupEngine, BackupEngineOptions}; use super::Engine; -use crate::{or_else, util::map_err}; +use crate::util::map_err; #[implement(Engine)] #[tracing::instrument(skip(self))] pub fn backup(&self) -> Result { - let server = &self.ctx.server; - let config = &server.config; - let path = config.database_backup_path.as_ref(); - if path.is_none() || path.is_some_and(|path| path.as_os_str().is_empty()) { - return Ok(()); - } - - let options = - BackupEngineOptions::new(path.expect("valid database backup path")).map_err(map_err)?; - let mut engine = BackupEngine::open(&options, &*self.ctx.env.lock()?).map_err(map_err)?; + let mut engine = self.backup_engine()?; + let config = &self.ctx.server.config; if config.database_backups_to_keep > 0 { let flush = !self.is_read_only(); engine @@ -40,34 +32,62 @@ pub fn backup(&self) -> Result { } } + if config.database_backups_to_keep == 0 { + warn!("Configuration item `database_backups_to_keep` is set to 0."); + } + Ok(()) } #[implement(Engine)] -pub fn backup_list(&self) -> Result { - let server = &self.ctx.server; - let config = &server.config; - let path = config.database_backup_path.as_ref(); - if path.is_none() || path.is_some_and(|path| path.as_os_str().is_empty()) { - return Ok("Configure database_backup_path to enable backups, or the path specified is \ - not valid" - .to_owned()); +pub fn backup_list(&self) -> Result + Send> { + let info = self.backup_engine()?.get_backup_info(); + + if info.is_empty() { + return Err!("No backups found."); } - let mut res = String::new(); - let options = - BackupEngineOptions::new(path.expect("valid database backup path")).or_else(or_else)?; - let engine = BackupEngine::open(&options, &*self.ctx.env.lock()?).or_else(or_else)?; - for info in engine.get_backup_info() { - writeln!( - res, + let list = info.into_iter().map(|info| { + format!( "#{} {}: {} bytes, {} files", info.backup_id, rfc2822_from_seconds(info.timestamp), info.size, info.num_files, - )?; + ) + }); + + Ok(list) +} + +#[implement(Engine)] +pub fn backup_count(&self) -> Result { + let info = self.backup_engine()?.get_backup_info(); + + Ok(info.len()) +} + +#[implement(Engine)] +fn backup_engine(&self) -> Result { + let path = self.backup_path()?; + let options = BackupEngineOptions::new(path).map_err(map_err)?; + BackupEngine::open(&options, &*self.ctx.env.lock()?).map_err(map_err) +} + +#[implement(Engine)] +fn backup_path(&self) -> Result { + let path = self + .ctx + .server + .config + .database_backup_path + .clone() + .map(PathBuf::into_os_string) + .unwrap_or_default(); + + if path.is_empty() { + return Err!(Config("database_backup_path", "Configure path to enable backups")); } - Ok(res) + Ok(path) } diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index b43b7c5f..21c09252 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -72,10 +72,4 @@ impl Data { pub fn bump_database_version(&self, new_version: u64) { self.global.raw_put(b"version", new_version); } - - #[inline] - pub fn 
backup(&self) -> Result { self.db.db.backup() } - - #[inline] - pub fn backup_list(&self) -> Result { self.db.db.backup_list() } } From 21ec2551598247dc5f081aae73748861085d0ce0 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 11 Apr 2025 01:29:26 +0000 Subject: [PATCH 258/310] eliminate Arc impl for trait Event Signed-off-by: Jason Volk --- src/core/matrix/event.rs | 29 ------------------- src/core/matrix/state_res/benches.rs | 33 ++++++++++------------ src/core/matrix/state_res/event_auth.rs | 14 ++++------ src/core/matrix/state_res/mod.rs | 11 +++----- src/core/matrix/state_res/test_utils.rs | 37 ++++++++++++------------- 5 files changed, 42 insertions(+), 82 deletions(-) diff --git a/src/core/matrix/event.rs b/src/core/matrix/event.rs index ac9e29d6..29153334 100644 --- a/src/core/matrix/event.rs +++ b/src/core/matrix/event.rs @@ -2,7 +2,6 @@ use std::{ borrow::Borrow, fmt::{Debug, Display}, hash::Hash, - sync::Arc, }; use ruma::{EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId, events::TimelineEventType}; @@ -72,31 +71,3 @@ impl Event for &T { fn redacts(&self) -> Option<&Self::Id> { (*self).redacts() } } - -impl Event for Arc { - type Id = T::Id; - - fn event_id(&self) -> &Self::Id { (**self).event_id() } - - fn room_id(&self) -> &RoomId { (**self).room_id() } - - fn sender(&self) -> &UserId { (**self).sender() } - - fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { (**self).origin_server_ts() } - - fn event_type(&self) -> &TimelineEventType { (**self).event_type() } - - fn content(&self) -> &RawJsonValue { (**self).content() } - - fn state_key(&self) -> Option<&str> { (**self).state_key() } - - fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { - (**self).prev_events() - } - - fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { - (**self).auth_events() - } - - fn redacts(&self) -> Option<&Self::Id> { (**self).redacts() } -} diff --git a/src/core/matrix/state_res/benches.rs b/src/core/matrix/state_res/benches.rs index 7a1ae5bf..01218b01 100644 --- a/src/core/matrix/state_res/benches.rs +++ b/src/core/matrix/state_res/benches.rs @@ -4,10 +4,7 @@ extern crate test; use std::{ borrow::Borrow, collections::{HashMap, HashSet}, - sync::{ - Arc, - atomic::{AtomicU64, Ordering::SeqCst}, - }, + sync::atomic::{AtomicU64, Ordering::SeqCst}, }; use futures::{future, future::ready}; @@ -64,7 +61,7 @@ fn resolution_shallow_auth_chain(c: &mut test::Bencher) { c.iter(|| async { let ev_map = store.0.clone(); let state_sets = [&state_at_bob, &state_at_charlie]; - let fetch = |id: OwnedEventId| ready(ev_map.get(&id).map(Arc::clone)); + let fetch = |id: OwnedEventId| ready(ev_map.get(&id).clone()); let exists = |id: OwnedEventId| ready(ev_map.get(&id).is_some()); let auth_chain_sets: Vec> = state_sets .iter() @@ -148,7 +145,7 @@ fn resolve_deeper_event_set(c: &mut test::Bencher) { }) .collect(); - let fetch = |id: OwnedEventId| ready(inner.get(&id).map(Arc::clone)); + let fetch = |id: OwnedEventId| ready(inner.get(&id).clone()); let exists = |id: OwnedEventId| ready(inner.get(&id).is_some()); let _ = match state_res::resolve( &RoomVersionId::V6, @@ -171,20 +168,20 @@ fn resolve_deeper_event_set(c: &mut test::Bencher) { // IMPLEMENTATION DETAILS AHEAD // /////////////////////////////////////////////////////////////////////*/ -struct TestStore(HashMap>); +struct TestStore(HashMap); #[allow(unused)] -impl TestStore { - fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> Result> { +impl TestStore { + fn get_event(&self, room_id: &RoomId, event_id: &EventId) 
-> Result { self.0 .get(event_id) - .map(Arc::clone) + .cloned() .ok_or_else(|| Error::NotFound(format!("{} not found", event_id))) } /// Returns the events that correspond to the `event_ids` sorted in the same /// order. - fn get_events(&self, room_id: &RoomId, event_ids: &[OwnedEventId]) -> Result>> { + fn get_events(&self, room_id: &RoomId, event_ids: &[OwnedEventId]) -> Result> { let mut events = vec![]; for id in event_ids { events.push(self.get_event(room_id, id)?); @@ -264,7 +261,7 @@ impl TestStore { &[], ); let cre = create_event.event_id().to_owned(); - self.0.insert(cre.clone(), Arc::clone(&create_event)); + self.0.insert(cre.clone(), create_event.clone()); let alice_mem = to_pdu_event( "IMA", @@ -276,7 +273,7 @@ impl TestStore { &[cre.clone()], ); self.0 - .insert(alice_mem.event_id().to_owned(), Arc::clone(&alice_mem)); + .insert(alice_mem.event_id().to_owned(), alice_mem.clone()); let join_rules = to_pdu_event( "IJR", @@ -383,7 +380,7 @@ fn to_pdu_event( content: Box, auth_events: &[S], prev_events: &[S], -) -> Arc +) -> PduEvent where S: AsRef, { @@ -407,7 +404,7 @@ where .collect::>(); let state_key = state_key.map(ToOwned::to_owned); - Arc::new(PduEvent { + PduEvent { event_id: id.try_into().unwrap(), rest: Pdu::RoomV3Pdu(RoomV3Pdu { room_id: room_id().to_owned(), @@ -424,12 +421,12 @@ where hashes: EventHash::new(String::new()), signatures: Signatures::new(), }), - }) + } } // all graphs start with these input events #[allow(non_snake_case)] -fn INITIAL_EVENTS() -> HashMap> { +fn INITIAL_EVENTS() -> HashMap { vec![ to_pdu_event::<&EventId>( "CREATE", @@ -511,7 +508,7 @@ fn INITIAL_EVENTS() -> HashMap> { // all graphs start with these input events #[allow(non_snake_case)] -fn BAN_STATE_SET() -> HashMap> { +fn BAN_STATE_SET() -> HashMap { vec![ to_pdu_event( "PA", diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs index 65bec802..8c9339ec 100644 --- a/src/core/matrix/state_res/event_auth.rs +++ b/src/core/matrix/state_res/event_auth.rs @@ -1112,8 +1112,6 @@ fn verify_third_party_invite( #[cfg(test)] mod tests { - use std::sync::Arc; - use ruma::events::{ StateEventType, TimelineEventType, room::{ @@ -1143,7 +1141,7 @@ mod tests { let auth_events = events .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) .collect::>(); let requester = to_pdu_event( @@ -1188,7 +1186,7 @@ mod tests { let auth_events = events .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) .collect::>(); let requester = to_pdu_event( @@ -1233,7 +1231,7 @@ mod tests { let auth_events = events .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) .collect::>(); let requester = to_pdu_event( @@ -1278,7 +1276,7 @@ mod tests { let auth_events = events .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) .collect::>(); let requester = to_pdu_event( @@ -1340,7 +1338,7 @@ mod tests { let auth_events = events .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .map(|ev| 
(ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) .collect::>(); let requester = to_pdu_event( @@ -1412,7 +1410,7 @@ mod tests { let auth_events = events .values() - .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev))) + .map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone())) .collect::>(); let requester = to_pdu_event( diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs index ce6b7e89..2ab7cb64 100644 --- a/src/core/matrix/state_res/mod.rs +++ b/src/core/matrix/state_res/mod.rs @@ -861,10 +861,7 @@ where #[cfg(test)] mod tests { - use std::{ - collections::{HashMap, HashSet}, - sync::Arc, - }; + use std::collections::{HashMap, HashSet}; use maplit::{hashmap, hashset}; use rand::seq::SliceRandom; @@ -906,7 +903,7 @@ mod tests { let power_events = event_map .values() - .filter(|&pdu| is_power_event(&**pdu)) + .filter(|&pdu| is_power_event(&*pdu)) .map(|pdu| pdu.event_id.clone()) .collect::>(); @@ -1489,7 +1486,7 @@ mod tests { } #[allow(non_snake_case)] - fn BAN_STATE_SET() -> HashMap> { + fn BAN_STATE_SET() -> HashMap { vec![ to_pdu_event( "PA", @@ -1534,7 +1531,7 @@ mod tests { } #[allow(non_snake_case)] - fn JOIN_RULE() -> HashMap> { + fn JOIN_RULE() -> HashMap { vec![ to_pdu_event( "JR", diff --git a/src/core/matrix/state_res/test_utils.rs b/src/core/matrix/state_res/test_utils.rs index f2ee4238..a666748a 100644 --- a/src/core/matrix/state_res/test_utils.rs +++ b/src/core/matrix/state_res/test_utils.rs @@ -1,10 +1,7 @@ use std::{ borrow::Borrow, collections::{BTreeMap, HashMap, HashSet}, - sync::{ - Arc, - atomic::{AtomicU64, Ordering::SeqCst}, - }, + sync::atomic::{AtomicU64, Ordering::SeqCst}, }; use futures::future::ready; @@ -36,7 +33,7 @@ use crate::{ static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); pub(crate) async fn do_check( - events: &[Arc], + events: &[PduEvent], edges: Vec>, expected_state_ids: Vec, ) { @@ -85,7 +82,7 @@ pub(crate) async fn do_check( } // event_id -> PduEvent - let mut event_map: HashMap> = HashMap::new(); + let mut event_map: HashMap = HashMap::new(); // event_id -> StateMap let mut state_at_event: HashMap> = HashMap::new(); @@ -194,7 +191,7 @@ pub(crate) async fn do_check( store.0.insert(ev_id.to_owned(), event.clone()); state_at_event.insert(node, state_after); - event_map.insert(event_id.to_owned(), Arc::clone(store.0.get(ev_id).unwrap())); + event_map.insert(event_id.to_owned(), store.0.get(ev_id).unwrap().clone()); } let mut expected_state = StateMap::new(); @@ -235,10 +232,10 @@ pub(crate) async fn do_check( } #[allow(clippy::exhaustive_structs)] -pub(crate) struct TestStore(pub(crate) HashMap>); +pub(crate) struct TestStore(pub(crate) HashMap); -impl TestStore { - pub(crate) fn get_event(&self, _: &RoomId, event_id: &EventId) -> Result> { +impl TestStore { + pub(crate) fn get_event(&self, _: &RoomId, event_id: &EventId) -> Result { self.0 .get(event_id) .cloned() @@ -288,7 +285,7 @@ impl TestStore { &[], ); let cre = create_event.event_id().to_owned(); - self.0.insert(cre.clone(), Arc::clone(&create_event)); + self.0.insert(cre.clone(), create_event.clone()); let alice_mem = to_pdu_event( "IMA", @@ -300,7 +297,7 @@ impl TestStore { &[cre.clone()], ); self.0 - .insert(alice_mem.event_id().to_owned(), Arc::clone(&alice_mem)); + .insert(alice_mem.event_id().to_owned(), alice_mem.clone()); let join_rules = to_pdu_event( "IJR", @@ -399,7 +396,7 @@ pub(crate) fn to_init_pdu_event( ev_type: TimelineEventType, state_key: Option<&str>, content: 
Box, -) -> Arc { +) -> PduEvent { let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); let id = if id.contains('$') { id.to_owned() @@ -408,7 +405,7 @@ pub(crate) fn to_init_pdu_event( }; let state_key = state_key.map(ToOwned::to_owned); - Arc::new(PduEvent { + PduEvent { event_id: id.try_into().unwrap(), rest: Pdu::RoomV3Pdu(RoomV3Pdu { room_id: room_id().to_owned(), @@ -425,7 +422,7 @@ pub(crate) fn to_init_pdu_event( hashes: EventHash::new("".to_owned()), signatures: ServerSignatures::default(), }), - }) + } } pub(crate) fn to_pdu_event( @@ -436,7 +433,7 @@ pub(crate) fn to_pdu_event( content: Box, auth_events: &[S], prev_events: &[S], -) -> Arc +) -> PduEvent where S: AsRef, { @@ -458,7 +455,7 @@ where .collect::>(); let state_key = state_key.map(ToOwned::to_owned); - Arc::new(PduEvent { + PduEvent { event_id: id.try_into().unwrap(), rest: Pdu::RoomV3Pdu(RoomV3Pdu { room_id: room_id().to_owned(), @@ -475,12 +472,12 @@ where hashes: EventHash::new("".to_owned()), signatures: ServerSignatures::default(), }), - }) + } } // all graphs start with these input events #[allow(non_snake_case)] -pub(crate) fn INITIAL_EVENTS() -> HashMap> { +pub(crate) fn INITIAL_EVENTS() -> HashMap { vec![ to_pdu_event::<&EventId>( "CREATE", @@ -562,7 +559,7 @@ pub(crate) fn INITIAL_EVENTS() -> HashMap> { // all graphs start with these input events #[allow(non_snake_case)] -pub(crate) fn INITIAL_EVENTS_CREATE_ROOM() -> HashMap> { +pub(crate) fn INITIAL_EVENTS_CREATE_ROOM() -> HashMap { vec![to_pdu_event::<&EventId>( "CREATE", alice(), From 576a783a6f98bde5c04171f881c8a18e70222ac3 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 1 Apr 2025 08:39:41 +0000 Subject: [PATCH 259/310] add missing feature-projections between intra-workspace crates Signed-off-by: Jason Volk --- Cargo.lock | 1 - src/admin/Cargo.toml | 49 +++++++++++++++++++++++++++ src/api/Cargo.toml | 50 +++++++++++++++++++++------- src/core/Cargo.toml | 38 ++++++++++----------- src/core/info/cargo.rs | 2 +- src/database/Cargo.toml | 30 ++++++++++++----- src/main/Cargo.toml | 2 ++ src/router/Cargo.toml | 73 +++++++++++++++++++++++++++++++++-------- src/service/Cargo.toml | 34 +++++++++++++++++-- 9 files changed, 221 insertions(+), 58 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index def41f68..00aeca81 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -784,7 +784,6 @@ dependencies = [ "base64 0.22.1", "bytes", "conduwuit_core", - "conduwuit_database", "conduwuit_service", "const-str", "futures", diff --git a/src/admin/Cargo.toml b/src/admin/Cargo.toml index ca865969..7896ef97 100644 --- a/src/admin/Cargo.toml +++ b/src/admin/Cargo.toml @@ -17,12 +17,61 @@ crate-type = [ ] [features] +brotli_compression = [ + "conduwuit-api/brotli_compression", + "conduwuit-core/brotli_compression", + "conduwuit-service/brotli_compression", +] +gzip_compression = [ + "conduwuit-api/gzip_compression", + "conduwuit-core/gzip_compression", + "conduwuit-service/gzip_compression", +] +io_uring = [ + "conduwuit-api/io_uring", + "conduwuit-database/io_uring", + "conduwuit-service/io_uring", +] +jemalloc = [ + "conduwuit-api/jemalloc", + "conduwuit-core/jemalloc", + "conduwuit-database/jemalloc", + "conduwuit-service/jemalloc", +] +jemalloc_conf = [ + "conduwuit-api/jemalloc_conf", + "conduwuit-core/jemalloc_conf", + "conduwuit-database/jemalloc_conf", + "conduwuit-service/jemalloc_conf", +] +jemalloc_prof = [ + "conduwuit-api/jemalloc_prof", + "conduwuit-core/jemalloc_prof", + "conduwuit-database/jemalloc_prof", + "conduwuit-service/jemalloc_prof", +] +jemalloc_stats = [ + 
"conduwuit-api/jemalloc_stats", + "conduwuit-core/jemalloc_stats", + "conduwuit-database/jemalloc_stats", + "conduwuit-service/jemalloc_stats", +] release_max_log_level = [ + "conduwuit-api/release_max_log_level", + "conduwuit-core/release_max_log_level", + "conduwuit-database/release_max_log_level", + "conduwuit-service/release_max_log_level", "tracing/max_level_trace", "tracing/release_max_level_info", "log/max_level_trace", "log/release_max_level_info", ] +zstd_compression = [ + "conduwuit-api/zstd_compression", + "conduwuit-core/zstd_compression", + "conduwuit-database/zstd_compression", + "conduwuit-service/zstd_compression", +] [dependencies] clap.workspace = true diff --git a/src/api/Cargo.toml b/src/api/Cargo.toml index 7890561c..15ada812 100644 --- a/src/api/Cargo.toml +++ b/src/api/Cargo.toml @@ -17,21 +17,50 @@ crate-type = [ ] [features] -element_hacks = [] -release_max_log_level = [ - "tracing/max_level_trace", - "tracing/release_max_level_info", - "log/max_level_trace", - "log/release_max_level_info", +brotli_compression = [ + "conduwuit-core/brotli_compression", + "conduwuit-service/brotli_compression", + "reqwest/brotli", ] -zstd_compression = [ - "reqwest/zstd", +element_hacks = [ + "conduwuit-service/element_hacks", ] gzip_compression = [ + "conduwuit-core/gzip_compression", + "conduwuit-service/gzip_compression", "reqwest/gzip", ] -brotli_compression = [ - "reqwest/brotli", +io_uring = [ + "conduwuit-service/io_uring", +] +jemalloc = [ + "conduwuit-core/jemalloc", + "conduwuit-service/jemalloc", +] +jemalloc_conf = [ + "conduwuit-core/jemalloc_conf", + "conduwuit-service/jemalloc_conf", +] +jemalloc_prof = [ + "conduwuit-core/jemalloc_prof", + "conduwuit-service/jemalloc_prof", +] +jemalloc_stats = [ + "conduwuit-core/jemalloc_stats", + "conduwuit-service/jemalloc_stats", +] +release_max_log_level = [ + "conduwuit-core/release_max_log_level", + "conduwuit-service/release_max_log_level", + "log/max_level_trace", + "log/release_max_level_info", + "tracing/max_level_trace", + "tracing/release_max_level_info", +] +zstd_compression = [ + "conduwuit-core/zstd_compression", + "conduwuit-service/zstd_compression", + "reqwest/zstd", ] [dependencies] @@ -42,7 +71,6 @@ axum.workspace = true base64.workspace = true bytes.workspace = true conduwuit-core.workspace = true -conduwuit-database.workspace = true conduwuit-service.workspace = true const-str.workspace = true futures.workspace = true diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index 4848e742..f42b049b 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -17,17 +17,24 @@ crate-type = [ ] [features] -release_max_log_level = [ - "tracing/max_level_trace", - "tracing/release_max_level_info", - "log/max_level_trace", - "log/release_max_level_info", +brotli_compression = [ + "reqwest/brotli", +] +conduwuit_mods = [ + "dep:libloading" +] +gzip_compression = [ + "reqwest/gzip", +] +hardened_malloc = [ + "dep:hardened_malloc-rs" ] jemalloc = [ "dep:tikv-jemalloc-sys", "dep:tikv-jemalloc-ctl", "dep:tikv-jemallocator", ] +jemalloc_conf = [] jemalloc_prof = [ "tikv-jemalloc-sys/profiling", ] @@ -36,24 +43,17 @@ jemalloc_stats = [ "tikv-jemalloc-ctl/stats", "tikv-jemallocator/stats", ] -jemalloc_conf = [] -hardened_malloc = [ - "dep:hardened_malloc-rs" -] -gzip_compression = [ - "reqwest/gzip", -] -brotli_compression = [ - "reqwest/brotli", +perf_measurements = [] +release_max_log_level = [ + "tracing/max_level_trace", + "tracing/release_max_level_info", + "log/max_level_trace", + "log/release_max_level_info", ] 
+sentry_telemetry = [] zstd_compression = [ "reqwest/zstd", ] -perf_measurements = [] -sentry_telemetry = [] -conduwuit_mods = [ - "dep:libloading" -] [dependencies] argon2.workspace = true diff --git a/src/core/info/cargo.rs b/src/core/info/cargo.rs index c5a1d167..28c6590e 100644 --- a/src/core/info/cargo.rs +++ b/src/core/info/cargo.rs @@ -31,7 +31,7 @@ const ROUTER_MANIFEST: &'static str = (); #[cargo_manifest(crate = "main")] const MAIN_MANIFEST: &'static str = (); -/// Processed list of features access all project crates. This is generated from +/// Processed list of features across all project crates. This is generated from /// the data in the MANIFEST strings and contains all possible project features. /// For *enabled* features see the info::rustc module instead. static FEATURES: OnceLock> = OnceLock::new(); diff --git a/src/database/Cargo.toml b/src/database/Cargo.toml index 067c6f5f..55d4793f 100644 --- a/src/database/Cargo.toml +++ b/src/database/Cargo.toml @@ -17,19 +17,31 @@ crate-type = [ ] [features] -release_max_log_level = [ - "tracing/max_level_trace", - "tracing/release_max_level_info", - "log/max_level_trace", - "log/release_max_level_info", -] -jemalloc = [ - "rust-rocksdb/jemalloc", -] io_uring = [ "rust-rocksdb/io-uring", ] +jemalloc = [ + "conduwuit-core/jemalloc", + "rust-rocksdb/jemalloc", +] +jemalloc_conf = [ + "conduwuit-core/jemalloc_conf", +] +jemalloc_prof = [ + "conduwuit-core/jemalloc_prof", +] +jemalloc_stats = [ + "conduwuit-core/jemalloc_stats", +] +release_max_log_level = [ + "conduwuit-core/release_max_log_level", + "log/max_level_trace", + "log/release_max_level_info", + "tracing/max_level_trace", + "tracing/release_max_level_info", +] zstd_compression = [ + "conduwuit-core/zstd_compression", "rust-rocksdb/zstd", ] diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index 87ca48c8..e2fed5d5 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -70,6 +70,7 @@ element_hacks = [ ] gzip_compression = [ "conduwuit-api/gzip_compression", + "conduwuit-core/gzip_compression", "conduwuit-router/gzip_compression", "conduwuit-service/gzip_compression", ] @@ -141,6 +142,7 @@ zstd_compression = [ "conduwuit-core/zstd_compression", "conduwuit-database/zstd_compression", "conduwuit-router/zstd_compression", + "conduwuit-service/zstd_compression", ] conduwuit_mods = [ "conduwuit-core/conduwuit_mods", diff --git a/src/router/Cargo.toml b/src/router/Cargo.toml index 51e15aed..31a44983 100644 --- a/src/router/Cargo.toml +++ b/src/router/Cargo.toml @@ -17,34 +17,79 @@ crate-type = [ ] [features] +brotli_compression = [ + "conduwuit-admin/brotli_compression", + "conduwuit-api/brotli_compression", + "conduwuit-core/brotli_compression", + "conduwuit-service/brotli_compression", + "tower-http/compression-br", +] +direct_tls = [ + "axum-server/tls-rustls", + "dep:rustls", + "dep:axum-server-dual-protocol", +] +gzip_compression = [ + "conduwuit-admin/gzip_compression", + "conduwuit-api/gzip_compression", + "conduwuit-core/gzip_compression", + "conduwuit-service/gzip_compression", + "tower-http/compression-gzip", +] +io_uring = [ + "conduwuit-admin/io_uring", + "conduwuit-api/io_uring", + "conduwuit-service/io_uring", + "conduwuit-api/io_uring", +] +jemalloc = [ + "conduwuit-admin/jemalloc", + "conduwuit-api/jemalloc", + "conduwuit-core/jemalloc", + "conduwuit-service/jemalloc", +] +jemalloc_conf = [ + "conduwuit-admin/jemalloc_conf", + "conduwuit-api/jemalloc_conf", + "conduwuit-core/jemalloc_conf", + "conduwuit-service/jemalloc_conf", +] +jemalloc_prof = [ + 
"conduwuit-admin/jemalloc_prof", + "conduwuit-api/jemalloc_prof", + "conduwuit-core/jemalloc_prof", + "conduwuit-service/jemalloc_prof", +] +jemalloc_stats = [ + "conduwuit-admin/jemalloc_stats", + "conduwuit-api/jemalloc_stats", + "conduwuit-core/jemalloc_stats", + "conduwuit-service/jemalloc_stats", +] release_max_log_level = [ + "conduwuit-admin/release_max_log_level", + "conduwuit-api/release_max_log_level", + "conduwuit-core/release_max_log_level", + "conduwuit-service/release_max_log_level", "tracing/max_level_trace", "tracing/release_max_level_info", "log/max_level_trace", "log/release_max_level_info", ] sentry_telemetry = [ + "conduwuit-core/sentry_telemetry", "dep:sentry", "dep:sentry-tracing", "dep:sentry-tower", ] -zstd_compression = [ - "tower-http/compression-zstd", -] -gzip_compression = [ - "tower-http/compression-gzip", -] -brotli_compression = [ - "tower-http/compression-br", -] systemd = [ "dep:sd-notify", ] - -direct_tls = [ - "axum-server/tls-rustls", - "dep:rustls", - "dep:axum-server-dual-protocol", +zstd_compression = [ + "conduwuit-api/zstd_compression", + "conduwuit-core/zstd_compression", + "conduwuit-service/zstd_compression", + "tower-http/compression-zstd", ] [dependencies] diff --git a/src/service/Cargo.toml b/src/service/Cargo.toml index caeea318..8b0d1405 100644 --- a/src/service/Cargo.toml +++ b/src/service/Cargo.toml @@ -17,7 +17,12 @@ crate-type = [ ] [features] +blurhashing = [ + "dep:image", + "dep:blurhash", +] brotli_compression = [ + "conduwuit-core/brotli_compression", "reqwest/brotli", ] console = [ @@ -26,25 +31,48 @@ console = [ ] element_hacks = [] gzip_compression = [ + "conduwuit-core/gzip_compression", "reqwest/gzip", ] +io_uring = [ + "conduwuit-database/io_uring", +] +jemalloc = [ + "conduwuit-core/jemalloc", + "conduwuit-database/jemalloc", +] +jemalloc_conf = [ + "conduwuit-core/jemalloc_conf", + "conduwuit-database/jemalloc_conf", +] +jemalloc_prof = [ + "conduwuit-core/jemalloc_prof", + "conduwuit-database/jemalloc_prof", +] +jemalloc_stats = [ + "conduwuit-core/jemalloc_stats", + "conduwuit-database/jemalloc_stats", +] media_thumbnail = [ "dep:image", ] release_max_log_level = [ - "tracing/max_level_trace", - "tracing/release_max_level_info", + "conduwuit-core/release_max_log_level", + "conduwuit-database/release_max_log_level", "log/max_level_trace", "log/release_max_level_info", + "tracing/max_level_trace", + "tracing/release_max_level_info", ] url_preview = [ "dep:image", "dep:webpage", ] zstd_compression = [ + "conduwuit-core/zstd_compression", + "conduwuit-database/zstd_compression", "reqwest/zstd", ] -blurhashing = ["dep:image","dep:blurhash"] [dependencies] async-trait.workspace = true From 8e7373c02790a4e48e29346f678a0181de6c42f6 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Fri, 4 Apr 2025 23:04:13 +0000 Subject: [PATCH 260/310] mitigate additional debuginfo expansions Signed-off-by: Jason Volk --- src/core/debug.rs | 1 + src/core/error/err.rs | 3 +++ src/core/log/mod.rs | 1 + src/core/utils/math.rs | 4 ++++ src/core/utils/mod.rs | 4 ---- src/core/utils/string.rs | 2 ++ src/core/utils/sys/storage.rs | 2 +- src/service/mod.rs | 2 +- 8 files changed, 13 insertions(+), 6 deletions(-) diff --git a/src/core/debug.rs b/src/core/debug.rs index b9a53038..21a5ada4 100644 --- a/src/core/debug.rs +++ b/src/core/debug.rs @@ -12,6 +12,7 @@ pub use crate::{result::DebugInspect, utils::debug::*}; /// Log event at given level in debug-mode (when debug-assertions are enabled). 
/// In release-mode it becomes DEBUG level, and possibly subject to elision. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! debug_event { ( $level:expr_2021, $($x:tt)+ ) => { if $crate::debug::logging() { diff --git a/src/core/error/err.rs b/src/core/error/err.rs index 9c24d3b4..2eb6823a 100644 --- a/src/core/error/err.rs +++ b/src/core/error/err.rs @@ -33,6 +33,7 @@ //! option of replacing `error!` with `debug_error!`. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! Err { ($($args:tt)*) => { Err($crate::err!($($args)*)) @@ -40,6 +41,7 @@ macro_rules! Err { } #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! err { (Request(Forbidden($level:ident!($($args:tt)+)))) => {{ let mut buf = String::new(); @@ -109,6 +111,7 @@ macro_rules! err { /// can share the same callsite metadata for the source of our Error and the /// associated logging and tracing event dispatches. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! err_log { ($out:ident, $level:ident, $($fields:tt)+) => {{ use $crate::tracing::{ diff --git a/src/core/log/mod.rs b/src/core/log/mod.rs index 5ac374e8..f7b2521a 100644 --- a/src/core/log/mod.rs +++ b/src/core/log/mod.rs @@ -33,6 +33,7 @@ pub struct Log { // the crate namespace like these. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! event { ( $level:expr_2021, $($x:tt)+ ) => { ::tracing::event!( $level, $($x)+ ) } } diff --git a/src/core/utils/math.rs b/src/core/utils/math.rs index 488f2a13..9316731c 100644 --- a/src/core/utils/math.rs +++ b/src/core/utils/math.rs @@ -10,6 +10,7 @@ use crate::{Err, Error, Result, debug::type_name, err}; /// Checked arithmetic expression. Returns a Result #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! checked { ($($input:tt)+) => { $crate::utils::math::checked_ops!($($input)+) @@ -22,6 +23,7 @@ macro_rules! checked { /// has no realistic expectation for error and no interest in cluttering the /// callsite with result handling from checked!. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! expected { ($msg:literal, $($input:tt)+) => { $crate::checked!($($input)+).expect($msg) @@ -37,6 +39,7 @@ macro_rules! expected { /// regression analysis. #[cfg(not(debug_assertions))] #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! validated { ($($input:tt)+) => { //#[allow(clippy::arithmetic_side_effects)] { @@ -53,6 +56,7 @@ macro_rules! validated { /// the expression is obviously safe. The check is elided in release-mode. #[cfg(debug_assertions)] #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! validated { ($($input:tt)+) => { $crate::expected!($($input)+) } } diff --git a/src/core/utils/mod.rs b/src/core/utils/mod.rs index 5e6f2868..54404e4c 100644 --- a/src/core/utils/mod.rs +++ b/src/core/utils/mod.rs @@ -173,7 +173,6 @@ macro_rules! is_equal { /// Functor for |x| *x.$i #[macro_export] -#[collapse_debuginfo(yes)] macro_rules! deref_at { ($idx:tt) => { |t| *t.$idx @@ -182,7 +181,6 @@ macro_rules! deref_at { /// Functor for |ref x| x.$i #[macro_export] -#[collapse_debuginfo(yes)] macro_rules! ref_at { ($idx:tt) => { |ref t| &t.$idx @@ -191,7 +189,6 @@ macro_rules! ref_at { /// Functor for |&x| x.$i #[macro_export] -#[collapse_debuginfo(yes)] macro_rules! val_at { ($idx:tt) => { |&t| t.$idx @@ -200,7 +197,6 @@ macro_rules! val_at { /// Functor for |x| x.$i #[macro_export] -#[collapse_debuginfo(yes)] macro_rules! 
at { ($idx:tt) => { |t| t.$idx diff --git a/src/core/utils/string.rs b/src/core/utils/string.rs index d8fa3f95..7d81903d 100644 --- a/src/core/utils/string.rs +++ b/src/core/utils/string.rs @@ -14,6 +14,7 @@ pub const EMPTY: &str = ""; /// returned otherwise the input (i.e. &'static str) is returned. If multiple /// arguments are provided the first is assumed to be a format string. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! format_maybe { ($s:literal $(,)?) => { if $crate::is_format!($s) { std::format!($s).into() } else { $s.into() } @@ -27,6 +28,7 @@ macro_rules! format_maybe { /// Constant expression to decide if a literal is a format string. Note: could /// use some improvement. #[macro_export] +#[collapse_debuginfo(yes)] macro_rules! is_format { ($s:literal) => { ::const_str::contains!($s, "{") && ::const_str::contains!($s, "}") diff --git a/src/core/utils/sys/storage.rs b/src/core/utils/sys/storage.rs index 452b04b2..b71c3437 100644 --- a/src/core/utils/sys/storage.rs +++ b/src/core/utils/sys/storage.rs @@ -117,7 +117,7 @@ pub fn name_from_path(path: &Path) -> Result { /// Get the (major, minor) of the block device on which Path is mounted. #[allow(clippy::useless_conversion, clippy::unnecessary_fallible_conversions)] -pub fn dev_from_path(path: &Path) -> Result<(dev_t, dev_t)> { +fn dev_from_path(path: &Path) -> Result<(dev_t, dev_t)> { #[cfg(target_family = "unix")] use std::os::unix::fs::MetadataExt; diff --git a/src/service/mod.rs b/src/service/mod.rs index 63a51213..2be16f79 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,4 +1,4 @@ -#![type_length_limit = "2048"] +#![type_length_limit = "8192"] #![allow(refining_impl_trait)] mod manager; From e71138ab6ffbea621120c41bafb1c65c7b1a3e39 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 8 Apr 2025 03:17:23 +0000 Subject: [PATCH 261/310] reduce large stack frames --- src/service/admin/create.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/service/admin/create.rs b/src/service/admin/create.rs index 4de37092..cd0fc5a9 100644 --- a/src/service/admin/create.rs +++ b/src/service/admin/create.rs @@ -1,6 +1,7 @@ use std::collections::BTreeMap; use conduwuit::{Result, pdu::PduBuilder}; +use futures::FutureExt; use ruma::{ RoomId, RoomVersionId, events::room::{ @@ -63,6 +64,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; // 2. Make server user/bot join @@ -78,6 +80,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; // 3. Power levels @@ -95,6 +98,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; // 4.1 Join Rules @@ -107,6 +111,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; // 4.2 History Visibility @@ -122,6 +127,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; // 4.3 Guest Access @@ -137,6 +143,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; // 5. Events implied by name and topic @@ -150,6 +157,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; services @@ -163,6 +171,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; // 6. 
Room alias @@ -180,6 +189,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; services @@ -197,6 +207,7 @@ pub async fn create_admin_room(services: &Services) -> Result { &room_id, &state_lock, ) + .boxed() .await?; Ok(()) From 0eb9e4f3d2284a9c96b4c781e25328f1a6e9f9e2 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 19 Apr 2025 23:02:43 +0100 Subject: [PATCH 262/310] refactor: Centralize server forbidden checks into moderation module This moves all checks related to `forbidden_remote_server_names`, `forbidden_remote_room_directory_server_names` and `prevent_media_downloads_from` to a new `moderation` module. This is useful for implementing more complicated logic globally. Mostly the changes from #673, but is also relevant for #750 --- conduwuit-example.toml | 3 ++ src/api/client/directory.rs | 19 ++-------- src/api/client/membership.rs | 5 +-- src/api/client/message.rs | 5 +-- src/api/router/auth.rs | 8 +--- src/api/server/invite.rs | 11 ++---- src/api/server/make_join.rs | 11 ++---- src/api/server/make_knock.rs | 11 ++---- src/api/server/send_join.rs | 22 +++-------- src/api/server/send_knock.rs | 11 ++---- src/core/config/mod.rs | 5 ++- src/service/federation/execute.rs | 8 +--- src/service/federation/mod.rs | 4 +- src/service/media/mod.rs | 4 +- src/service/media/remote.rs | 12 +----- src/service/mod.rs | 1 + src/service/moderation.rs | 62 +++++++++++++++++++++++++++++++ src/service/services.rs | 4 +- 18 files changed, 109 insertions(+), 97 deletions(-) create mode 100644 src/service/moderation.rs diff --git a/conduwuit-example.toml b/conduwuit-example.toml index af8da6bb..5a4b7b3f 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1197,6 +1197,9 @@ # incoming AND outgoing federation with, and block client room joins / # remote user invites. # +# Additionally, it will hide messages from these servers for all users +# on this server. +# # This check is applied on the room ID, room alias, sender server name, # sender user's server name, inbound federation X-Matrix origin, and # outbound federation handler. 
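A side note on the `.boxed()` calls added in the "reduce large stack frames" patch above: boxing an awaited future moves its (potentially very large) generated state machine onto the heap, so the caller's own future, and the stack frame that eventually polls it, stays small. The sketch below is a minimal, self-contained illustration of that idea only; it is not code from this repository, and the 64 KiB payload is just a stand-in for a large generated future.

```rust
use futures::FutureExt;

// Stand-in for a deeply nested async call whose generated future is large.
async fn build_large_state() -> [u8; 65536] {
    [0u8; 65536]
}

async fn caller() -> u8 {
    // Without `.boxed()`, the roughly 64 KiB future returned by
    // `build_large_state()` is embedded inline in `caller`'s own future.
    // With it, that state lives on the heap and `caller`'s future only
    // holds a boxed pointer to poll.
    build_large_state().boxed().await[0]
}

fn main() {
    // The lightweight executor bundled with the `futures` crate keeps the
    // sketch self-contained.
    assert_eq!(futures::executor::block_on(caller()), 0);
}
```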
diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index b44b9f64..aa6ae168 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -52,13 +52,8 @@ pub(crate) async fn get_public_rooms_filtered_route( ) -> Result { if let Some(server) = &body.server { if services - .config - .forbidden_remote_room_directory_server_names - .is_match(server.host()) - || services - .config - .forbidden_remote_server_names - .is_match(server.host()) + .moderation + .is_remote_server_room_directory_forbidden(server) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } @@ -92,15 +87,7 @@ pub(crate) async fn get_public_rooms_route( body: Ruma, ) -> Result { if let Some(server) = &body.server { - if services - .config - .forbidden_remote_room_directory_server_names - .is_match(server.host()) - || services - .config - .forbidden_remote_server_names - .is_match(server.host()) - { + if services.moderation.is_remote_server_forbidden(server) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } } diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 18a1c741..b1b85b81 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -83,9 +83,8 @@ async fn banned_room_check( if let Some(room_id) = room_id { if services.rooms.metadata.is_banned(room_id).await || services - .config - .forbidden_remote_server_names - .is_match(room_id.server_name().expect("legacy room mxid").host()) + .moderation + .is_remote_server_forbidden(room_id.server_name().expect("legacy room mxid")) { warn!( "User {user_id} who is not an admin attempted to send an invite for or \ diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 9c2c4057..08887e18 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -274,9 +274,8 @@ pub(crate) async fn is_ignored_pdu( let ignored_type = IGNORED_MESSAGE_TYPES.binary_search(&pdu.kind).is_ok(); let ignored_server = services - .config - .forbidden_remote_server_names - .is_match(pdu.sender().server_name().host()); + .moderation + .is_remote_server_forbidden(pdu.sender().server_name()); if ignored_type && (ignored_server || services.users.user_is_ignored(&pdu.sender, user_id).await) diff --git a/src/api/router/auth.rs b/src/api/router/auth.rs index 0eb61ca6..01254c32 100644 --- a/src/api/router/auth.rs +++ b/src/api/router/auth.rs @@ -306,7 +306,7 @@ async fn auth_server( } fn auth_server_checks(services: &Services, x_matrix: &XMatrix) -> Result<()> { - if !services.server.config.allow_federation { + if !services.config.allow_federation { return Err!(Config("allow_federation", "Federation is disabled.")); } @@ -316,11 +316,7 @@ fn auth_server_checks(services: &Services, x_matrix: &XMatrix) -> Result<()> { } let origin = &x_matrix.origin; - if services - .config - .forbidden_remote_server_names - .is_match(origin.host()) - { + if services.moderation.is_remote_server_forbidden(origin) { return Err!(Request(Forbidden(debug_warn!( "Federation requests from {origin} denied." 
)))); diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index edd6ac16..f53e1a15 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -37,19 +37,14 @@ pub(crate) async fn create_invite_route( } if let Some(server) = body.room_id.server_name() { - if services - .config - .forbidden_remote_server_names - .is_match(server.host()) - { + if services.moderation.is_remote_server_forbidden(server) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } } if services - .config - .forbidden_remote_server_names - .is_match(body.origin().host()) + .moderation + .is_remote_server_forbidden(body.origin()) { warn!( "Received federated/remote invite from banned server {} for room ID {}. Rejecting.", diff --git a/src/api/server/make_join.rs b/src/api/server/make_join.rs index ac2c5485..3204c30c 100644 --- a/src/api/server/make_join.rs +++ b/src/api/server/make_join.rs @@ -42,9 +42,8 @@ pub(crate) async fn create_join_event_template_route( .await?; if services - .config - .forbidden_remote_server_names - .is_match(body.origin().host()) + .moderation + .is_remote_server_forbidden(body.origin()) { warn!( "Server {} for remote user {} tried joining room ID {} which has a server name that \ @@ -57,11 +56,7 @@ pub(crate) async fn create_join_event_template_route( } if let Some(server) = body.room_id.server_name() { - if services - .config - .forbidden_remote_server_names - .is_match(server.host()) - { + if services.moderation.is_remote_server_forbidden(server) { return Err!(Request(Forbidden(warn!( "Room ID server name {server} is banned on this homeserver." )))); diff --git a/src/api/server/make_knock.rs b/src/api/server/make_knock.rs index 511c13b2..423c8e81 100644 --- a/src/api/server/make_knock.rs +++ b/src/api/server/make_knock.rs @@ -33,9 +33,8 @@ pub(crate) async fn create_knock_event_template_route( .await?; if services - .config - .forbidden_remote_server_names - .is_match(body.origin().host()) + .moderation + .is_remote_server_forbidden(body.origin()) { warn!( "Server {} for remote user {} tried knocking room ID {} which has a server name \ @@ -48,11 +47,7 @@ pub(crate) async fn create_knock_event_template_route( } if let Some(server) = body.room_id.server_name() { - if services - .config - .forbidden_remote_server_names - .is_match(server.host()) - { + if services.moderation.is_remote_server_forbidden(server) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } } diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index a66d8890..895eca81 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -268,9 +268,8 @@ pub(crate) async fn create_join_event_v1_route( body: Ruma, ) -> Result { if services - .config - .forbidden_remote_server_names - .is_match(body.origin().host()) + .moderation + .is_remote_server_forbidden(body.origin()) { warn!( "Server {} tried joining room ID {} through us who has a server name that is \ @@ -282,11 +281,7 @@ pub(crate) async fn create_join_event_v1_route( } if let Some(server) = body.room_id.server_name() { - if services - .config - .forbidden_remote_server_names - .is_match(server.host()) - { + if services.moderation.is_remote_server_forbidden(server) { warn!( "Server {} tried joining room ID {} through us which has a server name that is \ globally forbidden. 
Rejecting.", @@ -314,19 +309,14 @@ pub(crate) async fn create_join_event_v2_route( body: Ruma, ) -> Result { if services - .config - .forbidden_remote_server_names - .is_match(body.origin().host()) + .moderation + .is_remote_server_forbidden(body.origin()) { return Err!(Request(Forbidden("Server is banned on this homeserver."))); } if let Some(server) = body.room_id.server_name() { - if services - .config - .forbidden_remote_server_names - .is_match(server.host()) - { + if services.moderation.is_remote_server_forbidden(server) { warn!( "Server {} tried joining room ID {} through us which has a server name that is \ globally forbidden. Rejecting.", diff --git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs index ee7b6cba..8d3697d2 100644 --- a/src/api/server/send_knock.rs +++ b/src/api/server/send_knock.rs @@ -26,9 +26,8 @@ pub(crate) async fn create_knock_event_v1_route( body: Ruma, ) -> Result { if services - .config - .forbidden_remote_server_names - .is_match(body.origin().host()) + .moderation + .is_remote_server_forbidden(body.origin()) { warn!( "Server {} tried knocking room ID {} who has a server name that is globally \ @@ -40,11 +39,7 @@ pub(crate) async fn create_knock_event_v1_route( } if let Some(server) = body.room_id.server_name() { - if services - .config - .forbidden_remote_server_names - .is_match(server.host()) - { + if services.moderation.is_remote_server_forbidden(server) { warn!( "Server {} tried knocking room ID {} which has a server name that is globally \ forbidden. Rejecting.", diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index a7205423..2de3b710 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1374,6 +1374,9 @@ pub struct Config { /// incoming AND outgoing federation with, and block client room joins / /// remote user invites. /// + /// Additionally, it will hide messages from these servers for all users + /// on this server. + /// /// This check is applied on the room ID, room alias, sender server name, /// sender user's server name, inbound federation X-Matrix origin, and /// outbound federation handler. 
@@ -1954,7 +1957,7 @@ impl Config { let mut addrs = Vec::with_capacity( self.get_bind_hosts() .len() - .saturating_add(self.get_bind_ports().len()), + .saturating_mul(self.get_bind_ports().len()), ); for host in &self.get_bind_hosts() { for port in &self.get_bind_ports() { diff --git a/src/service/federation/execute.rs b/src/service/federation/execute.rs index 97314ffb..1d1d1154 100644 --- a/src/service/federation/execute.rs +++ b/src/service/federation/execute.rs @@ -64,13 +64,7 @@ where return Err!(Config("allow_federation", "Federation is disabled.")); } - if self - .services - .server - .config - .forbidden_remote_server_names - .is_match(dest.host()) - { + if self.services.moderation.is_remote_server_forbidden(dest) { return Err!(Request(Forbidden(debug_warn!("Federation with {dest} is not allowed.")))); } diff --git a/src/service/federation/mod.rs b/src/service/federation/mod.rs index ce7765ee..15521875 100644 --- a/src/service/federation/mod.rs +++ b/src/service/federation/mod.rs @@ -4,7 +4,7 @@ use std::sync::Arc; use conduwuit::{Result, Server}; -use crate::{Dep, client, resolver, server_keys}; +use crate::{Dep, client, moderation, resolver, server_keys}; pub struct Service { services: Services, @@ -15,6 +15,7 @@ struct Services { client: Dep, resolver: Dep, server_keys: Dep, + moderation: Dep, } impl crate::Service for Service { @@ -25,6 +26,7 @@ impl crate::Service for Service { client: args.depend::("client"), resolver: args.depend::("resolver"), server_keys: args.depend::("server_keys"), + moderation: args.depend::("moderation"), }, })) } diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 5c26efe8..d053ba54 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -22,7 +22,7 @@ use tokio::{ use self::data::{Data, Metadata}; pub use self::thumbnail::Dim; -use crate::{Dep, client, globals, sending}; +use crate::{Dep, client, globals, moderation, sending}; #[derive(Debug)] pub struct FileMeta { @@ -42,6 +42,7 @@ struct Services { client: Dep, globals: Dep, sending: Dep, + moderation: Dep, } /// generated MXC ID (`media-id`) length @@ -64,6 +65,7 @@ impl crate::Service for Service { client: args.depend::("client"), globals: args.depend::("globals"), sending: args.depend::("sending"), + moderation: args.depend::("moderation"), }, })) } diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs index cdcb429e..a1e874d8 100644 --- a/src/service/media/remote.rs +++ b/src/service/media/remote.rs @@ -423,16 +423,8 @@ pub async fn fetch_remote_content_legacy( fn check_fetch_authorized(&self, mxc: &Mxc<'_>) -> Result<()> { if self .services - .server - .config - .prevent_media_downloads_from - .is_match(mxc.server_name.host()) - || self - .services - .server - .config - .forbidden_remote_server_names - .is_match(mxc.server_name.host()) + .moderation + .is_remote_server_media_downloads_forbidden(mxc.server_name) { // we'll lie to the client and say the blocked server's media was not found and // log. the client has no way of telling anyways so this is a security bonus. 
diff --git a/src/service/mod.rs b/src/service/mod.rs index 2be16f79..a3214408 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -16,6 +16,7 @@ pub mod federation; pub mod globals; pub mod key_backups; pub mod media; +pub mod moderation; pub mod presence; pub mod pusher; pub mod resolver; diff --git a/src/service/moderation.rs b/src/service/moderation.rs new file mode 100644 index 00000000..bd2616f6 --- /dev/null +++ b/src/service/moderation.rs @@ -0,0 +1,62 @@ +use std::sync::Arc; + +use conduwuit::{Result, Server, implement}; +use ruma::ServerName; + +pub struct Service { + services: Services, +} + +struct Services { + pub server: Arc, +} + +impl crate::Service for Service { + fn build(args: crate::Args<'_>) -> Result> { + Ok(Arc::new(Self { + services: Services { server: args.server.clone() }, + })) + } + + fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } +} + +#[implement(Service)] +#[must_use] +pub fn is_remote_server_forbidden(&self, server_name: &ServerName) -> bool { + // Forbidden if NOT (allowed is empty OR allowed contains server OR is self) + // OR forbidden contains server + self.services + .server + .config + .forbidden_remote_server_names + .is_match(server_name.host()) +} + +#[implement(Service)] +#[must_use] +pub fn is_remote_server_room_directory_forbidden(&self, server_name: &ServerName) -> bool { + // Forbidden if NOT (allowed is empty OR allowed contains server OR is self) + // OR forbidden contains server + self.is_remote_server_forbidden(server_name) + || self + .services + .server + .config + .forbidden_remote_room_directory_server_names + .is_match(server_name.host()) +} + +#[implement(Service)] +#[must_use] +pub fn is_remote_server_media_downloads_forbidden(&self, server_name: &ServerName) -> bool { + // Forbidden if NOT (allowed is empty OR allowed contains server OR is self) + // OR forbidden contains server + self.is_remote_server_forbidden(server_name) + || self + .services + .server + .config + .prevent_media_downloads_from + .is_match(server_name.host()) +} diff --git a/src/service/services.rs b/src/service/services.rs index dc390054..5dcc120e 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -12,7 +12,7 @@ use tokio::sync::Mutex; use crate::{ account_data, admin, appservice, client, config, emergency, federation, globals, key_backups, manager::Manager, - media, presence, pusher, resolver, rooms, sending, server_keys, service, + media, moderation, presence, pusher, resolver, rooms, sending, server_keys, service, service::{Args, Map, Service}, sync, transaction_ids, uiaa, updates, users, }; @@ -39,6 +39,7 @@ pub struct Services { pub uiaa: Arc, pub updates: Arc, pub users: Arc, + pub moderation: Arc, manager: Mutex>>, pub(crate) service: Arc, @@ -106,6 +107,7 @@ impl Services { uiaa: build!(uiaa::Service), updates: build!(updates::Service), users: build!(users::Service), + moderation: build!(moderation::Service), manager: Mutex::new(None), service, From 9e62076baa2fb4a6bb46f8a763e38240c98be5ee Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 19 Apr 2025 23:29:33 +0100 Subject: [PATCH 263/310] feat: Add `allowed_remote_server_names` This allows explicitly allowing servers. Can be combined with the opposite to create allowlist-only federation. 
See also #31 Closes #673 --- conduwuit-example.toml | 10 ++++++++++ src/core/config/mod.rs | 12 ++++++++++++ src/service/moderation.rs | 19 +++++++++++++++++-- 3 files changed, 39 insertions(+), 2 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 5a4b7b3f..326127c3 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1206,10 +1206,20 @@ # # Basically "global" ACLs. # +# You can set this to ["*"] to block all servers by default, and then +# use `allowed_remote_server_names` to allow only specific servers. +# # example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] # #forbidden_remote_server_names = [] +# List of allowed server names via regex patterns that we will allow, +# regardless of if they match `forbidden_remote_server_names`. +# +# example: ["goodserver\.tld$", "goodphrase"] +# +#allowed_remote_server_names = [] + # List of forbidden server names via regex patterns that we will block all # outgoing federated room directory requests for. Useful for preventing # our users from wandering into bad servers or spaces. diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 2de3b710..22e09956 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1383,12 +1383,24 @@ pub struct Config { /// /// Basically "global" ACLs. /// + /// You can set this to ["*"] to block all servers by default, and then + /// use `allowed_remote_server_names` to allow only specific servers. + /// /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] /// /// default: [] #[serde(default, with = "serde_regex")] pub forbidden_remote_server_names: RegexSet, + /// List of allowed server names via regex patterns that we will allow, + /// regardless of if they match `forbidden_remote_server_names`. + /// + /// example: ["goodserver\.tld$", "goodphrase"] + /// + /// default: [] + #[serde(default, with = "serde_regex")] + pub allowed_remote_server_names: RegexSet, + /// List of forbidden server names via regex patterns that we will block all /// outgoing federated room directory requests for. Useful for preventing /// our users from wandering into bad servers or spaces. 
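To make the interaction between `forbidden_remote_server_names` and `allowed_remote_server_names` concrete, the sketch below models the decision order the new moderation checks (shown in the diff that follows) apply: the local server is never blocked, the allowlist overrides, and only then does the forbidden set match. This is a simplified, self-contained illustration built directly on `regex::RegexSet` rather than the service code; the server names are placeholders, and `.*` is used here as the catch-all pattern for an allowlist-only setup.

```rust
use regex::RegexSet;

/// Simplified model of the check order: never block our own server, let the
/// allowlist override, then fall back to the forbidden set.
fn is_remote_server_forbidden(
    server: &str,
    own_server: &str,
    allowed: &RegexSet,
    forbidden: &RegexSet,
) -> bool {
    if server == own_server {
        return false;
    }
    if allowed.is_match(server) {
        return false;
    }
    forbidden.is_match(server)
}

fn main() {
    // Allowlist-only federation: forbid everything by default, then allow a
    // couple of specific servers (all names here are made up).
    let forbidden = RegexSet::new([r".*"]).unwrap();
    let allowed = RegexSet::new([r"^goodserver\.tld$", r"\.example\.org$"]).unwrap();

    assert!(!is_remote_server_forbidden("my.server", "my.server", &allowed, &forbidden));
    assert!(!is_remote_server_forbidden("goodserver.tld", "my.server", &allowed, &forbidden));
    assert!(is_remote_server_forbidden("random.host", "my.server", &allowed, &forbidden));
}
```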
diff --git a/src/service/moderation.rs b/src/service/moderation.rs index bd2616f6..d571de88 100644 --- a/src/service/moderation.rs +++ b/src/service/moderation.rs @@ -24,8 +24,23 @@ impl crate::Service for Service { #[implement(Service)] #[must_use] pub fn is_remote_server_forbidden(&self, server_name: &ServerName) -> bool { - // Forbidden if NOT (allowed is empty OR allowed contains server OR is self) - // OR forbidden contains server + // We must never block federating with ourselves + if server_name == self.services.server.config.server_name { + return false; + } + + // Check if server is explicitly allowed + if self + .services + .server + .config + .allowed_remote_server_names + .is_match(server_name.host()) + { + return false; + } + + // Check if server is explicitly forbidden self.services .server .config From 84445b84580720b5f296f525d7df655f4195d833 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 20 Apr 2025 00:16:29 +0100 Subject: [PATCH 264/310] docs: Document backfill bypassing federation restrictions --- conduwuit-example.toml | 4 ++++ src/core/config/mod.rs | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 326127c3..8f86fdd0 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1200,6 +1200,10 @@ # Additionally, it will hide messages from these servers for all users # on this server. # +# Note that your messages can still make it to forbidden servers through +# backfilling. Events we receive from forbidden servers via backfill will +# be stored in the database, but will not be sent to the client. +# # This check is applied on the room ID, room alias, sender server name, # sender user's server name, inbound federation X-Matrix origin, and # outbound federation handler. diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 22e09956..cde5c313 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1377,6 +1377,10 @@ pub struct Config { /// Additionally, it will hide messages from these servers for all users /// on this server. /// + /// Note that your messages can still make it to forbidden servers through + /// backfilling. Events we receive from forbidden servers via backfill will + /// be stored in the database, but will not be sent to the client. + /// /// This check is applied on the room ID, room alias, sender server name, /// sender user's server name, inbound federation X-Matrix origin, and /// outbound federation handler. From fe7963d30648addbc4ecfd1df3798cf0f5c0c8fa Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 20 Apr 2025 00:31:08 +0100 Subject: [PATCH 265/310] docs: Clarify --- src/core/config/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index cde5c313..800ffc8d 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1385,8 +1385,6 @@ pub struct Config { /// sender user's server name, inbound federation X-Matrix origin, and /// outbound federation handler. /// - /// Basically "global" ACLs. - /// /// You can set this to ["*"] to block all servers by default, and then /// use `allowed_remote_server_names` to allow only specific servers. /// @@ -1399,6 +1397,8 @@ pub struct Config { /// List of allowed server names via regex patterns that we will allow, /// regardless of if they match `forbidden_remote_server_names`. /// + /// This option has no effect if `forbidden_remote_server_names` is empty. 
+ /// /// example: ["goodserver\.tld$", "goodphrase"] /// /// default: [] From 6920814da9867a74fa1b87fb4776c2587fe3bd54 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sun, 20 Apr 2025 02:31:58 +0100 Subject: [PATCH 266/310] Support fi.mau.room_id, and fully qualified room_id in /createRoom --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- src/api/client/room/create.rs | 25 +++++++++++-------------- 3 files changed, 23 insertions(+), 26 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 00aeca81..cf3ac6db 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3652,7 +3652,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" dependencies = [ "assign", "js_int", @@ -3672,7 +3672,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" dependencies = [ "js_int", "ruma-common", @@ -3684,7 +3684,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" dependencies = [ "as_variant", "assign", @@ -3707,7 +3707,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" dependencies = [ "as_variant", "base64 0.22.1", @@ -3739,7 +3739,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" dependencies = [ "as_variant", "indexmap 2.8.0", @@ -3764,7 +3764,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" dependencies = [ "bytes", "headers", @@ -3786,7 +3786,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = 
"git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3795,7 +3795,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" dependencies = [ "js_int", "ruma-common", @@ -3805,7 +3805,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3820,7 +3820,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" dependencies = [ "js_int", "ruma-common", @@ -3832,7 +3832,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=920148dca1076454ca0ca5d43b5ce1aa708381d4#920148dca1076454ca0ca5d43b5ce1aa708381d4" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index e9ae0007..1ad11256 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -350,7 +350,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://forgejo.ellis.link/continuwuation/ruwuma" #branch = "conduwuit-changes" -rev = "920148dca1076454ca0ca5d43b5ce1aa708381d4" +rev = "fa3c868e5a1c049dc9472310dc4955289a96bb35" features = [ "compat", "rand", diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index 4ce53f15..bba5939e 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -606,23 +606,20 @@ fn custom_room_id_check(services: &Services, custom_room_id: &str) -> Result Date: Sun, 20 Apr 2025 02:46:16 +0100 Subject: [PATCH 267/310] Prevent creating custom room IDs belonging to other servers --- src/api/client/room/create.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index bba5939e..2bc6033c 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -621,6 +621,11 @@ fn custom_room_id_check(services: &Services, custom_room_id: &str) -> Result Date: Sun, 20 Apr 2025 15:41:19 +0100 Subject: [PATCH 268/310] Fix invalid room ID check & prevent room IDs being prefixed with ! 
--- src/api/client/room/create.rs | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index 2bc6033c..f5f61784 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -107,7 +107,6 @@ pub(crate) async fn create_room_route( return Err!(Request(Forbidden("Publishing rooms to the room directory is not allowed"))); } - let _short_id = services .rooms .short @@ -615,17 +614,26 @@ fn custom_room_id_check(services: &Services, custom_room_id: &str) -> Result Date: Wed, 9 Apr 2025 19:17:21 +0200 Subject: [PATCH 269/310] config: rocksdb_compaction help was inverted :-) You seem to have replaced `disable_rocksdb_compaction` with `rocksdb_compaction`, since the help is blackmailing me never to set it to `true`, except **true is the default**. I have tried to make it say what you possibly meant. --- conduwuit-example.toml | 8 ++++---- src/core/config/mod.rs | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 8f86fdd0..273d5ea5 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -970,8 +970,8 @@ # #rocksdb_compaction_ioprio_idle = true -# Disables RocksDB compaction. You should never ever have to set this -# option to true. If you for some reason find yourself needing to use this +# Enables RocksDB compaction. You should never ever have to set this +# option to false. If you for some reason find yourself needing to use this # option as part of troubleshooting or a bug, please reach out to us in # the conduwuit Matrix room with information and details. # @@ -1208,8 +1208,6 @@ # sender user's server name, inbound federation X-Matrix origin, and # outbound federation handler. # -# Basically "global" ACLs. -# # You can set this to ["*"] to block all servers by default, and then # use `allowed_remote_server_names` to allow only specific servers. # @@ -1220,6 +1218,8 @@ # List of allowed server names via regex patterns that we will allow, # regardless of if they match `forbidden_remote_server_names`. # +# This option has no effect if `forbidden_remote_server_names` is empty. +# # example: ["goodserver\.tld$", "goodphrase"] # #allowed_remote_server_names = [] diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 800ffc8d..bdfcee41 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1133,8 +1133,8 @@ pub struct Config { #[serde(default = "true_fn")] pub rocksdb_compaction_ioprio_idle: bool, - /// Disables RocksDB compaction. You should never ever have to set this - /// option to true. If you for some reason find yourself needing to use this + /// Enables RocksDB compaction. You should never ever have to set this + /// option to false. If you for some reason find yourself needing to use this /// option as part of troubleshooting or a bug, please reach out to us in /// the conduwuit Matrix room with information and details. /// From 2d9bdc0979ecb1102ca2cc3f6b33d1090bd08025 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 20 Apr 2025 19:30:02 +0100 Subject: [PATCH 270/310] refactor: The update checker has become the announcements checker Replaces June's endpoint with a continuwuity endpoint. Adds a JSON schema. 
Closes #89 Closes #760 --- .forgejo/workflows/documentation.yml | 4 + conduwuit-example.toml | 10 +- docs/static/_headers | 3 + docs/static/announcements.json | 9 ++ docs/static/announcements.schema.json | 31 +++++ src/admin/query/globals.rs | 9 +- src/core/config/mod.rs | 12 +- src/service/announcements/mod.rs | 169 ++++++++++++++++++++++++++ src/service/globals/mod.rs | 4 +- src/service/mod.rs | 2 +- src/service/services.rs | 9 +- src/service/updates/mod.rs | 142 ---------------------- 12 files changed, 238 insertions(+), 166 deletions(-) create mode 100644 docs/static/announcements.json create mode 100644 docs/static/announcements.schema.json create mode 100644 src/service/announcements/mod.rs delete mode 100644 src/service/updates/mod.rs diff --git a/.forgejo/workflows/documentation.yml b/.forgejo/workflows/documentation.yml index c08c1abb..c84c566b 100644 --- a/.forgejo/workflows/documentation.yml +++ b/.forgejo/workflows/documentation.yml @@ -36,9 +36,13 @@ jobs: - name: Prepare static files for deployment run: | mkdir -p ./public/.well-known/matrix + mkdir -p ./public/.well-known/continuwuity + mkdir -p ./public/schema # Copy the Matrix .well-known files cp ./docs/static/server ./public/.well-known/matrix/server cp ./docs/static/client ./public/.well-known/matrix/client + cp ./docs/static/announcements.json ./public/.well-known/continuwuity/announcements + cp ./docs/static/announcements.schema.json ./public/schema/announcements.schema.json # Copy the custom headers file cp ./docs/static/_headers ./public/_headers echo "Copied .well-known files and _headers to ./public" diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 273d5ea5..b6bfd092 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -113,14 +113,10 @@ #new_user_displayname_suffix = "🏳️‍⚧️" # If enabled, conduwuit will send a simple GET request periodically to -# `https://pupbrain.dev/check-for-updates/stable` for any new -# announcements made. Despite the name, this is not an update check -# endpoint, it is simply an announcement check endpoint. +# `https://continuwuity.org/.well-known/continuwuity/announcements` for any new +# announcements or major updates. This is not an update check endpoint. # -# This is disabled by default as this is rarely used except for security -# updates or major updates. -# -#allow_check_for_updates = false +#allow_announcements_check = # Set this to any float value to multiply conduwuit's in-memory LRU caches # with such as "auth_chain_cache_capacity". diff --git a/docs/static/_headers b/docs/static/_headers index 5e960241..6e52de9f 100644 --- a/docs/static/_headers +++ b/docs/static/_headers @@ -1,3 +1,6 @@ /.well-known/matrix/* Access-Control-Allow-Origin: * Content-Type: application/json +/.well-known/continuwuity/* + Access-Control-Allow-Origin: * + Content-Type: application/json \ No newline at end of file diff --git a/docs/static/announcements.json b/docs/static/announcements.json new file mode 100644 index 00000000..9b97d091 --- /dev/null +++ b/docs/static/announcements.json @@ -0,0 +1,9 @@ +{ + "$schema": "https://continuwuity.org/schema/announcements.schema.json", + "announcements": [ + { + "id": 1, + "message": "Welcome to Continuwuity! Important announcements about the project will appear here." 
+ } + ] +} \ No newline at end of file diff --git a/docs/static/announcements.schema.json b/docs/static/announcements.schema.json new file mode 100644 index 00000000..95b1d153 --- /dev/null +++ b/docs/static/announcements.schema.json @@ -0,0 +1,31 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "$id": "https://continuwuity.org/schema/announcements.schema.json", + "type": "object", + "properties": { + "announcements": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "integer" + }, + "message": { + "type": "string" + }, + "date": { + "type": "string" + } + }, + "required": [ + "id", + "message" + ] + } + } + }, + "required": [ + "announcements" + ] + } \ No newline at end of file diff --git a/src/admin/query/globals.rs b/src/admin/query/globals.rs index 3681acfd..c8c1f512 100644 --- a/src/admin/query/globals.rs +++ b/src/admin/query/globals.rs @@ -11,7 +11,7 @@ pub(crate) enum GlobalsCommand { CurrentCount, - LastCheckForUpdatesId, + LastCheckForAnnouncementsId, /// - This returns an empty `Ok(BTreeMap<..>)` when there are no keys found /// for the server. @@ -39,9 +39,12 @@ pub(super) async fn process(subcommand: GlobalsCommand, context: &Context<'_>) - write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") }, - | GlobalsCommand::LastCheckForUpdatesId => { + | GlobalsCommand::LastCheckForAnnouncementsId => { let timer = tokio::time::Instant::now(); - let results = services.updates.last_check_for_updates_id().await; + let results = services + .announcements + .last_check_for_announcements_id() + .await; let query_time = timer.elapsed(); write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```") diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index bdfcee41..033be40a 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -161,14 +161,10 @@ pub struct Config { pub new_user_displayname_suffix: String, /// If enabled, conduwuit will send a simple GET request periodically to - /// `https://pupbrain.dev/check-for-updates/stable` for any new - /// announcements made. Despite the name, this is not an update check - /// endpoint, it is simply an announcement check endpoint. - /// - /// This is disabled by default as this is rarely used except for security - /// updates or major updates. - #[serde(default, alias = "allow_announcements_check")] - pub allow_check_for_updates: bool, + /// `https://continuwuity.org/.well-known/continuwuity/announcements` for any new + /// announcements or major updates. This is not an update check endpoint. + #[serde(alias = "allow_check_for_updates", default = "true_fn")] + pub allow_announcements_check: bool, /// Set this to any float value to multiply conduwuit's in-memory LRU caches /// with such as "auth_chain_cache_capacity". diff --git a/src/service/announcements/mod.rs b/src/service/announcements/mod.rs new file mode 100644 index 00000000..4df8971b --- /dev/null +++ b/src/service/announcements/mod.rs @@ -0,0 +1,169 @@ +//! # Announcements service +//! +//! This service is responsible for checking for announcements and sending them +//! to the client. +//! +//! It is used to send announcements to the admin room and logs. +//! Announcements are stored in /docs/static/announcements right now. +//! The highest seen announcement id is stored in the database. When the +//! announcement check is run, all announcements with an ID higher than those +//! seen before are printed to the console and sent to the admin room. +//! +//!
Old announcements should be deleted to avoid spamming the room on first +//! install. +//! +//! Announcements are displayed as markdown in the admin room, but plain text in +//! the console. + +use std::{sync::Arc, time::Duration}; + +use async_trait::async_trait; +use conduwuit::{Result, Server, debug, info, warn}; +use database::{Deserialized, Map}; +use ruma::events::room::message::RoomMessageEventContent; +use serde::Deserialize; +use tokio::{ + sync::Notify, + time::{MissedTickBehavior, interval}, +}; + +use crate::{Dep, admin, client, globals}; + +pub struct Service { + interval: Duration, + interrupt: Notify, + db: Arc, + services: Services, +} + +struct Services { + admin: Dep, + client: Dep, + globals: Dep, + server: Arc, +} + +#[derive(Debug, Deserialize)] +struct CheckForAnnouncementsResponse { + announcements: Vec, +} + +#[derive(Debug, Deserialize)] +struct CheckForAnnouncementsResponseEntry { + id: u64, + date: Option, + message: String, +} + +const CHECK_FOR_ANNOUNCEMENTS_URL: &str = + "https://continuwuity.org/.well-known/continuwuity/announcements"; +const CHECK_FOR_ANNOUNCEMENTS_INTERVAL: u64 = 7200; // 2 hours +const LAST_CHECK_FOR_ANNOUNCEMENTS_ID: &[u8; 25] = b"last_seen_announcement_id"; +// In conduwuit, this was under b"a" + +#[async_trait] +impl crate::Service for Service { + fn build(args: crate::Args<'_>) -> Result> { + Ok(Arc::new(Self { + interval: Duration::from_secs(CHECK_FOR_ANNOUNCEMENTS_INTERVAL), + interrupt: Notify::new(), + db: args.db["global"].clone(), + services: Services { + globals: args.depend::("globals"), + admin: args.depend::("admin"), + client: args.depend::("client"), + server: args.server.clone(), + }, + })) + } + + #[tracing::instrument(skip_all, name = "announcements", level = "debug")] + async fn worker(self: Arc) -> Result<()> { + if !self.services.globals.allow_announcements_check() { + debug!("Disabling announcements check"); + return Ok(()); + } + + let mut i = interval(self.interval); + i.set_missed_tick_behavior(MissedTickBehavior::Delay); + i.reset_after(self.interval); + loop { + tokio::select! { + () = self.interrupt.notified() => break, + _ = i.tick() => (), + } + + if let Err(e) = self.check().await { + warn!(%e, "Failed to check for announcements"); + } + } + + Ok(()) + } + + fn interrupt(&self) { self.interrupt.notify_waiters(); } + + fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } +} + +impl Service { + #[tracing::instrument(skip_all)] + async fn check(&self) -> Result<()> { + debug_assert!(self.services.server.running(), "server must not be shutting down"); + + let response = self + .services + .client + .default + .get(CHECK_FOR_ANNOUNCEMENTS_URL) + .send() + .await? 
+ .text() + .await?; + + let response = serde_json::from_str::(&response)?; + for announcement in &response.announcements { + if announcement.id > self.last_check_for_announcements_id().await { + self.handle(announcement).await; + self.update_check_for_announcements_id(announcement.id); + } + } + + Ok(()) + } + + #[tracing::instrument(skip_all)] + async fn handle(&self, announcement: &CheckForAnnouncementsResponseEntry) { + if let Some(date) = &announcement.date { + info!("[announcements] {date} {:#}", announcement.message); + } else { + info!("[announcements] {:#}", announcement.message); + } + + self.services + .admin + .send_message(RoomMessageEventContent::text_markdown(format!( + "### New announcement{}\n\n{}", + announcement + .date + .as_ref() + .map_or_else(String::new, |date| format!(" - `{date}`")), + announcement.message + ))) + .await + .ok(); + } + + #[inline] + pub fn update_check_for_announcements_id(&self, id: u64) { + self.db.raw_put(LAST_CHECK_FOR_ANNOUNCEMENTS_ID, id); + } + + pub async fn last_check_for_announcements_id(&self) -> u64 { + self.db + .get(LAST_CHECK_FOR_ANNOUNCEMENTS_ID) + .await + .deserialized() + .unwrap_or(0_u64) + } +} diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index a7a9be9d..a23a4c21 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -127,7 +127,9 @@ impl Service { &self.server.config.new_user_displayname_suffix } - pub fn allow_check_for_updates(&self) -> bool { self.server.config.allow_check_for_updates } + pub fn allow_announcements_check(&self) -> bool { + self.server.config.allow_announcements_check + } pub fn trusted_servers(&self) -> &[OwnedServerName] { &self.server.config.trusted_servers } diff --git a/src/service/mod.rs b/src/service/mod.rs index a3214408..eb15e5ec 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -8,6 +8,7 @@ pub mod services; pub mod account_data; pub mod admin; +pub mod announcements; pub mod appservice; pub mod client; pub mod config; @@ -26,7 +27,6 @@ pub mod server_keys; pub mod sync; pub mod transaction_ids; pub mod uiaa; -pub mod updates; pub mod users; extern crate conduwuit_core as conduwuit; diff --git a/src/service/services.rs b/src/service/services.rs index 5dcc120e..daece245 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -10,11 +10,12 @@ use futures::{Stream, StreamExt, TryStreamExt}; use tokio::sync::Mutex; use crate::{ - account_data, admin, appservice, client, config, emergency, federation, globals, key_backups, + account_data, admin, announcements, appservice, client, config, emergency, federation, + globals, key_backups, manager::Manager, media, moderation, presence, pusher, resolver, rooms, sending, server_keys, service, service::{Args, Map, Service}, - sync, transaction_ids, uiaa, updates, users, + sync, transaction_ids, uiaa, users, }; pub struct Services { @@ -37,9 +38,9 @@ pub struct Services { pub sync: Arc, pub transaction_ids: Arc, pub uiaa: Arc, - pub updates: Arc, pub users: Arc, pub moderation: Arc, + pub announcements: Arc, manager: Mutex>>, pub(crate) service: Arc, @@ -105,9 +106,9 @@ impl Services { sync: build!(sync::Service), transaction_ids: build!(transaction_ids::Service), uiaa: build!(uiaa::Service), - updates: build!(updates::Service), users: build!(users::Service), moderation: build!(moderation::Service), + announcements: build!(announcements::Service), manager: Mutex::new(None), service, diff --git a/src/service/updates/mod.rs b/src/service/updates/mod.rs deleted file mode 100644 index 
28bee65a..00000000 --- a/src/service/updates/mod.rs +++ /dev/null @@ -1,142 +0,0 @@ -use std::{sync::Arc, time::Duration}; - -use async_trait::async_trait; -use conduwuit::{Result, Server, debug, info, warn}; -use database::{Deserialized, Map}; -use ruma::events::room::message::RoomMessageEventContent; -use serde::Deserialize; -use tokio::{ - sync::Notify, - time::{MissedTickBehavior, interval}, -}; - -use crate::{Dep, admin, client, globals}; - -pub struct Service { - interval: Duration, - interrupt: Notify, - db: Arc, - services: Services, -} - -struct Services { - admin: Dep, - client: Dep, - globals: Dep, - server: Arc, -} - -#[derive(Debug, Deserialize)] -struct CheckForUpdatesResponse { - updates: Vec, -} - -#[derive(Debug, Deserialize)] -struct CheckForUpdatesResponseEntry { - id: u64, - date: String, - message: String, -} - -const CHECK_FOR_UPDATES_URL: &str = "https://pupbrain.dev/check-for-updates/stable"; -const CHECK_FOR_UPDATES_INTERVAL: u64 = 7200; // 2 hours -const LAST_CHECK_FOR_UPDATES_COUNT: &[u8; 1] = b"u"; - -#[async_trait] -impl crate::Service for Service { - fn build(args: crate::Args<'_>) -> Result> { - Ok(Arc::new(Self { - interval: Duration::from_secs(CHECK_FOR_UPDATES_INTERVAL), - interrupt: Notify::new(), - db: args.db["global"].clone(), - services: Services { - globals: args.depend::("globals"), - admin: args.depend::("admin"), - client: args.depend::("client"), - server: args.server.clone(), - }, - })) - } - - #[tracing::instrument(skip_all, name = "updates", level = "debug")] - async fn worker(self: Arc) -> Result<()> { - if !self.services.globals.allow_check_for_updates() { - debug!("Disabling update check"); - return Ok(()); - } - - let mut i = interval(self.interval); - i.set_missed_tick_behavior(MissedTickBehavior::Delay); - i.reset_after(self.interval); - loop { - tokio::select! { - () = self.interrupt.notified() => break, - _ = i.tick() => (), - } - - if let Err(e) = self.check().await { - warn!(%e, "Failed to check for updates"); - } - } - - Ok(()) - } - - fn interrupt(&self) { self.interrupt.notify_waiters(); } - - fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } -} - -impl Service { - #[tracing::instrument(skip_all)] - async fn check(&self) -> Result<()> { - debug_assert!(self.services.server.running(), "server must not be shutting down"); - - let response = self - .services - .client - .default - .get(CHECK_FOR_UPDATES_URL) - .send() - .await? 
- .text() - .await?; - - let response = serde_json::from_str::(&response)?; - for update in &response.updates { - if update.id > self.last_check_for_updates_id().await { - self.handle(update).await; - self.update_check_for_updates_id(update.id); - } - } - - Ok(()) - } - - #[tracing::instrument(skip_all)] - async fn handle(&self, update: &CheckForUpdatesResponseEntry) { - info!("{} {:#}", update.date, update.message); - self.services - .admin - .send_message(RoomMessageEventContent::text_markdown(format!( - "### the following is a message from the conduwuit puppy\n\nit was sent on \ - `{}`:\n\n@room: {}", - update.date, update.message - ))) - .await - .ok(); - } - - #[inline] - pub fn update_check_for_updates_id(&self, id: u64) { - self.db.raw_put(LAST_CHECK_FOR_UPDATES_COUNT, id); - } - - pub async fn last_check_for_updates_id(&self) -> u64 { - self.db - .get(LAST_CHECK_FOR_UPDATES_COUNT) - .await - .deserialized() - .unwrap_or(0_u64) - } -} From b7b7d3a9e70007efb7128ee7d108ed15fe5362db Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 20 Apr 2025 23:07:01 +0100 Subject: [PATCH 271/310] chore: Add the current prerelease to cargo.toml --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cf3ac6db..616b1034 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -725,7 +725,7 @@ dependencies = [ [[package]] name = "conduwuit" -version = "0.5.0" +version = "0.5.0-rc.4" dependencies = [ "clap", "conduwuit_admin", @@ -754,7 +754,7 @@ dependencies = [ [[package]] name = "conduwuit_admin" -version = "0.5.0" +version = "0.5.0-rc.4" dependencies = [ "clap", "conduwuit_api", @@ -775,7 +775,7 @@ dependencies = [ [[package]] name = "conduwuit_api" -version = "0.5.0" +version = "0.5.0-rc.4" dependencies = [ "async-trait", "axum", @@ -807,7 +807,7 @@ dependencies = [ [[package]] name = "conduwuit_core" -version = "0.5.0" +version = "0.5.0-rc.4" dependencies = [ "argon2", "arrayvec", @@ -865,7 +865,7 @@ dependencies = [ [[package]] name = "conduwuit_database" -version = "0.5.0" +version = "0.5.0-rc.4" dependencies = [ "async-channel", "conduwuit_core", @@ -883,7 +883,7 @@ dependencies = [ [[package]] name = "conduwuit_macros" -version = "0.5.0" +version = "0.5.0-rc.4" dependencies = [ "itertools 0.14.0", "proc-macro2", @@ -893,7 +893,7 @@ dependencies = [ [[package]] name = "conduwuit_router" -version = "0.5.0" +version = "0.5.0-rc.4" dependencies = [ "axum", "axum-client-ip", @@ -926,7 +926,7 @@ dependencies = [ [[package]] name = "conduwuit_service" -version = "0.5.0" +version = "0.5.0-rc.4" dependencies = [ "async-trait", "base64 0.22.1", diff --git a/Cargo.toml b/Cargo.toml index 1ad11256..5feba474 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ license = "Apache-2.0" readme = "README.md" repository = "https://forgejo.ellis.link/continuwuation/continuwuity" rust-version = "1.86.0" -version = "0.5.0" +version = "0.5.0-rc.4" [workspace.metadata.crane] name = "conduwuit" From 22e7617362880ba9723fc239e5bd7b978599c866 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 20 Apr 2025 23:07:20 +0100 Subject: [PATCH 272/310] chore: Release --- Cargo.lock | 16 ++++++++-------- Cargo.toml | 2 +- src/router/Cargo.toml | 2 +- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 616b1034..afaa5622 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -725,7 +725,7 @@ dependencies = [ [[package]] name = "conduwuit" -version = "0.5.0-rc.4" +version = "0.5.0-rc.5" dependencies = [ "clap", 
"conduwuit_admin", @@ -754,7 +754,7 @@ dependencies = [ [[package]] name = "conduwuit_admin" -version = "0.5.0-rc.4" +version = "0.5.0-rc.5" dependencies = [ "clap", "conduwuit_api", @@ -775,7 +775,7 @@ dependencies = [ [[package]] name = "conduwuit_api" -version = "0.5.0-rc.4" +version = "0.5.0-rc.5" dependencies = [ "async-trait", "axum", @@ -807,7 +807,7 @@ dependencies = [ [[package]] name = "conduwuit_core" -version = "0.5.0-rc.4" +version = "0.5.0-rc.5" dependencies = [ "argon2", "arrayvec", @@ -865,7 +865,7 @@ dependencies = [ [[package]] name = "conduwuit_database" -version = "0.5.0-rc.4" +version = "0.5.0-rc.5" dependencies = [ "async-channel", "conduwuit_core", @@ -883,7 +883,7 @@ dependencies = [ [[package]] name = "conduwuit_macros" -version = "0.5.0-rc.4" +version = "0.5.0-rc.5" dependencies = [ "itertools 0.14.0", "proc-macro2", @@ -893,7 +893,7 @@ dependencies = [ [[package]] name = "conduwuit_router" -version = "0.5.0-rc.4" +version = "0.5.0-rc.5" dependencies = [ "axum", "axum-client-ip", @@ -926,7 +926,7 @@ dependencies = [ [[package]] name = "conduwuit_service" -version = "0.5.0-rc.4" +version = "0.5.0-rc.5" dependencies = [ "async-trait", "base64 0.22.1", diff --git a/Cargo.toml b/Cargo.toml index 5feba474..1517cfc1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ license = "Apache-2.0" readme = "README.md" repository = "https://forgejo.ellis.link/continuwuation/continuwuity" rust-version = "1.86.0" -version = "0.5.0-rc.4" +version = "0.5.0-rc.5" [workspace.metadata.crane] name = "conduwuit" diff --git a/src/router/Cargo.toml b/src/router/Cargo.toml index 31a44983..e4ddcb9b 100644 --- a/src/router/Cargo.toml +++ b/src/router/Cargo.toml @@ -114,11 +114,11 @@ ruma.workspace = true rustls.workspace = true rustls.optional = true sentry.optional = true +sentry.workspace = true sentry-tower.optional = true sentry-tower.workspace = true sentry-tracing.optional = true sentry-tracing.workspace = true -sentry.workspace = true serde_json.workspace = true tokio.workspace = true tower.workspace = true From ff93cfdc6454b990f35424d3d7c17fae14df2c4d Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sun, 20 Apr 2025 23:50:48 +0100 Subject: [PATCH 273/310] Fix up the docs, replace a lot of conduwuit references --- docs/appservices.md | 10 ++-- docs/conduwuit_coc.md | 28 +++++----- docs/configuration.md | 10 ++-- docs/deploying.md | 2 +- docs/deploying/arch-linux.md | 16 +----- docs/deploying/docker-compose.for-traefik.yml | 2 +- docs/deploying/docker-compose.with-caddy.yml | 2 +- .../deploying/docker-compose.with-traefik.yml | 2 +- docs/deploying/docker-compose.yml | 2 +- docs/deploying/docker.md | 44 +++++---------- docs/deploying/freebsd.md | 6 +- docs/deploying/generic.md | 52 +++++++++--------- docs/deploying/nixos.md | 55 ++++--------------- docs/development.md | 20 +++---- docs/introduction.md | 4 +- docs/maintenance.md | 26 ++++----- docs/troubleshooting.md | 38 ++++++------- docs/turn.md | 8 +-- 18 files changed, 133 insertions(+), 194 deletions(-) diff --git a/docs/appservices.md b/docs/appservices.md index 28ea9717..57cd031c 100644 --- a/docs/appservices.md +++ b/docs/appservices.md @@ -3,8 +3,8 @@ ## Getting help If you run into any problems while setting up an Appservice: ask us in -[#conduwuit:puppygock.gay](https://matrix.to/#/#conduwuit:puppygock.gay) or -[open an issue on GitHub](https://github.com/girlbossceo/conduwuit/issues/new). 
+[#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org) or +[open an issue on Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new). ## Set up the appservice - general instructions @@ -14,7 +14,7 @@ later starting it. At some point the appservice guide should ask you to add a registration yaml file to the homeserver. In Synapse you would do this by adding the path to the -homeserver.yaml, but in conduwuit you can do this from within Matrix: +homeserver.yaml, but in Continuwuity you can do this from within Matrix: First, go into the `#admins` room of your homeserver. The first person that registered on the homeserver automatically joins it. Then send a message into @@ -37,9 +37,9 @@ You can confirm it worked by sending a message like this: The server bot should answer with `Appservices (1): your-bridge` -Then you are done. conduwuit will send messages to the appservices and the +Then you are done. Continuwuity will send messages to the appservices and the appservice can send requests to the homeserver. You don't need to restart -conduwuit, but if it doesn't work, restarting while the appservice is running +Continuwuity, but if it doesn't work, restarting while the appservice is running could help. ## Appservice-specific instructions diff --git a/docs/conduwuit_coc.md b/docs/conduwuit_coc.md index 0fce2fe3..19262765 100644 --- a/docs/conduwuit_coc.md +++ b/docs/conduwuit_coc.md @@ -1,17 +1,17 @@ -# conduwuit Community Code of Conduct +# Continuwuity Community Code of Conduct -Welcome to the conduwuit community! We’re excited to have you here. conduwuit is +Welcome to the Continuwuity community! We’re excited to have you here. Continuwuity is a hard-fork of the Conduit homeserver, aimed at making Matrix more accessible and inclusive for everyone. This space is dedicated to fostering a positive, supportive, and inclusive -environment for everyone. This Code of Conduct applies to all conduwuit spaces, +environment for everyone. This Code of Conduct applies to all Continuwuity spaces, including any further community rooms that reference this CoC. Here are our -guidelines to help maintain the welcoming atmosphere that sets conduwuit apart. +guidelines to help maintain the welcoming atmosphere that sets Continuwuity apart. For the general foundational rules, please refer to the [Contributor's -Covenant](https://github.com/girlbossceo/conduwuit/blob/main/CODE_OF_CONDUCT.md). -Below are additional guidelines specific to the conduwuit community. +Covenant](https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/CODE_OF_CONDUCT.md). +Below are additional guidelines specific to the Continuwuity community. ## Our Values and Guidelines @@ -48,25 +48,25 @@ members. ## Matrix Community -This Code of Conduct applies to the entire [conduwuit Matrix -Space](https://matrix.to/#/#conduwuit-space:puppygock.gay) and its rooms, +This Code of Conduct applies to the entire [Continuwuity Matrix +Space](https://matrix.to/#/#space:continuwuity.org) and its rooms, including: -### [#conduwuit:puppygock.gay](https://matrix.to/#/#conduwuit:puppygock.gay) +### [#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org) -This room is for support and discussions about conduwuit. Ask questions, share +This room is for support and discussions about Continuwuity. Ask questions, share insights, and help each other out. 
-### [#conduwuit-offtopic:girlboss.ceo](https://matrix.to/#/#conduwuit-offtopic:girlboss.ceo) +### [#continuwuity-offtopic:continuwuity.org](https://matrix.to/#/#continuwuity-offtopic:continuwuity.org) For off-topic community conversations about any subject. While this room allows for a wide range of topics, the same CoC applies. Keep discussions respectful and inclusive, and avoid divisive subjects like country/world politics. General topics, such as world events, are welcome as long as they follow the CoC. -### [#conduwuit-dev:puppygock.gay](https://matrix.to/#/#conduwuit-dev:puppygock.gay) +### [#dev:continuwuity.org](https://matrix.to/#/#dev:continuwuity.org) -This room is dedicated to discussing active development of conduwuit. Posting +This room is dedicated to discussing active development of Continuwuity. Posting requires an elevated power level, which can be requested in one of the other rooms. Use this space to collaborate and innovate. @@ -90,4 +90,4 @@ comfortable doing that, then please send a DM to one of the moderators directly. Together, let’s build a community where everyone feels valued and respected. -— The conduwuit Moderation Team +— The Continuwuity Moderation Team diff --git a/docs/configuration.md b/docs/configuration.md index 0c670210..778e5c56 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -1,10 +1,10 @@ # Configuration -This chapter describes various ways to configure conduwuit. +This chapter describes various ways to configure Continuwuity. ## Basics -conduwuit uses a config file for the majority of the settings, but also supports +Continuwuity uses a config file for the majority of the settings, but also supports setting individual config options via commandline. Please refer to the [example config @@ -12,13 +12,13 @@ file](./configuration/examples.md#example-configuration) for all of those settings. The config file to use can be specified on the commandline when running -conduwuit by specifying the `-c`, `--config` flag. Alternatively, you can use +Continuwuity by specifying the `-c`, `--config` flag. Alternatively, you can use the environment variable `CONDUWUIT_CONFIG` to specify the config file to used. Conduit's environment variables are supported for backwards compatibility. ## Option commandline flag -conduwuit supports setting individual config options in TOML format from the +Continuwuity supports setting individual config options in TOML format from the `-O` / `--option` flag. For example, you can set your server name via `-O server_name=\"example.com\"`. @@ -33,7 +33,7 @@ string. This does not apply to options that take booleans or numbers: ## Execute commandline flag -conduwuit supports running admin commands on startup using the commandline +Continuwuity supports running admin commands on startup using the commandline argument `--execute`. The most notable use for this is to create an admin user on first startup. diff --git a/docs/deploying.md b/docs/deploying.md index 86277aba..be1bf736 100644 --- a/docs/deploying.md +++ b/docs/deploying.md @@ -1,3 +1,3 @@ # Deploying -This chapter describes various ways to deploy conduwuit. +This chapter describes various ways to deploy Continuwuity. diff --git a/docs/deploying/arch-linux.md b/docs/deploying/arch-linux.md index 7436e5bf..a14201e3 100644 --- a/docs/deploying/arch-linux.md +++ b/docs/deploying/arch-linux.md @@ -1,15 +1,3 @@ -# conduwuit for Arch Linux +# Continuwuity for Arch Linux -Currently conduwuit is only on the Arch User Repository (AUR). 
- -The conduwuit AUR packages are community maintained and are not maintained by -conduwuit development team, but the AUR package maintainers are in the Matrix -room. Please attempt to verify your AUR package's PKGBUILD file looks fine -before asking for support. - -- [conduwuit](https://aur.archlinux.org/packages/conduwuit) - latest tagged -conduwuit -- [conduwuit-git](https://aur.archlinux.org/packages/conduwuit-git) - latest git -conduwuit from `main` branch -- [conduwuit-bin](https://aur.archlinux.org/packages/conduwuit-bin) - latest -tagged conduwuit static binary +Continuwuity does not have any Arch Linux packages at this time. diff --git a/docs/deploying/docker-compose.for-traefik.yml b/docs/deploying/docker-compose.for-traefik.yml index 366f6999..694bd112 100644 --- a/docs/deploying/docker-compose.for-traefik.yml +++ b/docs/deploying/docker-compose.for-traefik.yml @@ -4,7 +4,7 @@ services: homeserver: ### If you already built the conduduwit image with 'docker build' or want to use the Docker Hub image, ### then you are ready to go. - image: girlbossceo/conduwuit:latest + image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped volumes: - db:/var/lib/conduwuit diff --git a/docs/deploying/docker-compose.with-caddy.yml b/docs/deploying/docker-compose.with-caddy.yml index 431cf2d4..8ff8076a 100644 --- a/docs/deploying/docker-compose.with-caddy.yml +++ b/docs/deploying/docker-compose.with-caddy.yml @@ -22,7 +22,7 @@ services: homeserver: ### If you already built the conduwuit image with 'docker build' or want to use a registry image, ### then you are ready to go. - image: girlbossceo/conduwuit:latest + image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped volumes: - db:/var/lib/conduwuit diff --git a/docs/deploying/docker-compose.with-traefik.yml b/docs/deploying/docker-compose.with-traefik.yml index 89118c74..842bf945 100644 --- a/docs/deploying/docker-compose.with-traefik.yml +++ b/docs/deploying/docker-compose.with-traefik.yml @@ -4,7 +4,7 @@ services: homeserver: ### If you already built the conduwuit image with 'docker build' or want to use the Docker Hub image, ### then you are ready to go. - image: girlbossceo/conduwuit:latest + image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped volumes: - db:/var/lib/conduwuit diff --git a/docs/deploying/docker-compose.yml b/docs/deploying/docker-compose.yml index ca33b5f5..ca56d0b0 100644 --- a/docs/deploying/docker-compose.yml +++ b/docs/deploying/docker-compose.yml @@ -4,7 +4,7 @@ services: homeserver: ### If you already built the conduwuit image with 'docker build' or want to use a registry image, ### then you are ready to go. - image: girlbossceo/conduwuit:latest + image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped ports: - 8448:6167 diff --git a/docs/deploying/docker.md b/docs/deploying/docker.md index bdbfb59c..bd6eff1d 100644 --- a/docs/deploying/docker.md +++ b/docs/deploying/docker.md @@ -1,31 +1,20 @@ -# conduwuit for Docker +# Continuwuity for Docker ## Docker -To run conduwuit with Docker you can either build the image yourself or pull it +To run Continuwuity with Docker you can either build the image yourself or pull it from a registry. ### Use a registry -OCI images for conduwuit are available in the registries listed below. +OCI images for Continuwuity are available in the registries listed below. 
-| Registry | Image | Size | Notes | -| --------------- | --------------------------------------------------------------- | ----------------------------- | ---------------------- | -| GitHub Registry | [ghcr.io/girlbossceo/conduwuit:latest][gh] | ![Image Size][shield-latest] | Stable latest tagged image. | -| GitLab Registry | [registry.gitlab.com/conduwuit/conduwuit:latest][gl] | ![Image Size][shield-latest] | Stable latest tagged image. | -| Docker Hub | [docker.io/girlbossceo/conduwuit:latest][dh] | ![Image Size][shield-latest] | Stable latest tagged image. | -| GitHub Registry | [ghcr.io/girlbossceo/conduwuit:main][gh] | ![Image Size][shield-main] | Stable main branch. | -| GitLab Registry | [registry.gitlab.com/conduwuit/conduwuit:main][gl] | ![Image Size][shield-main] | Stable main branch. | -| Docker Hub | [docker.io/girlbossceo/conduwuit:main][dh] | ![Image Size][shield-main] | Stable main branch. | +| Registry | Image | Notes | +| --------------- | --------------------------------------------------------------- | -----------------------| +| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:latest][fj] | Latest tagged image. | +| Forgejo Registry| [forgejo.ellis.link/continuwuation/continuwuity:main][fj] | Main branch image. | -[dh]: https://hub.docker.com/r/girlbossceo/conduwuit -[gh]: https://github.com/girlbossceo/conduwuit/pkgs/container/conduwuit -[gl]: https://gitlab.com/conduwuit/conduwuit/container_registry/6369729 -[shield-latest]: https://img.shields.io/docker/image-size/girlbossceo/conduwuit/latest -[shield-main]: https://img.shields.io/docker/image-size/girlbossceo/conduwuit/main - -OCI image `.tar.gz` files are also hosted directly at when uploaded by CI with a -commit hash/revision or a tagged release: +[fj]: https://forgejo.ellis.link/continuwuation/-/packages/container/continuwuity Use @@ -52,11 +41,11 @@ or you can use [docker compose](#docker-compose). The `-d` flag lets the container run in detached mode. You may supply an optional `conduwuit.toml` config file, the example config can be found [here](../configuration/examples.md). You can pass in different env vars to -change config values on the fly. You can even configure conduwuit completely by +change config values on the fly. You can even configure Continuwuity completely by using env vars. For an overview of possible values, please take a look at the [`docker-compose.yml`](docker-compose.yml) file. -If you just want to test conduwuit for a short time, you can use the `--rm` +If you just want to test Continuwuity for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it. @@ -91,11 +80,11 @@ docker network create caddy After that, you can rename it so it matches `docker-compose.yml` and spin up the containers! -Additional info about deploying conduwuit can be found [here](generic.md). +Additional info about deploying Continuwuity can be found [here](generic.md). ### Build -Official conduwuit images are built using Nix's +Official Continuwuity images are built using Nix's [`buildLayeredImage`][nix-buildlayeredimage]. This ensures all OCI images are repeatable and reproducible by anyone, keeps the images lightweight, and can be built offline. @@ -104,13 +93,11 @@ This also ensures portability of our images because `buildLayeredImage` builds OCI images, not Docker images, and works with other container software. 
The OCI images are OS-less with only a very minimal environment of the `tini` -init system, CA certificates, and the conduwuit binary. This does mean there is +init system, CA certificates, and the Continuwuity binary. This does mean there is not a shell, but in theory you can get a shell by adding the necessary layers to the layered image. However it's very unlikely you will need a shell for any real troubleshooting. -The flake file for the OCI image definition is at [`nix/pkgs/oci-image/default.nix`][oci-image-def]. - To build an OCI image using Nix, the following outputs can be built: - `nix build -L .#oci-image` (default features, x86_64 glibc) - `nix build -L .#oci-image-x86_64-linux-musl` (default features, x86_64 musl) @@ -138,10 +125,10 @@ web. With the two provided files, [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or [`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and [`docker-compose.override.yml`](docker-compose.override.yml), it is equally easy -to deploy and use conduwuit, with a little caveat. If you already took a look at +to deploy and use Continuwuity, with a little caveat. If you already took a look at the files, then you should have seen the `well-known` service, and that is the little caveat. Traefik is simply a proxy and loadbalancer and is not able to -serve any kind of content, but for conduwuit to federate, we need to either +serve any kind of content, but for Continuwuity to federate, we need to either expose ports `443` and `8448` or serve two endpoints `.well-known/matrix/client` and `.well-known/matrix/server`. @@ -153,4 +140,3 @@ those two files. See the [TURN](../turn.md) page. [nix-buildlayeredimage]: https://ryantm.github.io/nixpkgs/builders/images/dockertools/#ssec-pkgs-dockerTools-buildLayeredImage -[oci-image-def]: https://github.com/girlbossceo/conduwuit/blob/main/nix/pkgs/oci-image/default.nix diff --git a/docs/deploying/freebsd.md b/docs/deploying/freebsd.md index 65b40204..3764ffa8 100644 --- a/docs/deploying/freebsd.md +++ b/docs/deploying/freebsd.md @@ -1,5 +1,5 @@ -# conduwuit for FreeBSD +# Continuwuity for FreeBSD -conduwuit at the moment does not provide FreeBSD builds or have FreeBSD packaging, however conduwuit does build and work on FreeBSD using the system-provided RocksDB. +Continuwuity at the moment does not provide FreeBSD builds or have FreeBSD packaging, however Continuwuity does build and work on FreeBSD using the system-provided RocksDB. -Contributions for getting conduwuit packaged are welcome. +Contributions for getting Continuwuity packaged are welcome. diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md index a07da560..46b9b439 100644 --- a/docs/deploying/generic.md +++ b/docs/deploying/generic.md @@ -2,11 +2,11 @@ > ### Getting help > -> If you run into any problems while setting up conduwuit, ask us in -> `#conduwuit:puppygock.gay` or [open an issue on -> GitHub](https://github.com/girlbossceo/conduwuit/issues/new). +> If you run into any problems while setting up Continuwuity, ask us in +> `#continuwuity:continuwuity.org` or [open an issue on +> Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new). -## Installing conduwuit +## Installing Continuwuity ### Static prebuilt binary @@ -14,12 +14,10 @@ You may simply download the binary that fits your machine architecture (x86_64 or aarch64). Run `uname -m` to see what you need. 
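As a rough illustration of the download step above — exact asset names differ per release, so treat `<asset-url>` below as a placeholder to be copied from the release or CI artifact listing described in this section, not a real path:

```bash
# Check your architecture first (x86_64 or aarch64)
uname -m

# Download the matching static binary and make it executable.
# <asset-url> is a placeholder; copy the real link from the release assets.
curl -L <asset-url> -o conduwuit
chmod +x ./conduwuit
```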
Prebuilt fully static musl binaries can be downloaded from the latest tagged -release [here](https://github.com/girlbossceo/conduwuit/releases/latest) or +release [here](https://forgejo.ellis.link/continuwuation/continuwuity/releases/latest) or `main` CI branch workflow artifact output. These also include Debian/Ubuntu packages. -Binaries are also available on my website directly at: - These can be curl'd directly from. `ci-bins` are CI workflow binaries by commit hash/revision, and `releases` are tagged releases. Sort by descending last modified for the latest. @@ -37,7 +35,7 @@ for performance. ### Compiling Alternatively, you may compile the binary yourself. We recommend using -Nix (or [Lix](https://lix.systems)) to build conduwuit as this has the most +Nix (or [Lix](https://lix.systems)) to build Continuwuity as this has the most guaranteed reproducibiltiy and easiest to get a build environment and output going. This also allows easy cross-compilation. @@ -51,35 +49,35 @@ If wanting to build using standard Rust toolchains, make sure you install: - `liburing-dev` on the compiling machine, and `liburing` on the target host - LLVM and libclang for RocksDB -You can build conduwuit using `cargo build --release --all-features` +You can build Continuwuity using `cargo build --release --all-features` -## Adding a conduwuit user +## Adding a Continuwuity user -While conduwuit can run as any user it is better to use dedicated users for +While Continuwuity can run as any user it is better to use dedicated users for different services. This also allows you to make sure that the file permissions are correctly set up. -In Debian, you can use this command to create a conduwuit user: +In Debian, you can use this command to create a Continuwuity user: ```bash -sudo adduser --system conduwuit --group --disabled-login --no-create-home +sudo adduser --system continuwuity --group --disabled-login --no-create-home ``` For distros without `adduser` (or where it's a symlink to `useradd`): ```bash -sudo useradd -r --shell /usr/bin/nologin --no-create-home conduwuit +sudo useradd -r --shell /usr/bin/nologin --no-create-home continuwuity ``` ## Forwarding ports in the firewall or the router Matrix's default federation port is port 8448, and clients must be using port 443. If you would like to use only port 443, or a different port, you will need to setup -delegation. conduwuit has config options for doing delegation, or you can configure +delegation. Continuwuity has config options for doing delegation, or you can configure your reverse proxy to manually serve the necessary JSON files to do delegation (see the `[global.well_known]` config section). -If conduwuit runs behind a router or in a container and has a different public +If Continuwuity runs behind a router or in a container and has a different public IP address than the host system these public ports need to be forwarded directly or indirectly to the port mentioned in the config. @@ -94,9 +92,9 @@ on the network level, consider something like NextDNS or Pi-Hole. ## Setting up a systemd service -Two example systemd units for conduwuit can be found +Two example systemd units for Continuwuity can be found [on the configuration page](../configuration/examples.md#debian-systemd-unit-file). -You may need to change the `ExecStart=` path to where you placed the conduwuit +You may need to change the `ExecStart=` path to where you placed the Continuwuity binary if it is not `/usr/bin/conduwuit`. On systems where rsyslog is used alongside journald (i.e. 
Red Hat-based distros @@ -114,9 +112,9 @@ and entering the following: ReadWritePaths=/path/to/custom/database/path ``` -## Creating the conduwuit configuration file +## Creating the Continuwuity configuration file -Now we need to create the conduwuit's config file in +Now we need to create the Continuwuity's config file in `/etc/conduwuit/conduwuit.toml`. The example config can be found at [conduwuit-example.toml](../configuration/examples.md). @@ -127,7 +125,7 @@ RocksDB is the only supported database backend. ## Setting the correct file permissions -If you are using a dedicated user for conduwuit, you will need to allow it to +If you are using a dedicated user for Continuwuity, you will need to allow it to read the config. To do that you can run this: ```bash @@ -139,7 +137,7 @@ If you use the default database path you also need to run this: ```bash sudo mkdir -p /var/lib/conduwuit/ -sudo chown -R conduwuit:conduwuit /var/lib/conduwuit/ +sudo chown -R continuwuity:continuwuity /var/lib/conduwuit/ sudo chmod 700 /var/lib/conduwuit/ ``` @@ -174,13 +172,13 @@ As we would prefer our users to use Caddy, we will not provide configuration fil You will need to reverse proxy everything under following routes: - `/_matrix/` - core Matrix C-S and S-S APIs -- `/_conduwuit/` - ad-hoc conduwuit routes such as `/local_user_count` and +- `/_conduwuit/` - ad-hoc Continuwuity routes such as `/local_user_count` and `/server_version` You can optionally reverse proxy the following individual routes: - `/.well-known/matrix/client` and `/.well-known/matrix/server` if using -conduwuit to perform delegation (see the `[global.well_known]` config section) -- `/.well-known/matrix/support` if using conduwuit to send the homeserver admin +Continuwuity to perform delegation (see the `[global.well_known]` config section) +- `/.well-known/matrix/support` if using Continuwuity to send the homeserver admin contact and support page (formerly known as MSC1929) - `/` if you would like to see `hewwo from conduwuit woof!` at the root @@ -200,7 +198,7 @@ header, making federation non-functional. If a workaround is found, feel free to If using Apache, you need to use `nocanon` in your `ProxyPass` directive to prevent httpd from messing with the `X-Matrix` header (note that Apache isn't very good as a general reverse proxy and we discourage the usage of it if you can). 
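To make the Apache note above concrete, a minimal sketch (assuming the default listening address of `127.0.0.1:6167` used elsewhere in this guide; adapt the paths and vhost to your setup):

```apache
# Sketch only: keep `nocanon` so httpd does not rewrite the X-Matrix header
ProxyPass /_matrix/ http://127.0.0.1:6167/_matrix/ nocanon
ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/
```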
-If using Nginx, you need to give conduwuit the request URI using `$request_uri`, or like so: +If using Nginx, you need to give Continuwuity the request URI using `$request_uri`, or like so: - `proxy_pass http://127.0.0.1:6167$request_uri;` - `proxy_pass http://127.0.0.1:6167;` @@ -209,7 +207,7 @@ Nginx users need to increase `client_max_body_size` (default is 1M) to match ## You're done -Now you can start conduwuit with: +Now you can start Continuwuity with: ```bash sudo systemctl start conduwuit diff --git a/docs/deploying/nixos.md b/docs/deploying/nixos.md index 3c5b0e69..cf2c09e4 100644 --- a/docs/deploying/nixos.md +++ b/docs/deploying/nixos.md @@ -1,66 +1,33 @@ -# conduwuit for NixOS +# Continuwuity for NixOS -conduwuit can be acquired by Nix (or [Lix][lix]) from various places: +Continuwuity can be acquired by Nix (or [Lix][lix]) from various places: * The `flake.nix` at the root of the repo * The `default.nix` at the root of the repo -* From conduwuit's binary cache - -A community maintained NixOS package is available at [`conduwuit`](https://search.nixos.org/packages?channel=unstable&show=conduwuit&from=0&size=50&sort=relevance&type=packages&query=conduwuit) - -### Binary cache - -A binary cache for conduwuit that the CI/CD publishes to is available at the -following places (both are the same just different names): - -``` -https://attic.kennel.juneis.dog/conduit -conduit:eEKoUwlQGDdYmAI/Q/0slVlegqh/QmAvQd7HBSm21Wk= - -https://attic.kennel.juneis.dog/conduwuit -conduwuit:BbycGUgTISsltcmH0qNjFR9dbrQNYgdIAcmViSGoVTE= -``` - -The binary caches were recreated some months ago due to attic issues. The old public -keys were: - -``` -conduit:Isq8FGyEC6FOXH6nD+BOeAA+bKp6X6UIbupSlGEPuOg= -conduwuit:lYPVh7o1hLu1idH4Xt2QHaRa49WRGSAqzcfFd94aOTw= -``` - -If needed, we have a binary cache on Cachix but it is only limited to 5GB: - -``` -https://conduwuit.cachix.org -conduwuit.cachix.org-1:MFRm6jcnfTf0jSAbmvLfhO3KBMt4px+1xaereWXp8Xg= -``` - -If specifying a Git remote URL in your flake, you can use any remotes that -are specified on the README (the mirrors), such as the GitHub: `github:girlbossceo/conduwuit` +* From Continuwuity's binary cache ### NixOS module The `flake.nix` and `default.nix` do not currently provide a NixOS module (contributions welcome!), so [`services.matrix-conduit`][module] from Nixpkgs can be used to configure -conduwuit. +Continuwuity. ### Conduit NixOS Config Module and SQLite Beware! The [`services.matrix-conduit`][module] module defaults to SQLite as a database backend. -Conduwuit dropped SQLite support in favor of exclusively supporting the much faster RocksDB. +Continuwuity dropped SQLite support in favor of exclusively supporting the much faster RocksDB. Make sure that you are using the RocksDB backend before migrating! There is a [tool to migrate a Conduit SQLite database to RocksDB](https://github.com/ShadowJonathan/conduit_toolbox/). -If you want to run the latest code, you should get conduwuit from the `flake.nix` +If you want to run the latest code, you should get Continuwuity from the `flake.nix` or `default.nix` and set [`services.matrix-conduit.package`][package] -appropriately to use conduwuit instead of Conduit. +appropriately to use Continuwuity instead of Conduit. ### UNIX sockets -Due to the lack of a conduwuit NixOS module, when using the `services.matrix-conduit` module +Due to the lack of a Continuwuity NixOS module, when using the `services.matrix-conduit` module a workaround like the one below is necessary to use UNIX sockets. 
This is because the UNIX socket option does not exist in Conduit, and the module forcibly sets the `address` and `port` config options. @@ -84,13 +51,13 @@ disallows the namespace from accessing or creating UNIX sockets and has to be en systemd.services.conduit.serviceConfig.RestrictAddressFamilies = [ "AF_UNIX" ]; ``` -Even though those workarounds are feasible a conduwuit NixOS configuration module, developed and +Even though those workarounds are feasible a Continuwuity NixOS configuration module, developed and published by the community, would be appreciated. ### jemalloc and hardened profile -conduwuit uses jemalloc by default. This may interfere with the [`hardened.nix` profile][hardened.nix] -due to them using `scudo` by default. You must either disable/hide `scudo` from conduwuit, or +Continuwuity uses jemalloc by default. This may interfere with the [`hardened.nix` profile][hardened.nix] +due to them using `scudo` by default. You must either disable/hide `scudo` from Continuwuity, or disable jemalloc like so: ```nix diff --git a/docs/development.md b/docs/development.md index fa7519c0..1e344f41 100644 --- a/docs/development.md +++ b/docs/development.md @@ -4,9 +4,9 @@ Information about developing the project. If you are only interested in using it, you can safely ignore this page. If you plan on contributing, see the [contributor's guide](./contributing.md). -## conduwuit project layout +## Continuwuity project layout -conduwuit uses a collection of sub-crates, packages, or workspace members +Continuwuity uses a collection of sub-crates, packages, or workspace members that indicate what each general area of code is for. All of the workspace members are under `src/`. The workspace definition is at the top level / root `Cargo.toml`. @@ -14,11 +14,11 @@ members are under `src/`. The workspace definition is at the top level / root The crate names are generally self-explanatory: - `admin` is the admin room - `api` is the HTTP API, Matrix C-S and S-S endpoints, etc -- `core` is core conduwuit functionality like config loading, error definitions, +- `core` is core Continuwuity functionality like config loading, error definitions, global utilities, logging infrastructure, etc - `database` is RocksDB methods, helpers, RocksDB config, and general database definitions, utilities, or functions -- `macros` are conduwuit Rust [macros][macros] like general helper macros, logging +- `macros` are Continuwuity Rust [macros][macros] like general helper macros, logging and error handling macros, and [syn][syn] and [procedural macros][proc-macro] used for admin room commands and others - `main` is the "primary" sub-crate. This is where the `main()` function lives, @@ -35,7 +35,7 @@ if you truly find yourself needing to, we recommend reaching out to us in the Matrix room for discussions about it beforehand. The primary inspiration for this design was apart of hot reloadable development, -to support "conduwuit as a library" where specific parts can simply be swapped out. +to support "Continuwuity as a library" where specific parts can simply be swapped out. There is evidence Conduit wanted to go this route too as `axum` is technically an optional feature in Conduit, and can be compiled without the binary or axum library for handling inbound web requests; but it was never completed or worked. @@ -68,10 +68,10 @@ do this if Rust supported workspace-level features to begin with. 
## List of forked dependencies -During conduwuit development, we have had to fork +During Continuwuity development, we have had to fork some dependencies to support our use-cases in some areas. This ranges from things said upstream project won't accept for any reason, faster-paced -development (unresponsive or slow upstream), conduwuit-specific usecases, or +development (unresponsive or slow upstream), Continuwuity-specific usecases, or lack of time to upstream some things. - [ruma/ruma][1]: - various performance @@ -84,7 +84,7 @@ builds seem to be broken on upstream, fixes some broken/suspicious code in places, additional safety measures, and support redzones for Valgrind - [zyansheep/rustyline-async][4]: - tab completion callback and -`CTRL+\` signal quit event for conduwuit console CLI +`CTRL+\` signal quit event for Continuwuity console CLI - [rust-rocksdb/rust-rocksdb][5]: - [`@zaidoon1`][8]'s fork has quicker updates, more up to date dependencies, etc. Our fork fixes musl build @@ -97,7 +97,7 @@ alongside other logging/metrics things ## Debugging with `tokio-console` [`tokio-console`][7] can be a useful tool for debugging and profiling. To make a -`tokio-console`-enabled build of conduwuit, enable the `tokio_console` feature, +`tokio-console`-enabled build of Continuwuity, enable the `tokio_console` feature, disable the default `release_max_log_level` feature, and set the `--cfg tokio_unstable` flag to enable experimental tokio APIs. A build might look like this: @@ -109,7 +109,7 @@ RUSTFLAGS="--cfg tokio_unstable" cargo +nightly build \ --features=systemd,element_hacks,gzip_compression,brotli_compression,zstd_compression,tokio_console ``` -You will also need to enable the `tokio_console` config option in conduwuit when +You will also need to enable the `tokio_console` config option in Continuwuity when starting it. This was due to tokio-console causing gradual memory leak/usage if left enabled. diff --git a/docs/introduction.md b/docs/introduction.md index 9d3a294a..d193f7c7 100644 --- a/docs/introduction.md +++ b/docs/introduction.md @@ -1,4 +1,4 @@ -# conduwuit +# Continuwuity {{#include ../README.md:catchphrase}} @@ -8,7 +8,7 @@ - [Deployment options](deploying.md) -If you want to connect an appservice to conduwuit, take a look at the +If you want to connect an appservice to Continuwuity, take a look at the [appservices documentation](appservices.md). #### How can I contribute? diff --git a/docs/maintenance.md b/docs/maintenance.md index 5c8c853a..b85a1971 100644 --- a/docs/maintenance.md +++ b/docs/maintenance.md @@ -1,14 +1,14 @@ -# Maintaining your conduwuit setup +# Maintaining your Continuwuity setup ## Moderation -conduwuit has moderation through admin room commands. "binary commands" (medium +Continuwuity has moderation through admin room commands. "binary commands" (medium priority) and an admin API (low priority) is planned. Some moderation-related config options are available in the example config such as "global ACLs" and blocking media requests to certain servers. See the example config for the moderation config options under the "Moderation / Privacy / Security" section. -conduwuit has moderation admin commands for: +Continuwuity has moderation admin commands for: - managing room aliases (`!admin rooms alias`) - managing room directory (`!admin rooms directory`) @@ -36,7 +36,7 @@ each object being newline delimited. An example of doing this is: ## Database (RocksDB) Generally there is very little you need to do. 
[Compaction][rocksdb-compaction] -is ran automatically based on various defined thresholds tuned for conduwuit to +is ran automatically based on various defined thresholds tuned for Continuwuity to be high performance with the least I/O amplifcation or overhead. Manually running compaction is not recommended, or compaction via a timer, due to creating unnecessary I/O amplification. RocksDB is built with io_uring support @@ -50,7 +50,7 @@ Some RocksDB settings can be adjusted such as the compression method chosen. See the RocksDB section in the [example config](configuration/examples.md). btrfs users have reported that database compression does not need to be disabled -on conduwuit as the filesystem already does not attempt to compress. This can be +on Continuwuity as the filesystem already does not attempt to compress. This can be validated by using `filefrag -v` on a `.SST` file in your database, and ensure the `physical_offset` matches (no filesystem compression). It is very important to ensure no additional filesystem compression takes place as this can render @@ -70,7 +70,7 @@ they're server logs or database logs, however they are critical RocksDB files related to WAL tracking. The only safe files that can be deleted are the `LOG` files (all caps). These -are the real RocksDB telemetry/log files, however conduwuit has already +are the real RocksDB telemetry/log files, however Continuwuity has already configured to only store up to 3 RocksDB `LOG` files due to generall being useless for average users unless troubleshooting something low-level. If you would like to store nearly none at all, see the `rocksdb_max_log_files` @@ -88,7 +88,7 @@ still be joined together. To restore a backup from an online RocksDB backup: -- shutdown conduwuit +- shutdown Continuwuity - create a new directory for merging together the data - in the online backup created, copy all `.sst` files in `$DATABASE_BACKUP_PATH/shared_checksum` to your new directory @@ -99,9 +99,9 @@ To restore a backup from an online RocksDB backup: if you have multiple) to your new directory - set your `database_path` config option to your new directory, or replace your old one with the new one you crafted -- start up conduwuit again and it should open as normal +- start up Continuwuity again and it should open as normal -If you'd like to do an offline backup, shutdown conduwuit and copy your +If you'd like to do an offline backup, shutdown Continuwuity and copy your `database_path` directory elsewhere. This can be restored with no modifications needed. @@ -110,7 +110,7 @@ directory. ## Media -Media still needs various work, however conduwuit implements media deletion via: +Media still needs various work, however Continuwuity implements media deletion via: - MXC URI or Event ID (unencrypted and attempts to find the MXC URI in the event) @@ -118,17 +118,17 @@ event) - Delete remote media in the past `N` seconds/minutes via filesystem metadata on the file created time (`btime`) or file modified time (`mtime`) -See the `!admin media` command for further information. All media in conduwuit +See the `!admin media` command for further information. All media in Continuwuity is stored at `$DATABASE_DIR/media`. This will be configurable soon. If you are finding yourself needing extensive granular control over media, we recommend looking into [Matrix Media -Repo](https://github.com/t2bot/matrix-media-repo). conduwuit intends to +Repo](https://github.com/t2bot/matrix-media-repo). 
Continuwuity intends to implement various utilities for media, but MMR is dedicated to extensive media management. Built-in S3 support is also planned, but for now using a "S3 filesystem" on -`media/` works. conduwuit also sends a `Cache-Control` header of 1 year and +`media/` works. Continuwuity also sends a `Cache-Control` header of 1 year and immutable for all media requests (download and thumbnail) to reduce unnecessary media requests from browsers, reduce bandwidth usage, and reduce load. diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index d25c9762..37b1a5cd 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -1,9 +1,9 @@ -# Troubleshooting conduwuit +# Troubleshooting Continuwuity > ## Docker users ⚠️ > > Docker is extremely UX unfriendly. Because of this, a ton of issues or support -> is actually Docker support, not conduwuit support. We also cannot document the +> is actually Docker support, not Continuwuity support. We also cannot document the > ever-growing list of Docker issues here. > > If you intend on asking for support and you are using Docker, **PLEASE** @@ -13,14 +13,14 @@ > If there are things like Compose file issues or Dockerhub image issues, those > can still be mentioned as long as they're something we can fix. -## conduwuit and Matrix issues +## Continuwuity and Matrix issues #### Lost access to admin room You can reinvite yourself to the admin room through the following methods: -- Use the `--execute "users make_user_admin "` conduwuit binary +- Use the `--execute "users make_user_admin "` Continuwuity binary argument once to invite yourslf to the admin room on startup -- Use the conduwuit console/CLI to run the `users make_user_admin` command +- Use the Continuwuity console/CLI to run the `users make_user_admin` command - Or specify the `emergency_password` config option to allow you to temporarily log into the server account (`@conduit`) from a web client @@ -29,12 +29,12 @@ log into the server account (`@conduit`) from a web client #### Potential DNS issues when using Docker Docker has issues with its default DNS setup that may cause DNS to not be -properly functional when running conduwuit, resulting in federation issues. The +properly functional when running Continuwuity, resulting in federation issues. The symptoms of this have shown in excessively long room joins (30+ minutes) from very long DNS timeouts, log entries of "mismatching responding nameservers", and/or partial or non-functional inbound/outbound federation. -This is **not** a conduwuit issue, and is purely a Docker issue. It is not +This is **not** a Continuwuity issue, and is purely a Docker issue. It is not sustainable for heavy DNS activity which is normal for Matrix federation. The workarounds for this are: - Use DNS over TCP via the config option `query_over_tcp_only = true` @@ -64,7 +64,7 @@ very computationally expensive, and is extremely susceptible to denial of service, especially on Matrix. Many servers also strangely have broken DNSSEC setups and will result in non-functional federation. -conduwuit cannot provide a "works-for-everyone" Unbound DNS setup guide, but +Continuwuity cannot provide a "works-for-everyone" Unbound DNS setup guide, but the [official Unbound tuning guide][unbound-tuning] and the [Unbound Arch Linux wiki page][unbound-arch] may be of interest. Disabling DNSSEC on Unbound is commenting out trust-anchors config options and removing the `validator` module. 
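For reference, disabling DNSSEC validation in Unbound as described above usually amounts to something like the following sketch — not a complete Unbound configuration, and option names should be checked against your distribution's defaults:

```
server:
    # Drop "validator" from the default "validator iterator" module list
    module-config: "iterator"
    # Leave trust anchor options commented out
    # auto-trust-anchor-file: "/var/lib/unbound/root.key"
```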
@@ -75,9 +75,9 @@ high load, and we have identified its DNS caching to not be very effective. dnsmasq can possibly work, but it does **not** support TCP fallback which can be problematic when receiving large DNS responses such as from large SRV records. If you still want to use dnsmasq, make sure you **disable** `dns_tcp_fallback` -in conduwuit config. +in Continuwuity config. -Raising `dns_cache_entries` in conduwuit config from the default can also assist +Raising `dns_cache_entries` in Continuwuity config from the default can also assist in DNS caching, but a full-fledged external caching resolver is better and more reliable. @@ -97,7 +97,7 @@ If your database is corrupted *and* is failing to start (e.g. checksum mismatch), it may be recoverable but careful steps must be taken, and there is no guarantee it may be recoverable. -The first thing that can be done is launching conduwuit with the +The first thing that can be done is launching Continuwuity with the `rocksdb_repair` config option set to true. This will tell RocksDB to attempt to repair itself at launch. If this does not work, disable the option and continue reading. @@ -109,7 +109,7 @@ RocksDB has the following recovery modes: - `PointInTime` - `SkipAnyCorruptedRecord` -By default, conduwuit uses `TolerateCorruptedTailRecords` as generally these may +By default, Continuwuity uses `TolerateCorruptedTailRecords` as generally these may be due to bad federation and we can re-fetch the correct data over federation. The RocksDB default is `PointInTime` which will attempt to restore a "snapshot" of the data when it was last known to be good. This data can be either a few @@ -126,12 +126,12 @@ if `PointInTime` does not work as a last ditch effort. With this in mind: -- First start conduwuit with the `PointInTime` recovery method. See the [example +- First start Continuwuity with the `PointInTime` recovery method. See the [example config](configuration/examples.md) for how to do this using `rocksdb_recovery_mode` - If your database successfully opens, clients are recommended to clear their client cache to account for the rollback -- Leave your conduwuit running in `PointInTime` for at least 30-60 minutes so as +- Leave your Continuwuity running in `PointInTime` for at least 30-60 minutes so as much possible corruption is restored - If all goes will, you should be able to restore back to using `TolerateCorruptedTailRecords` and you have successfully recovered your database @@ -144,14 +144,14 @@ Various debug commands can be found in `!admin debug`. #### Debug/Trace log level -conduwuit builds without debug or trace log levels at compile time by default +Continuwuity builds without debug or trace log levels at compile time by default for substantial performance gains in CPU usage and improved compile times. If you need to access debug/trace log levels, you will need to build without the `release_max_log_level` feature or use our provided static debug binaries. #### Changing log level dynamically -conduwuit supports changing the tracing log environment filter on-the-fly using +Continuwuity supports changing the tracing log environment filter on-the-fly using the admin command `!admin debug change-log-level `. This accepts a string **without quotes** the same format as the `log` config option. @@ -168,7 +168,7 @@ load, simply pass the `--reset` flag. #### Pinging servers -conduwuit can ping other servers using `!admin debug ping `. This takes +Continuwuity can ping other servers using `!admin debug ping `. 
This takes a server name and goes through the server discovery process and queries `/_matrix/federation/v1/version`. Errors are outputted. @@ -180,12 +180,12 @@ bandwidth and computationally. #### Allocator memory stats When using jemalloc with jemallocator's `stats` feature (`--enable-stats`), you -can see conduwuit's high-level allocator stats by using +can see Continuwuity's high-level allocator stats by using `!admin server memory-usage` at the bottom. If you are a developer, you can also view the raw jemalloc statistics with `!admin debug memory-stats`. Please note that this output is extremely large -which may only be visible in the conduwuit console CLI due to PDU size limits, +which may only be visible in the Continuwuity console CLI due to PDU size limits, and is not easy for non-developers to understand. [unbound-tuning]: https://unbound.docs.nlnetlabs.nl/en/latest/topics/core/performance.html diff --git a/docs/turn.md b/docs/turn.md index 287f2545..5dba823c 100644 --- a/docs/turn.md +++ b/docs/turn.md @@ -1,6 +1,6 @@ # Setting up TURN/STURN -In order to make or receive calls, a TURN server is required. conduwuit suggests +In order to make or receive calls, a TURN server is required. Continuwuity suggests using [Coturn](https://github.com/coturn/coturn) for this purpose, which is also available as a Docker image. @@ -17,9 +17,9 @@ realm= A common way to generate a suitable alphanumeric secret key is by using `pwgen -s 64 1`. -These same values need to be set in conduwuit. See the [example +These same values need to be set in Continuwuity. See the [example config](configuration/examples.md) in the TURN section for configuring these and -restart conduwuit after. +restart Continuwuity after. `turn_secret` or a path to `turn_secret_file` must have a value of your coturn `static-auth-secret`, or use `turn_username` and `turn_password` @@ -34,7 +34,7 @@ If you are using TURN over TLS, you can replace `turn:` with `turns:` in the TURN over TLS. This is highly recommended. If you need unauthenticated access to the TURN URIs, or some clients may be -having trouble, you can enable `turn_guest_access` in conduwuit which disables +having trouble, you can enable `turn_guest_access` in Continuwuity which disables authentication for the TURN URI endpoint `/_matrix/client/v3/voip/turnServer` ### Run From fbd404fa84647c1e4794852028a1b1e5cc27c97d Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Mon, 21 Apr 2025 00:19:08 +0100 Subject: [PATCH 274/310] docs: Update docker documentation --- docs/deploying/docker.md | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/docs/deploying/docker.md b/docs/deploying/docker.md index bd6eff1d..08a0dc4f 100644 --- a/docs/deploying/docker.md +++ b/docs/deploying/docker.md @@ -84,26 +84,28 @@ Additional info about deploying Continuwuity can be found [here](generic.md). ### Build -Official Continuwuity images are built using Nix's -[`buildLayeredImage`][nix-buildlayeredimage]. This ensures all OCI images are -repeatable and reproducible by anyone, keeps the images lightweight, and can be -built offline. +Official Continuwuity images are built using **Docker Buildx** and the Dockerfile found at [`docker/Dockerfile`][dockerfile-path]. This approach uses common Docker tooling and enables multi-platform builds efficiently. -This also ensures portability of our images because `buildLayeredImage` builds -OCI images, not Docker images, and works with other container software. 
+The resulting images are broadly compatible with Docker and other container runtimes like Podman or containerd. -The OCI images are OS-less with only a very minimal environment of the `tini` -init system, CA certificates, and the Continuwuity binary. This does mean there is -not a shell, but in theory you can get a shell by adding the necessary layers -to the layered image. However it's very unlikely you will need a shell for any -real troubleshooting. +The images *do not contain a shell*. They contain only the Continuwuity binary, required libraries, TLS certificates and metadata. Please refer to the [`docker/Dockerfile`][dockerfile-path] for the specific details of the image composition. -To build an OCI image using Nix, the following outputs can be built: -- `nix build -L .#oci-image` (default features, x86_64 glibc) -- `nix build -L .#oci-image-x86_64-linux-musl` (default features, x86_64 musl) -- `nix build -L .#oci-image-aarch64-linux-musl` (default features, aarch64 musl) -- `nix build -L .#oci-image-x86_64-linux-musl-all-features` (all features, x86_64 musl) -- `nix build -L .#oci-image-aarch64-linux-musl-all-features` (all features, aarch64 musl) +To build an image locally using Docker Buildx, you can typically run a command like: + +```bash +# Build for the current platform and load into the local Docker daemon +docker buildx build --load --tag continuwuity:latest -f docker/Dockerfile . + +# Example: Build for specific platforms and push to a registry. +# docker buildx build --platform linux/amd64,linux/arm64 --tag registry.io/org/continuwuity:latest -f docker/Dockerfile . --push + +# Example: Build binary optimized for the current CPU +# docker buildx build --load --tag continuwuity:latest --build-arg TARGET_CPU=native -f docker/Dockerfile . +``` + +Refer to the Docker Buildx documentation for more advanced build options. + +[dockerfile-path]: ../../docker/Dockerfile ### Run From c68378ffe34574d8e876e98ec6b961f08a0041a8 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Mon, 21 Apr 2025 00:38:47 +0100 Subject: [PATCH 275/310] docs: Update 'Try it out' section --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index deaed364..9b8f142b 100644 --- a/README.md +++ b/README.md @@ -46,8 +46,9 @@ Continuwuity aims to: ### Can I try it out? -Not right now. We've still got work to do! +Check out the [documentation](introduction) for installation instructions. +There are currently no open registration Continuwuity instances available. ### What are we working on? @@ -111,3 +112,4 @@ When incorporating code from other forks: [continuwuity]: https://forgejo.ellis.link/continuwuation/continuwuity +F From c7ac2483a968560b922ae7d37adc564d828ed9d1 Mon Sep 17 00:00:00 2001 From: n Date: Tue, 22 Apr 2025 01:27:50 +0000 Subject: [PATCH 276/310] Fix offtopic room link Signed-off-by: n --- docs/conduwuit_coc.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/conduwuit_coc.md b/docs/conduwuit_coc.md index 19262765..9a084150 100644 --- a/docs/conduwuit_coc.md +++ b/docs/conduwuit_coc.md @@ -57,7 +57,7 @@ including: This room is for support and discussions about Continuwuity. Ask questions, share insights, and help each other out. -### [#continuwuity-offtopic:continuwuity.org](https://matrix.to/#/#continuwuity-offtopic:continuwuity.org) +### [#offtopic:continuwuity.org](https://matrix.to/#/#offtopic:continuwuity.org) For off-topic community conversations about any subject. 
While this room allows for a wide range of topics, the same CoC applies. Keep discussions respectful From 0c302f31371aedbd313228004f1971b9e0a5d64c Mon Sep 17 00:00:00 2001 From: nex Date: Tue, 22 Apr 2025 01:33:09 +0000 Subject: [PATCH 277/310] Don't re-build images for docs changes Ironically, this will trigger a rebuild anyway --- .forgejo/workflows/release-image.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index adf70594..2cb6a329 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -11,6 +11,7 @@ on: - 'renovate.json' - 'debian/**' - 'docker/**' + - 'docs/**' # Allows you to run this workflow manually from the Actions tab workflow_dispatch: From 7beff25d3d3d59cba3a8b634a92e9a562267b1fc Mon Sep 17 00:00:00 2001 From: Nyx Tutt Date: Mon, 21 Apr 2025 20:45:05 -0500 Subject: [PATCH 278/310] Update welcome message --- src/service/admin/grant.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/admin/grant.rs b/src/service/admin/grant.rs index 6780b7ae..2d90ea52 100644 --- a/src/service/admin/grant.rs +++ b/src/service/admin/grant.rs @@ -126,7 +126,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result { if self.services.server.config.admin_room_notices { let welcome_message = String::from( - "## Thank you for trying out conduwuit!\n\nconduwuit is technically a hard fork of Conduit, which is in Beta. The Beta status initially was inherited from Conduit, however overtime this Beta status is rapidly becoming less and less relevant as our codebase significantly diverges more and more. conduwuit is quite stable and very usable as a daily driver and for a low-medium sized homeserver. There is still a lot of more work to be done, but it is in a far better place than the project was in early 2024.\n\nHelpful links:\n> GitHub Repo: https://github.com/girlbossceo/conduwuit\n> Documentation: https://conduwuit.puppyirl.gay/\n> Report issues: https://github.com/girlbossceo/conduwuit/issues\n\nFor a list of available commands, send the following message in this room: `!admin --help`\n\nHere are some rooms you can join (by typing the command into your client) -\n\nconduwuit space: `/join #conduwuit-space:puppygock.gay`\nconduwuit main room (Ask questions and get notified on updates): `/join #conduwuit:puppygock.gay`\nconduwuit offtopic room: `/join #conduwuit-offtopic:puppygock.gay`", + "## Thank you for trying out Continuwuity!\n\nContinuwuity is a hard fork of conduwuit, which is also a hard fork of Conduit, currently in Beta. The Beta status initially was inherited from Conduit, however overtime this Beta status is rapidly becoming less and less relevant as our codebase significantly diverges more and more. Continuwuity is quite stable and very usable as a daily driver and for a low-medium sized homeserver. 
There is still a lot of more work to be done, but it is in a far better place than the project was in early 2024.\n\nHelpful links:\n> Source code: https://forgejo.ellis.link/continuwuation/continuwuity\n> Documentation: https://continuwuity.org/\n> Report issues: https://forgejo.ellis.link/continuwuation/continuwuity/issues\n\nFor a list of available commands, send the following message in this room: `!admin --help`\n\nHere are some rooms you can join (by typing the command into your client) -\n\nContinuwuity space: `/join #space:continuwuity.org`\nContinuwuity main room (Ask questions and get notified on updates): `/join #continuwuity:continuwuity.org`\nContinuwuity offtopic room: `/join #offtopic:continuwuity.org`", ); // Send welcome message From 66e8cd8908a7a96e1f929c948ccef0238a790a60 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Mon, 21 Apr 2025 15:19:30 +0100 Subject: [PATCH 279/310] docs: Tone down the docker warning --- docs/troubleshooting.md | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 37b1a5cd..81e90636 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -2,16 +2,9 @@ > ## Docker users ⚠️ > -> Docker is extremely UX unfriendly. Because of this, a ton of issues or support -> is actually Docker support, not Continuwuity support. We also cannot document the -> ever-growing list of Docker issues here. -> -> If you intend on asking for support and you are using Docker, **PLEASE** -> triple validate your issues are **NOT** because you have a misconfiguration in -> your Docker setup. -> -> If there are things like Compose file issues or Dockerhub image issues, those -> can still be mentioned as long as they're something we can fix. +> Docker can be difficult to use and debug. It's common for Docker +> misconfigurations to cause issues, particularly with networking and permissions. +> Please check that your issues are not due to problems with your Docker setup. ## Continuwuity and Matrix issues From 81f8151acabc3f0f824c7c04b47e31ed61422a8b Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Mon, 21 Apr 2025 16:15:31 +0100 Subject: [PATCH 280/310] docs: Add matrix rooms to README --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 9b8f142b..bf4f5613 100644 --- a/README.md +++ b/README.md @@ -106,10 +106,10 @@ When incorporating code from other forks: #### Contact - +Join our [Matrix room](https://matrix.to/#/#continuwuity:continuwuity.org) and [space](https://matrix.to/#/#space:continuwuity.org) to chat with us about the project! [continuwuity]: https://forgejo.ellis.link/continuwuation/continuwuity -F + From 1d42b88f501fb2c5115f2f4d569cd9b07171fcb6 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Mon, 21 Apr 2025 16:26:38 +0100 Subject: [PATCH 281/310] docs: Update Docker DNS troubleshooting section --- docs/troubleshooting.md | 46 ++++++++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 19 deletions(-) diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md index 81e90636..d84dbc7a 100644 --- a/docs/troubleshooting.md +++ b/docs/troubleshooting.md @@ -1,6 +1,6 @@ # Troubleshooting Continuwuity -> ## Docker users ⚠️ +> **Docker users ⚠️** > > Docker can be difficult to use and debug. It's common for Docker > misconfigurations to cause issues, particularly with networking and permissions. 
@@ -8,9 +8,10 @@ ## Continuwuity and Matrix issues -#### Lost access to admin room +### Lost access to admin room You can reinvite yourself to the admin room through the following methods: + - Use the `--execute "users make_user_admin "` Continuwuity binary argument once to invite yourslf to the admin room on startup - Use the Continuwuity console/CLI to run the `users make_user_admin` command @@ -19,22 +20,29 @@ log into the server account (`@conduit`) from a web client ## General potential issues -#### Potential DNS issues when using Docker +### Potential DNS issues when using Docker -Docker has issues with its default DNS setup that may cause DNS to not be -properly functional when running Continuwuity, resulting in federation issues. The -symptoms of this have shown in excessively long room joins (30+ minutes) from -very long DNS timeouts, log entries of "mismatching responding nameservers", +Docker's DNS setup for containers in a non-default network intercepts queries to +enable resolving of container hostnames to IP addresses. However, due to +performance issues with Docker's built-in resolver, this can cause DNS queries +to take a long time to resolve, resulting in federation issues. + +This is particularly common with Docker Compose, as custom networks are easily +created and configured. + +Symptoms of this include excessively long room joins (30+ minutes) from very +long DNS timeouts, log entries of "mismatching responding nameservers", and/or partial or non-functional inbound/outbound federation. -This is **not** a Continuwuity issue, and is purely a Docker issue. It is not -sustainable for heavy DNS activity which is normal for Matrix federation. The -workarounds for this are: -- Use DNS over TCP via the config option `query_over_tcp_only = true` -- Don't use Docker's default DNS setup and instead allow the container to use -and communicate with your host's DNS servers (host's `/etc/resolv.conf`) +This is not a bug in continuwuity. Docker's default DNS resolver is not suitable +for heavy DNS activity, which is normal for federated protocols like Matrix. -#### DNS No connections available error message +Workarounds: + +- Use DNS over TCP via the config option `query_over_tcp_only = true` +- Bypass Docker's default DNS setup and instead allow the container to use and communicate with your host's DNS servers. Typically, this can be done by mounting the host's `/etc/resolv.conf`. + +### DNS No connections available error message If you receive spurious amounts of error logs saying "DNS No connections available", this is due to your DNS server (servers from `/etc/resolv.conf`) @@ -84,7 +92,7 @@ reliability at a slight performance cost due to TCP overhead. ## RocksDB / database issues -#### Database corruption +### Database corruption If your database is corrupted *and* is failing to start (e.g. checksum mismatch), it may be recoverable but careful steps must be taken, and there is @@ -135,14 +143,14 @@ Note that users should not really be debugging things. If you find yourself debugging and find the issue, please let us know and/or how we can fix it. Various debug commands can be found in `!admin debug`. -#### Debug/Trace log level +### Debug/Trace log level Continuwuity builds without debug or trace log levels at compile time by default for substantial performance gains in CPU usage and improved compile times. If you need to access debug/trace log levels, you will need to build without the `release_max_log_level` feature or use our provided static debug binaries. 
-#### Changing log level dynamically +### Changing log level dynamically Continuwuity supports changing the tracing log environment filter on-the-fly using the admin command `!admin debug change-log-level `. This accepts @@ -159,7 +167,7 @@ load, simply pass the `--reset` flag. `!admin debug change-log-level --reset` -#### Pinging servers +### Pinging servers Continuwuity can ping other servers using `!admin debug ping `. This takes a server name and goes through the server discovery process and queries @@ -170,7 +178,7 @@ server performance on either side as that endpoint is completely unauthenticated and simply fetches a string on a static JSON endpoint. It is very low cost both bandwidth and computationally. -#### Allocator memory stats +### Allocator memory stats When using jemalloc with jemallocator's `stats` feature (`--enable-stats`), you can see Continuwuity's high-level allocator stats by using From 0307238bf890a3a0249cd7d91c0ae3728664fe4b Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 22 Apr 2025 14:29:02 +0100 Subject: [PATCH 282/310] docs: Work around DNS issues in example compose files --- docs/deploying/docker-compose.for-traefik.yml | 1 + docs/deploying/docker-compose.with-caddy.yml | 1 + docs/deploying/docker-compose.with-traefik.yml | 1 + 3 files changed, 3 insertions(+) diff --git a/docs/deploying/docker-compose.for-traefik.yml b/docs/deploying/docker-compose.for-traefik.yml index 694bd112..57b124c7 100644 --- a/docs/deploying/docker-compose.for-traefik.yml +++ b/docs/deploying/docker-compose.for-traefik.yml @@ -8,6 +8,7 @@ services: restart: unless-stopped volumes: - db:/var/lib/conduwuit + - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. #- ./conduwuit.toml:/etc/conduwuit.toml networks: - proxy diff --git a/docs/deploying/docker-compose.with-caddy.yml b/docs/deploying/docker-compose.with-caddy.yml index 8ff8076a..ac4fb1ff 100644 --- a/docs/deploying/docker-compose.with-caddy.yml +++ b/docs/deploying/docker-compose.with-caddy.yml @@ -26,6 +26,7 @@ services: restart: unless-stopped volumes: - db:/var/lib/conduwuit + - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. #- ./conduwuit.toml:/etc/conduwuit.toml environment: CONDUWUIT_SERVER_NAME: example.com # EDIT THIS diff --git a/docs/deploying/docker-compose.with-traefik.yml b/docs/deploying/docker-compose.with-traefik.yml index 842bf945..86ad9cb6 100644 --- a/docs/deploying/docker-compose.with-traefik.yml +++ b/docs/deploying/docker-compose.with-traefik.yml @@ -8,6 +8,7 @@ services: restart: unless-stopped volumes: - db:/var/lib/conduwuit + - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. 
#- ./conduwuit.toml:/etc/conduwuit.toml networks: - proxy From 4fbecca2d31f5cbc8992a6ab854910e6afddeea6 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Wed, 23 Apr 2025 17:39:26 +0100 Subject: [PATCH 283/310] Add well-known/matrix/support --- docs/static/support | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 docs/static/support diff --git a/docs/static/support b/docs/static/support new file mode 100644 index 00000000..3fee0187 --- /dev/null +++ b/docs/static/support @@ -0,0 +1,24 @@ +{ + "contacts": [ + { + "email_address": "security@continuwuity.org", + "role": "m.role.security" + }, + { + "matrix_id": "@tom:continuwuity.org", + "email_address": "tom@continuwuity.org", + "role": "m.role.admin" + }, + { + "matrix_id": "@jade:continuwuity.org", + "email_address": "jade@continuwuity.org", + "role": "m.role.admin" + }, + { + "matrix_id": "@nex:continuwuity.org", + "email_address": "nex@continuwuity.org", + "role": "m.role.admin" + } + ], + "support_page": "https://continuwuity.org/introduction#contact" +} \ No newline at end of file From 8f21403796078d586746829eba1701b9810fd462 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Wed, 23 Apr 2025 17:39:36 +0100 Subject: [PATCH 284/310] Use any runner for docs --- .forgejo/workflows/documentation.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.forgejo/workflows/documentation.yml b/.forgejo/workflows/documentation.yml index c84c566b..55f25058 100644 --- a/.forgejo/workflows/documentation.yml +++ b/.forgejo/workflows/documentation.yml @@ -16,7 +16,7 @@ concurrency: jobs: docs: name: Build and Deploy Documentation - runs-on: not-nexy + runs-on: ubuntu-latest steps: - name: Sync repository @@ -41,6 +41,7 @@ jobs: # Copy the Matrix .well-known files cp ./docs/static/server ./public/.well-known/matrix/server cp ./docs/static/client ./public/.well-known/matrix/client + cp ./docs/static/client ./public/.well-known/matrix/support cp ./docs/static/announcements.json ./public/.well-known/continuwuity/announcements cp ./docs/static/announcements.schema.json ./public/schema/announcements.schema.json # Copy the custom headers file From 3eb4ee7af1b8980009bcbe0f6c0585a9d235ffb6 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Wed, 23 Apr 2025 19:30:20 +0100 Subject: [PATCH 285/310] Change tom's email address --- docs/static/support | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/static/support b/docs/static/support index 3fee0187..6b7a9860 100644 --- a/docs/static/support +++ b/docs/static/support @@ -6,7 +6,7 @@ }, { "matrix_id": "@tom:continuwuity.org", - "email_address": "tom@continuwuity.org", + "email_address": "tom@tcpip.uk", "role": "m.role.admin" }, { From f791dc69185b853139ed7f6705ad6d095b398e33 Mon Sep 17 00:00:00 2001 From: Nyx Tutt Date: Tue, 22 Apr 2025 07:56:42 -0500 Subject: [PATCH 286/310] docs: Rename in more places --- docs/deploying/docker-compose.for-traefik.yml | 6 ++--- docs/deploying/docker-compose.override.yml | 6 ++--- docs/deploying/docker-compose.with-caddy.yml | 4 ++-- .../deploying/docker-compose.with-traefik.yml | 10 ++++----- docs/deploying/docker-compose.yml | 6 ++--- docs/deploying/kubernetes.md | 8 +++---- docs/development/hot_reload.md | 22 +++++++++---------- src/admin/server/mod.rs | 2 +- 8 files changed, 32 insertions(+), 32 deletions(-) diff --git a/docs/deploying/docker-compose.for-traefik.yml b/docs/deploying/docker-compose.for-traefik.yml index 57b124c7..04142e0c 100644 --- a/docs/deploying/docker-compose.for-traefik.yml +++ 
b/docs/deploying/docker-compose.for-traefik.yml @@ -1,4 +1,4 @@ -# conduwuit - Behind Traefik Reverse Proxy +# Continuwuity - Behind Traefik Reverse Proxy services: homeserver: @@ -36,14 +36,14 @@ services: server=your.server.name.example:443 } #cpuset: "0-4" # Uncomment to limit to specific CPU cores - ulimits: # conduwuit uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it + ulimits: # Continuwuity uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it nofile: soft: 1048567 hard: 1048567 ### Uncomment if you want to use your own Element-Web App. ### Note: You need to provide a config.json for Element and you also need a second - ### Domain or Subdomain for the communication between Element and conduwuit + ### Domain or Subdomain for the communication between Element and Continuwuity ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md # element-web: # image: vectorim/element-web:latest diff --git a/docs/deploying/docker-compose.override.yml b/docs/deploying/docker-compose.override.yml index a343eeee..ec82fac3 100644 --- a/docs/deploying/docker-compose.override.yml +++ b/docs/deploying/docker-compose.override.yml @@ -1,4 +1,4 @@ -# conduwuit - Traefik Reverse Proxy Labels +# Continuwuity - Traefik Reverse Proxy Labels services: homeserver: @@ -6,7 +6,7 @@ services: - "traefik.enable=true" - "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network - - "traefik.http.routers.to-conduwuit.rule=Host(`.`)" # Change to the address on which conduwuit is hosted + - "traefik.http.routers.to-conduwuit.rule=Host(`.`)" # Change to the address on which Continuwuity is hosted - "traefik.http.routers.to-conduwuit.tls=true" - "traefik.http.routers.to-conduwuit.tls.certresolver=letsencrypt" - "traefik.http.routers.to-conduwuit.middlewares=cors-headers@docker" @@ -16,7 +16,7 @@ services: - "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization" - "traefik.http.middlewares.cors-headers.headers.accessControlAllowMethods=GET, POST, PUT, DELETE, OPTIONS" - # If you want to have your account on , but host conduwuit on a subdomain, + # If you want to have your account on , but host Continuwuity on a subdomain, # you can let it only handle the well known file on that domain instead #- "traefik.http.routers.to-matrix-wellknown.rule=Host(``) && PathPrefix(`/.well-known/matrix`)" #- "traefik.http.routers.to-matrix-wellknown.tls=true" diff --git a/docs/deploying/docker-compose.with-caddy.yml b/docs/deploying/docker-compose.with-caddy.yml index ac4fb1ff..9ee98428 100644 --- a/docs/deploying/docker-compose.with-caddy.yml +++ b/docs/deploying/docker-compose.with-caddy.yml @@ -1,6 +1,6 @@ services: caddy: - # This compose file uses caddy-docker-proxy as the reverse proxy for conduwuit! + # This compose file uses caddy-docker-proxy as the reverse proxy for Continuwuity! 
# For more info, visit https://github.com/lucaslorentz/caddy-docker-proxy image: lucaslorentz/caddy-docker-proxy:ci-alpine ports: @@ -20,7 +20,7 @@ services: caddy.1_respond: /.well-known/matrix/client {"m.server":{"base_url":"https://matrix.example.com"},"m.homeserver":{"base_url":"https://matrix.example.com"},"org.matrix.msc3575.proxy":{"url":"https://matrix.example.com"}} homeserver: - ### If you already built the conduwuit image with 'docker build' or want to use a registry image, + ### If you already built the Continuwuity image with 'docker build' or want to use a registry image, ### then you are ready to go. image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped diff --git a/docs/deploying/docker-compose.with-traefik.yml b/docs/deploying/docker-compose.with-traefik.yml index 86ad9cb6..9083b796 100644 --- a/docs/deploying/docker-compose.with-traefik.yml +++ b/docs/deploying/docker-compose.with-traefik.yml @@ -1,8 +1,8 @@ -# conduwuit - Behind Traefik Reverse Proxy +# Continuwuity - Behind Traefik Reverse Proxy services: homeserver: - ### If you already built the conduwuit image with 'docker build' or want to use the Docker Hub image, + ### If you already built the Continuwuity image with 'docker build' or want to use the Docker Hub image, ### then you are ready to go. image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped @@ -22,7 +22,7 @@ services: CONDUWUIT_PORT: 6167 # you need to match this with the traefik load balancer label if you're want to change it CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit #CONDUWUIT_CONFIG: '/etc/conduit.toml' # Uncomment if you mapped config toml above - ### Uncomment and change values as desired, note that conduwuit has plenty of config options, so you should check out the example example config too + ### Uncomment and change values as desired, note that Continuwuity has plenty of config options, so you should check out the example example config too # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging # CONDUWUIT_LOG: info # default is: "warn,state_res=warn" # CONDUWUIT_ALLOW_ENCRYPTION: 'true' @@ -44,14 +44,14 @@ services: server=your.server.name.example:443 } #cpuset: "0-4" # Uncomment to limit to specific CPU cores - ulimits: # conduwuit uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it + ulimits: # Continuwuity uses quite a few file descriptors, and on some systems it defaults to 1024, so you can tell docker to increase it nofile: soft: 1048567 hard: 1048567 ### Uncomment if you want to use your own Element-Web App. 
### Note: You need to provide a config.json for Element and you also need a second - ### Domain or Subdomain for the communication between Element and conduwuit + ### Domain or Subdomain for the communication between Element and Continuwuity ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md # element-web: # image: vectorim/element-web:latest diff --git a/docs/deploying/docker-compose.yml b/docs/deploying/docker-compose.yml index ca56d0b0..1a3ab811 100644 --- a/docs/deploying/docker-compose.yml +++ b/docs/deploying/docker-compose.yml @@ -1,8 +1,8 @@ -# conduwuit +# Continuwuity services: homeserver: - ### If you already built the conduwuit image with 'docker build' or want to use a registry image, + ### If you already built the Continuwuity image with 'docker build' or want to use a registry image, ### then you are ready to go. image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped @@ -28,7 +28,7 @@ services: # ### Uncomment if you want to use your own Element-Web App. ### Note: You need to provide a config.json for Element and you also need a second - ### Domain or Subdomain for the communication between Element and conduwuit + ### Domain or Subdomain for the communication between Element and Continuwuity ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md # element-web: # image: vectorim/element-web:latest diff --git a/docs/deploying/kubernetes.md b/docs/deploying/kubernetes.md index d7721722..aceb2d52 100644 --- a/docs/deploying/kubernetes.md +++ b/docs/deploying/kubernetes.md @@ -1,8 +1,8 @@ -# conduwuit for Kubernetes +# Continuwuity for Kubernetes -conduwuit doesn't support horizontal scalability or distributed loading +Continuwuity doesn't support horizontal scalability or distributed loading natively, however a community maintained Helm Chart is available here to run -conduwuit on Kubernetes: +Continuwuity on Kubernetes: Should changes need to be made, please reach out to the maintainer in our -Matrix room as this is not maintained/controlled by the conduwuit maintainers. +Matrix room as this is not maintained/controlled by the Continuwuity maintainers. diff --git a/docs/development/hot_reload.md b/docs/development/hot_reload.md index 65fd4adf..ecfb6396 100644 --- a/docs/development/hot_reload.md +++ b/docs/development/hot_reload.md @@ -5,7 +5,7 @@ guaranteed to work at this time. ### Summary -When developing in debug-builds with the nightly toolchain, conduwuit is modular +When developing in debug-builds with the nightly toolchain, Continuwuity is modular using dynamic libraries and various parts of the application are hot-reloadable while the server is running: http api handlers, admin commands, services, database, etc. These are all split up into individual workspace crates as seen @@ -42,7 +42,7 @@ library, macOS, and likely other host architectures are not supported (if other architectures work, feel free to let us know and/or make a PR updating this). This should work on GNU ld and lld (rust-lld) and gcc/clang, however if you happen to have linker issues it's recommended to try using `mold` or `gold` -linkers, and please let us know in the [conduwuit Matrix room][7] the linker +linkers, and please let us know in the [Continuwuity Matrix room][7] the linker error and what linker solved this issue so we can figure out a solution. 
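As a sketch of what such a linker swap can look like, one local option is a `.cargo/config.toml` override rather than editing the workspace `Cargo.toml`. This assumes clang and mold are installed on the host and is only an illustration; the `rustflags` definitions in the top-level Cargo.toml remain the place this guide actually describes:

```toml
# .cargo/config.toml (local developer override, not part of the repository)
[target.x86_64-unknown-linux-gnu]
rustflags = ["-C", "linker=clang", "-C", "link-arg=-fuse-ld=mold"]
```

For gold, the final flag would be `-fuse-ld=gold` instead; either way, please mention in the Matrix room which linker resolved the error.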
Ideally there should be minimal friction to using this, and in the future a build script (`build.rs`) may be suitable to making this easier to use if the capabilities @@ -52,13 +52,13 @@ allow us. As of 19 May 2024, the instructions for using this are: -0. Have patience. Don't hesitate to join the [conduwuit Matrix room][7] to +0. Have patience. Don't hesitate to join the [Continuwuity Matrix room][7] to receive help using this. As indicated by the various rustflags used and some of the interesting issues linked at the bottom, this is definitely not something the Rust ecosystem or toolchain is used to doing. 1. Install the nightly toolchain using rustup. You may need to use `rustup - override set nightly` in your local conduwuit directory, or use `cargo + override set nightly` in your local Continuwuity directory, or use `cargo +nightly` for all actions. 2. Uncomment `cargo-features` at the top level / root Cargo.toml @@ -85,14 +85,14 @@ LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$HOME/.rustup/toolchains/nightly-x86_64-unknown Cargo should only rebuild what was changed / what's necessary, so it should not be rebuilding all the crates. -9. In your conduwuit server terminal, hit/send `CTRL+C` signal. This will tell - conduwuit to find which libraries need to be reloaded, and reloads them as +9. In your Continuwuity server terminal, hit/send `CTRL+C` signal. This will tell + Continuwuity to find which libraries need to be reloaded, and reloads them as necessary. 10. If there were no errors, it will tell you it successfully reloaded `#` modules, and your changes should now be visible. Repeat 7 - 9 as needed. -To shutdown conduwuit in this setup, hit/send `CTRL+\`. Normal builds still +To shutdown Continuwuity in this setup, hit/send `CTRL+\`. Normal builds still shutdown with `CTRL+C` as usual. Steps 1 - 5 are the initial first-time steps for using this. To remove the hot @@ -101,7 +101,7 @@ reload setup, revert/comment all the Cargo.toml changes. As mentioned in the requirements section, if you happen to have some linker issues, try using the `-fuse-ld=` rustflag and specify mold or gold in all the `rustflags` definitions in the top level Cargo.toml, and please let us know in -the [conduwuit Matrix room][7] the problem. mold can be installed typically +the [Continuwuity Matrix room][7] the problem. mold can be installed typically through your distro, and gold is provided by the binutils package. It's possible a helper script can be made to do all of this, or most preferably @@ -136,7 +136,7 @@ acyclic graph. The primary rule is simple and illustrated in the figure below: **no crate is allowed to call a function or use a variable from a crate below it.** -![conduwuit's dynamic library setup diagram - created by Jason +![Continuwuity's dynamic library setup diagram - created by Jason Volk](assets/libraries.png) When a symbol is referenced between crates they become bound: **crates cannot be @@ -147,7 +147,7 @@ by using an `RTLD_LOCAL` binding for just one link between the main executable and the first crate, freeing the executable from all modules as no global binding ever occurs between them. -![conduwuit's reload and load order diagram - created by Jason +![Continuwuity's reload and load order diagram - created by Jason Volk](assets/reload_order.png) Proper resource management is essential for reliable reloading to occur. This is @@ -196,5 +196,5 @@ The initial implementation PR is available [here][1]. 
[4]: https://github.com/rust-lang/rust/issues/28794#issuecomment-368693049 [5]: https://github.com/rust-lang/cargo/issues/12746 [6]: https://crates.io/crates/hot-lib-reloader/ -[7]: https://matrix.to/#/#conduwuit:puppygock.gay +[7]: https://matrix.to/#/#continuwuity:continuwuity.org [8]: https://crates.io/crates/libloading diff --git a/src/admin/server/mod.rs b/src/admin/server/mod.rs index 60615365..6b99e5de 100644 --- a/src/admin/server/mod.rs +++ b/src/admin/server/mod.rs @@ -36,7 +36,7 @@ pub(super) enum ServerCommand { /// - Print database memory usage statistics MemoryUsage, - /// - Clears all of Conduwuit's caches + /// - Clears all of Continuwuity's caches ClearCaches, /// - Performs an online backup of the database (only available for RocksDB From 1d840950b3a1c721c6c98ac3bb5c843f7fee7b9e Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Wed, 23 Apr 2025 22:20:08 +0100 Subject: [PATCH 287/310] docs: Mention Helm chart is for conduwuit --- docs/deploying/kubernetes.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/deploying/kubernetes.md b/docs/deploying/kubernetes.md index aceb2d52..0cbfbbc0 100644 --- a/docs/deploying/kubernetes.md +++ b/docs/deploying/kubernetes.md @@ -2,7 +2,8 @@ Continuwuity doesn't support horizontal scalability or distributed loading natively, however a community maintained Helm Chart is available here to run -Continuwuity on Kubernetes: +conduwuit on Kubernetes: -Should changes need to be made, please reach out to the maintainer in our -Matrix room as this is not maintained/controlled by the Continuwuity maintainers. +This should be compatible with continuwuity, but you will need to change the image reference. + +Should changes need to be made, please reach out to the maintainer as this is not maintained/controlled by the Continuwuity maintainers. 
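Because the Helm chart still targets conduwuit, the image reference has to be pointed at the Continuwuity registry. The chart's values schema is not reproduced here, so the keys below are only an assumption based on the common `image.repository`/`image.tag` convention; check the chart's own `values.yaml` for the real names:

```yaml
# values.override.yaml: hypothetical keys, adjust to the chart's actual schema
image:
  repository: forgejo.ellis.link/continuwuation/continuwuity
  tag: latest
```

An override like this would then be passed to `helm install` or `helm upgrade` with `-f values.override.yaml`.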
From ee11afb460f03b4f40f4b533f5e3a908d598cd87 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Wed, 23 Apr 2025 01:51:52 +0100 Subject: [PATCH 288/310] Inject reason into federated leave request membership --- src/api/client/membership.rs | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index b1b85b81..2847d668 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -1855,7 +1855,10 @@ pub async fn leave_room( // Ask a remote server if we don't have this room and are not knocking on it if dont_have_room.and(not_knocked).await { - if let Err(e) = remote_leave_room(services, user_id, room_id).boxed().await { + if let Err(e) = remote_leave_room(services, user_id, room_id, reason.clone()) + .boxed() + .await + { warn!(%user_id, "Failed to leave room {room_id} remotely: {e}"); // Don't tell the client about this error } @@ -1940,6 +1943,7 @@ async fn remote_leave_room( services: &Services, user_id: &UserId, room_id: &RoomId, + reason: Option, ) -> Result<()> { let mut make_leave_response_and_server = Err!(BadServerResponse("No remote server available to assist in leaving {room_id}.")); @@ -2056,6 +2060,12 @@ async fn remote_leave_room( .expect("Timestamp is valid js_int value"), ), ); + // Inject the reason key into the event content dict if it exists + if let Some(reason) = reason { + if let Some(CanonicalJsonValue::Object(content)) = leave_event_stub.get_mut("content") { + content.insert("reason".to_owned(), CanonicalJsonValue::String(reason)); + } + } // room v3 and above removed the "event_id" field from remote PDU format match room_version_id { From bfd7ab5a22129d01b3e87c290414c0148ad635bf Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Wed, 23 Apr 2025 17:21:59 +0100 Subject: [PATCH 289/310] Bump ruwuma to 652cc48 --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index afaa5622..216114af 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3652,7 +3652,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" dependencies = [ "assign", "js_int", @@ -3672,7 +3672,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" dependencies = [ "js_int", "ruma-common", @@ -3684,7 +3684,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" dependencies = [ "as_variant", "assign", @@ -3707,7 +3707,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = 
"git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" dependencies = [ "as_variant", "base64 0.22.1", @@ -3739,7 +3739,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" dependencies = [ "as_variant", "indexmap 2.8.0", @@ -3764,7 +3764,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" dependencies = [ "bytes", "headers", @@ -3786,7 +3786,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3795,7 +3795,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" dependencies = [ "js_int", "ruma-common", @@ -3805,7 +3805,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3820,7 +3820,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" dependencies = [ "js_int", "ruma-common", @@ -3832,7 +3832,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=fa3c868e5a1c049dc9472310dc4955289a96bb35#fa3c868e5a1c049dc9472310dc4955289a96bb35" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index 1517cfc1..c0f857c6 100644 --- a/Cargo.toml +++ 
b/Cargo.toml @@ -350,7 +350,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://forgejo.ellis.link/continuwuation/ruwuma" #branch = "conduwuit-changes" -rev = "fa3c868e5a1c049dc9472310dc4955289a96bb35" +rev = "652cc4864203ab7ca60cf9c47b931c0385304cc7" features = [ "compat", "rand", From 4c8dfc4c2c53be1dfd2d8c79182240dd0f4c9cec Mon Sep 17 00:00:00 2001 From: Tom Foster Date: Wed, 23 Apr 2025 22:47:45 +0100 Subject: [PATCH 290/310] Suggested community guidelines changes --- docs/SUMMARY.md | 2 +- docs/community.md | 139 ++++++++++++++++++++++++++++++++++++++++++ docs/conduwuit_coc.md | 93 ---------------------------- 3 files changed, 140 insertions(+), 94 deletions(-) create mode 100644 docs/community.md delete mode 100644 docs/conduwuit_coc.md diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index ad0f8135..473c9e74 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -19,4 +19,4 @@ - [Contributing](contributing.md) - [Testing](development/testing.md) - [Hot Reloading ("Live" Development)](development/hot_reload.md) -- [conduwuit Community Code of Conduct](conduwuit_coc.md) +- [Community (and Guidelines)](community.md) diff --git a/docs/community.md b/docs/community.md new file mode 100644 index 00000000..a6852c0f --- /dev/null +++ b/docs/community.md @@ -0,0 +1,139 @@ +# Continuwuity Community Guidelines + +Welcome to the Continuwuity commuwunity! We're excited to have you here. Continuwuity is a +continuation of the conduwuit homeserver, which in turn is a hard-fork of the Conduit homeserver, +aimed at making Matrix more accessible and inclusive for everyone. + +This space is dedicated to fostering a positive, supportive, and welcoming environment for everyone. +These guidelines apply to all Continuwuity spaces, including our Matrix rooms and any other +community channels that reference them. We've written these guidelines to help us all create an +environment where everyone feels safe and respected. + +For code and contribution guidelines, please refer to the +[Contributor's Covenant](https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/CODE_OF_CONDUCT.md). +Below are additional guidelines specific to the Continuwuity community. + +## Our Values and Expected Behaviors + +We strive to create a community based on mutual respect, collaboration, and inclusivity. We expect +all members to: + +1. **Be Respectful and Inclusive**: Treat everyone with respect. We're committed to a community + where everyone feels safe, regardless of background, identity, or experience. Discrimination, + harassment, or hate speech won't be tolerated. Remember that each person experiences the world + differently; share your own perspective and be open to learning about others'. + +2. **Be Positive and Constructive**: Engage in discussions constructively and support each other. + If you feel angry or frustrated, take a break before participating. Approach disagreements with + the goal of understanding, not winning. Focus on the issue, not the person. + +3. **Communicate Clearly and Kindly**: Our community includes neurodivergent individuals and those + who may not appreciate sarcasm or subtlety. Communicate clearly and kindly. Avoid ambiguity and + ensure your messages can be easily understood by all. Avoid placing the burden of education on + marginalized groups; please make an effort to look into your questions before asking others for + detailed explanations. + +4. **Be Open to Improving Inclusivity**: Actively participate in making our community more inclusive. 
+ Report behaviour that contradicts these guidelines (see Reporting and Enforcement below) and be + open to constructive feedback aimed at improving our community. Understand that discussing + negative experiences can be emotionally taxing; focus on the message, not the tone. + +5. **Commit to Our Values**: Building an inclusive community requires ongoing effort from everyone. + Recognise that addressing bias and discrimination is a continuous process that needs commitment + and action from all members. + +## Unacceptable Behaviors + +To ensure everyone feels safe and welcome, the following behaviors are considered unacceptable +within the Continuwuity community: + +* **Harassment and Discrimination**: Avoid offensive comments related to background, family status, + gender, gender identity or expression, marital status, sex, sexual orientation, native language, + age, ability, race and/or ethnicity, caste, national origin, socioeconomic status, religion, + geographic location, or any other dimension of diversity. Don't deliberately misgender someone or + question the legitimacy of their gender identity. + +* **Violence and Threats**: Do not engage in any form of violence or threats, including inciting + violence towards anyone or encouraging self-harm. Posting or threatening to post someone else's + personally identifying information ("doxxing") is also forbidden. + +* **Personal Attacks**: Disagreements happen, but they should never turn into personal attacks. + Don't insult, demean, or belittle others. + +* **Unwelcome Attention or Contact**: Avoid unwelcome sexual attention, inappropriate physical + contact (or simulation thereof), sexualized comments, jokes, or imagery. + +* **Disruption**: Do not engage in sustained disruption of discussions, events, or other + community activities. + +* **Bad Faith Actions**: Do not intentionally make false reports or otherwise abuse the reporting + process. + +This is not an exhaustive list. Any behaviour that makes others feel unsafe or unwelcome may be +subject to enforcement action. + +## Matrix Community + +These Community Guidelines apply to the entire +[Continuwuity Matrix Space](https://matrix.to/#/#space:continuwuity.org) and its rooms, including: + +### [#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org) + +This room is for support and discussions about Continuwuity. Ask questions, share insights, and help +each other out while adhering to these guidelines. + +We ask that this room remain focused on the Continuwuity software specifically: the team are +typically happy to engage in conversations about related subjects in the off-topic room. + +### [#offtopic:continuwuity.org](https://matrix.to/#/#offtopic:continuwuity.org) + +For off-topic community conversations about any subject. While this room allows for a wide range of +topics, the same guidelines apply. Please keep discussions respectful and inclusive, and avoid +divisive or stressful subjects like specific country/world politics unless handled with exceptional +care and respect for diverse viewpoints. + +General topics, such as world events, are welcome as long as they follow the guidelines. If a member +of the team asks for the conversation to end, please respect their decision. + +### [#dev:continuwuity.org](https://matrix.to/#/#dev:continuwuity.org) + +This room is dedicated to discussing active development of Continuwuity, including ongoing issues or +code development. 
Collaboration here must follow these guidelines, and please consider raising +[an issue](https://forgejo.ellis.link/continuwuation/continuwuity/issues) on the repository to help +track progress. + +## Reporting and Enforcement + +We take these Community Guidelines seriously to protect our community members. If you witness or +experience unacceptable behaviour, or have any other concerns, please report it. + +**How to Report:** + +* **Alert Moderators in the Room:** If you feel comfortable doing so, you can address the issue + publicly in the relevant room by mentioning the moderation bot, `@rock:continuwuity.org`, which + will immediately alert all available moderators. +* **Direct Message:** If you're not comfortable raising the issue publicly, please send a direct + message (DM) to one of the room moderators. + +Reports will be handled with discretion. We will investigate promptly and thoroughly. + +**Enforcement Actions:** + +Anyone asked to stop unacceptable behaviour is expected to comply immediately. Failure to do so, or +engaging in prohibited behaviour, may result in enforcement action. Moderators may take actions they +deem appropriate, including but not limited to: + +1. **Warning**: A direct message or public warning identifying the violation and requesting + corrective action. +2. **Temporary Mute**: Temporary restriction from participating in discussions for a specified + period. +3. **Kick or Ban**: Removal from a room (kick) or the entire community space (ban). Egregious or + repeated violations may result in an immediate ban. Bans are typically permanent and reviewed + only in exceptional circumstances. + +Retaliation against those who report concerns in good faith will not be tolerated and will be +subject to the same enforcement actions. + +Together, let's build and maintain a community where everyone feels valued, safe, and respected. + +— The Continuwuity Moderation Team diff --git a/docs/conduwuit_coc.md b/docs/conduwuit_coc.md deleted file mode 100644 index 9a084150..00000000 --- a/docs/conduwuit_coc.md +++ /dev/null @@ -1,93 +0,0 @@ -# Continuwuity Community Code of Conduct - -Welcome to the Continuwuity community! We’re excited to have you here. Continuwuity is -a hard-fork of the Conduit homeserver, aimed at making Matrix more accessible -and inclusive for everyone. - -This space is dedicated to fostering a positive, supportive, and inclusive -environment for everyone. This Code of Conduct applies to all Continuwuity spaces, -including any further community rooms that reference this CoC. Here are our -guidelines to help maintain the welcoming atmosphere that sets Continuwuity apart. - -For the general foundational rules, please refer to the [Contributor's -Covenant](https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/CODE_OF_CONDUCT.md). -Below are additional guidelines specific to the Continuwuity community. - -## Our Values and Guidelines - -1. **Respect and Inclusivity**: We are committed to maintaining a community - where everyone feels safe and respected. Discrimination, harassment, or hate -speech of any kind will not be tolerated. Recognise that each community member -experiences the world differently based on their past experiences, background, -and identity. Share your own experiences and be open to learning about others' -diverse perspectives. - -2. **Positivity and Constructiveness**: Engage in constructive discussions and - support each other. 
If you feel angry, negative, or aggressive, take a break -until you can participate in a positive and constructive manner. Process intense -feelings with a friend or in a private setting before engaging in community -conversations to help maintain a supportive and focused environment. - -3. **Clarity and Understanding**: Our community includes neurodivergent - individuals and those who may not appreciate sarcasm or subtlety. Communicate -clearly and kindly, avoiding sarcasm and ensuring your messages are easily -understood by all. Additionally, avoid putting the burden of education on -marginalized groups by doing your own research before asking for explanations. - -4. **Be Open to Inclusivity**: Actively engage in conversations about making our - community more inclusive. Report discriminatory behavior to the moderators -and be open to constructive feedback that aims to improve our community. -Understand that discussing discrimination and negative experiences can be -emotionally taxing, so focus on the message rather than critiquing the tone -used. - -5. **Commit to Inclusivity**: Building an inclusive community requires time, - energy, and resources. Recognise that addressing discrimination and bias is -an ongoing process that necessitates commitment and action from all community -members. - -## Matrix Community - -This Code of Conduct applies to the entire [Continuwuity Matrix -Space](https://matrix.to/#/#space:continuwuity.org) and its rooms, -including: - -### [#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org) - -This room is for support and discussions about Continuwuity. Ask questions, share -insights, and help each other out. - -### [#offtopic:continuwuity.org](https://matrix.to/#/#offtopic:continuwuity.org) - -For off-topic community conversations about any subject. While this room allows -for a wide range of topics, the same CoC applies. Keep discussions respectful -and inclusive, and avoid divisive subjects like country/world politics. General -topics, such as world events, are welcome as long as they follow the CoC. - -### [#dev:continuwuity.org](https://matrix.to/#/#dev:continuwuity.org) - -This room is dedicated to discussing active development of Continuwuity. Posting -requires an elevated power level, which can be requested in one of the other -rooms. Use this space to collaborate and innovate. - -## Enforcement - -We have a zero-tolerance policy for violations of this Code of Conduct. If -someone’s behavior makes you uncomfortable, please report it to the moderators. -Actions we may take include: - -1. **Warning**: A warning given directly in the room or via a private message - from the moderators, identifying the violation and requesting corrective -action. -2. **Temporary Mute**: Temporary restriction from participating in discussions - for a specified period to allow for reflection and cooling off. -3. **Kick or Ban**: Egregious behavior may result in an immediate kick or ban to - protect other community members. Bans are considered permanent and will only -be reversed in exceptional circumstances after proven good behavior. - -Please highlight issues directly in rooms when possible, but if you don't feel -comfortable doing that, then please send a DM to one of the moderators directly. - -Together, let’s build a community where everyone feels valued and respected. 
- -— The Continuwuity Moderation Team From f83238df783efd38f3da43d565710708b30ba52b Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Wed, 23 Apr 2025 17:48:26 +0100 Subject: [PATCH 291/310] refactor: Use config service --- src/service/moderation.rs | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/src/service/moderation.rs b/src/service/moderation.rs index d571de88..4a32404c 100644 --- a/src/service/moderation.rs +++ b/src/service/moderation.rs @@ -3,18 +3,24 @@ use std::sync::Arc; use conduwuit::{Result, Server, implement}; use ruma::ServerName; +use crate::{Dep, config}; + pub struct Service { services: Services, } struct Services { pub server: Arc, + pub config: Dep, } impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { - services: Services { server: args.server.clone() }, + services: Services { + server: args.server.clone(), + config: args.depend::("config"), + }, })) } @@ -25,14 +31,13 @@ impl crate::Service for Service { #[must_use] pub fn is_remote_server_forbidden(&self, server_name: &ServerName) -> bool { // We must never block federating with ourselves - if server_name == self.services.server.config.server_name { + if server_name == self.services.config.server_name { return false; } // Check if server is explicitly allowed if self .services - .server .config .allowed_remote_server_names .is_match(server_name.host()) @@ -42,7 +47,6 @@ pub fn is_remote_server_forbidden(&self, server_name: &ServerName) -> bool { // Check if server is explicitly forbidden self.services - .server .config .forbidden_remote_server_names .is_match(server_name.host()) @@ -56,7 +60,6 @@ pub fn is_remote_server_room_directory_forbidden(&self, server_name: &ServerName self.is_remote_server_forbidden(server_name) || self .services - .server .config .forbidden_remote_room_directory_server_names .is_match(server_name.host()) @@ -70,7 +73,6 @@ pub fn is_remote_server_media_downloads_forbidden(&self, server_name: &ServerNam self.is_remote_server_forbidden(server_name) || self .services - .server .config .prevent_media_downloads_from .is_match(server_name.host()) From 45872ede7a4b5a2335295e5ba52dd1bd393d21ec Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Wed, 23 Apr 2025 17:48:33 +0100 Subject: [PATCH 292/310] chore: Fix formatting --- conduwuit-example.toml | 6 +++--- src/api/client/room/create.rs | 29 ++++++++++++++++++----------- src/core/config/mod.rs | 6 +++--- 3 files changed, 24 insertions(+), 17 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index b6bfd092..c87f21ef 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -967,9 +967,9 @@ #rocksdb_compaction_ioprio_idle = true # Enables RocksDB compaction. You should never ever have to set this -# option to false. If you for some reason find yourself needing to use this -# option as part of troubleshooting or a bug, please reach out to us in -# the conduwuit Matrix room with information and details. +# option to false. If you for some reason find yourself needing to use +# this option as part of troubleshooting or a bug, please reach out to us +# in the conduwuit Matrix room with information and details. 
# # Disabling compaction will lead to a significantly bloated and # explosively large database, gradually poor performance, unnecessarily diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index f5f61784..be3fd23b 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -614,24 +614,31 @@ fn custom_room_id_check(services: &Services, custom_room_id: &str) -> Result Date: Wed, 23 Apr 2025 23:54:16 +0100 Subject: [PATCH 293/310] docs: Fix configuration examples and defaults --- conduwuit-example.toml | 6 +++--- src/core/config/mod.rs | 6 ++++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index c87f21ef..b0b59344 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -116,7 +116,7 @@ # `https://continuwuity.org/.well-known/continuwuity/announcements` for any new # announcements or major updates. This is not an update check endpoint. # -#allow_announcements_check = +#allow_announcements_check = true # Set this to any float value to multiply conduwuit's in-memory LRU caches # with such as "auth_chain_cache_capacity". @@ -1207,7 +1207,7 @@ # You can set this to ["*"] to block all servers by default, and then # use `allowed_remote_server_names` to allow only specific servers. # -# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] +# example: ["badserver\\.tld$", "badphrase", "19dollarfortnitecards"] # #forbidden_remote_server_names = [] @@ -1216,7 +1216,7 @@ # # This option has no effect if `forbidden_remote_server_names` is empty. # -# example: ["goodserver\.tld$", "goodphrase"] +# example: ["goodserver\\.tld$", "goodphrase"] # #allowed_remote_server_names = [] diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 6252f177..e3b2a531 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -163,6 +163,8 @@ pub struct Config { /// If enabled, conduwuit will send a simple GET request periodically to /// `https://continuwuity.org/.well-known/continuwuity/announcements` for any new /// announcements or major updates. This is not an update check endpoint. + /// + /// default: true #[serde(alias = "allow_check_for_updates", default = "true_fn")] pub allow_announcements_check: bool, @@ -1384,7 +1386,7 @@ pub struct Config { /// You can set this to ["*"] to block all servers by default, and then /// use `allowed_remote_server_names` to allow only specific servers. /// - /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] + /// example: ["badserver\\.tld$", "badphrase", "19dollarfortnitecards"] /// /// default: [] #[serde(default, with = "serde_regex")] @@ -1395,7 +1397,7 @@ pub struct Config { /// /// This option has no effect if `forbidden_remote_server_names` is empty. 
/// - /// example: ["goodserver\.tld$", "goodphrase"] + /// example: ["goodserver\\.tld$", "goodphrase"] /// /// default: [] #[serde(default, with = "serde_regex")] From cdf105a24eb7398d6e09d0840ff2c11c43c05ce9 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Fri, 25 Apr 2025 02:18:00 +0100 Subject: [PATCH 294/310] Don't serialize the x-key before storing it Co-authored-by: dasha --- src/service/users/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 1eb289fc..701561a8 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -577,7 +577,7 @@ impl Service { self.db .userid_usersigningkeyid - .put(user_id, user_signing_key_key); + .raw_put(user_id, user_signing_key_key); } if notify { From c203c1fead9a040de28b952eafdfdeab9cec77bc Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 24 Apr 2025 22:49:47 +0100 Subject: [PATCH 295/310] chore: Enable blurhashing by default --- src/main/Cargo.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index e2fed5d5..0c5e2b6f 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -36,6 +36,7 @@ assets = [ [features] default = [ + "blurhashing", "brotli_compression", "element_hacks", "gzip_compression", From dcbacb5b78ac679ddb17465c9e60042cff4ab3e7 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 24 Apr 2025 00:40:36 +0100 Subject: [PATCH 296/310] feat: Allow controlling client message filtering --- conduwuit-example.toml | 44 ++++++++++++++++++++++++--------- src/api/client/message.rs | 6 +++-- src/core/config/mod.rs | 52 ++++++++++++++++++++++++++++----------- src/service/moderation.rs | 20 ++++++++++++--- 4 files changed, 90 insertions(+), 32 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index b0b59344..3d92ab15 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1182,23 +1182,13 @@ # #prune_missing_media = false -# Vector list of regex patterns of server names that conduwuit will refuse -# to download remote media from. -# -# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] -# -#prevent_media_downloads_from = [] - # List of forbidden server names via regex patterns that we will block # incoming AND outgoing federation with, and block client room joins / # remote user invites. # -# Additionally, it will hide messages from these servers for all users -# on this server. -# # Note that your messages can still make it to forbidden servers through -# backfilling. Events we receive from forbidden servers via backfill will -# be stored in the database, but will not be sent to the client. +# backfilling. Events we receive from forbidden servers via backfill +# from servers we *do* federate with will be stored in the database. # # This check is applied on the room ID, room alias, sender server name, # sender user's server name, inbound federation X-Matrix origin, and @@ -1220,6 +1210,13 @@ # #allowed_remote_server_names = [] +# Vector list of regex patterns of server names that conduwuit will refuse +# to download remote media from. +# +# example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] +# +#prevent_media_downloads_from = [] + # List of forbidden server names via regex patterns that we will block all # outgoing federated room directory requests for. Useful for preventing # our users from wandering into bad servers or spaces. 
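Putting the moderation patterns above together, here is a hedged sketch that reuses this file's own example values. Note that `allowed_remote_server_names` is evaluated before the forbidden list, and the local server name is never blocked:

```toml
# Block these servers for federation, room joins, and remote invites.
forbidden_remote_server_names = ["badserver\\.tld$", "19dollarfortnitecards"]
# Explicitly allow this server even if a broader forbidden pattern matches it.
allowed_remote_server_names = ["goodserver\\.tld$"]
# Additionally refuse to download remote media from matching servers.
prevent_media_downloads_from = ["badserver\\.tld$"]
```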
@@ -1228,6 +1225,29 @@ # #forbidden_remote_room_directory_server_names = [] +# Vector list of regex patterns of server names that conduwuit will not +# send messages to the client from. +# +# Note that there is no way for clients to receive messages once a server +# has become unignored without doing a full sync. This is a protocol +# limitation with the current sync protocols. This means this is somewhat +# of a nuclear option. +# +# example: ["reallybadserver\.tld$", "reallybadphrase", +# "69dollarfortnitecards"] +# +#ignore_messages_from_server_names = [] + +# Send messages from users that the user has ignored to the client. +# +# There is no way for clients to receive messages sent while a user was +# ignored without doing a full sync. This is a protocol limitation with +# the current sync protocols. Disabling this option will move +# responsibility of ignoring messages to the client, which can avoid this +# limitation. +# +#send_messages_from_ignored_users_to_client = false + # Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you # do not want conduwuit to send outbound requests to. Defaults to # RFC1918, unroutable, loopback, multicast, and testnet addresses for diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 08887e18..bedfdc7a 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -275,10 +275,12 @@ pub(crate) async fn is_ignored_pdu( let ignored_server = services .moderation - .is_remote_server_forbidden(pdu.sender().server_name()); + .is_remote_server_ignored(pdu.sender().server_name()); if ignored_type - && (ignored_server || services.users.user_is_ignored(&pdu.sender, user_id).await) + && (ignored_server + || (!services.config.send_messages_from_ignored_users_to_client + && services.users.user_is_ignored(&pdu.sender, user_id).await)) { return true; } diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index e3b2a531..5374c2c2 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -163,7 +163,7 @@ pub struct Config { /// If enabled, conduwuit will send a simple GET request periodically to /// `https://continuwuity.org/.well-known/continuwuity/announcements` for any new /// announcements or major updates. This is not an update check endpoint. - /// + /// /// default: true #[serde(alias = "allow_check_for_updates", default = "true_fn")] pub allow_announcements_check: bool, @@ -1359,25 +1359,13 @@ pub struct Config { #[serde(default)] pub prune_missing_media: bool, - /// Vector list of regex patterns of server names that conduwuit will refuse - /// to download remote media from. - /// - /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] - /// - /// default: [] - #[serde(default, with = "serde_regex")] - pub prevent_media_downloads_from: RegexSet, - /// List of forbidden server names via regex patterns that we will block /// incoming AND outgoing federation with, and block client room joins / /// remote user invites. /// - /// Additionally, it will hide messages from these servers for all users - /// on this server. - /// /// Note that your messages can still make it to forbidden servers through - /// backfilling. Events we receive from forbidden servers via backfill will - /// be stored in the database, but will not be sent to the client. + /// backfilling. Events we receive from forbidden servers via backfill + /// from servers we *do* federate with will be stored in the database. 
/// /// This check is applied on the room ID, room alias, sender server name, /// sender user's server name, inbound federation X-Matrix origin, and @@ -1403,6 +1391,15 @@ pub struct Config { #[serde(default, with = "serde_regex")] pub allowed_remote_server_names: RegexSet, + /// Vector list of regex patterns of server names that conduwuit will refuse + /// to download remote media from. + /// + /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] + /// + /// default: [] + #[serde(default, with = "serde_regex")] + pub prevent_media_downloads_from: RegexSet, + /// List of forbidden server names via regex patterns that we will block all /// outgoing federated room directory requests for. Useful for preventing /// our users from wandering into bad servers or spaces. @@ -1413,6 +1410,31 @@ pub struct Config { #[serde(default, with = "serde_regex")] pub forbidden_remote_room_directory_server_names: RegexSet, + /// Vector list of regex patterns of server names that conduwuit will not + /// send messages to the client from. + /// + /// Note that there is no way for clients to receive messages once a server + /// has become unignored without doing a full sync. This is a protocol + /// limitation with the current sync protocols. This means this is somewhat + /// of a nuclear option. + /// + /// example: ["reallybadserver\.tld$", "reallybadphrase", + /// "69dollarfortnitecards"] + /// + /// default: [] + #[serde(default, with = "serde_regex")] + pub ignore_messages_from_server_names: RegexSet, + + /// Send messages from users that the user has ignored to the client. + /// + /// There is no way for clients to receive messages sent while a user was + /// ignored without doing a full sync. This is a protocol limitation with + /// the current sync protocols. Disabling this option will move + /// responsibility of ignoring messages to the client, which can avoid this + /// limitation. + #[serde(default)] + pub send_messages_from_ignored_users_to_client: bool, + /// Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you /// do not want conduwuit to send outbound requests to. 
Defaults to /// RFC1918, unroutable, loopback, multicast, and testnet addresses for diff --git a/src/service/moderation.rs b/src/service/moderation.rs index 4a32404c..c3e55a1d 100644 --- a/src/service/moderation.rs +++ b/src/service/moderation.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use conduwuit::{Result, Server, implement}; +use conduwuit::{Result, implement}; use ruma::ServerName; use crate::{Dep, config}; @@ -10,7 +10,7 @@ pub struct Service { } struct Services { - pub server: Arc, + // pub server: Arc, pub config: Dep, } @@ -18,7 +18,7 @@ impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { services: Services { - server: args.server.clone(), + // server: args.server.clone(), config: args.depend::("config"), }, })) @@ -27,6 +27,20 @@ impl crate::Service for Service { fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } +#[implement(Service)] +#[must_use] +pub fn is_remote_server_ignored(&self, server_name: &ServerName) -> bool { + // We must never block federating with ourselves + if server_name == self.services.config.server_name { + return false; + } + + self.services + .config + .ignore_messages_from_server_names + .is_match(server_name.host()) +} + #[implement(Service)] #[must_use] pub fn is_remote_server_forbidden(&self, server_name: &ServerName) -> bool { From 77c4f9ff2f617b92f8afd3ac837fe4adfd7147ce Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Fri, 25 Apr 2025 23:01:05 +0100 Subject: [PATCH 297/310] fix: Do not panic on invalid membership event content --- src/core/matrix/state_res/event_auth.rs | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs index 8c9339ec..c69db50e 100644 --- a/src/core/matrix/state_res/event_auth.rs +++ b/src/core/matrix/state_res/event_auth.rs @@ -38,7 +38,7 @@ struct GetMembership { membership: MembershipState, } -#[derive(Deserialize)] +#[derive(Deserialize, Debug)] struct RoomMemberContentFields { membership: Option>, join_authorised_via_users_server: Option>, @@ -149,9 +149,9 @@ where Incoming: Event + Send + Sync, { debug!( - "auth_check beginning for {} ({})", - incoming_event.event_id(), - incoming_event.event_type() + event_id = format!("{}", incoming_event.event_id()), + event_type = format!("{}", incoming_event.event_type()), + "auth_check beginning" ); // [synapse] check that all the events are in the same room as `incoming_event` @@ -383,10 +383,15 @@ where let sender_membership_event_content: RoomMemberContentFields = from_json_str(sender_member_event.content().get())?; - let membership_state = sender_membership_event_content - .membership - .expect("we should test before that this field exists") - .deserialize()?; + let Some(membership_state) = sender_membership_event_content.membership else { + warn!( + sender_membership_event_content = format!("{sender_membership_event_content:?}"), + event_id = format!("{}", incoming_event.event_id()), + "Sender membership event content missing membership field" + ); + return Err(Error::InvalidPdu("Missing membership field".to_owned())); + }; + let membership_state = membership_state.deserialize()?; if !matches!(membership_state, MembershipState::Join) { warn!("sender's membership is not join"); From 1a5ab33852b1ef301d6a3ce4c3154d430ef24a03 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Fri, 25 Apr 2025 23:51:23 +0100 Subject: [PATCH 298/310] chore: Error on missing ID in messages --- src/api/client/message.rs | 6 
+++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/api/client/message.rs b/src/api/client/message.rs index bedfdc7a..16b1796a 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -1,3 +1,5 @@ +use core::panic; + use axum::extract::State; use conduwuit::{ Err, Result, at, @@ -132,8 +134,6 @@ pub(crate) async fn get_message_events_route( .take(limit) .collect() .await; - // let appservice_id = body.appservice_info.map(|appservice| - // appservice.registration.id); let lazy_loading_context = lazy_loading::Context { user_id: sender_user, @@ -143,7 +143,7 @@ pub(crate) async fn get_message_events_route( if let Some(registration) = body.appservice_info.as_ref() { <&DeviceId>::from(registration.registration.id.as_str()) } else { - <&DeviceId>::from("") + panic!("No device_id provided and no appservice registration found, this should be unreachable"); }, }, room_id, From c698d65a92fb6d3378e9d493b5356ecf067ae286 Mon Sep 17 00:00:00 2001 From: Tom Foster Date: Thu, 24 Apr 2025 17:15:03 +0100 Subject: [PATCH 299/310] Make Cloudflare Pages optional in CI --- .forgejo/workflows/documentation.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.forgejo/workflows/documentation.yml b/.forgejo/workflows/documentation.yml index 55f25058..1bda64f8 100644 --- a/.forgejo/workflows/documentation.yml +++ b/.forgejo/workflows/documentation.yml @@ -57,17 +57,17 @@ jobs: run: npm install --save-dev wrangler@latest - name: Deploy to Cloudflare Pages (Production) - if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} + if: (github.event_name == 'push' && github.ref == 'refs/heads/main') && vars.CLOUDFLARE_PROJECT_NAME != '' uses: https://github.com/cloudflare/wrangler-action@v3 with: accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} - command: pages deploy ./public --branch=main --commit-dirty=true --project-name=${{ vars.CLOUDFLARE_PROJECT_NAME }}" + command: pages deploy ./public --branch=main --commit-dirty=true --project-name=${{ vars.CLOUDFLARE_PROJECT_NAME }} - name: Deploy to Cloudflare Pages (Preview) - if: ${{ github.event_name != 'push' || github.ref != 'refs/heads/main' }} + if: (github.event_name != 'push' || github.ref != 'refs/heads/main') && vars.CLOUDFLARE_PROJECT_NAME != '' uses: https://github.com/cloudflare/wrangler-action@v3 with: accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} - command: pages deploy ./public --branch=${{ github.head_ref }} --commit-dirty=true --project-name=${{ vars.CLOUDFLARE_PROJECT_NAME }}" + command: pages deploy ./public --branch=${{ github.head_ref }} --commit-dirty=true --project-name=${{ vars.CLOUDFLARE_PROJECT_NAME }} From 73c991edd03d29cbac2cd4d0be07254b2c87df82 Mon Sep 17 00:00:00 2001 From: Tom Foster Date: Thu, 24 Apr 2025 17:24:28 +0100 Subject: [PATCH 300/310] Ignore all markdown for auto image builds --- .forgejo/workflows/release-image.yml | 68 +++++++++++++--------------- 1 file changed, 31 insertions(+), 37 deletions(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 2cb6a329..141bfef9 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -1,24 +1,25 @@ name: Release Docker Image -concurrency: +concurrency: group: "release-image-${{ github.ref }}" on: pull_request: push: paths-ignore: - - '.gitlab-ci.yml' - - '.gitignore' - - 'renovate.json' - - 'debian/**' - - 'docker/**' - - 'docs/**' + - "*.md" 
+ - "**/*.md" + - ".gitlab-ci.yml" + - ".gitignore" + - "renovate.json" + - "debian/**" + - "docker/**" + - "docs/**" # Allows you to run this workflow manually from the Actions tab workflow_dispatch: env: - BUILTIN_REGISTRY: forgejo.ellis.link - BUILTIN_REGISTRY_ENABLED: "${{ ((vars.BUILTIN_REGISTRY_USER && secrets.BUILTIN_REGISTRY_PASSWORD) || (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)) && 'true' || 'false' }}" - + BUILTIN_REGISTRY: forgejo.ellis.link + BUILTIN_REGISTRY_ENABLED: "${{ ((vars.BUILTIN_REGISTRY_USER && secrets.BUILTIN_REGISTRY_PASSWORD) || (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)) && 'true' || 'false' }}" jobs: define-variables: @@ -37,7 +38,7 @@ jobs: script: | const githubRepo = '${{ github.repository }}'.toLowerCase() const repoId = githubRepo.split('/')[1] - + core.setOutput('github_repository', githubRepo) const builtinImage = '${{ env.BUILTIN_REGISTRY }}/' + githubRepo let images = [] @@ -48,7 +49,7 @@ jobs: core.setOutput('images_list', images.join(",")) const platforms = ['linux/amd64', 'linux/arm64'] core.setOutput('build_matrix', JSON.stringify({ - platform: platforms, + platform: platforms, include: platforms.map(platform => { return { platform, slug: platform.replace('/', '-') @@ -65,22 +66,15 @@ jobs: attestations: write id-token: write strategy: - matrix: { - "include": [ - { - "platform": "linux/amd64", - "slug": "linux-amd64" - }, - { - "platform": "linux/arm64", - "slug": "linux-arm64" - } - ], - "platform": [ - "linux/amd64", - "linux/arm64" - ] - } + matrix: + { + "include": + [ + { "platform": "linux/amd64", "slug": "linux-amd64" }, + { "platform": "linux/arm64", "slug": "linux-arm64" }, + ], + "platform": ["linux/amd64", "linux/arm64"], + } steps: - name: Echo strategy run: echo '${{ toJSON(fromJSON(needs.define-variables.outputs.build_matrix)) }}' @@ -105,9 +99,9 @@ jobs: - name: Login to builtin registry uses: docker/login-action@v3 with: - registry: ${{ env.BUILTIN_REGISTRY }} - username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }} - password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }} + registry: ${{ env.BUILTIN_REGISTRY }} + username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }} + password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }} # This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels. 
- name: Extract metadata (labels, annotations) for Docker @@ -165,7 +159,7 @@ jobs: run: | mkdir -p /tmp/digests digest="${{ steps.build.outputs.digest }}" - touch "/tmp/digests/${digest#sha256:}" + touch "/tmp/digests/${digest#sha256:}" - name: Upload digest uses: forgejo/upload-artifact@v4 @@ -174,7 +168,7 @@ jobs: path: /tmp/digests/* if-no-files-found: error retention-days: 1 - + merge: runs-on: dind container: ghcr.io/catthehacker/ubuntu:act-latest @@ -190,9 +184,9 @@ jobs: - name: Login to builtin registry uses: docker/login-action@v3 with: - registry: ${{ env.BUILTIN_REGISTRY }} - username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }} - password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }} + registry: ${{ env.BUILTIN_REGISTRY }} + username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }} + password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }} - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 @@ -211,7 +205,7 @@ jobs: images: ${{needs.define-variables.outputs.images}} # default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509 env: - DOCKER_METADATA_ANNOTATIONS_LEVELS: index + DOCKER_METADATA_ANNOTATIONS_LEVELS: index - name: Create manifest list and push working-directory: /tmp/digests From eb886b6760ceea19412050df99703c4825ae56eb Mon Sep 17 00:00:00 2001 From: Tom Foster Date: Thu, 24 Apr 2025 21:24:21 +0100 Subject: [PATCH 301/310] Element Web client build --- .forgejo/workflows/element.yml | 127 +++++++++++++++++++++++++++++++++ 1 file changed, 127 insertions(+) create mode 100644 .forgejo/workflows/element.yml diff --git a/.forgejo/workflows/element.yml b/.forgejo/workflows/element.yml new file mode 100644 index 00000000..db771197 --- /dev/null +++ b/.forgejo/workflows/element.yml @@ -0,0 +1,127 @@ +name: Deploy Element Web + +on: + schedule: + - cron: "0 0 * * *" + workflow_dispatch: + +concurrency: + group: "element-${{ github.ref }}" + cancel-in-progress: true + +jobs: + build-and-deploy: + name: Build and Deploy Element Web + runs-on: ubuntu-latest + + steps: + - name: Setup Node.js + uses: https://code.forgejo.org/actions/setup-node@v4 + with: + node-version: "20" + + - name: Clone, setup, and build Element Web + run: | + echo "Cloning Element Web..." + git clone https://github.com/maunium/element-web + cd element-web + git checkout develop + git pull + + echo "Cloning matrix-js-sdk..." + git clone https://github.com/matrix-org/matrix-js-sdk.git + + echo "Installing Yarn..." + npm install -g yarn + + echo "Installing dependencies..." + yarn install + + echo "Preparing build environment..." + mkdir -p .home + + echo "Cleaning up specific node_modules paths..." + rm -rf node_modules/@types/eslint-scope/ matrix-*-sdk/node_modules/@types/eslint-scope || echo "Cleanup paths not found, continuing." + + echo "Getting matrix-js-sdk commit hash..." + cd matrix-js-sdk + jsver=$(git rev-parse HEAD) + jsver=${jsver:0:12} + cd .. + echo "matrix-js-sdk version hash: $jsver" + + echo "Getting element-web commit hash..." + ver=$(git rev-parse HEAD) + ver=${ver:0:12} + echo "element-web version hash: $ver" + + chmod +x ./build-sh + + export VERSION="$ver-js-$jsver" + echo "Building Element Web version: $VERSION" + ./build-sh + + echo "Checking for build output..." 
+ ls -la webapp/ + + - name: Create config.json + run: | + cat <<EOF > ./element-web/webapp/config.json + { + "default_server_name": "continuwuity.org", + "default_server_config": { + "m.homeserver": { + "base_url": "https://matrix.continuwuity.org" + } + }, + "default_country_code": "GB", + "default_theme": "dark", + "mobile_guide_toast": false, + "show_labs_settings": true, + "room_directory": [ + "continuwuity.org", + "matrixrooms.info" + ], + "settings_defaults": { + "UIFeature.urlPreviews": true, + "UIFeature.feedback": false, + "UIFeature.voip": false, + "UIFeature.shareQrCode": false, + "UIFeature.shareSocial": false, + "UIFeature.locationSharing": false, + "enableSyntaxHighlightLanguageDetection": true + }, + "features": { + "feature_pinning": true, + "feature_custom_themes": true + } + } + EOF + echo "Created ./element-web/webapp/config.json" + cat ./element-web/webapp/config.json + + - name: Upload Artifact + uses: https://code.forgejo.org/actions/upload-artifact@v3 + with: + name: element-web + path: ./element-web/webapp/ + retention-days: 14 + + - name: Install Wrangler + run: npm install --save-dev wrangler@latest + + - name: Deploy to Cloudflare Pages (Production) + if: github.ref == 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != '' + uses: https://github.com/cloudflare/wrangler-action@v3 + with: + accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} + apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} + command: pages deploy ./element-web/webapp --branch="main" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element" + + - name: Deploy to Cloudflare Pages (Preview) + if: github.ref != 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != '' + uses: https://github.com/cloudflare/wrangler-action@v3 + with: + accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} + apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} + command: pages deploy ./element-web/webapp --branch="${{ github.head_ref || github.ref_name }}" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element" From 60caa448b0e2119ee00ad67104da8e19a18982c4 Mon Sep 17 00:00:00 2001 From: Tom Foster Date: Thu, 24 Apr 2025 21:27:41 +0100 Subject: [PATCH 302/310] Tidy up publishing restriction check --- .forgejo/workflows/documentation.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.forgejo/workflows/documentation.yml b/.forgejo/workflows/documentation.yml index 1bda64f8..7d95a317 100644 --- a/.forgejo/workflows/documentation.yml +++ b/.forgejo/workflows/documentation.yml @@ -57,17 +57,17 @@ jobs: run: npm install --save-dev wrangler@latest - name: Deploy to Cloudflare Pages (Production) - if: (github.event_name == 'push' && github.ref == 'refs/heads/main') && vars.CLOUDFLARE_PROJECT_NAME != '' + if: github.ref == 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != '' uses: https://github.com/cloudflare/wrangler-action@v3 with: accountId: ${{
secrets.CLOUDFLARE_ACCOUNT_ID }} apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} - command: pages deploy ./public --branch=${{ github.head_ref }} --commit-dirty=true --project-name=${{ vars.CLOUDFLARE_PROJECT_NAME }} + command: pages deploy ./public --branch="${{ github.head_ref || github.ref_name }}" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}" From b2620e6922a4acf3e2b86ac220f7c4974fa268ca Mon Sep 17 00:00:00 2001 From: Kokomo Date: Sun, 27 Apr 2025 15:43:15 +0000 Subject: [PATCH 303/310] Remove email and add reference to matrix space --- CODE_OF_CONDUCT.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index e77154e7..65ee41e0 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,4 +1,3 @@ - # Contributor Covenant Code of Conduct ## Our Pledge @@ -60,8 +59,7 @@ representative at an online or offline event. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement over email at - or over Matrix at @strawberry:puppygock.gay. +reported to the community leaders responsible for enforcement over Matrix at #continuwuity:continuwuity.org. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the From a9a478f0778a355e82a5b54e7cb292369e39a792 Mon Sep 17 00:00:00 2001 From: Kokomo Date: Sun, 27 Apr 2025 15:47:40 +0000 Subject: [PATCH 304/310] Add back space oops --- CODE_OF_CONDUCT.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 65ee41e0..3ac0a83d 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,3 +1,4 @@ + # Contributor Covenant Code of Conduct ## Our Pledge From 90f1a193e38f41c0108f6009e7a027bb7aa6753b Mon Sep 17 00:00:00 2001 From: Kokomo Date: Sun, 27 Apr 2025 16:06:34 +0000 Subject: [PATCH 305/310] Add maintainer emails --- CODE_OF_CONDUCT.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 3ac0a83d..476e68fb 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,4 +1,3 @@ - # Contributor Covenant Code of Conduct ## Our Pledge @@ -60,7 +59,7 @@ representative at an online or offline event. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported to the community leaders responsible for enforcement over Matrix at #continuwuity:continuwuity.org. +reported to the community leaders responsible for enforcement over Matrix at [#continuwuity:continuwuity.org](https://matrix.to/#/#continuwuity:continuwuity.org) or email at , and respectively. All complaints will be reviewed and investigated promptly and fairly. 
All community leaders are obligated to respect the privacy and security of the From 6b0288dd4c4649e1f3e2772a73e5b204f645d560 Mon Sep 17 00:00:00 2001 From: Kokomo Date: Sun, 27 Apr 2025 18:42:02 +0000 Subject: [PATCH 306/310] Update Contributing.md file (#807) Cleaned up wording and adjusted the links Reviewed-on: https://forgejo.ellis.link/continuwuation/continuwuity/pulls/807 Reviewed-by: nex Reviewed-by: Jade Ellis Co-authored-by: Kokomo Co-committed-by: Kokomo --- CONTRIBUTING.md | 29 ++++++++++++----------------- 1 file changed, 12 insertions(+), 17 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fb540011..ecff7173 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,7 +4,7 @@ This page is for about contributing to conduwuit. The [development](./development.md) page may be of interest for you as well. If you would like to work on an [issue][issues] that is not assigned, preferably -ask in the Matrix room first at [#conduwuit:puppygock.gay][conduwuit-matrix], +ask in the Matrix room first at [#continuwuity:continuwuity.org][continuwuity-matrix], and comment on it. ### Linting and Formatting @@ -23,9 +23,9 @@ suggestion, allow the lint and mention that in a comment. ### Running CI tests locally -conduwuit's CI for tests, linting, formatting, audit, etc use +continuwuity's CI for tests, linting, formatting, audit, etc use [`engage`][engage]. engage can be installed from nixpkgs or `cargo install -engage`. conduwuit's Nix flake devshell has the nixpkgs engage with `direnv`. +engage`. continuwuity's Nix flake devshell has the nixpkgs engage with `direnv`. Use `engage --help` for more usage details. To test, format, lint, etc that CI would do, install engage, allow the `.envrc` @@ -111,33 +111,28 @@ applies here. ### Creating pull requests -Please try to keep contributions to the GitHub. While the mirrors of conduwuit -allow for pull/merge requests, there is no guarantee I will see them in a timely +Please try to keep contributions to the Forgejo Instance. While the mirrors of continuwuity +allow for pull/merge requests, there is no guarantee the maintainers will see them in a timely manner. Additionally, please mark WIP or unfinished or incomplete PRs as drafts. -This prevents me from having to ping once in a while to double check the status +This prevents us from having to ping once in a while to double check the status of it, especially when the CI completed successfully and everything so it *looks* done. -If you open a pull request on one of the mirrors, it is your responsibility to -inform me about its existence. In the future I may try to solve this with more -repo bots in the conduwuit Matrix room. There is no mailing list or email-patch -support on the sr.ht mirror, but if you'd like to email me a git patch you can -do so at `strawberry@puppygock.gay`. Direct all PRs/MRs to the `main` branch. By sending a pull request or patch, you are agreeing that your changes are allowed to be licenced under the Apache-2.0 licence and all of your conduct is -in line with the Contributor's Covenant, and conduwuit's Code of Conduct. +in line with the Contributor's Covenant, and continuwuity's Code of Conduct. Contribution by users who violate either of these code of conducts will not have their contributions accepted. This includes users who have been banned from -conduwuit Matrix rooms for Code of Conduct violations. +continuwuityMatrix rooms for Code of Conduct violations. 
-[issues]: https://github.com/girlbossceo/conduwuit/issues -[conduwuit-matrix]: https://matrix.to/#/#conduwuit:puppygock.gay +[issues]: https://forgejo.ellis.link/continuwuation/continuwuity/issues +[continuwuity-matrix]: https://matrix.to/#/#continuwuity:continuwuity.org [complement]: https://github.com/matrix-org/complement/ -[engage.toml]: https://github.com/girlbossceo/conduwuit/blob/main/engage.toml +[engage.toml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/engage.toml [engage]: https://charles.page.computer.surgery/engage/ [sytest]: https://github.com/matrix-org/sytest/ [cargo-deb]: https://github.com/kornelski/cargo-deb @@ -146,4 +141,4 @@ conduwuit Matrix rooms for Code of Conduct violations. [cargo-audit]: https://github.com/RustSec/rustsec/tree/main/cargo-audit [direnv]: https://direnv.net/ [mdbook]: https://rust-lang.github.io/mdBook/ -[documentation.yml]: https://github.com/girlbossceo/conduwuit/blob/main/.github/workflows/documentation.yml +[documentation.yml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/.forgejo/workflows/documentation.yml From edd5fc6c7ebe7c2b01d139f5bf34c97b5ac7d653 Mon Sep 17 00:00:00 2001 From: Glandos Date: Sun, 27 Apr 2025 18:52:20 +0000 Subject: [PATCH 307/310] Actualiser debian/conduwuit.service --- debian/conduwuit.service | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/conduwuit.service b/debian/conduwuit.service index a079499e..3d2fbc9b 100644 --- a/debian/conduwuit.service +++ b/debian/conduwuit.service @@ -3,7 +3,7 @@ Description=conduwuit Matrix homeserver Wants=network-online.target After=network-online.target Alias=matrix-conduwuit.service -Documentation=https://conduwuit.puppyirl.gay/ +Documentation=https://continuwuity.org/ [Service] DynamicUser=yes From 4158c1cf623a83b96d6a2d3cabb9f6aa1d618b4b Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Mon, 28 Apr 2025 20:45:08 +0100 Subject: [PATCH 308/310] fix: Hack around software treating empty join rule incorrectly --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 216114af..2d8a2d0f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3652,7 +3652,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "assign", "js_int", @@ -3672,7 +3672,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "js_int", "ruma-common", @@ -3684,7 +3684,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "as_variant", "assign", @@ -3707,7 
+3707,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "as_variant", "base64 0.22.1", @@ -3739,7 +3739,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "as_variant", "indexmap 2.8.0", @@ -3764,7 +3764,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "bytes", "headers", @@ -3786,7 +3786,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3795,7 +3795,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "js_int", "ruma-common", @@ -3805,7 +3805,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3820,7 +3820,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "js_int", "ruma-common", @@ -3832,7 +3832,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=652cc4864203ab7ca60cf9c47b931c0385304cc7#652cc4864203ab7ca60cf9c47b931c0385304cc7" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git 
a/Cargo.toml b/Cargo.toml index c0f857c6..1ce5c1db 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -350,7 +350,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://forgejo.ellis.link/continuwuation/ruwuma" #branch = "conduwuit-changes" -rev = "652cc4864203ab7ca60cf9c47b931c0385304cc7" +rev = "d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" features = [ "compat", "rand", From 80e8900f041897847ac399135db5091df95f0e4b Mon Sep 17 00:00:00 2001 From: magmaus3 Date: Sat, 3 May 2025 15:06:11 +0200 Subject: [PATCH 309/310] add initial alpine packaging notes: - to build the package, you must use the cargo version from the edge branch (by building on edge or by installing it manually) - building from git requires some work (abuild supports snapshots for getting the release from git, but the version number would remain unchanged) - the apkbuild doesn't include any packaging tests (as i don't know what to include) --- alpine/APKBUILD | 63 +++++++++++++++++++++++++++++++++ alpine/README.md | 7 ++++ alpine/continuwuity.confd | 3 ++ alpine/continuwuity.initd | 19 ++++++++++ alpine/continuwuity.pre-install | 4 +++ 5 files changed, 96 insertions(+) create mode 100644 alpine/APKBUILD create mode 100644 alpine/README.md create mode 100644 alpine/continuwuity.confd create mode 100644 alpine/continuwuity.initd create mode 100644 alpine/continuwuity.pre-install diff --git a/alpine/APKBUILD b/alpine/APKBUILD new file mode 100644 index 00000000..97f84f65 --- /dev/null +++ b/alpine/APKBUILD @@ -0,0 +1,63 @@ +# Contributor: magmaus3 +# Maintainer: magmaus3 +pkgname=continuwuity + +# abuild doesn't like the format of v0.5.0-rc.5, so i had to change it +# see https://wiki.alpinelinux.org/wiki/Package_policies +pkgver=0.5.0_rc5 +pkgrel=0 +pkgdesc="a continuwuation of a very cool, featureful fork of conduit" +url="https://continuwuity.org/" +arch="all" +license="Apache-2.0" +depends="liburing" + +# cargo version on alpine v3.21 is too old to use the 2024 edition +# i recommend either building everything on edge, or adding +# the edge repo as a tag +makedepends="cargo liburing-dev clang-dev linux-headers" +checkdepends="" +install="$pkgname.pre-install" +subpackages="$pkgname-openrc" +source="https://forgejo.ellis.link/continuwuation/continuwuity/archive/v0.5.0-rc.5.tar.gz +continuwuity.initd +continuwuity.confd +" +builddir="$srcdir/continuwuity" +options="net !check" + +prepare() { + default_prepare + cd $srcdir/continuwuity + + # add the default database path to the config (commented out) + cat conduwuit-example.toml \ + | sed '/#database_path/ s:$: "/var/lib/continuwuity":' \ + > "$srcdir"/continuwuity.toml + + cargo fetch --target="$CTARGET" --locked +} + +build() { + cargo build --frozen --release --all-features +} + +check() { + # TODO: make sure the tests work + #cargo test --frozen + return +} + +package() { + cd $srcdir + install -Dm755 continuwuity/target/release/conduwuit "$pkgdir"/usr/bin/continuwuity + install -Dm644 "$srcdir"/continuwuity.toml -t "$pkgdir"/etc/continuwuity + install -Dm755 "$srcdir"/continuwuity.initd "$pkgdir"/etc/init.d/continuwuity + install -Dm644 "$srcdir"/continuwuity.confd "$pkgdir"/etc/conf.d/continuwuity +} + +sha512sums=" +66f6da5e98b6f7bb8c1082500101d5c87b1b79955c139b44c6ef5123919fb05feb0dffc669a3af1bc8d571ddb9f3576660f08dc10a6b19eab6db9e391175436a v0.5.0-rc.5.tar.gz +0482674be24740496d70da256d4121c5a5e3b749f2445d2bbe0e8991f1449de052724f8427da21a6f55574bc53eac9ca1e47e5012b4c13049b2b39044734d80d continuwuity.initd 
+38e2576278b450d16ba804dd8f4a128f18cd793e6c3ce55aedee1e186905755b31ee23baaa6586b1ab0e25a1f29bf1ea86bfaae4185b0cb1a29203726a199426 continuwuity.confd +" diff --git a/alpine/README.md b/alpine/README.md new file mode 100644 index 00000000..5f26d772 --- /dev/null +++ b/alpine/README.md @@ -0,0 +1,7 @@ +# building + +1. [set up your build + environment](https://wiki.alpinelinux.org/wiki/Include:Setup_your_system_and_account_for_building_packages) + +2. run `abuild` (or `abuild -K` if you want to keep the source directory to make + rebuilding faster) diff --git a/alpine/continuwuity.confd b/alpine/continuwuity.confd new file mode 100644 index 00000000..03d7b0a0 --- /dev/null +++ b/alpine/continuwuity.confd @@ -0,0 +1,3 @@ +supervisor=supervise-daemon +export CONTINUWUITY_CONFIG=/etc/continuwuity/continuwuity.toml + diff --git a/alpine/continuwuity.initd b/alpine/continuwuity.initd new file mode 100644 index 00000000..1354f4bd --- /dev/null +++ b/alpine/continuwuity.initd @@ -0,0 +1,19 @@ +#!/sbin/openrc-run + +command="/usr/bin/continuwuity" +command_user="continuwuity:continuwuity" +command_args="--config ${CONTINUWUITY_CONFIG=/etc/continuwuity/continuwuity.toml}" +command_background=true +pidfile="/run/$RC_SVCNAME.pid" + +output_log="/var/log/continuwuity.log" +error_log="/var/log/continuwuity.log" + +depend() { + need net +} + +start_pre() { + checkpath -d -m 0755 -o "$command_user" /var/lib/continuwuity + checkpath -f -m 0644 -o "$command_user" "$output_log" +} diff --git a/alpine/continuwuity.pre-install b/alpine/continuwuity.pre-install new file mode 100644 index 00000000..edac789f --- /dev/null +++ b/alpine/continuwuity.pre-install @@ -0,0 +1,4 @@ +#!/bin/sh +addgroup -S continuwuity 2>/dev/null +adduser -S -D -H -h /var/lib/continuwuity -s /sbin/nologin -G continuwuity -g continuwuity continuwuity 2>/dev/null +exit 0 From b562b8cf92be450b919df476d66187e6485ad042 Mon Sep 17 00:00:00 2001 From: magmaus3 Date: Mon, 5 May 2025 16:33:46 +0200 Subject: [PATCH 310/310] feat: add alpine ci --- .forgejo/workflows/build-alpine.yml | 49 +++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 .forgejo/workflows/build-alpine.yml diff --git a/.forgejo/workflows/build-alpine.yml b/.forgejo/workflows/build-alpine.yml new file mode 100644 index 00000000..b1757a60 --- /dev/null +++ b/.forgejo/workflows/build-alpine.yml @@ -0,0 +1,49 @@ +on: + - workflow-dispatch + - push + +jobs: + build: + runs-on: ubuntu-latest + container: + image: alpine:edge + + steps: + - name: set up dependencies + run: | + apk update + apk upgrade + apk add nodejs git alpine-sdk + - uses: actions/checkout@v4 + name: checkout the alpine dir + with: + sparse-checkout: "alpine/" + + # - uses: actions/checkout@v4 + # name: checkout the rest in the alpine dir + # with: + # path: 'alpine/continuwuity' + - name: set up user + run: adduser -DG abuild ci + + - name: set up keys + run: | + pwd + mkdir ~/.abuild + echo "${{ secrets.abuild_privkey }}" > ~/.abuild/ci@continuwuity.rsa + echo "${{ secrets.abuild_pubkey }}" > ~/.abuild/ci@continuwuity.rsa.pub + echo $HOME + echo 'PACKAGER_PRIVKEY="/root/.abuild/ci@continuwuity.rsa"' > ~/.abuild/abuild.conf + ls ~/.abuild + + - name: go go gadget abuild + run: | + cd alpine + # modify the APKBUILD to use the current branch instead of the release + # note that it seems to require the repo to be public (as you'll get + # a 404 even if the token is provided) + export ARCHIVE_URL="${{ github.server_url }}/${{ github.repository }}/archive/${{ github.ref_name }}.tar.gz" + echo 
$ARCHIVE_URL + sed -i '/^source=/c\source="'"$ARCHIVE_URL" APKBUILD + abuild -F checksum + abuild -Fr
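A possible follow-up, not part of the patch above: abuild leaves the signed .apk files it builds under $REPODEST, which defaults to ~/packages (here /root/packages, since the job runs as root). A sketch of an additional step to publish that output as a CI artifact, reusing the forgejo/upload-artifact action already used in release-image.yml; the step name, artifact name, and path are assumptions, not taken from the patches:

      - name: upload built packages
        uses: forgejo/upload-artifact@v4
        with:
          name: alpine-packages
          # abuild writes its signed output under $REPODEST (default ~/packages)
          path: /root/packages/
          if-no-files-found: error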