From 01594a62439176aee1c1544c5be0cce0ddedcdf0 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 6 May 2025 20:51:12 +0100 Subject: [PATCH 01/50] chore: Fix typos across the codebase --- .typos.toml | 5 +++++ docs/deploying/docker-compose.for-traefik.yml | 2 +- docs/deploying/docker-compose.with-traefik.yml | 2 +- docs/maintenance.md | 2 +- src/api/client/capabilities.rs | 2 +- src/api/client/well_known.rs | 2 +- src/core/info/cargo.rs | 2 +- src/core/log/reload.rs | 6 +++--- src/core/utils/string/between.rs | 6 +++--- src/database/engine/cf_opts.rs | 2 +- src/main/main.rs | 2 +- src/main/restart.rs | 4 ++-- src/service/admin/execute.rs | 4 ++-- src/service/admin/mod.rs | 2 +- src/service/appservice/namespace_regex.rs | 2 +- src/service/media/remote.rs | 17 ++++++++++++----- src/service/resolver/actual.rs | 2 +- src/service/rooms/spaces/mod.rs | 2 +- src/service/rooms/timeline/mod.rs | 18 +++++++++--------- theme/css/chrome.css | 2 +- theme/css/variables.css | 12 ++++++------ 21 files changed, 55 insertions(+), 43 deletions(-) create mode 100644 .typos.toml diff --git a/.typos.toml b/.typos.toml new file mode 100644 index 00000000..1e46469c --- /dev/null +++ b/.typos.toml @@ -0,0 +1,5 @@ +[default.extend-words] +"allocatedp" = "allocatedp" +"conduwuit" = "conduwuit" +"continuwuity" = "continuwuity" +"execuse" = "execuse" diff --git a/docs/deploying/docker-compose.for-traefik.yml b/docs/deploying/docker-compose.for-traefik.yml index 04142e0c..83fb64ff 100644 --- a/docs/deploying/docker-compose.for-traefik.yml +++ b/docs/deploying/docker-compose.for-traefik.yml @@ -28,7 +28,7 @@ services: #CONDUWUIT_CONFIG: '/etc/conduwuit.toml' # Uncomment if you mapped config toml above # We need some way to serve the client and server .well-known json. The simplest way is via the CONDUWUIT_WELL_KNOWN - # variable / config option, there are multiple ways to do this, e.g. in the conduwuit.toml file, and in a seperate + # variable / config option, there are multiple ways to do this, e.g. in the conduwuit.toml file, and in a separate # see the override file for more information about delegation CONDUWUIT_WELL_KNOWN: | { diff --git a/docs/deploying/docker-compose.with-traefik.yml b/docs/deploying/docker-compose.with-traefik.yml index 9083b796..a45893da 100644 --- a/docs/deploying/docker-compose.with-traefik.yml +++ b/docs/deploying/docker-compose.with-traefik.yml @@ -36,7 +36,7 @@ services: # CONDUWUIT_NEW_USER_DISPLAYNAME_SUFFIX = "🏳<200d>⚧" # We need some way to serve the client and server .well-known json. The simplest way is via the CONDUWUIT_WELL_KNOWN - # variable / config option, there are multiple ways to do this, e.g. in the conduwuit.toml file, and in a seperate + # variable / config option, there are multiple ways to do this, e.g. in the conduwuit.toml file, and in a separate # reverse proxy, but since you do not have a reverse proxy and following this guide, this example is included CONDUWUIT_WELL_KNOWN: | { diff --git a/docs/maintenance.md b/docs/maintenance.md index b85a1971..16ec5a4e 100644 --- a/docs/maintenance.md +++ b/docs/maintenance.md @@ -71,7 +71,7 @@ related to WAL tracking. The only safe files that can be deleted are the `LOG` files (all caps). These are the real RocksDB telemetry/log files, however Continuwuity has already -configured to only store up to 3 RocksDB `LOG` files due to generall being +configured to only store up to 3 RocksDB `LOG` files due to generally being useless for average users unless troubleshooting something low-level. 
If you would like to store nearly none at all, see the `rocksdb_max_log_files` config option. diff --git a/src/api/client/capabilities.rs b/src/api/client/capabilities.rs index 470ff6ab..7362c4f9 100644 --- a/src/api/client/capabilities.rs +++ b/src/api/client/capabilities.rs @@ -15,7 +15,7 @@ use crate::Ruma; /// # `GET /_matrix/client/v3/capabilities` /// -/// Get information on the supported feature set and other relevent capabilities +/// Get information on the supported feature set and other relevant capabilities /// of this server. pub(crate) async fn get_capabilities_route( State(services): State, diff --git a/src/api/client/well_known.rs b/src/api/client/well_known.rs index eedab981..35b7fc1e 100644 --- a/src/api/client/well_known.rs +++ b/src/api/client/well_known.rs @@ -60,7 +60,7 @@ pub(crate) async fn well_known_support( return Err(Error::BadRequest(ErrorKind::NotFound, "Not found.")); } - // TOOD: support defining multiple contacts in the config + // TODO: support defining multiple contacts in the config let mut contacts: Vec = vec![]; if let Some(role) = role { diff --git a/src/core/info/cargo.rs b/src/core/info/cargo.rs index 28c6590e..e70bdcd5 100644 --- a/src/core/info/cargo.rs +++ b/src/core/info/cargo.rs @@ -36,7 +36,7 @@ const MAIN_MANIFEST: &'static str = (); /// For *enabled* features see the info::rustc module instead. static FEATURES: OnceLock> = OnceLock::new(); -/// Processed list of dependencies. This is generated from the datas captured in +/// Processed list of dependencies. This is generated from the data captured in /// the MANIFEST. static DEPENDENCIES: OnceLock = OnceLock::new(); diff --git a/src/core/log/reload.rs b/src/core/log/reload.rs index e6a16c9f..f72fde47 100644 --- a/src/core/log/reload.rs +++ b/src/core/log/reload.rs @@ -16,9 +16,9 @@ use crate::{Result, error}; /// pulling in a version of tracing that's incompatible with the rest of our /// deps. /// -/// To work around this, we define an trait without the S paramter that forwards -/// to the reload::Handle::reload method, and then store the handle as a trait -/// object. +/// To work around this, we define an trait without the S parameter that +/// forwards to the reload::Handle::reload method, and then store the handle as +/// a trait object. /// /// [1]: pub trait ReloadHandle { diff --git a/src/core/utils/string/between.rs b/src/core/utils/string/between.rs index 05c137b4..8d3b6979 100644 --- a/src/core/utils/string/between.rs +++ b/src/core/utils/string/between.rs @@ -1,12 +1,12 @@ type Delim<'a> = (&'a str, &'a str); -/// Slice a string between a pair of delimeters. +/// Slice a string between a pair of delimiters. pub trait Between<'a> { - /// Extract a string between the delimeters. If the delimeters were not + /// Extract a string between the delimiters. If the delimiters were not /// found None is returned, otherwise the first extraction is returned. fn between(&self, delim: Delim<'_>) -> Option<&'a str>; - /// Extract a string between the delimeters. If the delimeters were not + /// Extract a string between the delimiters. If the delimiters were not /// found the original string is returned; take note of this behavior, /// if an empty slice is desired for this case use the fallible version and /// unwrap to EMPTY. 
diff --git a/src/database/engine/cf_opts.rs index 7ceec722..cbbd1012 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -193,7 +193,7 @@ fn get_cache(ctx: &Context, desc: &Descriptor) -> Option { return None; } - // Some cache capacities are overriden by server config in a strange but + // Some cache capacities are overridden by server config in a strange but // legacy-compat way let config = &ctx.server.config; let cap = match desc.name { diff --git a/src/main/main.rs index 1a9d3fe4..3416bc68 100644 --- a/src/main/main.rs +++ b/src/main/main.rs @@ -73,7 +73,7 @@ async fn async_main(server: &Arc) -> Result<(), Error> { .lock() .await .take() - .expect("services initialied"), + .expect("services initialized"), ) .await { diff --git a/src/main/restart.rs index b9d1dc94..631c1e21 100644 --- a/src/main/restart.rs +++ b/src/main/restart.rs @@ -13,8 +13,8 @@ pub(super) fn restart() -> ! { // // We can (and do) prevent that panic by checking the result of current_exe() // prior to committing to restart, returning an error to the user without any - // unexpected shutdown. In a nutshell that is the execuse for this unsafety. - // Nevertheless, we still want a way to override the restart preventation (i.e. + // unexpected shutdown. In a nutshell that is the excuse for this unsafety. + // Nevertheless, we still want a way to override the restart prevention (i.e. // admin server restart --force). let exe = unsafe { utils::sys::current_exe().expect("program path must be available") }; let envs = env::vars(); diff --git a/src/service/admin/execute.rs index 174b28ed..e0d724bd 100644 --- a/src/service/admin/execute.rs +++ b/src/service/admin/execute.rs @@ -25,7 +25,7 @@ pub(super) async fn console_auto_stop(&self) { /// Execute admin commands after startup #[implement(super::Service)] pub(super) async fn startup_execute(&self) -> Result { - // List of comamnds to execute + // List of commands to execute let commands = &self.services.server.config.admin_execute; // Determine if we're running in smoketest-mode which will change some behaviors @@ -64,7 +64,7 @@ pub(super) async fn startup_execute(&self) -> Result { /// Execute admin commands after signal #[implement(super::Service)] pub(super) async fn signal_execute(&self) -> Result { - // List of comamnds to execute + // List of commands to execute let commands = self.services.server.config.admin_signal_execute.clone(); // When true, errors are ignored and execution continues. diff --git a/src/service/admin/mod.rs index b3466711..683f5400 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -166,7 +166,7 @@ impl Service { .map_err(|e| err!("Failed to enqueue admin command: {e:?}")) } - /// Dispatches a comamnd to the processor on the current task and waits for + /// Dispatches a command to the processor on the current task and waits for /// completion.
pub async fn command_in_place( &self, diff --git a/src/service/appservice/namespace_regex.rs b/src/service/appservice/namespace_regex.rs index fe0fd91f..76b754ae 100644 --- a/src/service/appservice/namespace_regex.rs +++ b/src/service/appservice/namespace_regex.rs @@ -26,7 +26,7 @@ impl NamespaceRegex { false } - /// Checks if this namespace has exlusive rights to a namespace + /// Checks if this namespace has exclusive rights to a namespace #[inline] #[must_use] pub fn is_exclusive_match(&self, heystack: &str) -> bool { diff --git a/src/service/media/remote.rs b/src/service/media/remote.rs index a1e874d8..f234fa13 100644 --- a/src/service/media/remote.rs +++ b/src/service/media/remote.rs @@ -338,7 +338,7 @@ fn handle_federation_error( return fallback(); } - // Reached for 5xx errors. This is where we don't fallback given the likelyhood + // Reached for 5xx errors. This is where we don't fallback given the likelihood // the other endpoint will also be a 5xx and we're wasting time. error } @@ -356,7 +356,7 @@ pub async fn fetch_remote_thumbnail_legacy( self.check_legacy_freeze()?; self.check_fetch_authorized(&mxc)?; - let reponse = self + let response = self .services .sending .send_federation_request(mxc.server_name, media::get_content_thumbnail::v3::Request { @@ -373,10 +373,17 @@ pub async fn fetch_remote_thumbnail_legacy( .await?; let dim = Dim::from_ruma(body.width, body.height, body.method.clone())?; - self.upload_thumbnail(&mxc, None, None, reponse.content_type.as_deref(), &dim, &reponse.file) - .await?; + self.upload_thumbnail( + &mxc, + None, + None, + response.content_type.as_deref(), + &dim, + &response.file, + ) + .await?; - Ok(reponse) + Ok(response) } #[implement(super::Service)] diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index 0151c4d7..d23ef95a 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -296,7 +296,7 @@ impl super::Service { expire: CachedOverride::default_expire(), overriding: (hostname != untername) .then_some(hostname.into()) - .inspect(|_| debug_info!("{untername:?} overriden by {hostname:?}")), + .inspect(|_| debug_info!("{untername:?} overridden by {hostname:?}")), }); Ok(()) diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index ea9756ba..53d2b742 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -399,7 +399,7 @@ async fn get_room_summary( Ok(summary) } -/// With the given identifier, checks if a room is accessable +/// With the given identifier, checks if a room is accessible #[implement(Service)] async fn is_accessible_child<'a, I>( &self, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 947e1c38..4b2f3cb2 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -267,15 +267,15 @@ impl Service { /// /// Returns pdu id #[tracing::instrument(level = "debug", skip_all)] - pub async fn append_pdu<'a, Leafs>( + pub async fn append_pdu<'a, Leaves>( &'a self, pdu: &'a PduEvent, mut pdu_json: CanonicalJsonObject, - leafs: Leafs, + leaves: Leaves, state_lock: &'a RoomMutexGuard, ) -> Result where - Leafs: Iterator + Send + 'a, + Leaves: Iterator + Send + 'a, { // Coalesce database writes for the remainder of this scope. 
let _cork = self.db.db.cork_and_flush(); @@ -344,7 +344,7 @@ impl Service { self.services .state - .set_forward_extremities(&pdu.room_id, leafs, state_lock) + .set_forward_extremities(&pdu.room_id, leaves, state_lock) .await; let insert_lock = self.mutex_insert.lock(&pdu.room_id).await; @@ -951,17 +951,17 @@ impl Service { /// Append the incoming event setting the state snapshot to the state from /// the server that sent the event. #[tracing::instrument(level = "debug", skip_all)] - pub async fn append_incoming_pdu<'a, Leafs>( + pub async fn append_incoming_pdu<'a, Leaves>( &'a self, pdu: &'a PduEvent, pdu_json: CanonicalJsonObject, - new_room_leafs: Leafs, + new_room_leaves: Leaves, state_ids_compressed: Arc, soft_fail: bool, state_lock: &'a RoomMutexGuard, ) -> Result> where - Leafs: Iterator + Send + 'a, + Leaves: Iterator + Send + 'a, { // We append to state before appending the pdu, so we don't have a moment in // time with the pdu without it's state. This is okay because append_pdu can't @@ -978,14 +978,14 @@ impl Service { self.services .state - .set_forward_extremities(&pdu.room_id, new_room_leafs, state_lock) + .set_forward_extremities(&pdu.room_id, new_room_leaves, state_lock) .await; return Ok(None); } let pdu_id = self - .append_pdu(pdu, pdu_json, new_room_leafs, state_lock) + .append_pdu(pdu, pdu_json, new_room_leaves, state_lock) .await?; Ok(Some(pdu_id)) diff --git a/theme/css/chrome.css b/theme/css/chrome.css index 52b35c2c..d6cc2b32 100644 --- a/theme/css/chrome.css +++ b/theme/css/chrome.css @@ -495,7 +495,7 @@ ul#searchresults span.teaser em { .chapter li { display: flex; - color: var(--sidebar-non-existant); + color: var(--sidebar-non-existent); } .chapter li a { display: block; diff --git a/theme/css/variables.css b/theme/css/variables.css index e7feed98..ca9fd271 100644 --- a/theme/css/variables.css +++ b/theme/css/variables.css @@ -20,7 +20,7 @@ --sidebar-bg: #14191f; --sidebar-fg: #c8c9db; - --sidebar-non-existant: #5c6773; + --sidebar-non-existent: #5c6773; --sidebar-active: #ffb454; --sidebar-spacer: #2d334f; @@ -64,7 +64,7 @@ --sidebar-bg: #292c2f; --sidebar-fg: #a1adb8; - --sidebar-non-existant: #505254; + --sidebar-non-existent: #505254; --sidebar-active: #3473ad; --sidebar-spacer: #393939; @@ -108,7 +108,7 @@ --sidebar-bg: #fafafa; --sidebar-fg: #AE518E; - --sidebar-non-existant: #aaaaaa; + --sidebar-non-existent: #aaaaaa; --sidebar-active: #2F7E86; --sidebar-spacer: #f4f4f4; @@ -152,7 +152,7 @@ --sidebar-bg: #282d3f; --sidebar-fg: #fdcbec; - --sidebar-non-existant: #505274; + --sidebar-non-existent: #505274; --sidebar-active: #5BCEFA; --sidebar-spacer: #2d334f; @@ -196,7 +196,7 @@ --sidebar-bg: #3b2e2a; --sidebar-fg: #c8c9db; - --sidebar-non-existant: #505254; + --sidebar-non-existent: #505254; --sidebar-active: #e69f67; --sidebar-spacer: #45373a; @@ -241,7 +241,7 @@ --sidebar-bg: #292c2f; --sidebar-fg: #a1adb8; - --sidebar-non-existant: #505254; + --sidebar-non-existent: #505254; --sidebar-active: #3473ad; --sidebar-spacer: #393939; From c0f46269b58c4f6faef3084e328e12dbe9ce7742 Mon Sep 17 00:00:00 2001 From: Jade Date: Tue, 6 May 2025 21:49:41 +0000 Subject: [PATCH 02/50] docs: Fix name in README --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index bf4f5613..fdcdafb7 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ It's a community continuation of the [conduwuit](https://github.com/girlbossceo/ The original conduwuit project has been archived and is no longer maintained. 
Rather than letting this Rust-based Matrix homeserver disappear, a group of community contributors have forked the project to continue its development, fix outstanding issues, and add new features. -We aim to provide a stable, well-maintained alternative for current Conduit users and welcome newcomers seeking a lightweight, efficient Matrix homeserver. +We aim to provide a stable, well-maintained alternative for current conduwuit users and welcome newcomers seeking a lightweight, efficient Matrix homeserver. ### Who are we? From 5577ddca270f2b0eb453987d08482565da01af0f Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 6 May 2025 22:56:23 +0100 Subject: [PATCH 03/50] chore: Add CONTINUWUITY_ environment variables Also updates some examples to match --- arch/conduwuit.service | 8 ++++---- debian/conduwuit.service | 7 ++++--- nix/pkgs/complement/default.nix | 6 +++--- src/core/config/mod.rs | 9 +++++++-- src/main/clap.rs | 18 ++++++++++++++++-- 5 files changed, 34 insertions(+), 14 deletions(-) diff --git a/arch/conduwuit.service b/arch/conduwuit.service index 4f45ddc0..c86e37bd 100644 --- a/arch/conduwuit.service +++ b/arch/conduwuit.service @@ -1,11 +1,11 @@ [Unit] -Description=conduwuit Matrix homeserver + +Description=Continuwuity - Matrix homeserver Wants=network-online.target After=network-online.target -Documentation=https://conduwuit.puppyirl.gay/ +Documentation=https://continuwuity.org/ RequiresMountsFor=/var/lib/private/conduwuit Alias=matrix-conduwuit.service - [Service] DynamicUser=yes Type=notify-reload @@ -59,7 +59,7 @@ StateDirectory=conduwuit RuntimeDirectory=conduwuit RuntimeDirectoryMode=0750 -Environment="CONDUWUIT_CONFIG=/etc/conduwuit/conduwuit.toml" +Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml" BindPaths=/var/lib/private/conduwuit:/var/lib/matrix-conduit BindPaths=/var/lib/private/conduwuit:/var/lib/private/matrix-conduit diff --git a/debian/conduwuit.service b/debian/conduwuit.service index 3d2fbc9b..be2f3dae 100644 --- a/debian/conduwuit.service +++ b/debian/conduwuit.service @@ -1,9 +1,10 @@ [Unit] -Description=conduwuit Matrix homeserver + +Description=Continuwuity - Matrix homeserver Wants=network-online.target After=network-online.target -Alias=matrix-conduwuit.service Documentation=https://continuwuity.org/ +Alias=matrix-conduwuit.service [Service] DynamicUser=yes @@ -11,7 +12,7 @@ User=conduwuit Group=conduwuit Type=notify -Environment="CONDUWUIT_CONFIG=/etc/conduwuit/conduwuit.toml" +Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml" ExecStart=/usr/sbin/conduwuit diff --git a/nix/pkgs/complement/default.nix b/nix/pkgs/complement/default.nix index 9b010e14..1295cb03 100644 --- a/nix/pkgs/complement/default.nix +++ b/nix/pkgs/complement/default.nix @@ -75,9 +75,9 @@ dockerTools.buildImage { else []; Env = [ - "CONDUWUIT_TLS__KEY=${./private_key.key}" - "CONDUWUIT_TLS__CERTS=${./certificate.crt}" - "CONDUWUIT_CONFIG=${./config.toml}" + "CONTINUWUITY_TLS__KEY=${./private_key.key}" + "CONTINUWUITY_TLS__CERTS=${./certificate.crt}" + "CONTINUWUITY_CONFIG=${./config.toml}" "RUST_BACKTRACE=full" ]; diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 5374c2c2..5648a126 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1962,7 +1962,11 @@ impl Config { where I: Iterator, { - let envs = [Env::var("CONDUIT_CONFIG"), Env::var("CONDUWUIT_CONFIG")]; + let envs = [ + Env::var("CONDUIT_CONFIG"), + Env::var("CONDUWUIT_CONFIG"), + Env::var("CONTINUWUITY_CONFIG"), + ]; let config = envs .into_iter() @@ -1971,7 
+1975,8 @@ impl Config { .chain(paths.map(Toml::file)) .fold(Figment::new(), |config, file| config.merge(file.nested())) .merge(Env::prefixed("CONDUIT_").global().split("__")) - .merge(Env::prefixed("CONDUWUIT_").global().split("__")); + .merge(Env::prefixed("CONDUWUIT_").global().split("__")) + .merge(Env::prefixed("CONTINUWUITY_").global().split("__")); Ok(config) } diff --git a/src/main/clap.rs b/src/main/clap.rs index 707a1c76..9b63af19 100644 --- a/src/main/clap.rs +++ b/src/main/clap.rs @@ -74,17 +74,30 @@ pub(crate) struct Args { /// with the exception of the last bucket, try increasing this value to e.g. /// 50 or 100. Inversely, decrease to 10 etc if the histogram lacks /// resolution. - #[arg(long, hide(true), env = "CONDUWUIT_RUNTIME_HISTOGRAM_INTERVAL", default_value = "25")] + #[arg( + long, + hide(true), + env = "CONTINUWUITY_RUNTIME_HISTOGRAM_INTERVAL", + env = "CONDUWUIT_RUNTIME_HISTOGRAM_INTERVAL", + default_value = "25" + )] pub(crate) worker_histogram_interval: u64, /// Set the histogram bucket count (tokio_unstable). Default is 20. - #[arg(long, hide(true), env = "CONDUWUIT_RUNTIME_HISTOGRAM_BUCKETS", default_value = "20")] + #[arg( + long, + hide(true), + env = "CONTINUWUITY_RUNTIME_HISTOGRAM_BUCKETS", + env = "CONDUWUIT_RUNTIME_HISTOGRAM_BUCKETS", + default_value = "20" + )] pub(crate) worker_histogram_buckets: usize, /// Toggles worker affinity feature. #[arg( long, hide(true), + env = "CONTINUWUITY_RUNTIME_WORKER_AFFINITY", env = "CONDUWUIT_RUNTIME_WORKER_AFFINITY", action = ArgAction::Set, num_args = 0..=1, @@ -99,6 +112,7 @@ pub(crate) struct Args { #[arg( long, hide(true), + env = "CONTINUWUITY_RUNTIME_GC_ON_PARK", env = "CONDUWUIT_RUNTIME_GC_ON_PARK", action = ArgAction::Set, num_args = 0..=1, From 7c58e40c967d984f1105738b589e81cc9f6069be Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 10 May 2025 13:29:59 +0100 Subject: [PATCH 04/50] chore(typos): Ignore certificate files --- .typos.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.typos.toml b/.typos.toml index 1e46469c..fe958292 100644 --- a/.typos.toml +++ b/.typos.toml @@ -1,3 +1,6 @@ +[files] +extend-exclude = ["*.csr"] + [default.extend-words] "allocatedp" = "allocatedp" "conduwuit" = "conduwuit" From beee996f723101d8ca61de7ebd46446f99298555 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 10 May 2025 20:37:08 +0100 Subject: [PATCH 05/50] docs: Rename conduwuit to continuwuity in more places --- .typos.toml | 1 + CONTRIBUTING.md | 4 +- Cargo.toml | 2 +- conduwuit-example.toml | 175 ++++++++--------- debian/README.md | 4 +- docs/deploying/docker-compose.for-traefik.yml | 38 ++-- docs/deploying/docker-compose.override.yml | 10 +- docs/deploying/docker-compose.with-caddy.yml | 30 +-- .../deploying/docker-compose.with-traefik.yml | 48 ++--- docs/deploying/docker-compose.yml | 30 +-- docs/deploying/docker.md | 10 +- docs/deploying/generic.md | 2 +- docs/development/hot_reload.md | 2 +- docs/development/testing.md | 5 +- nix/pkgs/oci-image/default.nix | 8 +- src/admin/processor.rs | 2 +- src/core/config/check.rs | 4 +- src/core/config/mod.rs | 177 +++++++++--------- src/service/admin/create.rs | 2 +- 19 files changed, 279 insertions(+), 275 deletions(-) diff --git a/.typos.toml b/.typos.toml index fe958292..41c81085 100644 --- a/.typos.toml +++ b/.typos.toml @@ -5,4 +5,5 @@ extend-exclude = ["*.csr"] "allocatedp" = "allocatedp" "conduwuit" = "conduwuit" "continuwuity" = "continuwuity" +"continuwity" = "continuwuity" "execuse" = "execuse" diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 
ecff7173..da426801 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,6 @@ # Contributing guide -This page is for about contributing to conduwuit. The +This page is for about contributing to Continuwuity. The [development](./development.md) page may be of interest for you as well. If you would like to work on an [issue][issues] that is not assigned, preferably @@ -73,7 +73,7 @@ If you'd like to run Complement locally using Nix, see the ### Writing documentation -conduwuit's website uses [`mdbook`][mdbook] and deployed via CI using GitHub +Continuwuity's website uses [`mdbook`][mdbook] and deployed via CI using GitHub Pages in the [`documentation.yml`][documentation.yml] workflow file with Nix's mdbook in the devshell. All documentation is in the `docs/` directory at the top level. The compiled mdbook website is also uploaded as an artifact. diff --git a/Cargo.toml b/Cargo.toml index 43cd3f4f..79f767a2 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -298,7 +298,7 @@ version = "1.15.0" default-features = false features = ["serde"] -# Used for reading the configuration from conduwuit.toml & environment variables +# Used for reading the configuration from continuwuity.toml & environment variables [workspace.dependencies.figment] version = "0.10.19" default-features = false diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 3d92ab15..6934e67c 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1,4 +1,4 @@ -### conduwuit Configuration +### continuwuity Configuration ### ### THIS FILE IS GENERATED. CHANGES/CONTRIBUTIONS IN THE REPO WILL BE ### OVERWRITTEN! @@ -13,7 +13,7 @@ ### that say "YOU NEED TO EDIT THIS". ### ### For more information, see: -### https://conduwuit.puppyirl.gay/configuration.html +### https://continuwuity.org/configuration.html [global] @@ -21,7 +21,7 @@ # suffix for user and room IDs/aliases. # # See the docs for reverse proxying and delegation: -# https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy +# https://continuwuity.org/deploying/generic.html#setting-up-the-reverse-proxy # # Also see the `[global.well_known]` config section at the very bottom. # @@ -32,11 +32,11 @@ # YOU NEED TO EDIT THIS. THIS CANNOT BE CHANGED AFTER WITHOUT A DATABASE # WIPE. # -# example: "conduwuit.woof" +# example: "continuwuity.org" # #server_name = -# The default address (IPv4 or IPv6) conduwuit will listen on. +# The default address (IPv4 or IPv6) continuwuity will listen on. # # If you are using Docker or a container NAT networking setup, this must # be "0.0.0.0". @@ -46,10 +46,10 @@ # #address = ["127.0.0.1", "::1"] -# The port(s) conduwuit will listen on. +# The port(s) continuwuity will listen on. # # For reverse proxying, see: -# https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy +# https://continuwuity.org/deploying/generic.html#setting-up-the-reverse-proxy # # If you are using Docker, don't change this, you'll need to map an # external port to this. @@ -58,16 +58,17 @@ # #port = 8008 -# The UNIX socket conduwuit will listen on. +# The UNIX socket continuwuity will listen on. # -# conduwuit cannot listen on both an IP address and a UNIX socket. If +# continuwuity cannot listen on both an IP address and a UNIX socket. If # listening on a UNIX socket, you MUST remove/comment the `address` key. 
# # Remember to make sure that your reverse proxy has access to this socket -# file, either by adding your reverse proxy to the 'conduwuit' group or -# granting world R/W permissions with `unix_socket_perms` (666 minimum). +# file, either by adding your reverse proxy to the appropriate user group +# or granting world R/W permissions with `unix_socket_perms` (666 +# minimum). # -# example: "/run/conduwuit/conduwuit.sock" +# example: "/run/continuwuity/continuwuity.sock" # #unix_socket_path = @@ -75,23 +76,23 @@ # #unix_socket_perms = 660 -# This is the only directory where conduwuit will save its data, including -# media. Note: this was previously "/var/lib/matrix-conduit". +# This is the only directory where continuwuity will save its data, +# including media. Note: this was previously "/var/lib/matrix-conduit". # # YOU NEED TO EDIT THIS. # -# example: "/var/lib/conduwuit" +# example: "/var/lib/continuwuity" # #database_path = -# conduwuit supports online database backups using RocksDB's Backup engine -# API. To use this, set a database backup path that conduwuit can write -# to. +# continuwuity supports online database backups using RocksDB's Backup +# engine API. To use this, set a database backup path that continuwuity +# can write to. # # For more information, see: -# https://conduwuit.puppyirl.gay/maintenance.html#backups +# https://continuwuity.org/maintenance.html#backups # -# example: "/opt/conduwuit-db-backups" +# example: "/opt/continuwuity-db-backups" # #database_backup_path = @@ -112,14 +113,14 @@ # #new_user_displayname_suffix = "🏳️‍⚧️" -# If enabled, conduwuit will send a simple GET request periodically to +# If enabled, continuwuity will send a simple GET request periodically to # `https://continuwuity.org/.well-known/continuwuity/announcements` for any new # announcements or major updates. This is not an update check endpoint. # #allow_announcements_check = true -# Set this to any float value to multiply conduwuit's in-memory LRU caches -# with such as "auth_chain_cache_capacity". +# Set this to any float value to multiply continuwuity's in-memory LRU +# caches with such as "auth_chain_cache_capacity". # # May be useful if you have significant memory to spare to increase # performance. @@ -131,7 +132,7 @@ # #cache_capacity_modifier = 1.0 -# Set this to any float value in megabytes for conduwuit to tell the +# Set this to any float value in megabytes for continuwuity to tell the # database engine that this much memory is available for database read # caches. # @@ -145,7 +146,7 @@ # #db_cache_capacity_mb = varies by system -# Set this to any float value in megabytes for conduwuit to tell the +# Set this to any float value in megabytes for continuwuity to tell the # database engine that this much memory is available for database write # caches. # @@ -250,9 +251,9 @@ # Enable using *only* TCP for querying your specified nameservers instead # of UDP. # -# If you are running conduwuit in a container environment, this config +# If you are running continuwuity in a container environment, this config # option may need to be enabled. For more details, see: -# https://conduwuit.puppyirl.gay/troubleshooting.html#potential-dns-issues-when-using-docker +# https://continuwuity.org/troubleshooting.html#potential-dns-issues-when-using-docker # #query_over_tcp_only = false @@ -418,9 +419,9 @@ # tokens. 
Multiple tokens can be added if you separate them with # whitespace # -# conduwuit must be able to access the file, and it must not be empty +# continuwuity must be able to access the file, and it must not be empty # -# example: "/etc/conduwuit/.reg_token" +# example: "/etc/continuwuity/.reg_token" # #registration_token_file = @@ -512,16 +513,16 @@ #allow_room_creation = true # Set to false to disable users from joining or creating room versions -# that aren't officially supported by conduwuit. +# that aren't officially supported by continuwuity. # -# conduwuit officially supports room versions 6 - 11. +# continuwuity officially supports room versions 6 - 11. # -# conduwuit has slightly experimental (though works fine in practice) +# continuwuity has slightly experimental (though works fine in practice) # support for versions 3 - 5. # #allow_unstable_room_versions = true -# Default room version conduwuit will create rooms with. +# Default room version continuwuity will create rooms with. # # Per spec, room version 11 is the default. # @@ -587,7 +588,7 @@ # Servers listed here will be used to gather public keys of other servers # (notary trusted key servers). # -# Currently, conduwuit doesn't support inbound batched key requests, so +# Currently, continuwuity doesn't support inbound batched key requests, so # this list should only contain other Synapse servers. # # example: ["matrix.org", "tchncs.de"] @@ -628,7 +629,7 @@ # #trusted_server_batch_size = 1024 -# Max log level for conduwuit. Allows debug, info, warn, or error. +# Max log level for continuwuity. Allows debug, info, warn, or error. # # See also: # https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives @@ -649,8 +650,9 @@ # #log_span_events = "none" -# Configures whether CONDUWUIT_LOG EnvFilter matches values using regular -# expressions. See the tracing_subscriber documentation on Directives. +# Configures whether CONTINUWUITY_LOG EnvFilter matches values using +# regular expressions. See the tracing_subscriber documentation on +# Directives. # #log_filter_regex = true @@ -718,7 +720,7 @@ # This takes priority over "turn_secret" first, and falls back to # "turn_secret" if invalid or failed to open. # -# example: "/etc/conduwuit/.turn_secret" +# example: "/etc/continuwuity/.turn_secret" # #turn_secret_file = @@ -726,12 +728,12 @@ # #turn_ttl = 86400 -# List/vector of room IDs or room aliases that conduwuit will make newly -# registered users join. The rooms specified must be rooms that you have -# joined at least once on the server, and must be public. +# List/vector of room IDs or room aliases that continuwuity will make +# newly registered users join. The rooms specified must be rooms that you +# have joined at least once on the server, and must be public. # -# example: ["#conduwuit:puppygock.gay", -# "!eoIzvAvVwY23LPDay8:puppygock.gay"] +# example: ["#continuwuity:continuwuity.org", +# "!main-1:continuwuity.org"] # #auto_join_rooms = [] @@ -754,10 +756,10 @@ # #auto_deactivate_banned_room_attempts = false -# RocksDB log level. This is not the same as conduwuit's log level. This -# is the log level for the RocksDB engine/library which show up in your -# database folder/path as `LOG` files. conduwuit will log RocksDB errors -# as normal through tracing or panics if severe for safety. +# RocksDB log level. This is not the same as continuwuity's log level. +# This is the log level for the RocksDB engine/library which show up in +# your database folder/path as `LOG` files. 
continuwuity will log RocksDB +# errors as normal through tracing or panics if severe for safety. # #rocksdb_log_level = "error" @@ -777,7 +779,7 @@ # Set this to true to use RocksDB config options that are tailored to HDDs # (slower device storage). # -# It is worth noting that by default, conduwuit will use RocksDB with +# It is worth noting that by default, continuwuity will use RocksDB with # Direct IO enabled. *Generally* speaking this improves performance as it # bypasses buffered I/O (system page cache). However there is a potential # chance that Direct IO may cause issues with database operations if your @@ -785,7 +787,7 @@ # possibly ZFS filesystem. RocksDB generally deals/corrects these issues # but it cannot account for all setups. If you experience any weird # RocksDB issues, try enabling this option as it turns off Direct IO and -# feel free to report in the conduwuit Matrix room if this option fixes +# feel free to report in the continuwuity Matrix room if this option fixes # your DB issues. # # For more information, see: @@ -840,7 +842,7 @@ # as they all differ. See their `kDefaultCompressionLevel`. # # Note when using the default value we may override it with a setting -# tailored specifically conduwuit. +# tailored specifically for continuwuity. # #rocksdb_compression_level = 32767 @@ -856,7 +858,7 @@ # algorithm. # # Note when using the default value we may override it with a setting -# tailored specifically conduwuit. +# tailored specifically for continuwuity. # #rocksdb_bottommost_compression_level = 32767 @@ -896,13 +898,13 @@ # 0 = AbsoluteConsistency # 1 = TolerateCorruptedTailRecords (default) # 2 = PointInTime (use me if trying to recover) -# 3 = SkipAnyCorruptedRecord (you now voided your Conduwuit warranty) +# 3 = SkipAnyCorruptedRecord (you now voided your Continuwuity warranty) # # For more information on these modes, see: # https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes # # For more details on recovering a corrupt database, see: -# https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption +# https://continuwuity.org/troubleshooting.html#database-corruption # #rocksdb_recovery_mode = 1 @@ -942,7 +944,7 @@ # - Disabling repair mode and restarting the server is recommended after # running the repair. # -# See https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption for more details on recovering a corrupt database. +# See https://continuwuity.org/troubleshooting.html#database-corruption for more details on recovering a corrupt database. # #rocksdb_repair = false @@ -969,7 +971,7 @@ # Enables RocksDB compaction. You should never ever have to set this # option to false. If you for some reason find yourself needing to use # this option as part of troubleshooting or a bug, please reach out to us -# in the conduwuit Matrix room with information and details. +# in the continuwuity Matrix room with information and details. # # Disabling compaction will lead to a significantly bloated and # explosively large database, gradually poor performance, unnecessarily @@ -995,7 +997,7 @@ # purposes such as recovering/recreating your admin room, or inviting # yourself back. # -# See https://conduwuit.puppyirl.gay/troubleshooting.html#lost-access-to-admin-room for other ways to get back into your admin room. +# See https://continuwuity.org/troubleshooting.html#lost-access-to-admin-room for other ways to get back into your admin room. # # Once this password is unset, all sessions will be logged out for # security purposes. 
@@ -1010,8 +1012,8 @@ # Allow local (your server only) presence updates/requests. # -# Note that presence on conduwuit is very fast unlike Synapse's. If using -# outgoing presence, this MUST be enabled. +# Note that presence on continuwuity is very fast unlike Synapse's. If +# using outgoing presence, this MUST be enabled. # #allow_local_presence = true @@ -1019,7 +1021,7 @@ # # This option receives presence updates from other servers, but does not # send any unless `allow_outgoing_presence` is true. Note that presence on -# conduwuit is very fast unlike Synapse's. +# continuwuity is very fast unlike Synapse's. # #allow_incoming_presence = true @@ -1027,8 +1029,8 @@ # # This option sends presence updates to other servers, but does not # receive any unless `allow_incoming_presence` is true. Note that presence -# on conduwuit is very fast unlike Synapse's. If using outgoing presence, -# you MUST enable `allow_local_presence` as well. +# on continuwuity is very fast unlike Synapse's. If using outgoing +# presence, you MUST enable `allow_local_presence` as well. # #allow_outgoing_presence = true @@ -1081,8 +1083,8 @@ # #typing_client_timeout_max_s = 45 -# Set this to true for conduwuit to compress HTTP response bodies using -# zstd. This option does nothing if conduwuit was not built with +# Set this to true for continuwuity to compress HTTP response bodies using +# zstd. This option does nothing if continuwuity was not built with # `zstd_compression` feature. Please be aware that enabling HTTP # compression may weaken TLS. Most users should not need to enable this. # See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH @@ -1090,8 +1092,8 @@ # #zstd_compression = false -# Set this to true for conduwuit to compress HTTP response bodies using -# gzip. This option does nothing if conduwuit was not built with +# Set this to true for continuwuity to compress HTTP response bodies using +# gzip. This option does nothing if continuwuity was not built with # `gzip_compression` feature. Please be aware that enabling HTTP # compression may weaken TLS. Most users should not need to enable this. # See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before @@ -1102,8 +1104,8 @@ # #gzip_compression = false -# Set this to true for conduwuit to compress HTTP response bodies using -# brotli. This option does nothing if conduwuit was not built with +# Set this to true for continuwuity to compress HTTP response bodies using +# brotli. This option does nothing if continuwuity was not built with # `brotli_compression` feature. Please be aware that enabling HTTP # compression may weaken TLS. Most users should not need to enable this. # See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH @@ -1165,7 +1167,7 @@ # Otherwise setting this to false reduces filesystem clutter and overhead # for managing these symlinks in the directory. This is now disabled by # default. You may still return to upstream Conduit but you have to run -# conduwuit at least once with this set to true and allow the +# continuwuity at least once with this set to true and allow the # media_startup_check to take place before shutting down to return to # Conduit. # @@ -1210,8 +1212,8 @@ # #allowed_remote_server_names = [] -# Vector list of regex patterns of server names that conduwuit will refuse -# to download remote media from. +# Vector list of regex patterns of server names that continuwuity will +# refuse to download remote media from. 
# # example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] # @@ -1225,7 +1227,7 @@ # #forbidden_remote_room_directory_server_names = [] -# Vector list of regex patterns of server names that conduwuit will not +# Vector list of regex patterns of server names that continuwuity will not # send messages to the client from. # # Note that there is no way for clients to receive messages once a server @@ -1249,7 +1251,7 @@ #send_messages_from_ignored_users_to_client = false # Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you -# do not want conduwuit to send outbound requests to. Defaults to +# do not want continuwuity to send outbound requests to. Defaults to # RFC1918, unroutable, loopback, multicast, and testnet addresses for # security. # @@ -1399,26 +1401,26 @@ # Allow admins to enter commands in rooms other than "#admins" (admin # room) by prefixing your message with "\!admin" or "\\!admin" followed up -# a normal conduwuit admin command. The reply will be publicly visible to -# the room, originating from the sender. +# a normal continuwuity admin command. The reply will be publicly visible +# to the room, originating from the sender. # # example: \\!admin debug ping puppygock.gay # #admin_escape_commands = true -# Automatically activate the conduwuit admin room console / CLI on -# startup. This option can also be enabled with `--console` conduwuit +# Automatically activate the continuwuity admin room console / CLI on +# startup. This option can also be enabled with `--console` continuwuity # argument. # #admin_console_automatic = false # List of admin commands to execute on startup. # -# This option can also be configured with the `--execute` conduwuit +# This option can also be configured with the `--execute` continuwuity # argument and can take standard shell commands and environment variables # -# For example: `./conduwuit --execute "server admin-notice conduwuit has -# started up at $(date)"` +# For example: `./continuwuity --execute "server admin-notice continuwuity +# has started up at $(date)"` # # example: admin_execute = ["debug ping puppygock.gay", "debug echo hi"]` # @@ -1426,7 +1428,7 @@ # Ignore errors in startup commands. # -# If false, conduwuit will error and fail to start if an admin execute +# If false, continuwuity will error and fail to start if an admin execute # command (`--execute` / `admin_execute`) fails. # #admin_execute_errors_ignore = false @@ -1447,15 +1449,14 @@ # The default room tag to apply on the admin room. # # On some clients like Element, the room tag "m.server_notice" is a -# special pinned room at the very bottom of your room list. The conduwuit -# admin room can be pinned here so you always have an easy-to-access -# shortcut dedicated to your admin room. +# special pinned room at the very bottom of your room list. The +# continuwuity admin room can be pinned here so you always have an +# easy-to-access shortcut dedicated to your admin room. # #admin_room_tag = "m.server_notice" # Sentry.io crash/panic reporting, performance monitoring/metrics, etc. -# This is NOT enabled by default. conduwuit's default Sentry reporting -# endpoint domain is `o4506996327251968.ingest.us.sentry.io`. +# This is NOT enabled by default. # #sentry = false @@ -1463,7 +1464,7 @@ # #sentry_endpoint = "" -# Report your conduwuit server_name in Sentry.io crash reports and +# Report your continuwuity server_name in Sentry.io crash reports and # metrics. # #sentry_send_server_name = false @@ -1500,7 +1501,7 @@ # Enable the tokio-console. 
This option is only relevant to developers. # # For more information, see: -# https://conduwuit.puppyirl.gay/development.html#debugging-with-tokio-console +# https://continuwuity.org/development.html#debugging-with-tokio-console # #tokio_console = false diff --git a/debian/README.md b/debian/README.md index 800a2e09..4a8e58d2 100644 --- a/debian/README.md +++ b/debian/README.md @@ -1,4 +1,4 @@ -# conduwuit for Debian +# Continuwuity for Debian Information about downloading and deploying the Debian package. This may also be referenced for other `apt`-based distros such as Ubuntu. @@ -22,7 +22,7 @@ options in `/etc/conduwuit/conduwuit.toml`. ### Running -The package uses the [`conduwuit.service`](../configuration/examples.md#example-systemd-unit-file) systemd unit file to start and stop conduwuit. The binary is installed at `/usr/sbin/conduwuit`. +The package uses the [`conduwuit.service`](../configuration/examples.md#example-systemd-unit-file) systemd unit file to start and stop Continuwuity. The binary is installed at `/usr/sbin/conduwuit`. This package assumes by default that conduwuit will be placed behind a reverse proxy. The default config options apply (listening on `localhost` and TCP port `6167`). Matrix federation requires a valid domain name and TLS, so you will need to set up TLS certificates and renewal for it to work properly if you intend to federate. diff --git a/docs/deploying/docker-compose.for-traefik.yml b/docs/deploying/docker-compose.for-traefik.yml index 83fb64ff..547712b6 100644 --- a/docs/deploying/docker-compose.for-traefik.yml +++ b/docs/deploying/docker-compose.for-traefik.yml @@ -7,30 +7,30 @@ services: image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped volumes: - - db:/var/lib/conduwuit - - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. - #- ./conduwuit.toml:/etc/conduwuit.toml + - db:/var/lib/continuwuity + - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. + #- ./continuwuity.toml:/etc/continuwuity.toml networks: - proxy environment: - CONDUWUIT_SERVER_NAME: your.server.name.example # EDIT THIS - CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit - CONDUWUIT_PORT: 6167 # should match the loadbalancer traefik label - CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB - CONDUWUIT_ALLOW_REGISTRATION: 'true' - CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed. - #CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true' - CONDUWUIT_ALLOW_FEDERATION: 'true' - CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true' - CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]' - #CONDUWUIT_LOG: warn,state_res=warn - CONDUWUIT_ADDRESS: 0.0.0.0 - #CONDUWUIT_CONFIG: '/etc/conduwuit.toml' # Uncomment if you mapped config toml above + CONTINUWUITY_SERVER_NAME: your.server.name.example # EDIT THIS + CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity + CONTINUWUITY_PORT: 6167 # should match the loadbalancer traefik label + CONTINUWUITY_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB + CONTINUWUITY_ALLOW_REGISTRATION: 'true' + CONTINUWUITY_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed. 
+ #CONTINUWUITY_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true' + CONTINUWUITY_ALLOW_FEDERATION: 'true' + CONTINUWUITY_ALLOW_CHECK_FOR_UPDATES: 'true' + CONTINUWUITY_TRUSTED_SERVERS: '["matrix.org"]' + #CONTINUWUITY_LOG: warn,state_res=warn + CONTINUWUITY_ADDRESS: 0.0.0.0 + #CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above - # We need some way to serve the client and server .well-known json. The simplest way is via the CONDUWUIT_WELL_KNOWN - # variable / config option, there are multiple ways to do this, e.g. in the conduwuit.toml file, and in a separate + # We need some way to serve the client and server .well-known json. The simplest way is via the CONTINUWUITY_WELL_KNOWN + # variable / config option, there are multiple ways to do this, e.g. in the continuwuity.toml file, and in a separate # see the override file for more information about delegation - CONDUWUIT_WELL_KNOWN: | + CONTINUWUITY_WELL_KNOWN: | { client=https://your.server.name.example, server=your.server.name.example:443 diff --git a/docs/deploying/docker-compose.override.yml b/docs/deploying/docker-compose.override.yml index ec82fac3..168b1ae6 100644 --- a/docs/deploying/docker-compose.override.yml +++ b/docs/deploying/docker-compose.override.yml @@ -6,11 +6,11 @@ services: - "traefik.enable=true" - "traefik.docker.network=proxy" # Change this to the name of your Traefik docker proxy network - - "traefik.http.routers.to-conduwuit.rule=Host(`.`)" # Change to the address on which Continuwuity is hosted - - "traefik.http.routers.to-conduwuit.tls=true" - - "traefik.http.routers.to-conduwuit.tls.certresolver=letsencrypt" - - "traefik.http.routers.to-conduwuit.middlewares=cors-headers@docker" - - "traefik.http.services.to_conduwuit.loadbalancer.server.port=6167" + - "traefik.http.routers.to-continuwuity.rule=Host(`.`)" # Change to the address on which Continuwuity is hosted + - "traefik.http.routers.to-continuwuity.tls=true" + - "traefik.http.routers.to-continuwuity.tls.certresolver=letsencrypt" + - "traefik.http.routers.to-continuwuity.middlewares=cors-headers@docker" + - "traefik.http.services.to_continuwuity.loadbalancer.server.port=6167" - "traefik.http.middlewares.cors-headers.headers.accessControlAllowOriginList=*" - "traefik.http.middlewares.cors-headers.headers.accessControlAllowHeaders=Origin, X-Requested-With, Content-Type, Accept, Authorization" diff --git a/docs/deploying/docker-compose.with-caddy.yml b/docs/deploying/docker-compose.with-caddy.yml index 9ee98428..3dfc9d85 100644 --- a/docs/deploying/docker-compose.with-caddy.yml +++ b/docs/deploying/docker-compose.with-caddy.yml @@ -25,23 +25,23 @@ services: image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped volumes: - - db:/var/lib/conduwuit + - db:/var/lib/continuwuity - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. - #- ./conduwuit.toml:/etc/conduwuit.toml + #- ./continuwuity.toml:/etc/continuwuity.toml environment: - CONDUWUIT_SERVER_NAME: example.com # EDIT THIS - CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit - CONDUWUIT_PORT: 6167 - CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB - CONDUWUIT_ALLOW_REGISTRATION: 'true' - CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed. 
- #CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true' - CONDUWUIT_ALLOW_FEDERATION: 'true' - CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true' - CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]' - #CONDUWUIT_LOG: warn,state_res=warn - CONDUWUIT_ADDRESS: 0.0.0.0 - #CONDUWUIT_CONFIG: '/etc/conduwuit.toml' # Uncomment if you mapped config toml above + CONTINUWUITY_SERVER_NAME: example.com # EDIT THIS + CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity + CONTINUWUITY_PORT: 6167 + CONTINUWUITY_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB + CONTINUWUITY_ALLOW_REGISTRATION: 'true' + CONTINUWUITY_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed. + #CONTINUWUITY_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true' + CONTINUWUITY_ALLOW_FEDERATION: 'true' + CONTINUWUITY_ALLOW_CHECK_FOR_UPDATES: 'true' + CONTINUWUITY_TRUSTED_SERVERS: '["matrix.org"]' + #CONTINUWUITY_LOG: warn,state_res=warn + CONTINUWUITY_ADDRESS: 0.0.0.0 + #CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above networks: - caddy labels: diff --git a/docs/deploying/docker-compose.with-traefik.yml b/docs/deploying/docker-compose.with-traefik.yml index a45893da..9acc4221 100644 --- a/docs/deploying/docker-compose.with-traefik.yml +++ b/docs/deploying/docker-compose.with-traefik.yml @@ -7,38 +7,38 @@ services: image: forgejo.ellis.link/continuwuation/continuwuity:latest restart: unless-stopped volumes: - - db:/var/lib/conduwuit + - db:/var/lib/continuwuity - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. - #- ./conduwuit.toml:/etc/conduwuit.toml + #- ./continuwuity.toml:/etc/continuwuity.toml networks: - proxy environment: - CONDUWUIT_SERVER_NAME: your.server.name.example # EDIT THIS - CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]' - CONDUWUIT_ALLOW_REGISTRATION: 'false' # After setting a secure registration token, you can enable this - CONDUWUIT_REGISTRATION_TOKEN: "" # This is a token you can use to register on the server - #CONDUWUIT_REGISTRATION_TOKEN_FILE: "" # Alternatively you can configure a path to a token file to read - CONDUWUIT_ADDRESS: 0.0.0.0 - CONDUWUIT_PORT: 6167 # you need to match this with the traefik load balancer label if you're want to change it - CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit - #CONDUWUIT_CONFIG: '/etc/conduit.toml' # Uncomment if you mapped config toml above + CONTINUWUITY_SERVER_NAME: your.server.name.example # EDIT THIS + CONTINUWUITY_TRUSTED_SERVERS: '["matrix.org"]' + CONTINUWUITY_ALLOW_REGISTRATION: 'false' # After setting a secure registration token, you can enable this + CONTINUWUITY_REGISTRATION_TOKEN: "" # This is a token you can use to register on the server + #CONTINUWUITY_REGISTRATION_TOKEN_FILE: "" # Alternatively you can configure a path to a token file to read + CONTINUWUITY_ADDRESS: 0.0.0.0 + CONTINUWUITY_PORT: 6167 # you need to match this with the traefik load balancer label if you're want to change it + CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity + #CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above ### Uncomment and change values as desired, note that Continuwuity has plenty of config options, so you should check out the example example config too # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging - # CONDUWUIT_LOG: info # default is: "warn,state_res=warn" - # 
CONDUWUIT_ALLOW_ENCRYPTION: 'true' - # CONDUWUIT_ALLOW_FEDERATION: 'true' - # CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true' - # CONDUWUIT_ALLOW_INCOMING_PRESENCE: true - # CONDUWUIT_ALLOW_OUTGOING_PRESENCE: true - # CONDUWUIT_ALLOW_LOCAL_PRESENCE: true - # CONDUWUIT_WORKERS: 10 - # CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB - # CONDUWUIT_NEW_USER_DISPLAYNAME_SUFFIX = "🏳<200d>⚧" + # CONTINUWUITY_LOG: info # default is: "warn,state_res=warn" + # CONTINUWUITY_ALLOW_ENCRYPTION: 'true' + # CONTINUWUITY_ALLOW_FEDERATION: 'true' + # CONTINUWUITY_ALLOW_CHECK_FOR_UPDATES: 'true' + # CONTINUWUITY_ALLOW_INCOMING_PRESENCE: true + # CONTINUWUITY_ALLOW_OUTGOING_PRESENCE: true + # CONTINUWUITY_ALLOW_LOCAL_PRESENCE: true + # CONTINUWUITY_WORKERS: 10 + # CONTINUWUITY_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB + # CONTINUWUITY_NEW_USER_DISPLAYNAME_SUFFIX = "🏳<200d>⚧" - # We need some way to serve the client and server .well-known json. The simplest way is via the CONDUWUIT_WELL_KNOWN - # variable / config option, there are multiple ways to do this, e.g. in the conduwuit.toml file, and in a separate + # We need some way to serve the client and server .well-known json. The simplest way is via the CONTINUWUITY_WELL_KNOWN + # variable / config option, there are multiple ways to do this, e.g. in the continuwuity.toml file, and in a separate # reverse proxy, but since you do not have a reverse proxy and following this guide, this example is included - CONDUWUIT_WELL_KNOWN: | + CONTINUWUITY_WELL_KNOWN: | { client=https://your.server.name.example, server=your.server.name.example:443 diff --git a/docs/deploying/docker-compose.yml b/docs/deploying/docker-compose.yml index 1a3ab811..fbb50e35 100644 --- a/docs/deploying/docker-compose.yml +++ b/docs/deploying/docker-compose.yml @@ -9,22 +9,22 @@ services: ports: - 8448:6167 volumes: - - db:/var/lib/conduwuit - #- ./conduwuit.toml:/etc/conduwuit.toml + - db:/var/lib/continuwuity + #- ./continuwuity.toml:/etc/continuwuity.toml environment: - CONDUWUIT_SERVER_NAME: your.server.name # EDIT THIS - CONDUWUIT_DATABASE_PATH: /var/lib/conduwuit - CONDUWUIT_PORT: 6167 - CONDUWUIT_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB - CONDUWUIT_ALLOW_REGISTRATION: 'true' - CONDUWUIT_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed. - #CONDUWUIT_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true' - CONDUWUIT_ALLOW_FEDERATION: 'true' - CONDUWUIT_ALLOW_CHECK_FOR_UPDATES: 'true' - CONDUWUIT_TRUSTED_SERVERS: '["matrix.org"]' - #CONDUWUIT_LOG: warn,state_res=warn - CONDUWUIT_ADDRESS: 0.0.0.0 - #CONDUWUIT_CONFIG: '/etc/conduwuit.toml' # Uncomment if you mapped config toml above + CONTINUWUITY_SERVER_NAME: your.server.name # EDIT THIS + CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity + CONTINUWUITY_PORT: 6167 + CONTINUWUITY_MAX_REQUEST_SIZE: 20000000 # in bytes, ~20 MB + CONTINUWUITY_ALLOW_REGISTRATION: 'true' + CONTINUWUITY_REGISTRATION_TOKEN: 'YOUR_TOKEN' # A registration token is required when registration is allowed. + #CONTINUWUITY_YES_I_AM_VERY_VERY_SURE_I_WANT_AN_OPEN_REGISTRATION_SERVER_PRONE_TO_ABUSE: 'true' + CONTINUWUITY_ALLOW_FEDERATION: 'true' + CONTINUWUITY_ALLOW_CHECK_FOR_UPDATES: 'true' + CONTINUWUITY_TRUSTED_SERVERS: '["matrix.org"]' + #CONTINUWUITY_LOG: warn,state_res=warn + CONTINUWUITY_ADDRESS: 0.0.0.0 + #CONTINUWUITY_CONFIG: '/etc/continuwuity.toml' # Uncomment if you mapped config toml above # ### Uncomment if you want to use your own Element-Web App. 
### Note: You need to provide a config.json for Element and you also need a second diff --git a/docs/deploying/docker.md b/docs/deploying/docker.md index 08a0dc4f..051ed89b 100644 --- a/docs/deploying/docker.md +++ b/docs/deploying/docker.md @@ -30,16 +30,16 @@ When you have the image you can simply run it with ```bash docker run -d -p 8448:6167 \ - -v db:/var/lib/conduwuit/ \ - -e CONDUWUIT_SERVER_NAME="your.server.name" \ - -e CONDUWUIT_ALLOW_REGISTRATION=false \ - --name conduwuit $LINK + -v db:/var/lib/continuwuity/ \ + -e CONTINUWUITY_SERVER_NAME="your.server.name" \ + -e CONTINUWUITY_ALLOW_REGISTRATION=false \ + --name continuwuity $LINK ``` or you can use [docker compose](#docker-compose). The `-d` flag lets the container run in detached mode. You may supply an -optional `conduwuit.toml` config file, the example config can be found +optional `continuwuity.toml` config file, the example config can be found [here](../configuration/examples.md). You can pass in different env vars to change config values on the fly. You can even configure Continuwuity completely by using env vars. For an overview of possible values, please take a look at the diff --git a/docs/deploying/generic.md b/docs/deploying/generic.md index 46b9b439..9128f346 100644 --- a/docs/deploying/generic.md +++ b/docs/deploying/generic.md @@ -115,7 +115,7 @@ ReadWritePaths=/path/to/custom/database/path ## Creating the Continuwuity configuration file Now we need to create the Continuwuity's config file in -`/etc/conduwuit/conduwuit.toml`. The example config can be found at +`/etc/continuwuity/continuwuity.toml`. The example config can be found at [conduwuit-example.toml](../configuration/examples.md). **Please take a moment to read the config. You need to change at least the diff --git a/docs/development/hot_reload.md b/docs/development/hot_reload.md index ecfb6396..194ea3bc 100644 --- a/docs/development/hot_reload.md +++ b/docs/development/hot_reload.md @@ -190,7 +190,7 @@ The initial implementation PR is available [here][1]. - [Workspace-level metadata (cargo-deb)](https://github.com/kornelski/cargo-deb/issues/68) -[1]: https://github.com/girlbossceo/conduwuit/pull/387 +[1]: https://forgejo.ellis.link/continuwuation/continuwuity/pulls/387 [2]: https://wiki.musl-libc.org/functional-differences-from-glibc.html#Unloading-libraries [3]: https://github.com/rust-lang/rust/issues/28794 [4]: https://github.com/rust-lang/rust/issues/28794#issuecomment-368693049 diff --git a/docs/development/testing.md b/docs/development/testing.md index a577698a..d28bb874 100644 --- a/docs/development/testing.md +++ b/docs/development/testing.md @@ -24,8 +24,9 @@ and run the script. If you're on macOS and need to build an image, run `nix build .#linux-complement`. We have a Complement fork as some tests have needed to be fixed. 
This can be found -at: +at: -[ci-workflows]: https://github.com/girlbossceo/conduwuit/actions/workflows/ci.yml?query=event%3Apush+is%3Asuccess+actor%3Agirlbossceo +[ci-workflows]: +https://forgejo.ellis.link/continuwuation/continuwuity/actions/?workflow=ci.yml&actor=0&status=1 [complement]: https://github.com/matrix-org/complement [direnv]: https://direnv.net/docs/hook.html diff --git a/nix/pkgs/oci-image/default.nix b/nix/pkgs/oci-image/default.nix index 1650053d..953407ef 100644 --- a/nix/pkgs/oci-image/default.nix +++ b/nix/pkgs/oci-image/default.nix @@ -33,13 +33,13 @@ dockerTools.buildLayeredImage { "; "org.opencontainers.image.created" ="@${toString inputs.self.lastModified}"; "org.opencontainers.image.description" = "a very cool Matrix chat homeserver written in Rust"; - "org.opencontainers.image.documentation" = "https://conduwuit.puppyirl.gay/"; + "org.opencontainers.image.documentation" = "https://continuwuity.org/"; "org.opencontainers.image.licenses" = "Apache-2.0"; "org.opencontainers.image.revision" = inputs.self.rev or inputs.self.dirtyRev or ""; - "org.opencontainers.image.source" = "https://github.com/girlbossceo/conduwuit"; + "org.opencontainers.image.source" = "https://forgejo.ellis.link/continuwuation/continuwuity"; "org.opencontainers.image.title" = main.pname; - "org.opencontainers.image.url" = "https://conduwuit.puppyirl.gay/"; - "org.opencontainers.image.vendor" = "girlbossceo"; + "org.opencontainers.image.url" = "https://continuwuity.org/"; + "org.opencontainers.image.vendor" = "continuwuation"; "org.opencontainers.image.version" = main.version; }; }; diff --git a/src/admin/processor.rs b/src/admin/processor.rs index 8282a846..f7b7140f 100644 --- a/src/admin/processor.rs +++ b/src/admin/processor.rs @@ -94,7 +94,7 @@ async fn process_command(services: Arc, input: &CommandInput) -> Proce #[allow(clippy::result_large_err)] fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult { let link = - "Please submit a [bug report](https://github.com/girlbossceo/conduwuit/issues/new). 🥺"; + "Please submit a [bug report](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new). 🥺"; let msg = format!("Panic occurred while processing command:\n```\n{error:#?}\n```\n{link}"); let content = RoomMessageEventContent::notice_markdown(msg); error!("Panic while processing command: {error:?}"); diff --git a/src/core/config/check.rs b/src/core/config/check.rs index f9d51eeb..ded9533d 100644 --- a/src/core/config/check.rs +++ b/src/core/config/check.rs @@ -118,7 +118,7 @@ pub fn check(config: &Config) -> Result { if cfg!(not(debug_assertions)) && config.server_name == "your.server.name" { return Err!(Config( "server_name", - "You must specify a valid server name for production usage of conduwuit." + "You must specify a valid server name for production usage of continuwuity." 
)); } @@ -290,7 +290,7 @@ fn warn_deprecated(config: &Config) { if was_deprecated { warn!( - "Read conduwuit config documentation at https://conduwuit.puppyirl.gay/configuration.html and check your \ + "Read continuwuity config documentation at https://continuwuity.org/configuration.html and check your \ configuration if any new configuration parameters should be adjusted" ); } diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 5648a126..66ed0b2e 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -27,7 +27,7 @@ use self::proxy::ProxyConfig; pub use self::{check::check, manager::Manager}; use crate::{Result, err, error::Error, utils::sys}; -/// All the config options for conduwuit. +/// All the config options for continuwuity. #[allow(clippy::struct_excessive_bools)] #[allow(rustdoc::broken_intra_doc_links, rustdoc::bare_urls)] #[derive(Clone, Debug, Deserialize)] @@ -35,7 +35,7 @@ use crate::{Result, err, error::Error, utils::sys}; filename = "conduwuit-example.toml", section = "global", undocumented = "# This item is undocumented. Please contribute documentation for it.", - header = r#"### conduwuit Configuration + header = r#"### continuwuity Configuration ### ### THIS FILE IS GENERATED. CHANGES/CONTRIBUTIONS IN THE REPO WILL BE ### OVERWRITTEN! @@ -50,7 +50,7 @@ use crate::{Result, err, error::Error, utils::sys}; ### that say "YOU NEED TO EDIT THIS". ### ### For more information, see: -### https://conduwuit.puppyirl.gay/configuration.html +### https://continuwuity.org/configuration.html "#, ignore = "catchall well_known tls blurhashing allow_invalid_tls_certificates_yes_i_know_what_the_fuck_i_am_doing_with_this_and_i_know_this_is_insecure" )] @@ -59,7 +59,7 @@ pub struct Config { /// suffix for user and room IDs/aliases. /// /// See the docs for reverse proxying and delegation: - /// https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy + /// https://continuwuity.org/deploying/generic.html#setting-up-the-reverse-proxy /// /// Also see the `[global.well_known]` config section at the very bottom. /// @@ -70,10 +70,10 @@ pub struct Config { /// YOU NEED TO EDIT THIS. THIS CANNOT BE CHANGED AFTER WITHOUT A DATABASE /// WIPE. /// - /// example: "conduwuit.woof" + /// example: "continuwuity.org" pub server_name: OwnedServerName, - /// The default address (IPv4 or IPv6) conduwuit will listen on. + /// The default address (IPv4 or IPv6) continuwuity will listen on. /// /// If you are using Docker or a container NAT networking setup, this must /// be "0.0.0.0". @@ -85,10 +85,10 @@ pub struct Config { #[serde(default = "default_address")] address: ListeningAddr, - /// The port(s) conduwuit will listen on. + /// The port(s) continuwuity will listen on. /// /// For reverse proxying, see: - /// https://conduwuit.puppyirl.gay/deploying/generic.html#setting-up-the-reverse-proxy + /// https://continuwuity.org/deploying/generic.html#setting-up-the-reverse-proxy /// /// If you are using Docker, don't change this, you'll need to map an /// external port to this. @@ -103,16 +103,17 @@ pub struct Config { #[serde(default)] pub tls: TlsConfig, - /// The UNIX socket conduwuit will listen on. + /// The UNIX socket continuwuity will listen on. /// - /// conduwuit cannot listen on both an IP address and a UNIX socket. If + /// continuwuity cannot listen on both an IP address and a UNIX socket. If /// listening on a UNIX socket, you MUST remove/comment the `address` key. 
/// /// Remember to make sure that your reverse proxy has access to this socket - /// file, either by adding your reverse proxy to the 'conduwuit' group or - /// granting world R/W permissions with `unix_socket_perms` (666 minimum). + /// file, either by adding your reverse proxy to the appropriate user group + /// or granting world R/W permissions with `unix_socket_perms` (666 + /// minimum). /// - /// example: "/run/conduwuit/conduwuit.sock" + /// example: "/run/continuwuity/continuwuity.sock" pub unix_socket_path: Option, /// The default permissions (in octal) to create the UNIX socket with. @@ -121,22 +122,22 @@ pub struct Config { #[serde(default = "default_unix_socket_perms")] pub unix_socket_perms: u32, - /// This is the only directory where conduwuit will save its data, including - /// media. Note: this was previously "/var/lib/matrix-conduit". + /// This is the only directory where continuwuity will save its data, + /// including media. Note: this was previously "/var/lib/matrix-conduit". /// /// YOU NEED TO EDIT THIS. /// - /// example: "/var/lib/conduwuit" + /// example: "/var/lib/continuwuity" pub database_path: PathBuf, - /// conduwuit supports online database backups using RocksDB's Backup engine - /// API. To use this, set a database backup path that conduwuit can write - /// to. + /// continuwuity supports online database backups using RocksDB's Backup + /// engine API. To use this, set a database backup path that continuwuity + /// can write to. /// /// For more information, see: - /// https://conduwuit.puppyirl.gay/maintenance.html#backups + /// https://continuwuity.org/maintenance.html#backups /// - /// example: "/opt/conduwuit-db-backups" + /// example: "/opt/continuwuity-db-backups" pub database_backup_path: Option, /// The amount of online RocksDB database backups to keep/retain, if using @@ -160,7 +161,7 @@ pub struct Config { #[serde(default = "default_new_user_displayname_suffix")] pub new_user_displayname_suffix: String, - /// If enabled, conduwuit will send a simple GET request periodically to + /// If enabled, continuwuity will send a simple GET request periodically to /// `https://continuwuity.org/.well-known/continuwuity/announcements` for any new /// announcements or major updates. This is not an update check endpoint. /// @@ -168,8 +169,8 @@ pub struct Config { #[serde(alias = "allow_check_for_updates", default = "true_fn")] pub allow_announcements_check: bool, - /// Set this to any float value to multiply conduwuit's in-memory LRU caches - /// with such as "auth_chain_cache_capacity". + /// Set this to any float value to multiply continuwuity's in-memory LRU + /// caches with such as "auth_chain_cache_capacity". /// /// May be useful if you have significant memory to spare to increase /// performance. @@ -186,7 +187,7 @@ pub struct Config { )] pub cache_capacity_modifier: f64, - /// Set this to any float value in megabytes for conduwuit to tell the + /// Set this to any float value in megabytes for continuwuity to tell the /// database engine that this much memory is available for database read /// caches. /// @@ -202,7 +203,7 @@ pub struct Config { #[serde(default = "default_db_cache_capacity_mb")] pub db_cache_capacity_mb: f64, - /// Set this to any float value in megabytes for conduwuit to tell the + /// Set this to any float value in megabytes for continuwuity to tell the /// database engine that this much memory is available for database write /// caches. 
/// @@ -319,9 +320,9 @@ pub struct Config { /// Enable using *only* TCP for querying your specified nameservers instead /// of UDP. /// - /// If you are running conduwuit in a container environment, this config + /// If you are running continuwuity in a container environment, this config /// option may need to be enabled. For more details, see: - /// https://conduwuit.puppyirl.gay/troubleshooting.html#potential-dns-issues-when-using-docker + /// https://continuwuity.org/troubleshooting.html#potential-dns-issues-when-using-docker #[serde(default)] pub query_over_tcp_only: bool, @@ -534,9 +535,9 @@ pub struct Config { /// tokens. Multiple tokens can be added if you separate them with /// whitespace /// - /// conduwuit must be able to access the file, and it must not be empty + /// continuwuity must be able to access the file, and it must not be empty /// - /// example: "/etc/conduwuit/.reg_token" + /// example: "/etc/continuwuity/.reg_token" pub registration_token_file: Option, /// Controls whether encrypted rooms and events are allowed. @@ -627,16 +628,16 @@ pub struct Config { pub allow_room_creation: bool, /// Set to false to disable users from joining or creating room versions - /// that aren't officially supported by conduwuit. + /// that aren't officially supported by continuwuity. /// - /// conduwuit officially supports room versions 6 - 11. + /// continuwuity officially supports room versions 6 - 11. /// - /// conduwuit has slightly experimental (though works fine in practice) + /// continuwuity has slightly experimental (though works fine in practice) /// support for versions 3 - 5. #[serde(default = "true_fn")] pub allow_unstable_room_versions: bool, - /// Default room version conduwuit will create rooms with. + /// Default room version continuwuity will create rooms with. /// /// Per spec, room version 11 is the default. /// @@ -710,7 +711,7 @@ pub struct Config { /// Servers listed here will be used to gather public keys of other servers /// (notary trusted key servers). /// - /// Currently, conduwuit doesn't support inbound batched key requests, so + /// Currently, continuwuity doesn't support inbound batched key requests, so /// this list should only contain other Synapse servers. /// /// example: ["matrix.org", "tchncs.de"] @@ -755,7 +756,7 @@ pub struct Config { #[serde(default = "default_trusted_server_batch_size")] pub trusted_server_batch_size: usize, - /// Max log level for conduwuit. Allows debug, info, warn, or error. + /// Max log level for continuwuity. Allows debug, info, warn, or error. /// /// See also: /// https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives @@ -780,8 +781,9 @@ pub struct Config { #[serde(default = "default_log_span_events")] pub log_span_events: String, - /// Configures whether CONDUWUIT_LOG EnvFilter matches values using regular - /// expressions. See the tracing_subscriber documentation on Directives. + /// Configures whether CONTINUWUITY_LOG EnvFilter matches values using + /// regular expressions. See the tracing_subscriber documentation on + /// Directives. /// /// default: true #[serde(default = "true_fn")] @@ -863,7 +865,7 @@ pub struct Config { /// This takes priority over "turn_secret" first, and falls back to /// "turn_secret" if invalid or failed to open. /// - /// example: "/etc/conduwuit/.turn_secret" + /// example: "/etc/continuwuity/.turn_secret" pub turn_secret_file: Option, /// TURN TTL, in seconds. 
@@ -872,12 +874,12 @@ pub struct Config { #[serde(default = "default_turn_ttl")] pub turn_ttl: u64, - /// List/vector of room IDs or room aliases that conduwuit will make newly - /// registered users join. The rooms specified must be rooms that you have - /// joined at least once on the server, and must be public. + /// List/vector of room IDs or room aliases that continuwuity will make + /// newly registered users join. The rooms specified must be rooms that you + /// have joined at least once on the server, and must be public. /// - /// example: ["#conduwuit:puppygock.gay", - /// "!eoIzvAvVwY23LPDay8:puppygock.gay"] + /// example: ["#continuwuity:continuwuity.org", + /// "!main-1:continuwuity.org"] /// /// default: [] #[serde(default = "Vec::new")] @@ -902,10 +904,10 @@ pub struct Config { #[serde(default)] pub auto_deactivate_banned_room_attempts: bool, - /// RocksDB log level. This is not the same as conduwuit's log level. This - /// is the log level for the RocksDB engine/library which show up in your - /// database folder/path as `LOG` files. conduwuit will log RocksDB errors - /// as normal through tracing or panics if severe for safety. + /// RocksDB log level. This is not the same as continuwuity's log level. + /// This is the log level for the RocksDB engine/library which show up in + /// your database folder/path as `LOG` files. continuwuity will log RocksDB + /// errors as normal through tracing or panics if severe for safety. /// /// default: "error" #[serde(default = "default_rocksdb_log_level")] @@ -930,7 +932,7 @@ pub struct Config { /// Set this to true to use RocksDB config options that are tailored to HDDs /// (slower device storage). /// - /// It is worth noting that by default, conduwuit will use RocksDB with + /// It is worth noting that by default, continuwuity will use RocksDB with /// Direct IO enabled. *Generally* speaking this improves performance as it /// bypasses buffered I/O (system page cache). However there is a potential /// chance that Direct IO may cause issues with database operations if your @@ -938,7 +940,7 @@ pub struct Config { /// possibly ZFS filesystem. RocksDB generally deals/corrects these issues /// but it cannot account for all setups. If you experience any weird /// RocksDB issues, try enabling this option as it turns off Direct IO and - /// feel free to report in the conduwuit Matrix room if this option fixes + /// feel free to report in the continuwuity Matrix room if this option fixes /// your DB issues. /// /// For more information, see: @@ -999,7 +1001,7 @@ pub struct Config { /// as they all differ. See their `kDefaultCompressionLevel`. /// /// Note when using the default value we may override it with a setting - /// tailored specifically conduwuit. + /// tailored specifically for continuwuity. /// /// default: 32767 #[serde(default = "default_rocksdb_compression_level")] @@ -1017,7 +1019,7 @@ pub struct Config { /// algorithm. /// /// Note when using the default value we may override it with a setting - /// tailored specifically conduwuit. + /// tailored specifically for continuwuity. 
/// /// default: 32767 #[serde(default = "default_rocksdb_bottommost_compression_level")] @@ -1059,13 +1061,13 @@ pub struct Config { /// 0 = AbsoluteConsistency /// 1 = TolerateCorruptedTailRecords (default) /// 2 = PointInTime (use me if trying to recover) - /// 3 = SkipAnyCorruptedRecord (you now voided your Conduwuit warranty) + /// 3 = SkipAnyCorruptedRecord (you now voided your Continuwuity warranty) /// /// For more information on these modes, see: /// https://github.com/facebook/rocksdb/wiki/WAL-Recovery-Modes /// /// For more details on recovering a corrupt database, see: - /// https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption + /// https://continuwuity.org/troubleshooting.html#database-corruption /// /// default: 1 #[serde(default = "default_rocksdb_recovery_mode")] @@ -1109,7 +1111,7 @@ pub struct Config { /// - Disabling repair mode and restarting the server is recommended after /// running the repair. /// - /// See https://conduwuit.puppyirl.gay/troubleshooting.html#database-corruption for more details on recovering a corrupt database. + /// See https://continuwuity.org/troubleshooting.html#database-corruption for more details on recovering a corrupt database. #[serde(default)] pub rocksdb_repair: bool, @@ -1134,7 +1136,7 @@ pub struct Config { /// Enables RocksDB compaction. You should never ever have to set this /// option to false. If you for some reason find yourself needing to use /// this option as part of troubleshooting or a bug, please reach out to us - /// in the conduwuit Matrix room with information and details. + /// in the continuwuity Matrix room with information and details. /// /// Disabling compaction will lead to a significantly bloated and /// explosively large database, gradually poor performance, unnecessarily @@ -1162,7 +1164,7 @@ pub struct Config { /// purposes such as recovering/recreating your admin room, or inviting /// yourself back. /// - /// See https://conduwuit.puppyirl.gay/troubleshooting.html#lost-access-to-admin-room for other ways to get back into your admin room. + /// See https://continuwuity.org/troubleshooting.html#lost-access-to-admin-room for other ways to get back into your admin room. /// /// Once this password is unset, all sessions will be logged out for /// security purposes. @@ -1178,8 +1180,8 @@ pub struct Config { /// Allow local (your server only) presence updates/requests. /// - /// Note that presence on conduwuit is very fast unlike Synapse's. If using - /// outgoing presence, this MUST be enabled. + /// Note that presence on continuwuity is very fast unlike Synapse's. If + /// using outgoing presence, this MUST be enabled. #[serde(default = "true_fn")] pub allow_local_presence: bool, @@ -1187,7 +1189,7 @@ pub struct Config { /// /// This option receives presence updates from other servers, but does not /// send any unless `allow_outgoing_presence` is true. Note that presence on - /// conduwuit is very fast unlike Synapse's. + /// continuwuity is very fast unlike Synapse's. #[serde(default = "true_fn")] pub allow_incoming_presence: bool, @@ -1195,8 +1197,8 @@ pub struct Config { /// /// This option sends presence updates to other servers, but does not /// receive any unless `allow_incoming_presence` is true. Note that presence - /// on conduwuit is very fast unlike Synapse's. If using outgoing presence, - /// you MUST enable `allow_local_presence` as well. + /// on continuwuity is very fast unlike Synapse's. If using outgoing + /// presence, you MUST enable `allow_local_presence` as well. 
#[serde(default = "true_fn")] pub allow_outgoing_presence: bool, @@ -1259,8 +1261,8 @@ pub struct Config { #[serde(default = "default_typing_client_timeout_max_s")] pub typing_client_timeout_max_s: u64, - /// Set this to true for conduwuit to compress HTTP response bodies using - /// zstd. This option does nothing if conduwuit was not built with + /// Set this to true for continuwuity to compress HTTP response bodies using + /// zstd. This option does nothing if continuwuity was not built with /// `zstd_compression` feature. Please be aware that enabling HTTP /// compression may weaken TLS. Most users should not need to enable this. /// See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH @@ -1268,8 +1270,8 @@ pub struct Config { #[serde(default)] pub zstd_compression: bool, - /// Set this to true for conduwuit to compress HTTP response bodies using - /// gzip. This option does nothing if conduwuit was not built with + /// Set this to true for continuwuity to compress HTTP response bodies using + /// gzip. This option does nothing if continuwuity was not built with /// `gzip_compression` feature. Please be aware that enabling HTTP /// compression may weaken TLS. Most users should not need to enable this. /// See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH before @@ -1280,8 +1282,8 @@ pub struct Config { #[serde(default)] pub gzip_compression: bool, - /// Set this to true for conduwuit to compress HTTP response bodies using - /// brotli. This option does nothing if conduwuit was not built with + /// Set this to true for continuwuity to compress HTTP response bodies using + /// brotli. This option does nothing if continuwuity was not built with /// `brotli_compression` feature. Please be aware that enabling HTTP /// compression may weaken TLS. Most users should not need to enable this. /// See https://breachattack.com/ and https://wikipedia.org/wiki/BREACH @@ -1342,7 +1344,7 @@ pub struct Config { /// Otherwise setting this to false reduces filesystem clutter and overhead /// for managing these symlinks in the directory. This is now disabled by /// default. You may still return to upstream Conduit but you have to run - /// conduwuit at least once with this set to true and allow the + /// continuwuity at least once with this set to true and allow the /// media_startup_check to take place before shutting down to return to /// Conduit. #[serde(default)] @@ -1391,8 +1393,8 @@ pub struct Config { #[serde(default, with = "serde_regex")] pub allowed_remote_server_names: RegexSet, - /// Vector list of regex patterns of server names that conduwuit will refuse - /// to download remote media from. + /// Vector list of regex patterns of server names that continuwuity will + /// refuse to download remote media from. /// /// example: ["badserver\.tld$", "badphrase", "19dollarfortnitecards"] /// @@ -1410,7 +1412,7 @@ pub struct Config { #[serde(default, with = "serde_regex")] pub forbidden_remote_room_directory_server_names: RegexSet, - /// Vector list of regex patterns of server names that conduwuit will not + /// Vector list of regex patterns of server names that continuwuity will not /// send messages to the client from. /// /// Note that there is no way for clients to receive messages once a server @@ -1436,7 +1438,7 @@ pub struct Config { pub send_messages_from_ignored_users_to_client: bool, /// Vector list of IPv4 and IPv6 CIDR ranges / subnets *in quotes* that you - /// do not want conduwuit to send outbound requests to. 
Defaults to + /// do not want continuwuity to send outbound requests to. Defaults to /// RFC1918, unroutable, loopback, multicast, and testnet addresses for /// security. /// @@ -1604,26 +1606,26 @@ pub struct Config { /// Allow admins to enter commands in rooms other than "#admins" (admin /// room) by prefixing your message with "\!admin" or "\\!admin" followed up - /// a normal conduwuit admin command. The reply will be publicly visible to - /// the room, originating from the sender. + /// a normal continuwuity admin command. The reply will be publicly visible + /// to the room, originating from the sender. /// /// example: \\!admin debug ping puppygock.gay #[serde(default = "true_fn")] pub admin_escape_commands: bool, - /// Automatically activate the conduwuit admin room console / CLI on - /// startup. This option can also be enabled with `--console` conduwuit + /// Automatically activate the continuwuity admin room console / CLI on + /// startup. This option can also be enabled with `--console` continuwuity /// argument. #[serde(default)] pub admin_console_automatic: bool, /// List of admin commands to execute on startup. /// - /// This option can also be configured with the `--execute` conduwuit + /// This option can also be configured with the `--execute` continuwuity /// argument and can take standard shell commands and environment variables /// - /// For example: `./conduwuit --execute "server admin-notice conduwuit has - /// started up at $(date)"` + /// For example: `./continuwuity --execute "server admin-notice continuwuity + /// has started up at $(date)"` /// /// example: admin_execute = ["debug ping puppygock.gay", "debug echo hi"]` /// @@ -1633,7 +1635,7 @@ pub struct Config { /// Ignore errors in startup commands. /// - /// If false, conduwuit will error and fail to start if an admin execute + /// If false, continuwuity will error and fail to start if an admin execute /// command (`--execute` / `admin_execute`) fails. #[serde(default)] pub admin_execute_errors_ignore: bool, @@ -1658,17 +1660,16 @@ pub struct Config { /// The default room tag to apply on the admin room. /// /// On some clients like Element, the room tag "m.server_notice" is a - /// special pinned room at the very bottom of your room list. The conduwuit - /// admin room can be pinned here so you always have an easy-to-access - /// shortcut dedicated to your admin room. + /// special pinned room at the very bottom of your room list. The + /// continuwuity admin room can be pinned here so you always have an + /// easy-to-access shortcut dedicated to your admin room. /// /// default: "m.server_notice" #[serde(default = "default_admin_room_tag")] pub admin_room_tag: String, /// Sentry.io crash/panic reporting, performance monitoring/metrics, etc. - /// This is NOT enabled by default. conduwuit's default Sentry reporting - /// endpoint domain is `o4506996327251968.ingest.us.sentry.io`. + /// This is NOT enabled by default. #[serde(default)] pub sentry: bool, @@ -1679,7 +1680,7 @@ pub struct Config { #[serde(default = "default_sentry_endpoint")] pub sentry_endpoint: Option, - /// Report your conduwuit server_name in Sentry.io crash reports and + /// Report your continuwuity server_name in Sentry.io crash reports and /// metrics. #[serde(default)] pub sentry_send_server_name: bool, @@ -1720,7 +1721,7 @@ pub struct Config { /// Enable the tokio-console. This option is only relevant to developers. 
 ///
 /// For more information, see:
- /// https://conduwuit.puppyirl.gay/development.html#debugging-with-tokio-console
+ /// https://continuwuity.org/development.html#debugging-with-tokio-console
 #[serde(default)]
 pub tokio_console: bool,

diff --git a/src/service/admin/create.rs b/src/service/admin/create.rs
index cd0fc5a9..157b4d65 100644
--- a/src/service/admin/create.rs
+++ b/src/service/admin/create.rs
@@ -165,7 +165,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
 .timeline
 .build_and_append_pdu(
 PduBuilder::state(String::new(), &RoomTopicEventContent {
- topic: format!("Manage {} | Run commands prefixed with `!admin` | Run `!admin -h` for help | Documentation: https://conduwuit.puppyirl.gay/", services.config.server_name),
+ topic: format!("Manage {} | Run commands prefixed with `!admin` | Run `!admin -h` for help | Documentation: https://continuwuity.org/", services.config.server_name),
 }),
 server_user,
 &room_id,

From 066794fe90c4af11c5c4ae5ce55d7db2fe6cf2da Mon Sep 17 00:00:00 2001
From: Jade Ellis
Date: Sun, 11 May 2025 17:15:37 +0100
Subject: [PATCH 06/50] ci: Don't try build images on PR

---
 .forgejo/workflows/release-image.yml | 1 -
 1 file changed, 1 deletion(-)

diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml
index 704a3bbf..1c7457a5 100644
--- a/.forgejo/workflows/release-image.yml
+++ b/.forgejo/workflows/release-image.yml
@@ -3,7 +3,6 @@ concurrency:
 group: "release-image-${{ github.ref }}"
 on:
-  pull_request:
   push:
     paths-ignore:
       - "*.md"

From d03325c65a6749669924c0a66b98795fe8babf26 Mon Sep 17 00:00:00 2001
From: Jade Ellis
Date: Sun, 11 May 2025 17:27:54 +0100
Subject: [PATCH 07/50] chore: Set editorconfig for workflows

---
 .editorconfig | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/.editorconfig b/.editorconfig
index 2d7438a4..91f073bd 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -22,3 +22,7 @@ indent_size = 2
 [*.rs]
 indent_style = tab
 max_line_length = 98
+
+[{.forgejo/**/*.yml,.github/**/*.yml}]
+indent_size = 2
+indent_style = space

From f14725a51b9be2b6fb6fda597e57afe770e4cdca Mon Sep 17 00:00:00 2001
From: Jade Ellis
Date: Sun, 11 May 2025 17:42:57 +0100
Subject: [PATCH 08/50] ci: Check formatting

Also moves rustup installation to a separate workflow and enables caching.
The sccache action required a github.com api token, so we set all that up too.
---
 .forgejo/actions/rust-toolchain/action.yml | 45 ++++++++++++++++++++++
 .forgejo/actions/sccache/action.yml | 29 ++++++++++++++
 .forgejo/workflows/formatting.yml | 44 +++++++++++++++++++++
 .forgejo/workflows/release-image.yml | 7 +---
 4 files changed, 120 insertions(+), 5 deletions(-)
 create mode 100644 .forgejo/actions/rust-toolchain/action.yml
 create mode 100644 .forgejo/actions/sccache/action.yml
 create mode 100644 .forgejo/workflows/formatting.yml

diff --git a/.forgejo/actions/rust-toolchain/action.yml b/.forgejo/actions/rust-toolchain/action.yml
new file mode 100644
index 00000000..68f59d00
--- /dev/null
+++ b/.forgejo/actions/rust-toolchain/action.yml
@@ -0,0 +1,45 @@
+name: rust-toolchain
+description: |
+  Install a Rust toolchain using rustup.
+  See https://rust-lang.github.io/rustup/concepts/toolchains.html#toolchain-specification
+  for more information about toolchains.
+inputs:
+  toolchain:
+    description: |
+      Rust toolchain name.
+ See https://rust-lang.github.io/rustup/concepts/toolchains.html#toolchain-specification + required: false + target: + description: Target triple to install for this toolchain + required: false + components: + description: Space-separated list of components to be additionally installed for a new toolchain + required: false + +runs: + using: composite + steps: + - name: Cache rustup toolchains + uses: actions/cache@v3 + with: + path: | + ~/.rustup + !~/.rustup/tmp + !~/.rustup/downloads + # Requires repo to be cloned if toolchain is not specified + key: ${{ runner.os }}-rustup-${{ inputs.toolchain || hashFiles('**/rust-toolchain.toml') }} + - name: Install Rust toolchain + shell: bash + run: | + if ! command -v rustup &> /dev/null ; then + curl --proto '=https' --tlsv1.2 --retry 10 --retry-connrefused -fsSL "https://sh.rustup.rs" | sh -s -- --default-toolchain none -y + echo "${CARGO_HOME:-$HOME/.cargo}/bin" >> $GITHUB_PATH + fi + - shell: bash + run: | + set -x + ${{ inputs.toolchain && format('rustup override set {0}', inputs.toolchain) }} + ${{ inputs.target && format('rustup target add {0}', inputs.target) }} + ${{ inputs.components && format('rustup component add {0}', inputs.components) }} + cargo --version + rustc --version diff --git a/.forgejo/actions/sccache/action.yml b/.forgejo/actions/sccache/action.yml new file mode 100644 index 00000000..b5e5dcf4 --- /dev/null +++ b/.forgejo/actions/sccache/action.yml @@ -0,0 +1,29 @@ +name: sccache +description: | + Install sccache for caching builds in GitHub Actions. + +inputs: + token: + description: 'A Github PAT' + required: false + +runs: + using: composite + steps: + - name: Install sccache + uses: https://github.com/mozilla-actions/sccache-action@v0.0.9 + with: + token: ${{ inputs.token }} + - name: Configure sccache + uses: https://github.com/actions/github-script@v7 + with: + script: | + core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || ''); + core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || ''); + - shell: bash + run: | + echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV + echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV + echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV + echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV + echo "CMAKE_CUDA_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV diff --git a/.forgejo/workflows/formatting.yml b/.forgejo/workflows/formatting.yml new file mode 100644 index 00000000..e51560e7 --- /dev/null +++ b/.forgejo/workflows/formatting.yml @@ -0,0 +1,44 @@ +name: Rust Formatting + +on: + push: + pull_request: + +jobs: + format: + name: Format + runs-on: ubuntu-latest + env: + SCCACHE_GHA_ENABLED: "true" + RUSTC_WRAPPER: "sccache" + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Install rust + uses: ./.forgejo/actions/rust-toolchain + with: + toolchain: "nightly" + components: "rustfmt" + + - uses: https://github.com/actions/create-github-app-token@v2 + id: app-token + with: + app-id: ${{ vars.GH_APP_ID }} + private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} + github-api-url: https://api.github.com + owner: ${{ vars.GH_APP_OWNER }} + repositories: "" + - name: Install sccache + uses: ./.forgejo/actions/sccache + with: + token: ${{ steps.app-token.outputs.token }} + - name: Check formatting + run: | + cargo +nightly fmt --all -- --check + + - name: Show sccache stats + run: sccache --show-stats diff --git a/.forgejo/workflows/release-image.yml 
b/.forgejo/workflows/release-image.yml index 1c7457a5..f6064617 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -79,16 +79,13 @@ jobs: run: echo '${{ toJSON(fromJSON(needs.define-variables.outputs.build_matrix)) }}' - name: Echo matrix run: echo '${{ toJSON(matrix) }}' - - run: | - if ! command -v rustup &> /dev/null ; then - curl --proto '=https' --tlsv1.2 --retry 10 --retry-connrefused -fsSL "https://sh.rustup.rs" | sh -s -- --default-toolchain none -y - echo "${CARGO_HOME:-$HOME/.cargo}/bin" >> $GITHUB_PATH - fi - name: Checkout repository uses: actions/checkout@v4 with: persist-credentials: false + - name: Install rust + uses: ./.forgejo/actions/rust-toolchain - name: Cache timelord-cli installation id: cache-timelord-bin From ec08e16b9f6e01f356a3a32890742a293b9ecbd4 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 11 May 2025 19:39:44 +0100 Subject: [PATCH 09/50] build: Allow builder to decide on incremental or not --- Cargo.toml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 79f767a2..249ff84c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -745,7 +745,6 @@ incremental = true [profile.dev.package.conduwuit_core] inherits = "dev" -incremental = false #rustflags = [ # '--cfg', 'conduwuit_mods', # '-Ztime-passes', @@ -785,7 +784,6 @@ inherits = "dev" [profile.dev.package.'*'] inherits = "dev" debug = 'limited' -incremental = false codegen-units = 1 opt-level = 'z' #rustflags = [ @@ -807,7 +805,6 @@ inherits = "dev" strip = false opt-level = 0 codegen-units = 16 -incremental = false [profile.test.package.'*'] inherits = "dev" @@ -815,7 +812,6 @@ debug = 0 strip = false opt-level = 0 codegen-units = 16 -incremental = false ############################################################################### # From c5db43ba9aef530e02f0b6048eaa4a95662a8396 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 11 May 2025 19:43:51 +0100 Subject: [PATCH 10/50] chore: Docker ignore forgejo files --- .dockerignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.dockerignore b/.dockerignore index 8ca2e3f8..5054844f 100644 --- a/.dockerignore +++ b/.dockerignore @@ -15,6 +15,7 @@ docker/ .gitea .gitlab .github +.forgejo # Dot files .env From e31d261e668259eacd2c11799d927b3e78354b16 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 11 May 2025 19:43:56 +0100 Subject: [PATCH 11/50] ci: Run clippy check --- .forgejo/workflows/formatting.yml | 39 +++++++++++++++++++++++++------ 1 file changed, 32 insertions(+), 7 deletions(-) diff --git a/.forgejo/workflows/formatting.yml b/.forgejo/workflows/formatting.yml index e51560e7..332f98e2 100644 --- a/.forgejo/workflows/formatting.yml +++ b/.forgejo/workflows/formatting.yml @@ -1,29 +1,43 @@ name: Rust Formatting on: - push: - pull_request: + push: + pull_request: jobs: format: name: Format runs-on: ubuntu-latest - env: - SCCACHE_GHA_ENABLED: "true" - RUSTC_WRAPPER: "sccache" steps: - name: Checkout repository uses: actions/checkout@v4 with: persist-credentials: false - + - name: Install rust uses: ./.forgejo/actions/rust-toolchain with: toolchain: "nightly" components: "rustfmt" + - name: Check formatting + run: | + cargo +nightly fmt --all -- --check + + clippy: + name: Clippy + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Install rust + uses: ./.forgejo/actions/rust-toolchain + - uses: https://github.com/actions/create-github-app-token@v2 id: app-token with: @@ -36,9 +50,20 @@ 
jobs: uses: ./.forgejo/actions/sccache with: token: ${{ steps.app-token.outputs.token }} + - run: sudo apt-get update + - name: Install system dependencies + uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1 + with: + packages: clang liburing-dev + version: 1 - name: Check formatting run: | - cargo +nightly fmt --all -- --check + cargo clippy \ + --workspace \ + --locked \ + --profile test \ + -- \ + -D warnings - name: Show sccache stats run: sccache --show-stats From 034762c6197bc48f4515ae63966fbd1849b2cccd Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 11 May 2025 20:03:14 +0100 Subject: [PATCH 12/50] chore: Allow raw string hashes for metadata crate --- Cargo.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Cargo.toml b/Cargo.toml index 249ff84c..1abff107 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -988,3 +988,6 @@ let_underscore_future = { level = "allow", priority = 1 } # rust doesnt understand conduwuit's custom log macros literal_string_with_formatting_args = { level = "allow", priority = 1 } + + +needless_raw_string_hashes = "allow" From e200a7d991ccec06282360d4e6dccd6b08e23663 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 11 May 2025 20:23:30 +0100 Subject: [PATCH 13/50] ci: Cache Rust registry --- .forgejo/workflows/formatting.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/.forgejo/workflows/formatting.yml b/.forgejo/workflows/formatting.yml index 332f98e2..7ca327b6 100644 --- a/.forgejo/workflows/formatting.yml +++ b/.forgejo/workflows/formatting.yml @@ -56,6 +56,15 @@ jobs: with: packages: clang liburing-dev version: 1 + - name: Cache Rust registry + uses: actions/cache@v3 + with: + path: | + ~/.cargo/git + !~/.cargo/git/checkouts + ~/.cargo/registry + !~/.cargo/registry/src + key: rust-registry-${{hashFiles('**/Cargo.lock') }} - name: Check formatting run: | cargo clippy \ From b5d2ef9a4a7758bc74d3a91cec9b5fc8cf9055f7 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 11 May 2025 20:34:22 +0100 Subject: [PATCH 14/50] ci: Refactor timelord to its own action --- .forgejo/actions/timelord/action.yml | 46 ++++++++++++++++++++++++++++ .forgejo/workflows/release-image.yml | 26 +++------------- 2 files changed, 50 insertions(+), 22 deletions(-) create mode 100644 .forgejo/actions/timelord/action.yml diff --git a/.forgejo/actions/timelord/action.yml b/.forgejo/actions/timelord/action.yml new file mode 100644 index 00000000..bb9766d5 --- /dev/null +++ b/.forgejo/actions/timelord/action.yml @@ -0,0 +1,46 @@ +name: timelord +description: | + Use timelord to set file timestamps +inputs: + key: + description: | + The key to use for caching the timelord data. + This should be unique to the repository and the runner. + required: true + default: timelord-v0 + path: + description: | + The path to the directory to be timestamped. + This should be the root of the repository. + required: true + default: . 
+ +runs: + using: composite + steps: + - name: Cache timelord-cli installation + id: cache-timelord-bin + uses: actions/cache@v3 + with: + path: ~/.cargo/bin/timelord + key: timelord-cli-v3.0.1 + - name: Install timelord-cli + uses: https://github.com/cargo-bins/cargo-binstall@main + if: steps.cache-timelord-bin.outputs.cache-hit != 'true' + - run: cargo binstall timelord-cli@3.0.1 + shell: bash + if: steps.cache-timelord-bin.outputs.cache-hit != 'true' + + - name: Load timelord files + uses: actions/cache/restore@v3 + with: + path: /timelord/ + key: ${{ inputs.key }} + - name: Run timelord to set timestamps + shell: bash + run: timelord sync --source-dir ${{ inputs.path }} --cache-dir /timelord/ + - name: Save timelord + uses: actions/cache/save@v3 + with: + path: /timelord/ + key: ${{ inputs.key }} diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index f6064617..0735fec7 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -87,18 +87,6 @@ jobs: - name: Install rust uses: ./.forgejo/actions/rust-toolchain - - name: Cache timelord-cli installation - id: cache-timelord-bin - uses: actions/cache@v3 - with: - path: ~/.cargo/bin/timelord - key: timelord-cli-v3.0.1 - - name: Install timelord-cli - uses: https://github.com/cargo-bins/cargo-binstall@main - if: steps.cache-timelord-bin.outputs.cache-hit != 'true' - - run: cargo binstall timelord-cli@3.0.1 - if: steps.cache-timelord-bin.outputs.cache-hit != 'true' - - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Set up QEMU @@ -132,18 +120,12 @@ jobs: echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV - name: Get Git commit timestamps run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV - - name: Set up timelord - uses: actions/cache/restore@v3 + + - uses: ./.forgejo/actions/timelord with: - path: /timelord/ - key: timelord-v0 # Cache is already split per runner - - name: Run timelord to set timestamps - run: timelord sync --source-dir . --cache-dir /timelord/ - - name: Save timelord - uses: actions/cache/save@v3 - with: - path: /timelord/ key: timelord-v0 + path: . + - name: Build and push Docker image by digest id: build uses: docker/build-push-action@v6 From a325dfa56aa1f3a638b9fc6b760befa8ac780952 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 11 May 2025 20:39:50 +0100 Subject: [PATCH 15/50] ci: Use timelord in clippy check --- .forgejo/workflows/formatting.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.forgejo/workflows/formatting.yml b/.forgejo/workflows/formatting.yml index 7ca327b6..2f513b91 100644 --- a/.forgejo/workflows/formatting.yml +++ b/.forgejo/workflows/formatting.yml @@ -65,7 +65,11 @@ jobs: ~/.cargo/registry !~/.cargo/registry/src key: rust-registry-${{hashFiles('**/Cargo.lock') }} - - name: Check formatting + - uses: ./.forgejo/actions/timelord + with: + key: sccache-v0 + path: . 
+ - name: Clippy run: | cargo clippy \ --workspace \ @@ -75,4 +79,4 @@ jobs: -D warnings - name: Show sccache stats - run: sccache --show-stats + run: sccache --show-stats \ No newline at end of file From 1f57508879fd6bea3eec8068d0a11882943c18b1 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 20 May 2025 21:15:32 +0100 Subject: [PATCH 16/50] ci: Don't clippy check dependancies --- .forgejo/workflows/{formatting.yml => rust-checks.yml} | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) rename .forgejo/workflows/{formatting.yml => rust-checks.yml} (94%) diff --git a/.forgejo/workflows/formatting.yml b/.forgejo/workflows/rust-checks.yml similarity index 94% rename from .forgejo/workflows/formatting.yml rename to .forgejo/workflows/rust-checks.yml index 2f513b91..eef3bd0a 100644 --- a/.forgejo/workflows/formatting.yml +++ b/.forgejo/workflows/rust-checks.yml @@ -1,8 +1,7 @@ -name: Rust Formatting +name: Rust Checks on: push: - pull_request: jobs: format: @@ -65,7 +64,8 @@ jobs: ~/.cargo/registry !~/.cargo/registry/src key: rust-registry-${{hashFiles('**/Cargo.lock') }} - - uses: ./.forgejo/actions/timelord + - name: Timelord + uses: ./.forgejo/actions/timelord with: key: sccache-v0 path: . @@ -74,6 +74,7 @@ jobs: cargo clippy \ --workspace \ --locked \ + --no-deps \ --profile test \ -- \ -D warnings From a4ad72e11ddce01d64aa5e2e3a002c45f9c5b767 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 20 May 2025 21:17:11 +0100 Subject: [PATCH 17/50] ci: Run `cargo test` --- .forgejo/workflows/rust-checks.yml | 59 +++++++++++++++++++++++++++++- 1 file changed, 58 insertions(+), 1 deletion(-) diff --git a/.forgejo/workflows/rust-checks.yml b/.forgejo/workflows/rust-checks.yml index eef3bd0a..1feb9e89 100644 --- a/.forgejo/workflows/rust-checks.yml +++ b/.forgejo/workflows/rust-checks.yml @@ -80,4 +80,61 @@ jobs: -D warnings - name: Show sccache stats - run: sccache --show-stats \ No newline at end of file + run: sccache --show-stats + + cargo-test: + name: Cargo Test + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Install rust + uses: ./.forgejo/actions/rust-toolchain + + - uses: https://github.com/actions/create-github-app-token@v2 + id: app-token + with: + app-id: ${{ vars.GH_APP_ID }} + private-key: ${{ secrets.GH_APP_PRIVATE_KEY }} + github-api-url: https://api.github.com + owner: ${{ vars.GH_APP_OWNER }} + repositories: "" + - name: Install sccache + uses: ./.forgejo/actions/sccache + with: + token: ${{ steps.app-token.outputs.token }} + - run: sudo apt-get update + - name: Install system dependencies + uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1 + with: + packages: clang liburing-dev + version: 1 + - name: Cache Rust registry + uses: actions/cache@v3 + with: + path: | + ~/.cargo/git + !~/.cargo/git/checkouts + ~/.cargo/registry + !~/.cargo/registry/src + key: rust-registry-${{hashFiles('**/Cargo.lock') }} + - name: Timelord + uses: ./.forgejo/actions/timelord + with: + key: sccache-v0 + path: . 
+ - name: Cargo Test + run: | + cargo test \ + --workspace \ + --locked \ + --profile test \ + --all-targets \ + --no-fail-fast + + - name: Show sccache stats + run: sccache --show-stats From 4ed04b343a8c8c95ffb83cd7b35bc0c1601c36c5 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 20 May 2025 22:13:13 +0100 Subject: [PATCH 18/50] build: Use xtrace in bash scripts in Dockerfile --- docker/Dockerfile | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 3029282f..44e74180 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -18,13 +18,14 @@ ARG LLVM_VERSION=19 # Line three: for xx-verify RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ -apt-get update && apt-get install -y \ + apt-get update && apt-get install -y \ clang-${LLVM_VERSION} lld-${LLVM_VERSION} pkg-config make jq \ curl git \ file # Create symlinks for LLVM tools RUN <> /etc/environment # Configure pkg-config RUN <> /etc/environment echo "PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /etc/environment echo "PKG_CONFIG_ALLOW_CROSS=true" >> /etc/environment @@ -82,12 +85,14 @@ EOF # Configure cc to use clang version RUN <> /etc/environment echo "CXX=clang++" >> /etc/environment EOF # Cross-language LTO RUN <> /etc/environment echo "CXXFLAGS=-flto" >> /etc/environment # Linker is set to target-compatible clang by xx @@ -98,6 +103,7 @@ EOF ARG TARGET_CPU= RUN <> /etc/environment @@ -118,7 +124,6 @@ COPY . . ARG TARGETPLATFORM # Verify environment configuration -RUN cat /etc/environment RUN xx-cargo --print-target-triple # Conduwuit version info @@ -142,6 +147,7 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \ --mount=type=cache,target=/app/target \ bash <<'EOF' set -o allexport + set -o xtrace . 
/etc/environment TARGET_DIR=($(cargo metadata --no-deps --format-version 1 | \ jq -r ".target_directory")) @@ -162,6 +168,7 @@ EOF RUN --mount=type=cache,target=/usr/local/cargo/registry \ --mount=type=cache,target=/usr/local/cargo/git/db \ bash <<'EOF' + set -o xtrace mkdir /out/sbom typeset -A PACKAGES for BINARY in /out/sbin/*; do @@ -180,6 +187,7 @@ EOF # Extract dynamically linked dependencies RUN < Date: Tue, 20 May 2025 22:47:55 +0100 Subject: [PATCH 19/50] build: Split docker target cache by target platform --- docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 44e74180..e734fb81 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -144,7 +144,7 @@ ENV CONTINUWUITY_VERSION_EXTRA=$CONTINUWUITY_VERSION_EXTRA # Build the binary RUN --mount=type=cache,target=/usr/local/cargo/registry \ --mount=type=cache,target=/usr/local/cargo/git/db \ - --mount=type=cache,target=/app/target \ + --mount=type=cache,target=/app/target,id=cargo-target-${TARGETPLATFORM} \ bash <<'EOF' set -o allexport set -o xtrace From 7a46563f23c1e4527310c400b604707c2213e498 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 20 May 2025 22:56:51 +0100 Subject: [PATCH 20/50] ci: Cache docker image build mounts --- .forgejo/actions/rust-toolchain/action.yml | 8 ++++ .forgejo/workflows/release-image.yml | 49 +++++++++++++++++++++- 2 files changed, 56 insertions(+), 1 deletion(-) diff --git a/.forgejo/actions/rust-toolchain/action.yml b/.forgejo/actions/rust-toolchain/action.yml index 68f59d00..71fb96f5 100644 --- a/.forgejo/actions/rust-toolchain/action.yml +++ b/.forgejo/actions/rust-toolchain/action.yml @@ -15,6 +15,10 @@ inputs: components: description: Space-separated list of components to be additionally installed for a new toolchain required: false +outputs: + rustc_version: + description: The rustc version installed + value: ${{ steps.rustc-version.outputs.version }} runs: using: composite @@ -43,3 +47,7 @@ runs: ${{ inputs.components && format('rustup component add {0}', inputs.components) }} cargo --version rustc --version + - id: rustc-version + shell: bash + run: | + echo "version=$(rustc --version)" >> $GITHUB_OUTPUT diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 0735fec7..ec466c58 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -79,12 +79,13 @@ jobs: run: echo '${{ toJSON(fromJSON(needs.define-variables.outputs.build_matrix)) }}' - name: Echo matrix run: echo '${{ toJSON(matrix) }}' - + - name: Checkout repository uses: actions/checkout@v4 with: persist-credentials: false - name: Install rust + id: rust-toolchain uses: ./.forgejo/actions/rust-toolchain - name: Set up Docker Buildx @@ -126,6 +127,52 @@ jobs: key: timelord-v0 path: . 
+ - name: Cache Rust registry + uses: actions/cache@v3 + with: + path: | + .cargo/git + .cargo/git/checkouts + .cargo/registry + .cargo/registry/src + key: rust-registry-image-${{hashFiles('**/Cargo.lock') }} + - name: Cache cargo target + id: cache-cargo-target + uses: actions/cache@v3 + with: + path: | + cargo-target-${{ matrix.slug }} + key: cargo-target-${{ matrix.slug }}-${{hashFiles('**/Cargo.lock') }}-${{steps.rust-toolchain.outputs.rustc_version}} + - name: Cache apt cache + id: cache-apt + uses: actions/cache@v3 + with: + path: | + var-cache-apt-${{ matrix.slug }} + key: var-cache-apt-${{ matrix.slug }} + - name: Cache apt lib + id: cache-apt-lib + uses: actions/cache@v3 + with: + path: | + var-lib-apt-${{ matrix.slug }} + key: var-lib-apt-${{ matrix.slug }} + - name: inject cache into docker + uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.1.0 + with: + cache-map: | + { + ".cargo/registry": "/usr/local/cargo/registry", + ".cargo/git/db": "/usr/local/cargo/git/db", + "cargo-target-${{ matrix.slug }}": { + "target": "/app/target", + "id": "cargo-target-${{ matrix.platform }}" + }, + "var-cache-apt-${{ matrix.slug }}": "/var/cache/apt", + "var-lib-apt-${{ matrix.slug }}": "/var/lib/apt" + } + skip-extraction: ${{ steps.cache.outputs.cache-hit }} + - name: Build and push Docker image by digest id: build uses: docker/build-push-action@v6 From 9b8b37f162b75fe503b557876632bb5115aa35da Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Wed, 21 May 2025 02:51:09 +0100 Subject: [PATCH 21/50] docs: Badges for mirrors --- README.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index fdcdafb7..e3eb807f 100644 --- a/README.md +++ b/README.md @@ -7,10 +7,15 @@ [continuwuity] is a Matrix homeserver written in Rust. -It's a community continuation of the [conduwuit](https://github.com/girlbossceo/conduwuit) homeserver. +It's a community continuation of the [conduwuit](https://github.com/girlbossceo/conduwuit) homeserver. +[![forgejo.ellis.link](https://img.shields.io/badge/Ellis%20Git-main+packages-green?style=flat&logo=forgejo&labelColor=fff)](https://forgejo.ellis.link/continuwuation/continuwuity) ![](https://forgejo.ellis.link/continuwuation/continuwuity/badges/stars.svg?style=flat) [![](https://forgejo.ellis.link/continuwuation/continuwuity/badges/issues/open.svg?style=flat)](https://forgejo.ellis.link/continuwuation/continuwuity/issues?state=open) [![](https://forgejo.ellis.link/continuwuation/continuwuity/badges/pulls/open.svg?style=flat)](https://forgejo.ellis.link/continuwuation/continuwuity/pulls?state=open) + +[![GitHub](https://img.shields.io/badge/GitHub-mirror-blue?style=flat&logo=github&labelColor=fff&logoColor=24292f)](https://github.com/continuwuity/continuwuity) ![](https://img.shields.io/github/stars/continuwuity/continuwuity?style=flat) + +[![Codeberg](https://img.shields.io/badge/Codeberg-mirror-2185D0?style=flat&logo=codeberg&labelColor=fff)](https://codeberg.org/nexy7574/continuwuity) ![](https://codeberg.org/nexy7574/continuwuity/badges/stars.svg?style=flat) ### Why does this exist? @@ -112,4 +117,3 @@ Join our [Matrix room](https://matrix.to/#/#continuwuity:continuwuity.org) and [ [continuwuity]: https://forgejo.ellis.link/continuwuation/continuwuity - From fcd5669aa117afc229f95ee43f072bd3b462ed09 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 22 Apr 2025 06:29:30 +0000 Subject: [PATCH 22/50] Join jemalloc background threads prior to exit. 
Co-authored-by: Jade Ellis Signed-off-by: Jason Volk --- src/core/alloc/je.rs | 4 ++++ src/main/runtime.rs | 19 ++++++++++++------- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index 2424e99c..e138233e 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -274,6 +274,10 @@ pub fn set_dirty_decay>>(arena: I, decay_ms: isize) -> Res } } +pub fn background_thread_enable(enable: bool) -> Result { + set::(&mallctl!("background_thread"), enable.into()).map(is_nonzero!()) +} + #[inline] #[must_use] pub fn is_affine_arena() -> bool { is_percpu_arena() || is_phycpu_arena() } diff --git a/src/main/runtime.rs b/src/main/runtime.rs index 1c58ea81..e9029012 100644 --- a/src/main/runtime.rs +++ b/src/main/runtime.rs @@ -98,12 +98,7 @@ pub(super) fn shutdown(server: &Arc, runtime: tokio::runtime::Runtime) { Level::INFO }; - debug!( - timeout = ?SHUTDOWN_TIMEOUT, - "Waiting for runtime..." - ); - - runtime.shutdown_timeout(SHUTDOWN_TIMEOUT); + wait_shutdown(server, runtime); let runtime_metrics = server.server.metrics.runtime_interval().unwrap_or_default(); event!(LEVEL, ?runtime_metrics, "Final runtime metrics"); @@ -111,13 +106,23 @@ pub(super) fn shutdown(server: &Arc, runtime: tokio::runtime::Runtime) { #[cfg(not(tokio_unstable))] #[tracing::instrument(name = "stop", level = "info", skip_all)] -pub(super) fn shutdown(_server: &Arc, runtime: tokio::runtime::Runtime) { +pub(super) fn shutdown(server: &Arc, runtime: tokio::runtime::Runtime) { + wait_shutdown(server, runtime); +} + +fn wait_shutdown(_server: &Arc, runtime: tokio::runtime::Runtime) { debug!( timeout = ?SHUTDOWN_TIMEOUT, "Waiting for runtime..." ); runtime.shutdown_timeout(SHUTDOWN_TIMEOUT); + + // Join any jemalloc threads so they don't appear in use at exit. + #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] + conduwuit_core::alloc::je::background_thread_enable(false) + .log_debug_err() + .ok(); } #[tracing::instrument( From bfb0a2b76a544af87fb9c2181f4f93eada14d635 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 22 Apr 2025 09:33:17 +0000 Subject: [PATCH 23/50] Remove unused Pdu::into_any_event(). Signed-off-by: Jason Volk --- src/core/matrix/pdu/strip.rs | 69 ++++++++++-------------------------- 1 file changed, 19 insertions(+), 50 deletions(-) diff --git a/src/core/matrix/pdu/strip.rs b/src/core/matrix/pdu/strip.rs index 3683caaa..a39e7d35 100644 --- a/src/core/matrix/pdu/strip.rs +++ b/src/core/matrix/pdu/strip.rs @@ -1,8 +1,8 @@ use ruma::{ events::{ - AnyEphemeralRoomEvent, AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, - AnySyncStateEvent, AnySyncTimelineEvent, AnyTimelineEvent, StateEvent, - room::member::RoomMemberEventContent, space::child::HierarchySpaceChildEvent, + AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent, + AnySyncTimelineEvent, AnyTimelineEvent, StateEvent, room::member::RoomMemberEventContent, + space::child::HierarchySpaceChildEvent, }, serde::Raw, }; @@ -10,41 +10,6 @@ use serde_json::{json, value::Value as JsonValue}; use crate::implement; -/// This only works for events that are also AnyRoomEvents. -#[must_use] -#[implement(super::Pdu)] -pub fn into_any_event(self) -> Raw { - serde_json::from_value(self.into_any_event_value()).expect("Raw::from_value always works") -} - -/// This only works for events that are also AnyRoomEvents. 
-#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn into_any_event_value(self) -> JsonValue { - let (redacts, content) = self.copy_redacts(); - let mut json = json!({ - "content": content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "room_id": self.room_id, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - if let Some(state_key) = &self.state_key { - json["state_key"] = json!(state_key); - } - if let Some(redacts) = &redacts { - json["redacts"] = json!(redacts); - } - - json -} - #[implement(super::Pdu)] #[must_use] #[inline] @@ -53,7 +18,8 @@ pub fn into_room_event(self) -> Raw { self.to_room_event() } #[implement(super::Pdu)] #[must_use] pub fn to_room_event(&self) -> Raw { - serde_json::from_value(self.to_room_event_value()).expect("Raw::from_value always works") + let value = self.to_room_event_value(); + serde_json::from_value(value).expect("Failed to serialize Event value") } #[implement(super::Pdu)] @@ -91,8 +57,8 @@ pub fn into_message_like_event(self) -> Raw { self.to_messa #[implement(super::Pdu)] #[must_use] pub fn to_message_like_event(&self) -> Raw { - serde_json::from_value(self.to_message_like_event_value()) - .expect("Raw::from_value always works") + let value = self.to_message_like_event_value(); + serde_json::from_value(value).expect("Failed to serialize Event value") } #[implement(super::Pdu)] @@ -130,7 +96,8 @@ pub fn into_sync_room_event(self) -> Raw { self.to_sync_ro #[implement(super::Pdu)] #[must_use] pub fn to_sync_room_event(&self) -> Raw { - serde_json::from_value(self.to_sync_room_event_value()).expect("Raw::from_value always works") + let value = self.to_sync_room_event_value(); + serde_json::from_value(value).expect("Failed to serialize Event value") } #[implement(super::Pdu)] @@ -162,7 +129,8 @@ pub fn to_sync_room_event_value(&self) -> JsonValue { #[implement(super::Pdu)] #[must_use] pub fn into_state_event(self) -> Raw { - serde_json::from_value(self.into_state_event_value()).expect("Raw::from_value always works") + let value = self.into_state_event_value(); + serde_json::from_value(value).expect("Failed to serialize Event value") } #[implement(super::Pdu)] @@ -189,8 +157,8 @@ pub fn into_state_event_value(self) -> JsonValue { #[implement(super::Pdu)] #[must_use] pub fn into_sync_state_event(self) -> Raw { - serde_json::from_value(self.into_sync_state_event_value()) - .expect("Raw::from_value always works") + let value = self.into_sync_state_event_value(); + serde_json::from_value(value).expect("Failed to serialize Event value") } #[implement(super::Pdu)] @@ -223,8 +191,8 @@ pub fn into_stripped_state_event(self) -> Raw { #[implement(super::Pdu)] #[must_use] pub fn to_stripped_state_event(&self) -> Raw { - serde_json::from_value(self.to_stripped_state_event_value()) - .expect("Raw::from_value always works") + let value = self.to_stripped_state_event_value(); + serde_json::from_value(value).expect("Failed to serialize Event value") } #[implement(super::Pdu)] @@ -242,8 +210,8 @@ pub fn to_stripped_state_event_value(&self) -> JsonValue { #[implement(super::Pdu)] #[must_use] pub fn into_stripped_spacechild_state_event(self) -> Raw { - serde_json::from_value(self.into_stripped_spacechild_state_event_value()) - .expect("Raw::from_value always works") + let value = self.into_stripped_spacechild_state_event_value(); + serde_json::from_value(value).expect("Failed to serialize Event value") } #[implement(super::Pdu)] @@ -262,7 +230,8 @@ pub 
fn into_stripped_spacechild_state_event_value(self) -> JsonValue { #[implement(super::Pdu)] #[must_use] pub fn into_member_event(self) -> Raw> { - serde_json::from_value(self.into_member_event_value()).expect("Raw::from_value always works") + let value = self.into_member_event_value(); + serde_json::from_value(value).expect("Failed to serialize Event value") } #[implement(super::Pdu)] From 44302ce73289bc70b0e05755f1a23f5d0770f752 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 22 Apr 2025 11:00:07 +0000 Subject: [PATCH 24/50] Eliminate explicit parallel_fetches argument. Signed-off-by: Jason Volk --- src/core/matrix/state_res/benches.rs | 4 - src/core/matrix/state_res/mod.rs | 83 +++++++------------ src/core/matrix/state_res/test_utils.rs | 16 ++-- .../rooms/event_handler/resolve_state.rs | 13 +-- 4 files changed, 38 insertions(+), 78 deletions(-) diff --git a/src/core/matrix/state_res/benches.rs b/src/core/matrix/state_res/benches.rs index 01218b01..1aa8552b 100644 --- a/src/core/matrix/state_res/benches.rs +++ b/src/core/matrix/state_res/benches.rs @@ -52,7 +52,6 @@ fn lexico_topo_sort(c: &mut test::Bencher) { #[cfg(conduwuit_bench)] #[cfg_attr(conduwuit_bench, bench)] fn resolution_shallow_auth_chain(c: &mut test::Bencher) { - let parallel_fetches = 32; let mut store = TestStore(hashmap! {}); // build up the DAG @@ -78,7 +77,6 @@ fn resolution_shallow_auth_chain(c: &mut test::Bencher) { &auth_chain_sets, &fetch, &exists, - parallel_fetches, ) .await { @@ -91,7 +89,6 @@ fn resolution_shallow_auth_chain(c: &mut test::Bencher) { #[cfg(conduwuit_bench)] #[cfg_attr(conduwuit_bench, bench)] fn resolve_deeper_event_set(c: &mut test::Bencher) { - let parallel_fetches = 32; let mut inner = INITIAL_EVENTS(); let ban = BAN_STATE_SET(); @@ -153,7 +150,6 @@ fn resolve_deeper_event_set(c: &mut test::Bencher) { &auth_chain_sets, &fetch, &exists, - parallel_fetches, ) .await { diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs index 2ab7cb64..d37368c9 100644 --- a/src/core/matrix/state_res/mod.rs +++ b/src/core/matrix/state_res/mod.rs @@ -69,9 +69,6 @@ type Result = crate::Result; /// * `event_fetch` - Any event not found in the `event_map` will defer to this /// closure to find the event. /// -/// * `parallel_fetches` - The number of asynchronous fetch requests in-flight -/// for any given operation. 
-/// /// ## Invariants /// /// The caller of `resolve` must ensure that all the events are from the same @@ -85,7 +82,6 @@ pub async fn resolve<'a, E, Sets, SetIter, Hasher, Fetch, FetchFut, Exists, Exis auth_chain_sets: &'a [HashSet], event_fetch: &Fetch, event_exists: &Exists, - parallel_fetches: usize, ) -> Result> where Fetch: Fn(E::Id) -> FetchFut + Sync, @@ -147,13 +143,8 @@ where // Sort the control events based on power_level/clock/event_id and // outgoing/incoming edges - let sorted_control_levels = reverse_topological_power_sort( - control_events, - &all_conflicted, - &event_fetch, - parallel_fetches, - ) - .await?; + let sorted_control_levels = + reverse_topological_power_sort(control_events, &all_conflicted, &event_fetch).await?; debug!(count = sorted_control_levels.len(), "power events"); trace!(list = ?sorted_control_levels, "sorted power events"); @@ -295,7 +286,6 @@ async fn reverse_topological_power_sort( events_to_sort: Vec, auth_diff: &HashSet, fetch_event: &F, - parallel_fetches: usize, ) -> Result> where F: Fn(E::Id) -> Fut + Sync, @@ -311,26 +301,25 @@ where } // This is used in the `key_fn` passed to the lexico_topo_sort fn - let event_to_pl = graph + let event_to_pl: HashMap<_, _> = graph .keys() .stream() - .map(|event_id| { - get_power_level_for_sender(event_id.clone(), fetch_event) - .map(move |res| res.map(|pl| (event_id, pl))) + .broad_filter_map(async |event_id| { + let pl = get_power_level_for_sender(&event_id, fetch_event) + .await + .ok()?; + Some((event_id, pl)) }) - .buffer_unordered(parallel_fetches) - .ready_try_fold(HashMap::new(), |mut event_to_pl, (event_id, pl)| { + .inspect(|(event_id, pl)| { debug!( - event_id = event_id.borrow().as_str(), - power_level = i64::from(pl), + event_id = event_id.as_str(), + power_level = i64::from(*pl), "found the power level of an event's sender", ); - - event_to_pl.insert(event_id.clone(), pl); - Ok(event_to_pl) }) + .collect() .boxed() - .await?; + .await; let event_to_pl = &event_to_pl; let fetcher = |event_id: E::Id| async move { @@ -909,7 +898,7 @@ mod tests { let fetcher = |id| ready(events.get(&id).cloned()); let sorted_power_events = - super::reverse_topological_power_sort(power_events, &auth_chain, &fetcher, 1) + super::reverse_topological_power_sort(power_events, &auth_chain, &fetcher) .await .unwrap(); @@ -1312,19 +1301,13 @@ mod tests { }) .collect(); - let resolved = match super::resolve( - &RoomVersionId::V2, - &state_sets, - &auth_chain, - &fetcher, - &exists, - 1, - ) - .await - { - | Ok(state) => state, - | Err(e) => panic!("{e}"), - }; + let resolved = + match super::resolve(&RoomVersionId::V2, &state_sets, &auth_chain, &fetcher, &exists) + .await + { + | Ok(state) => state, + | Err(e) => panic!("{e}"), + }; assert_eq!(expected, resolved); } @@ -1429,21 +1412,15 @@ mod tests { }) .collect(); - let fetcher = |id: ::Id| ready(ev_map.get(&id).cloned()); - let exists = |id: ::Id| ready(ev_map.get(&id).is_some()); - let resolved = match super::resolve( - &RoomVersionId::V6, - &state_sets, - &auth_chain, - &fetcher, - &exists, - 1, - ) - .await - { - | Ok(state) => state, - | Err(e) => panic!("{e}"), - }; + let fetcher = |id: OwnedEventId| ready(ev_map.get(&id).cloned()); + let exists = |id: OwnedEventId| ready(ev_map.get(&id).is_some()); + let resolved = + match super::resolve(&RoomVersionId::V6, &state_sets, &auth_chain, &fetcher, &exists) + .await + { + | Ok(state) => state, + | Err(e) => panic!("{e}"), + }; debug!( resolved = ?resolved diff --git a/src/core/matrix/state_res/test_utils.rs 
b/src/core/matrix/state_res/test_utils.rs index a666748a..ff7b30d0 100644 --- a/src/core/matrix/state_res/test_utils.rs +++ b/src/core/matrix/state_res/test_utils.rs @@ -133,17 +133,11 @@ pub(crate) async fn do_check( .collect(); let event_map = &event_map; - let fetch = |id: ::Id| ready(event_map.get(&id).cloned()); - let exists = |id: ::Id| ready(event_map.get(&id).is_some()); - let resolved = super::resolve( - &RoomVersionId::V6, - state_sets, - &auth_chain_sets, - &fetch, - &exists, - 1, - ) - .await; + let fetch = |id: OwnedEventId| ready(event_map.get(&id).cloned()); + let exists = |id: OwnedEventId| ready(event_map.get(&id).is_some()); + let resolved = + super::resolve(&RoomVersionId::V6, state_sets, &auth_chain_sets, &fetch, &exists) + .await; match resolved { | Ok(state) => state, diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index b3a7a71b..a67ac3b7 100644 --- a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -112,14 +112,7 @@ where { let event_fetch = |event_id| self.event_fetch(event_id); let event_exists = |event_id| self.event_exists(event_id); - state_res::resolve( - room_version, - state_sets, - auth_chain_sets, - &event_fetch, - &event_exists, - automatic_width(), - ) - .map_err(|e| err!(error!("State resolution failed: {e:?}"))) - .await + state_res::resolve(room_version, state_sets, auth_chain_sets, &event_fetch, &event_exists) + .map_err(|e| err!(error!("State resolution failed: {e:?}"))) + .await } From f605913ea92d2f741616f0cfd838cf348dc22a34 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 22 Apr 2025 11:00:55 +0000 Subject: [PATCH 25/50] Eliminate associated Id type from trait Event. Co-authored-by: Jade Ellis Signed-off-by: Jason Volk --- src/core/matrix/event.rs | 26 ++-- src/core/matrix/pdu.rs | 14 +- src/core/matrix/state_res/benches.rs | 38 +++--- src/core/matrix/state_res/event_auth.rs | 8 +- src/core/matrix/state_res/mod.rs | 128 +++++++++--------- src/core/matrix/state_res/test_utils.rs | 30 ++-- .../rooms/event_handler/resolve_state.rs | 2 +- 7 files changed, 116 insertions(+), 130 deletions(-) diff --git a/src/core/matrix/event.rs b/src/core/matrix/event.rs index 29153334..e4c478cd 100644 --- a/src/core/matrix/event.rs +++ b/src/core/matrix/event.rs @@ -1,18 +1,10 @@ -use std::{ - borrow::Borrow, - fmt::{Debug, Display}, - hash::Hash, -}; - use ruma::{EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId, events::TimelineEventType}; use serde_json::value::RawValue as RawJsonValue; /// Abstraction of a PDU so users can have their own PDU types. pub trait Event { - type Id: Clone + Debug + Display + Eq + Ord + Hash + Send + Borrow; - /// The `EventId` of this event. - fn event_id(&self) -> &Self::Id; + fn event_id(&self) -> &EventId; /// The `RoomId` of this event. fn room_id(&self) -> &RoomId; @@ -34,20 +26,18 @@ pub trait Event { /// The events before this event. // Requires GATs to avoid boxing (and TAIT for making it convenient). - fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_; + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_; /// All the authenticating events for this event. // Requires GATs to avoid boxing (and TAIT for making it convenient). - fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_; + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_; /// If this event is a redaction event this is the event it redacts. 
- fn redacts(&self) -> Option<&Self::Id>; + fn redacts(&self) -> Option<&EventId>; } impl Event for &T { - type Id = T::Id; - - fn event_id(&self) -> &Self::Id { (*self).event_id() } + fn event_id(&self) -> &EventId { (*self).event_id() } fn room_id(&self) -> &RoomId { (*self).room_id() } @@ -61,13 +51,13 @@ impl Event for &T { fn state_key(&self) -> Option<&str> { (*self).state_key() } - fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { (*self).prev_events() } - fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { (*self).auth_events() } - fn redacts(&self) -> Option<&Self::Id> { (*self).redacts() } + fn redacts(&self) -> Option<&EventId> { (*self).redacts() } } diff --git a/src/core/matrix/pdu.rs b/src/core/matrix/pdu.rs index 7e1ecfa8..188586bd 100644 --- a/src/core/matrix/pdu.rs +++ b/src/core/matrix/pdu.rs @@ -79,9 +79,7 @@ impl Pdu { } impl Event for Pdu { - type Id = OwnedEventId; - - fn event_id(&self) -> &Self::Id { &self.event_id } + fn event_id(&self) -> &EventId { &self.event_id } fn room_id(&self) -> &RoomId { &self.room_id } @@ -97,15 +95,15 @@ impl Event for Pdu { fn state_key(&self) -> Option<&str> { self.state_key.as_deref() } - fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { - self.prev_events.iter() + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { + self.prev_events.iter().map(AsRef::as_ref) } - fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { - self.auth_events.iter() + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { + self.auth_events.iter().map(AsRef::as_ref) } - fn redacts(&self) -> Option<&Self::Id> { self.redacts.as_ref() } + fn redacts(&self) -> Option<&EventId> { self.redacts.as_deref() } } /// Prevent derived equality which wouldn't limit itself to event_id diff --git a/src/core/matrix/state_res/benches.rs b/src/core/matrix/state_res/benches.rs index 1aa8552b..12eeab9d 100644 --- a/src/core/matrix/state_res/benches.rs +++ b/src/core/matrix/state_res/benches.rs @@ -186,7 +186,11 @@ impl TestStore { } /// Returns a Vec of the related auth events to the given `event`. 
- fn auth_event_ids(&self, room_id: &RoomId, event_ids: Vec) -> Result> { + fn auth_event_ids( + &self, + room_id: &RoomId, + event_ids: Vec, + ) -> Result> { let mut result = HashSet::new(); let mut stack = event_ids; @@ -212,8 +216,8 @@ impl TestStore { fn auth_chain_diff( &self, room_id: &RoomId, - event_ids: Vec>, - ) -> Result> { + event_ids: Vec>, + ) -> Result> { let mut auth_chain_sets = vec![]; for ids in event_ids { // TODO state store `auth_event_ids` returns self in the event ids list @@ -234,7 +238,7 @@ impl TestStore { Ok(auth_chain_sets .into_iter() .flatten() - .filter(|id| !common.contains(id.borrow())) + .filter(|id| !common.contains(id)) .collect()) } else { Ok(vec![]) @@ -561,7 +565,7 @@ impl EventTypeExt for &TimelineEventType { mod event { use ruma::{ - MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, + EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, events::{TimelineEventType, pdu::Pdu}, }; use serde::{Deserialize, Serialize}; @@ -570,9 +574,7 @@ mod event { use super::Event; impl Event for PduEvent { - type Id = OwnedEventId; - - fn event_id(&self) -> &Self::Id { &self.event_id } + fn event_id(&self) -> &EventId { &self.event_id } fn room_id(&self) -> &RoomId { match &self.rest { @@ -628,28 +630,30 @@ mod event { } } - fn prev_events(&self) -> Box + Send + '_> { + fn prev_events(&self) -> Box + Send + '_> { match &self.rest { - | Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)), - | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()), + | Pdu::RoomV1Pdu(ev) => + Box::new(ev.prev_events.iter().map(|(id, _)| id.as_ref())), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter().map(AsRef::as_ref)), #[cfg(not(feature = "unstable-exhaustive-types"))] | _ => unreachable!("new PDU version"), } } - fn auth_events(&self) -> Box + Send + '_> { + fn auth_events(&self) -> Box + Send + '_> { match &self.rest { - | Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)), - | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()), + | Pdu::RoomV1Pdu(ev) => + Box::new(ev.auth_events.iter().map(|(id, _)| id.as_ref())), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter().map(AsRef::as_ref)), #[cfg(not(feature = "unstable-exhaustive-types"))] | _ => unreachable!("new PDU version"), } } - fn redacts(&self) -> Option<&Self::Id> { + fn redacts(&self) -> Option<&EventId> { match &self.rest { - | Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(), - | Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(), + | Pdu::RoomV1Pdu(ev) => ev.redacts.as_deref(), + | Pdu::RoomV3Pdu(ev) => ev.redacts.as_deref(), #[cfg(not(feature = "unstable-exhaustive-types"))] | _ => unreachable!("new PDU version"), } diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs index c69db50e..715e5156 100644 --- a/src/core/matrix/state_res/event_auth.rs +++ b/src/core/matrix/state_res/event_auth.rs @@ -133,7 +133,7 @@ pub fn auth_types_for_event( level = "debug", skip_all, fields( - event_id = incoming_event.event_id().borrow().as_str() + event_id = incoming_event.event_id().as_str(), ) )] pub async fn auth_check( @@ -259,7 +259,7 @@ where // 3. 
If event does not have m.room.create in auth_events reject if !incoming_event .auth_events() - .any(|id| id.borrow() == room_create_event.event_id().borrow()) + .any(|id| id == room_create_event.event_id()) { warn!("no m.room.create event in auth events"); return Ok(false); @@ -1021,11 +1021,11 @@ fn check_redaction( // If the domain of the event_id of the event being redacted is the same as the // domain of the event_id of the m.room.redaction, allow - if redaction_event.event_id().borrow().server_name() + if redaction_event.event_id().server_name() == redaction_event .redacts() .as_ref() - .and_then(|&id| id.borrow().server_name()) + .and_then(|&id| id.server_name()) { debug!("redaction event allowed via room version 1 rules"); return Ok(true); diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs index d37368c9..651f6130 100644 --- a/src/core/matrix/state_res/mod.rs +++ b/src/core/matrix/state_res/mod.rs @@ -20,7 +20,7 @@ use std::{ use futures::{Future, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, future}; use ruma::{ - EventId, Int, MilliSecondsSinceUnixEpoch, RoomVersionId, + EventId, Int, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomVersionId, events::{ StateEventType, TimelineEventType, room::member::{MembershipState, RoomMemberEventContent}, @@ -39,9 +39,7 @@ use crate::{ debug, debug_error, matrix::{event::Event, pdu::StateKey}, trace, - utils::stream::{ - BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryReadyExt, WidebandExt, - }, + utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, WidebandExt}, warn, }; @@ -79,20 +77,19 @@ type Result = crate::Result; pub async fn resolve<'a, E, Sets, SetIter, Hasher, Fetch, FetchFut, Exists, ExistsFut>( room_version: &RoomVersionId, state_sets: Sets, - auth_chain_sets: &'a [HashSet], + auth_chain_sets: &'a [HashSet], event_fetch: &Fetch, event_exists: &Exists, -) -> Result> +) -> Result> where - Fetch: Fn(E::Id) -> FetchFut + Sync, + Fetch: Fn(OwnedEventId) -> FetchFut + Sync, FetchFut: Future> + Send, - Exists: Fn(E::Id) -> ExistsFut + Sync, + Exists: Fn(OwnedEventId) -> ExistsFut + Sync, ExistsFut: Future + Send, Sets: IntoIterator + Send, - SetIter: Iterator> + Clone + Send, + SetIter: Iterator> + Clone + Send, Hasher: BuildHasher + Send + Sync, E: Event + Clone + Send + Sync, - E::Id: Borrow + Send + Sync, for<'b> &'b E: Send, { debug!("State resolution starting"); @@ -153,7 +150,7 @@ where // Sequentially auth check each control event. let resolved_control = iterative_auth_check( &room_version, - sorted_control_levels.iter().stream(), + sorted_control_levels.iter().stream().map(AsRef::as_ref), clean.clone(), &event_fetch, ) @@ -170,7 +167,7 @@ where // that failed auth let events_to_resolve: Vec<_> = all_conflicted .iter() - .filter(|&id| !deduped_power_ev.contains(id.borrow())) + .filter(|&id| !deduped_power_ev.contains(id)) .cloned() .collect(); @@ -190,7 +187,7 @@ where let mut resolved_state = iterative_auth_check( &room_version, - sorted_left_events.iter().stream(), + sorted_left_events.iter().stream().map(AsRef::as_ref), resolved_control, // The control events are added to the final resolved state &event_fetch, ) @@ -283,15 +280,14 @@ where /// earlier (further back in time) origin server timestamp. 
#[tracing::instrument(level = "debug", skip_all)] async fn reverse_topological_power_sort( - events_to_sort: Vec, - auth_diff: &HashSet, + events_to_sort: Vec, + auth_diff: &HashSet, fetch_event: &F, -) -> Result> +) -> Result> where - F: Fn(E::Id) -> Fut + Sync, + F: Fn(OwnedEventId) -> Fut + Sync, Fut: Future> + Send, E: Event + Send + Sync, - E::Id: Borrow + Send + Sync, { debug!("reverse topological sort of power events"); @@ -303,6 +299,7 @@ where // This is used in the `key_fn` passed to the lexico_topo_sort fn let event_to_pl: HashMap<_, _> = graph .keys() + .cloned() .stream() .broad_filter_map(async |event_id| { let pl = get_power_level_for_sender(&event_id, fetch_event) @@ -321,14 +318,15 @@ where .boxed() .await; - let event_to_pl = &event_to_pl; - let fetcher = |event_id: E::Id| async move { + let fetcher = async |event_id: OwnedEventId| { let pl = *event_to_pl - .get(event_id.borrow()) + .get(&event_id) .ok_or_else(|| Error::NotFound(String::new()))?; + let ev = fetch_event(event_id) .await .ok_or_else(|| Error::NotFound(String::new()))?; + Ok((pl, ev.origin_server_ts())) }; @@ -465,18 +463,17 @@ where /// the eventId at the eventId's generation (we walk backwards to `EventId`s /// most recent previous power level event). async fn get_power_level_for_sender( - event_id: E::Id, + event_id: &EventId, fetch_event: &F, ) -> serde_json::Result where - F: Fn(E::Id) -> Fut + Sync, + F: Fn(OwnedEventId) -> Fut + Sync, Fut: Future> + Send, E: Event + Send, - E::Id: Borrow + Send, { debug!("fetch event ({event_id}) senders power level"); - let event = fetch_event(event_id).await; + let event = fetch_event(event_id.to_owned()).await; let auth_events = event.as_ref().map(Event::auth_events); @@ -484,7 +481,7 @@ where .into_iter() .flatten() .stream() - .broadn_filter_map(5, |aid| fetch_event(aid.clone())) + .broadn_filter_map(5, |aid| fetch_event(aid.to_owned())) .ready_find(|aev| is_type_and_key(aev, &TimelineEventType::RoomPowerLevels, "")) .await; @@ -517,14 +514,13 @@ where async fn iterative_auth_check<'a, E, F, Fut, S>( room_version: &RoomVersion, events_to_check: S, - unconflicted_state: StateMap, + unconflicted_state: StateMap, fetch_event: &F, -) -> Result> +) -> Result> where - F: Fn(E::Id) -> Fut + Sync, + F: Fn(OwnedEventId) -> Fut + Sync, Fut: Future> + Send, - E::Id: Borrow + Clone + Eq + Ord + Send + Sync + 'a, - S: Stream + Send + 'a, + S: Stream + Send + 'a, E: Event + Clone + Send + Sync, { debug!("starting iterative auth check"); @@ -532,7 +528,7 @@ where let events_to_check: Vec<_> = events_to_check .map(Result::Ok) .broad_and_then(async |event_id| { - fetch_event(event_id.clone()) + fetch_event(event_id.to_owned()) .await .ok_or_else(|| Error::NotFound(format!("Failed to find {event_id}"))) }) @@ -540,16 +536,16 @@ where .boxed() .await?; - let auth_event_ids: HashSet = events_to_check + let auth_event_ids: HashSet = events_to_check .iter() - .flat_map(|event: &E| event.auth_events().map(Clone::clone)) + .flat_map(|event: &E| event.auth_events().map(ToOwned::to_owned)) .collect(); - let auth_events: HashMap = auth_event_ids + let auth_events: HashMap = auth_event_ids .into_iter() .stream() .broad_filter_map(fetch_event) - .map(|auth_event| (auth_event.event_id().clone(), auth_event)) + .map(|auth_event| (auth_event.event_id().to_owned(), auth_event)) .collect() .boxed() .await; @@ -570,7 +566,7 @@ where let mut auth_state = StateMap::new(); for aid in event.auth_events() { - if let Some(ev) = auth_events.get(aid.borrow()) { + if let Some(ev) = auth_events.get(aid) { 
//TODO: synapse checks "rejected_reason" which is most likely related to // soft-failing auth_state.insert( @@ -581,7 +577,7 @@ where ev.clone(), ); } else { - warn!(event_id = aid.borrow().as_str(), "missing auth event"); + warn!(event_id = aid.as_str(), "missing auth event"); } } @@ -590,7 +586,7 @@ where .stream() .ready_filter_map(|key| Some((key, resolved_state.get(key)?))) .filter_map(|(key, ev_id)| async move { - if let Some(event) = auth_events.get(ev_id.borrow()) { + if let Some(event) = auth_events.get(ev_id) { Some((key, event.clone())) } else { Some((key, fetch_event(ev_id.clone()).await?)) @@ -622,7 +618,7 @@ where // add event to resolved state map resolved_state.insert( event.event_type().with_state_key(state_key), - event.event_id().clone(), + event.event_id().to_owned(), ); }, | Ok(false) => { @@ -649,15 +645,14 @@ where /// level as a parent) will be marked as depth 1. depth 1 is "older" than depth /// 0. async fn mainline_sort( - to_sort: &[E::Id], - resolved_power_level: Option, + to_sort: &[OwnedEventId], + resolved_power_level: Option, fetch_event: &F, -) -> Result> +) -> Result> where - F: Fn(E::Id) -> Fut + Sync, + F: Fn(OwnedEventId) -> Fut + Sync, Fut: Future> + Send, E: Event + Clone + Send + Sync, - E::Id: Borrow + Clone + Send + Sync, { debug!("mainline sort of events"); @@ -677,7 +672,7 @@ where pl = None; for aid in event.auth_events() { - let ev = fetch_event(aid.clone()) + let ev = fetch_event(aid.to_owned()) .await .ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?; @@ -723,26 +718,25 @@ where /// that has an associated mainline depth. async fn get_mainline_depth( mut event: Option, - mainline_map: &HashMap, + mainline_map: &HashMap, fetch_event: &F, ) -> Result where - F: Fn(E::Id) -> Fut + Sync, + F: Fn(OwnedEventId) -> Fut + Sync, Fut: Future> + Send, E: Event + Send + Sync, - E::Id: Borrow + Send + Sync, { while let Some(sort_ev) = event { - debug!(event_id = sort_ev.event_id().borrow().as_str(), "mainline"); + debug!(event_id = sort_ev.event_id().as_str(), "mainline"); let id = sort_ev.event_id(); - if let Some(depth) = mainline_map.get(id.borrow()) { + if let Some(depth) = mainline_map.get(id) { return Ok(*depth); } event = None; for aid in sort_ev.auth_events() { - let aev = fetch_event(aid.clone()) + let aev = fetch_event(aid.to_owned()) .await .ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?; @@ -757,15 +751,14 @@ where } async fn add_event_and_auth_chain_to_graph( - graph: &mut HashMap>, - event_id: E::Id, - auth_diff: &HashSet, + graph: &mut HashMap>, + event_id: OwnedEventId, + auth_diff: &HashSet, fetch_event: &F, ) where - F: Fn(E::Id) -> Fut + Sync, + F: Fn(OwnedEventId) -> Fut + Sync, Fut: Future> + Send, E: Event + Send + Sync, - E::Id: Borrow + Clone + Send + Sync, { let mut state = vec![event_id]; while let Some(eid) = state.pop() { @@ -775,26 +768,27 @@ async fn add_event_and_auth_chain_to_graph( // Prefer the store to event as the store filters dedups the events for aid in auth_events { - if auth_diff.contains(aid.borrow()) { - if !graph.contains_key(aid.borrow()) { + if auth_diff.contains(aid) { + if !graph.contains_key(aid) { state.push(aid.to_owned()); } - // We just inserted this at the start of the while loop - graph.get_mut(eid.borrow()).unwrap().insert(aid.to_owned()); + graph + .get_mut(&eid) + .expect("We just inserted this at the start of the while loop") + .insert(aid.to_owned()); } } } } -async fn is_power_event_id(event_id: &E::Id, fetch: &F) -> bool +async fn is_power_event_id(event_id: 
&EventId, fetch: &F) -> bool where - F: Fn(E::Id) -> Fut + Sync, + F: Fn(OwnedEventId) -> Fut + Sync, Fut: Future> + Send, E: Event + Send, - E::Id: Borrow + Send + Sync, { - match fetch(event_id.clone()).await.as_ref() { + match fetch(event_id.to_owned()).await.as_ref() { | Some(state) => is_power_event(state), | _ => false, } @@ -904,7 +898,7 @@ mod tests { let resolved_power = super::iterative_auth_check( &RoomVersion::V6, - sorted_power_events.iter().stream(), + sorted_power_events.iter().map(AsRef::as_ref).stream(), HashMap::new(), // unconflicted events &fetcher, ) @@ -1289,7 +1283,7 @@ mod tests { let ev_map = store.0.clone(); let fetcher = |id| ready(ev_map.get(&id).cloned()); - let exists = |id: ::Id| ready(ev_map.get(&*id).is_some()); + let exists = |id: OwnedEventId| ready(ev_map.get(&*id).is_some()); let state_sets = [state_at_bob, state_at_charlie]; let auth_chain: Vec<_> = state_sets diff --git a/src/core/matrix/state_res/test_utils.rs b/src/core/matrix/state_res/test_utils.rs index ff7b30d0..c6945f66 100644 --- a/src/core/matrix/state_res/test_utils.rs +++ b/src/core/matrix/state_res/test_utils.rs @@ -241,8 +241,8 @@ impl TestStore { pub(crate) fn auth_event_ids( &self, room_id: &RoomId, - event_ids: Vec, - ) -> Result> { + event_ids: Vec, + ) -> Result> { let mut result = HashSet::new(); let mut stack = event_ids; @@ -578,7 +578,7 @@ pub(crate) fn INITIAL_EDGES() -> Vec { pub(crate) mod event { use ruma::{ - MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, + EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, events::{TimelineEventType, pdu::Pdu}, }; use serde::{Deserialize, Serialize}; @@ -587,9 +587,7 @@ pub(crate) mod event { use crate::Event; impl Event for PduEvent { - type Id = OwnedEventId; - - fn event_id(&self) -> &Self::Id { &self.event_id } + fn event_id(&self) -> &EventId { &self.event_id } fn room_id(&self) -> &RoomId { match &self.rest { @@ -646,29 +644,31 @@ pub(crate) mod event { } #[allow(refining_impl_trait)] - fn prev_events(&self) -> Box + Send + '_> { + fn prev_events(&self) -> Box + Send + '_> { match &self.rest { - | Pdu::RoomV1Pdu(ev) => Box::new(ev.prev_events.iter().map(|(id, _)| id)), - | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter()), + | Pdu::RoomV1Pdu(ev) => + Box::new(ev.prev_events.iter().map(|(id, _)| id.as_ref())), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter().map(AsRef::as_ref)), #[allow(unreachable_patterns)] | _ => unreachable!("new PDU version"), } } #[allow(refining_impl_trait)] - fn auth_events(&self) -> Box + Send + '_> { + fn auth_events(&self) -> Box + Send + '_> { match &self.rest { - | Pdu::RoomV1Pdu(ev) => Box::new(ev.auth_events.iter().map(|(id, _)| id)), - | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter()), + | Pdu::RoomV1Pdu(ev) => + Box::new(ev.auth_events.iter().map(|(id, _)| id.as_ref())), + | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter().map(AsRef::as_ref)), #[allow(unreachable_patterns)] | _ => unreachable!("new PDU version"), } } - fn redacts(&self) -> Option<&Self::Id> { + fn redacts(&self) -> Option<&EventId> { match &self.rest { - | Pdu::RoomV1Pdu(ev) => ev.redacts.as_ref(), - | Pdu::RoomV3Pdu(ev) => ev.redacts.as_ref(), + | Pdu::RoomV1Pdu(ev) => ev.redacts.as_deref(), + | Pdu::RoomV3Pdu(ev) => ev.redacts.as_deref(), #[allow(unreachable_patterns)] | _ => unreachable!("new PDU version"), } diff --git a/src/service/rooms/event_handler/resolve_state.rs b/src/service/rooms/event_handler/resolve_state.rs index a67ac3b7..cd747e04 100644 --- 
a/src/service/rooms/event_handler/resolve_state.rs +++ b/src/service/rooms/event_handler/resolve_state.rs @@ -8,7 +8,7 @@ use conduwuit::{ Error, Result, err, implement, state_res::{self, StateMap}, trace, - utils::stream::{IterStream, ReadyExt, TryWidebandExt, WidebandExt, automatic_width}, + utils::stream::{IterStream, ReadyExt, TryWidebandExt, WidebandExt}, }; use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, future::try_join}; use ruma::{OwnedEventId, RoomId, RoomVersionId}; From 3e4e696761e75bc5e673aea66bd6407088523944 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Wed, 21 May 2025 12:35:25 +0100 Subject: [PATCH 26/50] fix: Make sure empty VERSION_EXTRA strings are ignored Also updates built & removes unused optional features --- Cargo.lock | 45 ++++++------------------------- src/build_metadata/Cargo.toml | 8 +++--- src/build_metadata/build.rs | 1 + src/build_metadata/mod.rs | 11 +++++--- src/core/info/version.rs | 2 +- src/web/mod.rs | 2 +- src/web/templates/_layout.html.j2 | 2 +- 7 files changed, 24 insertions(+), 47 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 18375234..04e4f36e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -584,9 +584,12 @@ name = "built" version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56ed6191a7e78c36abdb16ab65341eefd73d64d303fffccdbb00d51e4205967b" -dependencies = [ - "cargo-lock", -] + +[[package]] +name = "built" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4ad8f11f288f48ca24471bbd51ac257aaeaaa07adae295591266b792902ae64" [[package]] name = "bumpalo" @@ -634,19 +637,6 @@ dependencies = [ "pkg-config", ] -[[package]] -name = "cargo-lock" -version = "10.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06acb4f71407ba205a07cb453211e0e6a67b21904e47f6ba1f9589e38f2e454" -dependencies = [ - "petgraph", - "semver", - "serde", - "toml", - "url", -] - [[package]] name = "cargo_toml" version = "0.21.0" @@ -876,7 +866,7 @@ dependencies = [ name = "conduwuit_build_metadata" version = "0.5.0-rc.5" dependencies = [ - "built", + "built 0.8.0", ] [[package]] @@ -1541,12 +1531,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "fixedbitset" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" - [[package]] name = "flate2" version = "1.1.1" @@ -3164,16 +3148,6 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" -[[package]] -name = "petgraph" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" -dependencies = [ - "fixedbitset", - "indexmap 2.8.0", -] - [[package]] name = "phf" version = "0.11.3" @@ -3565,7 +3539,7 @@ dependencies = [ "arrayvec", "av1-grain", "bitstream-io", - "built", + "built 0.7.7", "cfg-if", "interpolate_name", "itertools 0.12.1", @@ -4169,9 +4143,6 @@ name = "semver" version = "1.0.26" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" -dependencies = [ - "serde", -] [[package]] name = "sentry" diff --git a/src/build_metadata/Cargo.toml b/src/build_metadata/Cargo.toml index 3a98c6bf..62c4dc70 100644 --- a/src/build_metadata/Cargo.toml +++ b/src/build_metadata/Cargo.toml @@ 
-13,13 +13,13 @@ version.workspace = true build = "build.rs" # [[bin]] # path = "main.rs" -# name = "conduwuit_build_metadata" +# name = "conduwuit_build_metadata" [lib] path = "mod.rs" crate-type = [ - "rlib", -# "dylib", + "rlib", + # "dylib", ] [features] @@ -28,7 +28,7 @@ crate-type = [ [dependencies] [build-dependencies] -built = {version = "0.7", features = ["cargo-lock", "dependency-tree"]} +built = { version = "0.8", features = [] } [lints] workspace = true diff --git a/src/build_metadata/build.rs b/src/build_metadata/build.rs index 2fec16a7..bfdf20b1 100644 --- a/src/build_metadata/build.rs +++ b/src/build_metadata/build.rs @@ -78,6 +78,7 @@ fn main() { } // --- Rerun Triggers --- + // TODO: The git rerun triggers seem to always run // Rerun if the git HEAD changes println!("cargo:rerun-if-changed=.git/HEAD"); // Rerun if the ref pointed to by HEAD changes (e.g., new commit on branch) diff --git a/src/build_metadata/mod.rs b/src/build_metadata/mod.rs index cf3364c1..f50018d2 100644 --- a/src/build_metadata/mod.rs +++ b/src/build_metadata/mod.rs @@ -12,11 +12,16 @@ pub static VERSION_EXTRA: Option<&str> = v } else if let v @ Some(_) = option_env!("CONDUWUIT_VERSION_EXTRA") { v - } else if let v @ Some(_) = option_env!("CONDUIT_VERSION_EXTRA") { - v } else { - GIT_COMMIT_HASH_SHORT + option_env!("CONDUIT_VERSION_EXTRA") }; + +pub fn version_tag() -> Option<&'static str> { + VERSION_EXTRA + .filter(|s| !s.is_empty()) + .or(GIT_COMMIT_HASH_SHORT) +} + pub static GIT_REMOTE_WEB_URL: Option<&str> = option_env!("GIT_REMOTE_WEB_URL"); pub static GIT_REMOTE_COMMIT_URL: Option<&str> = option_env!("GIT_REMOTE_COMMIT_URL"); diff --git a/src/core/info/version.rs b/src/core/info/version.rs index 523c40a2..c22c8ec8 100644 --- a/src/core/info/version.rs +++ b/src/core/info/version.rs @@ -26,6 +26,6 @@ pub fn user_agent() -> &'static str { USER_AGENT.get_or_init(init_user_agent) } fn init_user_agent() -> String { format!("{}/{}", name(), version()) } fn init_version() -> String { - conduwuit_build_metadata::VERSION_EXTRA + conduwuit_build_metadata::version_tag() .map_or(SEMANTIC.to_owned(), |extra| format!("{SEMANTIC} ({extra})")) } diff --git a/src/web/mod.rs b/src/web/mod.rs index 25ec868c..9c6a5d83 100644 --- a/src/web/mod.rs +++ b/src/web/mod.rs @@ -6,7 +6,7 @@ use axum::{ response::{Html, IntoResponse, Response}, routing::get, }; -use conduwuit_build_metadata::{GIT_REMOTE_COMMIT_URL, GIT_REMOTE_WEB_URL, VERSION_EXTRA}; +use conduwuit_build_metadata::{GIT_REMOTE_COMMIT_URL, GIT_REMOTE_WEB_URL, version_tag}; use conduwuit_service::state; pub fn build() -> Router { diff --git a/src/web/templates/_layout.html.j2 b/src/web/templates/_layout.html.j2 index fd0a5b29..d298b68c 100644 --- a/src/web/templates/_layout.html.j2 +++ b/src/web/templates/_layout.html.j2 @@ -18,7 +18,7 @@ {%~ block footer ~%}