Compare commits

19 commits

Author SHA1 Message Date
Jade Ellis
b3679f4eeb
ci: Change label 2025-04-19 22:16:25 +01:00
Jade Ellis
1c2b32cdc8
ci: disable image cache so tom's runner doesn't fail last minute 2025-04-19 22:16:25 +01:00
Jason Volk
a5aed9f43d
reduce large stack frames 2025-04-19 22:16:25 +01:00
Jason Volk
de6d961535
mitigate additional debuginfo expansions
Signed-off-by: Jason Volk <jason@zemos.net>
2025-04-19 22:16:25 +01:00
Jason Volk
6c7845c8af
add missing feature-projections between intra-workspace crates
Signed-off-by: Jason Volk <jason@zemos.net>
2025-04-19 22:16:25 +01:00
Jason Volk
3639b93658
eliminate Arc impl for trait Event
Signed-off-by: Jason Volk <jason@zemos.net>
2025-04-19 22:16:25 +01:00
Jason Volk
200df676e9
simplify database backup interface related
Signed-off-by: Jason Volk <jason@zemos.net>
2025-04-19 22:16:25 +01:00
Jason Volk
56cc9318de
replace admin command branches returning RoomMessageEventContent
rename admin Command back to Context

Signed-off-by: Jason Volk <jason@zemos.net>
2025-04-19 22:16:24 +01:00
Jason Volk
f86d7236ac
misc async optimizations; macro reformatting
Signed-off-by: Jason Volk <jason@zemos.net>
2025-04-19 22:16:24 +01:00
Jason Volk
460cf27a03
improve appservice service async interfaces
Signed-off-by: Jason Volk <jason@zemos.net>
2025-04-19 22:16:24 +01:00
Jason Volk
3af241b947
remove box ids from admin room command arguments
Signed-off-by: Jason Volk <jason@zemos.net>
2025-04-19 22:16:24 +01:00
Jason Volk
79ae57b671
propagate better message from RustlsConfig load error. (#734)
Signed-off-by: Jason Volk <jason@zemos.net>
2025-04-19 22:16:24 +01:00
Jason Volk
11270c2d9d
slightly optimize user directory search loop
Signed-off-by: Jason Volk <jason@zemos.net>
2025-04-19 22:16:24 +01:00
Jason Volk
43ce46ff7e
increase snake sync asynchronicity
Signed-off-by: Jason Volk <jason@zemos.net>
2025-04-19 22:16:24 +01:00
Jason Volk
95f92f131b
modest cleanup of snake sync service related
Signed-off-by: Jason Volk <jason@zemos.net>
2025-04-19 22:16:23 +01:00
Jason Volk
dbb7560fa5
modernize state_res w/ stream extensions
Signed-off-by: Jason Volk <jason@zemos.net>
2025-04-19 22:16:23 +01:00
Jason Volk
208b81a18f
add ReadyEq future extension
Signed-off-by: Jason Volk <jason@zemos.net>
2025-04-19 22:16:23 +01:00
Jason Volk
0bee87b693
add ready_find() stream extension
Signed-off-by: Jason Volk <jason@zemos.net>
2025-04-19 22:16:23 +01:00
Jason Volk
173c0b35ad
relax Send requirement on some drier stream extensions
Signed-off-by: Jason Volk <jason@zemos.net>
2025-04-19 22:16:23 +01:00
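
Aside: several commits above add small future/stream helpers (`ready_find()`, `ReadyEq`) whose point is that the predicate is synchronous ("ready"), so no per-item future needs awaiting. As a rough standalone illustration of that idea only (not the crate's actual extension traits), assuming the `futures` crate:

```rust
// Rough sketch of a `ready_find`-style helper: a find() whose predicate is
// synchronous, wrapped in `future::ready` so it fits the async filter API.
// Illustration only; conduwuit's real helpers live on its own extension traits.
use futures::{Stream, StreamExt, future::ready, stream};

async fn ready_find<S, T>(input: S, pred: impl Fn(&T) -> bool) -> Option<T>
where
    S: Stream<Item = T> + Unpin,
{
    // StreamExt::filter expects an async predicate; `ready` lifts the sync one.
    input.filter(|item| ready(pred(item))).next().await
}

fn main() {
    futures::executor::block_on(async {
        let numbers = stream::iter(1..=10);
        let first_even = ready_find(numbers, |n| n % 2 == 0).await;
        assert_eq!(first_even, Some(2));
        println!("{first_even:?}");
    });
}
```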
78 changed files with 2038 additions and 2217 deletions

View file

@ -55,7 +55,7 @@ jobs:
}))
build-image:
runs-on: dind
runs-on: docker
container: ghcr.io/catthehacker/ubuntu:act-latest
needs: define-variables
permissions:
@ -152,8 +152,8 @@ jobs:
platforms: ${{ matrix.platform }}
labels: ${{ steps.meta.outputs.labels }}
annotations: ${{ steps.meta.outputs.annotations }}
cache-from: type=gha
cache-to: type=gha,mode=max
# cache-from: type=gha
# cache-to: type=gha,mode=max
sbom: true
outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true
env:
@ -175,7 +175,7 @@ jobs:
retention-days: 1
merge:
runs-on: dind
runs-on: docker
container: ghcr.io/catthehacker/ubuntu:act-latest
needs: [define-variables, build-image]
steps:

Cargo.lock (generated)
View file

@ -784,7 +784,6 @@ dependencies = [
"base64 0.22.1",
"bytes",
"conduwuit_core",
"conduwuit_database",
"conduwuit_service",
"const-str",
"futures",

View file

@ -17,12 +17,61 @@ crate-type = [
]
[features]
brotli_compression = [
"conduwuit-api/brotli_compression",
"conduwuit-core/brotli_compression",
"conduwuit-service/brotli_compression",
]
gzip_compression = [
"conduwuit-api/gzip_compression",
"conduwuit-core/gzip_compression",
"conduwuit-service/gzip_compression",
]
io_uring = [
"conduwuit-api/io_uring",
"conduwuit-database/io_uring",
"conduwuit-service/io_uring",
]
jemalloc = [
"conduwuit-api/jemalloc",
"conduwuit-core/jemalloc",
"conduwuit-database/jemalloc",
"conduwuit-service/jemalloc",
]
jemalloc_conf = [
"conduwuit-api/jemalloc_conf",
"conduwuit-core/jemalloc_conf",
"conduwuit-database/jemalloc_conf",
"conduwuit-service/jemalloc_conf",
]
jemalloc_prof = [
"conduwuit-api/jemalloc_prof",
"conduwuit-core/jemalloc_prof",
"conduwuit-database/jemalloc_prof",
"conduwuit-service/jemalloc_prof",
]
jemalloc_stats = [
"conduwuit-api/jemalloc_stats",
"conduwuit-core/jemalloc_stats",
"conduwuit-database/jemalloc_stats",
"conduwuit-service/jemalloc_stats",
]
release_max_log_level = [
"conduwuit-api/release_max_log_level",
"conduwuit-core/release_max_log_level",
"conduwuit-database/release_max_log_level",
"conduwuit-service/release_max_log_level",
"tracing/max_level_trace",
"tracing/release_max_level_info",
"log/max_level_trace",
"log/release_max_level_info",
]
zstd_compression = [
"conduwuit-api/zstd_compression",
"conduwuit-core/zstd_compression",
"conduwuit-database/zstd_compression",
"conduwuit-service/zstd_compression",
]
[dependencies]
clap.workspace = true

View file

@ -2,7 +2,7 @@ use clap::Parser;
use conduwuit::Result;
use crate::{
appservice, appservice::AppserviceCommand, check, check::CheckCommand, command::Command,
appservice, appservice::AppserviceCommand, check, check::CheckCommand, context::Context,
debug, debug::DebugCommand, federation, federation::FederationCommand, media,
media::MediaCommand, query, query::QueryCommand, room, room::RoomCommand, server,
server::ServerCommand, user, user::UserCommand,
@ -49,20 +49,18 @@ pub(super) enum AdminCommand {
}
#[tracing::instrument(skip_all, name = "command")]
pub(super) async fn process(command: AdminCommand, context: &Command<'_>) -> Result {
pub(super) async fn process(command: AdminCommand, context: &Context<'_>) -> Result {
use AdminCommand::*;
match command {
| Appservices(command) => appservice::process(command, context).await?,
| Media(command) => media::process(command, context).await?,
| Users(command) => user::process(command, context).await?,
| Rooms(command) => room::process(command, context).await?,
| Federation(command) => federation::process(command, context).await?,
| Server(command) => server::process(command, context).await?,
| Debug(command) => debug::process(command, context).await?,
| Query(command) => query::process(command, context).await?,
| Check(command) => check::process(command, context).await?,
| Appservices(command) => appservice::process(command, context).await,
| Media(command) => media::process(command, context).await,
| Users(command) => user::process(command, context).await,
| Rooms(command) => room::process(command, context).await,
| Federation(command) => federation::process(command, context).await,
| Server(command) => server::process(command, context).await,
| Debug(command) => debug::process(command, context).await,
| Query(command) => query::process(command, context).await,
| Check(command) => check::process(command, context).await,
}
Ok(())
}

View file

@ -1,84 +1,80 @@
use ruma::{api::appservice::Registration, events::room::message::RoomMessageEventContent};
use conduwuit::{Err, Result, checked};
use futures::{FutureExt, StreamExt, TryFutureExt};
use crate::{Result, admin_command};
use crate::admin_command;
#[admin_command]
pub(super) async fn register(&self) -> Result<RoomMessageEventContent> {
if self.body.len() < 2
|| !self.body[0].trim().starts_with("```")
|| self.body.last().unwrap_or(&"").trim() != "```"
pub(super) async fn register(&self) -> Result {
let body = &self.body;
let body_len = self.body.len();
if body_len < 2
|| !body[0].trim().starts_with("```")
|| body.last().unwrap_or(&"").trim() != "```"
{
return Ok(RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
));
return Err!("Expected code block in command body. Add --help for details.");
}
let appservice_config_body = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n");
let parsed_config = serde_yaml::from_str::<Registration>(&appservice_config_body);
let range = 1..checked!(body_len - 1)?;
let appservice_config_body = body[range].join("\n");
let parsed_config = serde_yaml::from_str(&appservice_config_body);
match parsed_config {
| Err(e) => return Err!("Could not parse appservice config as YAML: {e}"),
| Ok(registration) => match self
.services
.appservice
.register_appservice(&registration, &appservice_config_body)
.await
.map(|()| registration.id)
{
| Ok(()) => Ok(RoomMessageEventContent::text_plain(format!(
"Appservice registered with ID: {}",
registration.id
))),
| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"Failed to register appservice: {e}"
))),
| Err(e) => return Err!("Failed to register appservice: {e}"),
| Ok(id) => write!(self, "Appservice registered with ID: {id}"),
},
| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"Could not parse appservice config as YAML: {e}"
))),
}
.await
}
#[admin_command]
pub(super) async fn unregister(
&self,
appservice_identifier: String,
) -> Result<RoomMessageEventContent> {
pub(super) async fn unregister(&self, appservice_identifier: String) -> Result {
match self
.services
.appservice
.unregister_appservice(&appservice_identifier)
.await
{
| Ok(()) => Ok(RoomMessageEventContent::text_plain("Appservice unregistered.")),
| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"Failed to unregister appservice: {e}"
))),
| Err(e) => return Err!("Failed to unregister appservice: {e}"),
| Ok(()) => write!(self, "Appservice unregistered."),
}
.await
}
#[admin_command]
pub(super) async fn show_appservice_config(
&self,
appservice_identifier: String,
) -> Result<RoomMessageEventContent> {
pub(super) async fn show_appservice_config(&self, appservice_identifier: String) -> Result {
match self
.services
.appservice
.get_registration(&appservice_identifier)
.await
{
| None => return Err!("Appservice does not exist."),
| Some(config) => {
let config_str = serde_yaml::to_string(&config)
.expect("config should've been validated on register");
let output =
format!("Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```",);
Ok(RoomMessageEventContent::notice_markdown(output))
let config_str = serde_yaml::to_string(&config)?;
write!(self, "Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```")
},
| None => Ok(RoomMessageEventContent::text_plain("Appservice does not exist.")),
}
.await
}
#[admin_command]
pub(super) async fn list_registered(&self) -> Result<RoomMessageEventContent> {
let appservices = self.services.appservice.iter_ids().await;
let output = format!("Appservices ({}): {}", appservices.len(), appservices.join(", "));
Ok(RoomMessageEventContent::text_plain(output))
pub(super) async fn list_registered(&self) -> Result {
self.services
.appservice
.iter_ids()
.collect()
.map(Ok)
.and_then(|appservices: Vec<_>| {
let len = appservices.len();
let list = appservices.join(", ");
write!(self, "Appservices ({len}): {list}")
})
.await
}
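
Aside, illustrating the pattern in the hunk above: admin commands now return a bare `Result` and write their output into the command context (`write!`/`write_str`, with `Err!` for failures) instead of returning a `RoomMessageEventContent`. A simplified, synchronous stand-in (hypothetical `Context` with a plain `String` buffer, not conduwuit's actual async, lock-guarded type) might look like:

```rust
// Simplified sketch of the new admin-command shape: success paths write into
// the context, failure paths return early with an error. Names and types here
// are illustrative stand-ins, not conduwuit's API.
use std::fmt::Write as _;

type Result<T = ()> = std::result::Result<T, String>;

struct Context {
    output: String, // accumulated command output, delivered to the admin room later
}

impl Context {
    fn write_str(&mut self, s: &str) -> Result {
        self.output.push_str(s);
        Ok(())
    }

    // Mirrors e.g. `unregister`: bail out with an error, or write a success line.
    fn unregister(&mut self, appservice_id: &str, known: bool) -> Result {
        if !known {
            return Err(format!("Failed to unregister appservice: {appservice_id} not found"));
        }
        write!(self.output, "Appservice unregistered: {appservice_id}").map_err(|e| e.to_string())
    }
}

fn main() -> Result {
    let mut ctx = Context { output: String::new() };
    ctx.unregister("demo_bridge", true)?;
    ctx.write_str("\nDone.")?;
    println!("{}", ctx.output);
    Ok(())
}
```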

View file

@ -1,15 +1,14 @@
use conduwuit::Result;
use conduwuit_macros::implement;
use futures::StreamExt;
use ruma::events::room::message::RoomMessageEventContent;
use crate::Command;
use crate::Context;
/// Uses the iterator in `src/database/key_value/users.rs` to iterator over
/// every user in our database (remote and local). Reports total count, any
/// errors if there were any, etc
#[implement(Command, params = "<'_>")]
pub(super) async fn check_all_users(&self) -> Result<RoomMessageEventContent> {
#[implement(Context, params = "<'_>")]
pub(super) async fn check_all_users(&self) -> Result {
let timer = tokio::time::Instant::now();
let users = self.services.users.iter().collect::<Vec<_>>().await;
let query_time = timer.elapsed();
@ -18,11 +17,10 @@ pub(super) async fn check_all_users(&self) -> Result<RoomMessageEventContent> {
let err_count = users.iter().filter(|_user| false).count();
let ok_count = users.iter().filter(|_user| true).count();
let message = format!(
self.write_str(&format!(
"Database query completed in {query_time:?}:\n\n```\nTotal entries: \
{total:?}\nFailure/Invalid user count: {err_count:?}\nSuccess/Valid user count: \
{ok_count:?}\n```"
);
Ok(RoomMessageEventContent::notice_markdown(message))
))
.await
}

View file

@ -3,13 +3,13 @@ use std::{fmt, time::SystemTime};
use conduwuit::Result;
use conduwuit_service::Services;
use futures::{
Future, FutureExt,
Future, FutureExt, TryFutureExt,
io::{AsyncWriteExt, BufWriter},
lock::Mutex,
};
use ruma::EventId;
pub(crate) struct Command<'a> {
pub(crate) struct Context<'a> {
pub(crate) services: &'a Services,
pub(crate) body: &'a [&'a str],
pub(crate) timer: SystemTime,
@ -17,14 +17,14 @@ pub(crate) struct Command<'a> {
pub(crate) output: Mutex<BufWriter<Vec<u8>>>,
}
impl Command<'_> {
impl Context<'_> {
pub(crate) fn write_fmt(
&self,
arguments: fmt::Arguments<'_>,
) -> impl Future<Output = Result> + Send + '_ + use<'_> {
let buf = format!("{arguments}");
self.output.lock().then(|mut output| async move {
output.write_all(buf.as_bytes()).await.map_err(Into::into)
self.output.lock().then(async move |mut output| {
output.write_all(buf.as_bytes()).map_err(Into::into).await
})
}
@ -32,8 +32,8 @@ impl Command<'_> {
&'a self,
s: &'a str,
) -> impl Future<Output = Result> + Send + 'a {
self.output.lock().then(move |mut output| async move {
output.write_all(s.as_bytes()).await.map_err(Into::into)
self.output.lock().then(async move |mut output| {
output.write_all(s.as_bytes()).map_err(Into::into).await
})
}
}
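
The `context.rs` hunk above also swaps closures returning async blocks for async closures (`.then(async move |mut output| { .. })`). A minimal standalone version of that shape, assuming the `futures` and `tokio` crates and a toolchain with async closures:

```rust
// Minimal illustration (not conduwuit code) of locking a shared buffer and
// then writing to it with an async closure, mirroring the
// `self.output.lock().then(async move |mut output| { .. })` shape above.
use futures::{FutureExt, io::{AsyncWriteExt, Cursor}, lock::Mutex};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let output = Mutex::new(Cursor::new(Vec::<u8>::new()));

    // Lock the buffer, then write once the lock future resolves.
    output
        .lock()
        .then(async move |mut guard| { guard.write_all(b"hello admin room\n").await })
        .await?;

    let guard = output.lock().await;
    println!("{}", String::from_utf8_lossy(guard.get_ref()));
    Ok(())
}
```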

View file

@ -6,7 +6,7 @@ use std::{
};
use conduwuit::{
Error, Result, debug_error, err, info,
Err, Result, debug_error, err, info,
matrix::pdu::{PduEvent, PduId, RawPduId},
trace, utils,
utils::{
@ -17,10 +17,9 @@ use conduwuit::{
};
use futures::{FutureExt, StreamExt, TryStreamExt};
use ruma::{
CanonicalJsonObject, EventId, OwnedEventId, OwnedRoomOrAliasId, RoomId, RoomVersionId,
ServerName,
api::{client::error::ErrorKind, federation::event::get_room_state},
events::room::message::RoomMessageEventContent,
CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId,
OwnedRoomOrAliasId, OwnedServerName, RoomId, RoomVersionId,
api::federation::event::get_room_state,
};
use service::rooms::{
short::{ShortEventId, ShortRoomId},
@ -31,28 +30,24 @@ use tracing_subscriber::EnvFilter;
use crate::admin_command;
#[admin_command]
pub(super) async fn echo(&self, message: Vec<String>) -> Result<RoomMessageEventContent> {
pub(super) async fn echo(&self, message: Vec<String>) -> Result {
let message = message.join(" ");
Ok(RoomMessageEventContent::notice_plain(message))
self.write_str(&message).await
}
#[admin_command]
pub(super) async fn get_auth_chain(
&self,
event_id: Box<EventId>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn get_auth_chain(&self, event_id: OwnedEventId) -> Result {
let Ok(event) = self.services.rooms.timeline.get_pdu_json(&event_id).await else {
return Ok(RoomMessageEventContent::notice_plain("Event not found."));
return Err!("Event not found.");
};
let room_id_str = event
.get("room_id")
.and_then(|val| val.as_str())
.ok_or_else(|| Error::bad_database("Invalid event in database"))?;
.and_then(CanonicalJsonValue::as_str)
.ok_or_else(|| err!(Database("Invalid event in database")))?;
let room_id = <&RoomId>::try_from(room_id_str)
.map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
.map_err(|_| err!(Database("Invalid room id field in event in database")))?;
let start = Instant::now();
let count = self
@ -65,51 +60,39 @@ pub(super) async fn get_auth_chain(
.await;
let elapsed = start.elapsed();
Ok(RoomMessageEventContent::text_plain(format!(
"Loaded auth chain with length {count} in {elapsed:?}"
)))
let out = format!("Loaded auth chain with length {count} in {elapsed:?}");
self.write_str(&out).await
}
#[admin_command]
pub(super) async fn parse_pdu(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn parse_pdu(&self) -> Result {
if self.body.len() < 2
|| !self.body[0].trim().starts_with("```")
|| self.body.last().unwrap_or(&EMPTY).trim() != "```"
{
return Ok(RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
));
return Err!("Expected code block in command body. Add --help for details.");
}
let string = self.body[1..self.body.len().saturating_sub(1)].join("\n");
match serde_json::from_str(&string) {
| Err(e) => return Err!("Invalid json in command body: {e}"),
| Ok(value) => match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) {
| Err(e) => return Err!("Could not parse PDU JSON: {e:?}"),
| Ok(hash) => {
let event_id = OwnedEventId::parse(format!("${hash}"));
match serde_json::from_value::<PduEvent>(
serde_json::to_value(value).expect("value is json"),
) {
| Ok(pdu) => Ok(RoomMessageEventContent::text_plain(format!(
"EventId: {event_id:?}\n{pdu:#?}"
))),
| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"EventId: {event_id:?}\nCould not parse event: {e}"
))),
match serde_json::from_value::<PduEvent>(serde_json::to_value(value)?) {
| Err(e) => return Err!("EventId: {event_id:?}\nCould not parse event: {e}"),
| Ok(pdu) => write!(self, "EventId: {event_id:?}\n{pdu:#?}"),
}
},
| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"Could not parse PDU JSON: {e:?}"
))),
},
| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"Invalid json in command body: {e}"
))),
}
.await
}
#[admin_command]
pub(super) async fn get_pdu(&self, event_id: Box<EventId>) -> Result<RoomMessageEventContent> {
pub(super) async fn get_pdu(&self, event_id: OwnedEventId) -> Result {
let mut outlier = false;
let mut pdu_json = self
.services
@ -124,21 +107,18 @@ pub(super) async fn get_pdu(&self, event_id: Box<EventId>) -> Result<RoomMessage
}
match pdu_json {
| Err(_) => return Err!("PDU not found locally."),
| Ok(json) => {
let json_text =
serde_json::to_string_pretty(&json).expect("canonical json is valid json");
Ok(RoomMessageEventContent::notice_markdown(format!(
"{}\n```json\n{}\n```",
if outlier {
"Outlier (Rejected / Soft Failed) PDU found in our database"
} else {
"PDU found in our database"
},
json_text
)))
let text = serde_json::to_string_pretty(&json)?;
let msg = if outlier {
"Outlier (Rejected / Soft Failed) PDU found in our database"
} else {
"PDU found in our database"
};
write!(self, "{msg}\n```json\n{text}\n```",)
},
| Err(_) => Ok(RoomMessageEventContent::text_plain("PDU not found locally.")),
}
.await
}
#[admin_command]
@ -146,7 +126,7 @@ pub(super) async fn get_short_pdu(
&self,
shortroomid: ShortRoomId,
shorteventid: ShortEventId,
) -> Result<RoomMessageEventContent> {
) -> Result {
let pdu_id: RawPduId = PduId {
shortroomid,
shorteventid: shorteventid.into(),
@ -161,41 +141,33 @@ pub(super) async fn get_short_pdu(
.await;
match pdu_json {
| Err(_) => return Err!("PDU not found locally."),
| Ok(json) => {
let json_text =
serde_json::to_string_pretty(&json).expect("canonical json is valid json");
Ok(RoomMessageEventContent::notice_markdown(format!("```json\n{json_text}\n```",)))
let json_text = serde_json::to_string_pretty(&json)?;
write!(self, "```json\n{json_text}\n```")
},
| Err(_) => Ok(RoomMessageEventContent::text_plain("PDU not found locally.")),
}
.await
}
#[admin_command]
pub(super) async fn get_remote_pdu_list(
&self,
server: Box<ServerName>,
force: bool,
) -> Result<RoomMessageEventContent> {
pub(super) async fn get_remote_pdu_list(&self, server: OwnedServerName, force: bool) -> Result {
if !self.services.server.config.allow_federation {
return Ok(RoomMessageEventContent::text_plain(
"Federation is disabled on this homeserver.",
));
return Err!("Federation is disabled on this homeserver.",);
}
if server == self.services.globals.server_name() {
return Ok(RoomMessageEventContent::text_plain(
return Err!(
"Not allowed to send federation requests to ourselves. Please use `get-pdu` for \
fetching local PDUs from the database.",
));
);
}
if self.body.len() < 2
|| !self.body[0].trim().starts_with("```")
|| self.body.last().unwrap_or(&EMPTY).trim() != "```"
{
return Ok(RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
));
return Err!("Expected code block in command body. Add --help for details.",);
}
let list = self
@ -209,18 +181,19 @@ pub(super) async fn get_remote_pdu_list(
let mut failed_count: usize = 0;
let mut success_count: usize = 0;
for pdu in list {
for event_id in list {
if force {
match self.get_remote_pdu(Box::from(pdu), server.clone()).await {
match self
.get_remote_pdu(event_id.to_owned(), server.clone())
.await
{
| Err(e) => {
failed_count = failed_count.saturating_add(1);
self.services
.admin
.send_message(RoomMessageEventContent::text_plain(format!(
"Failed to get remote PDU, ignoring error: {e}"
)))
.await
.ok();
.send_text(&format!("Failed to get remote PDU, ignoring error: {e}"))
.await;
warn!("Failed to get remote PDU, ignoring error: {e}");
},
| _ => {
@ -228,44 +201,48 @@ pub(super) async fn get_remote_pdu_list(
},
}
} else {
self.get_remote_pdu(Box::from(pdu), server.clone()).await?;
self.get_remote_pdu(event_id.to_owned(), server.clone())
.await?;
success_count = success_count.saturating_add(1);
}
}
Ok(RoomMessageEventContent::text_plain(format!(
"Fetched {success_count} remote PDUs successfully with {failed_count} failures"
)))
let out =
format!("Fetched {success_count} remote PDUs successfully with {failed_count} failures");
self.write_str(&out).await
}
#[admin_command]
pub(super) async fn get_remote_pdu(
&self,
event_id: Box<EventId>,
server: Box<ServerName>,
) -> Result<RoomMessageEventContent> {
event_id: OwnedEventId,
server: OwnedServerName,
) -> Result {
if !self.services.server.config.allow_federation {
return Ok(RoomMessageEventContent::text_plain(
"Federation is disabled on this homeserver.",
));
return Err!("Federation is disabled on this homeserver.");
}
if server == self.services.globals.server_name() {
return Ok(RoomMessageEventContent::text_plain(
return Err!(
"Not allowed to send federation requests to ourselves. Please use `get-pdu` for \
fetching local PDUs.",
));
);
}
match self
.services
.sending
.send_federation_request(&server, ruma::api::federation::event::get_event::v1::Request {
event_id: event_id.clone().into(),
event_id: event_id.clone(),
include_unredacted_content: None,
})
.await
{
| Err(e) =>
return Err!(
"Remote server did not have PDU or failed sending request to remote server: {e}"
),
| Ok(response) => {
let json: CanonicalJsonObject =
serde_json::from_str(response.pdu.get()).map_err(|e| {
@ -273,10 +250,9 @@ pub(super) async fn get_remote_pdu(
"Requested event ID {event_id} from server but failed to convert from \
RawValue to CanonicalJsonObject (malformed event/response?): {e}"
);
Error::BadRequest(
ErrorKind::Unknown,
"Received response from server but failed to parse PDU",
)
err!(Request(Unknown(
"Received response from server but failed to parse PDU"
)))
})?;
trace!("Attempting to parse PDU: {:?}", &response.pdu);
@ -286,6 +262,7 @@ pub(super) async fn get_remote_pdu(
.rooms
.event_handler
.parse_incoming_pdu(&response.pdu)
.boxed()
.await;
let (event_id, value, room_id) = match parsed_result {
@ -293,9 +270,7 @@ pub(super) async fn get_remote_pdu(
| Err(e) => {
warn!("Failed to parse PDU: {e}");
info!("Full PDU: {:?}", &response.pdu);
return Ok(RoomMessageEventContent::text_plain(format!(
"Failed to parse PDU remote server {server} sent us: {e}"
)));
return Err!("Failed to parse PDU remote server {server} sent us: {e}");
},
};
@ -307,30 +282,18 @@ pub(super) async fn get_remote_pdu(
.rooms
.timeline
.backfill_pdu(&server, response.pdu)
.boxed()
.await?;
let json_text =
serde_json::to_string_pretty(&json).expect("canonical json is valid json");
Ok(RoomMessageEventContent::notice_markdown(format!(
"{}\n```json\n{}\n```",
"Got PDU from specified server and handled as backfilled PDU successfully. \
Event body:",
json_text
)))
let text = serde_json::to_string_pretty(&json)?;
let msg = "Got PDU from specified server and handled as backfilled";
write!(self, "{msg}. Event body:\n```json\n{text}\n```")
},
| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"Remote server did not have PDU or failed sending request to remote server: {e}"
))),
}
.await
}
#[admin_command]
pub(super) async fn get_room_state(
&self,
room: OwnedRoomOrAliasId,
) -> Result<RoomMessageEventContent> {
pub(super) async fn get_room_state(&self, room: OwnedRoomOrAliasId) -> Result {
let room_id = self.services.rooms.alias.resolve(&room).await?;
let room_state: Vec<_> = self
.services
@ -342,28 +305,24 @@ pub(super) async fn get_room_state(
.await?;
if room_state.is_empty() {
return Ok(RoomMessageEventContent::text_plain(
"Unable to find room state in our database (vector is empty)",
));
return Err!("Unable to find room state in our database (vector is empty)",);
}
let json = serde_json::to_string_pretty(&room_state).map_err(|e| {
warn!("Failed converting room state vector in our database to pretty JSON: {e}");
Error::bad_database(
err!(Database(
"Failed to convert room state events to pretty JSON, possible invalid room state \
events in our database",
)
events in our database {e}",
))
})?;
Ok(RoomMessageEventContent::notice_markdown(format!("```json\n{json}\n```")))
let out = format!("```json\n{json}\n```");
self.write_str(&out).await
}
#[admin_command]
pub(super) async fn ping(&self, server: Box<ServerName>) -> Result<RoomMessageEventContent> {
pub(super) async fn ping(&self, server: OwnedServerName) -> Result {
if server == self.services.globals.server_name() {
return Ok(RoomMessageEventContent::text_plain(
"Not allowed to send federation requests to ourselves.",
));
return Err!("Not allowed to send federation requests to ourselves.");
}
let timer = tokio::time::Instant::now();
@ -377,35 +336,27 @@ pub(super) async fn ping(&self, server: Box<ServerName>) -> Result<RoomMessageEv
)
.await
{
| Err(e) => {
return Err!("Failed sending federation request to specified server:\n\n{e}");
},
| Ok(response) => {
let ping_time = timer.elapsed();
let json_text_res = serde_json::to_string_pretty(&response.server);
if let Ok(json) = json_text_res {
return Ok(RoomMessageEventContent::notice_markdown(format!(
"Got response which took {ping_time:?} time:\n```json\n{json}\n```"
)));
}
let out = if let Ok(json) = json_text_res {
format!("Got response which took {ping_time:?} time:\n```json\n{json}\n```")
} else {
format!("Got non-JSON response which took {ping_time:?} time:\n{response:?}")
};
Ok(RoomMessageEventContent::text_plain(format!(
"Got non-JSON response which took {ping_time:?} time:\n{response:?}"
)))
},
| Err(e) => {
warn!(
"Failed sending federation request to specified server from ping debug command: \
{e}"
);
Ok(RoomMessageEventContent::text_plain(format!(
"Failed sending federation request to specified server:\n\n{e}",
)))
write!(self, "{out}")
},
}
.await
}
#[admin_command]
pub(super) async fn force_device_list_updates(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn force_device_list_updates(&self) -> Result {
// Force E2EE device list updates for all users
self.services
.users
@ -413,27 +364,17 @@ pub(super) async fn force_device_list_updates(&self) -> Result<RoomMessageEventC
.for_each(|user_id| self.services.users.mark_device_key_update(user_id))
.await;
Ok(RoomMessageEventContent::text_plain(
"Marked all devices for all users as having new keys to update",
))
write!(self, "Marked all devices for all users as having new keys to update").await
}
#[admin_command]
pub(super) async fn change_log_level(
&self,
filter: Option<String>,
reset: bool,
) -> Result<RoomMessageEventContent> {
pub(super) async fn change_log_level(&self, filter: Option<String>, reset: bool) -> Result {
let handles = &["console"];
if reset {
let old_filter_layer = match EnvFilter::try_new(&self.services.server.config.log) {
| Ok(s) => s,
| Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"Log level from config appears to be invalid now: {e}"
)));
},
| Err(e) => return Err!("Log level from config appears to be invalid now: {e}"),
};
match self
@ -443,16 +384,12 @@ pub(super) async fn change_log_level(
.reload
.reload(&old_filter_layer, Some(handles))
{
| Err(e) =>
return Err!("Failed to modify and reload the global tracing log level: {e}"),
| Ok(()) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"Successfully changed log level back to config value {}",
self.services.server.config.log
)));
},
| Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"Failed to modify and reload the global tracing log level: {e}"
)));
let value = &self.services.server.config.log;
let out = format!("Successfully changed log level back to config value {value}");
return self.write_str(&out).await;
},
}
}
@ -460,11 +397,7 @@ pub(super) async fn change_log_level(
if let Some(filter) = filter {
let new_filter_layer = match EnvFilter::try_new(filter) {
| Ok(s) => s,
| Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"Invalid log level filter specified: {e}"
)));
},
| Err(e) => return Err!("Invalid log level filter specified: {e}"),
};
match self
@ -474,90 +407,75 @@ pub(super) async fn change_log_level(
.reload
.reload(&new_filter_layer, Some(handles))
{
| Ok(()) => {
return Ok(RoomMessageEventContent::text_plain("Successfully changed log level"));
},
| Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"Failed to modify and reload the global tracing log level: {e}"
)));
},
| Ok(()) => return self.write_str("Successfully changed log level").await,
| Err(e) =>
return Err!("Failed to modify and reload the global tracing log level: {e}"),
}
}
Ok(RoomMessageEventContent::text_plain("No log level was specified."))
Err!("No log level was specified.")
}
#[admin_command]
pub(super) async fn sign_json(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn sign_json(&self) -> Result {
if self.body.len() < 2
|| !self.body[0].trim().starts_with("```")
|| self.body.last().unwrap_or(&"").trim() != "```"
{
return Ok(RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
));
return Err!("Expected code block in command body. Add --help for details.");
}
let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n");
match serde_json::from_str(&string) {
| Err(e) => return Err!("Invalid json: {e}"),
| Ok(mut value) => {
self.services
.server_keys
.sign_json(&mut value)
.expect("our request json is what ruma expects");
let json_text =
serde_json::to_string_pretty(&value).expect("canonical json is valid json");
Ok(RoomMessageEventContent::text_plain(json_text))
self.services.server_keys.sign_json(&mut value)?;
let json_text = serde_json::to_string_pretty(&value)?;
write!(self, "{json_text}")
},
| Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))),
}
.await
}
#[admin_command]
pub(super) async fn verify_json(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn verify_json(&self) -> Result {
if self.body.len() < 2
|| !self.body[0].trim().starts_with("```")
|| self.body.last().unwrap_or(&"").trim() != "```"
{
return Ok(RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
));
return Err!("Expected code block in command body. Add --help for details.");
}
let string = self.body[1..self.body.len().checked_sub(1).unwrap()].join("\n");
match serde_json::from_str::<CanonicalJsonObject>(&string) {
| Err(e) => return Err!("Invalid json: {e}"),
| Ok(value) => match self.services.server_keys.verify_json(&value, None).await {
| Ok(()) => Ok(RoomMessageEventContent::text_plain("Signature correct")),
| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"Signature verification failed: {e}"
))),
| Err(e) => return Err!("Signature verification failed: {e}"),
| Ok(()) => write!(self, "Signature correct"),
},
| Err(e) => Ok(RoomMessageEventContent::text_plain(format!("Invalid json: {e}"))),
}
.await
}
#[admin_command]
pub(super) async fn verify_pdu(&self, event_id: Box<EventId>) -> Result<RoomMessageEventContent> {
pub(super) async fn verify_pdu(&self, event_id: OwnedEventId) -> Result {
use ruma::signatures::Verified;
let mut event = self.services.rooms.timeline.get_pdu_json(&event_id).await?;
event.remove("event_id");
let msg = match self.services.server_keys.verify_event(&event, None).await {
| Ok(ruma::signatures::Verified::Signatures) =>
"signatures OK, but content hash failed (redaction).",
| Ok(ruma::signatures::Verified::All) => "signatures and hashes OK.",
| Err(e) => return Err(e),
| Ok(Verified::Signatures) => "signatures OK, but content hash failed (redaction).",
| Ok(Verified::All) => "signatures and hashes OK.",
};
Ok(RoomMessageEventContent::notice_plain(msg))
self.write_str(msg).await
}
#[admin_command]
#[tracing::instrument(skip(self))]
pub(super) async fn first_pdu_in_room(
&self,
room_id: Box<RoomId>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn first_pdu_in_room(&self, room_id: OwnedRoomId) -> Result {
if !self
.services
.rooms
@ -565,9 +483,7 @@ pub(super) async fn first_pdu_in_room(
.server_in_room(&self.services.server.name, &room_id)
.await
{
return Ok(RoomMessageEventContent::text_plain(
"We are not participating in the room / we don't know about the room ID.",
));
return Err!("We are not participating in the room / we don't know about the room ID.",);
}
let first_pdu = self
@ -576,17 +492,15 @@ pub(super) async fn first_pdu_in_room(
.timeline
.first_pdu_in_room(&room_id)
.await
.map_err(|_| Error::bad_database("Failed to find the first PDU in database"))?;
.map_err(|_| err!(Database("Failed to find the first PDU in database")))?;
Ok(RoomMessageEventContent::text_plain(format!("{first_pdu:?}")))
let out = format!("{first_pdu:?}");
self.write_str(&out).await
}
#[admin_command]
#[tracing::instrument(skip(self))]
pub(super) async fn latest_pdu_in_room(
&self,
room_id: Box<RoomId>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn latest_pdu_in_room(&self, room_id: OwnedRoomId) -> Result {
if !self
.services
.rooms
@ -594,9 +508,7 @@ pub(super) async fn latest_pdu_in_room(
.server_in_room(&self.services.server.name, &room_id)
.await
{
return Ok(RoomMessageEventContent::text_plain(
"We are not participating in the room / we don't know about the room ID.",
));
return Err!("We are not participating in the room / we don't know about the room ID.");
}
let latest_pdu = self
@ -605,18 +517,19 @@ pub(super) async fn latest_pdu_in_room(
.timeline
.latest_pdu_in_room(&room_id)
.await
.map_err(|_| Error::bad_database("Failed to find the latest PDU in database"))?;
.map_err(|_| err!(Database("Failed to find the latest PDU in database")))?;
Ok(RoomMessageEventContent::text_plain(format!("{latest_pdu:?}")))
let out = format!("{latest_pdu:?}");
self.write_str(&out).await
}
#[admin_command]
#[tracing::instrument(skip(self))]
pub(super) async fn force_set_room_state_from_server(
&self,
room_id: Box<RoomId>,
server_name: Box<ServerName>,
) -> Result<RoomMessageEventContent> {
room_id: OwnedRoomId,
server_name: OwnedServerName,
) -> Result {
if !self
.services
.rooms
@ -624,9 +537,7 @@ pub(super) async fn force_set_room_state_from_server(
.server_in_room(&self.services.server.name, &room_id)
.await
{
return Ok(RoomMessageEventContent::text_plain(
"We are not participating in the room / we don't know about the room ID.",
));
return Err!("We are not participating in the room / we don't know about the room ID.");
}
let first_pdu = self
@ -635,7 +546,7 @@ pub(super) async fn force_set_room_state_from_server(
.timeline
.latest_pdu_in_room(&room_id)
.await
.map_err(|_| Error::bad_database("Failed to find the latest PDU in database"))?;
.map_err(|_| err!(Database("Failed to find the latest PDU in database")))?;
let room_version = self.services.rooms.state.get_room_version(&room_id).await?;
@ -645,10 +556,9 @@ pub(super) async fn force_set_room_state_from_server(
.services
.sending
.send_federation_request(&server_name, get_room_state::v1::Request {
room_id: room_id.clone().into(),
room_id: room_id.clone(),
event_id: first_pdu.event_id.clone(),
})
.boxed()
.await?;
for pdu in remote_state_response.pdus.clone() {
@ -657,7 +567,6 @@ pub(super) async fn force_set_room_state_from_server(
.rooms
.event_handler
.parse_incoming_pdu(&pdu)
.boxed()
.await
{
| Ok(t) => t,
@ -721,7 +630,6 @@ pub(super) async fn force_set_room_state_from_server(
.rooms
.event_handler
.resolve_state(&room_id, &room_version, state)
.boxed()
.await?;
info!("Forcing new room state");
@ -737,6 +645,7 @@ pub(super) async fn force_set_room_state_from_server(
.await?;
let state_lock = self.services.rooms.state.mutex.lock(&*room_id).await;
self.services
.rooms
.state
@ -753,21 +662,18 @@ pub(super) async fn force_set_room_state_from_server(
.update_joined_count(&room_id)
.await;
drop(state_lock);
Ok(RoomMessageEventContent::text_plain(
"Successfully forced the room state from the requested remote server.",
))
self.write_str("Successfully forced the room state from the requested remote server.")
.await
}
#[admin_command]
pub(super) async fn get_signing_keys(
&self,
server_name: Option<Box<ServerName>>,
notary: Option<Box<ServerName>>,
server_name: Option<OwnedServerName>,
notary: Option<OwnedServerName>,
query: bool,
) -> Result<RoomMessageEventContent> {
let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into());
) -> Result {
let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone());
if let Some(notary) = notary {
let signing_keys = self
@ -776,9 +682,8 @@ pub(super) async fn get_signing_keys(
.notary_request(&notary, &server_name)
.await?;
return Ok(RoomMessageEventContent::notice_markdown(format!(
"```rs\n{signing_keys:#?}\n```"
)));
let out = format!("```rs\n{signing_keys:#?}\n```");
return self.write_str(&out).await;
}
let signing_keys = if query {
@ -793,17 +698,13 @@ pub(super) async fn get_signing_keys(
.await?
};
Ok(RoomMessageEventContent::notice_markdown(format!(
"```rs\n{signing_keys:#?}\n```"
)))
let out = format!("```rs\n{signing_keys:#?}\n```");
self.write_str(&out).await
}
#[admin_command]
pub(super) async fn get_verify_keys(
&self,
server_name: Option<Box<ServerName>>,
) -> Result<RoomMessageEventContent> {
let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone().into());
pub(super) async fn get_verify_keys(&self, server_name: Option<OwnedServerName>) -> Result {
let server_name = server_name.unwrap_or_else(|| self.services.server.name.clone());
let keys = self
.services
@ -818,26 +719,24 @@ pub(super) async fn get_verify_keys(
writeln!(out, "| {key_id} | {key:?} |")?;
}
Ok(RoomMessageEventContent::notice_markdown(out))
self.write_str(&out).await
}
#[admin_command]
pub(super) async fn resolve_true_destination(
&self,
server_name: Box<ServerName>,
server_name: OwnedServerName,
no_cache: bool,
) -> Result<RoomMessageEventContent> {
) -> Result {
if !self.services.server.config.allow_federation {
return Ok(RoomMessageEventContent::text_plain(
"Federation is disabled on this homeserver.",
));
return Err!("Federation is disabled on this homeserver.",);
}
if server_name == self.services.server.name {
return Ok(RoomMessageEventContent::text_plain(
return Err!(
"Not allowed to send federation requests to ourselves. Please use `get-pdu` for \
fetching local PDUs.",
));
);
}
let actual = self
@ -846,13 +745,12 @@ pub(super) async fn resolve_true_destination(
.resolve_actual_dest(&server_name, !no_cache)
.await?;
let msg = format!("Destination: {}\nHostname URI: {}", actual.dest, actual.host,);
Ok(RoomMessageEventContent::text_markdown(msg))
let msg = format!("Destination: {}\nHostname URI: {}", actual.dest, actual.host);
self.write_str(&msg).await
}
#[admin_command]
pub(super) async fn memory_stats(&self, opts: Option<String>) -> Result<RoomMessageEventContent> {
pub(super) async fn memory_stats(&self, opts: Option<String>) -> Result {
const OPTS: &str = "abcdefghijklmnopqrstuvwxyz";
let opts: String = OPTS
@ -871,13 +769,12 @@ pub(super) async fn memory_stats(&self, opts: Option<String>) -> Result<RoomMess
self.write_str("```\n").await?;
self.write_str(&stats).await?;
self.write_str("\n```").await?;
Ok(RoomMessageEventContent::text_plain(""))
Ok(())
}
#[cfg(tokio_unstable)]
#[admin_command]
pub(super) async fn runtime_metrics(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn runtime_metrics(&self) -> Result {
let out = self.services.server.metrics.runtime_metrics().map_or_else(
|| "Runtime metrics are not available.".to_owned(),
|metrics| {
@ -890,51 +787,51 @@ pub(super) async fn runtime_metrics(&self) -> Result<RoomMessageEventContent> {
},
);
Ok(RoomMessageEventContent::text_markdown(out))
self.write_str(&out).await
}
#[cfg(not(tokio_unstable))]
#[admin_command]
pub(super) async fn runtime_metrics(&self) -> Result<RoomMessageEventContent> {
Ok(RoomMessageEventContent::text_markdown(
"Runtime metrics require building with `tokio_unstable`.",
))
pub(super) async fn runtime_metrics(&self) -> Result {
self.write_str("Runtime metrics require building with `tokio_unstable`.")
.await
}
#[cfg(tokio_unstable)]
#[admin_command]
pub(super) async fn runtime_interval(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn runtime_interval(&self) -> Result {
let out = self.services.server.metrics.runtime_interval().map_or_else(
|| "Runtime metrics are not available.".to_owned(),
|metrics| format!("```rs\n{metrics:#?}\n```"),
);
Ok(RoomMessageEventContent::text_markdown(out))
self.write_str(&out).await
}
#[cfg(not(tokio_unstable))]
#[admin_command]
pub(super) async fn runtime_interval(&self) -> Result<RoomMessageEventContent> {
Ok(RoomMessageEventContent::text_markdown(
"Runtime metrics require building with `tokio_unstable`.",
))
pub(super) async fn runtime_interval(&self) -> Result {
self.write_str("Runtime metrics require building with `tokio_unstable`.")
.await
}
#[admin_command]
pub(super) async fn time(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn time(&self) -> Result {
let now = SystemTime::now();
Ok(RoomMessageEventContent::text_markdown(utils::time::format(now, "%+")))
let now = utils::time::format(now, "%+");
self.write_str(&now).await
}
#[admin_command]
pub(super) async fn list_dependencies(&self, names: bool) -> Result<RoomMessageEventContent> {
pub(super) async fn list_dependencies(&self, names: bool) -> Result {
if names {
let out = info::cargo::dependencies_names().join(" ");
return Ok(RoomMessageEventContent::notice_markdown(out));
return self.write_str(&out).await;
}
let deps = info::cargo::dependencies();
let mut out = String::new();
let deps = info::cargo::dependencies();
writeln!(out, "| name | version | features |")?;
writeln!(out, "| ---- | ------- | -------- |")?;
for (name, dep) in deps {
@ -945,10 +842,11 @@ pub(super) async fn list_dependencies(&self, names: bool) -> Result<RoomMessageE
} else {
String::new()
};
writeln!(out, "| {name} | {version} | {feats} |")?;
}
Ok(RoomMessageEventContent::notice_markdown(out))
self.write_str(&out).await
}
#[admin_command]
@ -956,7 +854,7 @@ pub(super) async fn database_stats(
&self,
property: Option<String>,
map: Option<String>,
) -> Result<RoomMessageEventContent> {
) -> Result {
let map_name = map.as_ref().map_or(EMPTY, String::as_str);
let property = property.unwrap_or_else(|| "rocksdb.stats".to_owned());
self.services
@ -968,17 +866,11 @@ pub(super) async fn database_stats(
let res = map.property(&property).expect("invalid property");
writeln!(self, "##### {name}:\n```\n{}\n```", res.trim())
})
.await?;
Ok(RoomMessageEventContent::notice_plain(""))
.await
}
#[admin_command]
pub(super) async fn database_files(
&self,
map: Option<String>,
level: Option<i32>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn database_files(&self, map: Option<String>, level: Option<i32>) -> Result {
let mut files: Vec<_> = self.services.db.db.file_list().collect::<Result<_>>()?;
files.sort_by_key(|f| f.name.clone());
@ -1005,16 +897,12 @@ pub(super) async fn database_files(
file.column_family_name,
)
})
.await?;
Ok(RoomMessageEventContent::notice_plain(""))
.await
}
#[admin_command]
pub(super) async fn trim_memory(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn trim_memory(&self) -> Result {
conduwuit::alloc::trim(None)?;
writeln!(self, "done").await?;
Ok(RoomMessageEventContent::notice_plain(""))
writeln!(self, "done").await
}

View file

@ -3,7 +3,7 @@ pub(crate) mod tester;
use clap::Subcommand;
use conduwuit::Result;
use ruma::{EventId, OwnedRoomOrAliasId, RoomId, ServerName};
use ruma::{OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedServerName};
use service::rooms::short::{ShortEventId, ShortRoomId};
use self::tester::TesterCommand;
@ -20,7 +20,7 @@ pub(super) enum DebugCommand {
/// - Get the auth_chain of a PDU
GetAuthChain {
/// An event ID (the $ character followed by the base64 reference hash)
event_id: Box<EventId>,
event_id: OwnedEventId,
},
/// - Parse and print a PDU from a JSON
@ -35,7 +35,7 @@ pub(super) enum DebugCommand {
/// - Retrieve and print a PDU by EventID from the conduwuit database
GetPdu {
/// An event ID (a $ followed by the base64 reference hash)
event_id: Box<EventId>,
event_id: OwnedEventId,
},
/// - Retrieve and print a PDU by PduId from the conduwuit database
@ -52,11 +52,11 @@ pub(super) enum DebugCommand {
/// (following normal event auth rules, handles it as an incoming PDU).
GetRemotePdu {
/// An event ID (a $ followed by the base64 reference hash)
event_id: Box<EventId>,
event_id: OwnedEventId,
/// Argument for us to attempt to fetch the event from the
/// specified remote server.
server: Box<ServerName>,
server: OwnedServerName,
},
/// - Same as `get-remote-pdu` but accepts a codeblock newline delimited
@ -64,7 +64,7 @@ pub(super) enum DebugCommand {
GetRemotePduList {
/// Argument for us to attempt to fetch all the events from the
/// specified remote server.
server: Box<ServerName>,
server: OwnedServerName,
/// If set, ignores errors, else stops at the first error/failure.
#[arg(short, long)]
@ -88,10 +88,10 @@ pub(super) enum DebugCommand {
/// - Get and display signing keys from local cache or remote server.
GetSigningKeys {
server_name: Option<Box<ServerName>>,
server_name: Option<OwnedServerName>,
#[arg(long)]
notary: Option<Box<ServerName>>,
notary: Option<OwnedServerName>,
#[arg(short, long)]
query: bool,
@ -99,14 +99,14 @@ pub(super) enum DebugCommand {
/// - Get and display signing keys from local cache or remote server.
GetVerifyKeys {
server_name: Option<Box<ServerName>>,
server_name: Option<OwnedServerName>,
},
/// - Sends a federation request to the remote server's
/// `/_matrix/federation/v1/version` endpoint and measures the latency it
/// took for the server to respond
Ping {
server: Box<ServerName>,
server: OwnedServerName,
},
/// - Forces device lists for all local and remote users to be updated (as
@ -141,21 +141,21 @@ pub(super) enum DebugCommand {
///
/// This re-verifies a PDU existing in the database found by ID.
VerifyPdu {
event_id: Box<EventId>,
event_id: OwnedEventId,
},
/// - Prints the very first PDU in the specified room (typically
/// m.room.create)
FirstPduInRoom {
/// The room ID
room_id: Box<RoomId>,
room_id: OwnedRoomId,
},
/// - Prints the latest ("last") PDU in the specified room (typically a
/// message)
LatestPduInRoom {
/// The room ID
room_id: Box<RoomId>,
room_id: OwnedRoomId,
},
/// - Forcefully replaces the room state of our local copy of the specified
@ -174,9 +174,9 @@ pub(super) enum DebugCommand {
/// `/_matrix/federation/v1/state/{roomId}`.
ForceSetRoomStateFromServer {
/// The impacted room ID
room_id: Box<RoomId>,
room_id: OwnedRoomId,
/// The server we will use to query the room state for
server_name: Box<ServerName>,
server_name: OwnedServerName,
},
/// - Runs a server name through conduwuit's true destination resolution
@ -184,7 +184,7 @@ pub(super) enum DebugCommand {
///
/// Useful for debugging well-known issues
ResolveTrueDestination {
server_name: Box<ServerName>,
server_name: OwnedServerName,
#[arg(short, long)]
no_cache: bool,

View file

@ -1,7 +1,6 @@
use conduwuit::Err;
use ruma::events::room::message::RoomMessageEventContent;
use conduwuit::{Err, Result};
use crate::{Result, admin_command, admin_command_dispatch};
use crate::{admin_command, admin_command_dispatch};
#[admin_command_dispatch]
#[derive(Debug, clap::Subcommand)]
@ -14,14 +13,14 @@ pub(crate) enum TesterCommand {
#[rustfmt::skip]
#[admin_command]
async fn panic(&self) -> Result<RoomMessageEventContent> {
async fn panic(&self) -> Result {
panic!("panicked")
}
#[rustfmt::skip]
#[admin_command]
async fn failure(&self) -> Result<RoomMessageEventContent> {
async fn failure(&self) -> Result {
Err!("failed")
}
@ -29,20 +28,20 @@ async fn failure(&self) -> Result<RoomMessageEventContent> {
#[inline(never)]
#[rustfmt::skip]
#[admin_command]
async fn tester(&self) -> Result<RoomMessageEventContent> {
async fn tester(&self) -> Result {
Ok(RoomMessageEventContent::notice_plain("legacy"))
self.write_str("Ok").await
}
#[inline(never)]
#[rustfmt::skip]
#[admin_command]
async fn timer(&self) -> Result<RoomMessageEventContent> {
async fn timer(&self) -> Result {
let started = std::time::Instant::now();
timed(self.body);
let elapsed = started.elapsed();
Ok(RoomMessageEventContent::notice_plain(format!("completed in {elapsed:#?}")))
self.write_str(&format!("completed in {elapsed:#?}")).await
}
#[inline(never)]

View file

@ -1,49 +1,48 @@
use std::fmt::Write;
use conduwuit::Result;
use conduwuit::{Err, Result};
use futures::StreamExt;
use ruma::{
OwnedRoomId, RoomId, ServerName, UserId, events::room::message::RoomMessageEventContent,
};
use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId};
use crate::{admin_command, get_room_info};
#[admin_command]
pub(super) async fn disable_room(&self, room_id: Box<RoomId>) -> Result<RoomMessageEventContent> {
pub(super) async fn disable_room(&self, room_id: OwnedRoomId) -> Result {
self.services.rooms.metadata.disable_room(&room_id, true);
Ok(RoomMessageEventContent::text_plain("Room disabled."))
self.write_str("Room disabled.").await
}
#[admin_command]
pub(super) async fn enable_room(&self, room_id: Box<RoomId>) -> Result<RoomMessageEventContent> {
pub(super) async fn enable_room(&self, room_id: OwnedRoomId) -> Result {
self.services.rooms.metadata.disable_room(&room_id, false);
Ok(RoomMessageEventContent::text_plain("Room enabled."))
self.write_str("Room enabled.").await
}
#[admin_command]
pub(super) async fn incoming_federation(&self) -> Result<RoomMessageEventContent> {
let map = self
.services
.rooms
.event_handler
.federation_handletime
.read()
.expect("locked");
let mut msg = format!("Handling {} incoming pdus:\n", map.len());
pub(super) async fn incoming_federation(&self) -> Result {
let msg = {
let map = self
.services
.rooms
.event_handler
.federation_handletime
.read()
.expect("locked");
for (r, (e, i)) in map.iter() {
let elapsed = i.elapsed();
writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60)?;
}
let mut msg = format!("Handling {} incoming pdus:\n", map.len());
for (r, (e, i)) in map.iter() {
let elapsed = i.elapsed();
writeln!(msg, "{} {}: {}m{}s", r, e, elapsed.as_secs() / 60, elapsed.as_secs() % 60)?;
}
Ok(RoomMessageEventContent::text_plain(&msg))
msg
};
self.write_str(&msg).await
}
#[admin_command]
pub(super) async fn fetch_support_well_known(
&self,
server_name: Box<ServerName>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn fetch_support_well_known(&self, server_name: OwnedServerName) -> Result {
let response = self
.services
.client
@ -55,54 +54,44 @@ pub(super) async fn fetch_support_well_known(
let text = response.text().await?;
if text.is_empty() {
return Ok(RoomMessageEventContent::text_plain("Response text/body is empty."));
return Err!("Response text/body is empty.");
}
if text.len() > 1500 {
return Ok(RoomMessageEventContent::text_plain(
return Err!(
"Response text/body is over 1500 characters, assuming no support well-known.",
));
);
}
let json: serde_json::Value = match serde_json::from_str(&text) {
| Ok(json) => json,
| Err(_) => {
return Ok(RoomMessageEventContent::text_plain(
"Response text/body is not valid JSON.",
));
return Err!("Response text/body is not valid JSON.",);
},
};
let pretty_json: String = match serde_json::to_string_pretty(&json) {
| Ok(json) => json,
| Err(_) => {
return Ok(RoomMessageEventContent::text_plain(
"Response text/body is not valid JSON.",
));
return Err!("Response text/body is not valid JSON.",);
},
};
Ok(RoomMessageEventContent::notice_markdown(format!(
"Got JSON response:\n\n```json\n{pretty_json}\n```"
)))
self.write_str(&format!("Got JSON response:\n\n```json\n{pretty_json}\n```"))
.await
}
#[admin_command]
pub(super) async fn remote_user_in_rooms(
&self,
user_id: Box<UserId>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn remote_user_in_rooms(&self, user_id: OwnedUserId) -> Result {
if user_id.server_name() == self.services.server.name {
return Ok(RoomMessageEventContent::text_plain(
return Err!(
"User belongs to our server, please use `list-joined-rooms` user admin command \
instead.",
));
);
}
if !self.services.users.exists(&user_id).await {
return Ok(RoomMessageEventContent::text_plain(
"Remote user does not exist in our database.",
));
return Err!("Remote user does not exist in our database.",);
}
let mut rooms: Vec<(OwnedRoomId, u64, String)> = self
@ -115,21 +104,19 @@ pub(super) async fn remote_user_in_rooms(
.await;
if rooms.is_empty() {
return Ok(RoomMessageEventContent::text_plain("User is not in any rooms."));
return Err!("User is not in any rooms.");
}
rooms.sort_by_key(|r| r.1);
rooms.reverse();
let output = format!(
"Rooms {user_id} shares with us ({}):\n```\n{}\n```",
rooms.len(),
rooms
.iter()
.map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}"))
.collect::<Vec<_>>()
.join("\n")
);
let num = rooms.len();
let body = rooms
.iter()
.map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}"))
.collect::<Vec<_>>()
.join("\n");
Ok(RoomMessageEventContent::text_markdown(output))
self.write_str(&format!("Rooms {user_id} shares with us ({num}):\n```\n{body}\n```",))
.await
}

View file

@ -2,7 +2,7 @@ mod commands;
use clap::Subcommand;
use conduwuit::Result;
use ruma::{RoomId, ServerName, UserId};
use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId};
use crate::admin_command_dispatch;
@ -14,12 +14,12 @@ pub(super) enum FederationCommand {
/// - Disables incoming federation handling for a room.
DisableRoom {
room_id: Box<RoomId>,
room_id: OwnedRoomId,
},
/// - Enables incoming federation handling for a room again.
EnableRoom {
room_id: Box<RoomId>,
room_id: OwnedRoomId,
},
/// - Fetch `/.well-known/matrix/support` from the specified server
@ -32,11 +32,11 @@ pub(super) enum FederationCommand {
/// moderation, and security inquiries. This command provides a way to
/// easily fetch that information.
FetchSupportWellKnown {
server_name: Box<ServerName>,
server_name: OwnedServerName,
},
/// - Lists all the rooms we share/track with the specified *remote* user
RemoteUserInRooms {
user_id: Box<UserId>,
user_id: OwnedUserId,
},
}

View file

@ -1,26 +1,22 @@
use std::time::Duration;
use conduwuit::{
Result, debug, debug_info, debug_warn, error, info, trace, utils::time::parse_timepoint_ago,
Err, Result, debug, debug_info, debug_warn, error, info, trace,
utils::time::parse_timepoint_ago, warn,
};
use conduwuit_service::media::Dim;
use ruma::{
EventId, Mxc, MxcUri, OwnedMxcUri, OwnedServerName, ServerName,
events::room::message::RoomMessageEventContent,
};
use ruma::{Mxc, OwnedEventId, OwnedMxcUri, OwnedServerName};
use crate::{admin_command, utils::parse_local_user_id};
#[admin_command]
pub(super) async fn delete(
&self,
mxc: Option<Box<MxcUri>>,
event_id: Option<Box<EventId>>,
) -> Result<RoomMessageEventContent> {
mxc: Option<OwnedMxcUri>,
event_id: Option<OwnedEventId>,
) -> Result {
if event_id.is_some() && mxc.is_some() {
return Ok(RoomMessageEventContent::text_plain(
"Please specify either an MXC or an event ID, not both.",
));
return Err!("Please specify either an MXC or an event ID, not both.",);
}
if let Some(mxc) = mxc {
@ -30,9 +26,7 @@ pub(super) async fn delete(
.delete(&mxc.as_str().try_into()?)
.await?;
return Ok(RoomMessageEventContent::text_plain(
"Deleted the MXC from our database and on our filesystem.",
));
return Err!("Deleted the MXC from our database and on our filesystem.",);
}
if let Some(event_id) = event_id {
@ -113,41 +107,36 @@ pub(super) async fn delete(
let final_url = url.to_string().replace('"', "");
mxc_urls.push(final_url);
} else {
info!(
warn!(
"Found a URL in the event ID {event_id} but did not \
start with mxc://, ignoring"
);
}
} else {
info!("No \"url\" key in \"file\" key.");
error!("No \"url\" key in \"file\" key.");
}
}
}
} else {
return Ok(RoomMessageEventContent::text_plain(
return Err!(
"Event ID does not have a \"content\" key or failed parsing the \
event ID JSON.",
));
);
}
} else {
return Ok(RoomMessageEventContent::text_plain(
return Err!(
"Event ID does not have a \"content\" key, this is not a message or an \
event type that contains media.",
));
);
}
},
| _ => {
return Ok(RoomMessageEventContent::text_plain(
"Event ID does not exist or is not known to us.",
));
return Err!("Event ID does not exist or is not known to us.",);
},
}
if mxc_urls.is_empty() {
info!("Parsed event ID {event_id} but did not contain any MXC URLs.");
return Ok(RoomMessageEventContent::text_plain(
"Parsed event ID but found no MXC URLs.",
));
return Err!("Parsed event ID but found no MXC URLs.",);
}
let mut mxc_deletion_count: usize = 0;
@ -170,27 +159,27 @@ pub(super) async fn delete(
}
}
return Ok(RoomMessageEventContent::text_plain(format!(
"Deleted {mxc_deletion_count} total MXCs from our database and the filesystem from \
event ID {event_id}."
)));
return self
.write_str(&format!(
"Deleted {mxc_deletion_count} total MXCs from our database and the filesystem \
from event ID {event_id}."
))
.await;
}
Ok(RoomMessageEventContent::text_plain(
Err!(
"Please specify either an MXC using --mxc or an event ID using --event-id of the \
message containing an image. See --help for details.",
))
message containing an image. See --help for details."
)
}
#[admin_command]
pub(super) async fn delete_list(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn delete_list(&self) -> Result {
if self.body.len() < 2
|| !self.body[0].trim().starts_with("```")
|| self.body.last().unwrap_or(&"").trim() != "```"
{
return Ok(RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
));
return Err!("Expected code block in command body. Add --help for details.",);
}
let mut failed_parsed_mxcs: usize = 0;
@ -204,7 +193,6 @@ pub(super) async fn delete_list(&self) -> Result<RoomMessageEventContent> {
.try_into()
.inspect_err(|e| {
debug_warn!("Failed to parse user-provided MXC URI: {e}");
failed_parsed_mxcs = failed_parsed_mxcs.saturating_add(1);
})
.ok()
@ -227,10 +215,11 @@ pub(super) async fn delete_list(&self) -> Result<RoomMessageEventContent> {
}
}
Ok(RoomMessageEventContent::text_plain(format!(
self.write_str(&format!(
"Finished bulk MXC deletion, deleted {mxc_deletion_count} total MXCs from our database \
and the filesystem. {failed_parsed_mxcs} MXCs failed to be parsed from the database.",
)))
))
.await
}
#[admin_command]
@ -240,11 +229,9 @@ pub(super) async fn delete_past_remote_media(
before: bool,
after: bool,
yes_i_want_to_delete_local_media: bool,
) -> Result<RoomMessageEventContent> {
) -> Result {
if before && after {
return Ok(RoomMessageEventContent::text_plain(
"Please only pick one argument, --before or --after.",
));
return Err!("Please only pick one argument, --before or --after.",);
}
assert!(!(before && after), "--before and --after should not be specified together");
@ -260,35 +247,28 @@ pub(super) async fn delete_past_remote_media(
)
.await?;
Ok(RoomMessageEventContent::text_plain(format!(
"Deleted {deleted_count} total files.",
)))
self.write_str(&format!("Deleted {deleted_count} total files.",))
.await
}
#[admin_command]
pub(super) async fn delete_all_from_user(
&self,
username: String,
) -> Result<RoomMessageEventContent> {
pub(super) async fn delete_all_from_user(&self, username: String) -> Result {
let user_id = parse_local_user_id(self.services, &username)?;
let deleted_count = self.services.media.delete_from_user(&user_id).await?;
Ok(RoomMessageEventContent::text_plain(format!(
"Deleted {deleted_count} total files.",
)))
self.write_str(&format!("Deleted {deleted_count} total files.",))
.await
}
#[admin_command]
pub(super) async fn delete_all_from_server(
&self,
server_name: Box<ServerName>,
server_name: OwnedServerName,
yes_i_want_to_delete_local_media: bool,
) -> Result<RoomMessageEventContent> {
) -> Result {
if server_name == self.services.globals.server_name() && !yes_i_want_to_delete_local_media {
return Ok(RoomMessageEventContent::text_plain(
"This command only works for remote media by default.",
));
return Err!("This command only works for remote media by default.",);
}
let Ok(all_mxcs) = self
@ -298,9 +278,7 @@ pub(super) async fn delete_all_from_server(
.await
.inspect_err(|e| error!("Failed to get MXC URIs from our database: {e}"))
else {
return Ok(RoomMessageEventContent::text_plain(
"Failed to get MXC URIs from our database",
));
return Err!("Failed to get MXC URIs from our database",);
};
let mut deleted_count: usize = 0;
@ -336,17 +314,16 @@ pub(super) async fn delete_all_from_server(
}
}
Ok(RoomMessageEventContent::text_plain(format!(
"Deleted {deleted_count} total files.",
)))
self.write_str(&format!("Deleted {deleted_count} total files.",))
.await
}
#[admin_command]
pub(super) async fn get_file_info(&self, mxc: OwnedMxcUri) -> Result<RoomMessageEventContent> {
pub(super) async fn get_file_info(&self, mxc: OwnedMxcUri) -> Result {
let mxc: Mxc<'_> = mxc.as_str().try_into()?;
let metadata = self.services.media.get_metadata(&mxc).await;
Ok(RoomMessageEventContent::notice_markdown(format!("```\n{metadata:#?}\n```")))
self.write_str(&format!("```\n{metadata:#?}\n```")).await
}
#[admin_command]
@ -355,7 +332,7 @@ pub(super) async fn get_remote_file(
mxc: OwnedMxcUri,
server: Option<OwnedServerName>,
timeout: u32,
) -> Result<RoomMessageEventContent> {
) -> Result {
let mxc: Mxc<'_> = mxc.as_str().try_into()?;
let timeout = Duration::from_millis(timeout.into());
let mut result = self
@ -368,8 +345,8 @@ pub(super) async fn get_remote_file(
let len = result.content.as_ref().expect("content").len();
result.content.as_mut().expect("content").clear();
let out = format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```");
Ok(RoomMessageEventContent::notice_markdown(out))
self.write_str(&format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```"))
.await
}
#[admin_command]
@ -380,7 +357,7 @@ pub(super) async fn get_remote_thumbnail(
timeout: u32,
width: u32,
height: u32,
) -> Result<RoomMessageEventContent> {
) -> Result {
let mxc: Mxc<'_> = mxc.as_str().try_into()?;
let timeout = Duration::from_millis(timeout.into());
let dim = Dim::new(width, height, None);
@ -394,6 +371,6 @@ pub(super) async fn get_remote_thumbnail(
let len = result.content.as_ref().expect("content").len();
result.content.as_mut().expect("content").clear();
let out = format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```");
Ok(RoomMessageEventContent::notice_markdown(out))
self.write_str(&format!("```\n{result:#?}\nreceived {len} bytes for file content.\n```"))
.await
}
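Validation failures in this file now return early through the `Err!` macro rather than an `Ok` wrapping a plain-text reply. A toy stand-in for that macro — the real conduwuit `Err!` builds a typed error and supports more forms; this only illustrates the call-site ergonomics:

```rust
// Toy version: format a message, wrap it in an error, and return early.
macro_rules! err {
    ($($arg:tt)*) => {
        return Err(format!($($arg)*))
    };
}

fn delete(mxc: Option<&str>, event_id: Option<&str>) -> Result<String, String> {
    if mxc.is_some() && event_id.is_some() {
        err!("Please specify either an MXC or an event ID, not both.");
    }
    if let Some(mxc) = mxc {
        return Ok(format!("would delete {mxc}"));
    }
    err!("Please specify either an MXC using --mxc or an event ID using --event-id.");
}

fn main() {
    assert!(delete(Some("mxc://example.org/abc"), Some("$event")).is_err());
    println!("{:?}", delete(Some("mxc://example.org/abc"), None));
}
```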


@ -3,7 +3,7 @@ mod commands;
use clap::Subcommand;
use conduwuit::Result;
use ruma::{EventId, MxcUri, OwnedMxcUri, OwnedServerName, ServerName};
use ruma::{OwnedEventId, OwnedMxcUri, OwnedServerName};
use crate::admin_command_dispatch;
@ -15,12 +15,12 @@ pub(super) enum MediaCommand {
Delete {
/// The MXC URL to delete
#[arg(long)]
mxc: Option<Box<MxcUri>>,
mxc: Option<OwnedMxcUri>,
/// - The message event ID which contains the media and thumbnail MXC
/// URLs
#[arg(long)]
event_id: Option<Box<EventId>>,
event_id: Option<OwnedEventId>,
},
/// - Deletes a codeblock list of MXC URLs from our database and on the
@ -57,7 +57,7 @@ pub(super) enum MediaCommand {
/// - Deletes all remote media from the specified remote server. This will
/// always ignore errors by default.
DeleteAllFromServer {
server_name: Box<ServerName>,
server_name: OwnedServerName,
/// Long argument to delete local media
#[arg(long)]


@ -4,7 +4,7 @@
#![allow(clippy::too_many_arguments)]
pub(crate) mod admin;
pub(crate) mod command;
pub(crate) mod context;
pub(crate) mod processor;
mod tests;
pub(crate) mod utils;
@ -23,13 +23,9 @@ extern crate conduwuit_api as api;
extern crate conduwuit_core as conduwuit;
extern crate conduwuit_service as service;
pub(crate) use conduwuit::Result;
pub(crate) use conduwuit_macros::{admin_command, admin_command_dispatch};
pub(crate) use crate::{
command::Command,
utils::{escape_html, get_room_info},
};
pub(crate) use crate::{context::Context, utils::get_room_info};
pub(crate) const PAGE_SIZE: usize = 100;


@ -33,7 +33,7 @@ use service::{
use tracing::Level;
use tracing_subscriber::{EnvFilter, filter::LevelFilter};
use crate::{Command, admin, admin::AdminCommand};
use crate::{admin, admin::AdminCommand, context::Context};
#[must_use]
pub(super) fn complete(line: &str) -> String { complete_command(AdminCommand::command(), line) }
@ -58,7 +58,7 @@ async fn process_command(services: Arc<Services>, input: &CommandInput) -> Proce
| Ok(parsed) => parsed,
};
let context = Command {
let context = Context {
services: &services,
body: &body,
timer: SystemTime::now(),
@ -103,7 +103,7 @@ fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult {
/// Parse and process a message from the admin room
async fn process(
context: &Command<'_>,
context: &Context<'_>,
command: AdminCommand,
args: &[String],
) -> (Result, String) {
@ -132,7 +132,7 @@ async fn process(
(result, output)
}
fn capture_create(context: &Command<'_>) -> (Arc<Capture>, Arc<Mutex<String>>) {
fn capture_create(context: &Context<'_>) -> (Arc<Capture>, Arc<Mutex<String>>) {
let env_config = &context.services.server.config.admin_log_capture;
let env_filter = EnvFilter::try_new(env_config).unwrap_or_else(|e| {
warn!("admin_log_capture filter invalid: {e:?}");


@ -1,7 +1,7 @@
use clap::Subcommand;
use conduwuit::Result;
use futures::StreamExt;
use ruma::{RoomId, UserId, events::room::message::RoomMessageEventContent};
use ruma::{OwnedRoomId, OwnedUserId};
use crate::{admin_command, admin_command_dispatch};
@ -12,31 +12,31 @@ pub(crate) enum AccountDataCommand {
/// - Returns all changes to the account data that happened after `since`.
ChangesSince {
/// Full user ID
user_id: Box<UserId>,
user_id: OwnedUserId,
/// UNIX timestamp since (u64)
since: u64,
/// Optional room ID of the account data
room_id: Option<Box<RoomId>>,
room_id: Option<OwnedRoomId>,
},
/// - Searches the account data for a specific kind.
AccountDataGet {
/// Full user ID
user_id: Box<UserId>,
user_id: OwnedUserId,
/// Account data event type
kind: String,
/// Optional room ID of the account data
room_id: Option<Box<RoomId>>,
room_id: Option<OwnedRoomId>,
},
}
#[admin_command]
async fn changes_since(
&self,
user_id: Box<UserId>,
user_id: OwnedUserId,
since: u64,
room_id: Option<Box<RoomId>>,
) -> Result<RoomMessageEventContent> {
room_id: Option<OwnedRoomId>,
) -> Result {
let timer = tokio::time::Instant::now();
let results: Vec<_> = self
.services
@ -46,18 +46,17 @@ async fn changes_since(
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"))
.await
}
#[admin_command]
async fn account_data_get(
&self,
user_id: Box<UserId>,
user_id: OwnedUserId,
kind: String,
room_id: Option<Box<RoomId>>,
) -> Result<RoomMessageEventContent> {
room_id: Option<OwnedRoomId>,
) -> Result {
let timer = tokio::time::Instant::now();
let results = self
.services
@ -66,7 +65,6 @@ async fn account_data_get(
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"))
.await
}
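Both query subcommands above follow the same reporting shape used throughout these files: time the lookup, then write the `Debug` output of the result inside a fenced ```rs block. A dependency-free sketch of that helper:

```rust
use std::fmt::Debug;
use std::time::Instant;

// Times an arbitrary lookup and formats its Debug output the way the
// admin query commands above report theirs.
fn report<T: Debug>(run: impl FnOnce() -> T) -> String {
    let timer = Instant::now();
    let results = run();
    let query_time = timer.elapsed();
    format!("Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")
}

fn main() {
    let out = report(|| vec![("m.push_rules", 42u64)]);
    println!("{out}");
}
```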


@ -1,7 +1,8 @@
use clap::Subcommand;
use conduwuit::Result;
use futures::TryStreamExt;
use crate::Command;
use crate::Context;
#[derive(Debug, Subcommand)]
/// All the getters and iterators from src/database/key_value/appservice.rs
@ -9,7 +10,7 @@ pub(crate) enum AppserviceCommand {
/// - Gets the appservice registration info/details from the ID as a string
GetRegistration {
/// Appservice registration ID
appservice_id: Box<str>,
appservice_id: String,
},
/// - Gets all appservice registrations with their ID and registration info
@ -17,7 +18,7 @@ pub(crate) enum AppserviceCommand {
}
/// All the getters and iterators from src/database/key_value/appservice.rs
pub(super) async fn process(subcommand: AppserviceCommand, context: &Command<'_>) -> Result {
pub(super) async fn process(subcommand: AppserviceCommand, context: &Context<'_>) -> Result {
let services = context.services;
match subcommand {
@ -31,7 +32,7 @@ pub(super) async fn process(subcommand: AppserviceCommand, context: &Command<'_>
},
| AppserviceCommand::All => {
let timer = tokio::time::Instant::now();
let results = services.appservice.all().await;
let results: Vec<_> = services.appservice.iter_db_ids().try_collect().await?;
let query_time = timer.elapsed();
write!(context, "Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```")
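`appservice.all()` is replaced here by collecting the registration stream with `TryStreamExt::try_collect`. A self-contained analogue of that call, with made-up IDs, assuming the `futures` and `tokio` crates:

```rust
use futures::stream::{self, TryStreamExt};

#[tokio::main]
async fn main() -> Result<(), &'static str> {
    // Stand-in for the fallible stream of appservice registration IDs.
    let ids = stream::iter([Ok::<_, &'static str>("bridge-a"), Ok("bridge-b")]);

    // try_collect short-circuits on the first Err item.
    let results: Vec<_> = ids.try_collect().await?;
    println!("{results:#?}");
    Ok(())
}
```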


@ -1,8 +1,8 @@
use clap::Subcommand;
use conduwuit::Result;
use ruma::ServerName;
use ruma::OwnedServerName;
use crate::Command;
use crate::Context;
#[derive(Debug, Subcommand)]
/// All the getters and iterators from src/database/key_value/globals.rs
@ -16,12 +16,12 @@ pub(crate) enum GlobalsCommand {
/// - This returns an empty `Ok(BTreeMap<..>)` when there are no keys found
/// for the server.
SigningKeysFor {
origin: Box<ServerName>,
origin: OwnedServerName,
},
}
/// All the getters and iterators from src/database/key_value/globals.rs
pub(super) async fn process(subcommand: GlobalsCommand, context: &Command<'_>) -> Result {
pub(super) async fn process(subcommand: GlobalsCommand, context: &Context<'_>) -> Result {
let services = context.services;
match subcommand {


@ -1,9 +1,9 @@
use clap::Subcommand;
use conduwuit::Result;
use futures::StreamExt;
use ruma::UserId;
use ruma::OwnedUserId;
use crate::Command;
use crate::Context;
#[derive(Debug, Subcommand)]
/// All the getters and iterators from src/database/key_value/presence.rs
@ -11,7 +11,7 @@ pub(crate) enum PresenceCommand {
/// - Returns the latest presence event for the given user.
GetPresence {
/// Full user ID
user_id: Box<UserId>,
user_id: OwnedUserId,
},
/// - Iterator of the most recent presence updates that happened after the
@ -23,7 +23,7 @@ pub(crate) enum PresenceCommand {
}
/// All the getters and iterators in key_value/presence.rs
pub(super) async fn process(subcommand: PresenceCommand, context: &Command<'_>) -> Result {
pub(super) async fn process(subcommand: PresenceCommand, context: &Context<'_>) -> Result {
let services = context.services;
match subcommand {


@ -1,19 +1,19 @@
use clap::Subcommand;
use conduwuit::Result;
use ruma::UserId;
use ruma::OwnedUserId;
use crate::Command;
use crate::Context;
#[derive(Debug, Subcommand)]
pub(crate) enum PusherCommand {
/// - Returns all the pushers for the user.
GetPushers {
/// Full user ID
user_id: Box<UserId>,
user_id: OwnedUserId,
},
}
pub(super) async fn process(subcommand: PusherCommand, context: &Command<'_>) -> Result {
pub(super) async fn process(subcommand: PusherCommand, context: &Context<'_>) -> Result {
let services = context.services;
match subcommand {


@ -11,7 +11,6 @@ use conduwuit::{
use conduwuit_database::Map;
use conduwuit_service::Services;
use futures::{FutureExt, Stream, StreamExt, TryStreamExt};
use ruma::events::room::message::RoomMessageEventContent;
use tokio::time::Instant;
use crate::{admin_command, admin_command_dispatch};
@ -170,7 +169,7 @@ pub(super) async fn compact(
into: Option<usize>,
parallelism: Option<usize>,
exhaustive: bool,
) -> Result<RoomMessageEventContent> {
) -> Result {
use conduwuit_database::compact::Options;
let default_all_maps: Option<_> = map.is_none().then(|| {
@ -221,17 +220,11 @@ pub(super) async fn compact(
let results = results.await;
let query_time = timer.elapsed();
self.write_str(&format!("Jobs completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"))
.await?;
Ok(RoomMessageEventContent::text_plain(""))
.await
}
#[admin_command]
pub(super) async fn raw_count(
&self,
map: Option<String>,
prefix: Option<String>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn raw_count(&self, map: Option<String>, prefix: Option<String>) -> Result {
let prefix = prefix.as_deref().unwrap_or(EMPTY);
let timer = Instant::now();
@ -242,17 +235,11 @@ pub(super) async fn raw_count(
let query_time = timer.elapsed();
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{count:#?}\n```"))
.await?;
Ok(RoomMessageEventContent::text_plain(""))
.await
}
#[admin_command]
pub(super) async fn raw_keys(
&self,
map: String,
prefix: Option<String>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn raw_keys(&self, map: String, prefix: Option<String>) -> Result {
writeln!(self, "```").boxed().await?;
let map = self.services.db.get(map.as_str())?;
@ -266,18 +253,12 @@ pub(super) async fn raw_keys(
.await?;
let query_time = timer.elapsed();
let out = format!("\n```\n\nQuery completed in {query_time:?}");
self.write_str(out.as_str()).await?;
Ok(RoomMessageEventContent::text_plain(""))
self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}"))
.await
}
#[admin_command]
pub(super) async fn raw_keys_sizes(
&self,
map: Option<String>,
prefix: Option<String>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn raw_keys_sizes(&self, map: Option<String>, prefix: Option<String>) -> Result {
let prefix = prefix.as_deref().unwrap_or(EMPTY);
let timer = Instant::now();
@ -294,18 +275,12 @@ pub(super) async fn raw_keys_sizes(
.await;
let query_time = timer.elapsed();
let result = format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}");
self.write_str(result.as_str()).await?;
Ok(RoomMessageEventContent::text_plain(""))
self.write_str(&format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}"))
.await
}
#[admin_command]
pub(super) async fn raw_keys_total(
&self,
map: Option<String>,
prefix: Option<String>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn raw_keys_total(&self, map: Option<String>, prefix: Option<String>) -> Result {
let prefix = prefix.as_deref().unwrap_or(EMPTY);
let timer = Instant::now();
@ -318,19 +293,12 @@ pub(super) async fn raw_keys_total(
.await;
let query_time = timer.elapsed();
self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}"))
.await?;
Ok(RoomMessageEventContent::text_plain(""))
.await
}
#[admin_command]
pub(super) async fn raw_vals_sizes(
&self,
map: Option<String>,
prefix: Option<String>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn raw_vals_sizes(&self, map: Option<String>, prefix: Option<String>) -> Result {
let prefix = prefix.as_deref().unwrap_or(EMPTY);
let timer = Instant::now();
@ -348,18 +316,12 @@ pub(super) async fn raw_vals_sizes(
.await;
let query_time = timer.elapsed();
let result = format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}");
self.write_str(result.as_str()).await?;
Ok(RoomMessageEventContent::text_plain(""))
self.write_str(&format!("```\n{result:#?}\n```\n\nQuery completed in {query_time:?}"))
.await
}
#[admin_command]
pub(super) async fn raw_vals_total(
&self,
map: Option<String>,
prefix: Option<String>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn raw_vals_total(&self, map: Option<String>, prefix: Option<String>) -> Result {
let prefix = prefix.as_deref().unwrap_or(EMPTY);
let timer = Instant::now();
@ -373,19 +335,12 @@ pub(super) async fn raw_vals_total(
.await;
let query_time = timer.elapsed();
self.write_str(&format!("```\n{result:#?}\n\n```\n\nQuery completed in {query_time:?}"))
.await?;
Ok(RoomMessageEventContent::text_plain(""))
.await
}
#[admin_command]
pub(super) async fn raw_iter(
&self,
map: String,
prefix: Option<String>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn raw_iter(&self, map: String, prefix: Option<String>) -> Result {
writeln!(self, "```").await?;
let map = self.services.db.get(&map)?;
@ -401,9 +356,7 @@ pub(super) async fn raw_iter(
let query_time = timer.elapsed();
self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}"))
.await?;
Ok(RoomMessageEventContent::text_plain(""))
.await
}
#[admin_command]
@ -412,7 +365,7 @@ pub(super) async fn raw_keys_from(
map: String,
start: String,
limit: Option<usize>,
) -> Result<RoomMessageEventContent> {
) -> Result {
writeln!(self, "```").await?;
let map = self.services.db.get(&map)?;
@ -426,9 +379,7 @@ pub(super) async fn raw_keys_from(
let query_time = timer.elapsed();
self.write_str(&format!("\n```\n\nQuery completed in {query_time:?}"))
.await?;
Ok(RoomMessageEventContent::text_plain(""))
.await
}
#[admin_command]
@ -437,7 +388,7 @@ pub(super) async fn raw_iter_from(
map: String,
start: String,
limit: Option<usize>,
) -> Result<RoomMessageEventContent> {
) -> Result {
let map = self.services.db.get(&map)?;
let timer = Instant::now();
let result = map
@ -449,41 +400,38 @@ pub(super) async fn raw_iter_from(
.await?;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
.await
}
#[admin_command]
pub(super) async fn raw_del(&self, map: String, key: String) -> Result<RoomMessageEventContent> {
pub(super) async fn raw_del(&self, map: String, key: String) -> Result {
let map = self.services.db.get(&map)?;
let timer = Instant::now();
map.remove(&key);
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Operation completed in {query_time:?}"
)))
let query_time = timer.elapsed();
self.write_str(&format!("Operation completed in {query_time:?}"))
.await
}
#[admin_command]
pub(super) async fn raw_get(&self, map: String, key: String) -> Result<RoomMessageEventContent> {
pub(super) async fn raw_get(&self, map: String, key: String) -> Result {
let map = self.services.db.get(&map)?;
let timer = Instant::now();
let handle = map.get(&key).await?;
let query_time = timer.elapsed();
let result = String::from_utf8_lossy(&handle);
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```"))
.await
}
#[admin_command]
pub(super) async fn raw_maps(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn raw_maps(&self) -> Result {
let list: Vec<_> = self.services.db.iter().map(at!(0)).copied().collect();
Ok(RoomMessageEventContent::notice_markdown(format!("{list:#?}")))
self.write_str(&format!("{list:#?}")).await
}
fn with_maps_or<'a>(


@ -1,7 +1,7 @@
use clap::Subcommand;
use conduwuit::{Result, utils::time};
use futures::StreamExt;
use ruma::{OwnedServerName, events::room::message::RoomMessageEventContent};
use ruma::OwnedServerName;
use crate::{admin_command, admin_command_dispatch};
@ -21,10 +21,7 @@ pub(crate) enum ResolverCommand {
}
#[admin_command]
async fn destinations_cache(
&self,
server_name: Option<OwnedServerName>,
) -> Result<RoomMessageEventContent> {
async fn destinations_cache(&self, server_name: Option<OwnedServerName>) -> Result {
use service::resolver::cache::CachedDest;
writeln!(self, "| Server Name | Destination | Hostname | Expires |").await?;
@ -44,11 +41,11 @@ async fn destinations_cache(
.await?;
}
Ok(RoomMessageEventContent::notice_plain(""))
Ok(())
}
#[admin_command]
async fn overrides_cache(&self, server_name: Option<String>) -> Result<RoomMessageEventContent> {
async fn overrides_cache(&self, server_name: Option<String>) -> Result {
use service::resolver::cache::CachedOverride;
writeln!(self, "| Server Name | IP | Port | Expires | Overriding |").await?;
@ -70,5 +67,5 @@ async fn overrides_cache(&self, server_name: Option<String>) -> Result<RoomMessa
.await?;
}
Ok(RoomMessageEventContent::notice_plain(""))
Ok(())
}
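Both resolver cache commands print one markdown table row per cache entry by `writeln!`-ing into the command context and finish with a bare `Ok(())`. A dependency-free sketch of that table output:

```rust
use std::fmt::Write;

fn main() -> std::fmt::Result {
    // Illustrative cache entries, not real resolver data.
    let entries = [("example.org", "example.org:8448", "example.org")];

    let mut out = String::new();
    writeln!(out, "| Server Name | Destination | Hostname |")?;
    writeln!(out, "| ----------- | ----------- | -------- |")?;
    for (name, dest, host) in entries {
        writeln!(out, "| {name} | {dest} | {host} |")?;
    }

    print!("{out}");
    Ok(())
}
```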


@ -1,22 +1,22 @@
use clap::Subcommand;
use conduwuit::Result;
use futures::StreamExt;
use ruma::{RoomAliasId, RoomId};
use ruma::{OwnedRoomAliasId, OwnedRoomId};
use crate::Command;
use crate::Context;
#[derive(Debug, Subcommand)]
/// All the getters and iterators from src/database/key_value/rooms/alias.rs
pub(crate) enum RoomAliasCommand {
ResolveLocalAlias {
/// Full room alias
alias: Box<RoomAliasId>,
alias: OwnedRoomAliasId,
},
/// - Iterator of all our local room aliases for the room ID
LocalAliasesForRoom {
/// Full room ID
room_id: Box<RoomId>,
room_id: OwnedRoomId,
},
/// - Iterator of all our local aliases in our database with their room IDs
@ -24,7 +24,7 @@ pub(crate) enum RoomAliasCommand {
}
/// All the getters and iterators in src/database/key_value/rooms/alias.rs
pub(super) async fn process(subcommand: RoomAliasCommand, context: &Command<'_>) -> Result {
pub(super) async fn process(subcommand: RoomAliasCommand, context: &Context<'_>) -> Result {
let services = context.services;
match subcommand {


@ -1,85 +1,85 @@
use clap::Subcommand;
use conduwuit::{Error, Result};
use conduwuit::Result;
use futures::StreamExt;
use ruma::{RoomId, ServerName, UserId, events::room::message::RoomMessageEventContent};
use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId};
use crate::Command;
use crate::Context;
#[derive(Debug, Subcommand)]
pub(crate) enum RoomStateCacheCommand {
ServerInRoom {
server: Box<ServerName>,
room_id: Box<RoomId>,
server: OwnedServerName,
room_id: OwnedRoomId,
},
RoomServers {
room_id: Box<RoomId>,
room_id: OwnedRoomId,
},
ServerRooms {
server: Box<ServerName>,
server: OwnedServerName,
},
RoomMembers {
room_id: Box<RoomId>,
room_id: OwnedRoomId,
},
LocalUsersInRoom {
room_id: Box<RoomId>,
room_id: OwnedRoomId,
},
ActiveLocalUsersInRoom {
room_id: Box<RoomId>,
room_id: OwnedRoomId,
},
RoomJoinedCount {
room_id: Box<RoomId>,
room_id: OwnedRoomId,
},
RoomInvitedCount {
room_id: Box<RoomId>,
room_id: OwnedRoomId,
},
RoomUserOnceJoined {
room_id: Box<RoomId>,
room_id: OwnedRoomId,
},
RoomMembersInvited {
room_id: Box<RoomId>,
room_id: OwnedRoomId,
},
GetInviteCount {
room_id: Box<RoomId>,
user_id: Box<UserId>,
room_id: OwnedRoomId,
user_id: OwnedUserId,
},
GetLeftCount {
room_id: Box<RoomId>,
user_id: Box<UserId>,
room_id: OwnedRoomId,
user_id: OwnedUserId,
},
RoomsJoined {
user_id: Box<UserId>,
user_id: OwnedUserId,
},
RoomsLeft {
user_id: Box<UserId>,
user_id: OwnedUserId,
},
RoomsInvited {
user_id: Box<UserId>,
user_id: OwnedUserId,
},
InviteState {
user_id: Box<UserId>,
room_id: Box<RoomId>,
user_id: OwnedUserId,
room_id: OwnedRoomId,
},
}
pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command<'_>) -> Result {
pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Context<'_>) -> Result {
let services = context.services;
let c = match subcommand {
match subcommand {
| RoomStateCacheCommand::ServerInRoom { server, room_id } => {
let timer = tokio::time::Instant::now();
let result = services
@ -89,9 +89,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
.await;
let query_time = timer.elapsed();
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
context
.write_str(&format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
))
.await
},
| RoomStateCacheCommand::RoomServers { room_id } => {
let timer = tokio::time::Instant::now();
@ -104,9 +106,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
.await;
let query_time = timer.elapsed();
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
)))
context
.write_str(&format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
))
.await
},
| RoomStateCacheCommand::ServerRooms { server } => {
let timer = tokio::time::Instant::now();
@ -119,9 +123,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
.await;
let query_time = timer.elapsed();
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
)))
context
.write_str(&format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
))
.await
},
| RoomStateCacheCommand::RoomMembers { room_id } => {
let timer = tokio::time::Instant::now();
@ -134,9 +140,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
.await;
let query_time = timer.elapsed();
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
)))
context
.write_str(&format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
))
.await
},
| RoomStateCacheCommand::LocalUsersInRoom { room_id } => {
let timer = tokio::time::Instant::now();
@ -149,9 +157,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
.await;
let query_time = timer.elapsed();
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
)))
context
.write_str(&format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
))
.await
},
| RoomStateCacheCommand::ActiveLocalUsersInRoom { room_id } => {
let timer = tokio::time::Instant::now();
@ -164,18 +174,22 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
.await;
let query_time = timer.elapsed();
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
)))
context
.write_str(&format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
))
.await
},
| RoomStateCacheCommand::RoomJoinedCount { room_id } => {
let timer = tokio::time::Instant::now();
let results = services.rooms.state_cache.room_joined_count(&room_id).await;
let query_time = timer.elapsed();
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
)))
context
.write_str(&format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
))
.await
},
| RoomStateCacheCommand::RoomInvitedCount { room_id } => {
let timer = tokio::time::Instant::now();
@ -186,9 +200,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
.await;
let query_time = timer.elapsed();
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
)))
context
.write_str(&format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
))
.await
},
| RoomStateCacheCommand::RoomUserOnceJoined { room_id } => {
let timer = tokio::time::Instant::now();
@ -201,9 +217,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
.await;
let query_time = timer.elapsed();
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
)))
context
.write_str(&format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
))
.await
},
| RoomStateCacheCommand::RoomMembersInvited { room_id } => {
let timer = tokio::time::Instant::now();
@ -216,9 +234,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
.await;
let query_time = timer.elapsed();
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
)))
context
.write_str(&format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
))
.await
},
| RoomStateCacheCommand::GetInviteCount { room_id, user_id } => {
let timer = tokio::time::Instant::now();
@ -229,9 +249,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
.await;
let query_time = timer.elapsed();
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
)))
context
.write_str(&format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
))
.await
},
| RoomStateCacheCommand::GetLeftCount { room_id, user_id } => {
let timer = tokio::time::Instant::now();
@ -242,9 +264,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
.await;
let query_time = timer.elapsed();
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
)))
context
.write_str(&format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
))
.await
},
| RoomStateCacheCommand::RoomsJoined { user_id } => {
let timer = tokio::time::Instant::now();
@ -257,9 +281,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
.await;
let query_time = timer.elapsed();
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
)))
context
.write_str(&format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
))
.await
},
| RoomStateCacheCommand::RoomsInvited { user_id } => {
let timer = tokio::time::Instant::now();
@ -271,9 +297,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
.await;
let query_time = timer.elapsed();
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
)))
context
.write_str(&format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
))
.await
},
| RoomStateCacheCommand::RoomsLeft { user_id } => {
let timer = tokio::time::Instant::now();
@ -285,9 +313,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
.await;
let query_time = timer.elapsed();
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
)))
context
.write_str(&format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
))
.await
},
| RoomStateCacheCommand::InviteState { user_id, room_id } => {
let timer = tokio::time::Instant::now();
@ -298,13 +328,11 @@ pub(super) async fn process(subcommand: RoomStateCacheCommand, context: &Command
.await;
let query_time = timer.elapsed();
Result::<_, Error>::Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
)))
context
.write_str(&format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
))
.await
},
}?;
context.write_str(c.body()).await?;
Ok(())
}
}


@ -1,7 +1,7 @@
use clap::Subcommand;
use conduwuit::{PduCount, Result, utils::stream::TryTools};
use futures::TryStreamExt;
use ruma::{OwnedRoomOrAliasId, events::room::message::RoomMessageEventContent};
use ruma::OwnedRoomOrAliasId;
use crate::{admin_command, admin_command_dispatch};
@ -24,7 +24,7 @@ pub(crate) enum RoomTimelineCommand {
}
#[admin_command]
pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result<RoomMessageEventContent> {
pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result {
let room_id = self.services.rooms.alias.resolve(&room_id).await?;
let result = self
@ -34,7 +34,7 @@ pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result<RoomMessa
.last_timeline_count(None, &room_id)
.await?;
Ok(RoomMessageEventContent::notice_markdown(format!("{result:#?}")))
self.write_str(&format!("{result:#?}")).await
}
#[admin_command]
@ -43,7 +43,7 @@ pub(super) async fn pdus(
room_id: OwnedRoomOrAliasId,
from: Option<String>,
limit: Option<usize>,
) -> Result<RoomMessageEventContent> {
) -> Result {
let room_id = self.services.rooms.alias.resolve(&room_id).await?;
let from: Option<PduCount> = from.as_deref().map(str::parse).transpose()?;
@ -57,5 +57,5 @@ pub(super) async fn pdus(
.try_collect()
.await?;
Ok(RoomMessageEventContent::notice_markdown(format!("{result:#?}")))
self.write_str(&format!("{result:#?}")).await
}


@ -1,10 +1,10 @@
use clap::Subcommand;
use conduwuit::Result;
use conduwuit::{Err, Result};
use futures::StreamExt;
use ruma::{ServerName, UserId, events::room::message::RoomMessageEventContent};
use ruma::{OwnedServerName, OwnedUserId};
use service::sending::Destination;
use crate::Command;
use crate::Context;
#[derive(Debug, Subcommand)]
/// All the getters and iterators from src/database/key_value/sending.rs
@ -27,9 +27,9 @@ pub(crate) enum SendingCommand {
#[arg(short, long)]
appservice_id: Option<String>,
#[arg(short, long)]
server_name: Option<Box<ServerName>>,
server_name: Option<OwnedServerName>,
#[arg(short, long)]
user_id: Option<Box<UserId>>,
user_id: Option<OwnedUserId>,
#[arg(short, long)]
push_key: Option<String>,
},
@ -49,30 +49,20 @@ pub(crate) enum SendingCommand {
#[arg(short, long)]
appservice_id: Option<String>,
#[arg(short, long)]
server_name: Option<Box<ServerName>>,
server_name: Option<OwnedServerName>,
#[arg(short, long)]
user_id: Option<Box<UserId>>,
user_id: Option<OwnedUserId>,
#[arg(short, long)]
push_key: Option<String>,
},
GetLatestEduCount {
server_name: Box<ServerName>,
server_name: OwnedServerName,
},
}
/// All the getters and iterators in key_value/sending.rs
pub(super) async fn process(subcommand: SendingCommand, context: &Command<'_>) -> Result {
let c = reprocess(subcommand, context).await?;
context.write_str(c.body()).await?;
Ok(())
}
/// All the getters and iterators in key_value/sending.rs
pub(super) async fn reprocess(
subcommand: SendingCommand,
context: &Command<'_>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn process(subcommand: SendingCommand, context: &Context<'_>) -> Result {
let services = context.services;
match subcommand {
@ -82,9 +72,11 @@ pub(super) async fn reprocess(
let active_requests = results.collect::<Vec<_>>().await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```"
)))
context
.write_str(&format!(
"Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```"
))
.await
},
| SendingCommand::QueuedRequests {
appservice_id,
@ -97,19 +89,19 @@ pub(super) async fn reprocess(
&& user_id.is_none()
&& push_key.is_none()
{
return Ok(RoomMessageEventContent::text_plain(
return Err!(
"An appservice ID, server name, or a user ID with push key must be \
specified via arguments. See --help for more details.",
));
);
}
let timer = tokio::time::Instant::now();
let results = match (appservice_id, server_name, user_id, push_key) {
| (Some(appservice_id), None, None, None) => {
if appservice_id.is_empty() {
return Ok(RoomMessageEventContent::text_plain(
return Err!(
"An appservice ID, server name, or a user ID with push key must be \
specified via arguments. See --help for more details.",
));
);
}
services
@ -120,40 +112,42 @@ pub(super) async fn reprocess(
| (None, Some(server_name), None, None) => services
.sending
.db
.queued_requests(&Destination::Federation(server_name.into())),
.queued_requests(&Destination::Federation(server_name)),
| (None, None, Some(user_id), Some(push_key)) => {
if push_key.is_empty() {
return Ok(RoomMessageEventContent::text_plain(
return Err!(
"An appservice ID, server name, or a user ID with push key must be \
specified via arguments. See --help for more details.",
));
);
}
services
.sending
.db
.queued_requests(&Destination::Push(user_id.into(), push_key))
.queued_requests(&Destination::Push(user_id, push_key))
},
| (Some(_), Some(_), Some(_), Some(_)) => {
return Ok(RoomMessageEventContent::text_plain(
return Err!(
"An appservice ID, server name, or a user ID with push key must be \
specified via arguments. Not all of them See --help for more details.",
));
);
},
| _ => {
return Ok(RoomMessageEventContent::text_plain(
return Err!(
"An appservice ID, server name, or a user ID with push key must be \
specified via arguments. See --help for more details.",
));
);
},
};
let queued_requests = results.collect::<Vec<_>>().await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{queued_requests:#?}\n```"
)))
context
.write_str(&format!(
"Query completed in {query_time:?}:\n\n```rs\n{queued_requests:#?}\n```"
))
.await
},
| SendingCommand::ActiveRequestsFor {
appservice_id,
@ -166,20 +160,20 @@ pub(super) async fn reprocess(
&& user_id.is_none()
&& push_key.is_none()
{
return Ok(RoomMessageEventContent::text_plain(
return Err!(
"An appservice ID, server name, or a user ID with push key must be \
specified via arguments. See --help for more details.",
));
);
}
let timer = tokio::time::Instant::now();
let results = match (appservice_id, server_name, user_id, push_key) {
| (Some(appservice_id), None, None, None) => {
if appservice_id.is_empty() {
return Ok(RoomMessageEventContent::text_plain(
return Err!(
"An appservice ID, server name, or a user ID with push key must be \
specified via arguments. See --help for more details.",
));
);
}
services
@ -190,49 +184,53 @@ pub(super) async fn reprocess(
| (None, Some(server_name), None, None) => services
.sending
.db
.active_requests_for(&Destination::Federation(server_name.into())),
.active_requests_for(&Destination::Federation(server_name)),
| (None, None, Some(user_id), Some(push_key)) => {
if push_key.is_empty() {
return Ok(RoomMessageEventContent::text_plain(
return Err!(
"An appservice ID, server name, or a user ID with push key must be \
specified via arguments. See --help for more details.",
));
);
}
services
.sending
.db
.active_requests_for(&Destination::Push(user_id.into(), push_key))
.active_requests_for(&Destination::Push(user_id, push_key))
},
| (Some(_), Some(_), Some(_), Some(_)) => {
return Ok(RoomMessageEventContent::text_plain(
return Err!(
"An appservice ID, server name, or a user ID with push key must be \
specified via arguments. Not all of them See --help for more details.",
));
);
},
| _ => {
return Ok(RoomMessageEventContent::text_plain(
return Err!(
"An appservice ID, server name, or a user ID with push key must be \
specified via arguments. See --help for more details.",
));
);
},
};
let active_requests = results.collect::<Vec<_>>().await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```"
)))
context
.write_str(&format!(
"Query completed in {query_time:?}:\n\n```rs\n{active_requests:#?}\n```"
))
.await
},
| SendingCommand::GetLatestEduCount { server_name } => {
let timer = tokio::time::Instant::now();
let results = services.sending.db.get_latest_educount(&server_name).await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
)))
context
.write_str(&format!(
"Query completed in {query_time:?}:\n\n```rs\n{results:#?}\n```"
))
.await
},
}
}
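The queued/active-request subcommands above select a sending `Destination` by matching on which of the four optional arguments were supplied, and error on any other combination. A reduced sketch of that dispatch — the enum here is illustrative, not the real `service::sending::Destination`:

```rust
#[derive(Debug)]
enum Destination {
    Appservice(String),
    Federation(String),
    Push(String, String),
}

fn pick(
    appservice_id: Option<String>,
    server_name: Option<String>,
    user_id: Option<String>,
    push_key: Option<String>,
) -> Result<Destination, &'static str> {
    match (appservice_id, server_name, user_id, push_key) {
        | (Some(id), None, None, None) => Ok(Destination::Appservice(id)),
        | (None, Some(server), None, None) => Ok(Destination::Federation(server)),
        | (None, None, Some(user), Some(key)) => Ok(Destination::Push(user, key)),
        // Everything else (nothing given, or too many arguments) is rejected.
        | _ => Err("specify an appservice ID, a server name, or a user ID with a push key"),
    }
}

fn main() {
    println!("{:?}", pick(None, Some("example.org".to_owned()), None, None));
}
```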


@ -1,6 +1,6 @@
use clap::Subcommand;
use conduwuit::Result;
use ruma::{OwnedEventId, OwnedRoomOrAliasId, events::room::message::RoomMessageEventContent};
use ruma::{OwnedEventId, OwnedRoomOrAliasId};
use crate::{admin_command, admin_command_dispatch};
@ -18,10 +18,7 @@ pub(crate) enum ShortCommand {
}
#[admin_command]
pub(super) async fn short_event_id(
&self,
event_id: OwnedEventId,
) -> Result<RoomMessageEventContent> {
pub(super) async fn short_event_id(&self, event_id: OwnedEventId) -> Result {
let shortid = self
.services
.rooms
@ -29,17 +26,14 @@ pub(super) async fn short_event_id(
.get_shorteventid(&event_id)
.await?;
Ok(RoomMessageEventContent::notice_markdown(format!("{shortid:#?}")))
self.write_str(&format!("{shortid:#?}")).await
}
#[admin_command]
pub(super) async fn short_room_id(
&self,
room_id: OwnedRoomOrAliasId,
) -> Result<RoomMessageEventContent> {
pub(super) async fn short_room_id(&self, room_id: OwnedRoomOrAliasId) -> Result {
let room_id = self.services.rooms.alias.resolve(&room_id).await?;
let shortid = self.services.rooms.short.get_shortroomid(&room_id).await?;
Ok(RoomMessageEventContent::notice_markdown(format!("{shortid:#?}")))
self.write_str(&format!("{shortid:#?}")).await
}


@ -1,9 +1,7 @@
use clap::Subcommand;
use conduwuit::Result;
use futures::stream::StreamExt;
use ruma::{
OwnedDeviceId, OwnedRoomId, OwnedUserId, events::room::message::RoomMessageEventContent,
};
use ruma::{OwnedDeviceId, OwnedRoomId, OwnedUserId};
use crate::{admin_command, admin_command_dispatch};
@ -99,11 +97,7 @@ pub(crate) enum UsersCommand {
}
#[admin_command]
async fn get_shared_rooms(
&self,
user_a: OwnedUserId,
user_b: OwnedUserId,
) -> Result<RoomMessageEventContent> {
async fn get_shared_rooms(&self, user_a: OwnedUserId, user_b: OwnedUserId) -> Result {
let timer = tokio::time::Instant::now();
let result: Vec<_> = self
.services
@ -115,9 +109,8 @@ async fn get_shared_rooms(
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
.await
}
#[admin_command]
@ -127,7 +120,7 @@ async fn get_backup_session(
version: String,
room_id: OwnedRoomId,
session_id: String,
) -> Result<RoomMessageEventContent> {
) -> Result {
let timer = tokio::time::Instant::now();
let result = self
.services
@ -136,9 +129,8 @@ async fn get_backup_session(
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
.await
}
#[admin_command]
@ -147,7 +139,7 @@ async fn get_room_backups(
user_id: OwnedUserId,
version: String,
room_id: OwnedRoomId,
) -> Result<RoomMessageEventContent> {
) -> Result {
let timer = tokio::time::Instant::now();
let result = self
.services
@ -156,32 +148,22 @@ async fn get_room_backups(
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
.await
}
#[admin_command]
async fn get_all_backups(
&self,
user_id: OwnedUserId,
version: String,
) -> Result<RoomMessageEventContent> {
async fn get_all_backups(&self, user_id: OwnedUserId, version: String) -> Result {
let timer = tokio::time::Instant::now();
let result = self.services.key_backups.get_all(&user_id, &version).await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
.await
}
#[admin_command]
async fn get_backup_algorithm(
&self,
user_id: OwnedUserId,
version: String,
) -> Result<RoomMessageEventContent> {
async fn get_backup_algorithm(&self, user_id: OwnedUserId, version: String) -> Result {
let timer = tokio::time::Instant::now();
let result = self
.services
@ -190,16 +172,12 @@ async fn get_backup_algorithm(
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
.await
}
#[admin_command]
async fn get_latest_backup_version(
&self,
user_id: OwnedUserId,
) -> Result<RoomMessageEventContent> {
async fn get_latest_backup_version(&self, user_id: OwnedUserId) -> Result {
let timer = tokio::time::Instant::now();
let result = self
.services
@ -208,36 +186,33 @@ async fn get_latest_backup_version(
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
.await
}
#[admin_command]
async fn get_latest_backup(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
async fn get_latest_backup(&self, user_id: OwnedUserId) -> Result {
let timer = tokio::time::Instant::now();
let result = self.services.key_backups.get_latest_backup(&user_id).await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
.await
}
#[admin_command]
async fn iter_users(&self) -> Result<RoomMessageEventContent> {
async fn iter_users(&self) -> Result {
let timer = tokio::time::Instant::now();
let result: Vec<OwnedUserId> = self.services.users.stream().map(Into::into).collect().await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
.await
}
#[admin_command]
async fn iter_users2(&self) -> Result<RoomMessageEventContent> {
async fn iter_users2(&self) -> Result {
let timer = tokio::time::Instant::now();
let result: Vec<_> = self.services.users.stream().collect().await;
let result: Vec<_> = result
@ -248,35 +223,32 @@ async fn iter_users2(&self) -> Result<RoomMessageEventContent> {
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:?}\n```"))
.await
}
#[admin_command]
async fn count_users(&self) -> Result<RoomMessageEventContent> {
async fn count_users(&self) -> Result {
let timer = tokio::time::Instant::now();
let result = self.services.users.count().await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
.await
}
#[admin_command]
async fn password_hash(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
async fn password_hash(&self, user_id: OwnedUserId) -> Result {
let timer = tokio::time::Instant::now();
let result = self.services.users.password_hash(&user_id).await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
.await
}
#[admin_command]
async fn list_devices(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
async fn list_devices(&self, user_id: OwnedUserId) -> Result {
let timer = tokio::time::Instant::now();
let devices = self
.services
@ -288,13 +260,12 @@ async fn list_devices(&self, user_id: OwnedUserId) -> Result<RoomMessageEventCon
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{devices:#?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{devices:#?}\n```"))
.await
}
#[admin_command]
async fn list_devices_metadata(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
async fn list_devices_metadata(&self, user_id: OwnedUserId) -> Result {
let timer = tokio::time::Instant::now();
let devices = self
.services
@ -304,17 +275,12 @@ async fn list_devices_metadata(&self, user_id: OwnedUserId) -> Result<RoomMessag
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{devices:#?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{devices:#?}\n```"))
.await
}
#[admin_command]
async fn get_device_metadata(
&self,
user_id: OwnedUserId,
device_id: OwnedDeviceId,
) -> Result<RoomMessageEventContent> {
async fn get_device_metadata(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result {
let timer = tokio::time::Instant::now();
let device = self
.services
@ -323,28 +289,22 @@ async fn get_device_metadata(
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```"))
.await
}
#[admin_command]
async fn get_devices_version(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
async fn get_devices_version(&self, user_id: OwnedUserId) -> Result {
let timer = tokio::time::Instant::now();
let device = self.services.users.get_devicelist_version(&user_id).await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{device:#?}\n```"))
.await
}
#[admin_command]
async fn count_one_time_keys(
&self,
user_id: OwnedUserId,
device_id: OwnedDeviceId,
) -> Result<RoomMessageEventContent> {
async fn count_one_time_keys(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result {
let timer = tokio::time::Instant::now();
let result = self
.services
@ -353,17 +313,12 @@ async fn count_one_time_keys(
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
.await
}
#[admin_command]
async fn get_device_keys(
&self,
user_id: OwnedUserId,
device_id: OwnedDeviceId,
) -> Result<RoomMessageEventContent> {
async fn get_device_keys(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result {
let timer = tokio::time::Instant::now();
let result = self
.services
@ -372,24 +327,22 @@ async fn get_device_keys(
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
.await
}
#[admin_command]
async fn get_user_signing_key(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
async fn get_user_signing_key(&self, user_id: OwnedUserId) -> Result {
let timer = tokio::time::Instant::now();
let result = self.services.users.get_user_signing_key(&user_id).await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
.await
}
#[admin_command]
async fn get_master_key(&self, user_id: OwnedUserId) -> Result<RoomMessageEventContent> {
async fn get_master_key(&self, user_id: OwnedUserId) -> Result {
let timer = tokio::time::Instant::now();
let result = self
.services
@ -398,17 +351,12 @@ async fn get_master_key(&self, user_id: OwnedUserId) -> Result<RoomMessageEventC
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
.await
}
#[admin_command]
async fn get_to_device_events(
&self,
user_id: OwnedUserId,
device_id: OwnedDeviceId,
) -> Result<RoomMessageEventContent> {
async fn get_to_device_events(&self, user_id: OwnedUserId, device_id: OwnedDeviceId) -> Result {
let timer = tokio::time::Instant::now();
let result = self
.services
@ -418,7 +366,6 @@ async fn get_to_device_events(
.await;
let query_time = timer.elapsed();
Ok(RoomMessageEventContent::notice_markdown(format!(
"Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"
)))
self.write_str(&format!("Query completed in {query_time:?}:\n\n```rs\n{result:#?}\n```"))
.await
}


@ -1,13 +1,11 @@
use std::fmt::Write;
use clap::Subcommand;
use conduwuit::Result;
use conduwuit::{Err, Result};
use futures::StreamExt;
use ruma::{
OwnedRoomAliasId, OwnedRoomId, RoomId, events::room::message::RoomMessageEventContent,
};
use ruma::{OwnedRoomAliasId, OwnedRoomId};
use crate::{Command, escape_html};
use crate::Context;
#[derive(Debug, Subcommand)]
pub(crate) enum RoomAliasCommand {
@ -18,7 +16,7 @@ pub(crate) enum RoomAliasCommand {
force: bool,
/// The room id to set the alias on
room_id: Box<RoomId>,
room_id: OwnedRoomId,
/// The alias localpart to use (`alias`, not `#alias:servername.tld`)
room_alias_localpart: String,
@ -40,21 +38,11 @@ pub(crate) enum RoomAliasCommand {
/// - List aliases currently being used
List {
/// If set, only list the aliases for this room
room_id: Option<Box<RoomId>>,
room_id: Option<OwnedRoomId>,
},
}
pub(super) async fn process(command: RoomAliasCommand, context: &Command<'_>) -> Result {
let c = reprocess(command, context).await?;
context.write_str(c.body()).await?;
Ok(())
}
pub(super) async fn reprocess(
command: RoomAliasCommand,
context: &Command<'_>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn process(command: RoomAliasCommand, context: &Context<'_>) -> Result {
let services = context.services;
let server_user = &services.globals.server_user;
@ -67,9 +55,7 @@ pub(super) async fn reprocess(
let room_alias = match OwnedRoomAliasId::parse(room_alias_str) {
| Ok(alias) => alias,
| Err(err) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"Failed to parse alias: {err}"
)));
return Err!("Failed to parse alias: {err}");
},
};
match command {
@ -81,60 +67,50 @@ pub(super) async fn reprocess(
&room_id,
server_user,
) {
| Ok(()) => Ok(RoomMessageEventContent::text_plain(format!(
"Successfully overwrote alias (formerly {id})"
))),
| Err(err) => Ok(RoomMessageEventContent::text_plain(format!(
"Failed to remove alias: {err}"
))),
| Err(err) => Err!("Failed to remove alias: {err}"),
| Ok(()) =>
context
.write_str(&format!(
"Successfully overwrote alias (formerly {id})"
))
.await,
}
},
| (false, Ok(id)) => Ok(RoomMessageEventContent::text_plain(format!(
| (false, Ok(id)) => Err!(
"Refusing to overwrite in use alias for {id}, use -f or --force to \
overwrite"
))),
),
| (_, Err(_)) => {
match services.rooms.alias.set_alias(
&room_alias,
&room_id,
server_user,
) {
| Ok(()) => Ok(RoomMessageEventContent::text_plain(
"Successfully set alias",
)),
| Err(err) => Ok(RoomMessageEventContent::text_plain(format!(
"Failed to remove alias: {err}"
))),
| Err(err) => Err!("Failed to remove alias: {err}"),
| Ok(()) => context.write_str("Successfully set alias").await,
}
},
}
},
| RoomAliasCommand::Remove { .. } => {
match services.rooms.alias.resolve_local_alias(&room_alias).await {
| Err(_) => Err!("Alias isn't in use."),
| Ok(id) => match services
.rooms
.alias
.remove_alias(&room_alias, server_user)
.await
{
| Ok(()) => Ok(RoomMessageEventContent::text_plain(format!(
"Removed alias from {id}"
))),
| Err(err) => Ok(RoomMessageEventContent::text_plain(format!(
"Failed to remove alias: {err}"
))),
| Err(err) => Err!("Failed to remove alias: {err}"),
| Ok(()) =>
context.write_str(&format!("Removed alias from {id}")).await,
},
| Err(_) =>
Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")),
}
},
| RoomAliasCommand::Which { .. } => {
match services.rooms.alias.resolve_local_alias(&room_alias).await {
| Ok(id) => Ok(RoomMessageEventContent::text_plain(format!(
"Alias resolves to {id}"
))),
| Err(_) =>
Ok(RoomMessageEventContent::text_plain("Alias isn't in use.")),
| Err(_) => Err!("Alias isn't in use."),
| Ok(id) => context.write_str(&format!("Alias resolves to {id}")).await,
}
},
| RoomAliasCommand::List { .. } => unreachable!(),
@ -156,15 +132,8 @@ pub(super) async fn reprocess(
output
});
let html_list = aliases.iter().fold(String::new(), |mut output, alias| {
writeln!(output, "<li>{}</li>", escape_html(alias.as_ref()))
.expect("should be able to write to string buffer");
output
});
let plain = format!("Aliases for {room_id}:\n{plain_list}");
let html = format!("Aliases for {room_id}:\n<ul>{html_list}</ul>");
Ok(RoomMessageEventContent::text_html(plain, html))
context.write_str(&plain).await
} else {
let aliases = services
.rooms
@ -183,23 +152,8 @@ pub(super) async fn reprocess(
output
});
let html_list = aliases
.iter()
.fold(String::new(), |mut output, (alias, id)| {
writeln!(
output,
"<li><code>{}</code> -> #{}:{}</li>",
escape_html(alias.as_ref()),
escape_html(id),
server_name
)
.expect("should be able to write to string buffer");
output
});
let plain = format!("Aliases:\n{plain_list}");
let html = format!("Aliases:\n<ul>{html_list}</ul>");
Ok(RoomMessageEventContent::text_html(plain, html))
context.write_str(&plain).await
},
}
}
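
The alias commands show the other half of the refactor: `process` now takes the renamed `Context` directly, failure paths return early through the `Err!` macro, and only success output goes through `context.write_str`, so the intermediate `reprocess` step that built a `RoomMessageEventContent` disappears. A hedged sketch of that shape with an invented single-variant command (the enum, its check, and the messages are illustrative only):

```rust
use clap::Subcommand;
use conduwuit::{Err, Result};
use ruma::OwnedRoomId;

use crate::Context;

#[derive(Debug, Subcommand)]
pub(crate) enum ExampleCommand {
	/// - Report whether a room is known to this server (illustrative)
	Exists { room_id: OwnedRoomId },
}

pub(super) async fn process(command: ExampleCommand, context: &Context<'_>) -> Result {
	let services = context.services;

	match command {
		| ExampleCommand::Exists { room_id } => {
			if !services.rooms.metadata.exists(&room_id).await {
				// errors short-circuit through Err! instead of being wrapped
				// in an Ok(RoomMessageEventContent::text_plain(..))
				return Err!("Room {room_id} is not known to this server.");
			}

			// success output is written straight back to the admin room
			context.write_str(&format!("{room_id} exists.")).await
		},
	}
}
```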

View file

@ -1,6 +1,6 @@
use conduwuit::Result;
use conduwuit::{Err, Result};
use futures::StreamExt;
use ruma::{OwnedRoomId, events::room::message::RoomMessageEventContent};
use ruma::OwnedRoomId;
use crate::{PAGE_SIZE, admin_command, get_room_info};
@ -11,7 +11,7 @@ pub(super) async fn list_rooms(
exclude_disabled: bool,
exclude_banned: bool,
no_details: bool,
) -> Result<RoomMessageEventContent> {
) -> Result {
// TODO: i know there's a way to do this with clap, but i can't seem to find it
let page = page.unwrap_or(1);
let mut rooms = self
@ -41,29 +41,28 @@ pub(super) async fn list_rooms(
.collect::<Vec<_>>();
if rooms.is_empty() {
return Ok(RoomMessageEventContent::text_plain("No more rooms."));
return Err!("No more rooms.");
}
let output_plain = format!(
"Rooms ({}):\n```\n{}\n```",
rooms.len(),
rooms
.iter()
.map(|(id, members, name)| if no_details {
let body = rooms
.iter()
.map(|(id, members, name)| {
if no_details {
format!("{id}")
} else {
format!("{id}\tMembers: {members}\tName: {name}")
})
.collect::<Vec<_>>()
.join("\n")
);
}
})
.collect::<Vec<_>>()
.join("\n");
Ok(RoomMessageEventContent::notice_markdown(output_plain))
self.write_str(&format!("Rooms ({}):\n```\n{body}\n```", rooms.len(),))
.await
}
#[admin_command]
pub(super) async fn exists(&self, room_id: OwnedRoomId) -> Result<RoomMessageEventContent> {
pub(super) async fn exists(&self, room_id: OwnedRoomId) -> Result {
let result = self.services.rooms.metadata.exists(&room_id).await;
Ok(RoomMessageEventContent::notice_markdown(format!("{result}")))
self.write_str(&format!("{result}")).await
}

View file

@ -1,22 +1,22 @@
use clap::Subcommand;
use conduwuit::Result;
use conduwuit::{Err, Result};
use futures::StreamExt;
use ruma::{RoomId, events::room::message::RoomMessageEventContent};
use ruma::OwnedRoomId;
use crate::{Command, PAGE_SIZE, get_room_info};
use crate::{Context, PAGE_SIZE, get_room_info};
#[derive(Debug, Subcommand)]
pub(crate) enum RoomDirectoryCommand {
/// - Publish a room to the room directory
Publish {
/// The room id of the room to publish
room_id: Box<RoomId>,
room_id: OwnedRoomId,
},
/// - Unpublish a room to the room directory
Unpublish {
/// The room id of the room to unpublish
room_id: Box<RoomId>,
room_id: OwnedRoomId,
},
/// - List rooms that are published
@ -25,25 +25,16 @@ pub(crate) enum RoomDirectoryCommand {
},
}
pub(super) async fn process(command: RoomDirectoryCommand, context: &Command<'_>) -> Result {
let c = reprocess(command, context).await?;
context.write_str(c.body()).await?;
Ok(())
}
pub(super) async fn reprocess(
command: RoomDirectoryCommand,
context: &Command<'_>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn process(command: RoomDirectoryCommand, context: &Context<'_>) -> Result {
let services = context.services;
match command {
| RoomDirectoryCommand::Publish { room_id } => {
services.rooms.directory.set_public(&room_id);
Ok(RoomMessageEventContent::notice_plain("Room published"))
context.write_str("Room published").await
},
| RoomDirectoryCommand::Unpublish { room_id } => {
services.rooms.directory.set_not_public(&room_id);
Ok(RoomMessageEventContent::notice_plain("Room unpublished"))
context.write_str("Room unpublished").await
},
| RoomDirectoryCommand::List { page } => {
// TODO: i know there's a way to do this with clap, but i can't seem to find it
@ -66,20 +57,18 @@ pub(super) async fn reprocess(
.collect();
if rooms.is_empty() {
return Ok(RoomMessageEventContent::text_plain("No more rooms."));
return Err!("No more rooms.");
}
let output = format!(
"Rooms (page {page}):\n```\n{}\n```",
rooms
.iter()
.map(|(id, members, name)| format!(
"{id} | Members: {members} | Name: {name}"
))
.collect::<Vec<_>>()
.join("\n")
);
Ok(RoomMessageEventContent::text_markdown(output))
let body = rooms
.iter()
.map(|(id, members, name)| format!("{id} | Members: {members} | Name: {name}"))
.collect::<Vec<_>>()
.join("\n");
context
.write_str(&format!("Rooms (page {page}):\n```\n{body}\n```",))
.await
},
}
}
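
As elsewhere in this set, the clap argument types here move from boxed unsized identifiers to ruma's owned ones (`Box<RoomId>` becomes `OwnedRoomId`, and likewise for alias, event, and room-or-alias arguments). Both forms parse from a command-line string, so the handlers simply receive the owned type without the extra `Box` layer. A purely illustrative enum showing the new argument types, with the former ones noted in comments:

```rust
use clap::Subcommand;
use ruma::{OwnedEventId, OwnedRoomId};

// Not part of the diff -- an invented enum to show the argument-type change.
#[derive(Debug, Subcommand)]
pub(crate) enum ExampleCommand {
	/// - Formerly `room_id: Box<RoomId>`
	InspectRoom { room_id: OwnedRoomId },

	/// - Formerly `event_id: Box<EventId>`
	InspectEvent { event_id: OwnedEventId },
}
```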

View file

@ -1,7 +1,7 @@
use clap::Subcommand;
use conduwuit::{Result, utils::ReadyExt};
use conduwuit::{Err, Result, utils::ReadyExt};
use futures::StreamExt;
use ruma::{RoomId, events::room::message::RoomMessageEventContent};
use ruma::OwnedRoomId;
use crate::{admin_command, admin_command_dispatch};
@ -10,7 +10,7 @@ use crate::{admin_command, admin_command_dispatch};
pub(crate) enum RoomInfoCommand {
/// - List joined members in a room
ListJoinedMembers {
room_id: Box<RoomId>,
room_id: OwnedRoomId,
/// Lists only our local users in the specified room
#[arg(long)]
@ -22,16 +22,12 @@ pub(crate) enum RoomInfoCommand {
/// Room topics can be huge, so this is in its
/// own separate command
ViewRoomTopic {
room_id: Box<RoomId>,
room_id: OwnedRoomId,
},
}
#[admin_command]
async fn list_joined_members(
&self,
room_id: Box<RoomId>,
local_only: bool,
) -> Result<RoomMessageEventContent> {
async fn list_joined_members(&self, room_id: OwnedRoomId, local_only: bool) -> Result {
let room_name = self
.services
.rooms
@ -64,22 +60,19 @@ async fn list_joined_members(
.collect()
.await;
let output_plain = format!(
"{} Members in Room \"{}\":\n```\n{}\n```",
member_info.len(),
room_name,
member_info
.into_iter()
.map(|(displayname, mxid)| format!("{mxid} | {displayname}"))
.collect::<Vec<_>>()
.join("\n")
);
let num = member_info.len();
let body = member_info
.into_iter()
.map(|(displayname, mxid)| format!("{mxid} | {displayname}"))
.collect::<Vec<_>>()
.join("\n");
Ok(RoomMessageEventContent::notice_markdown(output_plain))
self.write_str(&format!("{num} Members in Room \"{room_name}\":\n```\n{body}\n```",))
.await
}
#[admin_command]
async fn view_room_topic(&self, room_id: Box<RoomId>) -> Result<RoomMessageEventContent> {
async fn view_room_topic(&self, room_id: OwnedRoomId) -> Result {
let Ok(room_topic) = self
.services
.rooms
@ -87,10 +80,9 @@ async fn view_room_topic(&self, room_id: Box<RoomId>) -> Result<RoomMessageEvent
.get_room_topic(&room_id)
.await
else {
return Ok(RoomMessageEventContent::text_plain("Room does not have a room topic set."));
return Err!("Room does not have a room topic set.");
};
Ok(RoomMessageEventContent::notice_markdown(format!(
"Room topic:\n```\n{room_topic}\n```"
)))
self.write_str(&format!("Room topic:\n```\n{room_topic}\n```"))
.await
}

View file

@ -1,15 +1,12 @@
use api::client::leave_room;
use clap::Subcommand;
use conduwuit::{
Result, debug,
Err, Result, debug,
utils::{IterStream, ReadyExt},
warn,
};
use futures::StreamExt;
use ruma::{
OwnedRoomId, RoomAliasId, RoomId, RoomOrAliasId,
events::room::message::RoomMessageEventContent,
};
use ruma::{OwnedRoomId, OwnedRoomOrAliasId, RoomAliasId, RoomId, RoomOrAliasId};
use crate::{admin_command, admin_command_dispatch, get_room_info};
@ -24,7 +21,7 @@ pub(crate) enum RoomModerationCommand {
BanRoom {
/// The room in the format of `!roomid:example.com` or a room alias in
/// the format of `#roomalias:example.com`
room: Box<RoomOrAliasId>,
room: OwnedRoomOrAliasId,
},
/// - Bans a list of rooms (room IDs and room aliases) from a newline
@ -36,7 +33,7 @@ pub(crate) enum RoomModerationCommand {
UnbanRoom {
/// The room in the format of `!roomid:example.com` or a room alias in
/// the format of `#roomalias:example.com`
room: Box<RoomOrAliasId>,
room: OwnedRoomOrAliasId,
},
/// - List of all rooms we have banned
@ -49,14 +46,14 @@ pub(crate) enum RoomModerationCommand {
}
#[admin_command]
async fn ban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventContent> {
async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
debug!("Got room alias or ID: {}", room);
let admin_room_alias = &self.services.globals.admin_alias;
if let Ok(admin_room_id) = self.services.admin.get_admin_room().await {
if room.to_string().eq(&admin_room_id) || room.to_string().eq(admin_room_alias) {
return Ok(RoomMessageEventContent::text_plain("Not allowed to ban the admin room."));
return Err!("Not allowed to ban the admin room.");
}
}
@ -64,11 +61,11 @@ async fn ban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventCon
let room_id = match RoomId::parse(&room) {
| Ok(room_id) => room_id,
| Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
return Err!(
"Failed to parse room ID {room}. Please note that this requires a full room \
ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
(`#roomalias:example.com`): {e}"
)));
);
},
};
@ -80,11 +77,11 @@ async fn ban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventCon
let room_alias = match RoomAliasId::parse(&room) {
| Ok(room_alias) => room_alias,
| Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
return Err!(
"Failed to parse room ID {room}. Please note that this requires a full room \
ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
(`#roomalias:example.com`): {e}"
)));
);
},
};
@ -123,9 +120,9 @@ async fn ban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventCon
room_id
},
| Err(e) => {
return Ok(RoomMessageEventContent::notice_plain(format!(
return Err!(
"Failed to resolve room alias {room_alias} to a room ID: {e}"
)));
);
},
}
},
@ -135,11 +132,11 @@ async fn ban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventCon
room_id
} else {
return Ok(RoomMessageEventContent::text_plain(
return Err!(
"Room specified is not a room ID or room alias. Please note that this requires a \
full room ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
(`#roomalias:example.com`)",
));
);
};
debug!("Making all users leave the room {room_id} and forgetting it");
@ -185,20 +182,19 @@ async fn ban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventCon
self.services.rooms.metadata.disable_room(&room_id, true);
Ok(RoomMessageEventContent::text_plain(
self.write_str(
"Room banned, removed all our local users, and disabled incoming federation with room.",
))
)
.await
}
#[admin_command]
async fn ban_list_of_rooms(&self) -> Result<RoomMessageEventContent> {
async fn ban_list_of_rooms(&self) -> Result {
if self.body.len() < 2
|| !self.body[0].trim().starts_with("```")
|| self.body.last().unwrap_or(&"").trim() != "```"
{
return Ok(RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
));
return Err!("Expected code block in command body. Add --help for details.",);
}
let rooms_s = self
@ -356,23 +352,24 @@ async fn ban_list_of_rooms(&self) -> Result<RoomMessageEventContent> {
self.services.rooms.metadata.disable_room(&room_id, true);
}
Ok(RoomMessageEventContent::text_plain(format!(
self.write_str(&format!(
"Finished bulk room ban, banned {room_ban_count} total rooms, evicted all users, and \
disabled incoming federation with the room."
)))
))
.await
}
#[admin_command]
async fn unban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventContent> {
async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result {
let room_id = if room.is_room_id() {
let room_id = match RoomId::parse(&room) {
| Ok(room_id) => room_id,
| Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
return Err!(
"Failed to parse room ID {room}. Please note that this requires a full room \
ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
(`#roomalias:example.com`): {e}"
)));
);
},
};
@ -384,11 +381,11 @@ async fn unban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventC
let room_alias = match RoomAliasId::parse(&room) {
| Ok(room_alias) => room_alias,
| Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
return Err!(
"Failed to parse room ID {room}. Please note that this requires a full room \
ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
(`#roomalias:example.com`): {e}"
)));
);
},
};
@ -427,9 +424,7 @@ async fn unban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventC
room_id
},
| Err(e) => {
return Ok(RoomMessageEventContent::text_plain(format!(
"Failed to resolve room alias {room} to a room ID: {e}"
)));
return Err!("Failed to resolve room alias {room} to a room ID: {e}");
},
}
},
@ -439,19 +434,20 @@ async fn unban_room(&self, room: Box<RoomOrAliasId>) -> Result<RoomMessageEventC
room_id
} else {
return Ok(RoomMessageEventContent::text_plain(
return Err!(
"Room specified is not a room ID or room alias. Please note that this requires a \
full room ID (`!awIh6gGInaS5wLQJwa:example.com`) or a room alias \
(`#roomalias:example.com`)",
));
);
};
self.services.rooms.metadata.disable_room(&room_id, false);
Ok(RoomMessageEventContent::text_plain("Room unbanned and federation re-enabled."))
self.write_str("Room unbanned and federation re-enabled.")
.await
}
#[admin_command]
async fn list_banned_rooms(&self, no_details: bool) -> Result<RoomMessageEventContent> {
async fn list_banned_rooms(&self, no_details: bool) -> Result {
let room_ids: Vec<OwnedRoomId> = self
.services
.rooms
@ -462,7 +458,7 @@ async fn list_banned_rooms(&self, no_details: bool) -> Result<RoomMessageEventCo
.await;
if room_ids.is_empty() {
return Ok(RoomMessageEventContent::text_plain("No rooms are banned."));
return Err!("No rooms are banned.");
}
let mut rooms = room_ids
@ -475,19 +471,20 @@ async fn list_banned_rooms(&self, no_details: bool) -> Result<RoomMessageEventCo
rooms.sort_by_key(|r| r.1);
rooms.reverse();
let output_plain = format!(
"Rooms Banned ({}):\n```\n{}\n```",
rooms.len(),
rooms
.iter()
.map(|(id, members, name)| if no_details {
let num = rooms.len();
let body = rooms
.iter()
.map(|(id, members, name)| {
if no_details {
format!("{id}")
} else {
format!("{id}\tMembers: {members}\tName: {name}")
})
.collect::<Vec<_>>()
.join("\n")
);
}
})
.collect::<Vec<_>>()
.join("\n");
Ok(RoomMessageEventContent::notice_markdown(output_plain))
self.write_str(&format!("Rooms Banned ({num}):\n```\n{body}\n```",))
.await
}
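
`ban_room` and `unban_room` now accept an `OwnedRoomOrAliasId` and keep their explicit branching so each failure mode gets its own `Err!` message. Stripped of those messages, the resolution they perform amounts roughly to the sketch below; the helper name is made up, the error text abbreviated, and the crate alias for the service types may differ, so treat it as a reading aid rather than code from the diff:

```rust
use conduwuit::{Result, err};
use conduwuit_service::Services; // assumed import; the crate alias may differ
use ruma::{OwnedRoomId, OwnedRoomOrAliasId, RoomId};

// Hypothetical condensed helper; the commands above inline this logic so they
// can attach more detailed error messages at each step.
async fn parse_room_argument(
	services: &Services,
	room: OwnedRoomOrAliasId,
) -> Result<OwnedRoomId> {
	if room.is_room_id() {
		// full room IDs are parsed directly
		RoomId::parse(&room).map_err(|e| err!("Failed to parse room ID {room}: {e}"))
	} else {
		// aliases are resolved to a room ID by the alias service
		services.rooms.alias.resolve(&room).await
	}
}
```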

View file

@ -1,12 +1,16 @@
use std::{fmt::Write, path::PathBuf, sync::Arc};
use conduwuit::{Err, Result, info, utils::time, warn};
use ruma::events::room::message::RoomMessageEventContent;
use conduwuit::{
Err, Result, info,
utils::{stream::IterStream, time},
warn,
};
use futures::TryStreamExt;
use crate::admin_command;
#[admin_command]
pub(super) async fn uptime(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn uptime(&self) -> Result {
let elapsed = self
.services
.server
@ -15,47 +19,36 @@ pub(super) async fn uptime(&self) -> Result<RoomMessageEventContent> {
.expect("standard duration");
let result = time::pretty(elapsed);
Ok(RoomMessageEventContent::notice_plain(format!("{result}.")))
self.write_str(&format!("{result}.")).await
}
#[admin_command]
pub(super) async fn show_config(&self) -> Result<RoomMessageEventContent> {
// Construct and send the response
Ok(RoomMessageEventContent::text_markdown(format!(
"{}",
*self.services.server.config
)))
pub(super) async fn show_config(&self) -> Result {
self.write_str(&format!("{}", *self.services.server.config))
.await
}
#[admin_command]
pub(super) async fn reload_config(
&self,
path: Option<PathBuf>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn reload_config(&self, path: Option<PathBuf>) -> Result {
let path = path.as_deref().into_iter();
self.services.config.reload(path)?;
Ok(RoomMessageEventContent::text_plain("Successfully reconfigured."))
self.write_str("Successfully reconfigured.").await
}
#[admin_command]
pub(super) async fn list_features(
&self,
available: bool,
enabled: bool,
comma: bool,
) -> Result<RoomMessageEventContent> {
pub(super) async fn list_features(&self, available: bool, enabled: bool, comma: bool) -> Result {
let delim = if comma { "," } else { " " };
if enabled && !available {
let features = info::rustc::features().join(delim);
let out = format!("`\n{features}\n`");
return Ok(RoomMessageEventContent::text_markdown(out));
return self.write_str(&out).await;
}
if available && !enabled {
let features = info::cargo::features().join(delim);
let out = format!("`\n{features}\n`");
return Ok(RoomMessageEventContent::text_markdown(out));
return self.write_str(&out).await;
}
let mut features = String::new();
@ -68,77 +61,76 @@ pub(super) async fn list_features(
writeln!(features, "{emoji} {feature} {remark}")?;
}
Ok(RoomMessageEventContent::text_markdown(features))
self.write_str(&features).await
}
#[admin_command]
pub(super) async fn memory_usage(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn memory_usage(&self) -> Result {
let services_usage = self.services.memory_usage().await?;
let database_usage = self.services.db.db.memory_usage()?;
let allocator_usage =
conduwuit::alloc::memory_usage().map_or(String::new(), |s| format!("\nAllocator:\n{s}"));
Ok(RoomMessageEventContent::text_plain(format!(
self.write_str(&format!(
"Services:\n{services_usage}\nDatabase:\n{database_usage}{allocator_usage}",
)))
))
.await
}
#[admin_command]
pub(super) async fn clear_caches(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn clear_caches(&self) -> Result {
self.services.clear_cache().await;
Ok(RoomMessageEventContent::text_plain("Done."))
self.write_str("Done.").await
}
#[admin_command]
pub(super) async fn list_backups(&self) -> Result<RoomMessageEventContent> {
let result = self.services.db.db.backup_list()?;
if result.is_empty() {
Ok(RoomMessageEventContent::text_plain("No backups found."))
} else {
Ok(RoomMessageEventContent::text_plain(result))
}
pub(super) async fn list_backups(&self) -> Result {
self.services
.db
.db
.backup_list()?
.try_stream()
.try_for_each(|result| write!(self, "{result}"))
.await
}
#[admin_command]
pub(super) async fn backup_database(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn backup_database(&self) -> Result {
let db = Arc::clone(&self.services.db);
let mut result = self
let result = self
.services
.server
.runtime()
.spawn_blocking(move || match db.db.backup() {
| Ok(()) => String::new(),
| Err(e) => e.to_string(),
| Ok(()) => "Done".to_owned(),
| Err(e) => format!("Failed: {e}"),
})
.await?;
if result.is_empty() {
result = self.services.db.db.backup_list()?;
}
Ok(RoomMessageEventContent::notice_markdown(result))
let count = self.services.db.db.backup_count()?;
self.write_str(&format!("{result}. Currently have {count} backups."))
.await
}
#[admin_command]
pub(super) async fn admin_notice(&self, message: Vec<String>) -> Result<RoomMessageEventContent> {
pub(super) async fn admin_notice(&self, message: Vec<String>) -> Result {
let message = message.join(" ");
self.services.admin.send_text(&message).await;
Ok(RoomMessageEventContent::notice_plain("Notice was sent to #admins"))
self.write_str("Notice was sent to #admins").await
}
#[admin_command]
pub(super) async fn reload_mods(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn reload_mods(&self) -> Result {
self.services.server.reload()?;
Ok(RoomMessageEventContent::notice_plain("Reloading server..."))
self.write_str("Reloading server...").await
}
#[admin_command]
#[cfg(unix)]
pub(super) async fn restart(&self, force: bool) -> Result<RoomMessageEventContent> {
pub(super) async fn restart(&self, force: bool) -> Result {
use conduwuit::utils::sys::current_exe_deleted;
if !force && current_exe_deleted() {
@ -150,13 +142,13 @@ pub(super) async fn restart(&self, force: bool) -> Result<RoomMessageEventConten
self.services.server.restart()?;
Ok(RoomMessageEventContent::notice_plain("Restarting server..."))
self.write_str("Restarting server...").await
}
#[admin_command]
pub(super) async fn shutdown(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn shutdown(&self) -> Result {
warn!("shutdown command");
self.services.server.shutdown()?;
Ok(RoomMessageEventContent::notice_plain("Shutting down server..."))
self.write_str("Shutting down server...").await
}
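
Two details stand out in the server commands: `list_backups` no longer branches on a pre-joined string, since `backup_list()` now yields individual entries that are turned into a stream and written into the command context one `write!` at a time, and `backup_database` reports `backup_count()` instead of re-listing. A hedged sketch of the streaming-write shape with a stand-in data source (`example_entries` is invented for illustration):

```rust
use conduwuit::{Result, utils::stream::IterStream};
use futures::TryStreamExt;

use crate::admin_command;

// Stand-in for a call like backup_list(): any Result over displayable entries.
fn example_entries() -> Result<Vec<String>> {
	Ok(vec!["backup-001".to_owned(), "backup-002".to_owned()])
}

// Sketch: each entry becomes one write into the command output rather than
// being joined into a single reply string first.
#[admin_command]
async fn stream_entries(&self) -> Result {
	example_entries()?
		.try_stream()
		.try_for_each(|entry| write!(self, "{entry}"))
		.await
}
```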

View file

@ -2,7 +2,7 @@ use std::{collections::BTreeMap, fmt::Write as _};
use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room};
use conduwuit::{
Result, debug, debug_warn, error, info, is_equal_to,
Err, Result, debug, debug_warn, error, info, is_equal_to,
matrix::pdu::PduBuilder,
utils::{self, ReadyExt},
warn,
@ -10,11 +10,10 @@ use conduwuit::{
use conduwuit_api::client::{leave_all_rooms, update_avatar_url, update_displayname};
use futures::StreamExt;
use ruma::{
EventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, RoomId, UserId,
OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, UserId,
events::{
RoomAccountDataEventType, StateEventType,
room::{
message::RoomMessageEventContent,
power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
redaction::RoomRedactionEventContent,
},
@ -31,7 +30,7 @@ const AUTO_GEN_PASSWORD_LENGTH: usize = 25;
const BULK_JOIN_REASON: &str = "Bulk force joining this room as initiated by the server admin.";
#[admin_command]
pub(super) async fn list_users(&self) -> Result<RoomMessageEventContent> {
pub(super) async fn list_users(&self) -> Result {
let users: Vec<_> = self
.services
.users
@ -44,30 +43,22 @@ pub(super) async fn list_users(&self) -> Result<RoomMessageEventContent> {
plain_msg += users.join("\n").as_str();
plain_msg += "\n```";
self.write_str(plain_msg.as_str()).await?;
Ok(RoomMessageEventContent::text_plain(""))
self.write_str(&plain_msg).await
}
#[admin_command]
pub(super) async fn create_user(
&self,
username: String,
password: Option<String>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn create_user(&self, username: String, password: Option<String>) -> Result {
// Validate user id
let user_id = parse_local_user_id(self.services, &username)?;
if let Err(e) = user_id.validate_strict() {
if self.services.config.emergency_password.is_none() {
return Ok(RoomMessageEventContent::text_plain(format!(
"Username {user_id} contains disallowed characters or spaces: {e}"
)));
return Err!("Username {user_id} contains disallowed characters or spaces: {e}");
}
}
if self.services.users.exists(&user_id).await {
return Ok(RoomMessageEventContent::text_plain(format!("User {user_id} already exists")));
return Err!("User {user_id} already exists");
}
let password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH));
@ -89,8 +80,7 @@ pub(super) async fn create_user(
.new_user_displayname_suffix
.is_empty()
{
write!(displayname, " {}", self.services.server.config.new_user_displayname_suffix)
.expect("should be able to write to string buffer");
write!(displayname, " {}", self.services.server.config.new_user_displayname_suffix)?;
}
self.services
@ -110,15 +100,17 @@ pub(super) async fn create_user(
content: ruma::events::push_rules::PushRulesEventContent {
global: ruma::push::Ruleset::server_default(&user_id),
},
})
.expect("to json value always works"),
})?,
)
.await?;
if !self.services.server.config.auto_join_rooms.is_empty() {
for room in &self.services.server.config.auto_join_rooms {
let Ok(room_id) = self.services.rooms.alias.resolve(room).await else {
error!(%user_id, "Failed to resolve room alias to room ID when attempting to auto join {room}, skipping");
error!(
%user_id,
"Failed to resolve room alias to room ID when attempting to auto join {room}, skipping"
);
continue;
};
@ -154,18 +146,17 @@ pub(super) async fn create_user(
info!("Automatically joined room {room} for user {user_id}");
},
| Err(e) => {
self.services
.admin
.send_message(RoomMessageEventContent::text_plain(format!(
"Failed to automatically join room {room} for user {user_id}: \
{e}"
)))
.await
.ok();
// don't return this error so we don't fail registrations
error!(
"Failed to automatically join room {room} for user {user_id}: {e}"
);
self.services
.admin
.send_text(&format!(
"Failed to automatically join room {room} for user {user_id}: \
{e}"
))
.await;
},
}
}
@ -192,25 +183,18 @@ pub(super) async fn create_user(
debug!("create_user admin command called without an admin room being available");
}
Ok(RoomMessageEventContent::text_plain(format!(
"Created user with user_id: {user_id} and password: `{password}`"
)))
self.write_str(&format!("Created user with user_id: {user_id} and password: `{password}`"))
.await
}
#[admin_command]
pub(super) async fn deactivate(
&self,
no_leave_rooms: bool,
user_id: String,
) -> Result<RoomMessageEventContent> {
pub(super) async fn deactivate(&self, no_leave_rooms: bool, user_id: String) -> Result {
// Validate user id
let user_id = parse_local_user_id(self.services, &user_id)?;
// don't deactivate the server service account
if user_id == self.services.globals.server_user {
return Ok(RoomMessageEventContent::text_plain(
"Not allowed to deactivate the server service account.",
));
return Err!("Not allowed to deactivate the server service account.",);
}
self.services.users.deactivate_account(&user_id).await?;
@ -218,11 +202,8 @@ pub(super) async fn deactivate(
if !no_leave_rooms {
self.services
.admin
.send_message(RoomMessageEventContent::text_plain(format!(
"Making {user_id} leave all rooms after deactivation..."
)))
.await
.ok();
.send_text(&format!("Making {user_id} leave all rooms after deactivation..."))
.await;
let all_joined_rooms: Vec<OwnedRoomId> = self
.services
@ -239,24 +220,19 @@ pub(super) async fn deactivate(
leave_all_rooms(self.services, &user_id).await;
}
Ok(RoomMessageEventContent::text_plain(format!(
"User {user_id} has been deactivated"
)))
self.write_str(&format!("User {user_id} has been deactivated"))
.await
}
#[admin_command]
pub(super) async fn reset_password(
&self,
username: String,
password: Option<String>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn reset_password(&self, username: String, password: Option<String>) -> Result {
let user_id = parse_local_user_id(self.services, &username)?;
if user_id == self.services.globals.server_user {
return Ok(RoomMessageEventContent::text_plain(
return Err!(
"Not allowed to set the password for the server account. Please use the emergency \
password config option.",
));
);
}
let new_password = password.unwrap_or_else(|| utils::random_string(AUTO_GEN_PASSWORD_LENGTH));
@ -266,28 +242,20 @@ pub(super) async fn reset_password(
.users
.set_password(&user_id, Some(new_password.as_str()))
{
| Ok(()) => Ok(RoomMessageEventContent::text_plain(format!(
"Successfully reset the password for user {user_id}: `{new_password}`"
))),
| Err(e) => Ok(RoomMessageEventContent::text_plain(format!(
"Couldn't reset the password for user {user_id}: {e}"
))),
| Err(e) => return Err!("Couldn't reset the password for user {user_id}: {e}"),
| Ok(()) =>
write!(self, "Successfully reset the password for user {user_id}: `{new_password}`"),
}
.await
}
#[admin_command]
pub(super) async fn deactivate_all(
&self,
no_leave_rooms: bool,
force: bool,
) -> Result<RoomMessageEventContent> {
pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) -> Result {
if self.body.len() < 2
|| !self.body[0].trim().starts_with("```")
|| self.body.last().unwrap_or(&"").trim() != "```"
{
return Ok(RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
));
return Err!("Expected code block in command body. Add --help for details.",);
}
let usernames = self
@ -301,15 +269,23 @@ pub(super) async fn deactivate_all(
for username in usernames {
match parse_active_local_user_id(self.services, username).await {
| Err(e) => {
self.services
.admin
.send_text(&format!("{username} is not a valid username, skipping over: {e}"))
.await;
continue;
},
| Ok(user_id) => {
if self.services.users.is_admin(&user_id).await && !force {
self.services
.admin
.send_message(RoomMessageEventContent::text_plain(format!(
.send_text(&format!(
"{username} is an admin and --force is not set, skipping over"
)))
.await
.ok();
))
.await;
admins.push(username);
continue;
}
@ -318,26 +294,16 @@ pub(super) async fn deactivate_all(
if user_id == self.services.globals.server_user {
self.services
.admin
.send_message(RoomMessageEventContent::text_plain(format!(
.send_text(&format!(
"{username} is the server service account, skipping over"
)))
.await
.ok();
))
.await;
continue;
}
user_ids.push(user_id);
},
| Err(e) => {
self.services
.admin
.send_message(RoomMessageEventContent::text_plain(format!(
"{username} is not a valid username, skipping over: {e}"
)))
.await
.ok();
continue;
},
}
}
@ -345,6 +311,12 @@ pub(super) async fn deactivate_all(
for user_id in user_ids {
match self.services.users.deactivate_account(&user_id).await {
| Err(e) => {
self.services
.admin
.send_text(&format!("Failed deactivating user: {e}"))
.await;
},
| Ok(()) => {
deactivation_count = deactivation_count.saturating_add(1);
if !no_leave_rooms {
@ -365,33 +337,24 @@ pub(super) async fn deactivate_all(
leave_all_rooms(self.services, &user_id).await;
}
},
| Err(e) => {
self.services
.admin
.send_message(RoomMessageEventContent::text_plain(format!(
"Failed deactivating user: {e}"
)))
.await
.ok();
},
}
}
if admins.is_empty() {
Ok(RoomMessageEventContent::text_plain(format!(
"Deactivated {deactivation_count} accounts."
)))
write!(self, "Deactivated {deactivation_count} accounts.")
} else {
Ok(RoomMessageEventContent::text_plain(format!(
write!(
self,
"Deactivated {deactivation_count} accounts.\nSkipped admin accounts: {}. Use \
--force to deactivate admin accounts",
admins.join(", ")
)))
)
}
.await
}
#[admin_command]
pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result<RoomMessageEventContent> {
pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result {
// Validate user id
let user_id = parse_local_user_id(self.services, &user_id)?;
@ -405,23 +368,20 @@ pub(super) async fn list_joined_rooms(&self, user_id: String) -> Result<RoomMess
.await;
if rooms.is_empty() {
return Ok(RoomMessageEventContent::text_plain("User is not in any rooms."));
return Err!("User is not in any rooms.");
}
rooms.sort_by_key(|r| r.1);
rooms.reverse();
let output_plain = format!(
"Rooms {user_id} Joined ({}):\n```\n{}\n```",
rooms.len(),
rooms
.iter()
.map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}"))
.collect::<Vec<_>>()
.join("\n")
);
let body = rooms
.iter()
.map(|(id, members, name)| format!("{id}\tMembers: {members}\tName: {name}"))
.collect::<Vec<_>>()
.join("\n");
Ok(RoomMessageEventContent::notice_markdown(output_plain))
self.write_str(&format!("Rooms {user_id} Joined ({}):\n```\n{body}\n```", rooms.len(),))
.await
}
#[admin_command]
@ -429,27 +389,23 @@ pub(super) async fn force_join_list_of_local_users(
&self,
room_id: OwnedRoomOrAliasId,
yes_i_want_to_do_this: bool,
) -> Result<RoomMessageEventContent> {
) -> Result {
if self.body.len() < 2
|| !self.body[0].trim().starts_with("```")
|| self.body.last().unwrap_or(&"").trim() != "```"
{
return Ok(RoomMessageEventContent::text_plain(
"Expected code block in command body. Add --help for details.",
));
return Err!("Expected code block in command body. Add --help for details.",);
}
if !yes_i_want_to_do_this {
return Ok(RoomMessageEventContent::notice_markdown(
return Err!(
"You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force \
bulk join all specified local users.",
));
);
}
let Ok(admin_room) = self.services.admin.get_admin_room().await else {
return Ok(RoomMessageEventContent::notice_markdown(
"There is not an admin room to check for server admins.",
));
return Err!("There is not an admin room to check for server admins.",);
};
let (room_id, servers) = self
@ -466,7 +422,7 @@ pub(super) async fn force_join_list_of_local_users(
.server_in_room(self.services.globals.server_name(), &room_id)
.await
{
return Ok(RoomMessageEventContent::notice_markdown("We are not joined in this room."));
return Err!("We are not joined in this room.");
}
let server_admins: Vec<_> = self
@ -486,9 +442,7 @@ pub(super) async fn force_join_list_of_local_users(
.ready_any(|user_id| server_admins.contains(&user_id.to_owned()))
.await
{
return Ok(RoomMessageEventContent::notice_markdown(
"There is not a single server admin in the room.",
));
return Err!("There is not a single server admin in the room.",);
}
let usernames = self
@ -506,11 +460,11 @@ pub(super) async fn force_join_list_of_local_users(
if user_id == self.services.globals.server_user {
self.services
.admin
.send_message(RoomMessageEventContent::text_plain(format!(
.send_text(&format!(
"{username} is the server service account, skipping over"
)))
.await
.ok();
))
.await;
continue;
}
@ -519,11 +473,9 @@ pub(super) async fn force_join_list_of_local_users(
| Err(e) => {
self.services
.admin
.send_message(RoomMessageEventContent::text_plain(format!(
"{username} is not a valid username, skipping over: {e}"
)))
.await
.ok();
.send_text(&format!("{username} is not a valid username, skipping over: {e}"))
.await;
continue;
},
}
@ -554,10 +506,11 @@ pub(super) async fn force_join_list_of_local_users(
}
}
Ok(RoomMessageEventContent::notice_markdown(format!(
self.write_str(&format!(
"{successful_joins} local users have been joined to {room_id}. {failed_joins} joins \
failed.",
)))
))
.await
}
#[admin_command]
@ -565,18 +518,16 @@ pub(super) async fn force_join_all_local_users(
&self,
room_id: OwnedRoomOrAliasId,
yes_i_want_to_do_this: bool,
) -> Result<RoomMessageEventContent> {
) -> Result {
if !yes_i_want_to_do_this {
return Ok(RoomMessageEventContent::notice_markdown(
return Err!(
"You must pass the --yes-i-want-to-do-this-flag to ensure you really want to force \
bulk join all local users.",
));
);
}
let Ok(admin_room) = self.services.admin.get_admin_room().await else {
return Ok(RoomMessageEventContent::notice_markdown(
"There is not an admin room to check for server admins.",
));
return Err!("There is not an admin room to check for server admins.",);
};
let (room_id, servers) = self
@ -593,7 +544,7 @@ pub(super) async fn force_join_all_local_users(
.server_in_room(self.services.globals.server_name(), &room_id)
.await
{
return Ok(RoomMessageEventContent::notice_markdown("We are not joined in this room."));
return Err!("We are not joined in this room.");
}
let server_admins: Vec<_> = self
@ -613,9 +564,7 @@ pub(super) async fn force_join_all_local_users(
.ready_any(|user_id| server_admins.contains(&user_id.to_owned()))
.await
{
return Ok(RoomMessageEventContent::notice_markdown(
"There is not a single server admin in the room.",
));
return Err!("There is not a single server admin in the room.",);
}
let mut failed_joins: usize = 0;
@ -650,10 +599,11 @@ pub(super) async fn force_join_all_local_users(
}
}
Ok(RoomMessageEventContent::notice_markdown(format!(
self.write_str(&format!(
"{successful_joins} local users have been joined to {room_id}. {failed_joins} joins \
failed.",
)))
))
.await
}
#[admin_command]
@ -661,7 +611,7 @@ pub(super) async fn force_join_room(
&self,
user_id: String,
room_id: OwnedRoomOrAliasId,
) -> Result<RoomMessageEventContent> {
) -> Result {
let user_id = parse_local_user_id(self.services, &user_id)?;
let (room_id, servers) = self
.services
@ -677,9 +627,8 @@ pub(super) async fn force_join_room(
join_room_by_id_helper(self.services, &user_id, &room_id, None, &servers, None, &None)
.await?;
Ok(RoomMessageEventContent::notice_markdown(format!(
"{user_id} has been joined to {room_id}.",
)))
self.write_str(&format!("{user_id} has been joined to {room_id}.",))
.await
}
#[admin_command]
@ -687,7 +636,7 @@ pub(super) async fn force_leave_room(
&self,
user_id: String,
room_id: OwnedRoomOrAliasId,
) -> Result<RoomMessageEventContent> {
) -> Result {
let user_id = parse_local_user_id(self.services, &user_id)?;
let room_id = self.services.rooms.alias.resolve(&room_id).await?;
@ -703,24 +652,17 @@ pub(super) async fn force_leave_room(
.is_joined(&user_id, &room_id)
.await
{
return Ok(RoomMessageEventContent::notice_markdown(format!(
"{user_id} is not joined in the room"
)));
return Err!("{user_id} is not joined in the room");
}
leave_room(self.services, &user_id, &room_id, None).await?;
Ok(RoomMessageEventContent::notice_markdown(format!(
"{user_id} has left {room_id}.",
)))
self.write_str(&format!("{user_id} has left {room_id}.",))
.await
}
#[admin_command]
pub(super) async fn force_demote(
&self,
user_id: String,
room_id: OwnedRoomOrAliasId,
) -> Result<RoomMessageEventContent> {
pub(super) async fn force_demote(&self, user_id: String, room_id: OwnedRoomOrAliasId) -> Result {
let user_id = parse_local_user_id(self.services, &user_id)?;
let room_id = self.services.rooms.alias.resolve(&room_id).await?;
@ -731,15 +673,11 @@ pub(super) async fn force_demote(
let state_lock = self.services.rooms.state.mutex.lock(&room_id).await;
let room_power_levels = self
let room_power_levels: Option<RoomPowerLevelsEventContent> = self
.services
.rooms
.state_accessor
.room_state_get_content::<RoomPowerLevelsEventContent>(
&room_id,
&StateEventType::RoomPowerLevels,
"",
)
.room_state_get_content(&room_id, &StateEventType::RoomPowerLevels, "")
.await
.ok();
@ -757,9 +695,7 @@ pub(super) async fn force_demote(
.is_ok_and(|event| event.sender == user_id);
if !user_can_demote_self {
return Ok(RoomMessageEventContent::notice_markdown(
"User is not allowed to modify their own power levels in the room.",
));
return Err!("User is not allowed to modify their own power levels in the room.",);
}
let mut power_levels_content = room_power_levels.unwrap_or_default();
@ -777,34 +713,34 @@ pub(super) async fn force_demote(
)
.await?;
Ok(RoomMessageEventContent::notice_markdown(format!(
self.write_str(&format!(
"User {user_id} demoted themselves to the room default power level in {room_id} - \
{event_id}"
)))
))
.await
}
#[admin_command]
pub(super) async fn make_user_admin(&self, user_id: String) -> Result<RoomMessageEventContent> {
pub(super) async fn make_user_admin(&self, user_id: String) -> Result {
let user_id = parse_local_user_id(self.services, &user_id)?;
assert!(
self.services.globals.user_is_local(&user_id),
"Parsed user_id must be a local user"
);
self.services.admin.make_user_admin(&user_id).await?;
Ok(RoomMessageEventContent::notice_markdown(format!(
"{user_id} has been granted admin privileges.",
)))
self.write_str(&format!("{user_id} has been granted admin privileges.",))
.await
}
#[admin_command]
pub(super) async fn put_room_tag(
&self,
user_id: String,
room_id: Box<RoomId>,
room_id: OwnedRoomId,
tag: String,
) -> Result<RoomMessageEventContent> {
) -> Result {
let user_id = parse_active_local_user_id(self.services, &user_id).await?;
let mut tags_event = self
@ -831,18 +767,19 @@ pub(super) async fn put_room_tag(
)
.await?;
Ok(RoomMessageEventContent::text_plain(format!(
self.write_str(&format!(
"Successfully updated room account data for {user_id} and room {room_id} with tag {tag}"
)))
))
.await
}
#[admin_command]
pub(super) async fn delete_room_tag(
&self,
user_id: String,
room_id: Box<RoomId>,
room_id: OwnedRoomId,
tag: String,
) -> Result<RoomMessageEventContent> {
) -> Result {
let user_id = parse_active_local_user_id(self.services, &user_id).await?;
let mut tags_event = self
@ -866,18 +803,15 @@ pub(super) async fn delete_room_tag(
)
.await?;
Ok(RoomMessageEventContent::text_plain(format!(
self.write_str(&format!(
"Successfully updated room account data for {user_id} and room {room_id}, deleting room \
tag {tag}"
)))
))
.await
}
#[admin_command]
pub(super) async fn get_room_tags(
&self,
user_id: String,
room_id: Box<RoomId>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn get_room_tags(&self, user_id: String, room_id: OwnedRoomId) -> Result {
let user_id = parse_active_local_user_id(self.services, &user_id).await?;
let tags_event = self
@ -889,17 +823,12 @@ pub(super) async fn get_room_tags(
content: TagEventContent { tags: BTreeMap::new() },
});
Ok(RoomMessageEventContent::notice_markdown(format!(
"```\n{:#?}\n```",
tags_event.content.tags
)))
self.write_str(&format!("```\n{:#?}\n```", tags_event.content.tags))
.await
}
#[admin_command]
pub(super) async fn redact_event(
&self,
event_id: Box<EventId>,
) -> Result<RoomMessageEventContent> {
pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result {
let Ok(event) = self
.services
.rooms
@ -907,20 +836,18 @@ pub(super) async fn redact_event(
.get_non_outlier_pdu(&event_id)
.await
else {
return Ok(RoomMessageEventContent::text_plain("Event does not exist in our database."));
return Err!("Event does not exist in our database.");
};
if event.is_redacted() {
return Ok(RoomMessageEventContent::text_plain("Event is already redacted."));
return Err!("Event is already redacted.");
}
let room_id = event.room_id;
let sender_user = event.sender;
if !self.services.globals.user_is_local(&sender_user) {
return Ok(RoomMessageEventContent::text_plain(
"This command only works on local users.",
));
return Err!("This command only works on local users.");
}
let reason = format!(
@ -949,9 +876,8 @@ pub(super) async fn redact_event(
.await?
};
let out = format!("Successfully redacted event. Redaction event ID: {redaction_event_id}");
self.write_str(out.as_str()).await?;
Ok(RoomMessageEventContent::text_plain(""))
self.write_str(&format!(
"Successfully redacted event. Redaction event ID: {redaction_event_id}"
))
.await
}
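
Throughout this file the side-channel notices to the admin room switch from `send_message` with a hand-built `RoomMessageEventContent` (whose `Result` was discarded with `.ok()`) to `send_text`, which takes the string directly. A condensed before/after of one such call, lifted from the hunks above:

```rust
// Before: wrap the text in a RoomMessageEventContent and swallow the Result.
self.services
	.admin
	.send_message(RoomMessageEventContent::text_plain(format!(
		"{username} is an admin and --force is not set, skipping over"
	)))
	.await
	.ok();

// After: send_text takes the plain string and is simply awaited.
self.services
	.admin
	.send_text(&format!("{username} is an admin and --force is not set, skipping over"))
	.await;
```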

View file

@ -2,7 +2,7 @@ mod commands;
use clap::Subcommand;
use conduwuit::Result;
use ruma::{EventId, OwnedRoomOrAliasId, RoomId};
use ruma::{OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId};
use crate::admin_command_dispatch;
@ -102,21 +102,21 @@ pub(super) enum UserCommand {
/// room's internal ID, and the tag name `m.server_notice`.
PutRoomTag {
user_id: String,
room_id: Box<RoomId>,
room_id: OwnedRoomId,
tag: String,
},
/// - Deletes the room tag for the specified user and room ID
DeleteRoomTag {
user_id: String,
room_id: Box<RoomId>,
room_id: OwnedRoomId,
tag: String,
},
/// - Gets all the room tags for the specified user and room ID
GetRoomTags {
user_id: String,
room_id: Box<RoomId>,
room_id: OwnedRoomId,
},
/// - Attempts to forcefully redact the specified event ID from the sender
@ -124,7 +124,7 @@ pub(super) enum UserCommand {
///
/// This is only valid for local users
RedactEvent {
event_id: Box<EventId>,
event_id: OwnedEventId,
},
/// - Force joins a specified list of local users to join the specified

View file

@ -1,3 +1,5 @@
#![allow(dead_code)]
use conduwuit_core::{Err, Result, err};
use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId};
use service::Services;

View file

@ -17,21 +17,50 @@ crate-type = [
]
[features]
element_hacks = []
release_max_log_level = [
"tracing/max_level_trace",
"tracing/release_max_level_info",
"log/max_level_trace",
"log/release_max_level_info",
brotli_compression = [
"conduwuit-core/brotli_compression",
"conduwuit-service/brotli_compression",
"reqwest/brotli",
]
zstd_compression = [
"reqwest/zstd",
element_hacks = [
"conduwuit-service/element_hacks",
]
gzip_compression = [
"conduwuit-core/gzip_compression",
"conduwuit-service/gzip_compression",
"reqwest/gzip",
]
brotli_compression = [
"reqwest/brotli",
io_uring = [
"conduwuit-service/io_uring",
]
jemalloc = [
"conduwuit-core/jemalloc",
"conduwuit-service/jemalloc",
]
jemalloc_conf = [
"conduwuit-core/jemalloc_conf",
"conduwuit-service/jemalloc_conf",
]
jemalloc_prof = [
"conduwuit-core/jemalloc_prof",
"conduwuit-service/jemalloc_prof",
]
jemalloc_stats = [
"conduwuit-core/jemalloc_stats",
"conduwuit-service/jemalloc_stats",
]
release_max_log_level = [
"conduwuit-core/release_max_log_level",
"conduwuit-service/release_max_log_level",
"log/max_level_trace",
"log/release_max_level_info",
"tracing/max_level_trace",
"tracing/release_max_level_info",
]
zstd_compression = [
"conduwuit-core/zstd_compression",
"conduwuit-service/zstd_compression",
"reqwest/zstd",
]
[dependencies]
@ -42,7 +71,6 @@ axum.workspace = true
base64.workspace = true
bytes.workspace = true
conduwuit-core.workspace = true
conduwuit-database.workspace = true
conduwuit-service.workspace = true
const-str.workspace = true
futures.workspace = true

View file

@ -1,6 +1,6 @@
use std::{
borrow::Borrow,
collections::{BTreeMap, HashMap, HashSet},
collections::{HashMap, HashSet},
iter::once,
net::IpAddr,
sync::Arc,
@ -9,7 +9,7 @@ use std::{
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{
Err, Result, at, debug, debug_info, debug_warn, err, error, info,
Err, Result, at, debug, debug_error, debug_info, debug_warn, err, error, info, is_matching,
matrix::{
StateKey,
pdu::{PduBuilder, PduEvent, gen_event_id, gen_event_id_canonical_json},
@ -17,7 +17,12 @@ use conduwuit::{
},
result::{FlatOk, NotFound},
trace,
utils::{self, IterStream, ReadyExt, shuffle},
utils::{
self, FutureBoolExt,
future::ReadyEqExt,
shuffle,
stream::{BroadbandExt, IterStream, ReadyExt},
},
warn,
};
use conduwuit_service::{
@ -28,7 +33,7 @@ use conduwuit_service::{
state_compressor::{CompressedState, HashSetCompressStateEvent},
},
};
use futures::{FutureExt, StreamExt, TryFutureExt, future::join4, join};
use futures::{FutureExt, StreamExt, TryFutureExt, join, pin_mut};
use ruma::{
CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName,
OwnedUserId, RoomId, RoomVersionId, ServerName, UserId,
@ -52,7 +57,6 @@ use ruma::{
room::{
join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent},
member::{MembershipState, RoomMemberEventContent},
message::RoomMessageEventContent,
},
},
};
@ -81,7 +85,7 @@ async fn banned_room_check(
|| services
.config
.forbidden_remote_server_names
.is_match(room_id.server_name().unwrap().host())
.is_match(room_id.server_name().expect("legacy room mxid").host())
{
warn!(
"User {user_id} who is not an admin attempted to send an invite for or \
@ -96,12 +100,11 @@ async fn banned_room_check(
if services.server.config.admin_room_notices {
services
.admin
.send_message(RoomMessageEventContent::text_plain(format!(
.send_text(&format!(
"Automatically deactivating user {user_id} due to attempted banned \
room join from IP {client_ip}"
)))
.await
.ok();
))
.await;
}
let all_joined_rooms: Vec<OwnedRoomId> = services
@ -136,12 +139,11 @@ async fn banned_room_check(
if services.server.config.admin_room_notices {
services
.admin
.send_message(RoomMessageEventContent::text_plain(format!(
.send_text(&format!(
"Automatically deactivating user {user_id} due to attempted banned \
room join from IP {client_ip}"
)))
.await
.ok();
))
.await;
}
let all_joined_rooms: Vec<OwnedRoomId> = services
@ -366,10 +368,10 @@ pub(crate) async fn knock_room_route(
InsecureClientIp(client): InsecureClientIp,
body: Ruma<knock_room::v3::Request>,
) -> Result<knock_room::v3::Response> {
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let body = body.body;
let sender_user = body.sender_user();
let body = &body.body;
let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) {
let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias.clone()) {
| Ok(room_id) => {
banned_room_check(
&services,
@ -493,7 +495,7 @@ pub(crate) async fn invite_user_route(
let sender_user = body.sender_user();
if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites {
info!(
debug_error!(
"User {sender_user} is not an admin and attempted to send an invite to room {}",
&body.room_id
);
@ -722,12 +724,10 @@ pub(crate) async fn forget_room_route(
let joined = services.rooms.state_cache.is_joined(user_id, room_id);
let knocked = services.rooms.state_cache.is_knocked(user_id, room_id);
let left = services.rooms.state_cache.is_left(user_id, room_id);
let invited = services.rooms.state_cache.is_invited(user_id, room_id);
let (joined, knocked, left, invited) = join4(joined, knocked, left, invited).await;
if joined || knocked || invited {
pin_mut!(joined, knocked, invited);
if joined.or(knocked).or(invited).await {
return Err!(Request(Unknown("You must leave the room before forgetting it")));
}
@ -741,11 +741,11 @@ pub(crate) async fn forget_room_route(
return Err!(Request(Unknown("No membership event was found, room was never joined")));
}
if left
|| membership.is_ok_and(|member| {
member.membership == MembershipState::Leave
|| member.membership == MembershipState::Ban
}) {
let non_membership = membership
.map(|member| member.membership)
.is_ok_and(is_matching!(MembershipState::Leave | MembershipState::Ban));
if non_membership || services.rooms.state_cache.is_left(user_id, room_id).await {
services.rooms.state_cache.forget(room_id, user_id);
}
@ -866,32 +866,32 @@ pub(crate) async fn joined_members_route(
State(services): State<crate::State>,
body: Ruma<joined_members::v3::Request>,
) -> Result<joined_members::v3::Response> {
let sender_user = body.sender_user();
if !services
.rooms
.state_accessor
.user_can_see_state_events(sender_user, &body.room_id)
.user_can_see_state_events(body.sender_user(), &body.room_id)
.await
{
return Err!(Request(Forbidden("You don't have permission to view this room.")));
}
let joined: BTreeMap<OwnedUserId, RoomMember> = services
.rooms
.state_cache
.room_members(&body.room_id)
.map(ToOwned::to_owned)
.then(|user| async move {
(user.clone(), RoomMember {
display_name: services.users.displayname(&user).await.ok(),
avatar_url: services.users.avatar_url(&user).await.ok(),
})
})
.collect()
.await;
Ok(joined_members::v3::Response {
joined: services
.rooms
.state_cache
.room_members(&body.room_id)
.map(ToOwned::to_owned)
.broad_then(|user_id| async move {
let member = RoomMember {
display_name: services.users.displayname(&user_id).await.ok(),
avatar_url: services.users.avatar_url(&user_id).await.ok(),
};
Ok(joined_members::v3::Response { joined })
(user_id, member)
})
.collect()
.await,
})
}
pub async fn join_room_by_id_helper(
@ -1118,9 +1118,10 @@ async fn join_room_by_id_helper_remote(
})?;
if signed_event_id != event_id {
return Err!(Request(BadJson(
warn!(%signed_event_id, %event_id, "Server {remote_server} sent event with wrong event ID")
)));
return Err!(Request(BadJson(warn!(
%signed_event_id, %event_id,
"Server {remote_server} sent event with wrong event ID"
))));
}
match signed_value["signatures"]
@ -1696,19 +1697,18 @@ pub(crate) async fn invite_helper(
})?;
if pdu.event_id != event_id {
return Err!(Request(BadJson(
warn!(%pdu.event_id, %event_id, "Server {} sent event with wrong event ID", user_id.server_name())
)));
return Err!(Request(BadJson(warn!(
%pdu.event_id, %event_id,
"Server {} sent event with wrong event ID",
user_id.server_name()
))));
}
let origin: OwnedServerName = serde_json::from_value(
serde_json::to_value(
value
.get("origin")
.ok_or_else(|| err!(Request(BadJson("Event missing origin field."))))?,
)
.expect("CanonicalJson is valid json value"),
)
let origin: OwnedServerName = serde_json::from_value(serde_json::to_value(
value
.get("origin")
.ok_or_else(|| err!(Request(BadJson("Event missing origin field."))))?,
)?)
.map_err(|e| {
err!(Request(BadJson(warn!("Origin field in event is not a valid server name: {e}"))))
})?;
@ -1818,9 +1818,11 @@ pub async fn leave_room(
blurhash: None,
};
if services.rooms.metadata.is_banned(room_id).await
|| services.rooms.metadata.is_disabled(room_id).await
{
let is_banned = services.rooms.metadata.is_banned(room_id);
let is_disabled = services.rooms.metadata.is_disabled(room_id);
pin_mut!(is_banned, is_disabled);
if is_banned.or(is_disabled).await {
// the room is banned/disabled, the room must be rejected locally since we
// cant/dont want to federate with this server
services
@ -1840,18 +1842,21 @@ pub async fn leave_room(
return Ok(());
}
// Ask a remote server if we don't have this room and are not knocking on it
if !services
let dont_have_room = services
.rooms
.state_cache
.server_in_room(services.globals.server_name(), room_id)
.await && !services
.eq(&false);
let not_knocked = services
.rooms
.state_cache
.is_knocked(user_id, room_id)
.await
{
if let Err(e) = remote_leave_room(services, user_id, room_id).await {
.eq(&false);
// Ask a remote server if we don't have this room and are not knocking on it
if dont_have_room.and(not_knocked).await {
if let Err(e) = remote_leave_room(services, user_id, room_id).boxed().await {
warn!(%user_id, "Failed to leave room {room_id} remotely: {e}");
// Don't tell the client about this error
}
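
The membership changes replace the `join4(...)` plus `||` chains with boolean-future combinators from conduwuit's utils: each check stays a future, `pin_mut!` pins them where needed, `or`/`and` compose them, and `eq(&false)` (from `ReadyEqExt`) turns a comparison such as "the server is not in this room" into a boolean future of the same shape. A condensed excerpt of that style, pulled together from the hunks above (the surrounding handlers are omitted, so this is a fragment rather than a compilable unit):

```rust
use conduwuit::utils::{FutureBoolExt, future::ReadyEqExt};
use futures::pin_mut;

// Ban/disable checks are combined as futures instead of being awaited
// individually inside an if-expression.
let is_banned = services.rooms.metadata.is_banned(room_id);
let is_disabled = services.rooms.metadata.is_disabled(room_id);
pin_mut!(is_banned, is_disabled);

if is_banned.or(is_disabled).await {
	// reject the membership change locally, as leave_room does above
}

// eq(&false) compares a future's output against a value, so the two
// preconditions for a remote fallback compose with and().
let dont_have_room = services
	.rooms
	.state_cache
	.server_in_room(services.globals.server_name(), room_id)
	.eq(&false);
let not_knocked = services
	.rooms
	.state_cache
	.is_knocked(user_id, room_id)
	.eq(&false);

if dont_have_room.and(not_knocked).await {
	// ask a remote server to perform the leave on our behalf
}
```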

View file

@ -5,16 +5,12 @@ mod v5;
use conduwuit::{
Error, PduCount, Result,
matrix::pdu::PduEvent,
utils::{
IterStream,
stream::{BroadbandExt, ReadyExt, TryIgnore},
},
utils::stream::{BroadbandExt, ReadyExt, TryIgnore},
};
use conduwuit_service::Services;
use futures::{StreamExt, pin_mut};
use ruma::{
RoomId, UserId,
directory::RoomTypeFilter,
events::TimelineEventType::{
self, Beacon, CallInvite, PollStart, RoomEncrypted, RoomMessage, Sticker,
},
@ -87,33 +83,3 @@ async fn share_encrypted_room(
})
.await
}
pub(crate) async fn filter_rooms<'a>(
services: &Services,
rooms: &[&'a RoomId],
filter: &[RoomTypeFilter],
negate: bool,
) -> Vec<&'a RoomId> {
rooms
.iter()
.stream()
.filter_map(|r| async move {
let room_type = services.rooms.state_accessor.get_room_type(r).await;
if room_type.as_ref().is_err_and(|e| !e.is_not_found()) {
return None;
}
let room_type_filter = RoomTypeFilter::from(room_type.ok());
let include = if negate {
!filter.contains(&room_type_filter)
} else {
filter.is_empty() || filter.contains(&room_type_filter)
};
include.then_some(r)
})
.collect()
.await
}

View file

@ -14,8 +14,8 @@ use conduwuit::{
pair_of, ref_at,
result::FlatOk,
utils::{
self, BoolExt, IterStream, ReadyExt, TryFutureExtExt,
future::OptionStream,
self, BoolExt, FutureBoolExt, IterStream, ReadyExt, TryFutureExtExt,
future::{OptionStream, ReadyEqExt},
math::ruma_from_u64,
stream::{BroadbandExt, Tools, TryExpect, WidebandExt},
},
@ -32,6 +32,7 @@ use conduwuit_service::{
use futures::{
FutureExt, StreamExt, TryFutureExt, TryStreamExt,
future::{OptionFuture, join, join3, join4, join5, try_join, try_join4},
pin_mut,
};
use ruma::{
DeviceId, EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId,
@ -433,10 +434,14 @@ async fn handle_left_room(
return Ok(None);
}
if !services.rooms.metadata.exists(room_id).await
|| services.rooms.metadata.is_disabled(room_id).await
|| services.rooms.metadata.is_banned(room_id).await
{
let is_not_found = services.rooms.metadata.exists(room_id).eq(&false);
let is_disabled = services.rooms.metadata.is_disabled(room_id);
let is_banned = services.rooms.metadata.is_banned(room_id);
pin_mut!(is_not_found, is_disabled, is_banned);
if is_not_found.or(is_disabled).or(is_banned).await {
// This is just a rejected invite, not a room we know
// Insert a leave event anyway for the client
let event = PduEvent {

View file

@ -6,23 +6,27 @@ use std::{
use axum::extract::State;
use conduwuit::{
Error, PduCount, PduEvent, Result, debug, error, extract_variant,
Err, Error, PduCount, PduEvent, Result, debug, error, extract_variant,
matrix::TypeStateKey,
utils::{
BoolExt, IterStream, ReadyExt, TryFutureExtExt,
math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated},
},
warn,
};
use conduwuit_service::{
Services,
rooms::read_receipt::pack_receipts,
sync::{into_db_key, into_snake_key},
};
use futures::{FutureExt, StreamExt, TryFutureExt};
use ruma::{
MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId,
api::client::{
error::ErrorKind,
sync::sync_events::{
self, DeviceLists, UnreadNotificationsCount,
v4::{SlidingOp, SlidingSyncRoomHero},
},
api::client::sync::sync_events::{
self, DeviceLists, UnreadNotificationsCount,
v4::{SlidingOp, SlidingSyncRoomHero},
},
directory::RoomTypeFilter,
events::{
AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType,
TimelineEventType::*,
@ -31,15 +35,15 @@ use ruma::{
serde::Raw,
uint,
};
use service::rooms::read_receipt::pack_receipts;
use super::{load_timeline, share_encrypted_room};
use crate::{
Ruma,
client::{DEFAULT_BUMP_TYPES, filter_rooms, ignored_filter, sync::v5::TodoRooms},
client::{DEFAULT_BUMP_TYPES, ignored_filter},
};
pub(crate) const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync";
type TodoRooms = BTreeMap<OwnedRoomId, (BTreeSet<TypeStateKey>, usize, u64)>;
const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync";
/// POST `/_matrix/client/unstable/org.matrix.msc3575/sync`
///
@ -50,10 +54,11 @@ pub(crate) async fn sync_events_v4_route(
) -> Result<sync_events::v4::Response> {
debug_assert!(DEFAULT_BUMP_TYPES.is_sorted(), "DEFAULT_BUMP_TYPES is not sorted");
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
let sender_device = body.sender_device.expect("user is authenticated");
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
let mut body = body.body;
// Setup watchers, so if there's no response, we can wait for them
let watcher = services.sync.watch(sender_user, &sender_device);
let watcher = services.sync.watch(sender_user, sender_device);
let next_batch = services.globals.next_count()?;
@ -68,33 +73,21 @@ pub(crate) async fn sync_events_v4_route(
.and_then(|string| string.parse().ok())
.unwrap_or(0);
if globalsince != 0
&& !services
.sync
.remembered(sender_user.clone(), sender_device.clone(), conn_id.clone())
{
let db_key = into_db_key(sender_user, sender_device, conn_id.clone());
if globalsince != 0 && !services.sync.remembered(&db_key) {
debug!("Restarting sync stream because it was gone from the database");
return Err(Error::Request(
ErrorKind::UnknownPos,
"Connection data lost since last time".into(),
http::StatusCode::BAD_REQUEST,
));
return Err!(Request(UnknownPos("Connection data lost since last time")));
}
if globalsince == 0 {
services.sync.forget_sync_request_connection(
sender_user.clone(),
sender_device.clone(),
conn_id.clone(),
);
services.sync.forget_sync_request_connection(&db_key);
}
// Get sticky parameters from cache
let known_rooms = services.sync.update_sync_request_with_cache(
sender_user.clone(),
sender_device.clone(),
&mut body,
);
let snake_key = into_snake_key(sender_user, sender_device, conn_id.clone());
let known_rooms = services
.sync
.update_sync_request_with_cache(&snake_key, &mut body);
let all_joined_rooms: Vec<_> = services
.rooms
@ -136,7 +129,7 @@ pub(crate) async fn sync_events_v4_route(
if body.extensions.to_device.enabled.unwrap_or(false) {
services
.users
.remove_to_device_events(sender_user, &sender_device, globalsince)
.remove_to_device_events(sender_user, sender_device, globalsince)
.await;
}
@ -261,7 +254,7 @@ pub(crate) async fn sync_events_v4_route(
if let Some(Ok(user_id)) =
pdu.state_key.as_deref().map(UserId::parse)
{
if user_id == *sender_user {
if user_id == sender_user {
continue;
}
@ -299,7 +292,7 @@ pub(crate) async fn sync_events_v4_route(
.state_cache
.room_members(room_id)
// Don't send key updates from the sender to the sender
.ready_filter(|user_id| sender_user != user_id)
.ready_filter(|&user_id| sender_user != user_id)
// Only send keys if the sender doesn't share an encrypted room with the target
// already
.filter_map(|user_id| {
@ -425,10 +418,9 @@ pub(crate) async fn sync_events_v4_route(
});
if let Some(conn_id) = &body.conn_id {
let db_key = into_db_key(sender_user, sender_device, conn_id);
services.sync.update_sync_known_rooms(
sender_user,
&sender_device,
conn_id.clone(),
&db_key,
list_id.clone(),
new_known_rooms,
globalsince,
@ -478,23 +470,20 @@ pub(crate) async fn sync_events_v4_route(
}
if let Some(conn_id) = &body.conn_id {
let db_key = into_db_key(sender_user, sender_device, conn_id);
services.sync.update_sync_known_rooms(
sender_user,
&sender_device,
conn_id.clone(),
&db_key,
"subscriptions".to_owned(),
known_subscription_rooms,
globalsince,
);
}
if let Some(conn_id) = &body.conn_id {
services.sync.update_sync_subscriptions(
sender_user.clone(),
sender_device.clone(),
conn_id.clone(),
body.room_subscriptions,
);
if let Some(conn_id) = body.conn_id.clone() {
let db_key = into_db_key(sender_user, sender_device, conn_id);
services
.sync
.update_sync_subscriptions(&db_key, body.room_subscriptions);
}
let mut rooms = BTreeMap::new();
@ -648,7 +637,7 @@ pub(crate) async fn sync_events_v4_route(
.rooms
.state_cache
.room_members(room_id)
.ready_filter(|member| member != sender_user)
.ready_filter(|&member| member != sender_user)
.filter_map(|user_id| {
services
.rooms
@ -787,7 +776,7 @@ pub(crate) async fn sync_events_v4_route(
.users
.get_to_device_events(
sender_user,
&sender_device,
sender_device,
Some(globalsince),
Some(next_batch),
)
@ -805,7 +794,7 @@ pub(crate) async fn sync_events_v4_route(
},
device_one_time_keys_count: services
.users
.count_one_time_keys(sender_user, &sender_device)
.count_one_time_keys(sender_user, sender_device)
.await,
// Fallback keys are not yet supported
device_unused_fallback_key_types: None,
@ -817,3 +806,33 @@ pub(crate) async fn sync_events_v4_route(
delta_token: None,
})
}
async fn filter_rooms<'a>(
services: &Services,
rooms: &[&'a RoomId],
filter: &[RoomTypeFilter],
negate: bool,
) -> Vec<&'a RoomId> {
rooms
.iter()
.stream()
.filter_map(|r| async move {
let room_type = services.rooms.state_accessor.get_room_type(r).await;
if room_type.as_ref().is_err_and(|e| !e.is_not_found()) {
return None;
}
let room_type_filter = RoomTypeFilter::from(room_type.ok());
let include = if negate {
!filter.contains(&room_type_filter)
} else {
filter.is_empty() || filter.contains(&room_type_filter)
};
include.then_some(r)
})
.collect()
.await
}
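A usage sketch for the relocated helper above (filter_rooms, Services, RoomId and RoomTypeFilter are the real names; the bindings are illustrative): pass negate = true to drop every room whose type appears in the filter list, which is how the sliding-sync list handling calls it.

use ruma::{RoomId, directory::RoomTypeFilter};

// `services: &Services` and `joined: Vec<&RoomId>` come from the route handler.
let excluded = [RoomTypeFilter::Space];
let without_spaces: Vec<&RoomId> = filter_rooms(services, &joined, &excluded, true).await;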

View file

@ -1,31 +1,35 @@
use std::{
cmp::{self, Ordering},
collections::{BTreeMap, BTreeSet, HashMap, HashSet},
ops::Deref,
time::Duration,
};
use axum::extract::State;
use conduwuit::{
Error, Result, debug, error, extract_variant,
Err, Error, Result, error, extract_variant, is_equal_to,
matrix::{
TypeStateKey,
pdu::{PduCount, PduEvent},
},
trace,
utils::{
BoolExt, IterStream, ReadyExt, TryFutureExtExt,
BoolExt, FutureBoolExt, IterStream, ReadyExt, TryFutureExtExt,
future::ReadyEqExt,
math::{ruma_from_usize, usize_from_ruma},
},
warn,
};
use conduwuit_service::rooms::read_receipt::pack_receipts;
use futures::{FutureExt, StreamExt, TryFutureExt};
use conduwuit_service::{Services, rooms::read_receipt::pack_receipts, sync::into_snake_key};
use futures::{
FutureExt, Stream, StreamExt, TryFutureExt,
future::{OptionFuture, join3, try_join4},
pin_mut,
};
use ruma::{
DeviceId, OwnedEventId, OwnedRoomId, RoomId, UInt, UserId,
api::client::{
error::ErrorKind,
sync::sync_events::{self, DeviceLists, UnreadNotificationsCount},
},
api::client::sync::sync_events::{self, DeviceLists, UnreadNotificationsCount},
directory::RoomTypeFilter,
events::{
AnyRawAccountDataEvent, AnySyncEphemeralRoomEvent, StateEventType, TimelineEventType,
room::member::{MembershipState, RoomMemberEventContent},
@ -34,13 +38,15 @@ use ruma::{
uint,
};
use super::{filter_rooms, share_encrypted_room};
use super::share_encrypted_room;
use crate::{
Ruma,
client::{DEFAULT_BUMP_TYPES, ignored_filter, sync::load_timeline},
};
type SyncInfo<'a> = (&'a UserId, &'a DeviceId, u64, &'a sync_events::v5::Request);
type TodoRooms = BTreeMap<OwnedRoomId, (BTreeSet<TypeStateKey>, usize, u64)>;
type KnownRooms = BTreeMap<String, BTreeMap<OwnedRoomId, u64>>;
/// `POST /_matrix/client/unstable/org.matrix.simplified_msc3575/sync`
/// ([MSC4186])
@ -53,7 +59,7 @@ type SyncInfo<'a> = (&'a UserId, &'a DeviceId, u64, &'a sync_events::v5::Request
/// [MSC3575]: https://github.com/matrix-org/matrix-spec-proposals/pull/3575
/// [MSC4186]: https://github.com/matrix-org/matrix-spec-proposals/pull/4186
pub(crate) async fn sync_events_v5_route(
State(services): State<crate::State>,
State(ref services): State<crate::State>,
body: Ruma<sync_events::v5::Request>,
) -> Result<sync_events::v5::Response> {
debug_assert!(DEFAULT_BUMP_TYPES.is_sorted(), "DEFAULT_BUMP_TYPES is not sorted");
@ -74,95 +80,95 @@ pub(crate) async fn sync_events_v5_route(
.and_then(|string| string.parse().ok())
.unwrap_or(0);
if globalsince != 0
&& !services.sync.snake_connection_cached(
sender_user.clone(),
sender_device.clone(),
conn_id.clone(),
) {
debug!("Restarting sync stream because it was gone from the database");
return Err(Error::Request(
ErrorKind::UnknownPos,
"Connection data lost since last time".into(),
http::StatusCode::BAD_REQUEST,
));
let snake_key = into_snake_key(sender_user, sender_device, conn_id);
if globalsince != 0 && !services.sync.snake_connection_cached(&snake_key) {
return Err!(Request(UnknownPos(
"Connection data unknown to server; restarting sync stream."
)));
}
// Client / User requested an initial sync
if globalsince == 0 {
services.sync.forget_snake_sync_connection(
sender_user.clone(),
sender_device.clone(),
conn_id.clone(),
);
services.sync.forget_snake_sync_connection(&snake_key);
}
// Get sticky parameters from cache
let known_rooms = services.sync.update_snake_sync_request_with_cache(
sender_user.clone(),
sender_device.clone(),
&mut body,
);
let known_rooms = services
.sync
.update_snake_sync_request_with_cache(&snake_key, &mut body);
let all_joined_rooms: Vec<_> = services
let all_joined_rooms = services
.rooms
.state_cache
.rooms_joined(sender_user)
.map(ToOwned::to_owned)
.collect()
.await;
.collect::<Vec<OwnedRoomId>>();
let all_invited_rooms: Vec<_> = services
let all_invited_rooms = services
.rooms
.state_cache
.rooms_invited(sender_user)
.map(|r| r.0)
.collect()
.await;
.collect::<Vec<OwnedRoomId>>();
let all_knocked_rooms: Vec<_> = services
let all_knocked_rooms = services
.rooms
.state_cache
.rooms_knocked(sender_user)
.map(|r| r.0)
.collect()
.await;
.collect::<Vec<OwnedRoomId>>();
let all_rooms: Vec<&RoomId> = all_joined_rooms
.iter()
.map(AsRef::as_ref)
.chain(all_invited_rooms.iter().map(AsRef::as_ref))
.chain(all_knocked_rooms.iter().map(AsRef::as_ref))
.collect();
let (all_joined_rooms, all_invited_rooms, all_knocked_rooms) =
join3(all_joined_rooms, all_invited_rooms, all_knocked_rooms).await;
let all_joined_rooms = all_joined_rooms.iter().map(AsRef::as_ref).collect();
let all_invited_rooms = all_invited_rooms.iter().map(AsRef::as_ref).collect();
let all_joined_rooms = all_joined_rooms.iter().map(AsRef::as_ref);
let all_invited_rooms = all_invited_rooms.iter().map(AsRef::as_ref);
let all_knocked_rooms = all_knocked_rooms.iter().map(AsRef::as_ref);
let all_rooms = all_joined_rooms
.clone()
.chain(all_invited_rooms.clone())
.chain(all_knocked_rooms.clone());
let pos = next_batch.clone().to_string();
let mut todo_rooms: TodoRooms = BTreeMap::new();
let sync_info: SyncInfo<'_> = (sender_user, sender_device, globalsince, &body);
let account_data = collect_account_data(services, sync_info).map(Ok);
let e2ee = collect_e2ee(services, sync_info, all_joined_rooms.clone());
let to_device = collect_to_device(services, sync_info, next_batch).map(Ok);
let receipts = collect_receipts(services).map(Ok);
let (account_data, e2ee, to_device, receipts) =
try_join4(account_data, e2ee, to_device, receipts).await?;
let extensions = sync_events::v5::response::Extensions {
account_data,
e2ee,
to_device,
receipts,
typing: sync_events::v5::response::Typing::default(),
};
let mut response = sync_events::v5::Response {
txn_id: body.txn_id.clone(),
pos,
lists: BTreeMap::new(),
rooms: BTreeMap::new(),
extensions: sync_events::v5::response::Extensions {
account_data: collect_account_data(services, sync_info).await,
e2ee: collect_e2ee(services, sync_info, &all_joined_rooms).await?,
to_device: collect_to_device(services, sync_info, next_batch).await,
receipts: collect_receipts(services).await,
typing: sync_events::v5::response::Typing::default(),
},
extensions,
};
handle_lists(
services,
sync_info,
&all_invited_rooms,
&all_joined_rooms,
&all_rooms,
all_invited_rooms.clone(),
all_joined_rooms.clone(),
all_rooms,
&mut todo_rooms,
&known_rooms,
&mut response,
@ -175,7 +181,7 @@ pub(crate) async fn sync_events_v5_route(
services,
sender_user,
next_batch,
&all_invited_rooms,
all_invited_rooms.clone(),
&todo_rooms,
&mut response,
&body,
@ -200,31 +206,33 @@ pub(crate) async fn sync_events_v5_route(
}
trace!(
rooms=?response.rooms.len(),
account_data=?response.extensions.account_data.rooms.len(),
receipts=?response.extensions.receipts.rooms.len(),
rooms = ?response.rooms.len(),
account_data = ?response.extensions.account_data.rooms.len(),
receipts = ?response.extensions.receipts.rooms.len(),
"responding to request with"
);
Ok(response)
}
type KnownRooms = BTreeMap<String, BTreeMap<OwnedRoomId, u64>>;
pub(crate) type TodoRooms = BTreeMap<OwnedRoomId, (BTreeSet<TypeStateKey>, usize, u64)>;
async fn fetch_subscriptions(
services: crate::State,
services: &Services,
(sender_user, sender_device, globalsince, body): SyncInfo<'_>,
known_rooms: &KnownRooms,
todo_rooms: &mut TodoRooms,
) {
let mut known_subscription_rooms = BTreeSet::new();
for (room_id, room) in &body.room_subscriptions {
if !services.rooms.metadata.exists(room_id).await
|| services.rooms.metadata.is_disabled(room_id).await
|| services.rooms.metadata.is_banned(room_id).await
{
let not_exists = services.rooms.metadata.exists(room_id).eq(&false);
let is_disabled = services.rooms.metadata.is_disabled(room_id);
let is_banned = services.rooms.metadata.is_banned(room_id);
pin_mut!(not_exists, is_disabled, is_banned);
if not_exists.or(is_disabled).or(is_banned).await {
continue;
}
let todo_room =
todo_rooms
.entry(room_id.clone())
@ -254,11 +262,10 @@ async fn fetch_subscriptions(
// body.room_subscriptions.remove(&r);
//}
if let Some(conn_id) = &body.conn_id {
if let Some(conn_id) = body.conn_id.clone() {
let snake_key = into_snake_key(sender_user, sender_device, conn_id);
services.sync.update_snake_sync_known_rooms(
sender_user,
sender_device,
conn_id.clone(),
&snake_key,
"subscriptions".to_owned(),
known_subscription_rooms,
globalsince,
@ -267,27 +274,39 @@ async fn fetch_subscriptions(
}
#[allow(clippy::too_many_arguments)]
async fn handle_lists<'a>(
services: crate::State,
async fn handle_lists<'a, Rooms, AllRooms>(
services: &Services,
(sender_user, sender_device, globalsince, body): SyncInfo<'_>,
all_invited_rooms: &Vec<&'a RoomId>,
all_joined_rooms: &Vec<&'a RoomId>,
all_rooms: &Vec<&'a RoomId>,
all_invited_rooms: Rooms,
all_joined_rooms: Rooms,
all_rooms: AllRooms,
todo_rooms: &'a mut TodoRooms,
known_rooms: &'a KnownRooms,
response: &'_ mut sync_events::v5::Response,
) -> KnownRooms {
) -> KnownRooms
where
Rooms: Iterator<Item = &'a RoomId> + Clone + Send + 'a,
AllRooms: Iterator<Item = &'a RoomId> + Clone + Send + 'a,
{
for (list_id, list) in &body.lists {
let active_rooms = match list.filters.clone().and_then(|f| f.is_invite) {
| Some(true) => all_invited_rooms,
| Some(false) => all_joined_rooms,
| None => all_rooms,
let active_rooms: Vec<_> = match list.filters.as_ref().and_then(|f| f.is_invite) {
| None => all_rooms.clone().collect(),
| Some(true) => all_invited_rooms.clone().collect(),
| Some(false) => all_joined_rooms.clone().collect(),
};
let active_rooms = match list.filters.clone().map(|f| f.not_room_types) {
| Some(filter) if filter.is_empty() => active_rooms,
| Some(value) => &filter_rooms(&services, active_rooms, &value, true).await,
let active_rooms = match list.filters.as_ref().map(|f| &f.not_room_types) {
| None => active_rooms,
| Some(filter) if filter.is_empty() => active_rooms,
| Some(value) =>
filter_rooms(
services,
value,
&true,
active_rooms.iter().stream().map(Deref::deref),
)
.collect()
.await,
};
let mut new_known_rooms: BTreeSet<OwnedRoomId> = BTreeSet::new();
@ -305,6 +324,7 @@ async fn handle_lists<'a>(
let new_rooms: BTreeSet<OwnedRoomId> =
room_ids.clone().into_iter().map(From::from).collect();
new_known_rooms.extend(new_rooms);
//new_known_rooms.extend(room_ids..cloned());
for room_id in room_ids {
@ -340,29 +360,32 @@ async fn handle_lists<'a>(
count: ruma_from_usize(active_rooms.len()),
});
if let Some(conn_id) = &body.conn_id {
if let Some(conn_id) = body.conn_id.clone() {
let snake_key = into_snake_key(sender_user, sender_device, conn_id);
services.sync.update_snake_sync_known_rooms(
sender_user,
sender_device,
conn_id.clone(),
&snake_key,
list_id.clone(),
new_known_rooms,
globalsince,
);
}
}
BTreeMap::default()
}
async fn process_rooms(
services: crate::State,
async fn process_rooms<'a, Rooms>(
services: &Services,
sender_user: &UserId,
next_batch: u64,
all_invited_rooms: &[&RoomId],
all_invited_rooms: Rooms,
todo_rooms: &TodoRooms,
response: &mut sync_events::v5::Response,
body: &sync_events::v5::Request,
) -> Result<BTreeMap<OwnedRoomId, sync_events::v5::response::Room>> {
) -> Result<BTreeMap<OwnedRoomId, sync_events::v5::response::Room>>
where
Rooms: Iterator<Item = &'a RoomId> + Clone + Send + 'a,
{
let mut rooms = BTreeMap::new();
for (room_id, (required_state_request, timeline_limit, roomsince)) in todo_rooms {
let roomsincecount = PduCount::Normal(*roomsince);
@ -371,7 +394,7 @@ async fn process_rooms(
let mut invite_state = None;
let (timeline_pdus, limited);
let new_room_id: &RoomId = (*room_id).as_ref();
if all_invited_rooms.contains(&new_room_id) {
if all_invited_rooms.clone().any(is_equal_to!(new_room_id)) {
// TODO: figure out a timestamp we can use for remote invites
invite_state = services
.rooms
@ -383,7 +406,7 @@ async fn process_rooms(
(timeline_pdus, limited) = (Vec::new(), true);
} else {
(timeline_pdus, limited) = match load_timeline(
&services,
services,
sender_user,
room_id,
roomsincecount,
@ -416,18 +439,17 @@ async fn process_rooms(
.rooms
.read_receipt
.last_privateread_update(sender_user, room_id)
.await > *roomsince;
.await;
let private_read_event = if last_privateread_update {
services
.rooms
.read_receipt
.private_read_get(room_id, sender_user)
.await
.ok()
} else {
None
};
let private_read_event: OptionFuture<_> = (last_privateread_update > *roomsince)
.then(|| {
services
.rooms
.read_receipt
.private_read_get(room_id, sender_user)
.ok()
})
.into();
let mut receipts: Vec<Raw<AnySyncEphemeralRoomEvent>> = services
.rooms
@ -443,7 +465,7 @@ async fn process_rooms(
.collect()
.await;
if let Some(private_read_event) = private_read_event {
if let Some(private_read_event) = private_read_event.await.flatten() {
receipts.push(private_read_event);
}
@ -492,7 +514,7 @@ async fn process_rooms(
let room_events: Vec<_> = timeline_pdus
.iter()
.stream()
.filter_map(|item| ignored_filter(&services, item.clone(), sender_user))
.filter_map(|item| ignored_filter(services, item.clone(), sender_user))
.map(|(_, pdu)| pdu.to_sync_room_event())
.collect()
.await;
@ -644,7 +666,7 @@ async fn process_rooms(
Ok(rooms)
}
async fn collect_account_data(
services: crate::State,
services: &Services,
(sender_user, _, globalsince, body): (&UserId, &DeviceId, u64, &sync_events::v5::Request),
) -> sync_events::v5::response::AccountData {
let mut account_data = sync_events::v5::response::AccountData {
@ -680,16 +702,19 @@ async fn collect_account_data(
account_data
}
async fn collect_e2ee<'a>(
services: crate::State,
async fn collect_e2ee<'a, Rooms>(
services: &Services,
(sender_user, sender_device, globalsince, body): (
&UserId,
&DeviceId,
u64,
&sync_events::v5::Request,
),
all_joined_rooms: &'a Vec<&'a RoomId>,
) -> Result<sync_events::v5::response::E2EE> {
all_joined_rooms: Rooms,
) -> Result<sync_events::v5::response::E2EE>
where
Rooms: Iterator<Item = &'a RoomId> + Send + 'a,
{
if !body.extensions.e2ee.enabled.unwrap_or(false) {
return Ok(sync_events::v5::response::E2EE::default());
}
@ -790,7 +815,7 @@ async fn collect_e2ee<'a>(
| MembershipState::Join => {
// A new user joined an encrypted room
if !share_encrypted_room(
&services,
services,
sender_user,
user_id,
Some(room_id),
@ -823,7 +848,7 @@ async fn collect_e2ee<'a>(
// Only send keys if the sender doesn't share an encrypted room with the target
// already
.filter_map(|user_id| {
share_encrypted_room(&services, sender_user, user_id, Some(room_id))
share_encrypted_room(services, sender_user, user_id, Some(room_id))
.map(|res| res.or_some(user_id.to_owned()))
})
.collect::<Vec<_>>()
@ -846,7 +871,7 @@ async fn collect_e2ee<'a>(
for user_id in left_encrypted_users {
let dont_share_encrypted_room =
!share_encrypted_room(&services, sender_user, &user_id, None).await;
!share_encrypted_room(services, sender_user, &user_id, None).await;
// If the user doesn't share an encrypted room with the target anymore, we need
// to tell them
@ -856,20 +881,22 @@ async fn collect_e2ee<'a>(
}
Ok(sync_events::v5::response::E2EE {
device_lists: DeviceLists {
changed: device_list_changes.into_iter().collect(),
left: device_list_left.into_iter().collect(),
},
device_unused_fallback_key_types: None,
device_one_time_keys_count: services
.users
.count_one_time_keys(sender_user, sender_device)
.await,
device_unused_fallback_key_types: None,
device_lists: DeviceLists {
changed: device_list_changes.into_iter().collect(),
left: device_list_left.into_iter().collect(),
},
})
}
async fn collect_to_device(
services: crate::State,
services: &Services,
(sender_user, sender_device, globalsince, body): SyncInfo<'_>,
next_batch: u64,
) -> Option<sync_events::v5::response::ToDevice> {
@ -892,7 +919,35 @@ async fn collect_to_device(
})
}
async fn collect_receipts(_services: crate::State) -> sync_events::v5::response::Receipts {
async fn collect_receipts(_services: &Services) -> sync_events::v5::response::Receipts {
sync_events::v5::response::Receipts { rooms: BTreeMap::new() }
// TODO: get explicitly requested read receipts
}
fn filter_rooms<'a, Rooms>(
services: &'a Services,
filter: &'a [RoomTypeFilter],
negate: &'a bool,
rooms: Rooms,
) -> impl Stream<Item = &'a RoomId> + Send + 'a
where
Rooms: Stream<Item = &'a RoomId> + Send + 'a,
{
rooms.filter_map(async |room_id| {
let room_type = services.rooms.state_accessor.get_room_type(room_id).await;
if room_type.as_ref().is_err_and(|e| !e.is_not_found()) {
return None;
}
let room_type_filter = RoomTypeFilter::from(room_type.ok());
let include = if *negate {
!filter.contains(&room_type_filter)
} else {
filter.is_empty() || filter.contains(&room_type_filter)
};
include.then_some(room_id)
})
}
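A sketch of driving the stream-based variant above (filter_rooms, IterStream and the ruma types are real; the bindings are illustrative): the caller hands in any Stream of &RoomId, usually built from an iterator via .stream(), and collects whatever survives the filter.

use conduwuit::utils::IterStream;
use futures::StreamExt;
use ruma::{OwnedRoomId, RoomId, directory::RoomTypeFilter};

// `joined_rooms: Vec<OwnedRoomId>` gathered earlier in the route.
let not_room_types = [RoomTypeFilter::Space];
let kept: Vec<&RoomId> = filter_rooms(
        services,
        &not_room_types,
        &true, // negate: drop rooms whose type is listed above
        joined_rooms.iter().map(AsRef::as_ref).stream(),
    )
    .collect()
    .await;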

View file

@ -1,7 +1,10 @@
use axum::extract::State;
use conduwuit::{
Result,
utils::{future::BoolExt, stream::BroadbandExt},
utils::{
future::BoolExt,
stream::{BroadbandExt, ReadyExt},
},
};
use futures::{FutureExt, StreamExt, pin_mut};
use ruma::{
@ -30,29 +33,21 @@ pub(crate) async fn search_users_route(
.map_or(LIMIT_DEFAULT, usize::from)
.min(LIMIT_MAX);
let search_term = body.search_term.to_lowercase();
let mut users = services
.users
.stream()
.ready_filter(|user_id| user_id.as_str().to_lowercase().contains(&search_term))
.map(ToOwned::to_owned)
.broad_filter_map(async |user_id| {
let user = search_users::v3::User {
user_id: user_id.clone(),
display_name: services.users.displayname(&user_id).await.ok(),
avatar_url: services.users.avatar_url(&user_id).await.ok(),
};
let display_name = services.users.displayname(&user_id).await.ok();
let user_id_matches = user
.user_id
.as_str()
.to_lowercase()
.contains(&body.search_term.to_lowercase());
let display_name_matches = display_name
.as_deref()
.map(str::to_lowercase)
.is_some_and(|display_name| display_name.contains(&search_term));
let user_displayname_matches = user.display_name.as_ref().is_some_and(|name| {
name.to_lowercase()
.contains(&body.search_term.to_lowercase())
});
if !user_id_matches && !user_displayname_matches {
if !display_name_matches {
return None;
}
@ -61,11 +56,11 @@ pub(crate) async fn search_users_route(
.state_cache
.rooms_joined(&user_id)
.map(ToOwned::to_owned)
.any(|room| async move {
.broad_any(async |room_id| {
services
.rooms
.state_accessor
.get_join_rules(&room)
.get_join_rules(&room_id)
.map(|rule| matches!(rule, JoinRule::Public))
.await
});
@ -76,8 +71,14 @@ pub(crate) async fn search_users_route(
.user_sees_user(sender_user, &user_id);
pin_mut!(user_in_public_room, user_sees_user);
user_in_public_room.or(user_sees_user).await.then_some(user)
user_in_public_room
.or(user_sees_user)
.await
.then_some(search_users::v3::User {
user_id: user_id.clone(),
display_name,
avatar_url: services.users.avatar_url(&user_id).await.ok(),
})
});
let results = users.by_ref().take(limit).collect().await;

View file

@ -17,17 +17,24 @@ crate-type = [
]
[features]
release_max_log_level = [
"tracing/max_level_trace",
"tracing/release_max_level_info",
"log/max_level_trace",
"log/release_max_level_info",
brotli_compression = [
"reqwest/brotli",
]
conduwuit_mods = [
"dep:libloading"
]
gzip_compression = [
"reqwest/gzip",
]
hardened_malloc = [
"dep:hardened_malloc-rs"
]
jemalloc = [
"dep:tikv-jemalloc-sys",
"dep:tikv-jemalloc-ctl",
"dep:tikv-jemallocator",
]
jemalloc_conf = []
jemalloc_prof = [
"tikv-jemalloc-sys/profiling",
]
@ -36,24 +43,17 @@ jemalloc_stats = [
"tikv-jemalloc-ctl/stats",
"tikv-jemallocator/stats",
]
jemalloc_conf = []
hardened_malloc = [
"dep:hardened_malloc-rs"
]
gzip_compression = [
"reqwest/gzip",
]
brotli_compression = [
"reqwest/brotli",
perf_measurements = []
release_max_log_level = [
"tracing/max_level_trace",
"tracing/release_max_level_info",
"log/max_level_trace",
"log/release_max_level_info",
]
sentry_telemetry = []
zstd_compression = [
"reqwest/zstd",
]
perf_measurements = []
sentry_telemetry = []
conduwuit_mods = [
"dep:libloading"
]
[dependencies]
argon2.workspace = true

View file

@ -12,6 +12,7 @@ pub use crate::{result::DebugInspect, utils::debug::*};
/// Log event at given level in debug-mode (when debug-assertions are enabled).
/// In release-mode it becomes DEBUG level, and possibly subject to elision.
#[macro_export]
#[collapse_debuginfo(yes)]
macro_rules! debug_event {
( $level:expr_2021, $($x:tt)+ ) => {
if $crate::debug::logging() {
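The #[collapse_debuginfo(yes)] attribute added to these logging macros makes rustc attribute all debug info from the macro expansion to the call site rather than to the lines inside the expansion, shrinking per-callsite debuginfo. A minimal, self-contained illustration of the attribute on any exported macro (not conduwuit code):

#[macro_export]
#[collapse_debuginfo(yes)]
macro_rules! log_twice {
    ($msg:expr) => {{
        // With collapse_debuginfo(yes), debug info for these lines points at the
        // macro call site instead of at the expansion inside the macro.
        println!("{}", $msg);
        println!("{}", $msg);
    }};
}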

View file

@ -33,6 +33,7 @@
//! option of replacing `error!` with `debug_error!`.
#[macro_export]
#[collapse_debuginfo(yes)]
macro_rules! Err {
($($args:tt)*) => {
Err($crate::err!($($args)*))
@ -40,6 +41,7 @@ macro_rules! Err {
}
#[macro_export]
#[collapse_debuginfo(yes)]
macro_rules! err {
(Request(Forbidden($level:ident!($($args:tt)+)))) => {{
let mut buf = String::new();
@ -109,6 +111,7 @@ macro_rules! err {
/// can share the same callsite metadata for the source of our Error and the
/// associated logging and tracing event dispatches.
#[macro_export]
#[collapse_debuginfo(yes)]
macro_rules! err_log {
($out:ident, $level:ident, $($fields:tt)+) => {{
use $crate::tracing::{

View file

@ -31,7 +31,7 @@ const ROUTER_MANIFEST: &'static str = ();
#[cargo_manifest(crate = "main")]
const MAIN_MANIFEST: &'static str = ();
/// Processed list of features access all project crates. This is generated from
/// Processed list of features across all project crates. This is generated from
/// the data in the MANIFEST strings and contains all possible project features.
/// For *enabled* features see the info::rustc module instead.
static FEATURES: OnceLock<Vec<String>> = OnceLock::new();

View file

@ -33,6 +33,7 @@ pub struct Log {
// the crate namespace like these.
#[macro_export]
#[collapse_debuginfo(yes)]
macro_rules! event {
( $level:expr_2021, $($x:tt)+ ) => { ::tracing::event!( $level, $($x)+ ) }
}

View file

@ -2,7 +2,6 @@ use std::{
borrow::Borrow,
fmt::{Debug, Display},
hash::Hash,
sync::Arc,
};
use ruma::{EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId, events::TimelineEventType};
@ -72,31 +71,3 @@ impl<T: Event> Event for &T {
fn redacts(&self) -> Option<&Self::Id> { (*self).redacts() }
}
impl<T: Event> Event for Arc<T> {
type Id = T::Id;
fn event_id(&self) -> &Self::Id { (**self).event_id() }
fn room_id(&self) -> &RoomId { (**self).room_id() }
fn sender(&self) -> &UserId { (**self).sender() }
fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { (**self).origin_server_ts() }
fn event_type(&self) -> &TimelineEventType { (**self).event_type() }
fn content(&self) -> &RawJsonValue { (**self).content() }
fn state_key(&self) -> Option<&str> { (**self).state_key() }
fn prev_events(&self) -> impl DoubleEndedIterator<Item = &Self::Id> + Send + '_ {
(**self).prev_events()
}
fn auth_events(&self) -> impl DoubleEndedIterator<Item = &Self::Id> + Send + '_ {
(**self).auth_events()
}
fn redacts(&self) -> Option<&Self::Id> { (**self).redacts() }
}

View file

@ -4,10 +4,7 @@ extern crate test;
use std::{
borrow::Borrow,
collections::{HashMap, HashSet},
sync::{
Arc,
atomic::{AtomicU64, Ordering::SeqCst},
},
sync::atomic::{AtomicU64, Ordering::SeqCst},
};
use futures::{future, future::ready};
@ -64,7 +61,7 @@ fn resolution_shallow_auth_chain(c: &mut test::Bencher) {
c.iter(|| async {
let ev_map = store.0.clone();
let state_sets = [&state_at_bob, &state_at_charlie];
let fetch = |id: OwnedEventId| ready(ev_map.get(&id).map(Arc::clone));
let fetch = |id: OwnedEventId| ready(ev_map.get(&id).clone());
let exists = |id: OwnedEventId| ready(ev_map.get(&id).is_some());
let auth_chain_sets: Vec<HashSet<_>> = state_sets
.iter()
@ -148,7 +145,7 @@ fn resolve_deeper_event_set(c: &mut test::Bencher) {
})
.collect();
let fetch = |id: OwnedEventId| ready(inner.get(&id).map(Arc::clone));
let fetch = |id: OwnedEventId| ready(inner.get(&id).clone());
let exists = |id: OwnedEventId| ready(inner.get(&id).is_some());
let _ = match state_res::resolve(
&RoomVersionId::V6,
@ -171,20 +168,20 @@ fn resolve_deeper_event_set(c: &mut test::Bencher) {
// IMPLEMENTATION DETAILS AHEAD
//
/////////////////////////////////////////////////////////////////////*/
struct TestStore<E: Event>(HashMap<OwnedEventId, Arc<E>>);
struct TestStore<E: Event>(HashMap<OwnedEventId, E>);
#[allow(unused)]
impl<E: Event> TestStore<E> {
fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> Result<Arc<E>> {
impl<E: Event + Clone> TestStore<E> {
fn get_event(&self, room_id: &RoomId, event_id: &EventId) -> Result<E> {
self.0
.get(event_id)
.map(Arc::clone)
.cloned()
.ok_or_else(|| Error::NotFound(format!("{} not found", event_id)))
}
/// Returns the events that correspond to the `event_ids` sorted in the same
/// order.
fn get_events(&self, room_id: &RoomId, event_ids: &[OwnedEventId]) -> Result<Vec<Arc<E>>> {
fn get_events(&self, room_id: &RoomId, event_ids: &[OwnedEventId]) -> Result<Vec<E>> {
let mut events = vec![];
for id in event_ids {
events.push(self.get_event(room_id, id)?);
@ -264,7 +261,7 @@ impl TestStore<PduEvent> {
&[],
);
let cre = create_event.event_id().to_owned();
self.0.insert(cre.clone(), Arc::clone(&create_event));
self.0.insert(cre.clone(), create_event.clone());
let alice_mem = to_pdu_event(
"IMA",
@ -276,7 +273,7 @@ impl TestStore<PduEvent> {
&[cre.clone()],
);
self.0
.insert(alice_mem.event_id().to_owned(), Arc::clone(&alice_mem));
.insert(alice_mem.event_id().to_owned(), alice_mem.clone());
let join_rules = to_pdu_event(
"IJR",
@ -383,7 +380,7 @@ fn to_pdu_event<S>(
content: Box<RawJsonValue>,
auth_events: &[S],
prev_events: &[S],
) -> Arc<PduEvent>
) -> PduEvent
where
S: AsRef<str>,
{
@ -407,7 +404,7 @@ where
.collect::<Vec<_>>();
let state_key = state_key.map(ToOwned::to_owned);
Arc::new(PduEvent {
PduEvent {
event_id: id.try_into().unwrap(),
rest: Pdu::RoomV3Pdu(RoomV3Pdu {
room_id: room_id().to_owned(),
@ -424,12 +421,12 @@ where
hashes: EventHash::new(String::new()),
signatures: Signatures::new(),
}),
})
}
}
// all graphs start with these input events
#[allow(non_snake_case)]
fn INITIAL_EVENTS() -> HashMap<OwnedEventId, Arc<PduEvent>> {
fn INITIAL_EVENTS() -> HashMap<OwnedEventId, PduEvent> {
vec![
to_pdu_event::<&EventId>(
"CREATE",
@ -511,7 +508,7 @@ fn INITIAL_EVENTS() -> HashMap<OwnedEventId, Arc<PduEvent>> {
// all graphs start with these input events
#[allow(non_snake_case)]
fn BAN_STATE_SET() -> HashMap<OwnedEventId, Arc<PduEvent>> {
fn BAN_STATE_SET() -> HashMap<OwnedEventId, PduEvent> {
vec![
to_pdu_event(
"PA",

View file

@ -1112,8 +1112,6 @@ fn verify_third_party_invite(
#[cfg(test)]
mod tests {
use std::sync::Arc;
use ruma::events::{
StateEventType, TimelineEventType,
room::{
@ -1143,7 +1141,7 @@ mod tests {
let auth_events = events
.values()
.map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev)))
.map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone()))
.collect::<StateMap<_>>();
let requester = to_pdu_event(
@ -1188,7 +1186,7 @@ mod tests {
let auth_events = events
.values()
.map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev)))
.map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone()))
.collect::<StateMap<_>>();
let requester = to_pdu_event(
@ -1233,7 +1231,7 @@ mod tests {
let auth_events = events
.values()
.map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev)))
.map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone()))
.collect::<StateMap<_>>();
let requester = to_pdu_event(
@ -1278,7 +1276,7 @@ mod tests {
let auth_events = events
.values()
.map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev)))
.map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone()))
.collect::<StateMap<_>>();
let requester = to_pdu_event(
@ -1340,7 +1338,7 @@ mod tests {
let auth_events = events
.values()
.map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev)))
.map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone()))
.collect::<StateMap<_>>();
let requester = to_pdu_event(
@ -1412,7 +1410,7 @@ mod tests {
let auth_events = events
.values()
.map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), Arc::clone(ev)))
.map(|ev| (ev.event_type().with_state_key(ev.state_key().unwrap()), ev.clone()))
.collect::<StateMap<_>>();
let requester = to_pdu_event(

View file

@ -15,11 +15,10 @@ use std::{
borrow::Borrow,
cmp::{Ordering, Reverse},
collections::{BinaryHeap, HashMap, HashSet},
fmt::Debug,
hash::{BuildHasher, Hash},
};
use futures::{Future, FutureExt, StreamExt, TryFutureExt, TryStreamExt, future, stream};
use futures::{Future, FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, future};
use ruma::{
EventId, Int, MilliSecondsSinceUnixEpoch, RoomVersionId,
events::{
@ -37,9 +36,13 @@ pub use self::{
room_version::RoomVersion,
};
use crate::{
debug,
debug, debug_error,
matrix::{event::Event, pdu::StateKey},
trace, warn,
trace,
utils::stream::{
BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryReadyExt, WidebandExt,
},
warn,
};
/// A mapping of event type and state_key to some value `T`, usually an
@ -112,20 +115,16 @@ where
debug!(count = conflicting.len(), "conflicting events");
trace!(map = ?conflicting, "conflicting events");
let auth_chain_diff =
get_auth_chain_diff(auth_chain_sets).chain(conflicting.into_values().flatten());
let conflicting_values = conflicting.into_values().flatten().stream();
// `all_conflicted` contains unique items
// synapse says `full_set = {eid for eid in full_conflicted_set if eid in
// event_map}`
let all_conflicted: HashSet<_> = stream::iter(auth_chain_diff)
// Don't honor events we cannot "verify"
.map(|id| event_exists(id.clone()).map(move |exists| (id, exists)))
.buffer_unordered(parallel_fetches)
.filter_map(|(id, exists)| future::ready(exists.then_some(id)))
.collect()
.boxed()
.await;
let all_conflicted: HashSet<_> = get_auth_chain_diff(auth_chain_sets)
.chain(conflicting_values)
.broad_filter_map(async |id| event_exists(id.clone()).await.then_some(id))
.collect()
.await;
debug!(count = all_conflicted.len(), "full conflicted set");
trace!(set = ?all_conflicted, "full conflicted set");
@ -135,12 +134,15 @@ where
// Get only the control events with a state_key: "" or ban/kick event (sender !=
// state_key)
let control_events: Vec<_> = stream::iter(all_conflicted.iter())
.map(|id| is_power_event_id(id, &event_fetch).map(move |is| (id, is)))
.buffer_unordered(parallel_fetches)
.filter_map(|(id, is)| future::ready(is.then_some(id.clone())))
let control_events: Vec<_> = all_conflicted
.iter()
.stream()
.wide_filter_map(async |id| {
is_power_event_id(id, &event_fetch)
.await
.then_some(id.clone())
})
.collect()
.boxed()
.await;
// Sort the control events based on power_level/clock/event_id and
@ -160,10 +162,9 @@ where
// Sequentially auth check each control event.
let resolved_control = iterative_auth_check(
&room_version,
sorted_control_levels.iter(),
sorted_control_levels.iter().stream(),
clean.clone(),
&event_fetch,
parallel_fetches,
)
.await?;
@ -172,36 +173,35 @@ where
// At this point the control_events have been resolved we now have to
// sort the remaining events using the mainline of the resolved power level.
let deduped_power_ev = sorted_control_levels.into_iter().collect::<HashSet<_>>();
let deduped_power_ev: HashSet<_> = sorted_control_levels.into_iter().collect();
// This removes the control events that passed auth and more importantly those
// that failed auth
let events_to_resolve = all_conflicted
let events_to_resolve: Vec<_> = all_conflicted
.iter()
.filter(|&id| !deduped_power_ev.contains(id.borrow()))
.cloned()
.collect::<Vec<_>>();
.collect();
debug!(count = events_to_resolve.len(), "events left to resolve");
trace!(list = ?events_to_resolve, "events left to resolve");
// This "epochs" power level event
let power_event = resolved_control.get(&(StateEventType::RoomPowerLevels, StateKey::new()));
let power_levels_ty_sk = (StateEventType::RoomPowerLevels, StateKey::new());
let power_event = resolved_control.get(&power_levels_ty_sk);
debug!(event_id = ?power_event, "power event");
let sorted_left_events =
mainline_sort(&events_to_resolve, power_event.cloned(), &event_fetch, parallel_fetches)
.await?;
mainline_sort(&events_to_resolve, power_event.cloned(), &event_fetch).await?;
trace!(list = ?sorted_left_events, "events left, sorted");
let mut resolved_state = iterative_auth_check(
&room_version,
sorted_left_events.iter(),
sorted_left_events.iter().stream(),
resolved_control, // The control events are added to the final resolved state
&event_fetch,
parallel_fetches,
)
.await?;
@ -265,7 +265,7 @@ where
#[allow(clippy::arithmetic_side_effects)]
fn get_auth_chain_diff<Id, Hasher>(
auth_chain_sets: &[HashSet<Id, Hasher>],
) -> impl Iterator<Item = Id> + Send + use<Id, Hasher>
) -> impl Stream<Item = Id> + Send + use<Id, Hasher>
where
Id: Clone + Eq + Hash + Send,
Hasher: BuildHasher + Send + Sync,
@ -279,6 +279,7 @@ where
id_counts
.into_iter()
.filter_map(move |(id, count)| (count < num_sets).then_some(id))
.stream()
}
/// Events are sorted from "earliest" to "latest".
@ -310,13 +311,15 @@ where
}
// This is used in the `key_fn` passed to the lexico_topo_sort fn
let event_to_pl = stream::iter(graph.keys())
let event_to_pl = graph
.keys()
.stream()
.map(|event_id| {
get_power_level_for_sender(event_id.clone(), fetch_event, parallel_fetches)
get_power_level_for_sender(event_id.clone(), fetch_event)
.map(move |res| res.map(|pl| (event_id, pl)))
})
.buffer_unordered(parallel_fetches)
.try_fold(HashMap::new(), |mut event_to_pl, (event_id, pl)| {
.ready_try_fold(HashMap::new(), |mut event_to_pl, (event_id, pl)| {
debug!(
event_id = event_id.borrow().as_str(),
power_level = i64::from(pl),
@ -324,7 +327,7 @@ where
);
event_to_pl.insert(event_id.clone(), pl);
future::ok(event_to_pl)
Ok(event_to_pl)
})
.boxed()
.await?;
@ -475,7 +478,6 @@ where
async fn get_power_level_for_sender<E, F, Fut>(
event_id: E::Id,
fetch_event: &F,
parallel_fetches: usize,
) -> serde_json::Result<Int>
where
F: Fn(E::Id) -> Fut + Sync,
@ -485,19 +487,17 @@ where
{
debug!("fetch event ({event_id}) senders power level");
let event = fetch_event(event_id.clone()).await;
let event = fetch_event(event_id).await;
let auth_events = event.as_ref().map(Event::auth_events).into_iter().flatten();
let auth_events = event.as_ref().map(Event::auth_events);
let pl = stream::iter(auth_events)
.map(|aid| fetch_event(aid.clone()))
.buffer_unordered(parallel_fetches.min(5))
.filter_map(future::ready)
.collect::<Vec<_>>()
.boxed()
.await
let pl = auth_events
.into_iter()
.find(|aev| is_type_and_key(aev, &TimelineEventType::RoomPowerLevels, ""));
.flatten()
.stream()
.broadn_filter_map(5, |aid| fetch_event(aid.clone()))
.ready_find(|aev| is_type_and_key(aev, &TimelineEventType::RoomPowerLevels, ""))
.await;
let content: PowerLevelsContentFields = match pl {
| None => return Ok(int!(0)),
@ -525,34 +525,28 @@ where
/// For each `events_to_check` event we gather the events needed to auth it from
/// the `fetch_event` closure and verify each event using the
/// `event_auth::auth_check` function.
async fn iterative_auth_check<'a, E, F, Fut, I>(
async fn iterative_auth_check<'a, E, F, Fut, S>(
room_version: &RoomVersion,
events_to_check: I,
events_to_check: S,
unconflicted_state: StateMap<E::Id>,
fetch_event: &F,
parallel_fetches: usize,
) -> Result<StateMap<E::Id>>
where
F: Fn(E::Id) -> Fut + Sync,
Fut: Future<Output = Option<E>> + Send,
E::Id: Borrow<EventId> + Clone + Eq + Ord + Send + Sync + 'a,
I: Iterator<Item = &'a E::Id> + Debug + Send + 'a,
S: Stream<Item = &'a E::Id> + Send + 'a,
E: Event + Clone + Send + Sync,
{
debug!("starting iterative auth check");
trace!(
list = ?events_to_check,
"events to check"
);
let events_to_check: Vec<_> = stream::iter(events_to_check)
let events_to_check: Vec<_> = events_to_check
.map(Result::Ok)
.map_ok(|event_id| {
fetch_event(event_id.clone()).map(move |result| {
result.ok_or_else(|| Error::NotFound(format!("Failed to find {event_id}")))
})
.broad_and_then(async |event_id| {
fetch_event(event_id.clone())
.await
.ok_or_else(|| Error::NotFound(format!("Failed to find {event_id}")))
})
.try_buffer_unordered(parallel_fetches)
.try_collect()
.boxed()
.await?;
@ -562,10 +556,10 @@ where
.flat_map(|event: &E| event.auth_events().map(Clone::clone))
.collect();
let auth_events: HashMap<E::Id, E> = stream::iter(auth_event_ids.into_iter())
.map(fetch_event)
.buffer_unordered(parallel_fetches)
.filter_map(future::ready)
let auth_events: HashMap<E::Id, E> = auth_event_ids
.into_iter()
.stream()
.broad_filter_map(fetch_event)
.map(|auth_event| (auth_event.event_id().clone(), auth_event))
.collect()
.boxed()
@ -574,7 +568,6 @@ where
let auth_events = &auth_events;
let mut resolved_state = unconflicted_state;
for event in &events_to_check {
let event_id = event.event_id();
let state_key = event
.state_key()
.ok_or_else(|| Error::InvalidPdu("State event had no state key".to_owned()))?;
@ -603,24 +596,22 @@ where
}
}
stream::iter(
auth_types
.iter()
.filter_map(|key| Some((key, resolved_state.get(key)?))),
)
.filter_map(|(key, ev_id)| async move {
if let Some(event) = auth_events.get(ev_id.borrow()) {
Some((key, event.clone()))
} else {
Some((key, fetch_event(ev_id.clone()).await?))
}
})
.for_each(|(key, event)| {
//TODO: synapse checks "rejected_reason" is None here
auth_state.insert(key.to_owned(), event);
future::ready(())
})
.await;
auth_types
.iter()
.stream()
.ready_filter_map(|key| Some((key, resolved_state.get(key)?)))
.filter_map(|(key, ev_id)| async move {
if let Some(event) = auth_events.get(ev_id.borrow()) {
Some((key, event.clone()))
} else {
Some((key, fetch_event(ev_id.clone()).await?))
}
})
.ready_for_each(|(key, event)| {
//TODO: synapse checks "rejected_reason" is None here
auth_state.insert(key.to_owned(), event);
})
.await;
debug!("event to check {:?}", event.event_id());
@ -634,12 +625,25 @@ where
future::ready(auth_state.get(&ty.with_state_key(key)))
};
if auth_check(room_version, &event, current_third_party.as_ref(), fetch_state).await? {
// add event to resolved state map
resolved_state.insert(event.event_type().with_state_key(state_key), event_id.clone());
} else {
// synapse passes here on AuthError. We do not add this event to resolved_state.
warn!("event {event_id} failed the authentication check");
let auth_result =
auth_check(room_version, &event, current_third_party.as_ref(), fetch_state).await;
match auth_result {
| Ok(true) => {
// add event to resolved state map
resolved_state.insert(
event.event_type().with_state_key(state_key),
event.event_id().clone(),
);
},
| Ok(false) => {
// synapse passes here on AuthError. We do not add this event to resolved_state.
warn!("event {} failed the authentication check", event.event_id());
},
| Err(e) => {
debug_error!("event {} failed the authentication check: {e}", event.event_id());
return Err(e);
},
}
}
@ -659,7 +663,6 @@ async fn mainline_sort<E, F, Fut>(
to_sort: &[E::Id],
resolved_power_level: Option<E::Id>,
fetch_event: &F,
parallel_fetches: usize,
) -> Result<Vec<E::Id>>
where
F: Fn(E::Id) -> Fut + Sync,
@ -682,11 +685,13 @@ where
let event = fetch_event(p.clone())
.await
.ok_or_else(|| Error::NotFound(format!("Failed to find {p}")))?;
pl = None;
for aid in event.auth_events() {
let ev = fetch_event(aid.clone())
.await
.ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?;
if is_type_and_key(&ev, &TimelineEventType::RoomPowerLevels, "") {
pl = Some(aid.to_owned());
break;
@ -694,36 +699,32 @@ where
}
}
let mainline_map = mainline
let mainline_map: HashMap<_, _> = mainline
.iter()
.rev()
.enumerate()
.map(|(idx, eid)| ((*eid).clone(), idx))
.collect::<HashMap<_, _>>();
.collect();
let order_map = stream::iter(to_sort.iter())
.map(|ev_id| {
fetch_event(ev_id.clone()).map(move |event| event.map(|event| (event, ev_id)))
let order_map: HashMap<_, _> = to_sort
.iter()
.stream()
.broad_filter_map(async |ev_id| {
fetch_event(ev_id.clone()).await.map(|event| (event, ev_id))
})
.buffer_unordered(parallel_fetches)
.filter_map(future::ready)
.map(|(event, ev_id)| {
.broad_filter_map(|(event, ev_id)| {
get_mainline_depth(Some(event.clone()), &mainline_map, fetch_event)
.map_ok(move |depth| (depth, event, ev_id))
.map_ok(move |depth| (ev_id, (depth, event.origin_server_ts(), ev_id)))
.map(Result::ok)
})
.buffer_unordered(parallel_fetches)
.filter_map(future::ready)
.fold(HashMap::new(), |mut order_map, (depth, event, ev_id)| {
order_map.insert(ev_id, (depth, event.origin_server_ts(), ev_id));
future::ready(order_map)
})
.collect()
.boxed()
.await;
// Sort the event_ids by their depth, timestamp and EventId
// unwrap is OK; order_map and sort_event_ids are from to_sort (the same Vec)
let mut sort_event_ids = order_map.keys().map(|&k| k.clone()).collect::<Vec<_>>();
let mut sort_event_ids: Vec<_> = order_map.keys().map(|&k| k.clone()).collect();
sort_event_ids.sort_by_key(|sort_id| &order_map[sort_id]);
Ok(sort_event_ids)
@ -744,6 +745,7 @@ where
{
while let Some(sort_ev) = event {
debug!(event_id = sort_ev.event_id().borrow().as_str(), "mainline");
let id = sort_ev.event_id();
if let Some(depth) = mainline_map.get(id.borrow()) {
return Ok(*depth);
@ -754,6 +756,7 @@ where
let aev = fetch_event(aid.clone())
.await
.ok_or_else(|| Error::NotFound(format!("Failed to find {aid}")))?;
if is_type_and_key(&aev, &TimelineEventType::RoomPowerLevels, "") {
event = Some(aev);
break;
@ -858,10 +861,7 @@ where
#[cfg(test)]
mod tests {
use std::{
collections::{HashMap, HashSet},
sync::Arc,
};
use std::collections::{HashMap, HashSet};
use maplit::{hashmap, hashset};
use rand::seq::SliceRandom;
@ -884,7 +884,7 @@ mod tests {
zara,
},
};
use crate::debug;
use crate::{debug, utils::stream::IterStream};
async fn test_event_sort() {
use futures::future::ready;
@ -903,7 +903,7 @@ mod tests {
let power_events = event_map
.values()
.filter(|&pdu| is_power_event(&**pdu))
.filter(|&pdu| is_power_event(&*pdu))
.map(|pdu| pdu.event_id.clone())
.collect::<Vec<_>>();
@ -915,10 +915,9 @@ mod tests {
let resolved_power = super::iterative_auth_check(
&RoomVersion::V6,
sorted_power_events.iter(),
sorted_power_events.iter().stream(),
HashMap::new(), // unconflicted events
&fetcher,
1,
)
.await
.expect("iterative auth check failed on resolved events");
@ -932,7 +931,7 @@ mod tests {
.get(&(StateEventType::RoomPowerLevels, "".into()))
.cloned();
let sorted_event_ids = super::mainline_sort(&events_to_sort, power_level, &fetcher, 1)
let sorted_event_ids = super::mainline_sort(&events_to_sort, power_level, &fetcher)
.await
.unwrap();
@ -1487,7 +1486,7 @@ mod tests {
}
#[allow(non_snake_case)]
fn BAN_STATE_SET() -> HashMap<OwnedEventId, Arc<PduEvent>> {
fn BAN_STATE_SET() -> HashMap<OwnedEventId, PduEvent> {
vec![
to_pdu_event(
"PA",
@ -1532,7 +1531,7 @@ mod tests {
}
#[allow(non_snake_case)]
fn JOIN_RULE() -> HashMap<OwnedEventId, Arc<PduEvent>> {
fn JOIN_RULE() -> HashMap<OwnedEventId, PduEvent> {
vec![
to_pdu_event(
"JR",

View file

@ -1,10 +1,7 @@
use std::{
borrow::Borrow,
collections::{BTreeMap, HashMap, HashSet},
sync::{
Arc,
atomic::{AtomicU64, Ordering::SeqCst},
},
sync::atomic::{AtomicU64, Ordering::SeqCst},
};
use futures::future::ready;
@ -36,7 +33,7 @@ use crate::{
static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0);
pub(crate) async fn do_check(
events: &[Arc<PduEvent>],
events: &[PduEvent],
edges: Vec<Vec<OwnedEventId>>,
expected_state_ids: Vec<OwnedEventId>,
) {
@ -85,7 +82,7 @@ pub(crate) async fn do_check(
}
// event_id -> PduEvent
let mut event_map: HashMap<OwnedEventId, Arc<PduEvent>> = HashMap::new();
let mut event_map: HashMap<OwnedEventId, PduEvent> = HashMap::new();
// event_id -> StateMap<OwnedEventId>
let mut state_at_event: HashMap<OwnedEventId, StateMap<OwnedEventId>> = HashMap::new();
@ -194,7 +191,7 @@ pub(crate) async fn do_check(
store.0.insert(ev_id.to_owned(), event.clone());
state_at_event.insert(node, state_after);
event_map.insert(event_id.to_owned(), Arc::clone(store.0.get(ev_id).unwrap()));
event_map.insert(event_id.to_owned(), store.0.get(ev_id).unwrap().clone());
}
let mut expected_state = StateMap::new();
@ -235,10 +232,10 @@ pub(crate) async fn do_check(
}
#[allow(clippy::exhaustive_structs)]
pub(crate) struct TestStore<E: Event>(pub(crate) HashMap<OwnedEventId, Arc<E>>);
pub(crate) struct TestStore<E: Event>(pub(crate) HashMap<OwnedEventId, E>);
impl<E: Event> TestStore<E> {
pub(crate) fn get_event(&self, _: &RoomId, event_id: &EventId) -> Result<Arc<E>> {
impl<E: Event + Clone> TestStore<E> {
pub(crate) fn get_event(&self, _: &RoomId, event_id: &EventId) -> Result<E> {
self.0
.get(event_id)
.cloned()
@ -288,7 +285,7 @@ impl TestStore<PduEvent> {
&[],
);
let cre = create_event.event_id().to_owned();
self.0.insert(cre.clone(), Arc::clone(&create_event));
self.0.insert(cre.clone(), create_event.clone());
let alice_mem = to_pdu_event(
"IMA",
@ -300,7 +297,7 @@ impl TestStore<PduEvent> {
&[cre.clone()],
);
self.0
.insert(alice_mem.event_id().to_owned(), Arc::clone(&alice_mem));
.insert(alice_mem.event_id().to_owned(), alice_mem.clone());
let join_rules = to_pdu_event(
"IJR",
@ -399,7 +396,7 @@ pub(crate) fn to_init_pdu_event(
ev_type: TimelineEventType,
state_key: Option<&str>,
content: Box<RawJsonValue>,
) -> Arc<PduEvent> {
) -> PduEvent {
let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst);
let id = if id.contains('$') {
id.to_owned()
@ -408,7 +405,7 @@ pub(crate) fn to_init_pdu_event(
};
let state_key = state_key.map(ToOwned::to_owned);
Arc::new(PduEvent {
PduEvent {
event_id: id.try_into().unwrap(),
rest: Pdu::RoomV3Pdu(RoomV3Pdu {
room_id: room_id().to_owned(),
@ -425,7 +422,7 @@ pub(crate) fn to_init_pdu_event(
hashes: EventHash::new("".to_owned()),
signatures: ServerSignatures::default(),
}),
})
}
}
pub(crate) fn to_pdu_event<S>(
@ -436,7 +433,7 @@ pub(crate) fn to_pdu_event<S>(
content: Box<RawJsonValue>,
auth_events: &[S],
prev_events: &[S],
) -> Arc<PduEvent>
) -> PduEvent
where
S: AsRef<str>,
{
@ -458,7 +455,7 @@ where
.collect::<Vec<_>>();
let state_key = state_key.map(ToOwned::to_owned);
Arc::new(PduEvent {
PduEvent {
event_id: id.try_into().unwrap(),
rest: Pdu::RoomV3Pdu(RoomV3Pdu {
room_id: room_id().to_owned(),
@ -475,12 +472,12 @@ where
hashes: EventHash::new("".to_owned()),
signatures: ServerSignatures::default(),
}),
})
}
}
// all graphs start with these input events
#[allow(non_snake_case)]
pub(crate) fn INITIAL_EVENTS() -> HashMap<OwnedEventId, Arc<PduEvent>> {
pub(crate) fn INITIAL_EVENTS() -> HashMap<OwnedEventId, PduEvent> {
vec![
to_pdu_event::<&EventId>(
"CREATE",
@ -562,7 +559,7 @@ pub(crate) fn INITIAL_EVENTS() -> HashMap<OwnedEventId, Arc<PduEvent>> {
// all graphs start with these input events
#[allow(non_snake_case)]
pub(crate) fn INITIAL_EVENTS_CREATE_ROOM() -> HashMap<OwnedEventId, Arc<PduEvent>> {
pub(crate) fn INITIAL_EVENTS_CREATE_ROOM() -> HashMap<OwnedEventId, PduEvent> {
vec![to_pdu_event::<&EventId>(
"CREATE",
alice(),

View file

@ -22,30 +22,6 @@ where
Self: Sized + Unpin;
}
pub async fn and<I, F>(args: I) -> impl Future<Output = bool> + Send
where
I: Iterator<Item = F> + Send,
F: Future<Output = bool> + Send,
{
type Result = crate::Result<(), ()>;
let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(()))));
try_join_all(args).map(|result| result.is_ok())
}
pub async fn or<I, F>(args: I) -> impl Future<Output = bool> + Send
where
I: Iterator<Item = F> + Send,
F: Future<Output = bool> + Send + Unpin,
{
type Result = crate::Result<(), ()>;
let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(()))));
select_ok(args).map(|result| result.is_ok())
}
impl<Fut> BoolExt for Fut
where
Fut: Future<Output = bool> + Send,
@ -80,3 +56,27 @@ where
try_select(a, b).map(|result| result.is_ok())
}
}
pub async fn and<I, F>(args: I) -> impl Future<Output = bool> + Send
where
I: Iterator<Item = F> + Send,
F: Future<Output = bool> + Send,
{
type Result = crate::Result<(), ()>;
let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(()))));
try_join_all(args).map(|result| result.is_ok())
}
pub async fn or<I, F>(args: I) -> impl Future<Output = bool> + Send
where
I: Iterator<Item = F> + Send,
F: Future<Output = bool> + Send + Unpin,
{
type Result = crate::Result<(), ()>;
let args = args.map(|a| a.map(|a| a.then_some(()).ok_or(Result::Err(()))));
select_ok(args).map(|result| result.is_ok())
}

View file

@ -2,10 +2,12 @@ mod bool_ext;
mod ext_ext;
mod option_ext;
mod option_stream;
mod ready_eq_ext;
mod try_ext_ext;
pub use bool_ext::{BoolExt, and, or};
pub use ext_ext::ExtExt;
pub use option_ext::OptionExt;
pub use option_stream::OptionStream;
pub use ready_eq_ext::ReadyEqExt;
pub use try_ext_ext::TryExtExt;

View file

@ -0,0 +1,25 @@
//! Future extension for Partial Equality against a present value
use futures::{Future, FutureExt};
pub trait ReadyEqExt<T>
where
Self: Future<Output = T> + Send + Sized,
T: PartialEq + Send + Sync,
{
fn eq(self, t: &T) -> impl Future<Output = bool> + Send;
fn ne(self, t: &T) -> impl Future<Output = bool> + Send;
}
impl<Fut, T> ReadyEqExt<T> for Fut
where
Fut: Future<Output = T> + Send + Sized,
T: PartialEq + Send + Sync,
{
#[inline]
fn eq(self, t: &T) -> impl Future<Output = bool> + Send { self.map(move |r| r.eq(t)) }
#[inline]
fn ne(self, t: &T) -> impl Future<Output = bool> + Send { self.map(move |r| r.ne(t)) }
}

View file

@ -10,6 +10,7 @@ use crate::{Err, Error, Result, debug::type_name, err};
/// Checked arithmetic expression. Returns a Result<R, Error::Arithmetic>
#[macro_export]
#[collapse_debuginfo(yes)]
macro_rules! checked {
($($input:tt)+) => {
$crate::utils::math::checked_ops!($($input)+)
@ -22,6 +23,7 @@ macro_rules! checked {
/// has no realistic expectation for error and no interest in cluttering the
/// callsite with result handling from checked!.
#[macro_export]
#[collapse_debuginfo(yes)]
macro_rules! expected {
($msg:literal, $($input:tt)+) => {
$crate::checked!($($input)+).expect($msg)
@ -37,6 +39,7 @@ macro_rules! expected {
/// regression analysis.
#[cfg(not(debug_assertions))]
#[macro_export]
#[collapse_debuginfo(yes)]
macro_rules! validated {
($($input:tt)+) => {
//#[allow(clippy::arithmetic_side_effects)] {
@ -53,6 +56,7 @@ macro_rules! validated {
/// the expression is obviously safe. The check is elided in release-mode.
#[cfg(debug_assertions)]
#[macro_export]
#[collapse_debuginfo(yes)]
macro_rules! validated {
($($input:tt)+) => { $crate::expected!($($input)+) }
}
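
The added `#[collapse_debuginfo(yes)]` attributes only trim the debuginfo these macros expand to; their behavior is unchanged. A brief usage sketch, assuming the macros are exported at the `conduwuit` crate root as `#[macro_export]` implies:

use conduwuit::{Result, checked, expected};

fn demo(a: u64, b: u64) -> Result<u64> {
    // checked! evaluates the expression with checked arithmetic and returns a Result.
    let sum = checked!(a + b)?;

    // expected! unwraps checked! where overflow has no realistic chance of occurring.
    Ok(expected!("summing two u64s then doubling is not expected to overflow", sum * 2))
}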

View file

@ -28,7 +28,7 @@ pub use self::{
bool::BoolExt,
bytes::{increment, u64_from_bytes, u64_from_u8, u64_from_u8x8},
debug::slice_truncated as debug_slice_truncated,
future::TryExtExt as TryFutureExtExt,
future::{BoolExt as FutureBoolExt, OptionStream, TryExtExt as TryFutureExtExt},
hash::sha256::delimited as calculate_hash,
html::Escape as HtmlEscape,
json::{deserialize_from_str, to_canonical_object},
@ -173,7 +173,6 @@ macro_rules! is_equal {
/// Functor for |x| *x.$i
#[macro_export]
#[collapse_debuginfo(yes)]
macro_rules! deref_at {
($idx:tt) => {
|t| *t.$idx
@ -182,7 +181,6 @@ macro_rules! deref_at {
/// Functor for |ref x| x.$i
#[macro_export]
#[collapse_debuginfo(yes)]
macro_rules! ref_at {
($idx:tt) => {
|ref t| &t.$idx
@ -191,7 +189,6 @@ macro_rules! ref_at {
/// Functor for |&x| x.$i
#[macro_export]
#[collapse_debuginfo(yes)]
macro_rules! val_at {
($idx:tt) => {
|&t| t.$idx
@ -200,7 +197,6 @@ macro_rules! val_at {
/// Functor for |x| x.$i
#[macro_export]
#[collapse_debuginfo(yes)]
macro_rules! at {
($idx:tt) => {
|t| t.$idx

View file

@ -10,7 +10,7 @@ pub trait TryExpect<'a, Item> {
impl<'a, T, Item> TryExpect<'a, Item> for T
where
T: Stream<Item = Result<Item>> + TryStream + Send + 'a,
T: Stream<Item = Result<Item>> + Send + TryStream + 'a,
Item: 'a,
{
#[inline]

View file

@ -2,7 +2,7 @@
#![allow(clippy::type_complexity)]
use futures::{
future::{Ready, ready},
future::{FutureExt, Ready, ready},
stream::{
All, Any, Filter, FilterMap, Fold, ForEach, Scan, SkipWhile, Stream, StreamExt, TakeWhile,
},
@ -16,7 +16,7 @@ use futures::{
/// This interface is not necessarily complete; feel free to add as-needed.
pub trait ReadyExt<Item>
where
Self: Stream<Item = Item> + Send + Sized,
Self: Stream<Item = Item> + Sized,
{
fn ready_all<F>(self, f: F) -> All<Self, Ready<bool>, impl FnMut(Item) -> Ready<bool>>
where
@ -26,6 +26,12 @@ where
where
F: Fn(Item) -> bool;
fn ready_find<'a, F>(self, f: F) -> impl Future<Output = Option<Item>> + Send
where
Self: Send + Unpin + 'a,
F: Fn(&Item) -> bool + Send + 'a,
Item: Send;
fn ready_filter<'a, F>(
self,
f: F,
@ -93,7 +99,7 @@ where
impl<Item, S> ReadyExt<Item> for S
where
S: Stream<Item = Item> + Send + Sized,
S: Stream<Item = Item> + Sized,
{
#[inline]
fn ready_all<F>(self, f: F) -> All<Self, Ready<bool>, impl FnMut(Item) -> Ready<bool>>
@ -111,6 +117,19 @@ where
self.any(move |t| ready(f(t)))
}
#[inline]
fn ready_find<'a, F>(self, f: F) -> impl Future<Output = Option<Item>> + Send
where
Self: Send + Unpin + 'a,
F: Fn(&Item) -> bool + Send + 'a,
Item: Send,
{
self.ready_filter(f)
.take(1)
.into_future()
.map(|(curr, _next)| curr)
}
#[inline]
fn ready_filter<'a, F>(
self,
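
A usage sketch for the new `ready_find()` extension: it resolves to the first stream item matching a non-async predicate. The `conduwuit::utils::stream::ReadyExt` import path is assumed.

use conduwuit::utils::stream::ReadyExt;
use futures::stream;

async fn demo() -> Option<u64> {
    // Returns Some(2): the first item for which the predicate holds.
    stream::iter([1_u64, 2, 3, 4])
        .ready_find(|&x| x % 2 == 0)
        .await
}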

View file

@ -13,8 +13,8 @@ use crate::Result;
/// This interface is not necessarily complete; feel free to add as-needed.
pub trait TryReadyExt<T, E, S>
where
S: TryStream<Ok = T, Error = E, Item = Result<T, E>> + Send + ?Sized,
Self: TryStream + Send + Sized,
S: TryStream<Ok = T, Error = E, Item = Result<T, E>> + ?Sized,
Self: TryStream + Sized,
{
fn ready_and_then<U, F>(
self,
@ -67,8 +67,8 @@ where
impl<T, E, S> TryReadyExt<T, E, S> for S
where
S: TryStream<Ok = T, Error = E, Item = Result<T, E>> + Send + ?Sized,
Self: TryStream + Send + Sized,
S: TryStream<Ok = T, Error = E, Item = Result<T, E>> + ?Sized,
Self: TryStream + Sized,
{
#[inline]
fn ready_and_then<U, F>(

View file

@ -8,8 +8,8 @@ use crate::Result;
/// TryStreamTools
pub trait TryTools<T, E, S>
where
S: TryStream<Ok = T, Error = E, Item = Result<T, E>> + Send + ?Sized,
Self: TryStream + Send + Sized,
S: TryStream<Ok = T, Error = E, Item = Result<T, E>> + ?Sized,
Self: TryStream + Sized,
{
fn try_take(
self,
@ -23,8 +23,8 @@ where
impl<T, E, S> TryTools<T, E, S> for S
where
S: TryStream<Ok = T, Error = E, Item = Result<T, E>> + Send + ?Sized,
Self: TryStream + Send + Sized,
S: TryStream<Ok = T, Error = E, Item = Result<T, E>> + ?Sized,
Self: TryStream + Sized,
{
#[inline]
fn try_take(

View file

@ -14,6 +14,7 @@ pub const EMPTY: &str = "";
/// returned otherwise the input (i.e. &'static str) is returned. If multiple
/// arguments are provided the first is assumed to be a format string.
#[macro_export]
#[collapse_debuginfo(yes)]
macro_rules! format_maybe {
($s:literal $(,)?) => {
if $crate::is_format!($s) { std::format!($s).into() } else { $s.into() }
@ -27,6 +28,7 @@ macro_rules! format_maybe {
/// Constant expression to decide if a literal is a format string. Note: could
/// use some improvement.
#[macro_export]
#[collapse_debuginfo(yes)]
macro_rules! is_format {
($s:literal) => {
::const_str::contains!($s, "{") && ::const_str::contains!($s, "}")
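
As with the arithmetic macros, `#[collapse_debuginfo(yes)]` here only affects debuginfo. For illustration, a sketch of the single-literal arm shown above (crate-root export assumed):

use std::borrow::Cow;

use conduwuit::format_maybe;

fn demo(name: &str) -> (Cow<'static, str>, Cow<'static, str>) {
    // No braces in the literal: is_format! is false, the &'static str is returned as-is.
    let plain: Cow<'static, str> = format_maybe!("hello world");

    // Braces present: the literal expands through std::format! at the callsite.
    let formatted: Cow<'static, str> = format_maybe!("hello {name}");

    (plain, formatted)
}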

View file

@ -117,7 +117,7 @@ pub fn name_from_path(path: &Path) -> Result<String> {
/// Get the (major, minor) of the block device on which Path is mounted.
#[allow(clippy::useless_conversion, clippy::unnecessary_fallible_conversions)]
pub fn dev_from_path(path: &Path) -> Result<(dev_t, dev_t)> {
fn dev_from_path(path: &Path) -> Result<(dev_t, dev_t)> {
#[cfg(target_family = "unix")]
use std::os::unix::fs::MetadataExt;

View file

@ -17,19 +17,31 @@ crate-type = [
]
[features]
release_max_log_level = [
"tracing/max_level_trace",
"tracing/release_max_level_info",
"log/max_level_trace",
"log/release_max_level_info",
]
jemalloc = [
"rust-rocksdb/jemalloc",
]
io_uring = [
"rust-rocksdb/io-uring",
]
jemalloc = [
"conduwuit-core/jemalloc",
"rust-rocksdb/jemalloc",
]
jemalloc_conf = [
"conduwuit-core/jemalloc_conf",
]
jemalloc_prof = [
"conduwuit-core/jemalloc_prof",
]
jemalloc_stats = [
"conduwuit-core/jemalloc_stats",
]
release_max_log_level = [
"conduwuit-core/release_max_log_level",
"log/max_level_trace",
"log/release_max_level_info",
"tracing/max_level_trace",
"tracing/release_max_level_info",
]
zstd_compression = [
"conduwuit-core/zstd_compression",
"rust-rocksdb/zstd",
]

View file

@ -1,24 +1,16 @@
use std::fmt::Write;
use std::{ffi::OsString, path::PathBuf};
use conduwuit::{Result, error, implement, info, utils::time::rfc2822_from_seconds, warn};
use conduwuit::{Err, Result, error, implement, info, utils::time::rfc2822_from_seconds, warn};
use rocksdb::backup::{BackupEngine, BackupEngineOptions};
use super::Engine;
use crate::{or_else, util::map_err};
use crate::util::map_err;
#[implement(Engine)]
#[tracing::instrument(skip(self))]
pub fn backup(&self) -> Result {
let server = &self.ctx.server;
let config = &server.config;
let path = config.database_backup_path.as_ref();
if path.is_none() || path.is_some_and(|path| path.as_os_str().is_empty()) {
return Ok(());
}
let options =
BackupEngineOptions::new(path.expect("valid database backup path")).map_err(map_err)?;
let mut engine = BackupEngine::open(&options, &*self.ctx.env.lock()?).map_err(map_err)?;
let mut engine = self.backup_engine()?;
let config = &self.ctx.server.config;
if config.database_backups_to_keep > 0 {
let flush = !self.is_read_only();
engine
@ -40,34 +32,62 @@ pub fn backup(&self) -> Result {
}
}
if config.database_backups_to_keep == 0 {
warn!("Configuration item `database_backups_to_keep` is set to 0.");
}
Ok(())
}
#[implement(Engine)]
pub fn backup_list(&self) -> Result<String> {
let server = &self.ctx.server;
let config = &server.config;
let path = config.database_backup_path.as_ref();
if path.is_none() || path.is_some_and(|path| path.as_os_str().is_empty()) {
return Ok("Configure database_backup_path to enable backups, or the path specified is \
not valid"
.to_owned());
pub fn backup_list(&self) -> Result<impl Iterator<Item = String> + Send> {
let info = self.backup_engine()?.get_backup_info();
if info.is_empty() {
return Err!("No backups found.");
}
let mut res = String::new();
let options =
BackupEngineOptions::new(path.expect("valid database backup path")).or_else(or_else)?;
let engine = BackupEngine::open(&options, &*self.ctx.env.lock()?).or_else(or_else)?;
for info in engine.get_backup_info() {
writeln!(
res,
let list = info.into_iter().map(|info| {
format!(
"#{} {}: {} bytes, {} files",
info.backup_id,
rfc2822_from_seconds(info.timestamp),
info.size,
info.num_files,
)?;
)
});
Ok(list)
}
#[implement(Engine)]
pub fn backup_count(&self) -> Result<usize> {
let info = self.backup_engine()?.get_backup_info();
Ok(info.len())
}
#[implement(Engine)]
fn backup_engine(&self) -> Result<BackupEngine> {
let path = self.backup_path()?;
let options = BackupEngineOptions::new(path).map_err(map_err)?;
BackupEngine::open(&options, &*self.ctx.env.lock()?).map_err(map_err)
}
#[implement(Engine)]
fn backup_path(&self) -> Result<OsString> {
let path = self
.ctx
.server
.config
.database_backup_path
.clone()
.map(PathBuf::into_os_string)
.unwrap_or_default();
if path.is_empty() {
return Err!(Config("database_backup_path", "Configure path to enable backups"));
}
Ok(res)
Ok(path)
}
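
A hypothetical caller of the reworked interface, written as if inside the same module as the Engine impl above; the function itself is illustrative only.

use conduwuit::Result;

fn print_backups(engine: &Engine) -> Result {
    // backup_list() now yields formatted lines lazily and returns Err when no backups
    // exist, instead of producing one explanatory String.
    for line in engine.backup_list()? {
        println!("{line}");
    }
    println!("total backups: {}", engine.backup_count()?);
    Ok(())
}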

View file

@ -8,7 +8,7 @@ use crate::{Result, utils::camel_to_snake_string};
pub(super) fn command(mut item: ItemFn, _args: &[Meta]) -> Result<TokenStream> {
let attr: Attribute = parse_quote! {
#[conduwuit_macros::implement(crate::Command, params = "<'_>")]
#[conduwuit_macros::implement(crate::Context, params = "<'_>")]
};
item.attrs.push(attr);
@ -19,15 +19,16 @@ pub(super) fn command_dispatch(item: ItemEnum, _args: &[Meta]) -> Result<TokenSt
let name = &item.ident;
let arm: Vec<TokenStream2> = item.variants.iter().map(dispatch_arm).try_collect()?;
let switch = quote! {
#[allow(clippy::large_stack_frames)] //TODO: fixme
pub(super) async fn process(
command: #name,
context: &crate::Command<'_>
context: &crate::Context<'_>
) -> Result {
use #name::*;
#[allow(non_snake_case)]
Ok(match command {
match command {
#( #arm )*
})
}
}
};
@ -47,8 +48,7 @@ fn dispatch_arm(v: &Variant) -> Result<TokenStream2> {
let arg = field.clone();
quote! {
#name { #( #field ),* } => {
let c = Box::pin(context.#handler(#( #arg ),*)).await?;
Box::pin(context.write_str(c.body())).await?;
Box::pin(context.#handler(#( #arg ),*)).await
},
}
},
@ -58,15 +58,14 @@ fn dispatch_arm(v: &Variant) -> Result<TokenStream2> {
};
quote! {
#name ( #field ) => {
Box::pin(#handler::process(#field, context)).await?;
Box::pin(#handler::process(#field, context)).await
}
}
},
| Fields::Unit => {
quote! {
#name => {
let c = Box::pin(context.#handler()).await?;
Box::pin(context.write_str(c.body())).await?;
Box::pin(context.#handler()).await
},
}
},

View file

@ -70,6 +70,7 @@ element_hacks = [
]
gzip_compression = [
"conduwuit-api/gzip_compression",
"conduwuit-core/gzip_compression",
"conduwuit-router/gzip_compression",
"conduwuit-service/gzip_compression",
]
@ -141,6 +142,7 @@ zstd_compression = [
"conduwuit-core/zstd_compression",
"conduwuit-database/zstd_compression",
"conduwuit-router/zstd_compression",
"conduwuit-service/zstd_compression",
]
conduwuit_mods = [
"conduwuit-core/conduwuit_mods",

View file

@ -17,34 +17,79 @@ crate-type = [
]
[features]
brotli_compression = [
"conduwuit-admin/brotli_compression",
"conduwuit-api/brotli_compression",
"conduwuit-core/brotli_compression",
"conduwuit-service/brotli_compression",
"tower-http/compression-br",
]
direct_tls = [
"axum-server/tls-rustls",
"dep:rustls",
"dep:axum-server-dual-protocol",
]
gzip_compression = [
"conduwuit-admin/gzip_compression",
"conduwuit-api/gzip_compression",
"conduwuit-core/gzip_compression",
"conduwuit-service/gzip_compression",
"tower-http/compression-gzip",
]
io_uring = [
"conduwuit-admin/io_uring",
"conduwuit-api/io_uring",
"conduwuit-service/io_uring",
"conduwuit-api/io_uring",
]
jemalloc = [
"conduwuit-admin/jemalloc",
"conduwuit-api/jemalloc",
"conduwuit-core/jemalloc",
"conduwuit-service/jemalloc",
]
jemalloc_conf = [
"conduwuit-admin/jemalloc_conf",
"conduwuit-api/jemalloc_conf",
"conduwuit-core/jemalloc_conf",
"conduwuit-service/jemalloc_conf",
]
jemalloc_prof = [
"conduwuit-admin/jemalloc_prof",
"conduwuit-api/jemalloc_prof",
"conduwuit-core/jemalloc_prof",
"conduwuit-service/jemalloc_prof",
]
jemalloc_stats = [
"conduwuit-admin/jemalloc_stats",
"conduwuit-api/jemalloc_stats",
"conduwuit-core/jemalloc_stats",
"conduwuit-service/jemalloc_stats",
]
release_max_log_level = [
"conduwuit-admin/release_max_log_level",
"conduwuit-api/release_max_log_level",
"conduwuit-core/release_max_log_level",
"conduwuit-service/release_max_log_level",
"tracing/max_level_trace",
"tracing/release_max_level_info",
"log/max_level_trace",
"log/release_max_level_info",
]
sentry_telemetry = [
"conduwuit-core/sentry_telemetry",
"dep:sentry",
"dep:sentry-tracing",
"dep:sentry-tower",
]
zstd_compression = [
"tower-http/compression-zstd",
]
gzip_compression = [
"tower-http/compression-gzip",
]
brotli_compression = [
"tower-http/compression-br",
]
systemd = [
"dep:sd-notify",
]
direct_tls = [
"axum-server/tls-rustls",
"dep:rustls",
"dep:axum-server-dual-protocol",
zstd_compression = [
"conduwuit-api/zstd_compression",
"conduwuit-core/zstd_compression",
"conduwuit-service/zstd_compression",
"tower-http/compression-zstd",
]
[dependencies]

View file

@ -31,12 +31,14 @@ pub(super) async fn serve(
.install_default()
.expect("failed to initialise aws-lc-rs rustls crypto provider");
debug!("Using direct TLS. Certificate path {certs} and certificate private key path {key}",);
info!(
"Note: It is strongly recommended that you use a reverse proxy instead of running \
conduwuit directly with TLS."
);
let conf = RustlsConfig::from_pem_file(certs, key).await?;
debug!("Using direct TLS. Certificate path {certs} and certificate private key path {key}",);
let conf = RustlsConfig::from_pem_file(certs, key)
.await
.map_err(|e| err!(Config("tls", "Failed to load certificates or key: {e}")))?;
let mut join_set = JoinSet::new();
let app = app.into_make_service_with_connect_info::<SocketAddr>();

View file

@ -17,7 +17,12 @@ crate-type = [
]
[features]
blurhashing = [
"dep:image",
"dep:blurhash",
]
brotli_compression = [
"conduwuit-core/brotli_compression",
"reqwest/brotli",
]
console = [
@ -26,25 +31,48 @@ console = [
]
element_hacks = []
gzip_compression = [
"conduwuit-core/gzip_compression",
"reqwest/gzip",
]
io_uring = [
"conduwuit-database/io_uring",
]
jemalloc = [
"conduwuit-core/jemalloc",
"conduwuit-database/jemalloc",
]
jemalloc_conf = [
"conduwuit-core/jemalloc_conf",
"conduwuit-database/jemalloc_conf",
]
jemalloc_prof = [
"conduwuit-core/jemalloc_prof",
"conduwuit-database/jemalloc_prof",
]
jemalloc_stats = [
"conduwuit-core/jemalloc_stats",
"conduwuit-database/jemalloc_stats",
]
media_thumbnail = [
"dep:image",
]
release_max_log_level = [
"tracing/max_level_trace",
"tracing/release_max_level_info",
"conduwuit-core/release_max_log_level",
"conduwuit-database/release_max_log_level",
"log/max_level_trace",
"log/release_max_level_info",
"tracing/max_level_trace",
"tracing/release_max_level_info",
]
url_preview = [
"dep:image",
"dep:webpage",
]
zstd_compression = [
"conduwuit-core/zstd_compression",
"conduwuit-database/zstd_compression",
"reqwest/zstd",
]
blurhashing = ["dep:image","dep:blurhash"]
[dependencies]
async-trait.workspace = true

View file

@ -1,6 +1,7 @@
use std::collections::BTreeMap;
use conduwuit::{Result, pdu::PduBuilder};
use futures::FutureExt;
use ruma::{
RoomId, RoomVersionId,
events::room::{
@ -63,6 +64,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
&room_id,
&state_lock,
)
.boxed()
.await?;
// 2. Make server user/bot join
@ -78,6 +80,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
&room_id,
&state_lock,
)
.boxed()
.await?;
// 3. Power levels
@ -95,6 +98,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
&room_id,
&state_lock,
)
.boxed()
.await?;
// 4.1 Join Rules
@ -107,6 +111,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
&room_id,
&state_lock,
)
.boxed()
.await?;
// 4.2 History Visibility
@ -122,6 +127,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
&room_id,
&state_lock,
)
.boxed()
.await?;
// 4.3 Guest Access
@ -137,6 +143,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
&room_id,
&state_lock,
)
.boxed()
.await?;
// 5. Events implied by name and topic
@ -150,6 +157,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
&room_id,
&state_lock,
)
.boxed()
.await?;
services
@ -163,6 +171,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
&room_id,
&state_lock,
)
.boxed()
.await?;
// 6. Room alias
@ -180,6 +189,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
&room_id,
&state_lock,
)
.boxed()
.await?;
services
@ -197,6 +207,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
&room_id,
&state_lock,
)
.boxed()
.await?;
Ok(())
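
Each `.boxed()` added above type-erases the large state-event future, which is what keeps the enclosing async state machine (and its stack frame and debuginfo) small. A minimal illustration of the pattern, independent of the admin-room code:

use futures::FutureExt;

async fn big_step() -> u64 {
    // stands in for a deeply nested future with a large state machine
    1
}

async fn caller() -> u64 {
    // Boxing moves big_step's state machine to the heap behind Pin<Box<dyn Future + Send>>,
    // so caller's own future stays small.
    big_step().boxed().await
}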

View file

@ -1,20 +1,20 @@
mod namespace_regex;
mod registration_info;
use std::{collections::BTreeMap, sync::Arc};
use std::{collections::BTreeMap, iter::IntoIterator, sync::Arc};
use async_trait::async_trait;
use conduwuit::{Result, err, utils::stream::TryIgnore};
use conduwuit::{Result, err, utils::stream::IterStream};
use database::Map;
use futures::{Future, StreamExt, TryStreamExt};
use futures::{Future, FutureExt, Stream, TryStreamExt};
use ruma::{RoomAliasId, RoomId, UserId, api::appservice::Registration};
use tokio::sync::RwLock;
use tokio::sync::{RwLock, RwLockReadGuard};
pub use self::{namespace_regex::NamespaceRegex, registration_info::RegistrationInfo};
use crate::{Dep, sending};
pub struct Service {
registration_info: RwLock<BTreeMap<String, RegistrationInfo>>,
registration_info: RwLock<Registrations>,
services: Services,
db: Data,
}
@ -27,6 +27,8 @@ struct Data {
id_appserviceregistrations: Arc<Map>,
}
type Registrations = BTreeMap<String, RegistrationInfo>;
#[async_trait]
impl crate::Service for Service {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
@ -41,19 +43,18 @@ impl crate::Service for Service {
}))
}
async fn worker(self: Arc<Self>) -> Result<()> {
async fn worker(self: Arc<Self>) -> Result {
// Inserting registrations into cache
for appservice in self.iter_db_ids().await? {
self.registration_info.write().await.insert(
appservice.0,
appservice
.1
.try_into()
.expect("Should be validated on registration"),
);
}
self.iter_db_ids()
.try_for_each(async |appservice| {
self.registration_info
.write()
.await
.insert(appservice.0, appservice.1.try_into()?);
Ok(())
Ok(())
})
.await
}
fn name(&self) -> &str { crate::service::make_name(std::module_path!()) }
@ -84,7 +85,7 @@ impl Service {
/// # Arguments
///
/// * `service_name` - the registration ID of the appservice
pub async fn unregister_appservice(&self, appservice_id: &str) -> Result<()> {
pub async fn unregister_appservice(&self, appservice_id: &str) -> Result {
// removes the appservice registration info
self.registration_info
.write()
@ -112,15 +113,6 @@ impl Service {
.map(|info| info.registration)
}
pub async fn iter_ids(&self) -> Vec<String> {
self.registration_info
.read()
.await
.keys()
.cloned()
.collect()
}
pub async fn find_from_token(&self, token: &str) -> Option<RegistrationInfo> {
self.read()
.await
@ -156,15 +148,22 @@ impl Service {
.any(|info| info.rooms.is_exclusive_match(room_id.as_str()))
}
pub fn read(
&self,
) -> impl Future<Output = tokio::sync::RwLockReadGuard<'_, BTreeMap<String, RegistrationInfo>>>
{
self.registration_info.read()
pub fn iter_ids(&self) -> impl Stream<Item = String> + Send {
self.read()
.map(|info| info.keys().cloned().collect::<Vec<_>>())
.map(IntoIterator::into_iter)
.map(IterStream::stream)
.flatten_stream()
}
#[inline]
pub async fn all(&self) -> Result<Vec<(String, Registration)>> { self.iter_db_ids().await }
pub fn iter_db_ids(&self) -> impl Stream<Item = Result<(String, Registration)>> + Send {
self.db
.id_appserviceregistrations
.keys()
.and_then(move |id: &str| async move {
Ok((id.to_owned(), self.get_db_registration(id).await?))
})
}
pub async fn get_db_registration(&self, id: &str) -> Result<Registration> {
self.db
@ -175,16 +174,7 @@ impl Service {
.map_err(|e| err!(Database("Invalid appservice {id:?} registration: {e:?}")))
}
async fn iter_db_ids(&self) -> Result<Vec<(String, Registration)>> {
self.db
.id_appserviceregistrations
.keys()
.ignore_err()
.then(|id: String| async move {
let reg = self.get_db_registration(&id).await?;
Ok((id, reg))
})
.try_collect()
.await
pub fn read(&self) -> impl Future<Output = RwLockReadGuard<'_, Registrations>> + Send {
self.registration_info.read()
}
}
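
A sketch of consuming the new stream-based accessors, written as if alongside the Service above; collecting into Vec is only for illustration.

use futures::{StreamExt, TryStreamExt};

async fn demo(appservices: &Service) -> conduwuit::Result {
    // iter_ids() now yields registration IDs as a Stream instead of a Vec.
    let ids: Vec<String> = appservices.iter_ids().collect().await;

    // iter_db_ids() streams (id, Registration) pairs straight from the database.
    let all: Vec<(String, Registration)> = appservices.iter_db_ids().try_collect().await?;

    let _ = (ids, all);
    Ok(())
}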

View file

@ -72,10 +72,4 @@ impl Data {
pub fn bump_database_version(&self, new_version: u64) {
self.global.raw_put(b"version", new_version);
}
#[inline]
pub fn backup(&self) -> Result { self.db.db.backup() }
#[inline]
pub fn backup_list(&self) -> Result<String> { self.db.db.backup_list() }
}

View file

@ -1,4 +1,4 @@
#![type_length_limit = "2048"]
#![type_length_limit = "8192"]
#![allow(refining_impl_trait)]
mod manager;

View file

@ -8,7 +8,7 @@ use std::{
use conduwuit::{Result, Server};
use database::Map;
use ruma::{
DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, UserId,
OwnedDeviceId, OwnedRoomId, OwnedUserId,
api::client::sync::sync_events::{
self,
v4::{ExtensionsConfig, SyncRequestList},
@ -49,8 +49,8 @@ struct Services {
struct SlidingSyncCache {
lists: BTreeMap<String, SyncRequestList>,
subscriptions: BTreeMap<OwnedRoomId, sync_events::v4::RoomSubscription>,
known_rooms: BTreeMap<String, BTreeMap<OwnedRoomId, u64>>, /* For every room, the
* roomsince number */
// For every room, the roomsince number
known_rooms: BTreeMap<String, BTreeMap<OwnedRoomId, u64>>,
extensions: ExtensionsConfig,
}
@ -98,79 +98,35 @@ impl crate::Service for Service {
fn name(&self) -> &str { crate::service::make_name(std::module_path!()) }
}
/// load params from cache if body doesn't contain it, as long as it's allowed
/// in some cases we may need to allow an empty list as an actual value
fn list_or_sticky<T: Clone>(target: &mut Vec<T>, cached: &Vec<T>) {
if target.is_empty() {
target.clone_from(cached);
}
}
fn some_or_sticky<T>(target: &mut Option<T>, cached: Option<T>) {
if target.is_none() {
*target = cached;
}
}
impl Service {
pub fn snake_connection_cached(
&self,
user_id: OwnedUserId,
device_id: OwnedDeviceId,
conn_id: Option<String>,
) -> bool {
self.snake_connections
.lock()
.unwrap()
.contains_key(&(user_id, device_id, conn_id))
}
pub fn forget_snake_sync_connection(
&self,
user_id: OwnedUserId,
device_id: OwnedDeviceId,
conn_id: Option<String>,
) {
pub fn snake_connection_cached(&self, key: &SnakeConnectionsKey) -> bool {
self.snake_connections
.lock()
.expect("locked")
.remove(&(user_id, device_id, conn_id));
.contains_key(key)
}
pub fn remembered(
&self,
user_id: OwnedUserId,
device_id: OwnedDeviceId,
conn_id: String,
) -> bool {
self.connections
.lock()
.unwrap()
.contains_key(&(user_id, device_id, conn_id))
pub fn forget_snake_sync_connection(&self, key: &SnakeConnectionsKey) {
self.snake_connections.lock().expect("locked").remove(key);
}
pub fn forget_sync_request_connection(
&self,
user_id: OwnedUserId,
device_id: OwnedDeviceId,
conn_id: String,
) {
self.connections
.lock()
.expect("locked")
.remove(&(user_id, device_id, conn_id));
pub fn remembered(&self, key: &DbConnectionsKey) -> bool {
self.connections.lock().expect("locked").contains_key(key)
}
pub fn forget_sync_request_connection(&self, key: &DbConnectionsKey) {
self.connections.lock().expect("locked").remove(key);
}
pub fn update_snake_sync_request_with_cache(
&self,
user_id: OwnedUserId,
device_id: OwnedDeviceId,
snake_key: &SnakeConnectionsKey,
request: &mut v5::Request,
) -> BTreeMap<String, BTreeMap<OwnedRoomId, u64>> {
let conn_id = request.conn_id.clone();
let mut cache = self.snake_connections.lock().expect("locked");
let cached = Arc::clone(
cache
.entry((user_id, device_id, conn_id))
.entry(snake_key.clone())
.or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))),
);
let cached = &mut cached.lock().expect("locked");
@ -268,25 +224,23 @@ impl Service {
pub fn update_sync_request_with_cache(
&self,
user_id: OwnedUserId,
device_id: OwnedDeviceId,
key: &SnakeConnectionsKey,
request: &mut sync_events::v4::Request,
) -> BTreeMap<String, BTreeMap<OwnedRoomId, u64>> {
let Some(conn_id) = request.conn_id.clone() else {
return BTreeMap::new();
};
let key = into_db_key(key.0.clone(), key.1.clone(), conn_id);
let mut cache = self.connections.lock().expect("locked");
let cached = Arc::clone(cache.entry((user_id, device_id, conn_id)).or_insert_with(
|| {
Arc::new(Mutex::new(SlidingSyncCache {
lists: BTreeMap::new(),
subscriptions: BTreeMap::new(),
known_rooms: BTreeMap::new(),
extensions: ExtensionsConfig::default(),
}))
},
));
let cached = Arc::clone(cache.entry(key).or_insert_with(|| {
Arc::new(Mutex::new(SlidingSyncCache {
lists: BTreeMap::new(),
subscriptions: BTreeMap::new(),
known_rooms: BTreeMap::new(),
extensions: ExtensionsConfig::default(),
}))
}));
let cached = &mut cached.lock().expect("locked");
drop(cache);
@ -371,22 +325,18 @@ impl Service {
pub fn update_sync_subscriptions(
&self,
user_id: OwnedUserId,
device_id: OwnedDeviceId,
conn_id: String,
key: &DbConnectionsKey,
subscriptions: BTreeMap<OwnedRoomId, sync_events::v4::RoomSubscription>,
) {
let mut cache = self.connections.lock().expect("locked");
let cached = Arc::clone(cache.entry((user_id, device_id, conn_id)).or_insert_with(
|| {
Arc::new(Mutex::new(SlidingSyncCache {
lists: BTreeMap::new(),
subscriptions: BTreeMap::new(),
known_rooms: BTreeMap::new(),
extensions: ExtensionsConfig::default(),
}))
},
));
let cached = Arc::clone(cache.entry(key.clone()).or_insert_with(|| {
Arc::new(Mutex::new(SlidingSyncCache {
lists: BTreeMap::new(),
subscriptions: BTreeMap::new(),
known_rooms: BTreeMap::new(),
extensions: ExtensionsConfig::default(),
}))
}));
let cached = &mut cached.lock().expect("locked");
drop(cache);
@ -395,90 +345,81 @@ impl Service {
pub fn update_sync_known_rooms(
&self,
user_id: &UserId,
device_id: &DeviceId,
conn_id: String,
key: &DbConnectionsKey,
list_id: String,
new_cached_rooms: BTreeSet<OwnedRoomId>,
globalsince: u64,
) {
let mut cache = self.connections.lock().expect("locked");
let cached = Arc::clone(
cache
.entry((user_id.to_owned(), device_id.to_owned(), conn_id))
.or_insert_with(|| {
Arc::new(Mutex::new(SlidingSyncCache {
lists: BTreeMap::new(),
subscriptions: BTreeMap::new(),
known_rooms: BTreeMap::new(),
extensions: ExtensionsConfig::default(),
}))
}),
);
let cached = Arc::clone(cache.entry(key.clone()).or_insert_with(|| {
Arc::new(Mutex::new(SlidingSyncCache {
lists: BTreeMap::new(),
subscriptions: BTreeMap::new(),
known_rooms: BTreeMap::new(),
extensions: ExtensionsConfig::default(),
}))
}));
let cached = &mut cached.lock().expect("locked");
drop(cache);
for (roomid, lastsince) in cached
for (room_id, lastsince) in cached
.known_rooms
.entry(list_id.clone())
.or_default()
.iter_mut()
{
if !new_cached_rooms.contains(roomid) {
if !new_cached_rooms.contains(room_id) {
*lastsince = 0;
}
}
let list = cached.known_rooms.entry(list_id).or_default();
for roomid in new_cached_rooms {
list.insert(roomid, globalsince);
for room_id in new_cached_rooms {
list.insert(room_id, globalsince);
}
}
pub fn update_snake_sync_known_rooms(
&self,
user_id: &UserId,
device_id: &DeviceId,
conn_id: String,
key: &SnakeConnectionsKey,
list_id: String,
new_cached_rooms: BTreeSet<OwnedRoomId>,
globalsince: u64,
) {
assert!(key.2.is_some(), "Some(conn_id) required for this call");
let mut cache = self.snake_connections.lock().expect("locked");
let cached = Arc::clone(
cache
.entry((user_id.to_owned(), device_id.to_owned(), Some(conn_id)))
.entry(key.clone())
.or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))),
);
let cached = &mut cached.lock().expect("locked");
drop(cache);
for (roomid, lastsince) in cached
for (room_id, lastsince) in cached
.known_rooms
.entry(list_id.clone())
.or_default()
.iter_mut()
{
if !new_cached_rooms.contains(roomid) {
if !new_cached_rooms.contains(room_id) {
*lastsince = 0;
}
}
let list = cached.known_rooms.entry(list_id).or_default();
for roomid in new_cached_rooms {
list.insert(roomid, globalsince);
for room_id in new_cached_rooms {
list.insert(room_id, globalsince);
}
}
pub fn update_snake_sync_subscriptions(
&self,
user_id: OwnedUserId,
device_id: OwnedDeviceId,
conn_id: Option<String>,
key: &SnakeConnectionsKey,
subscriptions: BTreeMap<OwnedRoomId, v5::request::RoomSubscription>,
) {
let mut cache = self.snake_connections.lock().expect("locked");
let cached = Arc::clone(
cache
.entry((user_id, device_id, conn_id))
.entry(key.clone())
.or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))),
);
let cached = &mut cached.lock().expect("locked");
@ -487,3 +428,37 @@ impl Service {
cached.subscriptions = subscriptions;
}
}
#[inline]
pub fn into_snake_key<U, D, C>(user_id: U, device_id: D, conn_id: C) -> SnakeConnectionsKey
where
U: Into<OwnedUserId>,
D: Into<OwnedDeviceId>,
C: Into<Option<String>>,
{
(user_id.into(), device_id.into(), conn_id.into())
}
#[inline]
pub fn into_db_key<U, D, C>(user_id: U, device_id: D, conn_id: C) -> DbConnectionsKey
where
U: Into<OwnedUserId>,
D: Into<OwnedDeviceId>,
C: Into<String>,
{
(user_id.into(), device_id.into(), conn_id.into())
}
/// load params from cache if body doesn't contain it, as long as it's allowed
/// in some cases we may need to allow an empty list as an actual value
fn list_or_sticky<T: Clone>(target: &mut Vec<T>, cached: &Vec<T>) {
if target.is_empty() {
target.clone_from(cached);
}
}
fn some_or_sticky<T>(target: &mut Option<T>, cached: Option<T>) {
if target.is_none() {
*target = cached;
}
}
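
The cache methods above are now keyed by the tuples these helpers build rather than by loose user/device/conn_id arguments. A hypothetical call site, written as if alongside the Service above; in the real handlers the values come from the authenticated request.

use ruma::{DeviceId, UserId};

fn demo(sync: &Service, user_id: &UserId, device_id: &DeviceId) -> (bool, bool) {
    // Sliding sync (v4) connections require a conn_id string...
    let db_key = into_db_key(user_id, device_id, "conn-1".to_owned());
    let remembered = sync.remembered(&db_key);

    // ...while snake sync (v5) connections may omit it.
    let snake_key = into_snake_key(user_id, device_id, None::<String>);
    let cached = sync.snake_connection_cached(&snake_key);

    (remembered, cached)
}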