de-global services for services

Signed-off-by: Jason Volk <jason@zemos.net>
Jason Volk 2024-07-18 06:37:47 +00:00
parent 992c0a1e58
commit 010e4ee35a
85 changed files with 2480 additions and 1887 deletions
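The change repeated across these files: code that previously reached the process-global `services()` accessor now receives its dependencies explicitly. Each service declares a `Services` struct of `Dep<T>` handles, resolved once in `build()` (or in `Data::new`) via `args.depend::<T>("name")`, and call sites become `self.services.<name>.<method>()`. Below is a minimal, self-contained sketch of that shape; the `Dep`, `Args`, and registry types here are illustrative stand-ins, not the actual definitions in this codebase.

```rust
use std::{
    any::Any,
    collections::HashMap,
    ops::Deref,
    sync::{
        atomic::{AtomicU64, Ordering},
        Arc,
    },
};

/// Illustrative stand-in for a resolved service handle (not the real `Dep`).
struct Dep<T>(Arc<T>);

impl<T> Deref for Dep<T> {
    type Target = T;

    fn deref(&self) -> &T {
        &self.0
    }
}

/// Illustrative stand-in for the builder arguments each service receives.
struct Args<'a> {
    registry: &'a HashMap<&'static str, Arc<dyn Any + Send + Sync>>,
}

impl Args<'_> {
    /// Resolve a named dependency from the registry; panics for brevity.
    fn depend<T: Any + Send + Sync>(&self, name: &str) -> Dep<T> {
        let svc = Arc::clone(self.registry.get(name).expect("service registered"));
        match svc.downcast::<T>() {
            Ok(svc) => Dep(svc),
            Err(_) => panic!("service `{name}` has an unexpected type"),
        }
    }
}

/// Toy "globals" service with the monotonic counter used by the alias data layer.
struct Globals {
    counter: AtomicU64,
}

impl Globals {
    fn next_count(&self) -> u64 {
        self.counter.fetch_add(1, Ordering::SeqCst)
    }
}

/// Dependencies are declared explicitly instead of fetched via `services()`.
struct Services {
    globals: Dep<Globals>,
}

struct Alias {
    services: Services,
}

impl Alias {
    fn build(args: &Args<'_>) -> Arc<Self> {
        Arc::new(Self {
            services: Services {
                globals: args.depend::<Globals>("globals"),
            },
        })
    }

    fn next_alias_count(&self) -> u64 {
        // Was: services().globals.next_count()
        self.services.globals.next_count()
    }
}

fn main() {
    let mut registry: HashMap<&'static str, Arc<dyn Any + Send + Sync>> = HashMap::new();
    registry.insert("globals", Arc::new(Globals { counter: AtomicU64::new(0) }));

    let args = Args { registry: &registry };
    let alias = Alias::build(&args);
    assert_eq!(alias.next_alias_count(), 0);
    assert_eq!(alias.next_alias_count(), 1);
}
```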

View file

@ -1,23 +1,32 @@
use std::sync::Arc;
use conduit::{utils, Error, Result};
use database::{Database, Map};
use database::Map;
use ruma::{api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomAliasId, RoomId, UserId};
use crate::services;
use crate::{globals, Dep};
pub(super) struct Data {
alias_userid: Arc<Map>,
alias_roomid: Arc<Map>,
aliasid_alias: Arc<Map>,
services: Services,
}
struct Services {
globals: Dep<globals::Service>,
}
impl Data {
pub(super) fn new(db: &Arc<Database>) -> Self {
pub(super) fn new(args: &crate::Args<'_>) -> Self {
let db = &args.db;
Self {
alias_userid: db["alias_userid"].clone(),
alias_roomid: db["alias_roomid"].clone(),
aliasid_alias: db["aliasid_alias"].clone(),
services: Services {
globals: args.depend::<globals::Service>("globals"),
},
}
}
@ -31,7 +40,7 @@ impl Data {
let mut aliasid = room_id.as_bytes().to_vec();
aliasid.push(0xFF);
aliasid.extend_from_slice(&services().globals.next_count()?.to_be_bytes());
aliasid.extend_from_slice(&self.services.globals.next_count()?.to_be_bytes());
self.aliasid_alias.insert(&aliasid, alias.as_bytes())?;
Ok(())

View file

@ -4,9 +4,8 @@ mod remote;
use std::sync::Arc;
use conduit::{err, Error, Result};
use data::Data;
use ruma::{
api::{appservice, client::error::ErrorKind},
api::client::error::ErrorKind,
events::{
room::power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
StateEventType,
@ -14,16 +13,33 @@ use ruma::{
OwnedRoomAliasId, OwnedRoomId, OwnedServerName, RoomAliasId, RoomId, RoomOrAliasId, UserId,
};
use crate::{appservice::RegistrationInfo, server_is_ours, services};
use self::data::Data;
use crate::{admin, appservice, appservice::RegistrationInfo, globals, rooms, sending, server_is_ours, Dep};
pub struct Service {
db: Data,
services: Services,
}
struct Services {
admin: Dep<admin::Service>,
appservice: Dep<appservice::Service>,
globals: Dep<globals::Service>,
sending: Dep<sending::Service>,
state_accessor: Dep<rooms::state_accessor::Service>,
}
impl crate::Service for Service {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
Ok(Arc::new(Self {
db: Data::new(args.db),
db: Data::new(&args),
services: Services {
admin: args.depend::<admin::Service>("admin"),
appservice: args.depend::<appservice::Service>("appservice"),
globals: args.depend::<globals::Service>("globals"),
sending: args.depend::<sending::Service>("sending"),
state_accessor: args.depend::<rooms::state_accessor::Service>("rooms::state_accessor"),
},
}))
}
@ -33,7 +49,7 @@ impl crate::Service for Service {
impl Service {
#[tracing::instrument(skip(self))]
pub fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId, user_id: &UserId) -> Result<()> {
if alias == services().globals.admin_alias && user_id != services().globals.server_user {
if alias == self.services.globals.admin_alias && user_id != self.services.globals.server_user {
Err(Error::BadRequest(
ErrorKind::forbidden(),
"Only the server user can set this alias",
@ -72,10 +88,10 @@ impl Service {
if !server_is_ours(room_alias.server_name())
&& (!servers
.as_ref()
.is_some_and(|servers| servers.contains(&services().globals.server_name().to_owned()))
.is_some_and(|servers| servers.contains(&self.services.globals.server_name().to_owned()))
|| servers.as_ref().is_none())
{
return remote::resolve(room_alias, servers).await;
return self.remote_resolve(room_alias, servers).await;
}
let room_id: Option<OwnedRoomId> = match self.resolve_local_alias(room_alias)? {
@ -111,7 +127,7 @@ impl Service {
return Err(Error::BadRequest(ErrorKind::NotFound, "Alias not found."));
};
let server_user = &services().globals.server_user;
let server_user = &self.services.globals.server_user;
// The creator of an alias can remove it
if self
@ -119,7 +135,7 @@ impl Service {
.who_created_alias(alias)?
.is_some_and(|user| user == user_id)
// Server admins can remove any local alias
|| services().admin.user_is_admin(user_id).await?
|| self.services.admin.user_is_admin(user_id).await?
// Always allow the server service account to remove the alias, since there may not be an admin room
|| server_user == user_id
{
@ -127,8 +143,7 @@ impl Service {
// Checking whether the user is able to change canonical aliases of the
// room
} else if let Some(event) =
services()
.rooms
self.services
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomPowerLevels, "")?
{
@ -140,8 +155,7 @@ impl Service {
// If there is no power levels event, only the room creator can change
// canonical aliases
} else if let Some(event) =
services()
.rooms
self.services
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomCreate, "")?
{
@ -152,14 +166,16 @@ impl Service {
}
async fn resolve_appservice_alias(&self, room_alias: &RoomAliasId) -> Result<Option<OwnedRoomId>> {
for appservice in services().appservice.read().await.values() {
use ruma::api::appservice::query::query_room_alias;
for appservice in self.services.appservice.read().await.values() {
if appservice.aliases.is_match(room_alias.as_str())
&& matches!(
services()
self.services
.sending
.send_appservice_request(
appservice.registration.clone(),
appservice::query::query_room_alias::v1::Request {
query_room_alias::v1::Request {
room_alias: room_alias.to_owned(),
},
)
@ -167,10 +183,7 @@ impl Service {
Ok(Some(_opt_result))
) {
return Ok(Some(
services()
.rooms
.alias
.resolve_local_alias(room_alias)?
self.resolve_local_alias(room_alias)?
.ok_or_else(|| err!(Request(NotFound("Room does not exist."))))?,
));
}
@ -178,20 +191,27 @@ impl Service {
Ok(None)
}
}
pub async fn appservice_checks(room_alias: &RoomAliasId, appservice_info: &Option<RegistrationInfo>) -> Result<()> {
if !server_is_ours(room_alias.server_name()) {
return Err(Error::BadRequest(ErrorKind::InvalidParam, "Alias is from another server."));
}
if let Some(ref info) = appservice_info {
if !info.aliases.is_match(room_alias.as_str()) {
return Err(Error::BadRequest(ErrorKind::Exclusive, "Room alias is not in namespace."));
pub async fn appservice_checks(
&self, room_alias: &RoomAliasId, appservice_info: &Option<RegistrationInfo>,
) -> Result<()> {
if !server_is_ours(room_alias.server_name()) {
return Err(Error::BadRequest(ErrorKind::InvalidParam, "Alias is from another server."));
}
} else if services().appservice.is_exclusive_alias(room_alias).await {
return Err(Error::BadRequest(ErrorKind::Exclusive, "Room alias reserved by appservice."));
}
Ok(())
if let Some(ref info) = appservice_info {
if !info.aliases.is_match(room_alias.as_str()) {
return Err(Error::BadRequest(ErrorKind::Exclusive, "Room alias is not in namespace."));
}
} else if self
.services
.appservice
.is_exclusive_alias(room_alias)
.await
{
return Err(Error::BadRequest(ErrorKind::Exclusive, "Room alias reserved by appservice."));
}
Ok(())
}
}

View file

@ -1,71 +1,75 @@
use conduit::{debug, debug_info, debug_warn, Error, Result};
use conduit::{debug, debug_warn, Error, Result};
use ruma::{
api::{client::error::ErrorKind, federation},
OwnedRoomId, OwnedServerName, RoomAliasId,
};
use crate::services;
impl super::Service {
pub(super) async fn remote_resolve(
&self, room_alias: &RoomAliasId, servers: Option<&Vec<OwnedServerName>>,
) -> Result<(OwnedRoomId, Option<Vec<OwnedServerName>>)> {
debug!(?room_alias, ?servers, "resolve");
pub(super) async fn resolve(
room_alias: &RoomAliasId, servers: Option<&Vec<OwnedServerName>>,
) -> Result<(OwnedRoomId, Option<Vec<OwnedServerName>>)> {
debug!(?room_alias, ?servers, "resolve");
let mut response = self
.services
.sending
.send_federation_request(
room_alias.server_name(),
federation::query::get_room_information::v1::Request {
room_alias: room_alias.to_owned(),
},
)
.await;
let mut response = services()
.sending
.send_federation_request(
room_alias.server_name(),
federation::query::get_room_information::v1::Request {
room_alias: room_alias.to_owned(),
},
)
.await;
debug!("room alias server_name get_alias_helper response: {response:?}");
debug!("room alias server_name get_alias_helper response: {response:?}");
if let Err(ref e) = response {
debug_warn!(
"Server {} of the original room alias failed to assist in resolving room alias: {e}",
room_alias.server_name(),
);
}
if let Err(ref e) = response {
debug_info!(
"Server {} of the original room alias failed to assist in resolving room alias: {e}",
room_alias.server_name()
);
}
if response.as_ref().is_ok_and(|resp| resp.servers.is_empty()) || response.as_ref().is_err() {
if let Some(servers) = servers {
for server in servers {
response = self
.services
.sending
.send_federation_request(
server,
federation::query::get_room_information::v1::Request {
room_alias: room_alias.to_owned(),
},
)
.await;
debug!("Got response from server {server} for room aliases: {response:?}");
if response.as_ref().is_ok_and(|resp| resp.servers.is_empty()) || response.as_ref().is_err() {
if let Some(servers) = servers {
for server in servers {
response = services()
.sending
.send_federation_request(
server,
federation::query::get_room_information::v1::Request {
room_alias: room_alias.to_owned(),
},
)
.await;
debug!("Got response from server {server} for room aliases: {response:?}");
if let Ok(ref response) = response {
if !response.servers.is_empty() {
break;
if let Ok(ref response) = response {
if !response.servers.is_empty() {
break;
}
debug_warn!(
"Server {server} responded with room aliases, but was empty? Response: {response:?}"
);
}
debug_warn!("Server {server} responded with room aliases, but was empty? Response: {response:?}");
}
}
}
if let Ok(response) = response {
let room_id = response.room_id;
let mut pre_servers = response.servers;
// since the room alias server responded, insert it into the list
pre_servers.push(room_alias.server_name().into());
return Ok((room_id, Some(pre_servers)));
}
Err(Error::BadRequest(
ErrorKind::NotFound,
"No servers could assist in resolving the room alias",
))
}
if let Ok(response) = response {
let room_id = response.room_id;
let mut pre_servers = response.servers;
// since the room alias server responded, insert it into the list
pre_servers.push(room_alias.server_name().into());
return Ok((room_id, Some(pre_servers)));
}
Err(Error::BadRequest(
ErrorKind::NotFound,
"No servers could assist in resolving the room alias",
))
}
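With the globals gone, the remote resolver also moves from a free async function in the `remote` submodule to an inherent method on the service (`impl super::Service`), so it can reach `self.services.sending`; its call site changes from `remote::resolve(..)` to `self.remote_resolve(..)`. Below is a stripped-down sketch of that module layout, with placeholder names and bodies rather than the real federation lookup.

```rust
mod alias {
    pub struct Service {
        pub(super) name: String,
    }

    // The submodule extends the parent's Service with extra methods,
    // mirroring the `impl super::Service` pattern in the diff.
    mod remote {
        impl super::Service {
            pub(super) fn remote_resolve(&self, alias: &str) -> String {
                // Placeholder for the federation lookup performed in the diff.
                format!("{}: resolved {alias}", self.name)
            }
        }
    }

    impl Service {
        pub fn resolve(&self, alias: &str) -> String {
            // Call site changed from `remote::resolve(alias)` to a method call.
            self.remote_resolve(alias)
        }
    }
}

fn main() {
    let svc = alias::Service {
        name: "alias service".to_owned(),
    };
    println!("{}", svc.resolve("#room:example.org"));
}
```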

View file

@ -3,8 +3,8 @@ use std::{
sync::{Arc, Mutex},
};
use conduit::{utils, utils::math::usize_from_f64, Result, Server};
use database::{Database, Map};
use conduit::{utils, utils::math::usize_from_f64, Result};
use database::Map;
use lru_cache::LruCache;
pub(super) struct Data {
@ -13,8 +13,9 @@ pub(super) struct Data {
}
impl Data {
pub(super) fn new(server: &Arc<Server>, db: &Arc<Database>) -> Self {
let config = &server.config;
pub(super) fn new(args: &crate::Args<'_>) -> Self {
let db = &args.db;
let config = &args.server.config;
let cache_size = f64::from(config.auth_chain_cache_capacity);
let cache_size = usize_from_f64(cache_size * config.cache_capacity_modifier).expect("valid cache size");
Self {

View file

@ -6,19 +6,29 @@ use std::{
};
use conduit::{debug, error, trace, validated, warn, Err, Result};
use data::Data;
use ruma::{EventId, RoomId};
use crate::services;
use self::data::Data;
use crate::{rooms, Dep};
pub struct Service {
services: Services,
db: Data,
}
struct Services {
short: Dep<rooms::short::Service>,
timeline: Dep<rooms::timeline::Service>,
}
impl crate::Service for Service {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
Ok(Arc::new(Self {
db: Data::new(args.server, args.db),
services: Services {
short: args.depend::<rooms::short::Service>("rooms::short"),
timeline: args.depend::<rooms::timeline::Service>("rooms::timeline"),
},
db: Data::new(&args),
}))
}
@ -27,7 +37,7 @@ impl crate::Service for Service {
impl Service {
pub async fn event_ids_iter<'a>(
&self, room_id: &RoomId, starting_events_: Vec<Arc<EventId>>,
&'a self, room_id: &RoomId, starting_events_: Vec<Arc<EventId>>,
) -> Result<impl Iterator<Item = Arc<EventId>> + 'a> {
let mut starting_events: Vec<&EventId> = Vec::with_capacity(starting_events_.len());
for starting_event in &starting_events_ {
@ -38,7 +48,7 @@ impl Service {
.get_auth_chain(room_id, &starting_events)
.await?
.into_iter()
.filter_map(move |sid| services().rooms.short.get_eventid_from_short(sid).ok()))
.filter_map(move |sid| self.services.short.get_eventid_from_short(sid).ok()))
}
#[tracing::instrument(skip_all, name = "auth_chain")]
@ -48,8 +58,8 @@ impl Service {
let started = std::time::Instant::now();
let mut buckets = [BUCKET; NUM_BUCKETS];
for (i, &short) in services()
.rooms
for (i, &short) in self
.services
.short
.multi_get_or_create_shorteventid(starting_events)?
.iter()
@ -140,7 +150,7 @@ impl Service {
while let Some(event_id) = todo.pop() {
trace!(?event_id, "processing auth event");
match services().rooms.timeline.get_pdu(&event_id) {
match self.services.timeline.get_pdu(&event_id) {
Ok(Some(pdu)) => {
if pdu.room_id != room_id {
return Err!(Request(Forbidden(
@ -150,10 +160,7 @@ impl Service {
)));
}
for auth_event in &pdu.auth_events {
let sauthevent = services()
.rooms
.short
.get_or_create_shorteventid(auth_event)?;
let sauthevent = self.services.short.get_or_create_shorteventid(auth_event)?;
if found.insert(sauthevent) {
trace!(?event_id, ?auth_event, "adding auth event to processing queue");

View file

@ -2,10 +2,10 @@ mod data;
use std::sync::Arc;
use data::Data;
use conduit::Result;
use ruma::{OwnedRoomId, RoomId};
use crate::Result;
use self::data::Data;
pub struct Service {
db: Data,

View file

@ -10,12 +10,11 @@ use std::{
};
use conduit::{
debug, debug_error, debug_info, err, error, info, trace,
debug, debug_error, debug_info, err, error, info, pdu, trace,
utils::{math::continue_exponential_backoff_secs, MutexMap},
warn, Error, Result,
warn, Error, PduEvent, Result,
};
use futures_util::Future;
pub use parse_incoming_pdu::parse_incoming_pdu;
use ruma::{
api::{
client::error::ErrorKind,
@ -36,13 +35,28 @@ use ruma::{
use tokio::sync::RwLock;
use super::state_compressor::CompressedStateEvent;
use crate::{pdu, services, PduEvent};
use crate::{globals, rooms, sending, Dep};
pub struct Service {
services: Services,
pub federation_handletime: StdRwLock<HandleTimeMap>,
pub mutex_federation: RoomMutexMap,
}
struct Services {
globals: Dep<globals::Service>,
sending: Dep<sending::Service>,
auth_chain: Dep<rooms::auth_chain::Service>,
metadata: Dep<rooms::metadata::Service>,
outlier: Dep<rooms::outlier::Service>,
pdu_metadata: Dep<rooms::pdu_metadata::Service>,
short: Dep<rooms::short::Service>,
state: Dep<rooms::state::Service>,
state_accessor: Dep<rooms::state_accessor::Service>,
state_compressor: Dep<rooms::state_compressor::Service>,
timeline: Dep<rooms::timeline::Service>,
}
type RoomMutexMap = MutexMap<OwnedRoomId, ()>;
type HandleTimeMap = HashMap<OwnedRoomId, (OwnedEventId, Instant)>;
@ -55,8 +69,21 @@ type AsyncRecursiveCanonicalJsonResult<'a> =
AsyncRecursiveType<'a, Result<(Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)>>;
impl crate::Service for Service {
fn build(_args: crate::Args<'_>) -> Result<Arc<Self>> {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
Ok(Arc::new(Self {
services: Services {
globals: args.depend::<globals::Service>("globals"),
sending: args.depend::<sending::Service>("sending"),
auth_chain: args.depend::<rooms::auth_chain::Service>("rooms::auth_chain"),
metadata: args.depend::<rooms::metadata::Service>("rooms::metadata"),
outlier: args.depend::<rooms::outlier::Service>("rooms::outlier"),
pdu_metadata: args.depend::<rooms::pdu_metadata::Service>("rooms::pdu_metadata"),
short: args.depend::<rooms::short::Service>("rooms::short"),
state: args.depend::<rooms::state::Service>("rooms::state"),
state_accessor: args.depend::<rooms::state_accessor::Service>("rooms::state_accessor"),
state_compressor: args.depend::<rooms::state_compressor::Service>("rooms::state_compressor"),
timeline: args.depend::<rooms::timeline::Service>("rooms::timeline"),
},
federation_handletime: HandleTimeMap::new().into(),
mutex_federation: RoomMutexMap::new(),
}))
@ -114,17 +141,17 @@ impl Service {
pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<Option<Vec<u8>>> {
// 1. Skip the PDU if we already have it as a timeline event
if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? {
if let Some(pdu_id) = self.services.timeline.get_pdu_id(event_id)? {
return Ok(Some(pdu_id.to_vec()));
}
// 1.1 Check the server is in the room
if !services().rooms.metadata.exists(room_id)? {
if !self.services.metadata.exists(room_id)? {
return Err(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server"));
}
// 1.2 Check if the room is disabled
if services().rooms.metadata.is_disabled(room_id)? {
if self.services.metadata.is_disabled(room_id)? {
return Err(Error::BadRequest(
ErrorKind::forbidden(),
"Federation of this room is currently disabled on this server.",
@ -147,8 +174,8 @@ impl Service {
self.acl_check(sender.server_name(), room_id)?;
// Fetch create event
let create_event = services()
.rooms
let create_event = self
.services
.state_accessor
.room_state_get(room_id, &StateEventType::RoomCreate, "")?
.ok_or_else(|| Error::bad_database("Failed to find create event in db."))?;
@ -156,8 +183,8 @@ impl Service {
// Procure the room version
let room_version_id = Self::get_room_version_id(&create_event)?;
let first_pdu_in_room = services()
.rooms
let first_pdu_in_room = self
.services
.timeline
.first_pdu_in_room(room_id)?
.ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?;
@ -208,7 +235,8 @@ impl Service {
Ok(()) => continue,
Err(e) => {
warn!("Prev event {} failed: {}", prev_id, e);
match services()
match self
.services
.globals
.bad_event_ratelimiter
.write()
@ -258,7 +286,7 @@ impl Service {
create_event: &Arc<PduEvent>, first_pdu_in_room: &Arc<PduEvent>, prev_id: &EventId,
) -> Result<()> {
// Check for disabled again because it might have changed
if services().rooms.metadata.is_disabled(room_id)? {
if self.services.metadata.is_disabled(room_id)? {
debug!(
"Federaton of room {room_id} is currently disabled on this server. Request by origin {origin} and \
event ID {event_id}"
@ -269,7 +297,8 @@ impl Service {
));
}
if let Some((time, tries)) = services()
if let Some((time, tries)) = self
.services
.globals
.bad_event_ratelimiter
.read()
@ -349,7 +378,7 @@ impl Service {
};
// Skip the PDU if it is redacted and we already have it as an outlier event
if services().rooms.timeline.get_pdu_json(event_id)?.is_some() {
if self.services.timeline.get_pdu_json(event_id)?.is_some() {
return Err(Error::BadRequest(
ErrorKind::InvalidParam,
"Event was redacted and we already knew about it",
@ -401,7 +430,7 @@ impl Service {
// Build map of auth events
let mut auth_events = HashMap::with_capacity(incoming_pdu.auth_events.len());
for id in &incoming_pdu.auth_events {
let Some(auth_event) = services().rooms.timeline.get_pdu(id)? else {
let Some(auth_event) = self.services.timeline.get_pdu(id)? else {
warn!("Could not find auth event {}", id);
continue;
};
@ -454,8 +483,7 @@ impl Service {
trace!("Validation successful.");
// 7. Persist the event as an outlier.
services()
.rooms
self.services
.outlier
.add_pdu_outlier(&incoming_pdu.event_id, &val)?;
@ -470,12 +498,12 @@ impl Service {
origin: &ServerName, room_id: &RoomId, pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<Option<Vec<u8>>> {
// Skip the PDU if we already have it as a timeline event
if let Ok(Some(pduid)) = services().rooms.timeline.get_pdu_id(&incoming_pdu.event_id) {
if let Ok(Some(pduid)) = self.services.timeline.get_pdu_id(&incoming_pdu.event_id) {
return Ok(Some(pduid.to_vec()));
}
if services()
.rooms
if self
.services
.pdu_metadata
.is_event_soft_failed(&incoming_pdu.event_id)?
{
@ -521,14 +549,13 @@ impl Service {
&incoming_pdu,
None::<PduEvent>, // TODO: third party invite
|k, s| {
services()
.rooms
self.services
.short
.get_shortstatekey(&k.to_string().into(), s)
.ok()
.flatten()
.and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey))
.and_then(|event_id| services().rooms.timeline.get_pdu(event_id).ok().flatten())
.and_then(|event_id| self.services.timeline.get_pdu(event_id).ok().flatten())
},
)
.map_err(|_e| Error::BadRequest(ErrorKind::forbidden(), "Auth check failed."))?;
@ -541,7 +568,7 @@ impl Service {
}
debug!("Gathering auth events");
let auth_events = services().rooms.state.get_auth_events(
let auth_events = self.services.state.get_auth_events(
room_id,
&incoming_pdu.kind,
&incoming_pdu.sender,
@ -562,7 +589,7 @@ impl Service {
&& match room_version_id {
V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => {
if let Some(redact_id) = &incoming_pdu.redacts {
!services().rooms.state_accessor.user_can_redact(
!self.services.state_accessor.user_can_redact(
redact_id,
&incoming_pdu.sender,
&incoming_pdu.room_id,
@ -577,7 +604,7 @@ impl Service {
.map_err(|_| Error::bad_database("Invalid content in redaction pdu."))?;
if let Some(redact_id) = &content.redacts {
!services().rooms.state_accessor.user_can_redact(
!self.services.state_accessor.user_can_redact(
redact_id,
&incoming_pdu.sender,
&incoming_pdu.room_id,
@ -594,12 +621,12 @@ impl Service {
// We start looking at current room state now, so let's lock the room
trace!("Locking the room");
let state_lock = services().rooms.state.mutex.lock(room_id).await;
let state_lock = self.services.state.mutex.lock(room_id).await;
// Now we calculate the set of extremities this room has after the incoming
// event has been applied. We start with the previous extremities (aka leaves)
trace!("Calculating extremities");
let mut extremities = services().rooms.state.get_forward_extremities(room_id)?;
let mut extremities = self.services.state.get_forward_extremities(room_id)?;
trace!("Calculated {} extremities", extremities.len());
// Remove any forward extremities that are referenced by this incoming event's
@ -609,22 +636,13 @@ impl Service {
}
// Only keep those extremities that were not referenced yet
extremities.retain(|id| {
!matches!(
services()
.rooms
.pdu_metadata
.is_event_referenced(room_id, id),
Ok(true)
)
});
extremities.retain(|id| !matches!(self.services.pdu_metadata.is_event_referenced(room_id, id), Ok(true)));
debug!("Retained {} extremities. Compressing state", extremities.len());
let state_ids_compressed = Arc::new(
state_at_incoming_event
.iter()
.map(|(shortstatekey, id)| {
services()
.rooms
self.services
.state_compressor
.compress_state_event(*shortstatekey, id)
})
@ -637,8 +655,8 @@ impl Service {
// We also add state after incoming event to the fork states
let mut state_after = state_at_incoming_event.clone();
if let Some(state_key) = &incoming_pdu.state_key {
let shortstatekey = services()
.rooms
let shortstatekey = self
.services
.short
.get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key)?;
@ -651,13 +669,12 @@ impl Service {
// Set the new room state to the resolved state
debug!("Forcing new room state");
let (sstatehash, new, removed) = services()
.rooms
let (sstatehash, new, removed) = self
.services
.state_compressor
.save_state(room_id, new_room_state)?;
services()
.rooms
self.services
.state
.force_state(room_id, sstatehash, new, removed, &state_lock)
.await?;
@ -667,8 +684,7 @@ impl Service {
// if not soft fail it
if soft_fail {
debug!("Soft failing event");
services()
.rooms
self.services
.timeline
.append_incoming_pdu(
&incoming_pdu,
@ -682,8 +698,7 @@ impl Service {
// Soft fail, we keep the event as an outlier but don't add it to the timeline
warn!("Event was soft failed: {:?}", incoming_pdu);
services()
.rooms
self.services
.pdu_metadata
.mark_event_soft_failed(&incoming_pdu.event_id)?;
@ -696,8 +711,8 @@ impl Service {
// Now that the event has passed all auth it is added into the timeline.
// We use the `state_at_event` instead of `state_after` so we accurately
// represent the state for this event.
let pdu_id = services()
.rooms
let pdu_id = self
.services
.timeline
.append_incoming_pdu(
&incoming_pdu,
@ -723,14 +738,14 @@ impl Service {
&self, room_id: &RoomId, room_version_id: &RoomVersionId, incoming_state: HashMap<u64, Arc<EventId>>,
) -> Result<Arc<HashSet<CompressedStateEvent>>> {
debug!("Loading current room state ids");
let current_sstatehash = services()
.rooms
let current_sstatehash = self
.services
.state
.get_room_shortstatehash(room_id)?
.expect("every room has state");
let current_state_ids = services()
.rooms
let current_state_ids = self
.services
.state_accessor
.state_full_ids(current_sstatehash)
.await?;
@ -740,8 +755,7 @@ impl Service {
let mut auth_chain_sets = Vec::with_capacity(fork_states.len());
for state in &fork_states {
auth_chain_sets.push(
services()
.rooms
self.services
.auth_chain
.event_ids_iter(room_id, state.iter().map(|(_, id)| id.clone()).collect())
.await?
@ -755,8 +769,7 @@ impl Service {
.map(|map| {
map.into_iter()
.filter_map(|(k, id)| {
services()
.rooms
self.services
.short
.get_statekey_from_short(k)
.map(|(ty, st_key)| ((ty.to_string().into(), st_key), id))
@ -766,11 +779,11 @@ impl Service {
})
.collect();
let lock = services().globals.stateres_mutex.lock();
let lock = self.services.globals.stateres_mutex.lock();
debug!("Resolving state");
let state_resolve = state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| {
let res = services().rooms.timeline.get_pdu(id);
let res = self.services.timeline.get_pdu(id);
if let Err(e) = &res {
error!("Failed to fetch event: {}", e);
}
@ -793,12 +806,11 @@ impl Service {
let new_room_state = state
.into_iter()
.map(|((event_type, state_key), event_id)| {
let shortstatekey = services()
.rooms
let shortstatekey = self
.services
.short
.get_or_create_shortstatekey(&event_type.to_string().into(), &state_key)?;
services()
.rooms
self.services
.state_compressor
.compress_state_event(shortstatekey, &event_id)
})
@ -814,15 +826,14 @@ impl Service {
&self, incoming_pdu: &Arc<PduEvent>,
) -> Result<Option<HashMap<u64, Arc<EventId>>>> {
let prev_event = &*incoming_pdu.prev_events[0];
let prev_event_sstatehash = services()
.rooms
let prev_event_sstatehash = self
.services
.state_accessor
.pdu_shortstatehash(prev_event)?;
let state = if let Some(shortstatehash) = prev_event_sstatehash {
Some(
services()
.rooms
self.services
.state_accessor
.state_full_ids(shortstatehash)
.await,
@ -833,8 +844,8 @@ impl Service {
if let Some(Ok(mut state)) = state {
debug!("Using cached state");
let prev_pdu = services()
.rooms
let prev_pdu = self
.services
.timeline
.get_pdu(prev_event)
.ok()
@ -842,8 +853,8 @@ impl Service {
.ok_or_else(|| Error::bad_database("Could not find prev event, but we know the state."))?;
if let Some(state_key) = &prev_pdu.state_key {
let shortstatekey = services()
.rooms
let shortstatekey = self
.services
.short
.get_or_create_shortstatekey(&prev_pdu.kind.to_string().into(), state_key)?;
@ -866,13 +877,13 @@ impl Service {
let mut okay = true;
for prev_eventid in &incoming_pdu.prev_events {
let Ok(Some(prev_event)) = services().rooms.timeline.get_pdu(prev_eventid) else {
let Ok(Some(prev_event)) = self.services.timeline.get_pdu(prev_eventid) else {
okay = false;
break;
};
let Ok(Some(sstatehash)) = services()
.rooms
let Ok(Some(sstatehash)) = self
.services
.state_accessor
.pdu_shortstatehash(prev_eventid)
else {
@ -891,15 +902,15 @@ impl Service {
let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len());
for (sstatehash, prev_event) in extremity_sstatehashes {
let mut leaf_state: HashMap<_, _> = services()
.rooms
let mut leaf_state: HashMap<_, _> = self
.services
.state_accessor
.state_full_ids(sstatehash)
.await?;
if let Some(state_key) = &prev_event.state_key {
let shortstatekey = services()
.rooms
let shortstatekey = self
.services
.short
.get_or_create_shortstatekey(&prev_event.kind.to_string().into(), state_key)?;
leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id));
@ -910,7 +921,7 @@ impl Service {
let mut starting_events = Vec::with_capacity(leaf_state.len());
for (k, id) in leaf_state {
if let Ok((ty, st_key)) = services().rooms.short.get_statekey_from_short(k) {
if let Ok((ty, st_key)) = self.services.short.get_statekey_from_short(k) {
// FIXME: Undo .to_string().into() when StateMap
// is updated to use StateEventType
state.insert((ty.to_string().into(), st_key), id.clone());
@ -921,8 +932,7 @@ impl Service {
}
auth_chain_sets.push(
services()
.rooms
self.services
.auth_chain
.event_ids_iter(room_id, starting_events)
.await?
@ -932,9 +942,9 @@ impl Service {
fork_states.push(state);
}
let lock = services().globals.stateres_mutex.lock();
let lock = self.services.globals.stateres_mutex.lock();
let result = state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| {
let res = services().rooms.timeline.get_pdu(id);
let res = self.services.timeline.get_pdu(id);
if let Err(e) = &res {
error!("Failed to fetch event: {}", e);
}
@ -947,8 +957,8 @@ impl Service {
new_state
.into_iter()
.map(|((event_type, state_key), event_id)| {
let shortstatekey = services()
.rooms
let shortstatekey = self
.services
.short
.get_or_create_shortstatekey(&event_type.to_string().into(), &state_key)?;
Ok((shortstatekey, event_id))
@ -974,7 +984,8 @@ impl Service {
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>, event_id: &EventId,
) -> Result<Option<HashMap<u64, Arc<EventId>>>> {
debug!("Fetching state ids");
match services()
match self
.services
.sending
.send_federation_request(
origin,
@ -1004,8 +1015,8 @@ impl Service {
.clone()
.ok_or_else(|| Error::bad_database("Found non-state pdu in state events."))?;
let shortstatekey = services()
.rooms
let shortstatekey = self
.services
.short
.get_or_create_shortstatekey(&pdu.kind.to_string().into(), &state_key)?;
@ -1022,8 +1033,8 @@ impl Service {
}
// The original create event must still be in the state
let create_shortstatekey = services()
.rooms
let create_shortstatekey = self
.services
.short
.get_shortstatekey(&StateEventType::RoomCreate, "")?
.expect("Room exists");
@ -1056,7 +1067,8 @@ impl Service {
) -> AsyncRecursiveCanonicalJsonVec<'a> {
Box::pin(async move {
let back_off = |id| async {
match services()
match self
.services
.globals
.bad_event_ratelimiter
.write()
@ -1075,7 +1087,7 @@ impl Service {
// a. Look in the main timeline (pduid_pdu tree)
// b. Look at outlier pdu tree
// (get_pdu_json checks both)
if let Ok(Some(local_pdu)) = services().rooms.timeline.get_pdu(id) {
if let Ok(Some(local_pdu)) = self.services.timeline.get_pdu(id) {
trace!("Found {} in db", id);
events_with_auth_events.push((id, Some(local_pdu), vec![]));
continue;
@ -1089,7 +1101,8 @@ impl Service {
let mut events_all = HashSet::with_capacity(todo_auth_events.len());
let mut i: u64 = 0;
while let Some(next_id) = todo_auth_events.pop() {
if let Some((time, tries)) = services()
if let Some((time, tries)) = self
.services
.globals
.bad_event_ratelimiter
.read()
@ -1114,13 +1127,14 @@ impl Service {
tokio::task::yield_now().await;
}
if let Ok(Some(_)) = services().rooms.timeline.get_pdu(&next_id) {
if let Ok(Some(_)) = self.services.timeline.get_pdu(&next_id) {
trace!("Found {} in db", next_id);
continue;
}
debug!("Fetching {} over federation.", next_id);
match services()
match self
.services
.sending
.send_federation_request(
origin,
@ -1195,7 +1209,8 @@ impl Service {
pdus.push((local_pdu, None));
}
for (next_id, value) in events_in_reverse_order.iter().rev() {
if let Some((time, tries)) = services()
if let Some((time, tries)) = self
.services
.globals
.bad_event_ratelimiter
.read()
@ -1244,8 +1259,8 @@ impl Service {
let mut eventid_info = HashMap::new();
let mut todo_outlier_stack: Vec<Arc<EventId>> = initial_set;
let first_pdu_in_room = services()
.rooms
let first_pdu_in_room = self
.services
.timeline
.first_pdu_in_room(room_id)?
.ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?;
@ -1267,19 +1282,18 @@ impl Service {
{
Self::check_room_id(room_id, &pdu)?;
if amount > services().globals.max_fetch_prev_events() {
if amount > self.services.globals.max_fetch_prev_events() {
// Max limit reached
debug!(
"Max prev event limit reached! Limit: {}",
services().globals.max_fetch_prev_events()
self.services.globals.max_fetch_prev_events()
);
graph.insert(prev_event_id.clone(), HashSet::new());
continue;
}
if let Some(json) = json_opt.or_else(|| {
services()
.rooms
self.services
.outlier
.get_outlier_pdu_json(&prev_event_id)
.ok()
@ -1335,8 +1349,7 @@ impl Service {
#[tracing::instrument(skip_all)]
pub fn acl_check(&self, server_name: &ServerName, room_id: &RoomId) -> Result<()> {
let acl_event = if let Some(acl) =
services()
.rooms
self.services
.state_accessor
.room_state_get(room_id, &StateEventType::RoomServerAcl, "")?
{

View file

@ -1,29 +1,28 @@
use conduit::{Err, Error, Result};
use conduit::{pdu::gen_event_id_canonical_json, warn, Err, Error, Result};
use ruma::{api::client::error::ErrorKind, CanonicalJsonObject, OwnedEventId, OwnedRoomId, RoomId};
use serde_json::value::RawValue as RawJsonValue;
use tracing::warn;
use crate::{pdu::gen_event_id_canonical_json, services};
impl super::Service {
pub fn parse_incoming_pdu(&self, pdu: &RawJsonValue) -> Result<(OwnedEventId, CanonicalJsonObject, OwnedRoomId)> {
let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
warn!("Error parsing incoming event {pdu:?}: {e:?}");
Error::BadServerResponse("Invalid PDU in server response")
})?;
pub fn parse_incoming_pdu(pdu: &RawJsonValue) -> Result<(OwnedEventId, CanonicalJsonObject, OwnedRoomId)> {
let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
warn!("Error parsing incoming event {pdu:?}: {e:?}");
Error::BadServerResponse("Invalid PDU in server response")
})?;
let room_id: OwnedRoomId = value
.get("room_id")
.and_then(|id| RoomId::parse(id.as_str()?).ok())
.ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Invalid room id in pdu"))?;
let room_id: OwnedRoomId = value
.get("room_id")
.and_then(|id| RoomId::parse(id.as_str()?).ok())
.ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Invalid room id in pdu"))?;
let Ok(room_version_id) = self.services.state.get_room_version(&room_id) else {
return Err!("Server is not in room {room_id}");
};
let Ok(room_version_id) = services().rooms.state.get_room_version(&room_id) else {
return Err!("Server is not in room {room_id}");
};
let Ok((event_id, value)) = gen_event_id_canonical_json(pdu, &room_version_id) else {
// Event could not be converted to canonical json
return Err!(Request(InvalidParam("Could not convert event to canonical json.")));
};
let Ok((event_id, value)) = gen_event_id_canonical_json(pdu, &room_version_id) else {
// Event could not be converted to canonical json
return Err!(Request(InvalidParam("Could not convert event to canonical json.")));
};
Ok((event_id, value, room_id))
Ok((event_id, value, room_id))
}
}

View file

@ -3,7 +3,7 @@ use std::{
time::{Duration, SystemTime},
};
use conduit::{debug, error, info, trace, warn};
use conduit::{debug, error, info, trace, warn, Error, Result};
use futures_util::{stream::FuturesUnordered, StreamExt};
use ruma::{
api::federation::{
@ -21,8 +21,6 @@ use ruma::{
use serde_json::value::RawValue as RawJsonValue;
use tokio::sync::{RwLock, RwLockWriteGuard};
use crate::{services, Error, Result};
impl super::Service {
pub async fn fetch_required_signing_keys<'a, E>(
&'a self, events: E, pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
@ -147,7 +145,8 @@ impl super::Service {
debug!("Loading signing keys for {}", origin);
let result: BTreeMap<_, _> = services()
let result: BTreeMap<_, _> = self
.services
.globals
.signing_keys_for(origin)?
.into_iter()
@ -171,9 +170,10 @@ impl super::Service {
&self, mut servers: BTreeMap<OwnedServerName, BTreeMap<OwnedServerSigningKeyId, QueryCriteria>>,
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<()> {
for server in services().globals.trusted_servers() {
for server in self.services.globals.trusted_servers() {
debug!("Asking batch signing keys from trusted server {}", server);
match services()
match self
.services
.sending
.send_federation_request(
server,
@ -199,7 +199,8 @@ impl super::Service {
// TODO: Check signature from trusted server?
servers.remove(&k.server_name);
let result = services()
let result = self
.services
.globals
.db
.add_signing_key(&k.server_name, k.clone())?
@ -234,7 +235,7 @@ impl super::Service {
.into_keys()
.map(|server| async move {
(
services()
self.services
.sending
.send_federation_request(&server, get_server_keys::v2::Request::new())
.await,
@ -248,7 +249,8 @@ impl super::Service {
if let (Ok(get_keys_response), origin) = result {
debug!("Result is from {origin}");
if let Ok(key) = get_keys_response.server_key.deserialize() {
let result: BTreeMap<_, _> = services()
let result: BTreeMap<_, _> = self
.services
.globals
.db
.add_signing_key(&origin, key)?
@ -297,7 +299,7 @@ impl super::Service {
return Ok(());
}
if services().globals.query_trusted_key_servers_first() {
if self.services.globals.query_trusted_key_servers_first() {
info!(
"query_trusted_key_servers_first is set to true, querying notary trusted key servers first for \
homeserver signing keys."
@ -349,7 +351,8 @@ impl super::Service {
) -> Result<BTreeMap<String, Base64>> {
let contains_all_ids = |keys: &BTreeMap<String, Base64>| signature_ids.iter().all(|id| keys.contains_key(id));
let mut result: BTreeMap<_, _> = services()
let mut result: BTreeMap<_, _> = self
.services
.globals
.signing_keys_for(origin)?
.into_iter()
@ -362,15 +365,16 @@ impl super::Service {
}
// I didn't split this out into its own function because it's relatively small
if services().globals.query_trusted_key_servers_first() {
if self.services.globals.query_trusted_key_servers_first() {
info!(
"query_trusted_key_servers_first is set to true, querying notary trusted servers first for {origin} \
keys"
);
for server in services().globals.trusted_servers() {
for server in self.services.globals.trusted_servers() {
debug!("Asking notary server {server} for {origin}'s signing key");
if let Some(server_keys) = services()
if let Some(server_keys) = self
.services
.sending
.send_federation_request(
server,
@ -394,7 +398,10 @@ impl super::Service {
}) {
debug!("Got signing keys: {:?}", server_keys);
for k in server_keys {
services().globals.db.add_signing_key(origin, k.clone())?;
self.services
.globals
.db
.add_signing_key(origin, k.clone())?;
result.extend(
k.verify_keys
.into_iter()
@ -414,14 +421,15 @@ impl super::Service {
}
debug!("Asking {origin} for their signing keys over federation");
if let Some(server_key) = services()
if let Some(server_key) = self
.services
.sending
.send_federation_request(origin, get_server_keys::v2::Request::new())
.await
.ok()
.and_then(|resp| resp.server_key.deserialize().ok())
{
services()
self.services
.globals
.db
.add_signing_key(origin, server_key.clone())?;
@ -447,14 +455,15 @@ impl super::Service {
info!("query_trusted_key_servers_first is set to false, querying {origin} first");
debug!("Asking {origin} for their signing keys over federation");
if let Some(server_key) = services()
if let Some(server_key) = self
.services
.sending
.send_federation_request(origin, get_server_keys::v2::Request::new())
.await
.ok()
.and_then(|resp| resp.server_key.deserialize().ok())
{
services()
self.services
.globals
.db
.add_signing_key(origin, server_key.clone())?;
@ -477,9 +486,10 @@ impl super::Service {
}
}
for server in services().globals.trusted_servers() {
for server in self.services.globals.trusted_servers() {
debug!("Asking notary server {server} for {origin}'s signing key");
if let Some(server_keys) = services()
if let Some(server_keys) = self
.services
.sending
.send_federation_request(
server,
@ -503,7 +513,10 @@ impl super::Service {
}) {
debug!("Got signing keys: {:?}", server_keys);
for k in server_keys {
services().globals.db.add_signing_key(origin, k.clone())?;
self.services
.globals
.db
.add_signing_key(origin, k.clone())?;
result.extend(
k.verify_keys
.into_iter()

View file

@ -6,10 +6,10 @@ use std::{
sync::{Arc, Mutex},
};
use data::Data;
use conduit::{PduCount, Result};
use ruma::{DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, RoomId, UserId};
use crate::{PduCount, Result};
use self::data::Data;
pub struct Service {
db: Data,

View file

@ -1,30 +1,39 @@
use std::sync::Arc;
use conduit::{error, utils, Error, Result};
use database::{Database, Map};
use database::Map;
use ruma::{OwnedRoomId, RoomId};
use crate::services;
use crate::{rooms, Dep};
pub(super) struct Data {
disabledroomids: Arc<Map>,
bannedroomids: Arc<Map>,
roomid_shortroomid: Arc<Map>,
pduid_pdu: Arc<Map>,
services: Services,
}
struct Services {
short: Dep<rooms::short::Service>,
}
impl Data {
pub(super) fn new(db: &Arc<Database>) -> Self {
pub(super) fn new(args: &crate::Args<'_>) -> Self {
let db = &args.db;
Self {
disabledroomids: db["disabledroomids"].clone(),
bannedroomids: db["bannedroomids"].clone(),
roomid_shortroomid: db["roomid_shortroomid"].clone(),
pduid_pdu: db["pduid_pdu"].clone(),
services: Services {
short: args.depend::<rooms::short::Service>("rooms::short"),
},
}
}
pub(super) fn exists(&self, room_id: &RoomId) -> Result<bool> {
let prefix = match services().rooms.short.get_shortroomid(room_id)? {
let prefix = match self.services.short.get_shortroomid(room_id)? {
Some(b) => b.to_be_bytes().to_vec(),
None => return Ok(false),
};

View file

@ -3,9 +3,10 @@ mod data;
use std::sync::Arc;
use conduit::Result;
use data::Data;
use ruma::{OwnedRoomId, RoomId};
use self::data::Data;
pub struct Service {
db: Data,
}
@ -13,7 +14,7 @@ pub struct Service {
impl crate::Service for Service {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
Ok(Arc::new(Self {
db: Data::new(args.db),
db: Data::new(&args),
}))
}

View file

@ -33,13 +33,13 @@ pub struct Service {
pub read_receipt: Arc<read_receipt::Service>,
pub search: Arc<search::Service>,
pub short: Arc<short::Service>,
pub spaces: Arc<spaces::Service>,
pub state: Arc<state::Service>,
pub state_accessor: Arc<state_accessor::Service>,
pub state_cache: Arc<state_cache::Service>,
pub state_compressor: Arc<state_compressor::Service>,
pub timeline: Arc<timeline::Service>,
pub threads: Arc<threads::Service>,
pub timeline: Arc<timeline::Service>,
pub typing: Arc<typing::Service>,
pub spaces: Arc<spaces::Service>,
pub user: Arc<user::Service>,
}

View file

@ -1,26 +1,35 @@
use std::{mem::size_of, sync::Arc};
use conduit::{utils, Error, Result};
use database::{Database, Map};
use conduit::{utils, Error, PduCount, PduEvent, Result};
use database::Map;
use ruma::{EventId, RoomId, UserId};
use crate::{services, PduCount, PduEvent};
use crate::{rooms, Dep};
pub(super) struct Data {
tofrom_relation: Arc<Map>,
referencedevents: Arc<Map>,
softfailedeventids: Arc<Map>,
services: Services,
}
struct Services {
timeline: Dep<rooms::timeline::Service>,
}
type PdusIterItem = Result<(PduCount, PduEvent)>;
type PdusIterator<'a> = Box<dyn Iterator<Item = PdusIterItem> + 'a>;
impl Data {
pub(super) fn new(db: &Arc<Database>) -> Self {
pub(super) fn new(args: &crate::Args<'_>) -> Self {
let db = &args.db;
Self {
tofrom_relation: db["tofrom_relation"].clone(),
referencedevents: db["referencedevents"].clone(),
softfailedeventids: db["softfailedeventids"].clone(),
services: Services {
timeline: args.depend::<rooms::timeline::Service>("rooms::timeline"),
},
}
}
@ -57,8 +66,8 @@ impl Data {
let mut pduid = shortroomid.to_be_bytes().to_vec();
pduid.extend_from_slice(&from.to_be_bytes());
let mut pdu = services()
.rooms
let mut pdu = self
.services
.timeline
.get_pdu_from_id(&pduid)?
.ok_or_else(|| Error::bad_database("Pdu in tofrom_relation is invalid."))?;

View file

@ -2,8 +2,7 @@ mod data;
use std::sync::Arc;
use conduit::Result;
use data::Data;
use conduit::{PduCount, PduEvent, Result};
use ruma::{
api::{client::relations::get_relating_events, Direction},
events::{relation::RelationType, TimelineEventType},
@ -11,12 +10,20 @@ use ruma::{
};
use serde::Deserialize;
use crate::{services, PduCount, PduEvent};
use self::data::Data;
use crate::{rooms, Dep};
pub struct Service {
services: Services,
db: Data,
}
struct Services {
short: Dep<rooms::short::Service>,
state_accessor: Dep<rooms::state_accessor::Service>,
timeline: Dep<rooms::timeline::Service>,
}
#[derive(Clone, Debug, Deserialize)]
struct ExtractRelType {
rel_type: RelationType,
@ -30,7 +37,12 @@ struct ExtractRelatesToEventId {
impl crate::Service for Service {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
Ok(Arc::new(Self {
db: Data::new(args.db),
services: Services {
short: args.depend::<rooms::short::Service>("rooms::short"),
state_accessor: args.depend::<rooms::state_accessor::Service>("rooms::state_accessor"),
timeline: args.depend::<rooms::timeline::Service>("rooms::timeline"),
},
db: Data::new(&args),
}))
}
@ -101,8 +113,7 @@ impl Service {
})
.take(limit)
.filter(|(_, pdu)| {
services()
.rooms
self.services
.state_accessor
.user_can_see_event(sender_user, room_id, &pdu.event_id)
.unwrap_or(false)
@ -147,8 +158,7 @@ impl Service {
})
.take(limit)
.filter(|(_, pdu)| {
services()
.rooms
self.services
.state_accessor
.user_can_see_event(sender_user, room_id, &pdu.event_id)
.unwrap_or(false)
@ -180,10 +190,10 @@ impl Service {
pub fn relations_until<'a>(
&'a self, user_id: &'a UserId, room_id: &'a RoomId, target: &'a EventId, until: PduCount, max_depth: u8,
) -> Result<Vec<(PduCount, PduEvent)>> {
let room_id = services().rooms.short.get_or_create_shortroomid(room_id)?;
let room_id = self.services.short.get_or_create_shortroomid(room_id)?;
#[allow(unknown_lints)]
#[allow(clippy::manual_unwrap_or_default)]
let target = match services().rooms.timeline.get_pdu_count(target)? {
let target = match self.services.timeline.get_pdu_count(target)? {
Some(PduCount::Normal(c)) => c,
// TODO: Support backfilled relations
_ => 0, // This will result in an empty iterator

View file

@ -1,14 +1,14 @@
use std::{mem::size_of, sync::Arc};
use conduit::{utils, Error, Result};
use database::{Database, Map};
use database::Map;
use ruma::{
events::{receipt::ReceiptEvent, AnySyncEphemeralRoomEvent},
serde::Raw,
CanonicalJsonObject, OwnedUserId, RoomId, UserId,
};
use crate::services;
use crate::{globals, Dep};
type AnySyncEphemeralRoomEventIter<'a> =
Box<dyn Iterator<Item = Result<(OwnedUserId, u64, Raw<AnySyncEphemeralRoomEvent>)>> + 'a>;
@ -16,15 +16,24 @@ type AnySyncEphemeralRoomEventIter<'a> =
pub(super) struct Data {
roomuserid_privateread: Arc<Map>,
roomuserid_lastprivatereadupdate: Arc<Map>,
services: Services,
readreceiptid_readreceipt: Arc<Map>,
}
struct Services {
globals: Dep<globals::Service>,
}
impl Data {
pub(super) fn new(db: &Arc<Database>) -> Self {
pub(super) fn new(args: &crate::Args<'_>) -> Self {
let db = &args.db;
Self {
roomuserid_privateread: db["roomuserid_privateread"].clone(),
roomuserid_lastprivatereadupdate: db["roomuserid_lastprivatereadupdate"].clone(),
readreceiptid_readreceipt: db["readreceiptid_readreceipt"].clone(),
services: Services {
globals: args.depend::<globals::Service>("globals"),
},
}
}
@ -51,7 +60,7 @@ impl Data {
}
let mut room_latest_id = prefix;
room_latest_id.extend_from_slice(&services().globals.next_count()?.to_be_bytes());
room_latest_id.extend_from_slice(&self.services.globals.next_count()?.to_be_bytes());
room_latest_id.push(0xFF);
room_latest_id.extend_from_slice(user_id.as_bytes());
@ -108,7 +117,7 @@ impl Data {
.insert(&key, &count.to_be_bytes())?;
self.roomuserid_lastprivatereadupdate
.insert(&key, &services().globals.next_count()?.to_be_bytes())
.insert(&key, &self.services.globals.next_count()?.to_be_bytes())
}
pub(super) fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result<Option<u64>> {

View file

@ -6,16 +6,24 @@ use conduit::Result;
use data::Data;
use ruma::{events::receipt::ReceiptEvent, serde::Raw, OwnedUserId, RoomId, UserId};
use crate::services;
use crate::{sending, Dep};
pub struct Service {
services: Services,
db: Data,
}
struct Services {
sending: Dep<sending::Service>,
}
impl crate::Service for Service {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
Ok(Arc::new(Self {
db: Data::new(args.db),
services: Services {
sending: args.depend::<sending::Service>("sending"),
},
db: Data::new(&args),
}))
}
@ -26,7 +34,7 @@ impl Service {
/// Replaces the previous read receipt.
pub fn readreceipt_update(&self, user_id: &UserId, room_id: &RoomId, event: &ReceiptEvent) -> Result<()> {
self.db.readreceipt_update(user_id, room_id, event)?;
services().sending.flush_room(room_id)?;
self.services.sending.flush_room(room_id)?;
Ok(())
}

View file

@ -1,21 +1,30 @@
use std::sync::Arc;
use conduit::{utils, Result};
use database::{Database, Map};
use database::Map;
use ruma::RoomId;
use crate::services;
use crate::{rooms, Dep};
type SearchPdusResult<'a> = Result<Option<(Box<dyn Iterator<Item = Vec<u8>> + 'a>, Vec<String>)>>;
pub(super) struct Data {
tokenids: Arc<Map>,
services: Services,
}
struct Services {
short: Dep<rooms::short::Service>,
}
impl Data {
pub(super) fn new(db: &Arc<Database>) -> Self {
pub(super) fn new(args: &crate::Args<'_>) -> Self {
let db = &args.db;
Self {
tokenids: db["tokenids"].clone(),
services: Services {
short: args.depend::<rooms::short::Service>("rooms::short"),
},
}
}
@ -51,8 +60,8 @@ impl Data {
}
pub(super) fn search_pdus<'a>(&'a self, room_id: &RoomId, search_string: &str) -> SearchPdusResult<'a> {
let prefix = services()
.rooms
let prefix = self
.services
.short
.get_shortroomid(room_id)?
.expect("room exists")

View file

@ -13,7 +13,7 @@ pub struct Service {
impl crate::Service for Service {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
Ok(Arc::new(Self {
db: Data::new(args.db),
db: Data::new(&args),
}))
}

View file

@ -1,10 +1,10 @@
use std::sync::Arc;
use conduit::{utils, warn, Error, Result};
use database::{Database, Map};
use database::Map;
use ruma::{events::StateEventType, EventId, RoomId};
use crate::services;
use crate::{globals, Dep};
pub(super) struct Data {
eventid_shorteventid: Arc<Map>,
@ -13,10 +13,16 @@ pub(super) struct Data {
shortstatekey_statekey: Arc<Map>,
roomid_shortroomid: Arc<Map>,
statehash_shortstatehash: Arc<Map>,
services: Services,
}
struct Services {
globals: Dep<globals::Service>,
}
impl Data {
pub(super) fn new(db: &Arc<Database>) -> Self {
pub(super) fn new(args: &crate::Args<'_>) -> Self {
let db = &args.db;
Self {
eventid_shorteventid: db["eventid_shorteventid"].clone(),
shorteventid_eventid: db["shorteventid_eventid"].clone(),
@ -24,6 +30,9 @@ impl Data {
shortstatekey_statekey: db["shortstatekey_statekey"].clone(),
roomid_shortroomid: db["roomid_shortroomid"].clone(),
statehash_shortstatehash: db["statehash_shortstatehash"].clone(),
services: Services {
globals: args.depend::<globals::Service>("globals"),
},
}
}
@ -31,7 +40,7 @@ impl Data {
let short = if let Some(shorteventid) = self.eventid_shorteventid.get(event_id.as_bytes())? {
utils::u64_from_bytes(&shorteventid).map_err(|_| Error::bad_database("Invalid shorteventid in db."))?
} else {
let shorteventid = services().globals.next_count()?;
let shorteventid = self.services.globals.next_count()?;
self.eventid_shorteventid
.insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?;
self.shorteventid_eventid
@ -59,7 +68,7 @@ impl Data {
utils::u64_from_bytes(short).map_err(|_| Error::bad_database("Invalid shorteventid in db."))?,
),
None => {
let short = services().globals.next_count()?;
let short = self.services.globals.next_count()?;
self.eventid_shorteventid
.insert(keys[i], &short.to_be_bytes())?;
self.shorteventid_eventid
@ -98,7 +107,7 @@ impl Data {
let short = if let Some(shortstatekey) = self.statekey_shortstatekey.get(&statekey_vec)? {
utils::u64_from_bytes(&shortstatekey).map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?
} else {
let shortstatekey = services().globals.next_count()?;
let shortstatekey = self.services.globals.next_count()?;
self.statekey_shortstatekey
.insert(&statekey_vec, &shortstatekey.to_be_bytes())?;
self.shortstatekey_statekey
@ -158,7 +167,7 @@ impl Data {
true,
)
} else {
let shortstatehash = services().globals.next_count()?;
let shortstatehash = self.services.globals.next_count()?;
self.statehash_shortstatehash
.insert(state_hash, &shortstatehash.to_be_bytes())?;
(shortstatehash, false)
@ -176,7 +185,7 @@ impl Data {
Ok(if let Some(short) = self.roomid_shortroomid.get(room_id.as_bytes())? {
utils::u64_from_bytes(&short).map_err(|_| Error::bad_database("Invalid shortroomid in db."))?
} else {
let short = services().globals.next_count()?;
let short = self.services.globals.next_count()?;
self.roomid_shortroomid
.insert(room_id.as_bytes(), &short.to_be_bytes())?;
short

View file

@ -3,9 +3,10 @@ mod data;
use std::sync::Arc;
use conduit::Result;
use data::Data;
use ruma::{events::StateEventType, EventId, RoomId};
use self::data::Data;
pub struct Service {
db: Data,
}
@ -13,7 +14,7 @@ pub struct Service {
impl crate::Service for Service {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
Ok(Arc::new(Self {
db: Data::new(args.db),
db: Data::new(&args),
}))
}

View file

@ -28,7 +28,7 @@ use ruma::{
};
use tokio::sync::Mutex;
use crate::services;
use crate::{rooms, sending, Dep};
pub struct CachedSpaceHierarchySummary {
summary: SpaceHierarchyParentSummary,
@ -119,42 +119,18 @@ enum Identifier<'a> {
}
pub struct Service {
services: Services,
pub roomid_spacehierarchy_cache: Mutex<LruCache<OwnedRoomId, Option<CachedSpaceHierarchySummary>>>,
}
// Here because cannot implement `From` across ruma-federation-api and
// ruma-client-api types
impl From<CachedSpaceHierarchySummary> for SpaceHierarchyRoomsChunk {
fn from(value: CachedSpaceHierarchySummary) -> Self {
let SpaceHierarchyParentSummary {
canonical_alias,
name,
num_joined_members,
room_id,
topic,
world_readable,
guest_can_join,
avatar_url,
join_rule,
room_type,
children_state,
..
} = value.summary;
Self {
canonical_alias,
name,
num_joined_members,
room_id,
topic,
world_readable,
guest_can_join,
avatar_url,
join_rule,
room_type,
children_state,
}
}
struct Services {
state_accessor: Dep<rooms::state_accessor::Service>,
state_cache: Dep<rooms::state_cache::Service>,
state: Dep<rooms::state::Service>,
short: Dep<rooms::short::Service>,
event_handler: Dep<rooms::event_handler::Service>,
timeline: Dep<rooms::timeline::Service>,
sending: Dep<sending::Service>,
}
impl crate::Service for Service {
@ -163,6 +139,15 @@ impl crate::Service for Service {
let cache_size = f64::from(config.roomid_spacehierarchy_cache_capacity);
let cache_size = cache_size * config.cache_capacity_modifier;
Ok(Arc::new(Self {
services: Services {
state_accessor: args.depend::<rooms::state_accessor::Service>("rooms::state_accessor"),
state_cache: args.depend::<rooms::state_cache::Service>("rooms::state_cache"),
state: args.depend::<rooms::state::Service>("rooms::state"),
short: args.depend::<rooms::short::Service>("rooms::short"),
event_handler: args.depend::<rooms::event_handler::Service>("rooms::event_handler"),
timeline: args.depend::<rooms::timeline::Service>("rooms::timeline"),
sending: args.depend::<sending::Service>("sending"),
},
roomid_spacehierarchy_cache: Mutex::new(LruCache::new(usize_from_f64(cache_size)?)),
}))
}
@ -226,7 +211,7 @@ impl Service {
.as_ref()
{
return Ok(if let Some(cached) = cached {
if is_accessable_child(
if self.is_accessible_child(
current_room,
&cached.summary.join_rule,
&identifier,
@ -242,8 +227,8 @@ impl Service {
}
Ok(
if let Some(children_pdus) = get_stripped_space_child_events(current_room).await? {
let summary = Self::get_room_summary(current_room, children_pdus, &identifier);
if let Some(children_pdus) = self.get_stripped_space_child_events(current_room).await? {
let summary = self.get_room_summary(current_room, children_pdus, &identifier);
if let Ok(summary) = summary {
self.roomid_spacehierarchy_cache.lock().await.insert(
current_room.clone(),
@ -269,7 +254,8 @@ impl Service {
) -> Result<Option<SummaryAccessibility>> {
for server in via {
debug_info!("Asking {server} for /hierarchy");
let Ok(response) = services()
let Ok(response) = self
.services
.sending
.send_federation_request(
server,
@ -325,7 +311,10 @@ impl Service {
avatar_url,
join_rule,
room_type,
children_state: get_stripped_space_child_events(&room_id).await?.unwrap(),
children_state: self
.get_stripped_space_child_events(&room_id)
.await?
.unwrap(),
allowed_room_ids,
}
},
@ -333,7 +322,7 @@ impl Service {
);
}
}
if is_accessable_child(
if self.is_accessible_child(
current_room,
&response.room.join_rule,
&Identifier::UserId(user_id),
@ -370,12 +359,13 @@ impl Service {
}
fn get_room_summary(
current_room: &OwnedRoomId, children_state: Vec<Raw<HierarchySpaceChildEvent>>, identifier: &Identifier<'_>,
&self, current_room: &OwnedRoomId, children_state: Vec<Raw<HierarchySpaceChildEvent>>,
identifier: &Identifier<'_>,
) -> Result<SpaceHierarchyParentSummary, Error> {
let room_id: &RoomId = current_room;
let join_rule = services()
.rooms
let join_rule = self
.services
.state_accessor
.room_state_get(room_id, &StateEventType::RoomJoinRules, "")?
.map(|s| {
@ -386,12 +376,12 @@ impl Service {
.transpose()?
.unwrap_or(JoinRule::Invite);
let allowed_room_ids = services()
.rooms
let allowed_room_ids = self
.services
.state_accessor
.allowed_room_ids(join_rule.clone());
if !is_accessable_child(current_room, &join_rule.clone().into(), identifier, &allowed_room_ids) {
if !self.is_accessible_child(current_room, &join_rule.clone().into(), identifier, &allowed_room_ids) {
debug!("User is not allowed to see room {room_id}");
// This error will be caught later
return Err(Error::BadRequest(ErrorKind::forbidden(), "User is not allowed to see the room"));
@ -400,18 +390,18 @@ impl Service {
let join_rule = join_rule.into();
Ok(SpaceHierarchyParentSummary {
canonical_alias: services()
.rooms
canonical_alias: self
.services
.state_accessor
.get_canonical_alias(room_id)
.unwrap_or(None),
name: services()
.rooms
name: self
.services
.state_accessor
.get_name(room_id)
.unwrap_or(None),
num_joined_members: services()
.rooms
num_joined_members: self
.services
.state_cache
.room_joined_count(room_id)
.unwrap_or_default()
@ -422,22 +412,22 @@ impl Service {
.try_into()
.expect("user count should not be that big"),
room_id: room_id.to_owned(),
topic: services()
.rooms
topic: self
.services
.state_accessor
.get_room_topic(room_id)
.unwrap_or(None),
world_readable: services().rooms.state_accessor.is_world_readable(room_id)?,
guest_can_join: services().rooms.state_accessor.guest_can_join(room_id)?,
avatar_url: services()
.rooms
world_readable: self.services.state_accessor.is_world_readable(room_id)?,
guest_can_join: self.services.state_accessor.guest_can_join(room_id)?,
avatar_url: self
.services
.state_accessor
.get_avatar(room_id)?
.into_option()
.unwrap_or_default()
.url,
join_rule,
room_type: services().rooms.state_accessor.get_room_type(room_id)?,
room_type: self.services.state_accessor.get_room_type(room_id)?,
children_state,
allowed_room_ids,
})
@ -487,7 +477,7 @@ impl Service {
.into_iter()
.rev()
.skip_while(|(room, _)| {
if let Ok(short) = services().rooms.short.get_shortroomid(room)
if let Ok(short) = self.services.short.get_shortroomid(room)
{
short.as_ref() != short_room_ids.get(parents.len())
} else {
@ -541,7 +531,7 @@ impl Service {
let mut short_room_ids = vec![];
for room in parents {
short_room_ids.push(services().rooms.short.get_or_create_shortroomid(&room)?);
short_room_ids.push(self.services.short.get_or_create_shortroomid(&room)?);
}
Some(
@ -559,128 +549,152 @@ impl Service {
rooms: results,
})
}
}
fn next_room_to_traverse(
stack: &mut Vec<Vec<(OwnedRoomId, Vec<OwnedServerName>)>>, parents: &mut VecDeque<OwnedRoomId>,
) -> Option<(OwnedRoomId, Vec<OwnedServerName>)> {
while stack.last().map_or(false, Vec::is_empty) {
stack.pop();
parents.pop_back();
/// Simply returns the stripped m.space.child events of a room
async fn get_stripped_space_child_events(
&self, room_id: &RoomId,
) -> Result<Option<Vec<Raw<HierarchySpaceChildEvent>>>, Error> {
let Some(current_shortstatehash) = self.services.state.get_room_shortstatehash(room_id)? else {
return Ok(None);
};
let state = self
.services
.state_accessor
.state_full_ids(current_shortstatehash)
.await?;
let mut children_pdus = Vec::new();
for (key, id) in state {
let (event_type, state_key) = self.services.short.get_statekey_from_short(key)?;
if event_type != StateEventType::SpaceChild {
continue;
}
let pdu = self
.services
.timeline
.get_pdu(&id)?
.ok_or_else(|| Error::bad_database("Event in space state not found"))?;
if serde_json::from_str::<SpaceChildEventContent>(pdu.content.get())
.ok()
.map(|c| c.via)
.map_or(true, |v| v.is_empty())
{
continue;
}
if OwnedRoomId::try_from(state_key).is_ok() {
children_pdus.push(pdu.to_stripped_spacechild_state_event());
}
}
Ok(Some(children_pdus))
}
stack.last_mut().and_then(Vec::pop)
}
/// With the given identifier, checks if a room is accessible
fn is_accessible_child(
&self, current_room: &OwnedRoomId, join_rule: &SpaceRoomJoinRule, identifier: &Identifier<'_>,
allowed_room_ids: &Vec<OwnedRoomId>,
) -> bool {
// Note: unwrap_or_default for bool means false
match identifier {
Identifier::ServerName(server_name) => {
let room_id: &RoomId = current_room;
/// Simply returns the stripped m.space.child events of a room
async fn get_stripped_space_child_events(
room_id: &RoomId,
) -> Result<Option<Vec<Raw<HierarchySpaceChildEvent>>>, Error> {
let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? else {
return Ok(None);
};
let state = services()
.rooms
.state_accessor
.state_full_ids(current_shortstatehash)
.await?;
let mut children_pdus = Vec::new();
for (key, id) in state {
let (event_type, state_key) = services().rooms.short.get_statekey_from_short(key)?;
if event_type != StateEventType::SpaceChild {
continue;
}
let pdu = services()
.rooms
.timeline
.get_pdu(&id)?
.ok_or_else(|| Error::bad_database("Event in space state not found"))?;
if serde_json::from_str::<SpaceChildEventContent>(pdu.content.get())
.ok()
.map(|c| c.via)
.map_or(true, |v| v.is_empty())
{
continue;
}
if OwnedRoomId::try_from(state_key).is_ok() {
children_pdus.push(pdu.to_stripped_spacechild_state_event());
}
}
Ok(Some(children_pdus))
}
/// With the given identifier, checks if a room is accessable
fn is_accessable_child(
current_room: &OwnedRoomId, join_rule: &SpaceRoomJoinRule, identifier: &Identifier<'_>,
allowed_room_ids: &Vec<OwnedRoomId>,
) -> bool {
// Note: unwrap_or_default for bool means false
match identifier {
Identifier::ServerName(server_name) => {
let room_id: &RoomId = current_room;
// Checks if ACLs allow for the server to participate
if services()
.rooms
.event_handler
.acl_check(server_name, room_id)
.is_err()
{
return false;
}
},
Identifier::UserId(user_id) => {
if services()
.rooms
.state_cache
.is_joined(user_id, current_room)
.unwrap_or_default()
|| services()
.rooms
.state_cache
.is_invited(user_id, current_room)
.unwrap_or_default()
{
return true;
}
},
} // Takes care of join rules
match join_rule {
SpaceRoomJoinRule::Restricted => {
for room in allowed_room_ids {
match identifier {
Identifier::UserId(user) => {
if services()
.rooms
.state_cache
.is_joined(user, room)
.unwrap_or_default()
{
return true;
}
},
Identifier::ServerName(server) => {
if services()
.rooms
.state_cache
.server_in_room(server, room)
.unwrap_or_default()
{
return true;
}
},
// Checks if ACLs allow for the server to participate
if self
.services
.event_handler
.acl_check(server_name, room_id)
.is_err()
{
return false;
}
}
false
},
SpaceRoomJoinRule::Public | SpaceRoomJoinRule::Knock | SpaceRoomJoinRule::KnockRestricted => true,
// Invite only, Private, or Custom join rule
_ => false,
},
Identifier::UserId(user_id) => {
if self
.services
.state_cache
.is_joined(user_id, current_room)
.unwrap_or_default()
|| self
.services
.state_cache
.is_invited(user_id, current_room)
.unwrap_or_default()
{
return true;
}
},
} // Takes care of join rules
match join_rule {
SpaceRoomJoinRule::Restricted => {
for room in allowed_room_ids {
match identifier {
Identifier::UserId(user) => {
if self
.services
.state_cache
.is_joined(user, room)
.unwrap_or_default()
{
return true;
}
},
Identifier::ServerName(server) => {
if self
.services
.state_cache
.server_in_room(server, room)
.unwrap_or_default()
{
return true;
}
},
}
}
false
},
SpaceRoomJoinRule::Public | SpaceRoomJoinRule::Knock | SpaceRoomJoinRule::KnockRestricted => true,
// Invite only, Private, or Custom join rule
_ => false,
}
}
}
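As a reading aid for `is_accessible_child`: once the membership shortcut for joined/invited users and the server ACL check above have run, the remaining decision depends only on the join rule and, for restricted rooms, on membership in one of the allowed rooms. Below is a minimal, self-contained sketch of that last step with hypothetical names and the membership lookup stubbed out as a closure; it is an illustration, not the crate's API.

```rust
// Hypothetical, condensed form of the join-rule decision; membership checks
// are injected as a closure instead of going through state_cache.
#[allow(dead_code)] // not every variant is exercised below
enum JoinRule {
    Public,
    Knock,
    KnockRestricted,
    Restricted,
    Invite,
}

fn accessible_by_join_rule(
    rule: &JoinRule,
    allowed_room_ids: &[String],
    is_member_of: impl Fn(&str) -> bool,
) -> bool {
    match rule {
        // Restricted: visible if the requester is in any of the allowed rooms.
        JoinRule::Restricted => allowed_room_ids.iter().any(|room| is_member_of(room.as_str())),
        // Anyone may see public and knockable rooms.
        JoinRule::Public | JoinRule::Knock | JoinRule::KnockRestricted => true,
        // Invite-only, private, or custom rules hide the room.
        _ => false,
    }
}

fn main() {
    let allowed = vec!["!space:example.org".to_owned()];
    assert!(accessible_by_join_rule(&JoinRule::Public, &[], |_| false));
    assert!(accessible_by_join_rule(&JoinRule::Restricted, &allowed, |r| r == "!space:example.org"));
    assert!(!accessible_by_join_rule(&JoinRule::Invite, &allowed, |_| false));
}
```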
// Here because cannot implement `From` across ruma-federation-api and
// ruma-client-api types
impl From<CachedSpaceHierarchySummary> for SpaceHierarchyRoomsChunk {
fn from(value: CachedSpaceHierarchySummary) -> Self {
let SpaceHierarchyParentSummary {
canonical_alias,
name,
num_joined_members,
room_id,
topic,
world_readable,
guest_can_join,
avatar_url,
join_rule,
room_type,
children_state,
..
} = value.summary;
Self {
canonical_alias,
name,
num_joined_members,
room_id,
topic,
world_readable,
guest_can_join,
avatar_url,
join_rule,
room_type,
children_state,
}
}
}
@ -736,3 +750,14 @@ fn get_parent_children_via(
})
.collect()
}
fn next_room_to_traverse(
stack: &mut Vec<Vec<(OwnedRoomId, Vec<OwnedServerName>)>>, parents: &mut VecDeque<OwnedRoomId>,
) -> Option<(OwnedRoomId, Vec<OwnedServerName>)> {
while stack.last().map_or(false, Vec::is_empty) {
stack.pop();
parents.pop_back();
}
stack.last_mut().and_then(Vec::pop)
}
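As a usage note, the helper above drives a depth-first walk over the space tree: roughly, each stack level holds the remaining children of the corresponding entry in `parents`, and exhausted levels are popped together with their parent entry. A small, runnable sketch with plain `String` stand-ins for the ruma ID types (illustration only):

```rust
use std::collections::VecDeque;

// String stand-ins for OwnedRoomId / OwnedServerName, purely for illustration.
type Room = String;
type Server = String;

fn next_room_to_traverse(
    stack: &mut Vec<Vec<(Room, Vec<Server>)>>,
    parents: &mut VecDeque<Room>,
) -> Option<(Room, Vec<Server>)> {
    // Drop levels with no children left, along with the parent they belong to.
    while stack.last().map_or(false, Vec::is_empty) {
        stack.pop();
        parents.pop_back();
    }
    stack.last_mut().and_then(Vec::pop)
}

fn main() {
    // One root space with two children; each child carries its `via` servers.
    let mut stack = vec![vec![
        ("!a:example.org".to_owned(), vec!["example.org".to_owned()]),
        ("!b:example.org".to_owned(), vec!["example.org".to_owned()]),
    ]];
    let mut parents: VecDeque<Room> = VecDeque::new();
    parents.push_back("!space:example.org".to_owned());

    while let Some((room, via)) = next_room_to_traverse(&mut stack, &mut parents) {
        println!("visit {room} via {via:?}");
        // A real walk would push this room's own children as a new stack level here.
    }
}
```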

View file

@ -8,7 +8,7 @@ use std::{
use conduit::{
utils::{calculate_hash, MutexMap, MutexMapGuard},
warn, Error, Result,
warn, Error, PduEvent, Result,
};
use data::Data;
use ruma::{
@ -23,19 +23,39 @@ use ruma::{
};
use super::state_compressor::CompressedStateEvent;
use crate::{services, PduEvent};
use crate::{globals, rooms, Dep};
pub struct Service {
services: Services,
db: Data,
pub mutex: RoomMutexMap,
}
struct Services {
globals: Dep<globals::Service>,
short: Dep<rooms::short::Service>,
spaces: Dep<rooms::spaces::Service>,
state_cache: Dep<rooms::state_cache::Service>,
state_accessor: Dep<rooms::state_accessor::Service>,
state_compressor: Dep<rooms::state_compressor::Service>,
timeline: Dep<rooms::timeline::Service>,
}
type RoomMutexMap = MutexMap<OwnedRoomId, ()>;
pub type RoomMutexGuard = MutexMapGuard<OwnedRoomId, ()>;
impl crate::Service for Service {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
Ok(Arc::new(Self {
services: Services {
globals: args.depend::<globals::Service>("globals"),
short: args.depend::<rooms::short::Service>("rooms::short"),
spaces: args.depend::<rooms::spaces::Service>("rooms::spaces"),
state_cache: args.depend::<rooms::state_cache::Service>("rooms::state_cache"),
state_accessor: args.depend::<rooms::state_accessor::Service>("rooms::state_accessor"),
state_compressor: args.depend::<rooms::state_compressor::Service>("rooms::state_compressor"),
timeline: args.depend::<rooms::timeline::Service>("rooms::timeline"),
},
db: Data::new(args.db),
mutex: RoomMutexMap::new(),
}))
@ -62,14 +82,13 @@ impl Service {
state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex
) -> Result<()> {
for event_id in statediffnew.iter().filter_map(|new| {
services()
.rooms
self.services
.state_compressor
.parse_compressed_state_event(new)
.ok()
.map(|(_, id)| id)
}) {
let Some(pdu) = services().rooms.timeline.get_pdu_json(&event_id)? else {
let Some(pdu) = self.services.timeline.get_pdu_json(&event_id)? else {
continue;
};
@ -94,7 +113,7 @@ impl Service {
continue;
};
services().rooms.state_cache.update_membership(
self.services.state_cache.update_membership(
room_id,
&user_id,
membership_event,
@ -105,8 +124,7 @@ impl Service {
)?;
},
TimelineEventType::SpaceChild => {
services()
.rooms
self.services
.spaces
.roomid_spacehierarchy_cache
.lock()
@ -117,7 +135,7 @@ impl Service {
}
}
services().rooms.state_cache.update_joined_count(room_id)?;
self.services.state_cache.update_joined_count(room_id)?;
self.db
.set_room_state(room_id, shortstatehash, state_lock)?;
@ -133,10 +151,7 @@ impl Service {
pub fn set_event_state(
&self, event_id: &EventId, room_id: &RoomId, state_ids_compressed: Arc<HashSet<CompressedStateEvent>>,
) -> Result<u64> {
let shorteventid = services()
.rooms
.short
.get_or_create_shorteventid(event_id)?;
let shorteventid = self.services.short.get_or_create_shorteventid(event_id)?;
let previous_shortstatehash = self.db.get_room_shortstatehash(room_id)?;
@ -147,20 +162,15 @@ impl Service {
.collect::<Vec<_>>(),
);
let (shortstatehash, already_existed) = services()
.rooms
let (shortstatehash, already_existed) = self
.services
.short
.get_or_create_shortstatehash(&state_hash)?;
if !already_existed {
let states_parents = previous_shortstatehash.map_or_else(
|| Ok(Vec::new()),
|p| {
services()
.rooms
.state_compressor
.load_shortstatehash_info(p)
},
|p| self.services.state_compressor.load_shortstatehash_info(p),
)?;
let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() {
@ -179,7 +189,7 @@ impl Service {
} else {
(state_ids_compressed, Arc::new(HashSet::new()))
};
services().rooms.state_compressor.save_state_from_diff(
self.services.state_compressor.save_state_from_diff(
shortstatehash,
statediffnew,
statediffremoved,
@ -199,8 +209,8 @@ impl Service {
/// to `stateid_pduid` and adds the incoming event to `eventid_statehash`.
#[tracing::instrument(skip(self, new_pdu), level = "debug")]
pub fn append_to_state(&self, new_pdu: &PduEvent) -> Result<u64> {
let shorteventid = services()
.rooms
let shorteventid = self
.services
.short
.get_or_create_shorteventid(&new_pdu.event_id)?;
@ -214,21 +224,16 @@ impl Service {
let states_parents = previous_shortstatehash.map_or_else(
|| Ok(Vec::new()),
#[inline]
|p| {
services()
.rooms
.state_compressor
.load_shortstatehash_info(p)
},
|p| self.services.state_compressor.load_shortstatehash_info(p),
)?;
let shortstatekey = services()
.rooms
let shortstatekey = self
.services
.short
.get_or_create_shortstatekey(&new_pdu.kind.to_string().into(), state_key)?;
let new = services()
.rooms
let new = self
.services
.state_compressor
.compress_state_event(shortstatekey, &new_pdu.event_id)?;
@ -246,7 +251,7 @@ impl Service {
}
// TODO: statehash with deterministic inputs
let shortstatehash = services().globals.next_count()?;
let shortstatehash = self.services.globals.next_count()?;
let mut statediffnew = HashSet::new();
statediffnew.insert(new);
@ -256,7 +261,7 @@ impl Service {
statediffremoved.insert(*replaces);
}
services().rooms.state_compressor.save_state_from_diff(
self.services.state_compressor.save_state_from_diff(
shortstatehash,
Arc::new(statediffnew),
Arc::new(statediffremoved),
@ -275,22 +280,20 @@ impl Service {
let mut state = Vec::new();
// Add recommended events
if let Some(e) =
services()
.rooms
self.services
.state_accessor
.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")?
{
state.push(e.to_stripped_state_event());
}
if let Some(e) =
services()
.rooms
self.services
.state_accessor
.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")?
{
state.push(e.to_stripped_state_event());
}
if let Some(e) = services().rooms.state_accessor.room_state_get(
if let Some(e) = self.services.state_accessor.room_state_get(
&invite_event.room_id,
&StateEventType::RoomCanonicalAlias,
"",
@ -298,22 +301,20 @@ impl Service {
state.push(e.to_stripped_state_event());
}
if let Some(e) =
services()
.rooms
self.services
.state_accessor
.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")?
{
state.push(e.to_stripped_state_event());
}
if let Some(e) =
services()
.rooms
self.services
.state_accessor
.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")?
{
state.push(e.to_stripped_state_event());
}
if let Some(e) = services().rooms.state_accessor.room_state_get(
if let Some(e) = self.services.state_accessor.room_state_get(
&invite_event.room_id,
&StateEventType::RoomMember,
invite_event.sender.as_str(),
@ -339,8 +340,8 @@ impl Service {
/// Returns the room's version.
#[tracing::instrument(skip(self), level = "debug")]
pub fn get_room_version(&self, room_id: &RoomId) -> Result<RoomVersionId> {
let create_event = services()
.rooms
let create_event = self
.services
.state_accessor
.room_state_get(room_id, &StateEventType::RoomCreate, "")?;
@ -393,8 +394,7 @@ impl Service {
let mut sauthevents = auth_events
.into_iter()
.filter_map(|(event_type, state_key)| {
services()
.rooms
self.services
.short
.get_shortstatekey(&event_type.to_string().into(), &state_key)
.ok()
@ -403,8 +403,8 @@ impl Service {
})
.collect::<HashMap<_, _>>();
let full_state = services()
.rooms
let full_state = self
.services
.state_compressor
.load_shortstatehash_info(shortstatehash)?
.pop()
@ -414,16 +414,14 @@ impl Service {
Ok(full_state
.iter()
.filter_map(|compressed| {
services()
.rooms
self.services
.state_compressor
.parse_compressed_state_event(compressed)
.ok()
})
.filter_map(|(shortstatekey, event_id)| sauthevents.remove(&shortstatekey).map(|k| (k, event_id)))
.filter_map(|(k, event_id)| {
services()
.rooms
self.services
.timeline
.get_pdu(&event_id)
.ok()

View file

@ -1,28 +1,43 @@
use std::{collections::HashMap, sync::Arc};
use conduit::{utils, Error, Result};
use database::{Database, Map};
use conduit::{utils, Error, PduEvent, Result};
use database::Map;
use ruma::{events::StateEventType, EventId, RoomId};
use crate::{services, PduEvent};
use crate::{rooms, Dep};
pub(super) struct Data {
eventid_shorteventid: Arc<Map>,
shorteventid_shortstatehash: Arc<Map>,
services: Services,
}
struct Services {
short: Dep<rooms::short::Service>,
state: Dep<rooms::state::Service>,
state_compressor: Dep<rooms::state_compressor::Service>,
timeline: Dep<rooms::timeline::Service>,
}
impl Data {
pub(super) fn new(db: &Arc<Database>) -> Self {
pub(super) fn new(args: &crate::Args<'_>) -> Self {
let db = &args.db;
Self {
eventid_shorteventid: db["eventid_shorteventid"].clone(),
shorteventid_shortstatehash: db["shorteventid_shortstatehash"].clone(),
services: Services {
short: args.depend::<rooms::short::Service>("rooms::short"),
state: args.depend::<rooms::state::Service>("rooms::state"),
state_compressor: args.depend::<rooms::state_compressor::Service>("rooms::state_compressor"),
timeline: args.depend::<rooms::timeline::Service>("rooms::timeline"),
},
}
}
#[allow(unused_qualifications)] // async traits
pub(super) async fn state_full_ids(&self, shortstatehash: u64) -> Result<HashMap<u64, Arc<EventId>>> {
let full_state = services()
.rooms
let full_state = self
.services
.state_compressor
.load_shortstatehash_info(shortstatehash)?
.pop()
@ -31,8 +46,8 @@ impl Data {
let mut result = HashMap::new();
let mut i: u8 = 0;
for compressed in full_state.iter() {
let parsed = services()
.rooms
let parsed = self
.services
.state_compressor
.parse_compressed_state_event(compressed)?;
result.insert(parsed.0, parsed.1);
@ -49,8 +64,8 @@ impl Data {
pub(super) async fn state_full(
&self, shortstatehash: u64,
) -> Result<HashMap<(StateEventType, String), Arc<PduEvent>>> {
let full_state = services()
.rooms
let full_state = self
.services
.state_compressor
.load_shortstatehash_info(shortstatehash)?
.pop()
@ -60,11 +75,11 @@ impl Data {
let mut result = HashMap::new();
let mut i: u8 = 0;
for compressed in full_state.iter() {
let (_, eventid) = services()
.rooms
let (_, eventid) = self
.services
.state_compressor
.parse_compressed_state_event(compressed)?;
if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? {
if let Some(pdu) = self.services.timeline.get_pdu(&eventid)? {
result.insert(
(
pdu.kind.to_string().into(),
@ -92,15 +107,15 @@ impl Data {
pub(super) fn state_get_id(
&self, shortstatehash: u64, event_type: &StateEventType, state_key: &str,
) -> Result<Option<Arc<EventId>>> {
let Some(shortstatekey) = services()
.rooms
let Some(shortstatekey) = self
.services
.short
.get_shortstatekey(event_type, state_key)?
else {
return Ok(None);
};
let full_state = services()
.rooms
let full_state = self
.services
.state_compressor
.load_shortstatehash_info(shortstatehash)?
.pop()
@ -110,8 +125,7 @@ impl Data {
.iter()
.find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes()))
.and_then(|compressed| {
services()
.rooms
self.services
.state_compressor
.parse_compressed_state_event(compressed)
.ok()
@ -125,7 +139,7 @@ impl Data {
&self, shortstatehash: u64, event_type: &StateEventType, state_key: &str,
) -> Result<Option<Arc<PduEvent>>> {
self.state_get_id(shortstatehash, event_type, state_key)?
.map_or(Ok(None), |event_id| services().rooms.timeline.get_pdu(&event_id))
.map_or(Ok(None), |event_id| self.services.timeline.get_pdu(&event_id))
}
/// Returns the state hash for this pdu.
@ -149,7 +163,7 @@ impl Data {
pub(super) async fn room_state_full(
&self, room_id: &RoomId,
) -> Result<HashMap<(StateEventType, String), Arc<PduEvent>>> {
if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? {
if let Some(current_shortstatehash) = self.services.state.get_room_shortstatehash(room_id)? {
self.state_full(current_shortstatehash).await
} else {
Ok(HashMap::new())
@ -161,7 +175,7 @@ impl Data {
pub(super) fn room_state_get_id(
&self, room_id: &RoomId, event_type: &StateEventType, state_key: &str,
) -> Result<Option<Arc<EventId>>> {
if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? {
if let Some(current_shortstatehash) = self.services.state.get_room_shortstatehash(room_id)? {
self.state_get_id(current_shortstatehash, event_type, state_key)
} else {
Ok(None)
@ -173,7 +187,7 @@ impl Data {
pub(super) fn room_state_get(
&self, room_id: &RoomId, event_type: &StateEventType, state_key: &str,
) -> Result<Option<Arc<PduEvent>>> {
if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? {
if let Some(current_shortstatehash) = self.services.state.get_room_shortstatehash(room_id)? {
self.state_get(current_shortstatehash, event_type, state_key)
} else {
Ok(None)

View file

@ -6,7 +6,7 @@ use std::{
sync::{Arc, Mutex as StdMutex, Mutex},
};
use conduit::{err, error, utils::math::usize_from_f64, warn, Error, Result};
use conduit::{err, error, pdu::PduBuilder, utils::math::usize_from_f64, warn, Error, PduEvent, Result};
use data::Data;
use lru_cache::LruCache;
use ruma::{
@ -33,14 +33,20 @@ use ruma::{
};
use serde_json::value::to_raw_value;
use crate::{pdu::PduBuilder, rooms::state::RoomMutexGuard, services, PduEvent};
use crate::{rooms, rooms::state::RoomMutexGuard, Dep};
pub struct Service {
services: Services,
db: Data,
pub server_visibility_cache: Mutex<LruCache<(OwnedServerName, u64), bool>>,
pub user_visibility_cache: Mutex<LruCache<(OwnedUserId, u64), bool>>,
}
struct Services {
state_cache: Dep<rooms::state_cache::Service>,
timeline: Dep<rooms::timeline::Service>,
}
impl crate::Service for Service {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
let config = &args.server.config;
@ -50,7 +56,11 @@ impl crate::Service for Service {
f64::from(config.user_visibility_cache_capacity) * config.cache_capacity_modifier;
Ok(Arc::new(Self {
db: Data::new(args.db),
services: Services {
state_cache: args.depend::<rooms::state_cache::Service>("rooms::state_cache"),
timeline: args.depend::<rooms::timeline::Service>("rooms::timeline"),
},
db: Data::new(&args),
server_visibility_cache: StdMutex::new(LruCache::new(usize_from_f64(server_visibility_cache_capacity)?)),
user_visibility_cache: StdMutex::new(LruCache::new(usize_from_f64(user_visibility_cache_capacity)?)),
}))
@ -164,8 +174,8 @@ impl Service {
})
.unwrap_or(HistoryVisibility::Shared);
let mut current_server_members = services()
.rooms
let mut current_server_members = self
.services
.state_cache
.room_members(room_id)
.filter_map(Result::ok)
@ -212,7 +222,7 @@ impl Service {
return Ok(*visibility);
}
let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?;
let currently_member = self.services.state_cache.is_joined(user_id, room_id)?;
let history_visibility = self
.state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")?
@ -258,7 +268,7 @@ impl Service {
/// the room's history_visibility at that event's state.
#[tracing::instrument(skip(self, user_id, room_id))]
pub fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool> {
let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?;
let currently_member = self.services.state_cache.is_joined(user_id, room_id)?;
let history_visibility = self
.room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")?
@ -342,8 +352,8 @@ impl Service {
redacts: None,
};
Ok(services()
.rooms
Ok(self
.services
.timeline
.create_hash_and_sign_event(new_event, sender, room_id, state_lock)
.is_ok())
@ -413,7 +423,7 @@ impl Service {
// Falling back on m.room.create to judge power level
if let Some(pdu) = self.room_state_get(room_id, &StateEventType::RoomCreate, "")? {
Ok(pdu.sender == sender
|| if let Ok(Some(pdu)) = services().rooms.timeline.get_pdu(redacts) {
|| if let Ok(Some(pdu)) = self.services.timeline.get_pdu(redacts) {
pdu.sender == sender
} else {
false
@ -430,7 +440,7 @@ impl Service {
.map(|event: RoomPowerLevels| {
event.user_can_redact_event_of_other(sender)
|| event.user_can_redact_own_event(sender)
&& if let Ok(Some(pdu)) = services().rooms.timeline.get_pdu(redacts) {
&& if let Ok(Some(pdu)) = self.services.timeline.get_pdu(redacts) {
if federation {
pdu.sender.server_name() == sender.server_name()
} else {

View file

@ -4,7 +4,7 @@ use std::{
};
use conduit::{utils, Error, Result};
use database::{Database, Map};
use database::Map;
use itertools::Itertools;
use ruma::{
events::{AnyStrippedStateEvent, AnySyncStateEvent},
@ -12,44 +12,55 @@ use ruma::{
OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
};
use crate::{appservice::RegistrationInfo, services, user_is_local};
use crate::{appservice::RegistrationInfo, globals, user_is_local, users, Dep};
type StrippedStateEventIter<'a> = Box<dyn Iterator<Item = Result<(OwnedRoomId, Vec<Raw<AnyStrippedStateEvent>>)>> + 'a>;
type AnySyncStateEventIter<'a> = Box<dyn Iterator<Item = Result<(OwnedRoomId, Vec<Raw<AnySyncStateEvent>>)>> + 'a>;
type AppServiceInRoomCache = RwLock<HashMap<OwnedRoomId, HashMap<String, bool>>>;
pub(super) struct Data {
userroomid_joined: Arc<Map>,
roomuserid_joined: Arc<Map>,
userroomid_invitestate: Arc<Map>,
roomuserid_invitecount: Arc<Map>,
userroomid_leftstate: Arc<Map>,
roomuserid_leftcount: Arc<Map>,
roomid_inviteviaservers: Arc<Map>,
roomuseroncejoinedids: Arc<Map>,
roomid_joinedcount: Arc<Map>,
roomid_invitedcount: Arc<Map>,
roomserverids: Arc<Map>,
serverroomids: Arc<Map>,
pub(super) appservice_in_room_cache: AppServiceInRoomCache,
roomid_invitedcount: Arc<Map>,
roomid_inviteviaservers: Arc<Map>,
roomid_joinedcount: Arc<Map>,
roomserverids: Arc<Map>,
roomuserid_invitecount: Arc<Map>,
roomuserid_joined: Arc<Map>,
roomuserid_leftcount: Arc<Map>,
roomuseroncejoinedids: Arc<Map>,
serverroomids: Arc<Map>,
userroomid_invitestate: Arc<Map>,
userroomid_joined: Arc<Map>,
userroomid_leftstate: Arc<Map>,
services: Services,
}
struct Services {
globals: Dep<globals::Service>,
users: Dep<users::Service>,
}
impl Data {
pub(super) fn new(db: &Arc<Database>) -> Self {
pub(super) fn new(args: &crate::Args<'_>) -> Self {
let db = &args.db;
Self {
userroomid_joined: db["userroomid_joined"].clone(),
roomuserid_joined: db["roomuserid_joined"].clone(),
userroomid_invitestate: db["userroomid_invitestate"].clone(),
roomuserid_invitecount: db["roomuserid_invitecount"].clone(),
userroomid_leftstate: db["userroomid_leftstate"].clone(),
roomuserid_leftcount: db["roomuserid_leftcount"].clone(),
roomid_inviteviaservers: db["roomid_inviteviaservers"].clone(),
roomuseroncejoinedids: db["roomuseroncejoinedids"].clone(),
roomid_joinedcount: db["roomid_joinedcount"].clone(),
roomid_invitedcount: db["roomid_invitedcount"].clone(),
roomserverids: db["roomserverids"].clone(),
serverroomids: db["serverroomids"].clone(),
appservice_in_room_cache: RwLock::new(HashMap::new()),
roomid_invitedcount: db["roomid_invitedcount"].clone(),
roomid_inviteviaservers: db["roomid_inviteviaservers"].clone(),
roomid_joinedcount: db["roomid_joinedcount"].clone(),
roomserverids: db["roomserverids"].clone(),
roomuserid_invitecount: db["roomuserid_invitecount"].clone(),
roomuserid_joined: db["roomuserid_joined"].clone(),
roomuserid_leftcount: db["roomuserid_leftcount"].clone(),
roomuseroncejoinedids: db["roomuseroncejoinedids"].clone(),
serverroomids: db["serverroomids"].clone(),
userroomid_invitestate: db["userroomid_invitestate"].clone(),
userroomid_joined: db["userroomid_joined"].clone(),
userroomid_leftstate: db["userroomid_leftstate"].clone(),
services: Services {
globals: args.depend::<globals::Service>("globals"),
users: args.depend::<users::Service>("users"),
},
}
}
@ -100,7 +111,7 @@ impl Data {
&serde_json::to_vec(&last_state.unwrap_or_default()).expect("state to bytes always works"),
)?;
self.roomuserid_invitecount
.insert(&roomuser_id, &services().globals.next_count()?.to_be_bytes())?;
.insert(&roomuser_id, &self.services.globals.next_count()?.to_be_bytes())?;
self.userroomid_joined.remove(&userroom_id)?;
self.roomuserid_joined.remove(&roomuser_id)?;
self.userroomid_leftstate.remove(&userroom_id)?;
@ -144,7 +155,7 @@ impl Data {
&serde_json::to_vec(&Vec::<Raw<AnySyncStateEvent>>::new()).unwrap(),
)?; // TODO
self.roomuserid_leftcount
.insert(&roomuser_id, &services().globals.next_count()?.to_be_bytes())?;
.insert(&roomuser_id, &self.services.globals.next_count()?.to_be_bytes())?;
self.userroomid_joined.remove(&userroom_id)?;
self.roomuserid_joined.remove(&roomuser_id)?;
self.userroomid_invitestate.remove(&userroom_id)?;
@ -228,7 +239,7 @@ impl Data {
} else {
let bridge_user_id = UserId::parse_with_server_name(
appservice.registration.sender_localpart.as_str(),
services().globals.server_name(),
self.services.globals.server_name(),
)
.ok();
@ -356,7 +367,7 @@ impl Data {
) -> Box<dyn Iterator<Item = OwnedUserId> + 'a> {
Box::new(
self.local_users_in_room(room_id)
.filter(|user| !services().users.is_deactivated(user).unwrap_or(true)),
.filter(|user| !self.services.users.is_deactivated(user).unwrap_or(true)),
)
}

View file

@ -21,16 +21,28 @@ use ruma::{
OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
};
use crate::{appservice::RegistrationInfo, services, user_is_local};
use crate::{account_data, appservice::RegistrationInfo, rooms, user_is_local, users, Dep};
pub struct Service {
services: Services,
db: Data,
}
struct Services {
account_data: Dep<account_data::Service>,
state_accessor: Dep<rooms::state_accessor::Service>,
users: Dep<users::Service>,
}
impl crate::Service for Service {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
Ok(Arc::new(Self {
db: Data::new(args.db),
services: Services {
account_data: args.depend::<account_data::Service>("account_data"),
state_accessor: args.depend::<rooms::state_accessor::Service>("rooms::state_accessor"),
users: args.depend::<users::Service>("users"),
},
db: Data::new(&args),
}))
}
@ -54,18 +66,18 @@ impl Service {
// update
#[allow(clippy::collapsible_if)]
if !user_is_local(user_id) {
if !services().users.exists(user_id)? {
services().users.create(user_id, None)?;
if !self.services.users.exists(user_id)? {
self.services.users.create(user_id, None)?;
}
/*
// Try to update our local copy of the user if ours does not match
if ((services().users.displayname(user_id)? != membership_event.displayname)
|| (services().users.avatar_url(user_id)? != membership_event.avatar_url)
|| (services().users.blurhash(user_id)? != membership_event.blurhash))
if ((self.services.users.displayname(user_id)? != membership_event.displayname)
|| (self.services.users.avatar_url(user_id)? != membership_event.avatar_url)
|| (self.services.users.blurhash(user_id)? != membership_event.blurhash))
&& (membership != MembershipState::Leave)
{
let response = services()
let response = self.services
.sending
.send_federation_request(
user_id.server_name(),
@ -76,9 +88,9 @@ impl Service {
)
.await;
services().users.set_displayname(user_id, response.displayname.clone()).await?;
services().users.set_avatar_url(user_id, response.avatar_url).await?;
services().users.set_blurhash(user_id, response.blurhash).await?;
self.services.users.set_displayname(user_id, response.displayname.clone()).await?;
self.services.users.set_avatar_url(user_id, response.avatar_url).await?;
self.services.users.set_blurhash(user_id, response.blurhash).await?;
};
*/
}
@ -91,8 +103,8 @@ impl Service {
self.db.mark_as_once_joined(user_id, room_id)?;
// Check if the room has a predecessor
if let Some(predecessor) = services()
.rooms
if let Some(predecessor) = self
.services
.state_accessor
.room_state_get(room_id, &StateEventType::RoomCreate, "")?
.and_then(|create| serde_json::from_str(create.content.get()).ok())
@ -124,21 +136,23 @@ impl Service {
// .ok();
// Copy old tags to new room
if let Some(tag_event) = services()
if let Some(tag_event) = self
.services
.account_data
.get(Some(&predecessor.room_id), user_id, RoomAccountDataEventType::Tag)?
.map(|event| {
serde_json::from_str(event.get())
.map_err(|e| err!(Database(warn!("Invalid account data event in db: {e:?}"))))
}) {
services()
self.services
.account_data
.update(Some(room_id), user_id, RoomAccountDataEventType::Tag, &tag_event?)
.ok();
};
// Copy direct chat flag
if let Some(direct_event) = services()
if let Some(direct_event) = self
.services
.account_data
.get(None, user_id, GlobalAccountDataEventType::Direct.to_string().into())?
.map(|event| {
@ -156,7 +170,7 @@ impl Service {
}
if room_ids_updated {
services().account_data.update(
self.services.account_data.update(
None,
user_id,
GlobalAccountDataEventType::Direct.to_string().into(),
@ -171,7 +185,8 @@ impl Service {
},
MembershipState::Invite => {
// We want to know if the sender is ignored by the receiver
let is_ignored = services()
let is_ignored = self
.services
.account_data
.get(
None, // Ignored users are in global account data
@ -393,8 +408,8 @@ impl Service {
/// See <https://spec.matrix.org/v1.10/appendices/#routing>
#[tracing::instrument(skip(self))]
pub fn servers_route_via(&self, room_id: &RoomId) -> Result<Vec<OwnedServerName>> {
let most_powerful_user_server = services()
.rooms
let most_powerful_user_server = self
.services
.state_accessor
.room_state_get(room_id, &StateEventType::RoomPowerLevels, "")?
.map(|pdu| {

View file

@ -13,7 +13,7 @@ use lru_cache::LruCache;
use ruma::{EventId, RoomId};
use self::data::StateDiff;
use crate::services;
use crate::{rooms, Dep};
type StateInfoLruCache = Mutex<
LruCache<
@ -48,16 +48,25 @@ pub type CompressedStateEvent = [u8; 2 * size_of::<u64>()];
pub struct Service {
db: Data,
services: Services,
pub stateinfo_cache: StateInfoLruCache,
}
struct Services {
short: Dep<rooms::short::Service>,
state: Dep<rooms::state::Service>,
}
impl crate::Service for Service {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
let config = &args.server.config;
let cache_capacity = f64::from(config.stateinfo_cache_capacity) * config.cache_capacity_modifier;
Ok(Arc::new(Self {
db: Data::new(args.db),
services: Services {
short: args.depend::<rooms::short::Service>("rooms::short"),
state: args.depend::<rooms::state::Service>("rooms::state"),
},
stateinfo_cache: StdMutex::new(LruCache::new(usize_from_f64(cache_capacity)?)),
}))
}
@ -124,8 +133,8 @@ impl Service {
pub fn compress_state_event(&self, shortstatekey: u64, event_id: &EventId) -> Result<CompressedStateEvent> {
let mut v = shortstatekey.to_be_bytes().to_vec();
v.extend_from_slice(
&services()
.rooms
&self
.services
.short
.get_or_create_shorteventid(event_id)?
.to_be_bytes(),
@ -138,7 +147,7 @@ impl Service {
pub fn parse_compressed_state_event(&self, compressed_event: &CompressedStateEvent) -> Result<(u64, Arc<EventId>)> {
Ok((
utils::u64_from_bytes(&compressed_event[0..size_of::<u64>()]).expect("bytes have right length"),
services().rooms.short.get_eventid_from_short(
self.services.short.get_eventid_from_short(
utils::u64_from_bytes(&compressed_event[size_of::<u64>()..]).expect("bytes have right length"),
)?,
))
@ -282,7 +291,7 @@ impl Service {
pub fn save_state(
&self, room_id: &RoomId, new_state_ids_compressed: Arc<HashSet<CompressedStateEvent>>,
) -> HashSetCompressStateEvent {
let previous_shortstatehash = services().rooms.state.get_room_shortstatehash(room_id)?;
let previous_shortstatehash = self.services.state.get_room_shortstatehash(room_id)?;
let state_hash = utils::calculate_hash(
&new_state_ids_compressed
@ -291,8 +300,8 @@ impl Service {
.collect::<Vec<_>>(),
);
let (new_shortstatehash, already_existed) = services()
.rooms
let (new_shortstatehash, already_existed) = self
.services
.short
.get_or_create_shortstatehash(&state_hash)?;
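The compressed representation handled in this file is just two big-endian u64s glued together: the shortstatekey followed by the shorteventid, which is why lookups elsewhere can scan with `starts_with(&shortstatekey.to_be_bytes())`. A minimal sketch of the packing and unpacking, with the shorteventid left as a plain u64 instead of being resolved back to an `Arc<EventId>` (illustration only):

```rust
use std::mem::size_of;

// Mirrors the CompressedStateEvent layout: shortstatekey then shorteventid,
// both big-endian so byte-wise prefix scans line up with numeric ordering.
// The real service resolves the second half back into an Arc<EventId>.
type Compressed = [u8; 2 * size_of::<u64>()];

fn compress(shortstatekey: u64, shorteventid: u64) -> Compressed {
    let mut out = [0u8; 2 * size_of::<u64>()];
    out[..size_of::<u64>()].copy_from_slice(&shortstatekey.to_be_bytes());
    out[size_of::<u64>()..].copy_from_slice(&shorteventid.to_be_bytes());
    out
}

fn parse(compressed: &Compressed) -> (u64, u64) {
    let key = u64::from_be_bytes(compressed[..size_of::<u64>()].try_into().expect("8 bytes"));
    let event = u64::from_be_bytes(compressed[size_of::<u64>()..].try_into().expect("8 bytes"));
    (key, event)
}

fn main() {
    let packed = compress(42, 7);
    assert!(packed.starts_with(&42_u64.to_be_bytes()));
    assert_eq!(parse(&packed), (42, 7));
}
```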

View file

@ -1,29 +1,40 @@
use std::{mem::size_of, sync::Arc};
use conduit::{checked, utils, Error, Result};
use database::{Database, Map};
use conduit::{checked, utils, Error, PduEvent, Result};
use database::Map;
use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId};
use crate::{services, PduEvent};
use crate::{rooms, Dep};
type PduEventIterResult<'a> = Result<Box<dyn Iterator<Item = Result<(u64, PduEvent)>> + 'a>>;
pub(super) struct Data {
threadid_userids: Arc<Map>,
services: Services,
}
struct Services {
short: Dep<rooms::short::Service>,
timeline: Dep<rooms::timeline::Service>,
}
impl Data {
pub(super) fn new(db: &Arc<Database>) -> Self {
pub(super) fn new(args: &crate::Args<'_>) -> Self {
let db = &args.db;
Self {
threadid_userids: db["threadid_userids"].clone(),
services: Services {
short: args.depend::<rooms::short::Service>("rooms::short"),
timeline: args.depend::<rooms::timeline::Service>("rooms::timeline"),
},
}
}
pub(super) fn threads_until<'a>(
&'a self, user_id: &'a UserId, room_id: &'a RoomId, until: u64, _include: &'a IncludeThreads,
) -> PduEventIterResult<'a> {
let prefix = services()
.rooms
let prefix = self
.services
.short
.get_shortroomid(room_id)?
.expect("room exists")
@ -40,8 +51,8 @@ impl Data {
.map(move |(pduid, _users)| {
let count = utils::u64_from_bytes(&pduid[(size_of::<u64>())..])
.map_err(|_| Error::bad_database("Invalid pduid in threadid_userids."))?;
let mut pdu = services()
.rooms
let mut pdu = self
.services
.timeline
.get_pdu_from_id(&pduid)?
.ok_or_else(|| Error::bad_database("Invalid pduid reference in threadid_userids"))?;

View file

@ -2,7 +2,7 @@ mod data;
use std::{collections::BTreeMap, sync::Arc};
use conduit::{Error, Result};
use conduit::{Error, PduEvent, Result};
use data::Data;
use ruma::{
api::client::{error::ErrorKind, threads::get_threads::v1::IncludeThreads},
@ -11,16 +11,24 @@ use ruma::{
};
use serde_json::json;
use crate::{services, PduEvent};
use crate::{rooms, Dep};
pub struct Service {
services: Services,
db: Data,
}
struct Services {
timeline: Dep<rooms::timeline::Service>,
}
impl crate::Service for Service {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
Ok(Arc::new(Self {
db: Data::new(args.db),
services: Services {
timeline: args.depend::<rooms::timeline::Service>("rooms::timeline"),
},
db: Data::new(&args),
}))
}
@ -35,22 +43,22 @@ impl Service {
}
pub fn add_to_thread(&self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> {
let root_id = &services()
.rooms
let root_id = self
.services
.timeline
.get_pdu_id(root_event_id)?
.ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Invalid event id in thread message"))?;
let root_pdu = services()
.rooms
let root_pdu = self
.services
.timeline
.get_pdu_from_id(root_id)?
.get_pdu_from_id(&root_id)?
.ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Thread root pdu not found"))?;
let mut root_pdu_json = services()
.rooms
let mut root_pdu_json = self
.services
.timeline
.get_pdu_json_from_id(root_id)?
.get_pdu_json_from_id(&root_id)?
.ok_or_else(|| Error::BadRequest(ErrorKind::InvalidParam, "Thread root pdu not found"))?;
if let CanonicalJsonValue::Object(unsigned) = root_pdu_json
@ -93,20 +101,19 @@ impl Service {
);
}
services()
.rooms
self.services
.timeline
.replace_pdu(root_id, &root_pdu_json, &root_pdu)?;
.replace_pdu(&root_id, &root_pdu_json, &root_pdu)?;
}
let mut users = Vec::new();
if let Some(userids) = self.db.get_participants(root_id)? {
if let Some(userids) = self.db.get_participants(&root_id)? {
users.extend_from_slice(&userids);
} else {
users.push(root_pdu.sender);
}
users.push(pdu.sender.clone());
self.db.update_participants(root_id, &users)
self.db.update_participants(&root_id, &users)
}
}
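This file is one of the smaller illustrations of the commit's overall shape: each service now declares a `Services` struct of typed `Dep<…>` handles resolved in `build()` via `args.depend`, and method bodies call `self.services.…` instead of the global `services()` accessor. As a rough, self-contained picture of what that buys (construction-time wiring, no process-wide lookup), here is a toy version with entirely hypothetical types; it is not the crate's real `Dep`/`Args` API.

```rust
use std::sync::Arc;

// Toy "globals" dependency with a monotonic counter, standing in for the role
// of globals::Service::next_count(); everything here is hypothetical.
struct Globals {
    counter: std::sync::atomic::AtomicU64,
}

impl Globals {
    fn next_count(&self) -> u64 {
        self.counter.fetch_add(1, std::sync::atomic::Ordering::SeqCst) + 1
    }
}

// A service that used to reach through a global accessor now holds its dependency.
struct Timeline {
    globals: Arc<Globals>, // plays the role of Dep<globals::Service>
}

impl Timeline {
    fn new(globals: Arc<Globals>) -> Self {
        Self { globals }
    }

    fn next_pdu_id(&self, shortroomid: u64) -> Vec<u8> {
        // Same shape as the real pdu_id: room prefix plus a fresh counter.
        let mut id = shortroomid.to_be_bytes().to_vec();
        id.extend_from_slice(&self.globals.next_count().to_be_bytes());
        id
    }
}

fn main() {
    // Wiring happens once, at construction, instead of through services().
    let globals = Arc::new(Globals { counter: std::sync::atomic::AtomicU64::new(0) });
    let timeline = Timeline::new(Arc::clone(&globals));

    let first = timeline.next_pdu_id(1);
    let second = timeline.next_pdu_id(1);
    assert!(first < second); // counters are big-endian, so ids stay ordered
}
```

The real handles are resolved by name through `args.depend::<T>("…")`, as the `build()` implementations throughout this diff show.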

View file

@ -4,19 +4,25 @@ use std::{
sync::{Arc, Mutex},
};
use conduit::{checked, error, utils, Error, Result};
use conduit::{checked, error, utils, Error, PduCount, PduEvent, Result};
use database::{Database, Map};
use ruma::{api::client::error::ErrorKind, CanonicalJsonObject, EventId, OwnedRoomId, OwnedUserId, RoomId, UserId};
use crate::{services, PduCount, PduEvent};
use crate::{rooms, Dep};
pub(super) struct Data {
eventid_outlierpdu: Arc<Map>,
eventid_pduid: Arc<Map>,
pduid_pdu: Arc<Map>,
eventid_outlierpdu: Arc<Map>,
userroomid_notificationcount: Arc<Map>,
userroomid_highlightcount: Arc<Map>,
userroomid_notificationcount: Arc<Map>,
pub(super) lasttimelinecount_cache: LastTimelineCountCache,
pub(super) db: Arc<Database>,
services: Services,
}
struct Services {
short: Dep<rooms::short::Service>,
}
type PdusIterItem = Result<(PduCount, PduEvent)>;
@ -24,14 +30,19 @@ type PdusIterator<'a> = Box<dyn Iterator<Item = PdusIterItem> + 'a>;
type LastTimelineCountCache = Mutex<HashMap<OwnedRoomId, PduCount>>;
impl Data {
pub(super) fn new(db: &Arc<Database>) -> Self {
pub(super) fn new(args: &crate::Args<'_>) -> Self {
let db = &args.db;
Self {
eventid_outlierpdu: db["eventid_outlierpdu"].clone(),
eventid_pduid: db["eventid_pduid"].clone(),
pduid_pdu: db["pduid_pdu"].clone(),
eventid_outlierpdu: db["eventid_outlierpdu"].clone(),
userroomid_notificationcount: db["userroomid_notificationcount"].clone(),
userroomid_highlightcount: db["userroomid_highlightcount"].clone(),
userroomid_notificationcount: db["userroomid_notificationcount"].clone(),
lasttimelinecount_cache: Mutex::new(HashMap::new()),
db: args.db.clone(),
services: Services {
short: args.depend::<rooms::short::Service>("rooms::short"),
},
}
}
@ -210,7 +221,7 @@ impl Data {
/// happened before the event with id `until` in reverse-chronological
/// order.
pub(super) fn pdus_until(&self, user_id: &UserId, room_id: &RoomId, until: PduCount) -> Result<PdusIterator<'_>> {
let (prefix, current) = count_to_id(room_id, until, 1, true)?;
let (prefix, current) = self.count_to_id(room_id, until, 1, true)?;
let user_id = user_id.to_owned();
@ -232,7 +243,7 @@ impl Data {
}
pub(super) fn pdus_after(&self, user_id: &UserId, room_id: &RoomId, from: PduCount) -> Result<PdusIterator<'_>> {
let (prefix, current) = count_to_id(room_id, from, 1, false)?;
let (prefix, current) = self.count_to_id(room_id, from, 1, false)?;
let user_id = user_id.to_owned();
@ -277,6 +288,41 @@ impl Data {
.increment_batch(highlights_batch.iter().map(Vec::as_slice))?;
Ok(())
}
pub(super) fn count_to_id(
&self, room_id: &RoomId, count: PduCount, offset: u64, subtract: bool,
) -> Result<(Vec<u8>, Vec<u8>)> {
let prefix = self
.services
.short
.get_shortroomid(room_id)?
.ok_or_else(|| Error::bad_database("Looked for bad shortroomid in timeline"))?
.to_be_bytes()
.to_vec();
let mut pdu_id = prefix.clone();
// +1 so we don't send the base event
let count_raw = match count {
PduCount::Normal(x) => {
if subtract {
x.saturating_sub(offset)
} else {
x.saturating_add(offset)
}
},
PduCount::Backfilled(x) => {
pdu_id.extend_from_slice(&0_u64.to_be_bytes());
let num = u64::MAX.saturating_sub(x);
if subtract {
num.saturating_sub(offset)
} else {
num.saturating_add(offset)
}
},
};
pdu_id.extend_from_slice(&count_raw.to_be_bytes());
Ok((prefix, pdu_id))
}
}
/// Returns the `count` of this pdu's id.
@ -294,38 +340,3 @@ pub(super) fn pdu_count(pdu_id: &[u8]) -> Result<PduCount> {
Ok(PduCount::Normal(last_u64))
}
}
pub(super) fn count_to_id(
room_id: &RoomId, count: PduCount, offset: u64, subtract: bool,
) -> Result<(Vec<u8>, Vec<u8>)> {
let prefix = services()
.rooms
.short
.get_shortroomid(room_id)?
.ok_or_else(|| Error::bad_database("Looked for bad shortroomid in timeline"))?
.to_be_bytes()
.to_vec();
let mut pdu_id = prefix.clone();
// +1 so we don't send the base event
let count_raw = match count {
PduCount::Normal(x) => {
if subtract {
x.saturating_sub(offset)
} else {
x.saturating_add(offset)
}
},
PduCount::Backfilled(x) => {
pdu_id.extend_from_slice(&0_u64.to_be_bytes());
let num = u64::MAX.saturating_sub(x);
if subtract {
num.saturating_sub(offset)
} else {
num.saturating_add(offset)
}
},
};
pdu_id.extend_from_slice(&count_raw.to_be_bytes());
Ok((prefix, pdu_id))
}
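For reference, the pdu_id that `count_to_id` builds (and that `pdu_count` later decodes) is the shortroomid prefix followed by a big-endian count, with backfilled events getting an extra all-zero u64 segment and an inverted count so older history sorts first. A runnable sketch of that layout, ignoring the ±offset pagination tweak and using a plain u64 in place of the shortroomid lookup (illustration only):

```rust
// Layout only; the real code also nudges the count by an offset for pagination
// and fetches the shortroomid through rooms::short.
fn pdu_id_for(shortroomid: u64, backfilled: bool, count: u64) -> Vec<u8> {
    let mut pdu_id = shortroomid.to_be_bytes().to_vec();
    if backfilled {
        // Marker segment plus inverted count: deeper backfill sorts earlier.
        pdu_id.extend_from_slice(&0_u64.to_be_bytes());
        pdu_id.extend_from_slice(&u64::MAX.saturating_sub(count).to_be_bytes());
    } else {
        pdu_id.extend_from_slice(&count.to_be_bytes());
    }
    pdu_id
}

fn main() {
    let room = 1_u64;
    let live = pdu_id_for(room, false, 10);
    let backfilled_deep = pdu_id_for(room, true, 20);
    let backfilled_shallow = pdu_id_for(room, true, 10);

    assert_eq!(live.len(), 16);
    assert_eq!(backfilled_deep.len(), 24);
    // Byte-wise key order: deeper backfill < shallower backfill < live events.
    assert!(backfilled_deep < backfilled_shallow);
    assert!(backfilled_shallow < live);
}
```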

View file

@ -7,11 +7,12 @@ use std::{
};
use conduit::{
debug, error, info, utils,
debug, error, info,
pdu::{EventHash, PduBuilder, PduCount, PduEvent},
utils,
utils::{MutexMap, MutexMapGuard},
validated, warn, Error, Result,
validated, warn, Error, Result, Server,
};
use data::Data;
use itertools::Itertools;
use ruma::{
api::{client::error::ErrorKind, federation},
@ -37,11 +38,10 @@ use serde::Deserialize;
use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
use tokio::sync::RwLock;
use self::data::Data;
use crate::{
appservice::NamespaceRegex,
pdu::{EventHash, PduBuilder},
rooms::{event_handler::parse_incoming_pdu, state_compressor::CompressedStateEvent},
server_is_ours, services, PduCount, PduEvent,
account_data, admin, appservice, appservice::NamespaceRegex, globals, pusher, rooms,
rooms::state_compressor::CompressedStateEvent, sending, server_is_ours, Dep,
};
// Update Relationships
@ -67,17 +67,61 @@ struct ExtractBody {
}
pub struct Service {
services: Services,
db: Data,
pub mutex_insert: RoomMutexMap,
}
struct Services {
server: Arc<Server>,
account_data: Dep<account_data::Service>,
appservice: Dep<appservice::Service>,
admin: Dep<admin::Service>,
alias: Dep<rooms::alias::Service>,
globals: Dep<globals::Service>,
short: Dep<rooms::short::Service>,
state: Dep<rooms::state::Service>,
state_cache: Dep<rooms::state_cache::Service>,
state_accessor: Dep<rooms::state_accessor::Service>,
pdu_metadata: Dep<rooms::pdu_metadata::Service>,
read_receipt: Dep<rooms::read_receipt::Service>,
sending: Dep<sending::Service>,
user: Dep<rooms::user::Service>,
pusher: Dep<pusher::Service>,
threads: Dep<rooms::threads::Service>,
search: Dep<rooms::search::Service>,
spaces: Dep<rooms::spaces::Service>,
event_handler: Dep<rooms::event_handler::Service>,
}
type RoomMutexMap = MutexMap<OwnedRoomId, ()>;
pub type RoomMutexGuard = MutexMapGuard<OwnedRoomId, ()>;
impl crate::Service for Service {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
Ok(Arc::new(Self {
db: Data::new(args.db),
services: Services {
server: args.server.clone(),
account_data: args.depend::<account_data::Service>("account_data"),
appservice: args.depend::<appservice::Service>("appservice"),
admin: args.depend::<admin::Service>("admin"),
alias: args.depend::<rooms::alias::Service>("rooms::alias"),
globals: args.depend::<globals::Service>("globals"),
short: args.depend::<rooms::short::Service>("rooms::short"),
state: args.depend::<rooms::state::Service>("rooms::state"),
state_cache: args.depend::<rooms::state_cache::Service>("rooms::state_cache"),
state_accessor: args.depend::<rooms::state_accessor::Service>("rooms::state_accessor"),
pdu_metadata: args.depend::<rooms::pdu_metadata::Service>("rooms::pdu_metadata"),
read_receipt: args.depend::<rooms::read_receipt::Service>("rooms::read_receipt"),
sending: args.depend::<sending::Service>("sending"),
user: args.depend::<rooms::user::Service>("rooms::user"),
pusher: args.depend::<pusher::Service>("pusher"),
threads: args.depend::<rooms::threads::Service>("rooms::threads"),
search: args.depend::<rooms::search::Service>("rooms::search"),
spaces: args.depend::<rooms::spaces::Service>("rooms::spaces"),
event_handler: args.depend::<rooms::event_handler::Service>("rooms::event_handler"),
},
db: Data::new(&args),
mutex_insert: RoomMutexMap::new(),
}))
}
@ -217,10 +261,10 @@ impl Service {
state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex
) -> Result<Vec<u8>> {
// Coalesce database writes for the remainder of this scope.
let _cork = services().db.cork_and_flush();
let _cork = self.db.db.cork_and_flush();
let shortroomid = services()
.rooms
let shortroomid = self
.services
.short
.get_shortroomid(&pdu.room_id)?
.expect("room exists");
@ -233,14 +277,14 @@ impl Service {
.entry("unsigned".to_owned())
.or_insert_with(|| CanonicalJsonValue::Object(BTreeMap::default()))
{
if let Some(shortstatehash) = services()
.rooms
if let Some(shortstatehash) = self
.services
.state_accessor
.pdu_shortstatehash(&pdu.event_id)
.unwrap()
{
if let Some(prev_state) = services()
.rooms
if let Some(prev_state) = self
.services
.state_accessor
.state_get(shortstatehash, &pdu.kind.to_string().into(), state_key)
.unwrap()
@ -270,30 +314,26 @@ impl Service {
}
// We must keep track of all events that have been referenced.
services()
.rooms
self.services
.pdu_metadata
.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?;
services()
.rooms
self.services
.state
.set_forward_extremities(&pdu.room_id, leaves, state_lock)?;
let insert_lock = self.mutex_insert.lock(&pdu.room_id).await;
let count1 = services().globals.next_count()?;
let count1 = self.services.globals.next_count()?;
// Mark as read first so the sending client doesn't get a notification even if
// appending fails
services()
.rooms
self.services
.read_receipt
.private_read_set(&pdu.room_id, &pdu.sender, count1)?;
services()
.rooms
self.services
.user
.reset_notification_counts(&pdu.sender, &pdu.room_id)?;
let count2 = services().globals.next_count()?;
let count2 = self.services.globals.next_count()?;
let mut pdu_id = shortroomid.to_be_bytes().to_vec();
pdu_id.extend_from_slice(&count2.to_be_bytes());
@ -303,8 +343,8 @@ impl Service {
drop(insert_lock);
// See if the event matches any known pushers
let power_levels: RoomPowerLevelsEventContent = services()
.rooms
let power_levels: RoomPowerLevelsEventContent = self
.services
.state_accessor
.room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")?
.map(|ev| {
@ -319,8 +359,8 @@ impl Service {
let mut notifies = Vec::new();
let mut highlights = Vec::new();
let mut push_target = services()
.rooms
let mut push_target = self
.services
.state_cache
.active_local_users_in_room(&pdu.room_id)
.collect_vec();
@ -341,7 +381,8 @@ impl Service {
continue;
}
let rules_for_user = services()
let rules_for_user = self
.services
.account_data
.get(None, user, GlobalAccountDataEventType::PushRules.to_string().into())?
.map(|event| {
@ -357,7 +398,7 @@ impl Service {
let mut notify = false;
for action in
services()
self.services
.pusher
.get_actions(user, &rules_for_user, &power_levels, &sync_pdu, &pdu.room_id)?
{
@ -378,8 +419,10 @@ impl Service {
highlights.push(user.clone());
}
for push_key in services().pusher.get_pushkeys(user) {
services().sending.send_pdu_push(&pdu_id, user, push_key?)?;
for push_key in self.services.pusher.get_pushkeys(user) {
self.services
.sending
.send_pdu_push(&pdu_id, user, push_key?)?;
}
}
@ -390,11 +433,11 @@ impl Service {
TimelineEventType::RoomRedaction => {
use RoomVersionId::*;
let room_version_id = services().rooms.state.get_room_version(&pdu.room_id)?;
let room_version_id = self.services.state.get_room_version(&pdu.room_id)?;
match room_version_id {
V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => {
if let Some(redact_id) = &pdu.redacts {
if services().rooms.state_accessor.user_can_redact(
if self.services.state_accessor.user_can_redact(
redact_id,
&pdu.sender,
&pdu.room_id,
@ -412,7 +455,7 @@ impl Service {
})?;
if let Some(redact_id) = &content.redacts {
if services().rooms.state_accessor.user_can_redact(
if self.services.state_accessor.user_can_redact(
redact_id,
&pdu.sender,
&pdu.room_id,
@ -433,8 +476,7 @@ impl Service {
},
TimelineEventType::SpaceChild => {
if let Some(_state_key) = &pdu.state_key {
services()
.rooms
self.services
.spaces
.roomid_spacehierarchy_cache
.lock()
@ -455,7 +497,7 @@ impl Service {
let invite_state = match content.membership {
MembershipState::Invite => {
let state = services().rooms.state.calculate_invite_state(pdu)?;
let state = self.services.state.calculate_invite_state(pdu)?;
Some(state)
},
_ => None,
@ -463,7 +505,7 @@ impl Service {
// Update our membership info; we do this here in case a user is invited
// and immediately leaves, since we need the DB to record the invite event for auth
services().rooms.state_cache.update_membership(
self.services.state_cache.update_membership(
&pdu.room_id,
&target_user_id,
content,
@ -479,13 +521,12 @@ impl Service {
.map_err(|_| Error::bad_database("Invalid content in pdu."))?;
if let Some(body) = content.body {
services()
.rooms
self.services
.search
.index_pdu(shortroomid, &pdu_id, &body)?;
if services().admin.is_admin_command(pdu, &body).await {
services()
if self.services.admin.is_admin_command(pdu, &body).await {
self.services
.admin
.command(body, Some((*pdu.event_id).into()))
.await;
@ -497,8 +538,7 @@ impl Service {
if let Ok(content) = serde_json::from_str::<ExtractRelatesToEventId>(pdu.content.get()) {
if let Some(related_pducount) = self.get_pdu_count(&content.relates_to.event_id)? {
services()
.rooms
self.services
.pdu_metadata
.add_relation(PduCount::Normal(count2), related_pducount)?;
}
@ -512,29 +552,25 @@ impl Service {
// We need to do it again here, because replies don't have
// event_id as a top level field
if let Some(related_pducount) = self.get_pdu_count(&in_reply_to.event_id)? {
services()
.rooms
self.services
.pdu_metadata
.add_relation(PduCount::Normal(count2), related_pducount)?;
}
},
Relation::Thread(thread) => {
services()
.rooms
.threads
.add_to_thread(&thread.event_id, pdu)?;
self.services.threads.add_to_thread(&thread.event_id, pdu)?;
},
_ => {}, // TODO: Aggregate other types
}
}
for appservice in services().appservice.read().await.values() {
if services()
.rooms
for appservice in self.services.appservice.read().await.values() {
if self
.services
.state_cache
.appservice_in_room(&pdu.room_id, appservice)?
{
services()
self.services
.sending
.send_pdu_appservice(appservice.registration.id.clone(), pdu_id.clone())?;
continue;
@ -550,7 +586,7 @@ impl Service {
{
let appservice_uid = appservice.registration.sender_localpart.as_str();
if state_key_uid == appservice_uid {
services()
self.services
.sending
.send_pdu_appservice(appservice.registration.id.clone(), pdu_id.clone())?;
continue;
@ -567,8 +603,7 @@ impl Service {
.map_or(false, |state_key| users.is_match(state_key))
};
let matching_aliases = |aliases: &NamespaceRegex| {
services()
.rooms
self.services
.alias
.local_aliases_for_room(&pdu.room_id)
.filter_map(Result::ok)
@ -579,7 +614,7 @@ impl Service {
|| appservice.rooms.is_match(pdu.room_id.as_str())
|| matching_users(&appservice.users)
{
services()
self.services
.sending
.send_pdu_appservice(appservice.registration.id.clone(), pdu_id.clone())?;
}
@ -603,8 +638,8 @@ impl Service {
redacts,
} = pdu_builder;
let prev_events: Vec<_> = services()
.rooms
let prev_events: Vec<_> = self
.services
.state
.get_forward_extremities(room_id)?
.into_iter()
@ -612,28 +647,23 @@ impl Service {
.collect();
// If there was no create event yet, assume we are creating a room
let room_version_id = services()
.rooms
.state
.get_room_version(room_id)
.or_else(|_| {
if event_type == TimelineEventType::RoomCreate {
let content = serde_json::from_str::<RoomCreateEventContent>(content.get())
.expect("Invalid content in RoomCreate pdu.");
Ok(content.room_version)
} else {
Err(Error::InconsistentRoomState(
"non-create event for room of unknown version",
room_id.to_owned(),
))
}
})?;
let room_version_id = self.services.state.get_room_version(room_id).or_else(|_| {
if event_type == TimelineEventType::RoomCreate {
let content = serde_json::from_str::<RoomCreateEventContent>(content.get())
.expect("Invalid content in RoomCreate pdu.");
Ok(content.room_version)
} else {
Err(Error::InconsistentRoomState(
"non-create event for room of unknown version",
room_id.to_owned(),
))
}
})?;
let room_version = RoomVersion::new(&room_version_id).expect("room version is supported");
let auth_events =
services()
.rooms
self.services
.state
.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?;
@ -649,8 +679,7 @@ impl Service {
if let Some(state_key) = &state_key {
if let Some(prev_pdu) =
services()
.rooms
self.services
.state_accessor
.room_state_get(room_id, &event_type.to_string().into(), state_key)?
{
@ -730,12 +759,12 @@ impl Service {
// Add origin because synapse likes that (and it's required in the spec)
pdu_json.insert(
"origin".to_owned(),
to_canonical_value(services().globals.server_name()).expect("server name is a valid CanonicalJsonValue"),
to_canonical_value(self.services.globals.server_name()).expect("server name is a valid CanonicalJsonValue"),
);
match ruma::signatures::hash_and_sign_event(
services().globals.server_name().as_str(),
services().globals.keypair(),
self.services.globals.server_name().as_str(),
self.services.globals.keypair(),
&mut pdu_json,
&room_version_id,
) {
@ -763,8 +792,8 @@ impl Service {
);
// Generate short event id
let _shorteventid = services()
.rooms
let _shorteventid = self
.services
.short
.get_or_create_shorteventid(&pdu.event_id)?;
@ -783,7 +812,7 @@ impl Service {
state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex
) -> Result<Arc<EventId>> {
let (pdu, pdu_json) = self.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)?;
if let Some(admin_room) = services().admin.get_admin_room()? {
if let Some(admin_room) = self.services.admin.get_admin_room()? {
if admin_room == room_id {
match pdu.event_type() {
TimelineEventType::RoomEncryption => {
@ -798,7 +827,7 @@ impl Service {
.state_key()
.filter(|v| v.starts_with('@'))
.unwrap_or(sender.as_str());
let server_user = &services().globals.server_user.to_string();
let server_user = &self.services.globals.server_user.to_string();
let content = serde_json::from_str::<RoomMemberEventContent>(pdu.content.get())
.map_err(|_| Error::bad_database("Invalid content in pdu"))?;
@ -812,8 +841,8 @@ impl Service {
));
}
let count = services()
.rooms
let count = self
.services
.state_cache
.room_members(room_id)
.filter_map(Result::ok)
@ -837,8 +866,8 @@ impl Service {
));
}
let count = services()
.rooms
let count = self
.services
.state_cache
.room_members(room_id)
.filter_map(Result::ok)
@ -861,15 +890,14 @@ impl Service {
// If redaction event is not authorized, do not append it to the timeline
if pdu.kind == TimelineEventType::RoomRedaction {
use RoomVersionId::*;
match services().rooms.state.get_room_version(&pdu.room_id)? {
match self.services.state.get_room_version(&pdu.room_id)? {
V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => {
if let Some(redact_id) = &pdu.redacts {
if !services().rooms.state_accessor.user_can_redact(
redact_id,
&pdu.sender,
&pdu.room_id,
false,
)? {
if !self
.services
.state_accessor
.user_can_redact(redact_id, &pdu.sender, &pdu.room_id, false)?
{
return Err(Error::BadRequest(ErrorKind::forbidden(), "User cannot redact this event."));
}
};
@ -879,12 +907,11 @@ impl Service {
.map_err(|_| Error::bad_database("Invalid content in redaction pdu."))?;
if let Some(redact_id) = &content.redacts {
if !services().rooms.state_accessor.user_can_redact(
redact_id,
&pdu.sender,
&pdu.room_id,
false,
)? {
if !self
.services
.state_accessor
.user_can_redact(redact_id, &pdu.sender, &pdu.room_id, false)?
{
return Err(Error::BadRequest(ErrorKind::forbidden(), "User cannot redact this event."));
}
}
@ -895,7 +922,7 @@ impl Service {
// We append to state before appending the pdu, so we don't have a moment in
// time with the pdu without its state. This is okay because append_pdu can't
// fail.
let statehashid = services().rooms.state.append_to_state(&pdu)?;
let statehashid = self.services.state.append_to_state(&pdu)?;
let pdu_id = self
.append_pdu(
@ -910,13 +937,12 @@ impl Service {
// We set the room state after inserting the pdu, so that we never have a moment
// in time where events in the current room state do not exist
services()
.rooms
self.services
.state
.set_room_state(room_id, statehashid, state_lock)?;
let mut servers: HashSet<OwnedServerName> = services()
.rooms
let mut servers: HashSet<OwnedServerName> = self
.services
.state_cache
.room_servers(room_id)
.filter_map(Result::ok)
@ -936,9 +962,9 @@ impl Service {
// Remove our server from the server list since it will be added to it by
// room_servers() and/or the if statement above
servers.remove(services().globals.server_name());
servers.remove(self.services.globals.server_name());
services()
self.services
.sending
.send_pdu_servers(servers.into_iter(), &pdu_id)?;
@ -960,18 +986,15 @@ impl Service {
// We append to state before appending the pdu, so we don't have a moment in
// time with the pdu without its state. This is okay because append_pdu can't
// fail.
services()
.rooms
self.services
.state
.set_event_state(&pdu.event_id, &pdu.room_id, state_ids_compressed)?;
if soft_fail {
services()
.rooms
self.services
.pdu_metadata
.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?;
services()
.rooms
self.services
.state
.set_forward_extremities(&pdu.room_id, new_room_leaves, state_lock)?;
return Ok(None);
@ -1022,14 +1045,13 @@ impl Service {
if let Ok(content) = serde_json::from_str::<ExtractBody>(pdu.content.get()) {
if let Some(body) = content.body {
services()
.rooms
self.services
.search
.deindex_pdu(shortroomid, &pdu_id, &body)?;
}
}
let room_version_id = services().rooms.state.get_room_version(&pdu.room_id)?;
let room_version_id = self.services.state.get_room_version(&pdu.room_id)?;
pdu.redact(room_version_id, reason)?;
@ -1058,8 +1080,8 @@ impl Service {
return Ok(());
}
let power_levels: RoomPowerLevelsEventContent = services()
.rooms
let power_levels: RoomPowerLevelsEventContent = self
.services
.state_accessor
.room_state_get(room_id, &StateEventType::RoomPowerLevels, "")?
.map(|ev| {
@ -1077,8 +1099,8 @@ impl Service {
}
});
let room_alias_servers = services()
.rooms
let room_alias_servers = self
.services
.alias
.local_aliases_for_room(room_id)
.filter_map(|alias| {
@ -1090,14 +1112,13 @@ impl Service {
let servers = room_mods
.chain(room_alias_servers)
.chain(services().globals.config.trusted_servers.clone())
.chain(self.services.server.config.trusted_servers.clone())
.filter(|server_name| {
if server_is_ours(server_name) {
return false;
}
services()
.rooms
self.services
.state_cache
.server_in_room(server_name, room_id)
.unwrap_or(false)
@ -1105,7 +1126,8 @@ impl Service {
for backfill_server in servers {
info!("Asking {backfill_server} for backfill");
let response = services()
let response = self
.services
.sending
.send_federation_request(
&backfill_server,
@ -1141,11 +1163,11 @@ impl Service {
&self, origin: &ServerName, pdu: Box<RawJsonValue>,
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<()> {
let (event_id, value, room_id) = parse_incoming_pdu(&pdu)?;
let (event_id, value, room_id) = self.services.event_handler.parse_incoming_pdu(&pdu)?;
// Lock so we cannot backfill the same pdu twice at the same time
let mutex_lock = services()
.rooms
let mutex_lock = self
.services
.event_handler
.mutex_federation
.lock(&room_id)
@ -1158,14 +1180,12 @@ impl Service {
return Ok(());
}
services()
.rooms
self.services
.event_handler
.fetch_required_signing_keys([&value], pub_key_map)
.await?;
services()
.rooms
self.services
.event_handler
.handle_incoming_pdu(origin, &room_id, &event_id, value, false, pub_key_map)
.await?;
@ -1173,8 +1193,8 @@ impl Service {
let value = self.get_pdu_json(&event_id)?.expect("We just created it");
let pdu = self.get_pdu(&event_id)?.expect("We just created it");
let shortroomid = services()
.rooms
let shortroomid = self
.services
.short
.get_shortroomid(&room_id)?
.expect("room exists");
@ -1182,7 +1202,7 @@ impl Service {
let insert_lock = self.mutex_insert.lock(&room_id).await;
let max = u64::MAX;
let count = services().globals.next_count()?;
let count = self.services.globals.next_count()?;
let mut pdu_id = shortroomid.to_be_bytes().to_vec();
pdu_id.extend_from_slice(&0_u64.to_be_bytes());
pdu_id.extend_from_slice(&(validated!(max - count)?).to_be_bytes());
@ -1197,8 +1217,7 @@ impl Service {
.map_err(|_| Error::bad_database("Invalid content in pdu."))?;
if let Some(body) = content.body {
services()
.rooms
self.services
.search
.index_pdu(shortroomid, &pdu_id, &body)?;
}
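The hunks above all make the same substitution: call sites that reached through the process-wide `services()` accessor (`services().rooms.state`, `services().globals`, `services().sending`, ...) now go through `Dep<T>` handles that the service resolves once in `build()`. A minimal sketch of that shape, using an illustrative service and only the `Dep`/`args.depend` API visible in this diff (the dependency key "rooms::state" and the `example` method are hypothetical; remaining `crate::Service` trait items are elided):

use std::sync::Arc;

use conduit::Result;
use ruma::RoomId;

use crate::{rooms, sending, Dep};

pub struct Service {
	services: Services,
}

struct Services {
	// one handle per dependency, resolved once instead of calling services() at every site
	state: Dep<rooms::state::Service>,
	sending: Dep<sending::Service>,
}

impl crate::Service for Service {
	fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
		Ok(Arc::new(Self {
			services: Services {
				state: args.depend::<rooms::state::Service>("rooms::state"),
				sending: args.depend::<sending::Service>("sending"),
			},
		}))
	}

	// remaining trait items elided
}

impl Service {
	// hypothetical call site showing the new access path
	fn example(&self, room_id: &RoomId) -> Result<()> {
		// formerly: services().rooms.state.get_room_version(room_id)?
		let _version = self.services.state.get_room_version(room_id)?;
		Ok(())
	}
}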

View file

@ -1,6 +1,6 @@
use std::{collections::BTreeMap, sync::Arc};
use conduit::{debug_info, trace, utils, Result};
use conduit::{debug_info, trace, utils, Result, Server};
use ruma::{
api::federation::transactions::edu::{Edu, TypingContent},
events::SyncEphemeralRoomEvent,
@ -8,19 +8,31 @@ use ruma::{
};
use tokio::sync::{broadcast, RwLock};
use crate::{services, user_is_local};
use crate::{globals, sending, user_is_local, Dep};
pub struct Service {
pub typing: RwLock<BTreeMap<OwnedRoomId, BTreeMap<OwnedUserId, u64>>>, // u64 is unix timestamp of timeout
pub last_typing_update: RwLock<BTreeMap<OwnedRoomId, u64>>, /* timestamp of the last change to
* typing
* users */
server: Arc<Server>,
services: Services,
/// u64 is unix timestamp of timeout
pub typing: RwLock<BTreeMap<OwnedRoomId, BTreeMap<OwnedUserId, u64>>>,
/// timestamp of the last change to typing users
pub last_typing_update: RwLock<BTreeMap<OwnedRoomId, u64>>,
pub typing_update_sender: broadcast::Sender<OwnedRoomId>,
}
struct Services {
globals: Dep<globals::Service>,
sending: Dep<sending::Service>,
}
impl crate::Service for Service {
fn build(_args: crate::Args<'_>) -> Result<Arc<Self>> {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
Ok(Arc::new(Self {
server: args.server.clone(),
services: Services {
globals: args.depend::<globals::Service>("globals"),
sending: args.depend::<sending::Service>("sending"),
},
typing: RwLock::new(BTreeMap::new()),
last_typing_update: RwLock::new(BTreeMap::new()),
typing_update_sender: broadcast::channel(100).0,
@ -45,14 +57,14 @@ impl Service {
self.last_typing_update
.write()
.await
.insert(room_id.to_owned(), services().globals.next_count()?);
.insert(room_id.to_owned(), self.services.globals.next_count()?);
if self.typing_update_sender.send(room_id.to_owned()).is_err() {
trace!("receiver found what it was looking for and is no longer interested");
}
// update federation
if user_is_local(user_id) {
Self::federation_send(room_id, user_id, true)?;
self.federation_send(room_id, user_id, true)?;
}
Ok(())
@ -71,14 +83,14 @@ impl Service {
self.last_typing_update
.write()
.await
.insert(room_id.to_owned(), services().globals.next_count()?);
.insert(room_id.to_owned(), self.services.globals.next_count()?);
if self.typing_update_sender.send(room_id.to_owned()).is_err() {
trace!("receiver found what it was looking for and is no longer interested");
}
// update federation
if user_is_local(user_id) {
Self::federation_send(room_id, user_id, false)?;
self.federation_send(room_id, user_id, false)?;
}
Ok(())
@ -126,7 +138,7 @@ impl Service {
self.last_typing_update
.write()
.await
.insert(room_id.to_owned(), services().globals.next_count()?);
.insert(room_id.to_owned(), self.services.globals.next_count()?);
if self.typing_update_sender.send(room_id.to_owned()).is_err() {
trace!("receiver found what it was looking for and is no longer interested");
}
@ -134,7 +146,7 @@ impl Service {
// update federation
for user in removable {
if user_is_local(&user) {
Self::federation_send(room_id, &user, false)?;
self.federation_send(room_id, &user, false)?;
}
}
}
@ -171,15 +183,15 @@ impl Service {
})
}
fn federation_send(room_id: &RoomId, user_id: &UserId, typing: bool) -> Result<()> {
fn federation_send(&self, room_id: &RoomId, user_id: &UserId, typing: bool) -> Result<()> {
debug_assert!(user_is_local(user_id), "tried to broadcast typing status of remote user",);
if !services().globals.config.allow_outgoing_typing {
if !self.server.config.allow_outgoing_typing {
return Ok(());
}
let edu = Edu::Typing(TypingContent::new(room_id.to_owned(), user_id.to_owned(), typing));
services()
self.services
.sending
.send_edu_room(room_id, serde_json::to_vec(&edu).expect("Serialized Edu::Typing"))?;
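The typing service gets the same treatment one step further: `federation_send` becomes an instance method, the config flag is read from the `Arc<Server>` held by the struct (the timeline service above keeps its server handle inside `Services` instead, as `self.services.server.config`), and the EDU goes out through the injected `sending` dependency. A condensed copy of the resulting method, abridged from the hunks above with no new APIs:

fn federation_send(&self, room_id: &RoomId, user_id: &UserId, typing: bool) -> Result<()> {
	debug_assert!(user_is_local(user_id), "tried to broadcast typing status of remote user");

	// config now comes from the server handle held by the service
	if !self.server.config.allow_outgoing_typing {
		return Ok(());
	}

	// the EDU is routed through the injected sending service rather than services()
	let edu = Edu::Typing(TypingContent::new(room_id.to_owned(), user_id.to_owned(), typing));
	self.services
		.sending
		.send_edu_room(room_id, serde_json::to_vec(&edu).expect("Serialized Edu::Typing"))?;

	Ok(())
}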

View file

@ -1,10 +1,10 @@
use std::sync::Arc;
use conduit::{utils, Error, Result};
use database::{Database, Map};
use database::Map;
use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId};
use crate::services;
use crate::{globals, rooms, Dep};
pub(super) struct Data {
userroomid_notificationcount: Arc<Map>,
@ -12,16 +12,27 @@ pub(super) struct Data {
roomuserid_lastnotificationread: Arc<Map>,
roomsynctoken_shortstatehash: Arc<Map>,
userroomid_joined: Arc<Map>,
services: Services,
}
struct Services {
globals: Dep<globals::Service>,
short: Dep<rooms::short::Service>,
}
impl Data {
pub(super) fn new(db: &Arc<Database>) -> Self {
pub(super) fn new(args: &crate::Args<'_>) -> Self {
let db = &args.db;
Self {
userroomid_notificationcount: db["userroomid_notificationcount"].clone(),
userroomid_highlightcount: db["userroomid_highlightcount"].clone(),
roomuserid_lastnotificationread: db["userroomid_highlightcount"].clone(), //< NOTE: known bug from conduit
roomsynctoken_shortstatehash: db["roomsynctoken_shortstatehash"].clone(),
userroomid_joined: db["userroomid_joined"].clone(),
services: Services {
globals: args.depend::<globals::Service>("globals"),
short: args.depend::<rooms::short::Service>("rooms::short"),
},
}
}
@ -39,7 +50,7 @@ impl Data {
.insert(&userroom_id, &0_u64.to_be_bytes())?;
self.roomuserid_lastnotificationread
.insert(&roomuser_id, &services().globals.next_count()?.to_be_bytes())?;
.insert(&roomuser_id, &self.services.globals.next_count()?.to_be_bytes())?;
Ok(())
}
@ -87,8 +98,8 @@ impl Data {
pub(super) fn associate_token_shortstatehash(
&self, room_id: &RoomId, token: u64, shortstatehash: u64,
) -> Result<()> {
let shortroomid = services()
.rooms
let shortroomid = self
.services
.short
.get_shortroomid(room_id)?
.expect("room exists");
@ -101,8 +112,8 @@ impl Data {
}
pub(super) fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result<Option<u64>> {
let shortroomid = services()
.rooms
let shortroomid = self
.services
.short
.get_shortroomid(room_id)?
.expect("room exists");

View file

@ -3,9 +3,10 @@ mod data;
use std::sync::Arc;
use conduit::Result;
use data::Data;
use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId};
use self::data::Data;
pub struct Service {
db: Data,
}
@ -13,7 +14,7 @@ pub struct Service {
impl crate::Service for Service {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
Ok(Arc::new(Self {
db: Data::new(args.db),
db: Data::new(&args),
}))
}
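The data layer follows the same wiring one level down: `Data::new` now takes the whole `crate::Args`, pulls its column handles from `args.db`, and keeps its own `Services` struct with whatever `Dep` handles its queries need. A reduced sketch of that constructor shape, with the column and dependency names taken from the hunks above and the unrelated fields elided:

use std::sync::Arc;

use database::Map;

use crate::{globals, rooms, Dep};

pub(super) struct Data {
	roomsynctoken_shortstatehash: Arc<Map>,
	services: Services,
}

struct Services {
	globals: Dep<globals::Service>,
	short: Dep<rooms::short::Service>,
}

impl Data {
	pub(super) fn new(args: &crate::Args<'_>) -> Self {
		let db = &args.db;
		Self {
			// column handles still come straight from the database maps
			roomsynctoken_shortstatehash: db["roomsynctoken_shortstatehash"].clone(),
			// dependencies are resolved here instead of through services() at call sites
			services: Services {
				globals: args.depend::<globals::Service>("globals"),
				short: args.depend::<rooms::short::Service>("rooms::short"),
			},
		}
	}
}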