impl crate::Service for Service

Signed-off-by: Jason Volk <jason@zemos.net>
Jason Volk 2024-07-04 03:26:19 +00:00
parent 177c9e8bfa
commit e125af620e
44 changed files with 673 additions and 548 deletions
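For orientation: the hunks below only show implementations of the new service trait; the trait itself is defined elsewhere in the crate and is not part of this excerpt. A rough sketch of its shape, inferred solely from the signatures used in this diff (the real definition, default bodies, and the exact fields of crate::Args may differ):

    // Hypothetical reconstruction, not code from this commit. Only the methods
    // exercised by the hunks below are sketched.
    use std::{fmt::Write, sync::Arc};

    use conduit::{Result, Server};
    use database::Database;

    pub(crate) struct Args<'a> {
        pub(crate) server: &'a Arc<Server>,
        pub(crate) db: &'a Arc<Database>,
    }

    pub(crate) trait Service: Send + Sync {
        /// Construct the service from the shared server/database handles.
        fn build(args: Args<'_>) -> Result<Arc<Self>>
        where
            Self: Sized;

        /// Append human-readable cache statistics to `out`.
        fn memory_usage(&self, _out: &mut dyn Write) -> Result<()> { Ok(()) }

        /// Drop any in-memory caches held by the service.
        fn clear_cache(&self) {}

        /// Name used to register and address the service.
        fn name(&self) -> &str;
    }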


@@ -208,41 +208,6 @@ impl Data {
     pub fn cleanup(&self) -> Result<()> { self.db.db.cleanup() }
 
-    pub fn memory_usage(&self) -> String {
-        let (auth_chain_cache, max_auth_chain_cache) = services().rooms.auth_chain.get_cache_usage();
-        let (appservice_in_room_cache, max_appservice_in_room_cache) = services()
-            .rooms
-            .state_cache
-            .get_appservice_in_room_cache_usage();
-        let (lasttimelinecount_cache, max_lasttimelinecount_cache) = services()
-            .rooms
-            .timeline
-            .get_lasttimelinecount_cache_usage();
-
-        format!(
-            "auth_chain_cache: {auth_chain_cache} / {max_auth_chain_cache}\nappservice_in_room_cache: \
-             {appservice_in_room_cache} / {max_appservice_in_room_cache}\nlasttimelinecount_cache: \
-             {lasttimelinecount_cache} / {max_lasttimelinecount_cache}\n\n{}",
-            self.db.db.memory_usage().unwrap_or_default()
-        )
-    }
-
-    #[allow(clippy::unused_self)]
-    pub fn clear_caches(&self, amount: u32) {
-        if amount > 1 {
-            services().rooms.auth_chain.clear_cache();
-        }
-        if amount > 2 {
-            services()
-                .rooms
-                .state_cache
-                .clear_appservice_in_room_cache();
-        }
-        if amount > 3 {
-            services().rooms.timeline.clear_lasttimelinecount_cache();
-        }
-    }
-
     pub fn load_keypair(&self) -> Result<Ed25519KeyPair> {
         let keypair_bytes = self.global.get(b"keypair")?.map_or_else(
             || {


@@ -7,13 +7,13 @@ pub(super) mod updates;
 use std::{
     collections::{BTreeMap, HashMap},
-    sync::Arc,
+    fmt::Write,
+    sync::{Arc, RwLock},
     time::Instant,
 };
 
-use conduit::{error, trace, utils::MutexMap, Config, Result, Server};
+use conduit::{error, trace, utils::MutexMap, Config, Result};
 use data::Data;
 use database::Database;
 use ipaddress::IPAddress;
 use regex::RegexSet;
 use ruma::{
@@ -25,10 +25,7 @@ use ruma::{
     DeviceId, OwnedEventId, OwnedRoomAliasId, OwnedRoomId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId,
     RoomAliasId, RoomVersionId, ServerName, UserId,
 };
-use tokio::{
-    sync::{Mutex, RwLock},
-    task::JoinHandle,
-};
+use tokio::{sync::Mutex, task::JoinHandle};
 use url::Url;
 
 use crate::services;
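One consequence of moving RwLock from tokio::sync to std::sync in the import hunks above is that the guarded maps can be read from the synchronous trait methods introduced further down; a tokio RwLock would force those methods to be async. A minimal illustration of the difference (the helper name and generic map type are placeholders, not code from the repository):

    use std::{collections::HashMap, sync::RwLock};

    // With std::sync::RwLock the length can be read synchronously, matching the
    // pattern used in memory_usage() below; read() returns a lock-poisoning
    // Result, hence the expect().
    fn cache_len<K, V>(map: &RwLock<HashMap<K, V>>) -> usize {
        map.read().expect("locked for reading").len()
    }

    // The tokio equivalent would have to await the guard instead:
    // async fn cache_len<K, V>(map: &tokio::sync::RwLock<HashMap<K, V>>) -> usize {
    //     map.read().await.len()
    // }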
@@ -59,10 +56,10 @@ pub struct Service {
     pub admin_alias: OwnedRoomAliasId,
 }
 
-impl Service {
-    pub fn build(server: &Arc<Server>, db: &Arc<Database>) -> Result<Self> {
-        let config = &server.config;
-        let db = Data::new(db);
+impl crate::Service for Service {
+    fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
+        let config = &args.server.config;
+        let db = Data::new(args.db);
         let keypair = db.load_keypair();
 
         let keypair = match keypair {
@@ -133,9 +130,59 @@ impl Service {
             s.config.default_room_version = crate::config::default_default_room_version();
         };
 
-        Ok(s)
+        Ok(Arc::new(s))
     }
 
+    fn memory_usage(&self, out: &mut dyn Write) -> Result<()> {
+        self.resolver.memory_usage(out)?;
+
+        let bad_event_ratelimiter = self
+            .bad_event_ratelimiter
+            .read()
+            .expect("locked for reading")
+            .len();
+        writeln!(out, "bad_event_ratelimiter: {bad_event_ratelimiter}")?;
+
+        let bad_query_ratelimiter = self
+            .bad_query_ratelimiter
+            .read()
+            .expect("locked for reading")
+            .len();
+        writeln!(out, "bad_query_ratelimiter: {bad_query_ratelimiter}")?;
+
+        let bad_signature_ratelimiter = self
+            .bad_signature_ratelimiter
+            .read()
+            .expect("locked for reading")
+            .len();
+        writeln!(out, "bad_signature_ratelimiter: {bad_signature_ratelimiter}")?;
+
+        Ok(())
+    }
+
+    fn clear_cache(&self) {
+        self.resolver.clear_cache();
+
+        self.bad_event_ratelimiter
+            .write()
+            .expect("locked for writing")
+            .clear();
+
+        self.bad_query_ratelimiter
+            .write()
+            .expect("locked for writing")
+            .clear();
+
+        self.bad_signature_ratelimiter
+            .write()
+            .expect("locked for writing")
+            .clear();
+    }
+
+    fn name(&self) -> &str { crate::service::make_name(std::module_path!()) }
+}
+
+impl Service {
     /// Returns this server's keypair.
     pub fn keypair(&self) -> &ruma::signatures::Ed25519KeyPair { &self.keypair }
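With memory_usage() and clear_cache() now living on the trait, a caller such as an admin memory-usage command no longer needs the hard-coded report that the first hunk removes from Data; it can walk the registered services generically. A hypothetical caller-side sketch (the slice of trait objects and the function names are illustrative, not taken from this commit):

    use std::{fmt::Write, sync::Arc};

    use conduit::Result;

    // Illustrative only: assumes some registry hands us the services as trait objects.
    fn render_memory_usage(services: &[Arc<dyn crate::Service>]) -> Result<String> {
        let mut out = String::new();
        for service in services {
            writeln!(out, "{}:", service.name())?;
            service.memory_usage(&mut out)?;
        }

        Ok(out)
    }

    fn clear_all_caches(services: &[Arc<dyn crate::Service>]) {
        for service in services {
            service.clear_cache();
        }
    }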


@@ -1,12 +1,13 @@
 use std::{
     collections::HashMap,
+    fmt::Write,
     future, iter,
     net::{IpAddr, SocketAddr},
     sync::{Arc, RwLock},
     time::Duration,
 };
 
-use conduit::{error, Config, Error};
+use conduit::{error, Config, Error, Result};
 use hickory_resolver::TokioAsyncResolver;
 use reqwest::dns::{Addrs, Name, Resolve, Resolving};
 use ruma::OwnedServerName;
@@ -30,7 +31,7 @@ pub struct Hooked {
 impl Resolver {
     #[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)]
-    pub fn new(config: &Config) -> Self {
+    pub(super) fn new(config: &Config) -> Self {
         let (sys_conf, mut opts) = hickory_resolver::system_conf::read_system_conf()
             .map_err(|e| {
                 error!("Failed to set up hickory dns resolver with system config: {e}");
@@ -92,6 +93,22 @@ impl Resolver {
             }),
         }
     }
+
+    pub(super) fn memory_usage(&self, out: &mut dyn Write) -> Result<()> {
+        let resolver_overrides_cache = self.overrides.read().expect("locked for reading").len();
+        writeln!(out, "resolver_overrides_cache: {resolver_overrides_cache}")?;
+
+        let resolver_destinations_cache = self.destinations.read().expect("locked for reading").len();
+        writeln!(out, "resolver_destinations_cache: {resolver_destinations_cache}")?;
+
+        Ok(())
+    }
+
+    pub(super) fn clear_cache(&self) {
+        self.overrides.write().expect("write locked").clear();
+        self.destinations.write().expect("write locked").clear();
+        self.resolver.clear_cache();
+    }
 }
 
 impl Resolve for Resolver {