messing with trait objects

This commit is contained in:
parent 8708cd3b63
commit face766e0f

61 changed files with 623 additions and 544 deletions
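The thread running through this commit: Conduit's storage layer is split into per-service `Data` traits, and methods that used to return concrete iterator types now return boxed trait objects, since a trait method cannot return `impl Iterator` and stay object-safe. A minimal sketch of the shape, with illustrative names that are not from the codebase:

trait Data {
    // Returning `impl Iterator` here would prevent using `dyn Data`,
    // so the concrete iterator type is erased behind a Box.
    fn user_ids(&self) -> Box<dyn Iterator<Item = String> + '_>;
}

struct KvStore {
    users: Vec<String>,
}

impl Data for KvStore {
    fn user_ids(&self) -> Box<dyn Iterator<Item = String> + '_> {
        // Callers only see `dyn Iterator`; the map/clone adaptor type is hidden.
        Box::new(self.users.iter().cloned())
    }
}

The `Box::new(...)` wrappers added throughout the key_value impls below are this pattern applied mechanically.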
@@ -481,7 +481,7 @@ async fn join_room_by_id_helper(
     let (make_join_response, remote_server) = make_join_response_and_server?;
 
     let room_version = match make_join_response.room_version {
-        Some(room_version) if services().rooms.metadata.is_supported_version(&room_version) => room_version,
+        Some(room_version) if services().globals.supported_room_versions().contains(&room_version) => room_version,
         _ => return Err(Error::BadServerResponse("Room version is not supported")),
     };
 
@@ -568,7 +568,7 @@ async fn join_room_by_id_helper(
     let mut state = HashMap::new();
     let pub_key_map = RwLock::new(BTreeMap::new());
 
-    server_server::fetch_join_signing_keys(
+    services().rooms.event_handler.fetch_join_signing_keys(
         &send_join_response,
         &room_version,
         &pub_key_map,
@@ -1048,7 +1048,7 @@ async fn remote_leave_room(
     let (make_leave_response, remote_server) = make_leave_response_and_server?;
 
     let room_version_id = match make_leave_response.room_version {
-        Some(version) if services().rooms.is_supported_version(&version) => version,
+        Some(version) if services().globals.supported_room_versions().contains(&version) => version,
         _ => return Err(Error::BadServerResponse("Room version is not supported")),
     };
 
@@ -99,7 +99,7 @@ pub async fn create_room_route(
 
     let room_version = match body.room_version.clone() {
         Some(room_version) => {
-            if services().rooms.is_supported_version(&services(), &room_version) {
+            if services().globals.supported_room_versions().contains(&room_version) {
                 room_version
             } else {
                 return Err(Error::BadRequest(
@@ -470,7 +470,7 @@ pub async fn upgrade_room_route(
 ) -> Result<upgrade_room::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 
-    if !services().rooms.is_supported_version(&body.new_version) {
+    if !services().globals.supported_room_versions().contains(&body.new_version) {
         return Err(Error::BadRequest(
             ErrorKind::UnsupportedRoomVersion,
             "This server does not support that room version.",
@@ -175,7 +175,7 @@ async fn sync_helper(
     services().rooms.edus.presence.ping_presence(&sender_user)?;
 
     // Setup watchers, so if there's no response, we can wait for them
-    let watcher = services().watch(&sender_user, &sender_device);
+    let watcher = services().globals.db.watch(&sender_user, &sender_device);
 
     let next_batch = services().globals.current_count()?;
     let next_batch_string = next_batch.to_string();
@@ -197,7 +197,7 @@ where
         request_map.insert("content".to_owned(), json_body.clone());
     };
 
-    let keys_result = server_server::fetch_signing_keys(
+    let keys_result = services().rooms.event_handler.fetch_signing_keys(
         &x_matrix.origin,
         vec![x_matrix.key.to_owned()],
     )
@@ -664,7 +664,7 @@ pub async fn send_transaction_message_route(
         Some(id) => id,
         None => {
             // Event is invalid
-            resolved_map.insert(event_id, Err("Event needs a valid RoomId.".to_owned()));
+            resolved_map.insert(event_id, Err(Error::bad_database("Event needs a valid RoomId.")));
             continue;
         }
     };
@@ -707,7 +707,7 @@ pub async fn send_transaction_message_route(
 
     for pdu in &resolved_map {
         if let Err(e) = pdu.1 {
-            if e != "Room is unknown to this server." {
+            if matches!(e, Error::BadRequest(ErrorKind::NotFound, _)) {
                 warn!("Incoming PDU failed {:?}", pdu);
            }
        }
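The hunks around this point change `resolved_map` from holding `String` error messages to holding structured `Error` values: failures are matched by kind with `matches!`, and only stringified at the federation boundary with `map_err(|e| e.to_string())` in the hunk below. A hedged sketch of the difference, with a stand-in enum rather than the real conduit/ruma types:

#[derive(Debug)]
enum ErrorKind {
    NotFound,
}

#[derive(Debug)]
enum Error {
    BadRequest(ErrorKind, &'static str),
    BadDatabase(&'static str),
}

fn should_warn(e: &Error) -> bool {
    // A pattern match keeps working if the message text is reworded;
    // comparing against "Room is unknown to this server." would not.
    matches!(e, Error::BadRequest(ErrorKind::NotFound, _))
}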
@@ -854,170 +854,7 @@ pub async fn send_transaction_message_route(
         }
     }
 
-    Ok(send_transaction_message::v1::Response { pdus: resolved_map })
-}
-
-/// Search the DB for the signing keys of the given server, if we don't have them
-/// fetch them from the server and save to our DB.
-#[tracing::instrument(skip_all)]
-pub(crate) async fn fetch_signing_keys(
-    origin: &ServerName,
-    signature_ids: Vec<String>,
-) -> Result<BTreeMap<String, Base64>> {
-    let contains_all_ids =
-        |keys: &BTreeMap<String, Base64>| signature_ids.iter().all(|id| keys.contains_key(id));
-
-    let permit = services()
-        .globals
-        .servername_ratelimiter
-        .read()
-        .unwrap()
-        .get(origin)
-        .map(|s| Arc::clone(s).acquire_owned());
-
-    let permit = match permit {
-        Some(p) => p,
-        None => {
-            let mut write = services().globals.servername_ratelimiter.write().unwrap();
-            let s = Arc::clone(
-                write
-                    .entry(origin.to_owned())
-                    .or_insert_with(|| Arc::new(Semaphore::new(1))),
-            );
-
-            s.acquire_owned()
-        }
-    }
-    .await;
-
-    let back_off = |id| match services()
-        .globals
-        .bad_signature_ratelimiter
-        .write()
-        .unwrap()
-        .entry(id)
-    {
-        hash_map::Entry::Vacant(e) => {
-            e.insert((Instant::now(), 1));
-        }
-        hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
-    };
-
-    if let Some((time, tries)) = services()
-        .globals
-        .bad_signature_ratelimiter
-        .read()
-        .unwrap()
-        .get(&signature_ids)
-    {
-        // Exponential backoff
-        let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
-        if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
-            min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
-        }
-
-        if time.elapsed() < min_elapsed_duration {
-            debug!("Backing off from {:?}", signature_ids);
-            return Err(Error::BadServerResponse("bad signature, still backing off"));
-        }
-    }
-
-    trace!("Loading signing keys for {}", origin);
-
-    let mut result: BTreeMap<_, _> = services()
-        .globals
-        .signing_keys_for(origin)?
-        .into_iter()
-        .map(|(k, v)| (k.to_string(), v.key))
-        .collect();
-
-    if contains_all_ids(&result) {
-        return Ok(result);
-    }
-
-    debug!("Fetching signing keys for {} over federation", origin);
-
-    if let Some(server_key) = services()
-        .sending
-        .send_federation_request(origin, get_server_keys::v2::Request::new())
-        .await
-        .ok()
-        .and_then(|resp| resp.server_key.deserialize().ok())
-    {
-        services().globals.add_signing_key(origin, server_key.clone())?;
-
-        result.extend(
-            server_key
-                .verify_keys
-                .into_iter()
-                .map(|(k, v)| (k.to_string(), v.key)),
-        );
-        result.extend(
-            server_key
-                .old_verify_keys
-                .into_iter()
-                .map(|(k, v)| (k.to_string(), v.key)),
-        );
-
-        if contains_all_ids(&result) {
-            return Ok(result);
-        }
-    }
-
-    for server in services().globals.trusted_servers() {
-        debug!("Asking {} for {}'s signing key", server, origin);
-        if let Some(server_keys) = services()
-            .sending
-            .send_federation_request(
-                server,
-                get_remote_server_keys::v2::Request::new(
-                    origin,
-                    MilliSecondsSinceUnixEpoch::from_system_time(
-                        SystemTime::now()
-                            .checked_add(Duration::from_secs(3600))
-                            .expect("SystemTime to large"),
-                    )
-                    .expect("time is valid"),
-                ),
-            )
-            .await
-            .ok()
-            .map(|resp| {
-                resp.server_keys
-                    .into_iter()
-                    .filter_map(|e| e.deserialize().ok())
-                    .collect::<Vec<_>>()
-            })
-        {
-            trace!("Got signing keys: {:?}", server_keys);
-            for k in server_keys {
-                services().globals.add_signing_key(origin, k.clone())?;
-                result.extend(
-                    k.verify_keys
-                        .into_iter()
-                        .map(|(k, v)| (k.to_string(), v.key)),
-                );
-                result.extend(
-                    k.old_verify_keys
-                        .into_iter()
-                        .map(|(k, v)| (k.to_string(), v.key)),
-                );
-            }
-
-            if contains_all_ids(&result) {
-                return Ok(result);
-            }
-        }
-    }
-
-    drop(permit);
-
-    back_off(signature_ids);
-
-    warn!("Failed to find public key for server: {}", origin);
-    Err(Error::BadServerResponse(
-        "Failed to find public key for server",
-    ))
+    Ok(send_transaction_message::v1::Response { pdus: resolved_map.into_iter().map(|(e, r)| (e, r.map_err(|e| e.to_string()))).collect() })
 }
 
 #[tracing::instrument(skip(starting_events))]
@@ -1050,7 +887,7 @@ pub(crate) async fn get_auth_chain<'a>(
         }
 
         let chunk_key: Vec<u64> = chunk.iter().map(|(short, _)| short).copied().collect();
-        if let Some(cached) = services().rooms.auth_chain.get_auth_chain_from_cache(&chunk_key)? {
+        if let Some(cached) = services().rooms.auth_chain.get_cached_eventid_authchain(&chunk_key)? {
            hits += 1;
            full_auth_chain.extend(cached.iter().copied());
            continue;
@@ -1062,7 +899,7 @@ pub(crate) async fn get_auth_chain<'a>(
        let mut misses2 = 0;
        let mut i = 0;
        for (sevent_id, event_id) in chunk {
-            if let Some(cached) = services().rooms.auth_chain.get_auth_chain_from_cache(&[sevent_id])? {
+            if let Some(cached) = services().rooms.auth_chain.get_cached_eventid_authchain(&[sevent_id])? {
                hits2 += 1;
                chunk_cache.extend(cached.iter().copied());
            } else {
@@ -1689,7 +1526,7 @@ pub async fn create_invite_route(
 
     services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?;
 
-    if !services().rooms.is_supported_version(&body.room_version) {
+    if !services().globals.supported_room_versions().contains(&body.room_version) {
         return Err(Error::BadRequest(
             ErrorKind::IncompatibleRoomVersion {
                 room_version: body.room_version.clone(),
@@ -4,10 +4,10 @@ use crate::{Result, service, database::KeyValueDatabase, Error, utils};
 
 impl service::globals::Data for KeyValueDatabase {
     fn load_keypair(&self) -> Result<Ed25519KeyPair> {
-        let keypair_bytes = self.globals.get(b"keypair")?.map_or_else(
+        let keypair_bytes = self.global.get(b"keypair")?.map_or_else(
             || {
                 let keypair = utils::generate_keypair();
-                self.globals.insert(b"keypair", &keypair)?;
+                self.global.insert(b"keypair", &keypair)?;
                 Ok::<_, Error>(keypair)
             },
             |s| Ok(s.to_vec()),
@@ -33,8 +33,10 @@ impl service::globals::Data for KeyValueDatabase {
             Ed25519KeyPair::from_der(key, version)
                 .map_err(|_| Error::bad_database("Private or public keys are invalid."))
         });
+
+        keypair
     }
     fn remove_keypair(&self) -> Result<()> {
-        self.globals.remove(b"keypair")?
+        self.global.remove(b"keypair")
     }
 }
@@ -1,3 +1,5 @@
+use ruma::api::client::error::ErrorKind;
+
 use crate::{database::KeyValueDatabase, service, Error, utils, Result};
 
 impl service::media::Data for KeyValueDatabase {
@@ -33,7 +35,7 @@ impl service::media::Data for KeyValueDatabase {
         prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail
         prefix.push(0xff);
 
-        let (key, _) = self.mediaid_file.scan_prefix(prefix).next().ok_or(Error::NotFound)?;
+        let (key, _) = self.mediaid_file.scan_prefix(prefix).next().ok_or(Error::BadRequest(ErrorKind::NotFound, "Media not found"))?;
 
         let mut parts = key.rsplit(|&b| b == 0xff);
 
@@ -55,6 +55,6 @@ impl service::pusher::Data for KeyValueDatabase {
         let mut prefix = sender.as_bytes().to_vec();
         prefix.push(0xff);
 
-        self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| k)
+        Box::new(self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| k))
     }
 }
@@ -56,15 +56,15 @@ impl service::rooms::alias::Data for KeyValueDatabase {
     fn local_aliases_for_room(
         &self,
         room_id: &RoomId,
-    ) -> Result<Box<dyn Iterator<Item=String>>> {
+    ) -> Box<dyn Iterator<Item = Result<Box<RoomAliasId>>>> {
         let mut prefix = room_id.as_bytes().to_vec();
         prefix.push(0xff);
 
-        self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| {
+        Box::new(self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| {
             utils::string_from_bytes(&bytes)
                 .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))?
                 .try_into()
                 .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias."))
-        })
+        }))
     }
 }
@@ -3,8 +3,8 @@ use std::{collections::HashSet, mem::size_of};
 use crate::{service, database::KeyValueDatabase, Result, utils};
 
 impl service::rooms::auth_chain::Data for KeyValueDatabase {
-    fn get_cached_eventid_authchain(&self, shorteventid: u64) -> Result<HashSet<u64>> {
-        self.shorteventid_authchain
+    fn get_cached_eventid_authchain(&self, shorteventid: u64) -> Result<Option<HashSet<u64>>> {
+        Ok(self.shorteventid_authchain
             .get(&shorteventid.to_be_bytes())?
             .map(|chain| {
                 chain
@@ -13,7 +13,7 @@ impl service::rooms::auth_chain::Data for KeyValueDatabase {
                     utils::u64_from_bytes(chunk).expect("byte length is correct")
                 })
                 .collect()
-            })
+            }))
     }
 
     fn cache_eventid_authchain(&self, shorteventid: u64, auth_chain: &HashSet<u64>) -> Result<()> {
@@ -145,4 +145,6 @@ fn parse_presence_event(bytes: &[u8]) -> Result<PresenceEvent> {
             .last_active_ago
             .map(|timestamp| current_timestamp - timestamp);
     }
+
+    Ok(presence)
 }
@@ -64,7 +64,7 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase {
         let mut first_possible_edu = prefix.clone();
         first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since
 
-        self.readreceiptid_readreceipt
+        Box::new(self.readreceiptid_readreceipt
             .iter_from(&first_possible_edu, false)
             .take_while(move |(k, _)| k.starts_with(&prefix2))
             .map(move |(k, v)| {
@@ -91,7 +91,7 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase {
                     serde_json::value::to_raw_value(&json).expect("json is valid raw value"),
                 ),
             ))
-        })
+        }))
     }
 
     fn private_read_set(
@@ -25,14 +25,8 @@ impl service::rooms::lazy_loading::Data for KeyValueDatabase {
         user_id: &UserId,
         device_id: &DeviceId,
         room_id: &RoomId,
-        since: u64,
+        confirmed_user_ids: &mut dyn Iterator<Item = &UserId>,
     ) -> Result<()> {
-        if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&(
-            user_id.to_owned(),
-            device_id.to_owned(),
-            room_id.to_owned(),
-            since,
-        )) {
         let mut prefix = user_id.as_bytes().to_vec();
         prefix.push(0xff);
         prefix.extend_from_slice(device_id.as_bytes());
@@ -40,12 +34,11 @@ impl service::rooms::lazy_loading::Data for KeyValueDatabase {
         prefix.extend_from_slice(room_id.as_bytes());
         prefix.push(0xff);
 
-        for ll_id in user_ids {
+        for ll_id in confirmed_user_ids {
             let mut key = prefix.clone();
             key.extend_from_slice(ll_id.as_bytes());
             self.lazyloadedids.insert(&key, &[])?;
         }
-        }
 
         Ok(())
     }
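Note the new parameter type above: `&mut dyn Iterator<Item = &UserId>`. A generic `I: Iterator` parameter would make the method non-object-safe, so the iterator itself is passed as a trait object. A small sketch of why this works, with stand-in names rather than the real trait:

struct UserId(String);

trait LazyLoadData {
    // `fn confirm_delivery<I: Iterator>(...)` would forbid `dyn LazyLoadData`;
    // a `&mut dyn Iterator` argument keeps the trait object-safe.
    fn confirm_delivery(&self, confirmed_user_ids: &mut dyn Iterator<Item = &UserId>);
}

struct Kv;

impl LazyLoadData for Kv {
    fn confirm_delivery(&self, confirmed_user_ids: &mut dyn Iterator<Item = &UserId>) {
        // `&mut dyn Iterator` itself implements Iterator, so it can be looped over.
        for user in confirmed_user_ids {
            let _ = &user.0; // one key per confirmed user would be written here
        }
    }
}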
@@ -1,10 +1,10 @@
 use ruma::RoomId;
 
-use crate::{service, database::KeyValueDatabase, Result};
+use crate::{service, database::KeyValueDatabase, Result, services};
 
 impl service::rooms::metadata::Data for KeyValueDatabase {
     fn exists(&self, room_id: &RoomId) -> Result<bool> {
-        let prefix = match self.get_shortroomid(room_id)? {
+        let prefix = match services().rooms.short.get_shortroomid(room_id)? {
             Some(b) => b.to_be_bytes().to_vec(),
             None => return Ok(false),
         };
@@ -2,10 +2,10 @@ use std::mem::size_of;
 
 use ruma::RoomId;
 
-use crate::{service, database::KeyValueDatabase, utils, Result};
+use crate::{service, database::KeyValueDatabase, utils, Result, services};
 
 impl service::rooms::search::Data for KeyValueDatabase {
-    fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: u64, message_body: String) -> Result<()> {
+    fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()> {
         let mut batch = message_body
             .split_terminator(|c: char| !c.is_alphanumeric())
             .filter(|s| !s.is_empty())
@@ -27,7 +27,7 @@ impl service::rooms::search::Data for KeyValueDatabase {
         room_id: &RoomId,
         search_string: &str,
     ) -> Result<Option<(Box<dyn Iterator<Item = Vec<u8>>>, Vec<String>)>> {
-        let prefix = self
+        let prefix = services().rooms.short
             .get_shortroomid(room_id)?
             .expect("room exists")
             .to_be_bytes()
@@ -60,11 +60,11 @@ impl service::rooms::search::Data for KeyValueDatabase {
             })
             .map(|iter| {
                 (
-                    iter.map(move |id| {
+                    Box::new(iter.map(move |id| {
                         let mut pduid = prefix_clone.clone();
                         pduid.extend_from_slice(&id);
                         pduid
-                    }),
+                    })),
                     words,
                 )
             }))
@@ -1,13 +1,13 @@
 use std::{collections::{BTreeMap, HashMap}, sync::Arc};
 
-use crate::{database::KeyValueDatabase, service, PduEvent, Error, utils, Result};
+use crate::{database::KeyValueDatabase, service, PduEvent, Error, utils, Result, services};
 use async_trait::async_trait;
 use ruma::{EventId, events::StateEventType, RoomId};
 
 #[async_trait]
 impl service::rooms::state_accessor::Data for KeyValueDatabase {
     async fn state_full_ids(&self, shortstatehash: u64) -> Result<BTreeMap<u64, Arc<EventId>>> {
-        let full_state = self
+        let full_state = services().rooms.state_compressor
             .load_shortstatehash_info(shortstatehash)?
             .pop()
             .expect("there is always one layer")
@@ -15,7 +15,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
         let mut result = BTreeMap::new();
         let mut i = 0;
         for compressed in full_state.into_iter() {
-            let parsed = self.parse_compressed_state_event(compressed)?;
+            let parsed = services().rooms.state_compressor.parse_compressed_state_event(compressed)?;
             result.insert(parsed.0, parsed.1);
 
             i += 1;
@@ -30,7 +30,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
         &self,
         shortstatehash: u64,
     ) -> Result<HashMap<(StateEventType, String), Arc<PduEvent>>> {
-        let full_state = self
+        let full_state = services().rooms.state_compressor
             .load_shortstatehash_info(shortstatehash)?
             .pop()
             .expect("there is always one layer")
@@ -39,8 +39,8 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
         let mut result = HashMap::new();
         let mut i = 0;
         for compressed in full_state {
-            let (_, eventid) = self.parse_compressed_state_event(compressed)?;
-            if let Some(pdu) = self.get_pdu(&eventid)? {
+            let (_, eventid) = services().rooms.state_compressor.parse_compressed_state_event(compressed)?;
+            if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? {
                 result.insert(
                     (
                         pdu.kind.to_string().into(),
@@ -69,11 +69,11 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
         event_type: &StateEventType,
         state_key: &str,
     ) -> Result<Option<Arc<EventId>>> {
-        let shortstatekey = match self.get_shortstatekey(event_type, state_key)? {
+        let shortstatekey = match services().rooms.short.get_shortstatekey(event_type, state_key)? {
             Some(s) => s,
             None => return Ok(None),
         };
-        let full_state = self
+        let full_state = services().rooms.state_compressor
             .load_shortstatehash_info(shortstatehash)?
             .pop()
             .expect("there is always one layer")
@@ -82,7 +82,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
             .into_iter()
             .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes()))
             .and_then(|compressed| {
-                self.parse_compressed_state_event(compressed)
+                services().rooms.state_compressor.parse_compressed_state_event(compressed)
                     .ok()
                     .map(|(_, id)| id)
             }))
@@ -96,7 +96,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
         state_key: &str,
     ) -> Result<Option<Arc<PduEvent>>> {
         self.state_get_id(shortstatehash, event_type, state_key)?
-            .map_or(Ok(None), |event_id| self.get_pdu(&event_id))
+            .map_or(Ok(None), |event_id| services().rooms.timeline.get_pdu(&event_id))
     }
 
     /// Returns the state hash for this pdu.
@@ -122,7 +122,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
         &self,
         room_id: &RoomId,
     ) -> Result<HashMap<(StateEventType, String), Arc<PduEvent>>> {
-        if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? {
+        if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? {
             self.state_full(current_shortstatehash).await
         } else {
             Ok(HashMap::new())
@@ -136,7 +136,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
         event_type: &StateEventType,
         state_key: &str,
     ) -> Result<Option<Arc<EventId>>> {
-        if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? {
+        if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? {
             self.state_get_id(current_shortstatehash, event_type, state_key)
         } else {
             Ok(None)
@@ -150,7 +150,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
         event_type: &StateEventType,
         state_key: &str,
     ) -> Result<Option<Arc<PduEvent>>> {
-        if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? {
+        if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? {
             self.state_get(current_shortstatehash, event_type, state_key)
         } else {
             Ok(None)
@@ -39,8 +39,8 @@ impl service::rooms::state_compressor::Data for KeyValueDatabase {
     }
 
     fn save_statediff(&self, shortstatehash: u64, diff: StateDiff) -> Result<()> {
-        let mut value = diff.parent.to_be_bytes().to_vec();
-        for new in &diff.new {
+        let mut value = diff.parent.unwrap_or(0).to_be_bytes().to_vec();
+        for new in &diff.added {
             value.extend_from_slice(&new[..]);
         }
 
@@ -3,7 +3,7 @@ use std::{collections::hash_map, mem::size_of, sync::Arc};
 use ruma::{UserId, RoomId, api::client::error::ErrorKind, EventId, signatures::CanonicalJsonObject};
 use tracing::error;
 
-use crate::{service, database::KeyValueDatabase, utils, Error, PduEvent, Result};
+use crate::{service, database::KeyValueDatabase, utils, Error, PduEvent, Result, services};
 
 impl service::rooms::timeline::Data for KeyValueDatabase {
     fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result<u64> {
@@ -191,7 +191,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
         room_id: &RoomId,
         since: u64,
     ) -> Result<Box<dyn Iterator<Item = Result<(Vec<u8>, PduEvent)>>>> {
-        let prefix = self
+        let prefix = services().rooms.short
             .get_shortroomid(room_id)?
             .expect("room exists")
             .to_be_bytes()
@@ -203,7 +203,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
 
         let user_id = user_id.to_owned();
 
-        Ok(self
+        Ok(Box::new(self
             .pduid_pdu
             .iter_from(&first_pdu_id, false)
             .take_while(move |(k, _)| k.starts_with(&prefix))
@@ -214,7 +214,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
                     pdu.remove_transaction_id()?;
                 }
                 Ok((pdu_id, pdu))
-            }))
+            })))
     }
 
     /// Returns an iterator over all events and their tokens in a room that happened before the
@@ -226,7 +226,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
         until: u64,
     ) -> Result<Box<dyn Iterator<Item = Result<(Vec<u8>, PduEvent)>>>> {
         // Create the first part of the full pdu id
-        let prefix = self
+        let prefix = services().rooms.short
             .get_shortroomid(room_id)?
             .expect("room exists")
             .to_be_bytes()
@@ -239,7 +239,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
 
         let user_id = user_id.to_owned();
 
-        Ok(self
+        Ok(Box::new(self
             .pduid_pdu
             .iter_from(current, true)
             .take_while(move |(k, _)| k.starts_with(&prefix))
@@ -250,7 +250,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
                     pdu.remove_transaction_id()?;
                 }
                 Ok((pdu_id, pdu))
-            }))
+            })))
     }
 
     fn pdus_after<'a>(
@@ -260,7 +260,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
         from: u64,
     ) -> Result<Box<dyn Iterator<Item = Result<(Vec<u8>, PduEvent)>>>> {
         // Create the first part of the full pdu id
-        let prefix = self
+        let prefix = services().rooms.short
            .get_shortroomid(room_id)?
            .expect("room exists")
            .to_be_bytes()
@@ -273,7 +273,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
 
         let user_id = user_id.to_owned();
 
-        Ok(self
+        Ok(Box::new(self
             .pduid_pdu
             .iter_from(current, false)
             .take_while(move |(k, _)| k.starts_with(&prefix))
@@ -284,6 +284,6 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
                     pdu.remove_transaction_id()?;
                 }
                 Ok((pdu_id, pdu))
-            }))
+            })))
     }
 }
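The timeline methods above now return `Result<Box<dyn Iterator<Item = Result<(Vec<u8>, PduEvent)>>>>`: the outer `Result` covers the room-prefix lookup, the boxed iterator is the trait object, and each row is fallible on its own. A sketch of consuming that shape, with simplified stand-in types rather than the real API:

type Row = (Vec<u8>, String); // stand-in for (pdu_id, PduEvent)

fn pdus_after() -> Result<Box<dyn Iterator<Item = Result<Row, String>>>, String> {
    let rows = vec![Ok((vec![1], "pdu".to_owned()))];
    Ok(Box::new(rows.into_iter()))
}

fn main() -> Result<(), String> {
    // collect() over an iterator of Results short-circuits on the first Err.
    let pdus: Vec<Row> = pdus_after()?.collect::<Result<_, _>>()?;
    assert_eq!(pdus.len(), 1);
    Ok(())
}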
@@ -1,6 +1,6 @@
 use ruma::{UserId, RoomId};
 
-use crate::{service, database::KeyValueDatabase, utils, Error, Result};
+use crate::{service, database::KeyValueDatabase, utils, Error, Result, services};
 
 impl service::rooms::user::Data for KeyValueDatabase {
     fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
@@ -50,7 +50,7 @@ impl service::rooms::user::Data for KeyValueDatabase {
         token: u64,
         shortstatehash: u64,
     ) -> Result<()> {
-        let shortroomid = self.get_shortroomid(room_id)?.expect("room exists");
+        let shortroomid = services().rooms.short.get_shortroomid(room_id)?.expect("room exists");
 
         let mut key = shortroomid.to_be_bytes().to_vec();
         key.extend_from_slice(&token.to_be_bytes());
@@ -60,7 +60,7 @@ impl service::rooms::user::Data for KeyValueDatabase {
     }
 
     fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result<Option<u64>> {
-        let shortroomid = self.get_shortroomid(room_id)?.expect("room exists");
+        let shortroomid = services().rooms.short.get_shortroomid(room_id)?.expect("room exists");
 
         let mut key = shortroomid.to_be_bytes().to_vec();
         key.extend_from_slice(&token.to_be_bytes());
@@ -57,12 +57,12 @@ impl service::users::Data for KeyValueDatabase {
 
     /// Returns an iterator over all users on this homeserver.
     fn iter(&self) -> Box<dyn Iterator<Item = Result<Box<UserId>>>> {
-        self.userid_password.iter().map(|(bytes, _)| {
+        Box::new(self.userid_password.iter().map(|(bytes, _)| {
             UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| {
                 Error::bad_database("User ID in userid_password is invalid unicode.")
             })?)
             .map_err(|_| Error::bad_database("User ID in userid_password is invalid."))
-        })
+        }))
     }
 
     /// Returns a list of local users as list of usernames.
@@ -274,7 +274,7 @@ impl service::users::Data for KeyValueDatabase {
         let mut prefix = user_id.as_bytes().to_vec();
         prefix.push(0xff);
         // All devices have metadata
-        self.userdeviceid_metadata
+        Box::new(self.userdeviceid_metadata
             .scan_prefix(prefix)
             .map(|(bytes, _)| {
                 Ok(utils::string_from_bytes(
@@ -285,7 +285,7 @@ impl service::users::Data for KeyValueDatabase {
                 )
                 .map_err(|_| Error::bad_database("Device ID in userdeviceid_metadata is invalid."))?
                 .into())
-            })
+            }))
     }
 
     /// Replaces the access token of one device.
@@ -617,7 +617,7 @@ impl service::users::Data for KeyValueDatabase {
 
         let to = to.unwrap_or(u64::MAX);
 
-        self.keychangeid_userid
+        Box::new(self.keychangeid_userid
             .iter_from(&start, false)
             .take_while(move |(k, _)| {
                 k.starts_with(&prefix)
@@ -638,7 +638,7 @@ impl service::users::Data for KeyValueDatabase {
                     Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.")
                 })?)
                 .map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid."))
-            })
+            }))
     }
 
     fn mark_device_key_update(
@@ -646,9 +646,10 @@ impl service::users::Data for KeyValueDatabase {
         user_id: &UserId,
     ) -> Result<()> {
         let count = services().globals.next_count()?.to_be_bytes();
-        for room_id in services().rooms.rooms_joined(user_id).filter_map(|r| r.ok()) {
+        for room_id in services().rooms.state_cache.rooms_joined(user_id).filter_map(|r| r.ok()) {
             // Don't send key updates to unencrypted rooms
             if services().rooms
+                .state_accessor
                 .room_state_get(&room_id, &StateEventType::RoomEncryption, "")?
                 .is_none()
             {
@@ -882,12 +883,12 @@ impl service::users::Data for KeyValueDatabase {
         let mut key = user_id.as_bytes().to_vec();
         key.push(0xff);
 
-        self.userdeviceid_metadata
+        Box::new(self.userdeviceid_metadata
             .scan_prefix(key)
             .map(|(_, bytes)| {
                 serde_json::from_slice::<Device>(&bytes)
                     .map_err(|_| Error::bad_database("Device in userdeviceid_metadata is invalid."))
-            })
+            }))
     }
 
     /// Creates a new sync filter. Returns the filter id.
@@ -1,7 +1,7 @@
 pub mod abstraction;
 pub mod key_value;
 
-use crate::{utils, Config, Error, Result, service::{users, globals, uiaa, rooms, account_data, media, key_backups, transaction_ids, sending, appservice, pusher}};
+use crate::{utils, Config, Error, Result, service::{users, globals, uiaa, rooms::{self, state_compressor::CompressedStateEvent}, account_data, media, key_backups, transaction_ids, sending, appservice, pusher}, services, PduEvent, Services, SERVICES};
 use abstraction::KeyValueDatabaseEngine;
 use directories::ProjectDirs;
 use futures_util::{stream::FuturesUnordered, StreamExt};
@@ -9,7 +9,7 @@ use lru_cache::LruCache;
 use ruma::{
     events::{
         push_rules::PushRulesEventContent, room::message::RoomMessageEventContent,
-        GlobalAccountDataEvent, GlobalAccountDataEventType,
+        GlobalAccountDataEvent, GlobalAccountDataEventType, StateEventType,
     },
     push::Ruleset,
     DeviceId, EventId, RoomId, UserId, signatures::CanonicalJsonValue,
@@ -151,6 +151,30 @@ pub struct KeyValueDatabase {
 
     //pub pusher: pusher::PushData,
     pub(super) senderkey_pusher: Arc<dyn KvTree>,
+
+    pub(super) cached_registrations: Arc<RwLock<HashMap<String, serde_yaml::Value>>>,
+    pub(super) pdu_cache: Mutex<LruCache<Box<EventId>, Arc<PduEvent>>>,
+    pub(super) shorteventid_cache: Mutex<LruCache<u64, Arc<EventId>>>,
+    pub(super) auth_chain_cache: Mutex<LruCache<Vec<u64>, Arc<HashSet<u64>>>>,
+    pub(super) eventidshort_cache: Mutex<LruCache<Box<EventId>, u64>>,
+    pub(super) statekeyshort_cache: Mutex<LruCache<(StateEventType, String), u64>>,
+    pub(super) shortstatekey_cache: Mutex<LruCache<u64, (StateEventType, String)>>,
+    pub(super) our_real_users_cache: RwLock<HashMap<Box<RoomId>, Arc<HashSet<Box<UserId>>>>>,
+    pub(super) appservice_in_room_cache: RwLock<HashMap<Box<RoomId>, HashMap<String, bool>>>,
+    pub(super) lazy_load_waiting:
+        Mutex<HashMap<(Box<UserId>, Box<DeviceId>, Box<RoomId>, u64), HashSet<Box<UserId>>>>,
+    pub(super) stateinfo_cache: Mutex<
+        LruCache<
+            u64,
+            Vec<(
+                u64,                           // sstatehash
+                HashSet<CompressedStateEvent>, // full state
+                HashSet<CompressedStateEvent>, // added
+                HashSet<CompressedStateEvent>, // removed
+            )>,
+        >,
+    >,
+    pub(super) lasttimelinecount_cache: Mutex<HashMap<Box<RoomId>, u64>>,
 }
 
 impl KeyValueDatabase {
@@ -214,7 +238,7 @@ impl KeyValueDatabase {
     }
 
     /// Load an existing database or create a new one.
-    pub async fn load_or_create(config: &Config) -> Result<Arc<TokioRwLock<Self>>> {
+    pub async fn load_or_create(config: &Config) -> Result<()> {
         Self::check_db_setup(config)?;
 
         if !Path::new(&config.database_path).exists() {
@@ -253,7 +277,7 @@ impl KeyValueDatabase {
         let (admin_sender, admin_receiver) = mpsc::unbounded_channel();
         let (sending_sender, sending_receiver) = mpsc::unbounded_channel();
 
-        let db = Self {
+        let db = Arc::new(Self {
             _db: builder.clone(),
             userid_password: builder.open_tree("userid_password")?,
             userid_displayname: builder.open_tree("userid_displayname")?,
@@ -345,18 +369,53 @@ impl KeyValueDatabase {
             senderkey_pusher: builder.open_tree("senderkey_pusher")?,
             global: builder.open_tree("global")?,
             server_signingkeys: builder.open_tree("server_signingkeys")?,
-        };
-
-        // TODO: do this after constructing the db
+
+            cached_registrations: Arc::new(RwLock::new(HashMap::new())),
+            pdu_cache: Mutex::new(LruCache::new(
+                config
+                    .pdu_cache_capacity
+                    .try_into()
+                    .expect("pdu cache capacity fits into usize"),
+            )),
+            auth_chain_cache: Mutex::new(LruCache::new(
+                (100_000.0 * config.conduit_cache_capacity_modifier) as usize,
+            )),
+            shorteventid_cache: Mutex::new(LruCache::new(
+                (100_000.0 * config.conduit_cache_capacity_modifier) as usize,
+            )),
+            eventidshort_cache: Mutex::new(LruCache::new(
+                (100_000.0 * config.conduit_cache_capacity_modifier) as usize,
+            )),
+            shortstatekey_cache: Mutex::new(LruCache::new(
+                (100_000.0 * config.conduit_cache_capacity_modifier) as usize,
+            )),
+            statekeyshort_cache: Mutex::new(LruCache::new(
+                (100_000.0 * config.conduit_cache_capacity_modifier) as usize,
+            )),
+            our_real_users_cache: RwLock::new(HashMap::new()),
+            appservice_in_room_cache: RwLock::new(HashMap::new()),
+            lazy_load_waiting: Mutex::new(HashMap::new()),
+            stateinfo_cache: Mutex::new(LruCache::new(
+                (100.0 * config.conduit_cache_capacity_modifier) as usize,
+            )),
+            lasttimelinecount_cache: Mutex::new(HashMap::new()),
+        });
+
+        let services_raw = Services::build(Arc::clone(&db));
+
+        // This is the first and only time we initialize the SERVICE static
+        *SERVICES.write().unwrap() = Some(services_raw);
+
 
         // Matrix resource ownership is based on the server name; changing it
         // requires recreating the database from scratch.
-        if guard.users.count()? > 0 {
+        if services().users.count()? > 0 {
             let conduit_user =
-                UserId::parse_with_server_name("conduit", guard.globals.server_name())
+                UserId::parse_with_server_name("conduit", services().globals.server_name())
                     .expect("@conduit:server_name is valid");
 
-            if !guard.users.exists(&conduit_user)? {
+            if !services().users.exists(&conduit_user)? {
                 error!(
                     "The {} server user does not exist, and the database is not new.",
                     conduit_user
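The hunk above is where the new architecture is wired up: the database is built, wrapped in an `Arc`, handed to `Services::build`, and stored in a global `SERVICES` static that the `services()` calls throughout this diff read from. A rough sketch of that accessor pattern, assuming an `Arc`-based static; the real field types differ:

use std::sync::{Arc, RwLock};

pub struct Services {
    // stand-in for the real service registry (globals, rooms, users, ...)
    pub name: &'static str,
}

static SERVICES: RwLock<Option<Arc<Services>>> = RwLock::new(None);

pub fn services() -> Arc<Services> {
    SERVICES
        .read()
        .unwrap()
        .clone()
        .expect("SERVICES should be initialized once, in load_or_create")
}

fn init() {
    // The first and only write, mirroring `*SERVICES.write().unwrap() = Some(...)`.
    *SERVICES.write().unwrap() = Some(Arc::new(Services { name: "conduit" }));
}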
@@ -370,11 +429,10 @@ impl KeyValueDatabase {
         // If the database has any data, perform data migrations before starting
         let latest_database_version = 11;
 
-        if guard.users.count()? > 0 {
-            let db = &*guard;
+        if services().users.count()? > 0 {
             // MIGRATIONS
-            if db.globals.database_version()? < 1 {
-                for (roomserverid, _) in db.rooms.roomserverids.iter() {
+            if services().globals.database_version()? < 1 {
+                for (roomserverid, _) in db.roomserverids.iter() {
                     let mut parts = roomserverid.split(|&b| b == 0xff);
                     let room_id = parts.next().expect("split always returns one element");
                     let servername = match parts.next() {
@@ -388,17 +446,17 @@ impl KeyValueDatabase {
                     serverroomid.push(0xff);
                     serverroomid.extend_from_slice(room_id);
 
-                    db.rooms.serverroomids.insert(&serverroomid, &[])?;
+                    db.serverroomids.insert(&serverroomid, &[])?;
                 }
 
-                db.globals.bump_database_version(1)?;
+                services().globals.bump_database_version(1)?;
 
                 warn!("Migration: 0 -> 1 finished");
             }
 
-            if db.globals.database_version()? < 2 {
+            if services().globals.database_version()? < 2 {
                 // We accidentally inserted hashed versions of "" into the db instead of just ""
-                for (userid, password) in db.users.userid_password.iter() {
+                for (userid, password) in db.userid_password.iter() {
                     let password = utils::string_from_bytes(&password);
 
                     let empty_hashed_password = password.map_or(false, |password| {
@@ -406,59 +464,59 @@ impl KeyValueDatabase {
                     });
 
                     if empty_hashed_password {
-                        db.users.userid_password.insert(&userid, b"")?;
+                        db.userid_password.insert(&userid, b"")?;
                     }
                 }
 
-                db.globals.bump_database_version(2)?;
+                services().globals.bump_database_version(2)?;
 
                 warn!("Migration: 1 -> 2 finished");
             }
 
-            if db.globals.database_version()? < 3 {
+            if services().globals.database_version()? < 3 {
                 // Move media to filesystem
-                for (key, content) in db.media.mediaid_file.iter() {
+                for (key, content) in db.mediaid_file.iter() {
                     if content.is_empty() {
                         continue;
                     }
 
-                    let path = db.globals.get_media_file(&key);
+                    let path = services().globals.get_media_file(&key);
                     let mut file = fs::File::create(path)?;
                     file.write_all(&content)?;
-                    db.media.mediaid_file.insert(&key, &[])?;
+                    db.mediaid_file.insert(&key, &[])?;
                 }
 
-                db.globals.bump_database_version(3)?;
+                services().globals.bump_database_version(3)?;
 
                 warn!("Migration: 2 -> 3 finished");
             }
 
-            if db.globals.database_version()? < 4 {
-                // Add federated users to db as deactivated
-                for our_user in db.users.iter() {
+            if services().globals.database_version()? < 4 {
+                // Add federated users to services() as deactivated
+                for our_user in services().users.iter() {
                     let our_user = our_user?;
-                    if db.users.is_deactivated(&our_user)? {
+                    if services().users.is_deactivated(&our_user)? {
                         continue;
                     }
-                    for room in db.rooms.rooms_joined(&our_user) {
-                        for user in db.rooms.room_members(&room?) {
+                    for room in services().rooms.state_cache.rooms_joined(&our_user) {
+                        for user in services().rooms.state_cache.room_members(&room?) {
                             let user = user?;
-                            if user.server_name() != db.globals.server_name() {
+                            if user.server_name() != services().globals.server_name() {
                                 println!("Migration: Creating user {}", user);
-                                db.users.create(&user, None)?;
+                                services().users.create(&user, None)?;
                             }
                         }
                     }
                 }
 
-                db.globals.bump_database_version(4)?;
+                services().globals.bump_database_version(4)?;
 
                 warn!("Migration: 3 -> 4 finished");
             }
 
-            if db.globals.database_version()? < 5 {
+            if services().globals.database_version()? < 5 {
                 // Upgrade user data store
-                for (roomuserdataid, _) in db.account_data.roomuserdataid_accountdata.iter() {
+                for (roomuserdataid, _) in db.roomuserdataid_accountdata.iter() {
                     let mut parts = roomuserdataid.split(|&b| b == 0xff);
                     let room_id = parts.next().unwrap();
                     let user_id = parts.next().unwrap();
@@ -470,30 +528,29 @@ impl KeyValueDatabase {
                     key.push(0xff);
                     key.extend_from_slice(event_type);
 
-                    db.account_data
-                        .roomusertype_roomuserdataid
+                    db.roomusertype_roomuserdataid
                         .insert(&key, &roomuserdataid)?;
                 }
 
-                db.globals.bump_database_version(5)?;
+                services().globals.bump_database_version(5)?;
 
                 warn!("Migration: 4 -> 5 finished");
             }
 
-            if db.globals.database_version()? < 6 {
+            if services().globals.database_version()? < 6 {
                 // Set room member count
-                for (roomid, _) in db.rooms.roomid_shortstatehash.iter() {
+                for (roomid, _) in db.roomid_shortstatehash.iter() {
                     let string = utils::string_from_bytes(&roomid).unwrap();
                     let room_id = <&RoomId>::try_from(string.as_str()).unwrap();
-                    db.rooms.update_joined_count(room_id, &db)?;
+                    services().rooms.state_cache.update_joined_count(room_id)?;
                }
 
-                db.globals.bump_database_version(6)?;
+                services().globals.bump_database_version(6)?;
 
                 warn!("Migration: 5 -> 6 finished");
             }
 
-            if db.globals.database_version()? < 7 {
+            if services().globals.database_version()? < 7 {
                 // Upgrade state store
                 let mut last_roomstates: HashMap<Box<RoomId>, u64> = HashMap::new();
                 let mut current_sstatehash: Option<u64> = None;
@@ -513,7 +570,7 @@ impl KeyValueDatabase {
                 let states_parents = last_roomsstatehash.map_or_else(
                     || Ok(Vec::new()),
                     |&last_roomsstatehash| {
-                        db.rooms.state_accessor.load_shortstatehash_info(dbg!(last_roomsstatehash))
+                        services().rooms.state_compressor.load_shortstatehash_info(dbg!(last_roomsstatehash))
                     },
                 )?;
 
@@ -535,7 +592,7 @@ impl KeyValueDatabase {
                     (current_state, HashSet::new())
                 };
 
-                db.rooms.save_state_from_diff(
+                services().rooms.state_compressor.save_state_from_diff(
                     dbg!(current_sstatehash),
                     statediffnew,
                     statediffremoved,
@@ -544,7 +601,7 @@ impl KeyValueDatabase {
                 )?;
 
                 /*
-                let mut tmp = db.rooms.load_shortstatehash_info(&current_sstatehash, &db)?;
+                let mut tmp = services().rooms.load_shortstatehash_info(&current_sstatehash)?;
                 let state = tmp.pop().unwrap();
                 println!(
                     "{}\t{}{:?}: {:?} + {:?} - {:?}",
@@ -587,14 +644,13 @@ impl KeyValueDatabase {
                 current_sstatehash = Some(sstatehash);
 
                 let event_id = db
-                    .rooms
                     .shorteventid_eventid
                     .get(&seventid)
                     .unwrap()
                     .unwrap();
                 let string = utils::string_from_bytes(&event_id).unwrap();
|
||||||
let event_id = <&EventId>::try_from(string.as_str()).unwrap();
|
let event_id = <&EventId>::try_from(string.as_str()).unwrap();
|
||||||
let pdu = db.rooms.get_pdu(event_id).unwrap().unwrap();
|
let pdu = services().rooms.timeline.get_pdu(event_id).unwrap().unwrap();
|
||||||
|
|
||||||
if Some(&pdu.room_id) != current_room.as_ref() {
|
if Some(&pdu.room_id) != current_room.as_ref() {
|
||||||
current_room = Some(pdu.room_id.clone());
|
current_room = Some(pdu.room_id.clone());
|
||||||
|
@ -615,20 +671,20 @@ impl KeyValueDatabase {
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
db.globals.bump_database_version(7)?;
|
services().globals.bump_database_version(7)?;
|
||||||
|
|
||||||
warn!("Migration: 6 -> 7 finished");
|
warn!("Migration: 6 -> 7 finished");
|
||||||
}
|
}
|
||||||
|
|
||||||
if db.globals.database_version()? < 8 {
|
if services().globals.database_version()? < 8 {
|
||||||
// Generate short room ids for all rooms
|
// Generate short room ids for all rooms
|
||||||
for (room_id, _) in db.rooms.roomid_shortstatehash.iter() {
|
for (room_id, _) in db.roomid_shortstatehash.iter() {
|
||||||
let shortroomid = db.globals.next_count()?.to_be_bytes();
|
let shortroomid = services().globals.next_count()?.to_be_bytes();
|
||||||
db.rooms.roomid_shortroomid.insert(&room_id, &shortroomid)?;
|
db.roomid_shortroomid.insert(&room_id, &shortroomid)?;
|
||||||
info!("Migration: 8");
|
info!("Migration: 8");
|
||||||
}
|
}
|
||||||
// Update pduids db layout
|
// Update pduids db layout
|
||||||
let mut batch = db.rooms.pduid_pdu.iter().filter_map(|(key, v)| {
|
let mut batch = db.pduid_pdu.iter().filter_map(|(key, v)| {
|
||||||
if !key.starts_with(b"!") {
|
if !key.starts_with(b"!") {
|
||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
|
@ -637,7 +693,6 @@ impl KeyValueDatabase {
|
||||||
let count = parts.next().unwrap();
|
let count = parts.next().unwrap();
|
||||||
|
|
||||||
let short_room_id = db
|
let short_room_id = db
|
||||||
.rooms
|
|
||||||
.roomid_shortroomid
|
.roomid_shortroomid
|
||||||
.get(room_id)
|
.get(room_id)
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -649,9 +704,9 @@ impl KeyValueDatabase {
|
||||||
Some((new_key, v))
|
Some((new_key, v))
|
||||||
});
|
});
|
||||||
|
|
||||||
db.rooms.pduid_pdu.insert_batch(&mut batch)?;
|
db.pduid_pdu.insert_batch(&mut batch)?;
|
||||||
|
|
||||||
let mut batch2 = db.rooms.eventid_pduid.iter().filter_map(|(k, value)| {
|
let mut batch2 = db.eventid_pduid.iter().filter_map(|(k, value)| {
|
||||||
if !value.starts_with(b"!") {
|
if !value.starts_with(b"!") {
|
||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
|
@ -660,7 +715,6 @@ impl KeyValueDatabase {
|
||||||
let count = parts.next().unwrap();
|
let count = parts.next().unwrap();
|
||||||
|
|
||||||
let short_room_id = db
|
let short_room_id = db
|
||||||
.rooms
|
|
||||||
.roomid_shortroomid
|
.roomid_shortroomid
|
||||||
.get(room_id)
|
.get(room_id)
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -672,17 +726,16 @@ impl KeyValueDatabase {
|
||||||
Some((k, new_value))
|
Some((k, new_value))
|
||||||
});
|
});
|
||||||
|
|
||||||
db.rooms.eventid_pduid.insert_batch(&mut batch2)?;
|
db.eventid_pduid.insert_batch(&mut batch2)?;
|
||||||
|
|
||||||
db.globals.bump_database_version(8)?;
|
services().globals.bump_database_version(8)?;
|
||||||
|
|
||||||
warn!("Migration: 7 -> 8 finished");
|
warn!("Migration: 7 -> 8 finished");
|
||||||
}
|
}
|
||||||
|
|
||||||
if db.globals.database_version()? < 9 {
|
if services().globals.database_version()? < 9 {
|
||||||
// Update tokenids db layout
|
// Update tokenids db layout
|
||||||
let mut iter = db
|
let mut iter = db
|
||||||
.rooms
|
|
||||||
.tokenids
|
.tokenids
|
||||||
.iter()
|
.iter()
|
||||||
.filter_map(|(key, _)| {
|
.filter_map(|(key, _)| {
|
||||||
|
@ -696,7 +749,6 @@ impl KeyValueDatabase {
|
||||||
let pdu_id_count = parts.next().unwrap();
|
let pdu_id_count = parts.next().unwrap();
|
||||||
|
|
||||||
let short_room_id = db
|
let short_room_id = db
|
||||||
.rooms
|
|
||||||
.roomid_shortroomid
|
.roomid_shortroomid
|
||||||
.get(room_id)
|
.get(room_id)
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -712,8 +764,7 @@ impl KeyValueDatabase {
|
||||||
.peekable();
|
.peekable();
|
||||||
|
|
||||||
while iter.peek().is_some() {
|
while iter.peek().is_some() {
|
||||||
db.rooms
|
db.tokenids
|
||||||
.tokenids
|
|
||||||
.insert_batch(&mut iter.by_ref().take(1000))?;
|
.insert_batch(&mut iter.by_ref().take(1000))?;
|
||||||
println!("smaller batch done");
|
println!("smaller batch done");
|
||||||
}
|
}
|
||||||
|
@ -721,7 +772,6 @@ impl KeyValueDatabase {
|
||||||
info!("Deleting starts");
|
info!("Deleting starts");
|
||||||
|
|
||||||
let batch2: Vec<_> = db
|
let batch2: Vec<_> = db
|
||||||
.rooms
|
|
||||||
.tokenids
|
.tokenids
|
||||||
.iter()
|
.iter()
|
||||||
.filter_map(|(key, _)| {
|
.filter_map(|(key, _)| {
|
||||||
|
@ -736,38 +786,37 @@ impl KeyValueDatabase {
|
||||||
|
|
||||||
for key in batch2 {
|
for key in batch2 {
|
||||||
println!("del");
|
println!("del");
|
||||||
db.rooms.tokenids.remove(&key)?;
|
db.tokenids.remove(&key)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
db.globals.bump_database_version(9)?;
|
services().globals.bump_database_version(9)?;
|
||||||
|
|
||||||
warn!("Migration: 8 -> 9 finished");
|
warn!("Migration: 8 -> 9 finished");
|
||||||
}
|
}
|
||||||
|
|
||||||
if db.globals.database_version()? < 10 {
|
if services().globals.database_version()? < 10 {
|
||||||
// Add other direction for shortstatekeys
|
// Add other direction for shortstatekeys
|
||||||
for (statekey, shortstatekey) in db.rooms.statekey_shortstatekey.iter() {
|
for (statekey, shortstatekey) in db.statekey_shortstatekey.iter() {
|
||||||
db.rooms
|
db.shortstatekey_statekey
|
||||||
.shortstatekey_statekey
|
|
||||||
.insert(&shortstatekey, &statekey)?;
|
.insert(&shortstatekey, &statekey)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Force E2EE device list updates so we can send them over federation
|
// Force E2EE device list updates so we can send them over federation
|
||||||
for user_id in db.users.iter().filter_map(|r| r.ok()) {
|
for user_id in services().users.iter().filter_map(|r| r.ok()) {
|
||||||
db.users
|
services().users
|
||||||
.mark_device_key_update(&user_id, &db.rooms, &db.globals)?;
|
.mark_device_key_update(&user_id)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
db.globals.bump_database_version(10)?;
|
services().globals.bump_database_version(10)?;
|
||||||
|
|
||||||
warn!("Migration: 9 -> 10 finished");
|
warn!("Migration: 9 -> 10 finished");
|
||||||
}
|
}
|
||||||
|
|
||||||
if db.globals.database_version()? < 11 {
|
if services().globals.database_version()? < 11 {
|
||||||
db._db
|
db._db
|
||||||
.open_tree("userdevicesessionid_uiaarequest")?
|
.open_tree("userdevicesessionid_uiaarequest")?
|
||||||
.clear()?;
|
.clear()?;
|
||||||
db.globals.bump_database_version(11)?;
|
services().globals.bump_database_version(11)?;
|
||||||
|
|
||||||
warn!("Migration: 10 -> 11 finished");
|
warn!("Migration: 10 -> 11 finished");
|
||||||
}
|
}
|
||||||
|
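Every migration block above follows the same version-gated pattern: check the stored schema version, run a one-time transformation, then bump the version so the block never runs again. A minimal, self-contained sketch of that pattern (the `Db` type and helpers here are illustrative stand-ins for the real accessors in this diff):

use tracing::warn;

type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;

struct Db { version: u64 }

impl Db {
    fn database_version(&self) -> Result<u64> { Ok(self.version) }
    fn bump_database_version(&mut self, new: u64) -> Result<()> { self.version = new; Ok(()) }
}

// Each migration runs at most once: gate on the stored version, transform,
// then bump. Because the bump happens last, a crash mid-migration re-runs
// the block on the next startup, so the body should be safe to repeat.
fn run_migrations(db: &mut Db) -> Result<()> {
    if db.database_version()? < 12 {
        // one-time transformation for schema v12 goes here
        db.bump_database_version(12)?;
        warn!("Migration: 11 -> 12 finished");
    }
    Ok(())
}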
@@ -779,12 +828,12 @@ impl KeyValueDatabase {
                 config.database_backend, latest_database_version
             );
         } else {
-            guard
+            services()
                 .globals
                 .bump_database_version(latest_database_version)?;
 
             // Create the admin room and server user on first run
-            create_admin_room().await?;
+            services().admin.create_admin_room().await?;
 
             warn!(
                 "Created new {} database with version {}",

@@ -793,16 +842,16 @@ impl KeyValueDatabase {
         }
 
         // This data is probably outdated
-        guard.rooms.edus.presenceid_presence.clear()?;
+        db.presenceid_presence.clear()?;
 
-        guard.admin.start_handler(Arc::clone(&db), admin_receiver);
+        services().admin.start_handler(admin_receiver);
 
         // Set emergency access for the conduit user
-        match set_emergency_access(&guard) {
+        match set_emergency_access() {
             Ok(pwd_set) => {
                 if pwd_set {
                     warn!("The Conduit account emergency password is set! Please unset it as soon as you finish admin account recovery!");
-                    guard.admin.send_message(RoomMessageEventContent::text_plain("The Conduit account emergency password is set! Please unset it as soon as you finish admin account recovery!"));
+                    services().admin.send_message(RoomMessageEventContent::text_plain("The Conduit account emergency password is set! Please unset it as soon as you finish admin account recovery!"));
                 }
             }
             Err(e) => {

@@ -813,21 +862,19 @@ impl KeyValueDatabase {
             }
         };
 
-        guard
+        services()
             .sending
-            .start_handler(Arc::clone(&db), sending_receiver);
+            .start_handler(sending_receiver);
 
-        drop(guard);
+        Self::start_cleanup_task(config).await;
 
-        Self::start_cleanup_task(Arc::clone(&db), config).await;
+        Ok(())
 
-        Ok(db)
     }
 
     #[cfg(feature = "conduit_bin")]
-    pub async fn on_shutdown(db: Arc<TokioRwLock<Self>>) {
+    pub async fn on_shutdown() {
         info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers...");
-        db.read().await.globals.rotate.fire();
+        services().globals.rotate.fire();
     }
 
     pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) {

@@ -844,33 +891,30 @@ impl KeyValueDatabase {
         // Return when *any* user changed his key
         // TODO: only send for user they share a room with
         futures.push(
-            self.users
-                .todeviceid_events
+            self.todeviceid_events
                 .watch_prefix(&userdeviceid_prefix),
         );
 
-        futures.push(self.rooms.userroomid_joined.watch_prefix(&userid_prefix));
+        futures.push(self.userroomid_joined.watch_prefix(&userid_prefix));
         futures.push(
-            self.rooms
-                .userroomid_invitestate
+            self.userroomid_invitestate
                 .watch_prefix(&userid_prefix),
         );
-        futures.push(self.rooms.userroomid_leftstate.watch_prefix(&userid_prefix));
+        futures.push(self.userroomid_leftstate.watch_prefix(&userid_prefix));
         futures.push(
-            self.rooms
-                .userroomid_notificationcount
+            self.userroomid_notificationcount
                 .watch_prefix(&userid_prefix),
         );
         futures.push(
-            self.rooms
-                .userroomid_highlightcount
+            self.userroomid_highlightcount
                 .watch_prefix(&userid_prefix),
         );
 
         // Events for rooms we are in
-        for room_id in self.rooms.rooms_joined(user_id).filter_map(|r| r.ok()) {
-            let short_roomid = self
+        for room_id in services().rooms.state_cache.rooms_joined(user_id).filter_map(|r| r.ok()) {
+            let short_roomid = services()
                 .rooms
+                .short
                 .get_shortroomid(&room_id)
                 .ok()
                 .flatten()

@@ -883,33 +927,28 @@ impl KeyValueDatabase {
             roomid_prefix.push(0xff);
 
             // PDUs
-            futures.push(self.rooms.pduid_pdu.watch_prefix(&short_roomid));
+            futures.push(self.pduid_pdu.watch_prefix(&short_roomid));
 
             // EDUs
             futures.push(
-                self.rooms
-                    .edus
-                    .roomid_lasttypingupdate
+                self.roomid_lasttypingupdate
                     .watch_prefix(&roomid_bytes),
             );
 
             futures.push(
-                self.rooms
-                    .edus
-                    .readreceiptid_readreceipt
+                self.readreceiptid_readreceipt
                     .watch_prefix(&roomid_prefix),
             );
 
             // Key changes
-            futures.push(self.users.keychangeid_userid.watch_prefix(&roomid_prefix));
+            futures.push(self.keychangeid_userid.watch_prefix(&roomid_prefix));
 
             // Room account data
             let mut roomuser_prefix = roomid_prefix.clone();
             roomuser_prefix.extend_from_slice(&userid_prefix);
 
             futures.push(
-                self.account_data
-                    .roomusertype_roomuserdataid
+                self.roomusertype_roomuserdataid
                     .watch_prefix(&roomuser_prefix),
             );
         }

@@ -918,22 +957,20 @@ impl KeyValueDatabase {
         globaluserdata_prefix.extend_from_slice(&userid_prefix);
 
         futures.push(
-            self.account_data
-                .roomusertype_roomuserdataid
+            self.roomusertype_roomuserdataid
                 .watch_prefix(&globaluserdata_prefix),
         );
 
         // More key changes (used when user is not joined to any rooms)
-        futures.push(self.users.keychangeid_userid.watch_prefix(&userid_prefix));
+        futures.push(self.keychangeid_userid.watch_prefix(&userid_prefix));
 
         // One time keys
         futures.push(
-            self.users
-                .userid_lastonetimekeyupdate
+            self.userid_lastonetimekeyupdate
                 .watch_prefix(&userid_bytes),
        );
 
-        futures.push(Box::pin(self.globals.rotate.watch()));
+        futures.push(Box::pin(services().globals.rotate.watch()));
 
         // Wait until one of them finds something
         futures.next().await;
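The watch() hunks above collect one wake-up future per key-value prefix into a FuturesUnordered and return as soon as any of them fires; this is what lets /sync long-poll without busy-polling the database. The select-first-completion core, reduced to a standalone sketch (the boxed futures stand in for the watch_prefix() calls):

use futures_util::{stream::FuturesUnordered, Future, StreamExt};
use std::pin::Pin;

// Wait until the first of many pending "something changed" futures resolves.
// In the diff, each future is a watch_prefix() on one tree.
async fn wait_for_any_change(watchers: Vec<Pin<Box<dyn Future<Output = ()> + Send>>>) {
    let mut futures: FuturesUnordered<_> = watchers.into_iter().collect();
    // next() completes when the first watcher fires; the rest are dropped.
    futures.next().await;
}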
@@ -950,8 +987,8 @@ impl KeyValueDatabase {
         res
     }
 
-    #[tracing::instrument(skip(db, config))]
-    pub async fn start_cleanup_task(db: Arc<TokioRwLock<Self>>, config: &Config) {
+    #[tracing::instrument(skip(config))]
+    pub async fn start_cleanup_task(config: &Config) {
         use tokio::time::interval;
 
         #[cfg(unix)]

@@ -984,7 +1021,7 @@ impl KeyValueDatabase {
             }
 
             let start = Instant::now();
-            if let Err(e) = db.read().await._db.cleanup() {
+            if let Err(e) = services().globals.db._db.cleanup() {
                 error!("cleanup: Errored: {}", e);
             } else {
                 info!("cleanup: Finished in {:?}", start.elapsed());

@@ -995,26 +1032,25 @@ impl KeyValueDatabase {
     }
 
 /// Sets the emergency password and push rules for the @conduit account in case emergency password is set
-fn set_emergency_access(db: &KeyValueDatabase) -> Result<bool> {
-    let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name())
+fn set_emergency_access() -> Result<bool> {
+    let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name())
         .expect("@conduit:server_name is a valid UserId");
 
-    db.users
-        .set_password(&conduit_user, db.globals.emergency_password().as_deref())?;
+    services().users
+        .set_password(&conduit_user, services().globals.emergency_password().as_deref())?;
 
-    let (ruleset, res) = match db.globals.emergency_password() {
+    let (ruleset, res) = match services().globals.emergency_password() {
         Some(_) => (Ruleset::server_default(&conduit_user), Ok(true)),
         None => (Ruleset::new(), Ok(false)),
     };
 
-    db.account_data.update(
+    services().account_data.update(
         None,
         &conduit_user,
         GlobalAccountDataEventType::PushRules.to_string().into(),
         &GlobalAccountDataEvent {
             content: PushRulesEventContent { global: ruleset },
         },
-        &db.globals,
     )?;
 
     res
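start_cleanup_task above drives the backend's cleanup from a timer (and, on Unix, also from a signal). The interval-driven part in isolation, assuming only a fallible cleanup closure:

use std::time::{Duration, Instant};
use tokio::time::interval;
use tracing::{error, info};

// Periodically run a fallible maintenance job and log how long it took.
async fn cleanup_loop(period: Duration, cleanup: impl Fn() -> Result<(), String>) {
    let mut ticker = interval(period);
    loop {
        ticker.tick().await; // the first tick completes immediately
        let start = Instant::now();
        match cleanup() {
            Err(e) => error!("cleanup: Errored: {}", e),
            Ok(()) => info!("cleanup: Finished in {:?}", start.elapsed()),
        }
    }
}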
src/lib.rs (14 changed lines)

@@ -13,22 +13,16 @@ mod service;
 pub mod api;
 mod utils;
 
-use std::{cell::Cell, sync::RwLock};
+use std::{cell::Cell, sync::{RwLock, Arc}};
 
 pub use config::Config;
 pub use utils::error::{Error, Result};
 pub use service::{Services, pdu::PduEvent};
 pub use api::ruma_wrapper::{Ruma, RumaResponse};
 
-use crate::database::KeyValueDatabase;
-
-pub static SERVICES: RwLock<Option<ServicesEnum>> = RwLock::new(None);
-
-enum ServicesEnum {
-    Rocksdb(Services<KeyValueDatabase>)
-}
-
-pub fn services<'a>() -> &'a Services<KeyValueDatabase> {
-    &SERVICES.read().unwrap()
-}
+pub static SERVICES: RwLock<Option<Arc<Services>>> = RwLock::new(None);
+
+pub fn services<'a>() -> Arc<Services> {
+    Arc::clone(&SERVICES.read().unwrap())
+}
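Note that, as committed, both versions of this accessor are still work in progress: the old one tries to return a reference that outlives the read guard, and the new one calls Arc::clone on the guard around the Option rather than on its contents. A variant of the same design (global RwLock'd Option<Arc<Services>>, panicking if used before startup initialization) that type-checks:

use std::sync::{Arc, RwLock};

pub struct Services; // stand-in for the real struct

pub static SERVICES: RwLock<Option<Arc<Services>>> = RwLock::new(None);

// Called once at startup, before any request handler runs.
pub fn init_services(services: Arc<Services>) {
    *SERVICES.write().unwrap() = Some(services);
}

pub fn services() -> Arc<Services> {
    // Cloning the Arc lets the read guard drop before the caller uses it.
    Arc::clone(
        SERVICES
            .read()
            .unwrap()
            .as_ref()
            .expect("SERVICES should be initialized at startup"),
    )
}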
@@ -69,19 +69,14 @@ async fn main() {
 
 config.warn_deprecated();
 
-let db = match KeyValueDatabase::load_or_create(&config).await {
-    Ok(db) => db,
-    Err(e) => {
-        eprintln!(
-            "The database couldn't be loaded or created. The following error occured: {}",
-            e
-        );
-        std::process::exit(1);
-    }
-};
+if let Err(e) = KeyValueDatabase::load_or_create(&config).await {
+    eprintln!(
+        "The database couldn't be loaded or created. The following error occured: {}",
+        e
+    );
+    std::process::exit(1);
+};
 
-SERVICES.set(db).expect("this is the first and only time we initialize the SERVICE static");
-
 let start = async {
     run_server().await.unwrap();
 };
@@ -17,11 +17,11 @@ use tracing::error;
 
 use crate::{service::*, services, utils, Error, Result};
 
-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }
 
-impl<D: Data> Service<D> {
+impl Service {
     /// Places one event in the account data of the user and removes the previous entry.
     #[tracing::instrument(skip(self, room_id, user_id, event_type, data))]
     pub fn update<T: Serialize>(
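This hunk is the commit's core move, repeated for nearly every service below: a generic `Service<D: Data>` holding a concrete `db: D` becomes a non-generic `Service` holding `db: Box<dyn Data>`, trading compile-time monomorphization for one vtable call so that `Services` no longer needs a type parameter everywhere. The shape of the change, in isolation:

trait Data {
    fn get(&self, key: &str) -> Option<String>;
}

// Before: one monomorphized copy of the service per backend type.
#[allow(dead_code)]
struct GenericService<D: Data> {
    db: D,
}

// After: a single concrete type; the backend is chosen at runtime.
struct Service {
    db: Box<dyn Data>,
}

impl Service {
    fn get(&self, key: &str) -> Option<String> {
        self.db.get(key) // dynamic dispatch through the vtable
    }
}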
@@ -3,11 +3,11 @@ pub use data::Data;
 
 use crate::Result;
 
-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }
 
-impl<D: Data> Service<D> {
+impl Service {
     /// Registers an appservice and returns the ID to the caller
     pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<String> {
         self.db.register_appservice(yaml)

@@ -36,8 +36,8 @@ type SyncHandle = (
     Receiver<Option<Result<sync_events::v3::Response>>>, // rx
 );
 
-pub struct Service<D: Data> {
-    pub db: D,
+pub struct Service {
+    pub db: Box<dyn Data>,
 
     pub actual_destination_cache: Arc<RwLock<WellKnownMap>>, // actual_destination, host
     pub tls_name_override: Arc<RwLock<TlsNameMap>>,

@@ -92,9 +92,9 @@ impl Default for RotationHandler {
 }
 
 
-impl<D: Data> Service<D> {
+impl Service {
     pub fn load(
-        db: D,
+        db: Box<dyn Data>,
         config: Config,
     ) -> Result<Self> {
         let keypair = db.load_keypair();

@@ -12,11 +12,11 @@ use ruma::{
 };
 use std::{collections::BTreeMap, sync::Arc};
 
-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }
 
-impl<D: Data> Service<D> {
+impl Service {
     pub fn create_backup(
         &self,
         user_id: &UserId,

@@ -15,11 +15,11 @@ pub struct FileMeta {
     pub file: Vec<u8>,
 }
 
-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }
 
-impl<D: Data> Service<D> {
+impl Service {
     /// Uploads a file.
     pub async fn create(
         &self,
@@ -1,3 +1,5 @@
+use std::sync::Arc;
+
 pub mod account_data;
 pub mod admin;
 pub mod appservice;

@@ -12,18 +14,36 @@ pub mod transaction_ids;
 pub mod uiaa;
 pub mod users;
 
-pub struct Services<D: appservice::Data + pusher::Data + rooms::Data + transaction_ids::Data + uiaa::Data + users::Data + account_data::Data + globals::Data + key_backups::Data + media::Data>
-{
-    pub appservice: appservice::Service<D>,
-    pub pusher: pusher::Service<D>,
-    pub rooms: rooms::Service<D>,
-    pub transaction_ids: transaction_ids::Service<D>,
-    pub uiaa: uiaa::Service<D>,
-    pub users: users::Service<D>,
-    pub account_data: account_data::Service<D>,
+pub struct Services {
+    pub appservice: appservice::Service,
+    pub pusher: pusher::Service,
+    pub rooms: rooms::Service,
+    pub transaction_ids: transaction_ids::Service,
+    pub uiaa: uiaa::Service,
+    pub users: users::Service,
+    pub account_data: account_data::Service,
     pub admin: admin::Service,
-    pub globals: globals::Service<D>,
-    pub key_backups: key_backups::Service<D>,
-    pub media: media::Service<D>,
+    pub globals: globals::Service,
+    pub key_backups: key_backups::Service,
+    pub media: media::Service,
     pub sending: sending::Service,
 }
+
+impl Services {
+    pub fn build<D: appservice::Data + pusher::Data + rooms::Data + transaction_ids::Data + uiaa::Data + users::Data + account_data::Data + globals::Data + key_backups::Data + media::Data>(db: Arc<D>) -> Self {
+        Self {
+            appservice: appservice::Service { db: Arc::clone(&db) },
+            pusher: pusher::Service { db: Arc::clone(&db) },
+            rooms: rooms::Service { db: Arc::clone(&db) },
+            transaction_ids: transaction_ids::Service { db: Arc::clone(&db) },
+            uiaa: uiaa::Service { db: Arc::clone(&db) },
+            users: users::Service { db: Arc::clone(&db) },
+            account_data: account_data::Service { db: Arc::clone(&db) },
+            admin: admin::Service { db: Arc::clone(&db) },
+            globals: globals::Service { db: Arc::clone(&db) },
+            key_backups: key_backups::Service { db: Arc::clone(&db) },
+            media: media::Service { db: Arc::clone(&db) },
+            sending: sending::Service { db: Arc::clone(&db) },
+        }
+    }
+}
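Services::build is still scaffolding in this commit: it clones an `Arc<D>` into fields whose services, per the other hunks, hold `Box<dyn Data>`, which cannot type-check. One consistent way to wire a single shared backend into many trait-object services is to hold `Arc<dyn Trait>` fields instead and clone the Arc per service; a sketch under that assumption (struct names are illustrative):

use std::sync::Arc;

trait Data: Send + Sync {}

struct AppserviceService { db: Arc<dyn Data> }
struct PusherService { db: Arc<dyn Data> }

struct Services {
    appservice: AppserviceService,
    pusher: PusherService,
}

impl Services {
    // One backend value, shared by reference counting; each service keeps
    // its own Arc, so no lifetimes leak into the struct definitions.
    fn build<D: Data + 'static>(db: Arc<D>) -> Self {
        Self {
            appservice: AppserviceService { db: db.clone() }, // Arc<D> coerces to Arc<dyn Data>
            pusher: PusherService { db },
        }
    }
}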
@@ -23,11 +23,11 @@ use ruma::{
 use std::{fmt::Debug, mem};
 use tracing::{error, info, warn};
 
-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }
 
-impl<D: Data> Service<D> {
+impl Service {
     pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> {
         self.db.set_pusher(sender, pusher)
     }

@@ -25,5 +25,5 @@ pub trait Data {
     fn local_aliases_for_room(
         &self,
         room_id: &RoomId,
-    ) -> Result<Box<dyn Iterator<Item=String>>>;
+    ) -> Box<dyn Iterator<Item = Result<Box<RoomAliasId>>>>;
 }
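The return-type change just above is a direct consequence of the trait-object migration: once the data layer is behind `Box<dyn Data>`, the iterator type must be erased too, and fallibility moves from one Result around the whole iterator to a Result per item, so storage errors can surface lazily during iteration. The object-safe shape, with illustrative trait and store names:

type Result<T> = std::result::Result<T, String>;

pub trait AliasData {
    // Object-safe: the concrete iterator type is erased, and each item
    // carries its own Result instead of one Result wrapping the iterator.
    fn local_aliases_for_room(
        &self,
        room_id: &str,
    ) -> Box<dyn Iterator<Item = Result<String>> + '_>;
}

struct MemoryStore { aliases: Vec<String> }

impl AliasData for MemoryStore {
    fn local_aliases_for_room(&self, _room_id: &str) -> Box<dyn Iterator<Item = Result<String>> + '_> {
        Box::new(self.aliases.iter().cloned().map(Ok))
    }
}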
@@ -4,11 +4,11 @@ pub use data::Data;
 use ruma::{RoomAliasId, RoomId};
 use crate::Result;
 
-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }
 
-impl<D: Data> Service<D> {
+impl Service {
     #[tracing::instrument(skip(self))]
     pub fn set_alias(
         &self,

@@ -2,6 +2,6 @@ use std::collections::HashSet;
 use crate::Result;
 
 pub trait Data {
-    fn get_cached_eventid_authchain(&self, shorteventid: u64) -> Result<HashSet<u64>>;
+    fn get_cached_eventid_authchain(&self, shorteventid: u64) -> Result<Option<HashSet<u64>>>;
     fn cache_eventid_authchain(&self, shorteventid: u64, auth_chain: &HashSet<u64>) -> Result<()>;
 }

@@ -5,11 +5,11 @@ pub use data::Data;
 
 use crate::Result;
 
-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }
 
-impl<D: Data> Service<D> {
+impl Service {
     #[tracing::instrument(skip(self))]
     pub fn get_cached_eventid_authchain<'a>(
         &'a self,

@@ -4,11 +4,11 @@ use ruma::RoomId;
 
 use crate::Result;
 
-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }
 
-impl<D: Data> Service<D> {
+impl Service {
     #[tracing::instrument(skip(self))]
     pub fn set_public(&self, room_id: &RoomId) -> Result<()> {
         self.db.set_public(room_id)

@@ -4,8 +4,8 @@ pub mod typing;
 
 pub trait Data: presence::Data + read_receipt::Data + typing::Data {}
 
-pub struct Service<D: Data> {
-    pub presence: presence::Service<D>,
-    pub read_receipt: read_receipt::Service<D>,
-    pub typing: typing::Service<D>,
+pub struct Service {
+    pub presence: presence::Service,
+    pub read_receipt: read_receipt::Service,
+    pub typing: typing::Service,
 }

@@ -6,11 +6,11 @@ use ruma::{RoomId, UserId, events::presence::PresenceEvent};
 
 use crate::Result;
 
-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }
 
-impl<D: Data> Service<D> {
+impl Service {
     /// Adds a presence event which will be saved until a new event replaces it.
     ///
     /// Note: This method takes a RoomId because presence updates are always bound to rooms to

@@ -4,11 +4,11 @@ pub use data::Data;
 use ruma::{RoomId, UserId, events::receipt::ReceiptEvent, serde::Raw};
 use crate::Result;
 
-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }
 
-impl<D: Data> Service<D> {
+impl Service {
     /// Replaces the previous read receipt.
     pub fn readreceipt_update(
         &self,

@@ -4,11 +4,11 @@ use ruma::{UserId, RoomId, events::SyncEphemeralRoomEvent};
 
 use crate::Result;
 
-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }
 
-impl<D: Data> Service<D> {
+impl Service {
     /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is
     /// called.
     pub fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> {
@@ -1,14 +1,16 @@
 /// An async function that can recursively call itself.
 type AsyncRecursiveType<'a, T> = Pin<Box<dyn Future<Output = T> + 'a + Send>>;
 
+use ruma::{RoomVersionId, signatures::CanonicalJsonObject, api::federation::discovery::{get_server_keys, get_remote_server_keys}};
+use tokio::sync::Semaphore;
 use std::{
     collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet},
     pin::Pin,
-    sync::{Arc, RwLock},
-    time::{Duration, Instant},
+    sync::{Arc, RwLock, RwLockWriteGuard},
+    time::{Duration, Instant, SystemTime},
 };
 
-use futures_util::{Future, stream::FuturesUnordered};
+use futures_util::{Future, stream::FuturesUnordered, StreamExt};
 use ruma::{
     api::{
         client::error::ErrorKind,

@@ -22,7 +24,7 @@ use ruma::{
     uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId,
 };
 use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
-use tracing::{error, info, trace, warn};
+use tracing::{error, info, trace, warn, debug};
 
 use crate::{service::*, services, Result, Error, PduEvent};
 

@@ -53,7 +55,7 @@ impl Service {
     /// it
     /// 14. Use state resolution to find new room state
     // We use some AsyncRecursiveType hacks here so we can call this async funtion recursively
-    #[tracing::instrument(skip(value, is_timeline_event, pub_key_map))]
+    #[tracing::instrument(skip(self, value, is_timeline_event, pub_key_map))]
     pub(crate) async fn handle_incoming_pdu<'a>(
         &self,
         origin: &'a ServerName,

@@ -64,10 +66,11 @@ impl Service {
         pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
     ) -> Result<Option<Vec<u8>>> {
         if !services().rooms.metadata.exists(room_id)? {
-            return Error::BadRequest(
+            return Err(Error::BadRequest(
                 ErrorKind::NotFound,
                 "Room is unknown to this server",
-            )};
+            ));
+        }
 
         services()
             .rooms

@@ -732,7 +735,7 @@ impl Service {
             &incoming_pdu.sender,
             incoming_pdu.state_key.as_deref(),
             &incoming_pdu.content,
-        )?
+        )?;
 
         let soft_fail = !state_res::event_auth::auth_check(
             &room_version,

@@ -821,7 +824,7 @@ impl Service {
         let shortstatekey = services()
             .rooms
             .short
-            .get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key)?
+            .get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key)?;
 
         state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id));
     }

@@ -1236,7 +1239,7 @@ impl Service {
 
     let signature_ids = signature_object.keys().cloned().collect::<Vec<_>>();
 
-    let fetch_res = fetch_signing_keys(
+    let fetch_res = self.fetch_signing_keys(
         signature_server.as_str().try_into().map_err(|_| {
             Error::BadServerResponse("Invalid servername in signatures of server response pdu.")
         })?,
@@ -1481,4 +1484,168 @@ impl Service {
         ))
     }
 
+    /// Search the DB for the signing keys of the given server, if we don't have them
+    /// fetch them from the server and save to our DB.
+    #[tracing::instrument(skip_all)]
+    pub async fn fetch_signing_keys(
+        &self,
+        origin: &ServerName,
+        signature_ids: Vec<String>,
+    ) -> Result<BTreeMap<String, Base64>> {
+        let contains_all_ids =
+            |keys: &BTreeMap<String, Base64>| signature_ids.iter().all(|id| keys.contains_key(id));
+
+        let permit = services()
+            .globals
+            .servername_ratelimiter
+            .read()
+            .unwrap()
+            .get(origin)
+            .map(|s| Arc::clone(s).acquire_owned());
+
+        let permit = match permit {
+            Some(p) => p,
+            None => {
+                let mut write = services().globals.servername_ratelimiter.write().unwrap();
+                let s = Arc::clone(
+                    write
+                        .entry(origin.to_owned())
+                        .or_insert_with(|| Arc::new(Semaphore::new(1))),
+                );
+
+                s.acquire_owned()
+            }
+        }
+        .await;
+
+        let back_off = |id| match services()
+            .globals
+            .bad_signature_ratelimiter
+            .write()
+            .unwrap()
+            .entry(id)
+        {
+            hash_map::Entry::Vacant(e) => {
+                e.insert((Instant::now(), 1));
+            }
+            hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
+        };
+
+        if let Some((time, tries)) = services()
+            .globals
+            .bad_signature_ratelimiter
+            .read()
+            .unwrap()
+            .get(&signature_ids)
+        {
+            // Exponential backoff
+            let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries);
+            if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
+                min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
+            }
+
+            if time.elapsed() < min_elapsed_duration {
+                debug!("Backing off from {:?}", signature_ids);
+                return Err(Error::BadServerResponse("bad signature, still backing off"));
+            }
+        }
+
+        trace!("Loading signing keys for {}", origin);
+
+        let mut result: BTreeMap<_, _> = services()
+            .globals
+            .signing_keys_for(origin)?
+            .into_iter()
+            .map(|(k, v)| (k.to_string(), v.key))
+            .collect();
+
+        if contains_all_ids(&result) {
+            return Ok(result);
+        }
+
+        debug!("Fetching signing keys for {} over federation", origin);
+
+        if let Some(server_key) = services()
+            .sending
+            .send_federation_request(origin, get_server_keys::v2::Request::new())
+            .await
+            .ok()
+            .and_then(|resp| resp.server_key.deserialize().ok())
+        {
+            services().globals.add_signing_key(origin, server_key.clone())?;
+
+            result.extend(
+                server_key
+                    .verify_keys
+                    .into_iter()
+                    .map(|(k, v)| (k.to_string(), v.key)),
+            );
+            result.extend(
+                server_key
+                    .old_verify_keys
+                    .into_iter()
+                    .map(|(k, v)| (k.to_string(), v.key)),
+            );
+
+            if contains_all_ids(&result) {
+                return Ok(result);
+            }
+        }
+
+        for server in services().globals.trusted_servers() {
+            debug!("Asking {} for {}'s signing key", server, origin);
+            if let Some(server_keys) = services()
+                .sending
+                .send_federation_request(
+                    server,
+                    get_remote_server_keys::v2::Request::new(
+                        origin,
+                        MilliSecondsSinceUnixEpoch::from_system_time(
+                            SystemTime::now()
+                                .checked_add(Duration::from_secs(3600))
+                                .expect("SystemTime to large"),
+                        )
+                        .expect("time is valid"),
+                    ),
+                )
+                .await
+                .ok()
+                .map(|resp| {
+                    resp.server_keys
+                        .into_iter()
+                        .filter_map(|e| e.deserialize().ok())
+                        .collect::<Vec<_>>()
+                })
+            {
+                trace!("Got signing keys: {:?}", server_keys);
+                for k in server_keys {
+                    services().globals.add_signing_key(origin, k.clone())?;
+                    result.extend(
+                        k.verify_keys
+                            .into_iter()
+                            .map(|(k, v)| (k.to_string(), v.key)),
+                    );
+                    result.extend(
+                        k.old_verify_keys
+                            .into_iter()
+                            .map(|(k, v)| (k.to_string(), v.key)),
+                    );
+                }
+
+                if contains_all_ids(&result) {
+                    return Ok(result);
+                }
+            }
+        }
+
+        drop(permit);
+
+        back_off(signature_ids);
+
+        warn!("Failed to find public key for server: {}", origin);
+        Err(Error::BadServerResponse(
+            "Failed to find public key for server",
+        ))
+    }
 }
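fetch_signing_keys above rate-limits retries per set of signature ids: each failure bumps a counter, and the next attempt is allowed only once at least 30s · tries² has elapsed (quadratic growth, despite the "exponential" comment), capped at one day. The arithmetic in isolation:

use std::time::{Duration, Instant};

// Quadratic backoff with a hard cap, as used for bad signatures above.
fn may_retry(last_failure: Instant, tries: u32) -> bool {
    let mut min_elapsed = Duration::from_secs(30) * tries * tries;
    let cap = Duration::from_secs(60 * 60 * 24);
    if min_elapsed > cap {
        min_elapsed = cap;
    }
    last_failure.elapsed() >= min_elapsed
}

// e.g. tries = 3 -> wait at least 30s * 9 = 270s before retrying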
@@ -15,7 +15,7 @@ pub trait Data {
         user_id: &UserId,
         device_id: &DeviceId,
         room_id: &RoomId,
-        since: u64,
+        confirmed_user_ids: &mut dyn Iterator<Item = &UserId>,
     ) -> Result<()>;
 
     fn lazy_load_reset(
@@ -1,16 +1,18 @@
 mod data;
-use std::collections::HashSet;
+use std::{collections::{HashSet, HashMap}, sync::Mutex};
 
 pub use data::Data;
 use ruma::{DeviceId, UserId, RoomId};
 
 use crate::Result;
 
-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
+
+    lazy_load_waiting: Mutex<HashMap<(Box<UserId>, Box<DeviceId>, Box<RoomId>, u64), HashSet<Box<UserId>>>>,
 }
 
-impl<D: Data> Service<D> {
+impl Service {
     #[tracing::instrument(skip(self))]
     pub fn lazy_load_was_sent_before(
         &self,

@@ -50,7 +52,18 @@ impl<D: Data> Service<D> {
         room_id: &RoomId,
         since: u64,
     ) -> Result<()> {
-        self.db.lazy_load_confirm_delivery(user_id, device_id, room_id, since)
+        if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&(
+            user_id.to_owned(),
+            device_id.to_owned(),
+            room_id.to_owned(),
+            since,
+        )) {
+            self.db.lazy_load_confirm_delivery(user_id, device_id, room_id, &mut user_ids.iter().map(|u| &**u))?;
+        } else {
+            // Ignore
+        }
+
+        Ok(())
     }
 
     #[tracing::instrument(skip(self))]
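The new lazy_load_waiting field buffers which room members were included in a not-yet-acknowledged sync response, keyed by (user, device, room, since); confirm_delivery drains that entry into the database once the client's next request proves the previous response arrived. The bookkeeping pattern, reduced to plain strings:

use std::collections::{HashMap, HashSet};
use std::sync::Mutex;

type Key = (String, String, String, u64); // (user, device, room, since)

struct LazyLoad {
    waiting: Mutex<HashMap<Key, HashSet<String>>>,
}

impl LazyLoad {
    // Called while building a sync response: remember which members were sent.
    fn mark_sent(&self, key: Key, user_ids: HashSet<String>) {
        self.waiting.lock().unwrap().insert(key, user_ids);
    }

    // Called on the *next* request with the same token: the client saw the
    // previous response, so the pending set can be persisted (here: returned).
    fn confirm_delivery(&self, key: &Key) -> Option<HashSet<String>> {
        self.waiting.lock().unwrap().remove(key)
    }
}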
@@ -4,11 +4,11 @@ use ruma::RoomId;
 
 use crate::Result;
 
-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }
 
-impl<D: Data> Service<D> {
+impl Service {
     /// Checks if a room exists.
     #[tracing::instrument(skip(self))]
     pub fn exists(&self, room_id: &RoomId) -> Result<bool> {

@@ -18,22 +18,22 @@ pub mod user;
 
 pub trait Data: alias::Data + auth_chain::Data + directory::Data + edus::Data + lazy_loading::Data + metadata::Data + outlier::Data + pdu_metadata::Data + search::Data + short::Data + state::Data + state_accessor::Data + state_cache::Data + state_compressor::Data + timeline::Data + user::Data {}
 
-pub struct Service<D: Data> {
-    pub alias: alias::Service<D>,
-    pub auth_chain: auth_chain::Service<D>,
-    pub directory: directory::Service<D>,
-    pub edus: edus::Service<D>,
+pub struct Service {
+    pub alias: alias::Service,
+    pub auth_chain: auth_chain::Service,
+    pub directory: directory::Service,
+    pub edus: edus::Service,
     pub event_handler: event_handler::Service,
-    pub lazy_loading: lazy_loading::Service<D>,
-    pub metadata: metadata::Service<D>,
-    pub outlier: outlier::Service<D>,
-    pub pdu_metadata: pdu_metadata::Service<D>,
-    pub search: search::Service<D>,
-    pub short: short::Service<D>,
-    pub state: state::Service<D>,
-    pub state_accessor: state_accessor::Service<D>,
-    pub state_cache: state_cache::Service<D>,
-    pub state_compressor: state_compressor::Service<D>,
-    pub timeline: timeline::Service<D>,
-    pub user: user::Service<D>,
+    pub lazy_loading: lazy_loading::Service,
+    pub metadata: metadata::Service,
+    pub outlier: outlier::Service,
+    pub pdu_metadata: pdu_metadata::Service,
+    pub search: search::Service,
+    pub short: short::Service,
+    pub state: state::Service,
+    pub state_accessor: state_accessor::Service,
+    pub state_cache: state_cache::Service,
+    pub state_compressor: state_compressor::Service,
+    pub timeline: timeline::Service,
+    pub user: user::Service,
 }

@@ -4,11 +4,11 @@ use ruma::{EventId, signatures::CanonicalJsonObject};
 
 use crate::{Result, PduEvent};
 
-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }
 
-impl<D: Data> Service<D> {
+impl Service {
     /// Returns the pdu from the outlier tree.
     pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result<Option<CanonicalJsonObject>> {
         self.db.get_outlier_pdu_json(event_id)

@@ -6,11 +6,11 @@ use ruma::{RoomId, EventId};
 
 use crate::Result;
 
-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }
 
-impl<D: Data> Service<D> {
+impl Service {
     #[tracing::instrument(skip(self, room_id, event_ids))]
     pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc<EventId>]) -> Result<()> {
         self.db.mark_as_referenced(room_id, event_ids)

@@ -2,7 +2,7 @@ use ruma::RoomId;
 use crate::Result;
 
 pub trait Data {
-    fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: u64, message_body: String) -> Result<()>;
+    fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()>;
 
     fn search_pdus<'a>(
         &'a self,

@@ -4,11 +4,16 @@ pub use data::Data;
 use crate::Result;
 use ruma::RoomId;
 
-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }
 
-impl<D: Data> Service<D> {
+impl Service {
+    #[tracing::instrument(skip(self))]
+    pub fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()> {
+        self.db.index_pdu(shortroomid, pdu_id, message_body)
+    }
+
     #[tracing::instrument(skip(self))]
     pub fn search_pdus<'a>(
         &'a self,

@@ -6,11 +6,11 @@ use ruma::{EventId, events::StateEventType, RoomId};
 
 use crate::{Result, Error, utils, services};
 
-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }
 
-impl<D: Data> Service<D> {
+impl Service {
     pub fn get_or_create_shorteventid(
         &self,
         event_id: &EventId,

@@ -1,6 +1,5 @@
 use std::sync::Arc;
 use std::{sync::MutexGuard, collections::HashSet};
-use std::fmt::Debug;
 
 use crate::Result;
 use ruma::{EventId, RoomId};

@@ -22,7 +21,7 @@ pub trait Data {
     /// Replace the forward extremities of the room.
     fn set_forward_extremities<'a>(&self,
         room_id: &RoomId,
-        event_ids: impl IntoIterator<Item = &'a EventId> + Debug,
+        event_ids: &dyn Iterator<Item = &'a EventId>,
         _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
     ) -> Result<()>;
 }
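Swapping `impl IntoIterator + Debug` for a trait-object iterator keeps this trait object-safe, but note that `&dyn Iterator` as written is a dead end: Iterator::next takes `&mut self`, so the iterator can never be advanced through a shared reference. The usable object-safe form takes `&mut dyn Iterator`, as the lazy-loading trait earlier in this diff already does; a sketch with illustrative names:

// Object-safe trait method taking an erased iterator that can actually
// be advanced: &mut dyn, not &dyn.
trait StateStore {
    fn set_forward_extremities(
        &self,
        room_id: &str,
        event_ids: &mut dyn Iterator<Item = String>,
    );
}

struct Store;

impl StateStore for Store {
    fn set_forward_extremities(&self, room_id: &str, event_ids: &mut dyn Iterator<Item = String>) {
        // &mut dyn Iterator itself implements Iterator, so it can drive a loop.
        for id in event_ids {
            println!("{room_id}: forward extremity {id}");
        }
    }
}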
|
@ -10,11 +10,11 @@ use crate::{Result, services, PduEvent, Error, utils::calculate_hash};
|
||||||
|
|
||||||
use super::state_compressor::CompressedStateEvent;
|
use super::state_compressor::CompressedStateEvent;
|
||||||
|
|
||||||
pub struct Service<D: Data> {
|
pub struct Service {
|
||||||
db: D,
|
db: Box<dyn Data>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<D: Data> Service<D> {
|
impl Service {
|
||||||
/// Set the room to the given statehash and update caches.
|
/// Set the room to the given statehash and update caches.
|
||||||
pub fn force_state(
|
pub fn force_state(
|
||||||
&self,
|
&self,
|
||||||
|
@ -23,6 +23,15 @@ impl<D: Data> Service<D> {
|
||||||
statediffnew: HashSet<CompressedStateEvent>,
|
statediffnew: HashSet<CompressedStateEvent>,
|
||||||
statediffremoved: HashSet<CompressedStateEvent>,
|
statediffremoved: HashSet<CompressedStateEvent>,
|
||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
|
let mutex_state = Arc::clone(
|
||||||
|
services().globals
|
||||||
|
.roomid_mutex_state
|
||||||
|
.write()
|
||||||
|
.unwrap()
|
||||||
|
.entry(body.room_id.to_owned())
|
||||||
|
.or_default(),
|
||||||
|
);
|
||||||
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
for event_id in statediffnew.into_iter().filter_map(|new| {
|
for event_id in statediffnew.into_iter().filter_map(|new| {
|
||||||
services().rooms.state_compressor.parse_compressed_state_event(new)
|
services().rooms.state_compressor.parse_compressed_state_event(new)
|
||||||
|
@ -70,7 +79,9 @@ impl<D: Data> Service<D> {
|
||||||
|
|
||||||
services().room.state_cache.update_joined_count(room_id)?;
|
services().room.state_cache.update_joined_count(room_id)?;
|
||||||
|
|
||||||
self.db.set_room_state(room_id, shortstatehash);
|
self.db.set_room_state(room_id, shortstatehash, &state_lock);
|
||||||
|
|
||||||
|
drop(state_lock);
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
|
@@ -6,11 +6,11 @@ use ruma::{events::StateEventType, RoomId, EventId};

 use crate::{Result, PduEvent};

-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }

-impl<D: Data> Service<D> {
+impl Service {
     /// Builds a StateMap by iterating over all keys that start
     /// with state_hash, this gives the full state for the given state_hash.
     #[tracing::instrument(skip(self))]
@@ -7,11 +7,11 @@ use ruma::{RoomId, UserId, events::{room::{member::MembershipState, create::Room

 use crate::{Result, services, utils, Error};

-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }

-impl<D: Data> Service<D> {
+impl Service {
     /// Update current membership data.
     #[tracing::instrument(skip(self, last_state))]
     pub fn update_membership(
@@ -1,10 +1,12 @@
+use std::collections::HashSet;
+
 use super::CompressedStateEvent;
 use crate::Result;

 pub struct StateDiff {
-    parent: Option<u64>,
-    added: Vec<CompressedStateEvent>,
-    removed: Vec<CompressedStateEvent>,
+    pub parent: Option<u64>,
+    pub added: HashSet<CompressedStateEvent>,
+    pub removed: HashSet<CompressedStateEvent>,
 }

 pub trait Data {
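`StateDiff` swaps `Vec` for `HashSet` and exposes its fields. Sets fit the semantics better: a state diff has no meaningful order, duplicates would be bugs, and merging layers becomes cheap set arithmetic. A small illustration of the payoff (assumed usage, not the crate's code):

use std::collections::HashSet;

type CompressedStateEvent = [u8; 16];

fn apply_diff(
    full: &mut HashSet<CompressedStateEvent>,
    added: &HashSet<CompressedStateEvent>,
    removed: &HashSet<CompressedStateEvent>,
) {
    // O(1) average per element, and re-applying a diff is idempotent,
    // which a Vec with push/retain would not give for free.
    for e in removed {
        full.remove(e);
    }
    for e in added {
        full.insert(*e);
    }
}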
@@ -8,13 +8,13 @@ use crate::{Result, utils, services};

 use self::data::StateDiff;

-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }

 pub type CompressedStateEvent = [u8; 2 * size_of::<u64>()];

-impl<D: Data> Service<D> {
+impl Service {
     /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer.
     #[tracing::instrument(skip(self))]
     pub fn load_shortstatehash_info(
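`CompressedStateEvent` stays a fixed `[u8; 2 * size_of::<u64>()]`, i.e. 16 bytes: two 8-byte short IDs packed back to back, which is what makes whole state snapshots cheap enough to diff and dedupe as plain byte arrays. A hedged sketch of that packing (the shortstatekey/shorteventid layout is assumed from context):

use std::mem::size_of;

pub type CompressedStateEvent = [u8; 2 * size_of::<u64>()];

// Assumed layout: big-endian shortstatekey followed by shorteventid.
fn compress(shortstatekey: u64, shorteventid: u64) -> CompressedStateEvent {
    let mut out = [0u8; 16];
    out[..8].copy_from_slice(&shortstatekey.to_be_bytes());
    out[8..].copy_from_slice(&shorteventid.to_be_bytes());
    out
}

fn shortstatekey(c: &CompressedStateEvent) -> u64 {
    u64::from_be_bytes(c[..8].try_into().unwrap())
}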
@@ -20,11 +20,11 @@ use crate::{services, Result, service::pdu::{PduBuilder, EventHash}, Error, PduE

 use super::state_compressor::CompressedStateEvent;

-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }

-impl<D: Data> Service<D> {
+impl Service {
     /*
     /// Checks if a room exists.
     #[tracing::instrument(skip(self))]
@@ -4,11 +4,11 @@ use ruma::{RoomId, UserId};

 use crate::Result;

-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }

-impl<D: Data> Service<D> {
+impl Service {
     pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
         self.db.reset_notification_counts(user_id, room_id)
     }
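With `db: Box<dyn Data>`, each service is just a thin delegating wrapper like `reset_notification_counts` above, and the concrete backend is chosen once at startup; nothing downstream ever names its type. Sketch (the `MemoryData` backend is hypothetical):

trait Data: Send + Sync {
    fn reset_notification_counts(&self, user_id: &str, room_id: &str) -> Result<(), ()>;
}

struct MemoryData;

impl Data for MemoryData {
    fn reset_notification_counts(&self, _user_id: &str, _room_id: &str) -> Result<(), ()> {
        Ok(())
    }
}

struct Service {
    db: Box<dyn Data>,
}

fn build() -> Service {
    // Unsized coercion: Box<MemoryData> -> Box<dyn Data> happens implicitly.
    Service { db: Box::new(MemoryData) }
}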
@@ -4,11 +4,11 @@ pub use data::Data;
 use ruma::{UserId, DeviceId, TransactionId};
 use crate::Result;

-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }

-impl<D: Data> Service<D> {
+impl Service {
     pub fn add_txnid(
         &self,
         user_id: &UserId,
@@ -6,11 +6,11 @@ use tracing::error;

 use crate::{Result, utils, Error, services, api::client_server::SESSION_ID_LENGTH};

-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }

-impl<D: Data> Service<D> {
+impl Service {
     /// Creates a new Uiaa session. Make sure the session token is unique.
     pub fn create(
         &self,
@@ -2,7 +2,7 @@ use std::collections::BTreeMap;
 use crate::Result;
 use ruma::{UserId, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, serde::Raw, encryption::{OneTimeKey, DeviceKeys, CrossSigningKey}, UInt, events::AnyToDeviceEvent, api::client::{device::Device, filter::IncomingFilterDefinition}, MxcUri};

-pub trait Data {
+pub trait Data: Send + Sync {
     /// Check if a user has an account on this homeserver.
     fn exists(&self, user_id: &UserId) -> Result<bool>;

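The `Send + Sync` supertraits are new here. With generics, such bounds could be added at each use site; a bare `Box<dyn Data>` erases the type, so thread safety must be promised in the trait itself or a shared services struct cannot cross task boundaries. What the bound buys, in miniature:

use std::sync::Arc;

trait Data: Send + Sync {}

struct Services {
    users: Box<dyn Data>,
}

fn spawn_user_task(services: Arc<Services>) {
    // Arc<Services>: Send requires Services: Send + Sync, which requires
    // Box<dyn Data>: Send + Sync -- guaranteed by the supertraits, since a
    // trait object implements the supertraits of its trait.
    std::thread::spawn(move || {
        let _ = &services.users;
    });
}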
@@ -138,16 +138,16 @@ pub trait Data {
         device_id: &DeviceId,
     ) -> Result<Option<Raw<DeviceKeys>>>;

-    fn get_master_key<F: Fn(&UserId) -> bool>(
+    fn get_master_key(
         &self,
         user_id: &UserId,
-        allowed_signatures: F,
+        allowed_signatures: &dyn Fn(&UserId) -> bool,
     ) -> Result<Option<Raw<CrossSigningKey>>>;

-    fn get_self_signing_key<F: Fn(&UserId) -> bool>(
+    fn get_self_signing_key(
        &self,
         user_id: &UserId,
-        allowed_signatures: F,
+        allowed_signatures: &dyn Fn(&UserId) -> bool,
     ) -> Result<Option<Raw<CrossSigningKey>>>;

     fn get_user_signing_key(&self, user_id: &UserId) -> Result<Option<Raw<CrossSigningKey>>>;
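Same object-safety story for the key getters: `fn get_master_key<F: Fn(&UserId) -> bool>` is a generic method, which would make `Data` unusable as `dyn Data`, so the predicate becomes `&dyn Fn(&UserId) -> bool`. Callers barely change, since closures coerce behind a reference; a standalone sketch (`str` standing in for `ruma::UserId`):

type UserId = str;

fn get_master_key(
    user_id: &UserId,
    allowed_signatures: &dyn Fn(&UserId) -> bool,
) -> Option<String> {
    // Dynamic dispatch: one indirect call per invocation of the predicate.
    if allowed_signatures(user_id) {
        Some(format!("master key for {user_id}"))
    } else {
        None
    }
}

fn main() {
    let me = "@alice:example.org";
    // A plain closure coerces to &dyn Fn at the call site.
    let key = get_master_key(me, &|u: &UserId| u == me);
    assert!(key.is_some());
}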
@@ -6,11 +6,11 @@ use ruma::{UserId, MxcUri, DeviceId, DeviceKeyId, serde::Raw, encryption::{OneTi

 use crate::{Result, Error, services};

-pub struct Service<D: Data> {
-    db: D,
+pub struct Service {
+    db: Box<dyn Data>,
 }

-impl<D: Data> Service<D> {
+impl Service {
     /// Check if a user has an account on this homeserver.
     pub fn exists(&self, user_id: &UserId) -> Result<bool> {
         self.db.exists(user_id)