improvement: federation get_keys and optimize signingkey storage

- get encryption keys over federation
- optimize signing key storage
- rate limit parsing of bad events
- rate limit signature fetching
- dependency bumps

parent ae41bc5067
commit 09157b2096
18 changed files with 566 additions and 371 deletions
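The two rate-limiting bullets are backed by a new `RateLimitState = (Instant, u32)` pair (time of the last failed try, number of failed tries) introduced in the globals module below. A minimal sketch of the backoff check such a pair enables; the helper name and the exponential schedule are illustrative assumptions, not code from this commit:

use std::{
    collections::BTreeMap,
    time::{Duration, Instant},
};

type RateLimitState = (Instant, u32); // time of last failed try, number of failed tries

// Hypothetical helper: returns true while `key` is still inside its backoff window.
fn backoff_active(ratelimiter: &BTreeMap<String, RateLimitState>, key: &str) -> bool {
    if let Some((last_failed, tries)) = ratelimiter.get(key) {
        // Wait 2^tries minutes after the last failure, capped at one day.
        let wait = Duration::from_secs(60) * 2u32.saturating_pow(*tries);
        let wait = wait.min(Duration::from_secs(60 * 60 * 24));
        return last_failed.elapsed() < wait;
    }
    false
}

On a success the entry would be dropped; on a failure it becomes `(Instant::now(), tries + 1)`.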
@@ -1,7 +1,7 @@
 use crate::{utils, Error, Result};
 use ruma::{
     api::client::error::ErrorKind,
-    events::{AnyEvent as EduEvent, EventType},
+    events::{AnyEphemeralRoomEvent, EventType},
     serde::Raw,
     RoomId, UserId,
 };
@@ -80,7 +80,7 @@ impl AccountData {
         room_id: Option<&RoomId>,
         user_id: &UserId,
         since: u64,
-    ) -> Result<HashMap<EventType, Raw<EduEvent>>> {
+    ) -> Result<HashMap<EventType, Raw<AnyEphemeralRoomEvent>>> {
        let mut userdata = HashMap::new();

        let mut prefix = room_id
@@ -110,7 +110,7 @@ impl AccountData {
                         .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?,
                 )
                 .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?,
-                serde_json::from_slice::<Raw<EduEvent>>(&v).map_err(|_| {
+                serde_json::from_slice::<Raw<AnyEphemeralRoomEvent>>(&v).map_err(|_| {
                     Error::bad_database("Database contains invalid account data.")
                 })?,
             ))
@@ -2,20 +2,22 @@ use crate::{database::Config, utils, Error, Result};
 use log::{error, info};
 use ruma::{
     api::federation::discovery::{ServerSigningKeys, VerifyKey},
-    ServerName, ServerSigningKeyId,
+    EventId, MilliSecondsSinceUnixEpoch, ServerName, ServerSigningKeyId,
 };
 use rustls::{ServerCertVerifier, WebPKIVerifier};
 use std::{
     collections::{BTreeMap, HashMap},
     sync::{Arc, RwLock},
-    time::Duration,
+    time::{Duration, Instant},
 };
+use tokio::sync::Semaphore;
 use trust_dns_resolver::TokioAsyncResolver;

 pub const COUNTER: &str = "c";

 type WellKnownMap = HashMap<Box<ServerName>, (String, String)>;
 type TlsNameMap = HashMap<String, webpki::DNSName>;
+type RateLimitState = (Instant, u32); // Time of last failed try, number of failed tries
 #[derive(Clone)]
 pub struct Globals {
     pub actual_destination_cache: Arc<RwLock<WellKnownMap>>, // actual_destination, host
@@ -26,7 +28,10 @@ pub struct Globals {
     reqwest_client: reqwest::Client,
     dns_resolver: TokioAsyncResolver,
     jwt_decoding_key: Option<jsonwebtoken::DecodingKey<'static>>,
-    pub(super) servertimeout_signingkey: sled::Tree, // ServerName + Timeout Timestamp -> algorithm:key + pubkey
+    pub(super) server_signingkeys: sled::Tree,
+    pub bad_event_ratelimiter: Arc<RwLock<BTreeMap<EventId, RateLimitState>>>,
+    pub bad_signature_ratelimiter: Arc<RwLock<BTreeMap<Vec<String>, RateLimitState>>>,
+    pub servername_ratelimiter: Arc<RwLock<BTreeMap<Box<ServerName>, Arc<Semaphore>>>>,
 }

 struct MatrixServerVerifier {
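The `servername_ratelimiter` added above keeps one `tokio::sync::Semaphore` per server name, so concurrent signature fetches for the same server wait on a single permit instead of stampeding it. A rough sketch of that pattern, assuming tokio 1.x and plain `String` keys in place of `Box<ServerName>`; the helper itself is hypothetical, not part of the commit:

use std::{
    collections::BTreeMap,
    sync::{Arc, RwLock},
};
use tokio::sync::Semaphore;

// Hypothetical helper: run `fetch` while holding the single permit for `server`.
async fn with_server_permit<F, Fut, T>(
    ratelimiter: &RwLock<BTreeMap<String, Arc<Semaphore>>>,
    server: &str,
    fetch: F,
) -> T
where
    F: FnOnce() -> Fut,
    Fut: std::future::Future<Output = T>,
{
    // Look up (or lazily create) this server's semaphore; the lock guard is
    // dropped at the end of this statement, before any await point.
    let semaphore = ratelimiter
        .write()
        .unwrap()
        .entry(server.to_owned())
        .or_insert_with(|| Arc::new(Semaphore::new(1)))
        .clone();

    // Only one key fetch per server runs past this point at a time.
    let _permit = semaphore.acquire().await.expect("semaphore never closed");
    fetch().await
}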
@@ -65,7 +70,7 @@ impl ServerCertVerifier for MatrixServerVerifier {
 impl Globals {
     pub fn load(
         globals: sled::Tree,
-        servertimeout_signingkey: sled::Tree,
+        server_signingkeys: sled::Tree,
         config: Config,
     ) -> Result<Self> {
         let bytes = &*globals
@@ -135,8 +140,11 @@ impl Globals {
                 })?,
             actual_destination_cache: Arc::new(RwLock::new(WellKnownMap::new())),
             tls_name_override,
-            servertimeout_signingkey,
+            server_signingkeys,
             jwt_decoding_key,
+            bad_event_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())),
+            bad_signature_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())),
+            servername_ratelimiter: Arc::new(RwLock::new(BTreeMap::new())),
         })
     }

@@ -203,31 +211,21 @@ impl Globals {
     /// Remove the outdated keys and insert the new ones.
     ///
     /// This doesn't actually check that the keys provided are newer than the old set.
-    pub fn add_signing_key(&self, origin: &ServerName, keys: &ServerSigningKeys) -> Result<()> {
-        let mut key1 = origin.as_bytes().to_vec();
-        key1.push(0xff);
-
-        let mut key2 = key1.clone();
-
-        let ts = keys
-            .valid_until_ts
-            .duration_since(std::time::UNIX_EPOCH)
-            .expect("time is valid")
-            .as_millis() as u64;
-
-        key1.extend_from_slice(&ts.to_be_bytes());
-        key2.extend_from_slice(&(ts + 1).to_be_bytes());
-
-        self.servertimeout_signingkey.insert(
-            key1,
-            serde_json::to_vec(&keys.verify_keys).expect("ServerSigningKeys are a valid string"),
-        )?;
-
-        self.servertimeout_signingkey.insert(
-            key2,
-            serde_json::to_vec(&keys.old_verify_keys)
-                .expect("ServerSigningKeys are a valid string"),
-        )?;
+    pub fn add_signing_key(&self, origin: &ServerName, new_keys: &ServerSigningKeys) -> Result<()> {
+        self.server_signingkeys
+            .update_and_fetch(origin.as_bytes(), |signingkeys| {
+                let mut keys = signingkeys
+                    .and_then(|keys| serde_json::from_slice(keys).ok())
+                    .unwrap_or_else(|| {
+                        // Just insert "now", it doesn't matter
+                        ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now())
+                    });
+                keys.verify_keys
+                    .extend(new_keys.verify_keys.clone().into_iter());
+                keys.old_verify_keys
+                    .extend(new_keys.old_verify_keys.clone().into_iter());
+                Some(serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"))
+            })?;

         Ok(())
     }
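The rewritten `add_signing_key` above stores all of a server's keys in a single `server_signingkeys` row and merges updates in place with sled's `update_and_fetch`, replacing the old timestamp-suffixed double insert. A self-contained sketch of that read-merge-write pattern, using a placeholder `BTreeMap<String, String>` value instead of `ServerSigningKeys` (sled and serde_json as dependencies):

use std::collections::BTreeMap;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder value type; the real code stores a serialized ServerSigningKeys.
    let new_keys: BTreeMap<String, String> =
        [("ed25519:abc".to_owned(), "base64+key".to_owned())].into();

    let db = sled::Config::new().temporary(true).open()?;
    let tree = db.open_tree("server_signingkeys")?;

    // update_and_fetch retries this closure in a compare-and-swap loop:
    // read the old value (if any), merge the new keys in, write the result back.
    tree.update_and_fetch(b"example.com", |old| {
        let mut keys: BTreeMap<String, String> = old
            .and_then(|bytes| serde_json::from_slice(bytes).ok())
            .unwrap_or_default();
        keys.extend(new_keys.clone());
        Some(serde_json::to_vec(&keys).expect("map serializes"))
    })?;

    Ok(())
}

Because the closure runs inside a compare-and-swap loop, the merge stays consistent even with concurrent writers, which is what makes the one-row-per-server layout safe.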
@@ -237,26 +235,22 @@
         &self,
         origin: &ServerName,
     ) -> Result<BTreeMap<ServerSigningKeyId, VerifyKey>> {
-        let mut response = BTreeMap::new();
+        let signingkeys = self
+            .server_signingkeys
+            .get(origin.as_bytes())?
+            .and_then(|bytes| serde_json::from_slice::<ServerSigningKeys>(&bytes).ok())
+            .map(|keys| {
+                let mut tree = keys.verify_keys;
+                tree.extend(
+                    keys.old_verify_keys
+                        .into_iter()
+                        .map(|old| (old.0, VerifyKey::new(old.1.key))),
+                );
+                tree
+            })
+            .unwrap_or_else(BTreeMap::new);

-        let now = crate::utils::millis_since_unix_epoch();
-
-        for item in self.servertimeout_signingkey.scan_prefix(origin.as_bytes()) {
-            let (k, bytes) = item?;
-            let valid_until = k
-                .splitn(2, |&b| b == 0xff)
-                .nth(1)
-                .map(crate::utils::u64_from_bytes)
-                .ok_or_else(|| Error::bad_database("Invalid signing keys."))?
-                .map_err(|_| Error::bad_database("Invalid signing key valid until bytes"))?;
-            // If these keys are still valid use em!
-            if valid_until > now {
-                let btree: BTreeMap<_, _> = serde_json::from_slice(&bytes)
-                    .map_err(|_| Error::bad_database("Invalid BTreeMap<> of signing keys"))?;
-                response.extend(btree);
-            }
-        }
-        Ok(response)
+        Ok(signingkeys)
     }

     pub fn database_version(&self) -> Result<u64> {
@@ -294,7 +294,8 @@ async fn send_notice(
     } else {
         notifi.sender = Some(&event.sender);
         notifi.event_type = Some(&event.kind);
-        notifi.content = serde_json::value::to_raw_value(&event.content).ok();
+        let content = serde_json::value::to_raw_value(&event.content).ok();
+        notifi.content = content.as_deref();

         if event.kind == EventType::RoomMember {
             notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str());
@@ -2,7 +2,7 @@ use crate::{utils, Error, Result};
 use ruma::{
     events::{
         presence::{PresenceEvent, PresenceEventContent},
-        AnyEvent as EduEvent, SyncEphemeralRoomEvent,
+        AnyEphemeralRoomEvent, SyncEphemeralRoomEvent,
     },
     presence::PresenceState,
     serde::Raw,
@@ -32,7 +32,7 @@ impl RoomEdus {
         &self,
         user_id: &UserId,
         room_id: &RoomId,
-        event: EduEvent,
+        event: AnyEphemeralRoomEvent,
         globals: &super::super::globals::Globals,
     ) -> Result<()> {
         let mut prefix = room_id.as_bytes().to_vec();
@@ -3,7 +3,7 @@ use std::{
     convert::{TryFrom, TryInto},
     fmt::Debug,
     sync::Arc,
-    time::{Duration, Instant, SystemTime},
+    time::{Duration, Instant},
 };

 use crate::{
@@ -23,7 +23,9 @@ use ruma::{
         OutgoingRequest,
     },
     events::{push_rules, AnySyncEphemeralRoomEvent, EventType},
-    push, ServerName, UInt, UserId,
+    push,
+    receipt::ReceiptType,
+    MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId,
 };
 use sled::IVec;
 use tokio::{select, sync::Semaphore};
@@ -277,17 +279,14 @@ impl Sending {
                     events.push(e);
                 }

-                match outgoing_kind {
-                    OutgoingKind::Normal(server_name) => {
-                        if let Ok((select_edus, last_count)) = Self::select_edus(db, server_name) {
-                            events.extend_from_slice(&select_edus);
-                            db.sending
-                                .servername_educount
-                                .insert(server_name.as_bytes(), &last_count.to_be_bytes())
-                                .unwrap();
-                        }
-                    }
-                    _ => {}
+                if let OutgoingKind::Normal(server_name) = outgoing_kind {
+                    if let Ok((select_edus, last_count)) = Self::select_edus(db, server_name) {
+                        events.extend_from_slice(&select_edus);
+                        db.sending
+                            .servername_educount
+                            .insert(server_name.as_bytes(), &last_count.to_be_bytes())
+                            .unwrap();
+                    }
                 }

@@ -326,14 +325,14 @@ impl Sending {
                     AnySyncEphemeralRoomEvent::Receipt(r) => {
                         let mut read = BTreeMap::new();

-                        let (event_id, receipt) = r
+                        let (event_id, mut receipt) = r
                             .content
                             .0
                             .into_iter()
                             .next()
                             .expect("we only use one event per read receipt");
                         let receipt = receipt
                             .read
                             .remove(&ReceiptType::Read)
                             .expect("our read receipts always set this")
                             .remove(&user_id)
                             .expect("our read receipts always have the user here");
@@ -436,7 +435,7 @@ impl Sending {
                         ),
                     )
                 })?
-                .to_any_event())
+                .to_room_event())
             }
             SendingEventType::Edu(_) => {
                 // Appservices don't need EDUs (?)
@@ -610,7 +609,7 @@ impl Sending {
             origin: db.globals.server_name(),
             pdus: &pdu_jsons,
             edus: &edu_jsons,
-            origin_server_ts: SystemTime::now(),
+            origin_server_ts: MilliSecondsSinceUnixEpoch::now(),
             transaction_id: &base64::encode_config(
                 Self::calculate_hash(
                     &events
@@ -1,19 +1,13 @@
 use crate::{utils, Error, Result};
 use ruma::{
-    api::client::{
-        error::ErrorKind,
-        r0::{
-            device::Device,
-            keys::{CrossSigningKey, OneTimeKey},
-        },
-    },
-    encryption::DeviceKeys,
+    api::client::{error::ErrorKind, r0::device::Device},
+    encryption::{CrossSigningKey, DeviceKeys, OneTimeKey},
     events::{AnyToDeviceEvent, EventType},
     identifiers::MxcUri,
     serde::Raw,
-    DeviceId, DeviceKeyAlgorithm, DeviceKeyId, UInt, UserId,
+    DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, UInt, UserId,
 };
-use std::{collections::BTreeMap, convert::TryFrom, mem, time::SystemTime};
+use std::{collections::BTreeMap, convert::TryFrom, mem};

 #[derive(Clone)]
 pub struct Users {
@@ -200,7 +194,7 @@ impl Users {
                 device_id: device_id.into(),
                 display_name: initial_device_display_name,
                 last_seen_ip: None, // TODO
-                last_seen_ts: Some(SystemTime::now()),
+                last_seen_ts: Some(MilliSecondsSinceUnixEpoch::now()),
             })
             .expect("Device::to_string never fails.")
             .as_bytes(),
@@ -653,12 +647,11 @@ impl Users {
         })
     }

-    pub fn get_master_key(
+    pub fn get_master_key<F: Fn(&UserId) -> bool>(
         &self,
         user_id: &UserId,
-        sender_id: &UserId,
+        allowed_signatures: F,
     ) -> Result<Option<CrossSigningKey>> {
-        // TODO: hide some signatures
         self.userid_masterkeyid
             .get(user_id.to_string())?
             .map_or(Ok(None), |key| {
@@ -673,7 +666,7 @@ impl Users {
                 cross_signing_key.signatures = cross_signing_key
                     .signatures
                     .into_iter()
-                    .filter(|(user, _)| user == user_id || user == sender_id)
+                    .filter(|(user, _)| allowed_signatures(user))
                     .collect();

                 Ok(Some(cross_signing_key))
@@ -681,10 +674,10 @@ impl Users {
         })
     }

-    pub fn get_self_signing_key(
+    pub fn get_self_signing_key<F: Fn(&UserId) -> bool>(
         &self,
         user_id: &UserId,
-        sender_id: &UserId,
+        allowed_signatures: F,
     ) -> Result<Option<CrossSigningKey>> {
         self.userid_selfsigningkeyid
             .get(user_id.to_string())?
@@ -700,7 +693,7 @@ impl Users {
                 cross_signing_key.signatures = cross_signing_key
                     .signatures
                     .into_iter()
-                    .filter(|(user, _)| user == user_id || user == sender_id)
+                    .filter(|(user, _)| user == user_id || allowed_signatures(user))
                     .collect();

                 Ok(Some(cross_signing_key))
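With `get_master_key` and `get_self_signing_key` now taking an `allowed_signatures` predicate instead of a fixed `sender_id`, the caller decides which users' signatures survive, which is what lets the federation key endpoints hide signatures that the requesting server should not see. A minimal sketch of the same filtering idea, with plain strings standing in for ruma's `UserId`-keyed `CrossSigningKey::signatures` map:

use std::collections::BTreeMap;

// Stand-in for CrossSigningKey::signatures: signing user -> (key id -> signature).
type Signatures = BTreeMap<String, BTreeMap<String, String>>;

// Keep only signature entries whose signing user passes the caller's predicate.
fn filter_signatures<F: Fn(&str) -> bool>(signatures: Signatures, allowed: F) -> Signatures {
    signatures
        .into_iter()
        .filter(|(user, _)| allowed(user))
        .collect()
}

fn main() {
    let mut sigs = Signatures::new();
    sigs.insert("@alice:example.com".to_owned(), BTreeMap::new());
    sigs.insert("@bob:other.org".to_owned(), BTreeMap::new());

    // A federation caller might only expose signatures made by users of the requesting server.
    let visible = filter_signatures(sigs, |user| user.ends_with(":example.com"));
    assert_eq!(visible.len(), 1);
}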