fix as conversions

Signed-off-by: Jason Volk <jason@zemos.net>
This commit is contained in:
Jason Volk 2024-07-07 06:17:58 +00:00
parent 7397064edd
commit dcd7422c45
11 changed files with 107 additions and 51 deletions

View file

@ -770,7 +770,7 @@ perf = "warn"
#restriction = "warn" #restriction = "warn"
arithmetic_side_effects = "warn" arithmetic_side_effects = "warn"
#as_conversions = "warn" # TODO as_conversions = "warn"
assertions_on_result_states = "warn" assertions_on_result_states = "warn"
dbg_macro = "warn" dbg_macro = "warn"
default_union_representation = "warn" default_union_representation = "warn"

View file

@ -47,7 +47,7 @@ pub(crate) async fn get_hierarchy_route(body: Ruma<get_hierarchy::v1::Request>)
&body.room_id, &body.room_id,
limit.try_into().unwrap_or(10), limit.try_into().unwrap_or(10),
key.map_or(vec![], |token| token.short_room_ids), key.map_or(vec![], |token| token.short_room_ids),
max_depth.try_into().unwrap_or(3), max_depth.into(),
body.suggested_only, body.suggested_only,
) )
.await .await

View file

@ -4,7 +4,11 @@ use std::{
time::Duration, time::Duration,
}; };
use conduit::PduCount; use conduit::{
error,
utils::math::{ruma_from_u64, ruma_from_usize, usize_from_ruma, usize_from_u64_truncated},
PduCount,
};
use ruma::{ use ruma::{
api::client::{ api::client::{
filter::{FilterDefinition, LazyLoadOptions}, filter::{FilterDefinition, LazyLoadOptions},
@ -27,7 +31,7 @@ use ruma::{
serde::Raw, serde::Raw,
uint, DeviceId, EventId, OwnedUserId, RoomId, UInt, UserId, uint, DeviceId, EventId, OwnedUserId, RoomId, UInt, UserId,
}; };
use tracing::{error, Instrument as _, Span}; use tracing::{Instrument as _, Span};
use crate::{service::pdu::EventHash, services, utils, Error, PduEvent, Result, Ruma, RumaResponse}; use crate::{service::pdu::EventHash, services, utils, Error, PduEvent, Result, Ruma, RumaResponse};
@ -975,8 +979,8 @@ async fn load_joined_room(
}, },
summary: RoomSummary { summary: RoomSummary {
heroes, heroes,
joined_member_count: joined_member_count.map(|n| (n as u32).into()), joined_member_count: joined_member_count.map(ruma_from_u64),
invited_member_count: invited_member_count.map(|n| (n as u32).into()), invited_member_count: invited_member_count.map(ruma_from_u64),
}, },
unread_notifications: UnreadNotificationsCount { unread_notifications: UnreadNotificationsCount {
highlight_count, highlight_count,
@ -1026,7 +1030,7 @@ fn load_timeline(
// Take the last events for the timeline // Take the last events for the timeline
timeline_pdus = non_timeline_pdus timeline_pdus = non_timeline_pdus
.by_ref() .by_ref()
.take(limit as usize) .take(usize_from_u64_truncated(limit))
.collect::<Vec<_>>() .collect::<Vec<_>>()
.into_iter() .into_iter()
.rev() .rev()
@ -1300,7 +1304,7 @@ pub(crate) async fn sync_events_v4_route(
r.0, r.0,
UInt::try_from(all_joined_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX), UInt::try_from(all_joined_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX),
); );
let room_ids = all_joined_rooms[(u64::from(r.0) as usize)..=(u64::from(r.1) as usize)].to_vec(); let room_ids = all_joined_rooms[usize_from_ruma(r.0)..=usize_from_ruma(r.1)].to_vec();
new_known_rooms.extend(room_ids.iter().cloned()); new_known_rooms.extend(room_ids.iter().cloned());
for room_id in &room_ids { for room_id in &room_ids {
let todo_room = todo_rooms let todo_room = todo_rooms
@ -1333,7 +1337,7 @@ pub(crate) async fn sync_events_v4_route(
} }
}) })
.collect(), .collect(),
count: UInt::from(all_joined_rooms.len() as u32), count: ruma_from_usize(all_joined_rooms.len()),
}, },
); );
@ -1529,20 +1533,22 @@ pub(crate) async fn sync_events_v4_route(
prev_batch, prev_batch,
limited, limited,
joined_count: Some( joined_count: Some(
(services() services()
.rooms .rooms
.state_cache .state_cache
.room_joined_count(room_id)? .room_joined_count(room_id)?
.unwrap_or(0) as u32) .unwrap_or(0)
.into(), .try_into()
.unwrap_or_else(|_| uint!(0)),
), ),
invited_count: Some( invited_count: Some(
(services() services()
.rooms .rooms
.state_cache .state_cache
.room_invited_count(room_id)? .room_invited_count(room_id)?
.unwrap_or(0) as u32) .unwrap_or(0)
.into(), .try_into()
.unwrap_or_else(|_| uint!(0)),
), ),
num_live: None, // Count events in timeline greater than global sync counter num_live: None, // Count events in timeline greater than global sync counter
timestamp: None, timestamp: None,

View file

@ -12,15 +12,24 @@ static JEMALLOC: jemalloc::Jemalloc = jemalloc::Jemalloc;
#[must_use] #[must_use]
pub fn memory_usage() -> String { pub fn memory_usage() -> String {
use mallctl::stats; use mallctl::stats;
let allocated = stats::allocated::read().unwrap_or_default() as f64 / 1024.0 / 1024.0;
let active = stats::active::read().unwrap_or_default() as f64 / 1024.0 / 1024.0; let mibs = |input: Result<usize, mallctl::Error>| {
let mapped = stats::mapped::read().unwrap_or_default() as f64 / 1024.0 / 1024.0; let input = input.unwrap_or_default();
let metadata = stats::metadata::read().unwrap_or_default() as f64 / 1024.0 / 1024.0; let kibs = input / 1024;
let resident = stats::resident::read().unwrap_or_default() as f64 / 1024.0 / 1024.0; let kibs = u32::try_from(kibs).unwrap_or_default();
let retained = stats::retained::read().unwrap_or_default() as f64 / 1024.0 / 1024.0; let kibs = f64::from(kibs);
kibs / 1024.0
};
let allocated = mibs(stats::allocated::read());
let active = mibs(stats::active::read());
let mapped = mibs(stats::mapped::read());
let metadata = mibs(stats::metadata::read());
let resident = mibs(stats::resident::read());
let retained = mibs(stats::retained::read());
format!( format!(
"allocated: {allocated:.2} MiB\n active: {active:.2} MiB\n mapped: {mapped:.2} MiB\n metadata: {metadata:.2} \ "allocated: {allocated:.2} MiB\nactive: {active:.2} MiB\nmapped: {mapped:.2} MiB\nmetadata: {metadata:.2} \
MiB\n resident: {resident:.2} MiB\n retained: {retained:.2} MiB\n " MiB\nresident: {resident:.2} MiB\nretained: {retained:.2} MiB\n"
) )
} }

View file

@ -2,6 +2,8 @@ use std::{cmp, time::Duration};
pub use checked_ops::checked_ops; pub use checked_ops::checked_ops;
use crate::{Error, Result};
/// Checked arithmetic expression. Returns a Result<R, Error::Arithmetic> /// Checked arithmetic expression. Returns a Result<R, Error::Arithmetic>
#[macro_export] #[macro_export]
macro_rules! checked { macro_rules! checked {
@ -50,3 +52,36 @@ pub fn continue_exponential_backoff(min: Duration, max: Duration, elapsed: Durat
let min = cmp::min(min, max); let min = cmp::min(min, max);
elapsed < min elapsed < min
} }
#[inline]
#[allow(clippy::as_conversions)]
pub fn usize_from_f64(val: f64) -> Result<usize, Error> {
if val < 0.0 {
return Err(Error::Arithmetic("Converting negative float to unsigned integer"));
}
Ok(val as usize)
}
#[inline]
#[must_use]
/// Convert a `ruma::UInt` to `usize`.
///
/// Panics if the value does not fit in `usize` (cannot happen on targets
/// where `usize` is at least 64 bits wide).
pub fn usize_from_ruma(val: ruma::UInt) -> usize {
	let converted: Result<usize, _> = val.try_into();
	converted.expect("failed conversion from ruma::UInt to usize")
}
#[inline]
#[must_use]
pub fn ruma_from_u64(val: u64) -> ruma::UInt {
ruma::UInt::try_from(val).expect("failed conversion from u64 to ruma::UInt")
}
#[inline]
#[must_use]
pub fn ruma_from_usize(val: usize) -> ruma::UInt {
ruma::UInt::try_from(val).expect("failed conversion from usize to ruma::UInt")
}
#[inline]
#[must_use]
#[allow(clippy::as_conversions)]
/// Convert a `u64` to `usize`, truncating.
///
/// Deliberate `as` cast: on targets where `usize` is narrower than 64 bits
/// the high-order bits are discarded. Lossless on 64-bit targets.
pub fn usize_from_u64_truncated(val: u64) -> usize {
	val as usize
}

View file

@ -28,11 +28,12 @@ pub fn format(ts: SystemTime, str: &str) -> String {
} }
#[must_use] #[must_use]
#[allow(clippy::as_conversions)]
pub fn pretty(d: Duration) -> String { pub fn pretty(d: Duration) -> String {
use Unit::*; use Unit::*;
let fmt = |w, f, u| format!("{w}.{f} {u}"); let fmt = |w, f, u| format!("{w}.{f} {u}");
let gen64 = |w, f, u| fmt(w, (f * 100.0) as u64, u); let gen64 = |w, f, u| fmt(w, (f * 100.0) as u32, u);
let gen128 = |w, f, u| gen64(u64::try_from(w).expect("u128 to u64"), f, u); let gen128 = |w, f, u| gen64(u64::try_from(w).expect("u128 to u64"), f, u);
match whole_and_frac(d) { match whole_and_frac(d) {
(Days(whole), frac) => gen64(whole, frac, "days"), (Days(whole), frac) => gen64(whole, frac, "days"),
@ -49,6 +50,7 @@ pub fn pretty(d: Duration) -> String {
/// part is the largest Unit containing a non-zero value, the frac part is a /// part is the largest Unit containing a non-zero value, the frac part is a
/// rational remainder left over. /// rational remainder left over.
#[must_use] #[must_use]
#[allow(clippy::as_conversions)]
pub fn whole_and_frac(d: Duration) -> (Unit, f64) { pub fn whole_and_frac(d: Duration) -> (Unit, f64) {
use Unit::*; use Unit::*;

View file

@ -3,7 +3,7 @@ use std::{
sync::{Arc, Mutex}, sync::{Arc, Mutex},
}; };
use conduit::{utils, Result, Server}; use conduit::{utils, utils::math::usize_from_f64, Result, Server};
use database::{Database, Map}; use database::{Database, Map};
use lru_cache::LruCache; use lru_cache::LruCache;
@ -16,7 +16,7 @@ impl Data {
pub(super) fn new(server: &Arc<Server>, db: &Arc<Database>) -> Self { pub(super) fn new(server: &Arc<Server>, db: &Arc<Database>) -> Self {
let config = &server.config; let config = &server.config;
let cache_size = f64::from(config.auth_chain_cache_capacity); let cache_size = f64::from(config.auth_chain_cache_capacity);
let cache_size = (cache_size * config.conduit_cache_capacity_modifier) as usize; let cache_size = usize_from_f64(cache_size * config.conduit_cache_capacity_modifier).expect("valid cache size");
Self { Self {
shorteventid_authchain: db["shorteventid_authchain"].clone(), shorteventid_authchain: db["shorteventid_authchain"].clone(),
auth_chain_cache: Mutex::new(LruCache::new(cache_size)), auth_chain_cache: Mutex::new(LruCache::new(cache_size)),

View file

@ -43,11 +43,11 @@ impl Service {
#[tracing::instrument(skip_all, name = "auth_chain")] #[tracing::instrument(skip_all, name = "auth_chain")]
pub async fn get_auth_chain(&self, room_id: &RoomId, starting_events: &[&EventId]) -> Result<Vec<u64>> { pub async fn get_auth_chain(&self, room_id: &RoomId, starting_events: &[&EventId]) -> Result<Vec<u64>> {
const NUM_BUCKETS: u64 = 50; //TODO: change possible w/o disrupting db? const NUM_BUCKETS: usize = 50; //TODO: change possible w/o disrupting db?
const BUCKET: BTreeSet<(u64, &EventId)> = BTreeSet::new(); const BUCKET: BTreeSet<(u64, &EventId)> = BTreeSet::new();
let started = std::time::Instant::now(); let started = std::time::Instant::now();
let mut buckets = [BUCKET; NUM_BUCKETS as usize]; let mut buckets = [BUCKET; NUM_BUCKETS];
for (i, &short) in services() for (i, &short) in services()
.rooms .rooms
.short .short
@ -55,8 +55,9 @@ impl Service {
.iter() .iter()
.enumerate() .enumerate()
{ {
let bucket = validated!(short % NUM_BUCKETS)?; let bucket: usize = short.try_into()?;
buckets[bucket as usize].insert((short, starting_events[i])); let bucket: usize = validated!(bucket % NUM_BUCKETS)?;
buckets[bucket].insert((short, starting_events[i]));
} }
debug!( debug!(

View file

@ -7,7 +7,7 @@ use std::{
sync::Arc, sync::Arc,
}; };
use conduit::{checked, debug_info}; use conduit::{checked, debug_info, utils::math::usize_from_f64};
use lru_cache::LruCache; use lru_cache::LruCache;
use ruma::{ use ruma::{
api::{ api::{
@ -161,11 +161,10 @@ impl From<CachedSpaceHierarchySummary> for SpaceHierarchyRoomsChunk {
impl crate::Service for Service { impl crate::Service for Service {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> { fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
let config = &args.server.config; let config = &args.server.config;
let cache_size = f64::from(config.roomid_spacehierarchy_cache_capacity);
let cache_size = cache_size * config.conduit_cache_capacity_modifier;
Ok(Arc::new(Self { Ok(Arc::new(Self {
roomid_spacehierarchy_cache: Mutex::new(LruCache::new( roomid_spacehierarchy_cache: Mutex::new(LruCache::new(usize_from_f64(cache_size)?)),
(f64::from(config.roomid_spacehierarchy_cache_capacity) * config.conduit_cache_capacity_modifier)
as usize,
)),
})) }))
} }
@ -447,7 +446,7 @@ impl Service {
} }
pub async fn get_client_hierarchy( pub async fn get_client_hierarchy(
&self, sender_user: &UserId, room_id: &RoomId, limit: usize, short_room_ids: Vec<u64>, max_depth: usize, &self, sender_user: &UserId, room_id: &RoomId, limit: usize, short_room_ids: Vec<u64>, max_depth: u64,
suggested_only: bool, suggested_only: bool,
) -> Result<client::space::get_hierarchy::v1::Response> { ) -> Result<client::space::get_hierarchy::v1::Response> {
let mut parents = VecDeque::new(); let mut parents = VecDeque::new();
@ -514,7 +513,8 @@ impl Service {
} }
} }
if !children.is_empty() && parents.len() < max_depth { let parents_len: u64 = parents.len().try_into()?;
if !children.is_empty() && parents_len < max_depth {
parents.push_back(current_room.clone()); parents.push_back(current_room.clone());
stack.push(children); stack.push(children);
} }
@ -549,9 +549,8 @@ impl Service {
Some( Some(
PaginationToken { PaginationToken {
short_room_ids, short_room_ids,
limit: UInt::new(max_depth as u64).expect("When sent in request it must have been valid UInt"), limit: UInt::new(max_depth).expect("When sent in request it must have been valid UInt"),
max_depth: UInt::new(max_depth as u64) max_depth: UInt::new(max_depth).expect("When sent in request it must have been valid UInt"),
.expect("When sent in request it must have been valid UInt"),
suggested_only, suggested_only,
} }
.to_string(), .to_string(),

View file

@ -6,7 +6,11 @@ use std::{
sync::{Arc, Mutex as StdMutex, Mutex}, sync::{Arc, Mutex as StdMutex, Mutex},
}; };
use conduit::{error, utils::mutex_map, warn, Error, Result}; use conduit::{
error,
utils::{math::usize_from_f64, mutex_map},
warn, Error, Result,
};
use data::Data; use data::Data;
use lru_cache::LruCache; use lru_cache::LruCache;
use ruma::{ use ruma::{
@ -44,14 +48,15 @@ pub struct Service {
impl crate::Service for Service { impl crate::Service for Service {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> { fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
let config = &args.server.config; let config = &args.server.config;
let server_visibility_cache_capacity =
f64::from(config.server_visibility_cache_capacity) * config.conduit_cache_capacity_modifier;
let user_visibility_cache_capacity =
f64::from(config.user_visibility_cache_capacity) * config.conduit_cache_capacity_modifier;
Ok(Arc::new(Self { Ok(Arc::new(Self {
db: Data::new(args.db), db: Data::new(args.db),
server_visibility_cache: StdMutex::new(LruCache::new( server_visibility_cache: StdMutex::new(LruCache::new(usize_from_f64(server_visibility_cache_capacity)?)),
(f64::from(config.server_visibility_cache_capacity) * config.conduit_cache_capacity_modifier) as usize, user_visibility_cache: StdMutex::new(LruCache::new(usize_from_f64(user_visibility_cache_capacity)?)),
)),
user_visibility_cache: StdMutex::new(LruCache::new(
(f64::from(config.user_visibility_cache_capacity) * config.conduit_cache_capacity_modifier) as usize,
)),
})) }))
} }

View file

@ -7,7 +7,7 @@ use std::{
sync::{Arc, Mutex as StdMutex, Mutex}, sync::{Arc, Mutex as StdMutex, Mutex},
}; };
use conduit::{checked, utils, Result}; use conduit::{checked, utils, utils::math::usize_from_f64, Result};
use data::Data; use data::Data;
use lru_cache::LruCache; use lru_cache::LruCache;
use ruma::{EventId, RoomId}; use ruma::{EventId, RoomId};
@ -55,11 +55,10 @@ pub struct Service {
impl crate::Service for Service { impl crate::Service for Service {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> { fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
let config = &args.server.config; let config = &args.server.config;
let cache_capacity = f64::from(config.stateinfo_cache_capacity) * config.conduit_cache_capacity_modifier;
Ok(Arc::new(Self { Ok(Arc::new(Self {
db: Data::new(args.db), db: Data::new(args.db),
stateinfo_cache: StdMutex::new(LruCache::new( stateinfo_cache: StdMutex::new(LruCache::new(usize_from_f64(cache_capacity)?)),
(f64::from(config.stateinfo_cache_capacity) * config.conduit_cache_capacity_modifier) as usize,
)),
})) }))
} }