fix arithmetic side-effects

Signed-off-by: Jason Volk <jason@zemos.net>
This commit is contained in:
Jason Volk 2024-07-07 04:46:16 +00:00
parent 52a561ff9e
commit 7397064edd
25 changed files with 139 additions and 114 deletions

View file

@@ -5,7 +5,7 @@ use std::{
sync::Arc,
};
use conduit::{debug, error, trace, warn, Error, Result};
use conduit::{debug, error, trace, validated, warn, Error, Result};
use data::Data;
use ruma::{api::client::error::ErrorKind, EventId, RoomId};
@@ -43,20 +43,20 @@ impl Service {
#[tracing::instrument(skip_all, name = "auth_chain")]
pub async fn get_auth_chain(&self, room_id: &RoomId, starting_events: &[&EventId]) -> Result<Vec<u64>> {
const NUM_BUCKETS: usize = 50; //TODO: change possible w/o disrupting db?
const NUM_BUCKETS: u64 = 50; //TODO: change possible w/o disrupting db?
const BUCKET: BTreeSet<(u64, &EventId)> = BTreeSet::new();
let started = std::time::Instant::now();
let mut buckets = [BUCKET; NUM_BUCKETS];
for (i, short) in services()
let mut buckets = [BUCKET; NUM_BUCKETS as usize];
for (i, &short) in services()
.rooms
.short
.multi_get_or_create_shorteventid(starting_events)?
.iter()
.enumerate()
{
let bucket = short % NUM_BUCKETS as u64;
buckets[bucket as usize].insert((*short, starting_events[i]));
let bucket = validated!(short % NUM_BUCKETS)?;
buckets[bucket as usize].insert((short, starting_events[i]));
}
debug!(

View file

@@ -191,7 +191,7 @@ impl Service {
e.insert((Instant::now(), 1));
},
hash_map::Entry::Occupied(mut e) => {
*e.get_mut() = (Instant::now(), e.get().1 + 1);
*e.get_mut() = (Instant::now(), e.get().1.saturating_add(1));
},
};
},
@@ -1072,7 +1072,7 @@ impl Service {
let mut todo_auth_events = vec![Arc::clone(id)];
let mut events_in_reverse_order = Vec::with_capacity(todo_auth_events.len());
let mut events_all = HashSet::with_capacity(todo_auth_events.len());
let mut i = 0;
let mut i: u64 = 0;
while let Some(next_id) = todo_auth_events.pop() {
if let Some((time, tries)) = services()
.globals
@@ -1094,7 +1094,7 @@
continue;
}
i += 1;
i = i.saturating_add(1);
if i % 100 == 0 {
tokio::task::yield_now().await;
}

View file

@@ -205,7 +205,7 @@ impl Service {
if let Ok(relations) = self.db.relations_until(user_id, room_id, target, until) {
for relation in relations.flatten() {
if stack_pdu.1 < max_depth {
stack.push((relation.clone(), stack_pdu.1 + 1));
stack.push((relation.clone(), stack_pdu.1.saturating_add(1)));
}
pdus.push(relation);

View file

@@ -76,10 +76,12 @@ impl Data {
.iter_from(&first_possible_edu, false)
.take_while(move |(k, _)| k.starts_with(&prefix2))
.map(move |(k, v)| {
let count = utils::u64_from_bytes(&k[prefix.len()..prefix.len() + size_of::<u64>()])
let count_offset = prefix.len().saturating_add(size_of::<u64>());
let count = utils::u64_from_bytes(&k[prefix.len()..count_offset])
.map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?;
let user_id_offset = count_offset.saturating_add(1);
let user_id = UserId::parse(
utils::string_from_bytes(&k[prefix.len() + size_of::<u64>() + 1..])
utils::string_from_bytes(&k[user_id_offset..])
.map_err(|_| Error::bad_database("Invalid readreceiptid userid bytes in db."))?,
)
.map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?;

View file

@@ -7,7 +7,7 @@ use std::{
sync::Arc,
};
use conduit::debug_info;
use conduit::{checked, debug_info};
use lru_cache::LruCache;
use ruma::{
api::{
@@ -508,7 +508,8 @@ impl Service {
}
// We have reached the room after where we last left off
if parents.len() + 1 == short_room_ids.len() {
let parents_len = parents.len();
if checked!(parents_len + 1)? == short_room_ids.len() {
populate_results = true;
}
}

View file

@@ -1,6 +1,6 @@
use std::{collections::HashSet, mem::size_of, sync::Arc};
use conduit::{utils, Error, Result};
use conduit::{checked, utils, Error, Result};
use database::{Database, Map};
use super::CompressedStateEvent;
@@ -38,11 +38,12 @@ impl Data {
let mut added = HashSet::new();
let mut removed = HashSet::new();
let mut i = size_of::<u64>();
while let Some(v) = value.get(i..i + 2 * size_of::<u64>()) {
let stride = size_of::<u64>();
let mut i = stride;
while let Some(v) = value.get(i..checked!(i + 2 * stride)?) {
if add_mode && v.starts_with(&0_u64.to_be_bytes()) {
add_mode = false;
i += size_of::<u64>();
i = checked!(i + stride)?;
continue;
}
if add_mode {
@@ -50,7 +51,7 @@ impl Data {
} else {
removed.insert(v.try_into().expect("we checked the size above"));
}
i += 2 * size_of::<u64>();
i = checked!(i + 2 * stride)?;
}
Ok(StateDiff {

View file

@@ -7,7 +7,7 @@ use std::{
sync::{Arc, Mutex as StdMutex, Mutex},
};
use conduit::{utils, Result};
use conduit::{checked, utils, Result};
use data::Data;
use lru_cache::LruCache;
use ruma::{EventId, RoomId};
@@ -169,12 +169,14 @@ impl Service {
statediffremoved: Arc<HashSet<CompressedStateEvent>>, diff_to_sibling: usize,
mut parent_states: ParentStatesVec,
) -> Result<()> {
let diffsum = statediffnew.len() + statediffremoved.len();
let statediffnew_len = statediffnew.len();
let statediffremoved_len = statediffremoved.len();
let diffsum = checked!(statediffnew_len + statediffremoved_len)?;
if parent_states.len() > 3 {
// Number of layers
// To many layers, we have to go deeper
let parent = parent_states.pop().unwrap();
let parent = parent_states.pop().expect("parent must have a state");
let mut parent_new = (*parent.2).clone();
let mut parent_removed = (*parent.3).clone();
@@ -226,10 +228,12 @@ impl Service {
// 1. We add the current diff on top of the parent layer.
// 2. We replace a layer above
let parent = parent_states.pop().unwrap();
let parent_diff = parent.2.len() + parent.3.len();
let parent = parent_states.pop().expect("parent must have a state");
let parent_2_len = parent.2.len();
let parent_3_len = parent.3.len();
let parent_diff = checked!(parent_2_len + parent_3_len)?;
if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff {
if checked!(diffsum * diffsum)? >= checked!(2 * diff_to_sibling * parent_diff)? {
// Diff too big, we replace above layer(s)
let mut parent_new = (*parent.2).clone();
let mut parent_removed = (*parent.3).clone();

View file

@@ -1,6 +1,6 @@
use std::{mem::size_of, sync::Arc};
use conduit::{utils, Error, Result};
use conduit::{checked, utils, Error, Result};
use database::{Database, Map};
use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId};
@@ -31,7 +31,7 @@ impl Data {
.to_vec();
let mut current = prefix.clone();
current.extend_from_slice(&(until - 1).to_be_bytes());
current.extend_from_slice(&(checked!(until - 1)?).to_be_bytes());
Ok(Box::new(
self.threadid_userids

View file

@@ -64,7 +64,7 @@ impl Service {
.and_then(|relations| serde_json::from_value::<BundledThread>(relations.clone().into()).ok())
{
// Thread already existed
relations.count += uint!(1);
relations.count = relations.count.saturating_add(uint!(1));
relations.latest_event = pdu.to_message_like_event();
let content = serde_json::to_value(relations).expect("to_value always works");

View file

@@ -4,7 +4,7 @@ use std::{
sync::{Arc, Mutex},
};
use conduit::{error, utils, Error, Result};
use conduit::{checked, error, utils, Error, Result};
use database::{Database, Map};
use ruma::{api::client::error::ErrorKind, CanonicalJsonObject, EventId, OwnedRoomId, OwnedUserId, RoomId, UserId};
@@ -281,10 +281,12 @@ impl Data {
/// Returns the `count` of this pdu's id.
pub(super) fn pdu_count(pdu_id: &[u8]) -> Result<PduCount> {
let last_u64 = utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::<u64>()..])
let stride = size_of::<u64>();
let pdu_id_len = pdu_id.len();
let last_u64 = utils::u64_from_bytes(&pdu_id[checked!(pdu_id_len - stride)?..])
.map_err(|_| Error::bad_database("PDU has invalid count bytes."))?;
let second_last_u64 =
utils::u64_from_bytes(&pdu_id[pdu_id.len() - 2 * size_of::<u64>()..pdu_id.len() - size_of::<u64>()]);
utils::u64_from_bytes(&pdu_id[checked!(pdu_id_len - 2 * stride)?..checked!(pdu_id_len - stride)?]);
if matches!(second_last_u64, Ok(0)) {
Ok(PduCount::Backfilled(u64::MAX.saturating_sub(last_u64)))

View file

@@ -6,7 +6,7 @@ use std::{
sync::Arc,
};
use conduit::{debug, error, info, utils, utils::mutex_map, warn, Error, Result};
use conduit::{debug, error, info, utils, utils::mutex_map, validated, warn, Error, Result};
use data::Data;
use itertools::Itertools;
use rand::prelude::SliceRandom;
@@ -670,7 +670,7 @@ impl Service {
.filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth))
.max()
.unwrap_or_else(|| uint!(0))
+ uint!(1);
.saturating_add(uint!(1));
let mut unsigned = unsigned.unwrap_or_default();
@@ -1240,10 +1240,11 @@
let insert_lock = services().globals.roomid_mutex_insert.lock(&room_id).await;
let max = u64::MAX;
let count = services().globals.next_count()?;
let mut pdu_id = shortroomid.to_be_bytes().to_vec();
pdu_id.extend_from_slice(&0_u64.to_be_bytes());
pdu_id.extend_from_slice(&(u64::MAX - count).to_be_bytes());
pdu_id.extend_from_slice(&(validated!(max - count)?).to_be_bytes());
// Insert pdu
self.db.prepend_backfill_pdu(&pdu_id, &event_id, &value)?;