resolve half of the integer_arithmetic lints, couple misc changes

Signed-off-by: strawberry <strawberry@puppygock.gay>
strawberry 2024-05-03 21:42:47 -04:00 committed by June
parent ac4590952b
commit b5c0c30a5e
34 changed files with 188 additions and 109 deletions
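
Note: the lint being resolved here, `clippy::integer_arithmetic`, flags bare `+`, `-`, and `*` on integers because overflow panics in debug builds and silently wraps in release builds. The saturating methods substituted throughout this commit clamp at the numeric bounds instead. A minimal sketch of the difference:

```rust
fn main() {
    let since = u64::MAX;
    // `since + 1` would panic in a debug build and wrap to 0 in release.
    assert_eq!(since.saturating_add(1), u64::MAX); // clamps at the maximum
    assert_eq!(0u64.saturating_sub(1), 0); // clamps at zero instead of wrapping
}
```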

@@ -105,7 +105,7 @@ impl service::account_data::Data for KeyValueDatabase {
     // Skip the data that's exactly at since, because we sent that last time
     let mut first_possible = prefix.clone();
-    first_possible.extend_from_slice(&(since + 1).to_be_bytes());
+    first_possible.extend_from_slice(&(since.saturating_add(1)).to_be_bytes());
     for r in self
         .roomuserdataid_accountdata

@@ -158,18 +158,15 @@ impl service::globals::Data for KeyValueDatabase {
     let max_appservice_in_room_cache = self.appservice_in_room_cache.read().unwrap().capacity();
     let max_lasttimelinecount_cache = self.lasttimelinecount_cache.lock().unwrap().capacity();
-    let mut response = format!(
+    format!(
         "\
 auth_chain_cache: {auth_chain_cache} / {max_auth_chain_cache}
 our_real_users_cache: {our_real_users_cache} / {max_our_real_users_cache}
 appservice_in_room_cache: {appservice_in_room_cache} / {max_appservice_in_room_cache}
-lasttimelinecount_cache: {lasttimelinecount_cache} / {max_lasttimelinecount_cache}\n\n"
-    );
-    if let Ok(db_stats) = self.db.memory_usage() {
-        response += &db_stats;
-    }
-    response
+lasttimelinecount_cache: {lasttimelinecount_cache} / {max_lasttimelinecount_cache}\n\n
+{}",
+        self.db.memory_usage().unwrap_or_default()
+    )
 }
 fn clear_caches(&self, amount: u32) {
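
Note: the hunk above folds the database stats straight into the `format!` call: `memory_usage()` returns a `Result` whose Ok type is `String` here, and `unwrap_or_default()` substitutes an empty string on failure, replacing the mutable `response` buffer and the `if let Ok(..)` append. A simplified sketch with a stand-in `memory_usage` stub (not the real database call):

```rust
// Stand-in stub for the database's memory_usage(), which returns a Result<String>.
fn memory_usage() -> Result<String, ()> {
    Ok("rocksdb block cache: 32 MiB".to_owned())
}

fn memory_usage_report(hits: usize, capacity: usize) -> String {
    // On Err, unwrap_or_default() yields "" and the report simply ends early.
    format!(
        "auth_chain_cache: {hits} / {capacity}\n\n{}",
        memory_usage().unwrap_or_default()
    )
}

fn main() {
    println!("{}", memory_usage_report(120, 1024));
}
```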

@@ -23,10 +23,10 @@ impl service::rooms::pdu_metadata::Data for KeyValueDatabase {
     let mut current = prefix.clone();
     let count_raw = match until {
-        PduCount::Normal(x) => x - 1,
+        PduCount::Normal(x) => x.saturating_sub(1),
         PduCount::Backfilled(x) => {
             current.extend_from_slice(&0_u64.to_be_bytes());
-            u64::MAX - x - 1
+            u64::MAX.saturating_sub(x).saturating_sub(1)
         },
     };
     current.extend_from_slice(&count_raw.to_be_bytes());
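
Note: the `u64::MAX.saturating_sub(x)` pattern here and in the hunks below reflects how backfilled PDU counts appear to be keyed: the stored value is the complement of the count, so deeper backfill produces numerically smaller values that sort earlier under big-endian byte ordering. A sketch of that reading (the complement encoding is inferred from this code, not documented here):

```rust
// Assumed encoding: a backfilled count x is stored as its complement, so
// that deeper backfill (larger x) sorts earlier in byte-ordered keys.
fn backfilled_key_suffix(x: u64) -> [u8; 8] {
    u64::MAX.saturating_sub(x).to_be_bytes()
}

fn main() {
    // Larger backfill depth yields a byte-wise smaller (earlier) key suffix.
    assert!(backfilled_key_suffix(10) < backfilled_key_suffix(3));
}
```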

@@ -48,7 +48,7 @@ impl service::rooms::read_receipt::Data for KeyValueDatabase {
     let prefix2 = prefix.clone();
     let mut first_possible_edu = prefix.clone();
-    first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since
+    first_possible_edu.extend_from_slice(&(since.saturating_add(1)).to_be_bytes()); // +1 so we don't send the event at since
     Box::new(
         self.readreceiptid_readreceipt

@@ -17,7 +17,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
         .expect("there is always one layer")
         .1;
     let mut result = HashMap::new();
-    let mut i = 0;
+    let mut i: u8 = 0;
     for compressed in full_state.iter() {
         let parsed = services()
             .rooms
@@ -25,7 +25,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
             .parse_compressed_state_event(compressed)?;
         result.insert(parsed.0, parsed.1);
-        i += 1;
+        i = i.saturating_add(1);
         if i % 100 == 0 {
             tokio::task::yield_now().await;
         }
@@ -44,7 +44,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
         .1;
     let mut result = HashMap::new();
-    let mut i = 0;
+    let mut i: u8 = 0;
     for compressed in full_state.iter() {
         let (_, eventid) = services()
             .rooms
@@ -63,7 +63,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
             );
         }
-        i += 1;
+        i = i.saturating_add(1);
         if i % 100 == 0 {
             tokio::task::yield_now().await;
         }
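
Note: one property of the newly typed `u8` counters above worth keeping in mind: `saturating_add` pins `i` at 255, after which `i % 100` is permanently 55, so the periodic yield stops firing on state sets larger than 255 entries. A standalone demonstration of the arithmetic (not the service code itself):

```rust
// Demonstrates the saturation behaviour of a u8 counter paired with a
// `% 100 == 0` check: once i saturates at u8::MAX (255), the check can
// never fire again. A wider type (e.g. u32) would keep firing.
fn main() {
    let mut i: u8 = 0;
    let mut yields = 0;
    for _ in 0..1_000 {
        i = i.saturating_add(1);
        if i % 100 == 0 {
            yields += 1;
        }
    }
    // Only i == 100 and i == 200 trigger the check; 300+ is unreachable in u8.
    assert_eq!(yields, 2);
}
```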

@@ -154,11 +154,11 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
         if user_is_local(&joined) && !services().users.is_deactivated(&joined).unwrap_or(true) {
             real_users.insert(joined);
         }
-        joinedcount += 1;
+        joinedcount = joinedcount.saturating_add(1);
     }
     for _invited in self.room_members_invited(room_id).filter_map(Result::ok) {
-        invitedcount += 1;
+        invitedcount = invitedcount.saturating_add(1);
     }
     self.roomid_joinedcount

@@ -19,7 +19,7 @@ impl service::rooms::threads::Data for KeyValueDatabase {
         .to_vec();
     let mut current = prefix.clone();
-    current.extend_from_slice(&(until - 1).to_be_bytes());
+    current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes());
     Ok(Box::new(
         self.threadid_userids

@@ -256,7 +256,7 @@ fn pdu_count(pdu_id: &[u8]) -> Result<PduCount> {
         utils::u64_from_bytes(&pdu_id[pdu_id.len() - 2 * size_of::<u64>()..pdu_id.len() - size_of::<u64>()]);
     if matches!(second_last_u64, Ok(0)) {
-        Ok(PduCount::Backfilled(u64::MAX - last_u64))
+        Ok(PduCount::Backfilled(u64::MAX.saturating_sub(last_u64)))
     } else {
         Ok(PduCount::Normal(last_u64))
     }
@@ -275,22 +275,22 @@ fn count_to_id(room_id: &RoomId, count: PduCount, offset: u64, subtract: bool) -
     let count_raw = match count {
         PduCount::Normal(x) => {
             if subtract {
-                x - offset
+                x.saturating_sub(offset)
             } else {
-                x + offset
+                x.saturating_add(offset)
             }
         },
         PduCount::Backfilled(x) => {
             pdu_id.extend_from_slice(&0_u64.to_be_bytes());
-            let num = u64::MAX - x;
+            let num = u64::MAX.saturating_sub(x);
             if subtract {
                 if num > 0 {
-                    num - offset
+                    num.saturating_sub(offset)
                 } else {
                     num
                 }
             } else {
-                num + offset
+                num.saturating_add(offset)
             }
         },
     };
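
Note: to make the branches above concrete: for `Normal` counts the offset is applied to the count directly, while for `Backfilled` counts it is applied to the complement (`u64::MAX - x`), i.e. to the raw key value. A standalone mirror of that arithmetic, assuming the same complement encoding noted earlier:

```rust
// Mirrors the PduCount enum used above, for a self-contained example.
enum PduCount {
    Normal(u64),
    Backfilled(u64),
}

// Standalone mirror of the arithmetic in count_to_id, showing what the
// saturating calls protect against when a count sits at a boundary.
fn raw_count(count: PduCount, offset: u64, subtract: bool) -> u64 {
    match count {
        PduCount::Normal(x) => {
            if subtract {
                x.saturating_sub(offset)
            } else {
                x.saturating_add(offset)
            }
        },
        PduCount::Backfilled(x) => {
            // The stored value is the complement of the backfill depth.
            let num = u64::MAX.saturating_sub(x);
            if subtract {
                if num > 0 {
                    num.saturating_sub(offset)
                } else {
                    num
                }
            } else {
                num.saturating_add(offset)
            }
        },
    }
}

fn main() {
    // Without saturation, 0 - 1 would panic in a debug build.
    assert_eq!(raw_count(PduCount::Normal(0), 1, true), 0);
    // Backfilled depth 5, stepping one deeper in the raw key space.
    assert_eq!(raw_count(PduCount::Backfilled(5), 1, true), u64::MAX - 6);
}
```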

@@ -110,7 +110,8 @@ impl service::rooms::user::Data for KeyValueDatabase {
         .enumerate()
         .find(|(_, &b)| b == 0xFF)
         .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))?
-        .0 + 1; // +1 because the room id starts AFTER the separator
+        .0
+        .saturating_add(1); // +1 because the room id starts AFTER the separator
     let room_id = key[roomid_index..].to_vec();

@@ -5,7 +5,7 @@ use ruma::{
     encryption::{CrossSigningKey, DeviceKeys, OneTimeKey},
     events::{AnyToDeviceEvent, StateEventType},
     serde::Raw,
-    DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedDeviceKeyId,
+    uint, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, OwnedDeviceId, OwnedDeviceKeyId,
     OwnedMxcUri, OwnedUserId, UInt, UserId,
 };
 use tracing::warn;
@@ -414,7 +414,7 @@ impl service::users::Data for KeyValueDatabase {
             .algorithm(),
         )
     }) {
-        *counts.entry(algorithm?).or_default() += UInt::from(1_u32);
+        *counts.entry(algorithm?).or_default() += uint!(1);
     }
     Ok(counts)
@@ -561,7 +561,7 @@ impl service::users::Data for KeyValueDatabase {
     prefix.push(0xFF);
     let mut start = prefix.clone();
-    start.extend_from_slice(&(from + 1).to_be_bytes());
+    start.extend_from_slice(&(from.saturating_add(1)).to_be_bytes());
     let to = to.unwrap_or(u64::MAX);
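
Note: `uint!` is ruma's compile-time-checked literal macro for its `UInt` type (a non-negative integer capped at 2^53 - 1 for JSON interoperability); it replaces the runtime `UInt::from(1_u32)` conversion. A sketch of the counting pattern, assuming a ruma version that exports `uint!` at the crate root as the import change above suggests:

```rust
use std::collections::BTreeMap;

use ruma::{uint, DeviceKeyAlgorithm, UInt};

fn main() {
    let mut counts: BTreeMap<DeviceKeyAlgorithm, UInt> = BTreeMap::new();
    // Count one-time keys per algorithm; uint!(1) is validated at compile time.
    for alg in [
        DeviceKeyAlgorithm::SignedCurve25519,
        DeviceKeyAlgorithm::SignedCurve25519,
    ] {
        *counts.entry(alg).or_default() += uint!(1);
    }
    assert_eq!(counts[&DeviceKeyAlgorithm::SignedCurve25519], uint!(2));
}
```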

@@ -173,13 +173,13 @@ pub(crate) async fn migrations(db: &KeyValueDatabase, config: &Config) -> Result
     let mut current_sstatehash: Option<u64> = None;
     let mut current_room = None;
     let mut current_state = HashSet::new();
-    let mut counter = 0;
+    let mut counter: u32 = 0;
     let mut handle_state = |current_sstatehash: u64,
                             current_room: &RoomId,
                             current_state: HashSet<_>,
                             last_roomstates: &mut HashMap<_, _>| {
-        counter += 1;
+        counter = counter.saturating_add(1);
         let last_roomsstatehash = last_roomstates.get(current_room);
         let states_parents = last_roomsstatehash.map_or_else(

@@ -401,18 +401,18 @@ impl KeyValueDatabase {
     let sqlite_exists = path.join("conduit.db").exists();
     let rocksdb_exists = path.join("IDENTITY").exists();
-    let mut count = 0;
+    let mut count: u8 = 0;
     if sqlite_exists {
-        count += 1;
+        count = count.saturating_add(1);
     }
     if rocksdb_exists {
-        count += 1;
+        count = count.saturating_add(1);
     }
     if count > 1 {
-        warn!("Multiple databases at database_path detected");
+        error!("Multiple databases at database_path detected");
         return Ok(());
     }

@@ -145,7 +145,14 @@ pub(crate) fn cf_options(cfg: &Config, name: &str, mut opts: Options, cache: &mu
         cache_size(cfg, cfg.statekeyshort_cache_capacity, 1024),
     ),
-    "pduid_pdu" => set_table_with_new_cache(&mut opts, cfg, cache, name, cfg.pdu_cache_capacity as usize * 1536),
+    #[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)]
+    "pduid_pdu" => set_table_with_new_cache(
+        &mut opts,
+        cfg,
+        cache,
+        name,
+        (cfg.pdu_cache_capacity as usize).saturating_mul(1536),
+    ),
     "eventid_outlierpdu" => set_table_with_shared_cache(&mut opts, cfg, cache, name, "pduid_pdu"),
@@ -309,7 +316,10 @@ fn set_table_with_shared_cache(
 fn cache_size(config: &Config, base_size: u32, entity_size: usize) -> usize {
     let ents = f64::from(base_size) * config.conduit_cache_capacity_modifier;
-    ents as usize * entity_size
+    #[allow(clippy::as_conversions, clippy::cast_sign_loss, clippy::cast_possible_truncation)]
+    (ents as usize)
+        .checked_mul(entity_size)
+        .expect("cache capacity size is too large")
 }
 fn table_options(_config: &Config) -> BlockBasedOptions {
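
Note: the `#[allow]` attributes above exist because the `as` casts are deliberate: in Rust, float-to-integer `as` casts saturate at the target type's bounds (and map NaN to 0) rather than panicking, and the follow-up `checked_mul` turns a would-be overflow into an explicit panic with a message. A small demonstration mirroring `cache_size`:

```rust
// Mirrors the cache_size calculation above with the config inlined as
// plain parameters, to show the conversion behaviour in isolation.
fn cache_size(base_size: u32, modifier: f64, entity_size: usize) -> usize {
    let ents = f64::from(base_size) * modifier;
    // Float-to-int `as` casts saturate at usize bounds (NaN becomes 0).
    let ents = ents as usize;
    ents.checked_mul(entity_size)
        .expect("cache capacity size is too large")
}

fn main() {
    assert_eq!(cache_size(1024, 1.5, 64), 1536 * 64);
    // A negative modifier saturates the cast to 0 rather than panicking.
    assert_eq!(cache_size(1024, -1.0, 64), 0);
}
```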