actually fix all let_underscore_must_use lints

CI caught some more

Signed-off-by: strawberry <strawberry@puppygock.gay>
strawberry 2024-05-24 19:20:19 -04:00 committed by June 🍓🦴
parent 0877ee6191
commit 6269822613
8 changed files with 54 additions and 21 deletions

View file

@@ -124,7 +124,9 @@ pub(crate) async fn get_remote_pdu_list(
 	for pdu in list {
 		if force {
-			_ = get_remote_pdu(Vec::new(), Box::from(pdu), server.clone()).await;
+			if let Err(e) = get_remote_pdu(Vec::new(), Box::from(pdu), server.clone()).await {
+				warn!(%e, "Failed to get remote PDU, ignoring error");
+			}
 		} else {
 			get_remote_pdu(Vec::new(), Box::from(pdu), server.clone()).await?;
 		}
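For reference, a minimal sketch (not from this repository) of the pattern this hunk adopts. `fetch_pdu` is a hypothetical stand-in for `get_remote_pdu`; the `%e` sigil in `warn!` records the error through its `Display` impl:

use tracing::warn;

async fn fetch_pdu(id: &str) -> Result<(), String> {
    Err(format!("no connection to the origin of {id}"))
}

async fn fetch_all(ids: &[&str], force: bool) -> Result<(), String> {
    for id in ids {
        if force {
            // Log and keep going instead of silently discarding with `_ =`.
            if let Err(e) = fetch_pdu(id).await {
                warn!(%e, "Failed to get remote PDU, ignoring error");
            }
        } else {
            // On the strict path the error still propagates.
            fetch_pdu(id).await?;
        }
    }
    Ok(())
}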

View file

@@ -46,7 +46,7 @@ pub(crate) enum RoomAliasCommand {
 		room_alias_localpart: String,
 	},

-	/// - Remove an alias
+	/// - Remove a local alias
 	Remove {
 		/// The alias localpart to remove (`alias`, not `#alias:servername.tld`)
 		room_alias_localpart: String,

View file

@@ -317,8 +317,12 @@ pub(crate) async fn sync_events_route(
 		if duration.as_secs() > 30 {
 			duration = Duration::from_secs(30);
 		}
-		_ = tokio::time::timeout(duration, watcher).await;
+
+		#[allow(clippy::let_underscore_must_use)]
+		{
+			_ = tokio::time::timeout(duration, watcher).await;
+		}
 	}

 	Ok(response)
 }
@@ -1594,8 +1598,11 @@ pub(crate) async fn sync_events_v4_route(
 		if duration.as_secs() > 30 {
 			duration = Duration::from_secs(30);
 		}
-		_ = tokio::time::timeout(duration, watcher).await;
+		#[allow(clippy::let_underscore_must_use)]
+		{
+			_ = tokio::time::timeout(duration, watcher).await;
+		}
 	}

 	Ok(sync_events::v4::Response {
 		initial: globalsince == 0,
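Both sync hunks use the same trick: `tokio::time::timeout` returns a must_use `Result`, and `clippy::let_underscore_must_use` (an opt-in restriction lint) rejects discarding it with `_ =`. Wrapping only the discard in an `#[allow]` block keeps the lint active for the rest of the function. A minimal sketch, with a hypothetical `checked_op`:

#[must_use]
fn checked_op() -> bool { true }

fn main() {
    // Only this block is exempt; forgotten results elsewhere in the
    // function are still flagged by the lint.
    #[allow(clippy::let_underscore_must_use)]
    {
        _ = checked_op(); // intentionally discarded
    }
}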

View file

@@ -22,6 +22,7 @@ use crate::{layers, serve};

 /// Main loop base
 #[tracing::instrument(skip_all)]
+#[allow(clippy::let_underscore_must_use)] // various of these are intended
 pub(crate) async fn run(server: Arc<Server>) -> Result<(), Error> {
 	let config = &server.config;
 	let app = layers::build(&server)?;
@@ -70,6 +71,7 @@ pub(crate) async fn run(server: Arc<Server>) -> Result<(), Error> {

 /// Async initializations
 #[tracing::instrument(skip_all)]
+#[allow(clippy::let_underscore_must_use)]
 pub(crate) async fn start(server: Arc<Server>) -> Result<(), Error> {
 	debug!("Starting...");
 	let d = Arc::new(KeyValueDatabase::load_or_create(&server).await?);

View file

@@ -79,11 +79,13 @@ impl RotationHandler {

 	pub fn watch(&self) -> impl Future<Output = ()> {
 		let mut r = self.0.subscribe();

+		#[allow(clippy::let_underscore_must_use)]
 		async move {
 			_ = r.recv().await;
 		}
 	}

+	#[allow(clippy::let_underscore_must_use)]
 	pub fn fire(&self) { _ = self.0.send(()); }
 }
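The errors ignored in `watch` and `fire` are benign by design. A runnable sketch of the `tokio::sync::broadcast` semantics they rely on (assumes tokio with the `sync`, `rt`, and `macros` features): `send` only fails when nobody is subscribed, and `recv` only fails when the sender is gone or the receiver lagged.

use tokio::sync::broadcast;

#[tokio::main]
async fn main() {
    let (tx, _) = broadcast::channel::<()>(1);

    // No live receiver: `send` returns Err, which `fire()` ignores.
    assert!(tx.send(()).is_err());

    // With a subscriber, a fired signal wakes the watcher.
    let mut rx = tx.subscribe();
    assert!(tx.send(()).is_ok());
    assert!(rx.recv().await.is_ok());
}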

View file

@@ -271,17 +271,20 @@ impl super::Service {
 		{
 			let mut pkm = pub_key_map.write().await;

-			// Try to fetch keys, failure is okay
-			// Servers we couldn't find in the cache will be added to `servers`
-			for pdu in &event.room_state.state {
-				_ = self
-					.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm)
-					.await;
-			}
-			for pdu in &event.room_state.auth_chain {
-				_ = self
-					.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm)
-					.await;
-			}
+			// Try to fetch keys, failure is okay. Servers we couldn't find in the cache
+			// will be added to `servers`
+			for pdu in event
+				.room_state
+				.state
+				.iter()
+				.chain(&event.room_state.auth_chain)
+			{
+				if let Err(error) = self
+					.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm)
+					.await
+				{
+					debug!(%error, "failed to get server keys from cache");
+				};
+			}

 			drop(pkm);
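Besides logging the error, this refactor collapses two identical loops into one by chaining the iterators. The shape of that change, reduced to a standalone sketch:

fn main() {
    let state = vec!["state_pdu_1", "state_pdu_2"];
    let auth_chain = vec!["auth_pdu_1"];

    // One pass over both collections; `.chain` accepts anything that is
    // IntoIterator with a matching item type, here `&Vec<&str>`.
    for pdu in state.iter().chain(&auth_chain) {
        println!("checking cache for {pdu}");
    }
}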

View file

@@ -6,6 +6,7 @@ use ruma::{
 	OwnedRoomId, OwnedUserId, RoomId, UserId,
 };
 use tokio::sync::{broadcast, RwLock};
+use tracing::trace;

 use crate::{
 	debug_info, services, user_is_local,
@ -37,7 +38,9 @@ impl Service {
.write() .write()
.await .await
.insert(room_id.to_owned(), services().globals.next_count()?); .insert(room_id.to_owned(), services().globals.next_count()?);
_ = self.typing_update_sender.send(room_id.to_owned()); if self.typing_update_sender.send(room_id.to_owned()).is_err() {
trace!("receiver found what it was looking for and is no longer interested");
}
// update federation // update federation
if user_is_local(user_id) { if user_is_local(user_id) {
@ -61,7 +64,9 @@ impl Service {
.write() .write()
.await .await
.insert(room_id.to_owned(), services().globals.next_count()?); .insert(room_id.to_owned(), services().globals.next_count()?);
_ = self.typing_update_sender.send(room_id.to_owned()); if self.typing_update_sender.send(room_id.to_owned()).is_err() {
trace!("receiver found what it was looking for and is no longer interested");
}
// update federation // update federation
if user_is_local(user_id) { if user_is_local(user_id) {
@ -114,7 +119,9 @@ impl Service {
.write() .write()
.await .await
.insert(room_id.to_owned(), services().globals.next_count()?); .insert(room_id.to_owned(), services().globals.next_count()?);
_ = self.typing_update_sender.send(room_id.to_owned()); if self.typing_update_sender.send(room_id.to_owned()).is_err() {
trace!("receiver found what it was looking for and is no longer interested");
}
// update federation // update federation
for user in removable { for user in removable {
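The same three-line send-and-trace pattern now appears three times in this service. A hypothetical helper (not part of the commit) that would fold them together, assuming tokio and tracing as dependencies:

use tokio::sync::broadcast;
use tracing::trace;

/// Hypothetical: send a typing update and downgrade the "no receivers"
/// error to a trace, since it only means nobody is watching right now.
fn notify<T: Clone>(sender: &broadcast::Sender<T>, update: T) {
    if sender.send(update).is_err() {
        trace!("receiver found what it was looking for and is no longer interested");
    }
}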

View file

@@ -10,7 +10,7 @@ use tokio::{
 	fs,
 	sync::{broadcast, Mutex, RwLock},
 };
-use tracing::{debug, info, trace};
+use tracing::{debug, info, trace, warn};

 use crate::{
 	account_data, admin, appservice, globals, key_backups, media, presence, pusher, rooms, sending, transaction_ids,
@@ -293,8 +293,12 @@ bad_signature_ratelimiter: {bad_signature_ratelimiter}

 		if self.globals.allow_check_for_updates() {
 			let handle = globals::updates::start_check_for_updates_task().await?;
-			_ = self.globals.updates_handle.lock().await.insert(handle);
+
+			#[allow(clippy::let_underscore_must_use)] // needed for shutdown
+			{
+				_ = self.globals.updates_handle.lock().await.insert(handle);
+			}
 		}

 		debug_info!("Services startup complete.");
 		Ok(())
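The `_ =` kept inside that allow block is there because `Option::insert` returns `&mut T` and is marked `#[must_use]`, even when the caller only wants the side effect of storing the handle. A tiny sketch:

fn main() {
    let mut slot: Option<u32> = None;

    // `insert` stores the value and hands back `&mut u32`; we only want
    // the store, so the reference is deliberately discarded.
    _ = slot.insert(42);
    assert_eq!(slot, Some(42));
}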
@@ -319,14 +323,20 @@ bad_signature_ratelimiter: {bad_signature_ratelimiter}

 		debug!("Removing unix socket file.");
 		if let Some(path) = self.globals.unix_socket_path().as_ref() {
-			_ = fs::remove_file(path).await;
+			if let Err(e) = fs::remove_file(path).await {
+				warn!("Failed to remove UNIX socket file: {e}");
+			}
 		}

 		debug!("Waiting for update worker...");
 		if let Some(updates_handle) = self.globals.updates_handle.lock().await.take() {
 			updates_handle.abort();
-			_ = updates_handle.await;
+
+			#[allow(clippy::let_underscore_must_use)]
+			{
+				_ = updates_handle.await;
+			}
 		}

 		debug!("Waiting for admin worker...");
 		self.admin.close().await;
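Awaiting `updates_handle` after `abort()` yields an `Err` almost by definition, which is why that discard keeps its allow block. A runnable sketch of the shutdown sequence (assumes tokio with the `rt`, `time`, and `macros` features):

use std::time::Duration;

#[tokio::main]
async fn main() {
    let handle = tokio::spawn(async {
        tokio::time::sleep(Duration::from_secs(3600)).await;
    });

    handle.abort();

    // An aborted task resolves to Err(JoinError) with is_cancelled() == true;
    // the shutdown path discards exactly this expected error.
    match handle.await {
        Err(e) if e.is_cancelled() => println!("update worker cancelled cleanly"),
        other => println!("unexpected: {other:?}"),
    }
}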