refactor: use async-aware RwLocks and Mutexes where possible
squashed from https://gitlab.com/famedly/conduit/-/merge_requests/595

Signed-off-by: strawberry <strawberry@puppygock.gay>

parent 46b543eebe
commit 4ec2d3ecb5
20 changed files with 174 additions and 194 deletions
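Note: the change is mechanical but repeated across many call sites. Shared state touched from async request handlers moves from std::sync locks (acquired with .unwrap(), which blocks a runtime worker thread and panics if the lock is poisoned) to tokio::sync locks (acquired with .await, which suspends the task instead). A minimal sketch of the pattern, assuming a tokio runtime; the Globals struct and field below are illustrative stand-ins, not code from this commit:

    use std::collections::HashMap;
    use std::time::Instant;
    use tokio::sync::RwLock;

    // Hypothetical stand-in for one of the converted fields.
    struct Globals {
        bad_query_ratelimiter: RwLock<HashMap<String, (Instant, u32)>>,
    }

    async fn tries_so_far(globals: &Globals, id: &str) -> u32 {
        // Before: .read().unwrap() blocks the worker thread while contended and
        // panics if the lock was poisoned.
        // After: .read().await suspends the task instead; tokio locks cannot be poisoned.
        globals.bad_query_ratelimiter.read().await.get(id).map_or(0, |(_, tries)| *tries)
    }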
@@ -281,17 +281,19 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
     let mut failures = BTreeMap::new();

-    let back_off = |id| match services().globals.bad_query_ratelimiter.write().unwrap().entry(id) {
-        hash_map::Entry::Vacant(e) => {
-            e.insert((Instant::now(), 1));
-        },
-        hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
+    let back_off = |id| async {
+        match services().globals.bad_query_ratelimiter.write().await.entry(id) {
+            hash_map::Entry::Vacant(e) => {
+                e.insert((Instant::now(), 1));
+            },
+            hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
+        }
     };

     let mut futures: FuturesUnordered<_> = get_over_federation
         .into_iter()
         .map(|(server, vec)| async move {
-            if let Some((time, tries)) = services().globals.bad_query_ratelimiter.read().unwrap().get(server) {
+            if let Some((time, tries)) = services().globals.bad_query_ratelimiter.read().await.get(server) {
                 // Exponential backoff
                 let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries);
                 if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {

@@ -354,7 +356,7 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
                 device_keys.extend(response.device_keys);
             },
             _ => {
-                back_off(server.to_owned());
+                back_off(server.to_owned()).await;
                 failures.insert(server.to_string(), json!({}));
             },
         }
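Note how the back_off helper changes shape above: since the write lock is now taken with .await, the plain closure becomes a closure returning an async block, and every call site has to await it, as the second hunk shows with back_off(server.to_owned()).await. A rough sketch of that shape, with a local map standing in for services().globals.bad_query_ratelimiter (the block is async move here only because the sketch captures a local reference; the real code reaches the map through the global services() accessor):

    use std::collections::{hash_map, HashMap};
    use std::time::Instant;
    use tokio::sync::RwLock;

    async fn demo(ratelimiter: &RwLock<HashMap<String, (Instant, u32)>>, server: &str) {
        // The closure returns a future; its body only runs once that future is awaited.
        let back_off = |id: String| async move {
            match ratelimiter.write().await.entry(id) {
                hash_map::Entry::Vacant(e) => {
                    e.insert((Instant::now(), 1));
                },
                hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
            }
        };

        // Dropping the `.await` here would still compile but silently skip the bookkeeping.
        back_off(server.to_owned()).await;
    }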
@@ -440,8 +440,7 @@ async fn get_url_preview(url: &str) -> Result<UrlPreviewData> {
     }

     // ensure that only one request is made per URL
-    let mutex_request =
-        Arc::clone(services().media.url_preview_mutex.write().unwrap().entry(url.to_owned()).or_default());
+    let mutex_request = Arc::clone(services().media.url_preview_mutex.write().await.entry(url.to_owned()).or_default());
     let _request_lock = mutex_request.lock().await;

     match services().media.get_url_preview(url).await {
@@ -1,6 +1,6 @@
 use std::{
     collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
-    sync::{Arc, RwLock},
+    sync::Arc,
     time::{Duration, Instant},
 };

@@ -29,6 +29,7 @@ use ruma::{
     OwnedUserId, RoomId, RoomVersionId, UserId,
 };
 use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
+use tokio::sync::RwLock;
 use tracing::{debug, error, info, warn};

 use super::get_alias_helper;

@@ -242,7 +243,7 @@ pub async fn kick_user_route(body: Ruma<kick_user::v3::Request>) -> Result<kick_
     event.reason = body.reason.clone();

     let mutex_state =
-        Arc::clone(services().globals.roomid_mutex_state.write().unwrap().entry(body.room_id.clone()).or_default());
+        Arc::clone(services().globals.roomid_mutex_state.write().await.entry(body.room_id.clone()).or_default());
     let state_lock = mutex_state.lock().await;

     services()

@@ -303,7 +304,7 @@ pub async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<ban_use
     )?;

     let mutex_state =
-        Arc::clone(services().globals.roomid_mutex_state.write().unwrap().entry(body.room_id.clone()).or_default());
+        Arc::clone(services().globals.roomid_mutex_state.write().await.entry(body.room_id.clone()).or_default());
     let state_lock = mutex_state.lock().await;

     services()

@@ -349,7 +350,7 @@ pub async fn unban_user_route(body: Ruma<unban_user::v3::Request>) -> Result<unb
     event.reason = body.reason.clone();

     let mutex_state =
-        Arc::clone(services().globals.roomid_mutex_state.write().unwrap().entry(body.room_id.clone()).or_default());
+        Arc::clone(services().globals.roomid_mutex_state.write().await.entry(body.room_id.clone()).or_default());
     let state_lock = mutex_state.lock().await;

     services()

@@ -480,7 +481,7 @@ async fn join_room_by_id_helper(
     let sender_user = sender_user.expect("user is authenticated");

     let mutex_state =
-        Arc::clone(services().globals.roomid_mutex_state.write().unwrap().entry(room_id.to_owned()).or_default());
+        Arc::clone(services().globals.roomid_mutex_state.write().await.entry(room_id.to_owned()).or_default());
     let state_lock = mutex_state.lock().await;

     // Ask a remote server if we are not participating in this room
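The roomid_mutex_state changes above all follow the same shape: a map from room ID to a per-room Arc<Mutex<()>>, where the map itself is now behind a tokio RwLock so it can be locked with .await, and the cloned Arc lets the per-room tokio Mutex be held across the whole state operation. A simplified sketch of that pattern; the field types are an assumption based on these call sites, not copied from the diff:

    use std::collections::HashMap;
    use std::sync::Arc;
    use tokio::sync::{Mutex, RwLock};

    type RoomId = String; // stand-in for ruma's OwnedRoomId

    struct Globals {
        roomid_mutex_state: RwLock<HashMap<RoomId, Arc<Mutex<()>>>>,
    }

    async fn with_room_state_lock(globals: &Globals, room_id: &RoomId) {
        // Lock the map just long enough to fetch (or create) this room's mutex; the
        // write guard is a temporary and is released at the end of the statement.
        let mutex_state =
            Arc::clone(globals.roomid_mutex_state.write().await.entry(room_id.clone()).or_default());
        // Hold the per-room lock across the async work; tokio's MutexGuard is Send,
        // so the task may migrate between worker threads while holding it.
        let _state_lock = mutex_state.lock().await;
        // ... build and append state events here ...
    }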
@@ -680,7 +681,7 @@ async fn join_room_by_id_helper(
         .iter()
         .map(|pdu| validate_and_add_event_id(pdu, &room_version_id, &pub_key_map))
     {
-        let (event_id, value) = match result {
+        let (event_id, value) = match result.await {
             Ok(t) => t,
             Err(_) => continue,
         };

@@ -705,7 +706,7 @@ async fn join_room_by_id_helper(
         .iter()
         .map(|pdu| validate_and_add_event_id(pdu, &room_version_id, &pub_key_map))
     {
-        let (event_id, value) = match result {
+        let (event_id, value) = match result.await {
             Ok(t) => t,
             Err(_) => continue,
         };
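With validate_and_add_event_id now an async fn, the .map() above only builds futures, and each one is driven by the result.await inside the loop, so events are still validated one at a time and in order. Roughly, with a deliberately simplified signature:

    // Simplified stand-in for the now-async validate_and_add_event_id; the real
    // function also checks signatures against pub_key_map and returns canonical JSON.
    async fn validate_and_add_event_id(pdu: &str) -> Result<String, ()> {
        Ok(pdu.to_owned())
    }

    async fn process(pdus: &[String]) {
        // `.map()` builds the futures lazily; nothing runs until each `result.await`
        // below, so validation stays sequential.
        for result in pdus.iter().map(|pdu| validate_and_add_event_id(pdu)) {
            let _event_id = match result.await {
                Ok(t) => t,
                Err(_) => continue,
            };
        }
    }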
@@ -1048,7 +1049,7 @@ async fn make_join_request(
     make_join_response_and_server
 }

-fn validate_and_add_event_id(
+async fn validate_and_add_event_id(
     pdu: &RawJsonValue, room_version: &RoomVersionId, pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
 ) -> Result<(OwnedEventId, CanonicalJsonObject)> {
     let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {

@@ -1061,14 +1062,16 @@ fn validate_and_add_event_id(
     ))
     .expect("ruma's reference hashes are valid event ids");

-    let back_off = |id| match services().globals.bad_event_ratelimiter.write().unwrap().entry(id) {
-        Entry::Vacant(e) => {
-            e.insert((Instant::now(), 1));
-        },
-        Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
+    let back_off = |id| async {
+        match services().globals.bad_event_ratelimiter.write().await.entry(id) {
+            Entry::Vacant(e) => {
+                e.insert((Instant::now(), 1));
+            },
+            Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
+        }
     };

-    if let Some((time, tries)) = services().globals.bad_event_ratelimiter.read().unwrap().get(&event_id) {
+    if let Some((time, tries)) = services().globals.bad_event_ratelimiter.read().await.get(&event_id) {
         // Exponential backoff
         let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries);
         if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
@@ -1081,13 +1084,9 @@ fn validate_and_add_event_id(
         }
     }

-    if let Err(e) = ruma::signatures::verify_event(
-        &*pub_key_map.read().map_err(|_| Error::bad_database("RwLock is poisoned."))?,
-        &value,
-        room_version,
-    ) {
+    if let Err(e) = ruma::signatures::verify_event(&*pub_key_map.read().await, &value, room_version) {
         warn!("Event {} failed verification {:?} {}", event_id, pdu, e);
-        back_off(event_id);
+        back_off(event_id).await;
         return Err(Error::BadServerResponse("Event failed verification."));
     }
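A side effect visible in the hunk above: tokio's RwLock has no lock poisoning, so read() yields the guard directly and the old map_err branch for a poisoned lock disappears. An illustrative comparison, not code from this commit:

    use std::collections::BTreeMap;

    fn read_std(map: &std::sync::RwLock<BTreeMap<String, String>>) -> Result<usize, String> {
        // std: the guard comes wrapped in a Result because a panic while holding the
        // lock poisons it, so the error must be handled (or unwrapped).
        let guard = map.read().map_err(|_| "RwLock is poisoned.".to_owned())?;
        Ok(guard.len())
    }

    async fn read_tokio(map: &tokio::sync::RwLock<BTreeMap<String, String>>) -> usize {
        // tokio: no poisoning, the await yields the guard itself.
        map.read().await.len()
    }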
@@ -1109,9 +1108,8 @@ pub(crate) async fn invite_helper(

     if user_id.server_name() != services().globals.server_name() {
         let (pdu, pdu_json, invite_room_state) = {
-            let mutex_state = Arc::clone(
-                services().globals.roomid_mutex_state.write().unwrap().entry(room_id.to_owned()).or_default(),
-            );
+            let mutex_state =
+                Arc::clone(services().globals.roomid_mutex_state.write().await.entry(room_id.to_owned()).or_default());
             let state_lock = mutex_state.lock().await;

             let content = to_raw_value(&RoomMemberEventContent {

@@ -1229,7 +1227,7 @@ pub(crate) async fn invite_helper(
     }

     let mutex_state =
-        Arc::clone(services().globals.roomid_mutex_state.write().unwrap().entry(room_id.to_owned()).or_default());
+        Arc::clone(services().globals.roomid_mutex_state.write().await.entry(room_id.to_owned()).or_default());
     let state_lock = mutex_state.lock().await;

     services()

@@ -1314,7 +1312,7 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<Strin
         .await?;
     } else {
         let mutex_state =
-            Arc::clone(services().globals.roomid_mutex_state.write().unwrap().entry(room_id.to_owned()).or_default());
+            Arc::clone(services().globals.roomid_mutex_state.write().await.entry(room_id.to_owned()).or_default());
         let state_lock = mutex_state.lock().await;

         let member_event =
@@ -33,7 +33,7 @@ pub async fn send_message_event_route(
     let sender_device = body.sender_device.as_deref();

     let mutex_state =
-        Arc::clone(services().globals.roomid_mutex_state.write().unwrap().entry(body.room_id.clone()).or_default());
+        Arc::clone(services().globals.roomid_mutex_state.write().await.entry(body.room_id.clone()).or_default());
     let state_lock = mutex_state.lock().await;

     // Forbid m.room.encrypted if encryption is disabled

@@ -160,7 +160,7 @@ pub async fn get_message_events_route(

     let to = body.to.as_ref().and_then(|t| PduCount::try_from_string(t).ok());

-    services().rooms.lazy_loading.lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)?;
+    services().rooms.lazy_loading.lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from).await?;

     let limit = u64::from(body.limit).min(100) as usize;

@@ -276,7 +276,7 @@ pub async fn get_message_events_route(
         &body.room_id,
         lazy_loaded,
         next_token,
-    );
+    ).await;
     }
     */
@@ -65,7 +65,7 @@ pub async fn set_displayname_route(

     for (pdu_builder, room_id) in all_rooms_joined {
         let mutex_state =
-            Arc::clone(services().globals.roomid_mutex_state.write().unwrap().entry(room_id.clone()).or_default());
+            Arc::clone(services().globals.roomid_mutex_state.write().await.entry(room_id.clone()).or_default());
         let state_lock = mutex_state.lock().await;

         let _ = services().rooms.timeline.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock).await;

@@ -176,7 +176,7 @@ pub async fn set_avatar_url_route(body: Ruma<set_avatar_url::v3::Request>) -> Re

     for (pdu_builder, room_id) in all_joined_rooms {
         let mutex_state =
-            Arc::clone(services().globals.roomid_mutex_state.write().unwrap().entry(room_id.clone()).or_default());
+            Arc::clone(services().globals.roomid_mutex_state.write().await.entry(room_id.clone()).or_default());
         let state_lock = mutex_state.lock().await;

         let _ = services().rooms.timeline.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock).await;
@@ -18,7 +18,7 @@ pub async fn redact_event_route(body: Ruma<redact_event::v3::Request>) -> Result
     let body = body.body;

     let mutex_state =
-        Arc::clone(services().globals.roomid_mutex_state.write().unwrap().entry(body.room_id.clone()).or_default());
+        Arc::clone(services().globals.roomid_mutex_state.write().await.entry(body.room_id.clone()).or_default());
     let state_lock = mutex_state.lock().await;

     let event_id = services()
@@ -111,7 +111,7 @@ pub async fn create_room_route(body: Ruma<create_room::v3::Request>) -> Result<c
     services().rooms.short.get_or_create_shortroomid(&room_id)?;

     let mutex_state =
-        Arc::clone(services().globals.roomid_mutex_state.write().unwrap().entry(room_id.clone()).or_default());
+        Arc::clone(services().globals.roomid_mutex_state.write().await.entry(room_id.clone()).or_default());
     let state_lock = mutex_state.lock().await;

     let alias: Option<OwnedRoomAliasId> = body.room_alias_name.as_ref().map_or(Ok(None), |localpart| {

@@ -610,7 +610,7 @@ pub async fn upgrade_room_route(body: Ruma<upgrade_room::v3::Request>) -> Result
     services().rooms.short.get_or_create_shortroomid(&replacement_room)?;

     let mutex_state =
-        Arc::clone(services().globals.roomid_mutex_state.write().unwrap().entry(body.room_id.clone()).or_default());
+        Arc::clone(services().globals.roomid_mutex_state.write().await.entry(body.room_id.clone()).or_default());
     let state_lock = mutex_state.lock().await;

     // Send a m.room.tombstone event to the old room to indicate that it is not

@@ -640,7 +640,7 @@ pub async fn upgrade_room_route(body: Ruma<upgrade_room::v3::Request>) -> Result
     // Change lock to replacement room
     drop(state_lock);
     let mutex_state =
-        Arc::clone(services().globals.roomid_mutex_state.write().unwrap().entry(replacement_room.clone()).or_default());
+        Arc::clone(services().globals.roomid_mutex_state.write().await.entry(replacement_room.clone()).or_default());
     let state_lock = mutex_state.lock().await;

     // Get the old room creation event
@@ -230,7 +230,7 @@ async fn send_state_event_for_key_helper(
     }

     let mutex_state =
-        Arc::clone(services().globals.roomid_mutex_state.write().unwrap().entry(room_id.to_owned()).or_default());
+        Arc::clone(services().globals.roomid_mutex_state.write().await.entry(room_id.to_owned()).or_default());
     let state_lock = mutex_state.lock().await;

     let event_id = services()
@@ -82,7 +82,7 @@ pub async fn sync_events_route(
     let body = body.body;

     let mut rx =
-        match services().globals.sync_receivers.write().unwrap().entry((sender_user.clone(), sender_device.clone())) {
+        match services().globals.sync_receivers.write().await.entry((sender_user.clone(), sender_device.clone())) {
             Entry::Vacant(v) => {
                 let (tx, rx) = tokio::sync::watch::channel(None);

@@ -132,7 +132,7 @@ async fn sync_helper_wrapper(

     if let Ok((_, caching_allowed)) = r {
         if !caching_allowed {
-            match services().globals.sync_receivers.write().unwrap().entry((sender_user, sender_device)) {
+            match services().globals.sync_receivers.write().await.entry((sender_user, sender_device)) {
                 Entry::Occupied(o) => {
                     // Only remove if the device didn't start a different /sync already
                     if o.get().0 == since {

@@ -233,7 +233,7 @@ async fn sync_helper(
     {
         // Get and drop the lock to wait for remaining operations to finish
         let mutex_insert =
-            Arc::clone(services().globals.roomid_mutex_insert.write().unwrap().entry(room_id.clone()).or_default());
+            Arc::clone(services().globals.roomid_mutex_insert.write().await.entry(room_id.clone()).or_default());
         let insert_lock = mutex_insert.lock().await;
         drop(insert_lock);
     };

@@ -339,7 +339,7 @@ async fn sync_helper(
     {
         // Get and drop the lock to wait for remaining operations to finish
         let mutex_insert =
-            Arc::clone(services().globals.roomid_mutex_insert.write().unwrap().entry(room_id.clone()).or_default());
+            Arc::clone(services().globals.roomid_mutex_insert.write().await.entry(room_id.clone()).or_default());
         let insert_lock = mutex_insert.lock().await;
         drop(insert_lock);
     };

@@ -485,7 +485,7 @@ async fn load_joined_room(
         // Get and drop the lock to wait for remaining operations to finish
         // This will make sure the we have all events until next_batch
         let mutex_insert =
-            Arc::clone(services().globals.roomid_mutex_insert.write().unwrap().entry(room_id.to_owned()).or_default());
+            Arc::clone(services().globals.roomid_mutex_insert.write().await.entry(room_id.to_owned()).or_default());
         let insert_lock = mutex_insert.lock().await;
         drop(insert_lock);
     };
@@ -500,7 +500,7 @@ async fn load_joined_room(
             timeline_users.insert(event.sender.as_str().to_owned());
         }

-        services().rooms.lazy_loading.lazy_load_confirm_delivery(sender_user, sender_device, room_id, sincecount)?;
+        services().rooms.lazy_loading.lazy_load_confirm_delivery(sender_user, sender_device, room_id, sincecount).await?;

         // Database queries:

@@ -653,13 +653,11 @@ async fn load_joined_room(

             // The state_events above should contain all timeline_users, let's mark them as
             // lazy loaded.
-            services().rooms.lazy_loading.lazy_load_mark_sent(
-                sender_user,
-                sender_device,
-                room_id,
-                lazy_loaded,
-                next_batchcount,
-            );
+            services()
+                .rooms
+                .lazy_loading
+                .lazy_load_mark_sent(sender_user, sender_device, room_id, lazy_loaded, next_batchcount)
+                .await;

             (heroes, joined_member_count, invited_member_count, true, state_events)
         } else {

@@ -721,13 +719,11 @@ async fn load_joined_room(
             }
         }

-        services().rooms.lazy_loading.lazy_load_mark_sent(
-            sender_user,
-            sender_device,
-            room_id,
-            lazy_loaded,
-            next_batchcount,
-        );
+        services()
+            .rooms
+            .lazy_loading
+            .lazy_load_mark_sent(sender_user, sender_device, room_id, lazy_loaded, next_batchcount)
+            .await;

         let encrypted_room = services()
             .rooms
@@ -6,7 +6,7 @@ use std::{
     fmt::Debug,
     mem,
     net::{IpAddr, SocketAddr},
-    sync::{Arc, RwLock},
+    sync::Arc,
     time::{Duration, Instant, SystemTime},
 };

@@ -50,6 +50,7 @@ use ruma::{
     OwnedRoomId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, ServerName,
 };
 use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
+use tokio::sync::RwLock;
 use tracing::{debug, error, info, warn};
 use trust_dns_resolver::{error::ResolveError, lookup::SrvLookup};

@@ -157,7 +158,7 @@ where

     let mut write_destination_to_cache = false;

-    let cached_result = services().globals.actual_destination_cache.read().unwrap().get(destination).cloned();
+    let cached_result = services().globals.actual_destination_cache.read().await.get(destination).cloned();

     let (actual_destination, host) = if let Some(result) = cached_result {
         result

@@ -276,7 +277,7 @@ where
             .globals
             .actual_destination_cache
             .write()
-            .unwrap()
+            .await
             .insert(OwnedServerName::from(destination), (actual_destination, host));
     }
@@ -291,7 +292,7 @@ where
         // well-knowns
         if !write_destination_to_cache {
             info!("Evicting {destination} from our true destination cache due to failed request.");
-            services().globals.actual_destination_cache.write().unwrap().remove(destination);
+            services().globals.actual_destination_cache.write().await.remove(destination);
         }

         Err(Error::FederationError(

@@ -767,7 +768,7 @@ pub async fn send_transaction_message_route(

     for (event_id, value, room_id) in parsed_pdus {
         let mutex =
-            Arc::clone(services().globals.roomid_mutex_federation.write().unwrap().entry(room_id.clone()).or_default());
+            Arc::clone(services().globals.roomid_mutex_federation.write().await.entry(room_id.clone()).or_default());
         let mutex_lock = mutex.lock().await;
         let start_time = Instant::now();
         resolved_map.insert(

@@ -1264,7 +1265,7 @@ pub async fn create_join_event_template_route(
     services().rooms.event_handler.acl_check(sender_servername, &body.room_id)?;

     let mutex_state =
-        Arc::clone(services().globals.roomid_mutex_state.write().unwrap().entry(body.room_id.clone()).or_default());
+        Arc::clone(services().globals.roomid_mutex_state.write().await.entry(body.room_id.clone()).or_default());
     let state_lock = mutex_state.lock().await;

     // TODO: Conduit does not implement restricted join rules yet, we always reject

@@ -1413,7 +1414,7 @@ async fn create_join_event(
     services().rooms.event_handler.fetch_required_signing_keys([&value], &pub_key_map).await?;

     let mutex =
-        Arc::clone(services().globals.roomid_mutex_federation.write().unwrap().entry(room_id.to_owned()).or_default());
+        Arc::clone(services().globals.roomid_mutex_federation.write().await.entry(room_id.to_owned()).or_default());
     let mutex_lock = mutex.lock().await;
     let pdu_id: Vec<u8> = services()
         .rooms