reduce roomid_mutex_state

Signed-off-by: Jason Volk <jason@zemos.net>

parent 539aa27815
commit 08bf074cbb
18 changed files with 93 additions and 269 deletions
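The change is the same at every call site: the open-coded per-room state lock, a RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>> that each caller had to write-lock, index, clone, and then lock again, is replaced by the MutexMap abstraction already used for roomid_mutex_insert and roomid_mutex_federation, and the guard type threaded through state-mutating functions changes from tokio::sync::MutexGuard<'_, ()> to conduit::utils::mutex_map::Guard<()>. The mutex_map module itself is not part of this diff; what follows is a minimal sketch of what such a keyed mutex map plausibly looks like, assuming only std and tokio. The names and bounds are illustrative, not the actual conduit API.

    // Hypothetical sketch; the real conduit::utils::mutex_map is not shown in this commit.
    use std::{collections::HashMap, hash::Hash, sync::Arc};

    use tokio::sync::{Mutex, OwnedMutexGuard};

    /// Maps each key (e.g. a room id) to its own tokio Mutex, created on first use.
    pub struct MutexMap<Key: Eq + Hash, Val> {
        map: Mutex<HashMap<Key, Arc<Mutex<Val>>>>,
    }

    /// Owned guard: while it lives, the per-key mutex stays locked.
    pub struct Guard<Val> {
        _guard: OwnedMutexGuard<Val>,
    }

    impl<Key: Eq + Hash, Val: Default> MutexMap<Key, Val> {
        pub fn new() -> Self {
            Self {
                map: Mutex::new(HashMap::new()),
            }
        }

        /// Locks the entry for `key`, inserting a fresh mutex on first use.
        /// Borrowed keys work, e.g. &RoomId for an OwnedRoomId key.
        pub async fn lock<K>(&self, key: &K) -> Guard<Val>
        where
            K: ToOwned<Owned = Key> + ?Sized,
        {
            // The outer map lock is held only long enough to clone the Arc;
            // it is released before awaiting the per-key mutex.
            let entry = Arc::clone(
                self.map
                    .lock()
                    .await
                    .entry(key.to_owned())
                    .or_default(),
            );

            Guard {
                _guard: entry.lock_owned().await,
            }
        }
    }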
@@ -595,17 +595,7 @@ pub(crate) async fn force_set_room_state_from_server(
 		.state_compressor
 		.save_state(room_id.clone().as_ref(), new_room_state)?;
 
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(room_id.clone().into())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
-
+	let state_lock = services().globals.roomid_mutex_state.lock(&room_id).await;
 	services()
 		.rooms
 		.state
@@ -7,6 +7,7 @@ use std::{
 };
 
 use axum_client_ip::InsecureClientIp;
+use conduit::utils::mutex_map;
 use ruma::{
 	api::{
 		client::{
@@ -32,7 +33,7 @@ use ruma::{
 	OwnedUserId, RoomId, RoomVersionId, ServerName, UserId,
 };
 use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
-use tokio::sync::{MutexGuard, RwLock};
+use tokio::sync::RwLock;
 use tracing::{debug, error, info, trace, warn};
 
 use super::get_alias_helper;
@@ -373,16 +374,11 @@ pub(crate) async fn kick_user_route(body: Ruma<kick_user::v3::Request>) -> Resul
 	event.membership = MembershipState::Leave;
 	event.reason.clone_from(&body.reason);
 
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(body.room_id.clone())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
+	let state_lock = services()
+		.globals
+		.roomid_mutex_state
+		.lock(&body.room_id)
+		.await;
 
 	services()
 		.rooms
@@ -442,16 +438,11 @@ pub(crate) async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<
 		},
 	)?;
 
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(body.room_id.clone())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
+	let state_lock = services()
+		.globals
+		.roomid_mutex_state
+		.lock(&body.room_id)
+		.await;
 
 	services()
 		.rooms
@@ -496,16 +487,11 @@ pub(crate) async fn unban_user_route(body: Ruma<unban_user::v3::Request>) -> Res
 	event.reason.clone_from(&body.reason);
 	event.join_authorized_via_users_server = None;
 
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(body.room_id.clone())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
+	let state_lock = services()
+		.globals
+		.roomid_mutex_state
+		.lock(&body.room_id)
+		.await;
 
 	services()
 		.rooms
@@ -670,16 +656,7 @@ pub async fn join_room_by_id_helper(
 		});
 	}
 
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(room_id.to_owned())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
+	let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
 
 	// Ask a remote server if we are not participating in this room
 	if !services()
@@ -695,7 +672,7 @@ pub async fn join_room_by_id_helper(
 
 async fn join_room_by_id_helper_remote(
 	sender_user: &UserId, room_id: &RoomId, reason: Option<String>, servers: &[OwnedServerName],
-	_third_party_signed: Option<&ThirdPartySigned>, state_lock: MutexGuard<'_, ()>,
+	_third_party_signed: Option<&ThirdPartySigned>, state_lock: mutex_map::Guard<()>,
 ) -> Result<join_room_by_id::v3::Response> {
 	info!("Joining {room_id} over federation.");
 
@@ -1030,7 +1007,7 @@ async fn join_room_by_id_helper_remote(
 
 async fn join_room_by_id_helper_local(
 	sender_user: &UserId, room_id: &RoomId, reason: Option<String>, servers: &[OwnedServerName],
-	_third_party_signed: Option<&ThirdPartySigned>, state_lock: MutexGuard<'_, ()>,
+	_third_party_signed: Option<&ThirdPartySigned>, state_lock: mutex_map::Guard<()>,
 ) -> Result<join_room_by_id::v3::Response> {
 	info!("We can join locally");
 
@@ -1413,17 +1390,7 @@ pub(crate) async fn invite_helper(
 
 	if !user_is_local(user_id) {
 		let (pdu, pdu_json, invite_room_state) = {
-			let mutex_state = Arc::clone(
-				services()
-					.globals
-					.roomid_mutex_state
-					.write()
-					.await
-					.entry(room_id.to_owned())
-					.or_default(),
-			);
-			let state_lock = mutex_state.lock().await;
-
+			let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
 			let content = to_raw_value(&RoomMemberEventContent {
 				avatar_url: services().users.avatar_url(user_id)?,
 				displayname: None,
@@ -1535,16 +1502,7 @@ pub(crate) async fn invite_helper(
 		));
 	}
 
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(room_id.to_owned())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
+	let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
 
 	services()
 		.rooms
@@ -1638,16 +1596,7 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<Strin
 			true,
 		)?;
 	} else {
-		let mutex_state = Arc::clone(
-			services()
-				.globals
-				.roomid_mutex_state
-				.write()
-				.await
-				.entry(room_id.to_owned())
-				.or_default(),
-		);
-		let state_lock = mutex_state.lock().await;
+		let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
 
 		let member_event =
 			services()
@@ -1,7 +1,4 @@
-use std::{
-	collections::{BTreeMap, HashSet},
-	sync::Arc,
-};
+use std::collections::{BTreeMap, HashSet};
 
 use conduit::PduCount;
 use ruma::{
@@ -32,16 +29,11 @@ pub(crate) async fn send_message_event_route(
 	let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 	let sender_device = body.sender_device.as_deref();
 
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(body.room_id.clone())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
+	let state_lock = services()
+		.globals
+		.roomid_mutex_state
+		.lock(&body.room_id)
+		.await;
 
 	// Forbid m.room.encrypted if encryption is disabled
 	if MessageLikeEventType::RoomEncrypted == body.event_type && !services().globals.allow_encryption() {
@@ -1,5 +1,3 @@
-use std::sync::Arc;
-
 use ruma::{
 	api::{
 		client::{
@@ -355,17 +353,7 @@ pub async fn update_avatar_url(
 
 pub async fn update_all_rooms(all_joined_rooms: Vec<(PduBuilder, &OwnedRoomId)>, user_id: OwnedUserId) {
	for (pdu_builder, room_id) in all_joined_rooms {
-		let mutex_state = Arc::clone(
-			services()
-				.globals
-				.roomid_mutex_state
-				.write()
-				.await
-				.entry(room_id.clone())
-				.or_default(),
-		);
-		let state_lock = mutex_state.lock().await;
-
+		let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
 		if let Err(e) = services()
 			.rooms
 			.timeline
@@ -1,5 +1,3 @@
-use std::sync::Arc;
-
 use ruma::{
 	api::client::redact::redact_event,
 	events::{room::redaction::RoomRedactionEventContent, TimelineEventType},
@@ -17,16 +15,11 @@ pub(crate) async fn redact_event_route(body: Ruma<redact_event::v3::Request>) ->
 	let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 	let body = body.body;
 
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(body.room_id.clone())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
+	let state_lock = services()
+		.globals
+		.roomid_mutex_state
+		.lock(&body.room_id)
+		.await;
 
 	let event_id = services()
 		.rooms
@@ -1,4 +1,4 @@
-use std::{cmp::max, collections::BTreeMap, sync::Arc};
+use std::{cmp::max, collections::BTreeMap};
 
 use conduit::{debug_info, debug_warn};
 use ruma::{
@@ -89,18 +89,8 @@ pub(crate) async fn create_room_route(body: Ruma<create_room::v3::Request>) -> R
 		));
 	}
 
-	services().rooms.short.get_or_create_shortroomid(&room_id)?;
-
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(room_id.clone())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
+	let _short_id = services().rooms.short.get_or_create_shortroomid(&room_id)?;
+	let state_lock = services().globals.roomid_mutex_state.lock(&room_id).await;
 
 	let alias: Option<OwnedRoomAliasId> = if let Some(alias) = &body.room_alias_name {
 		Some(room_alias_check(alias, &body.appservice_info).await?)
@@ -577,21 +567,17 @@ pub(crate) async fn upgrade_room_route(body: Ruma<upgrade_room::v3::Request>) ->
 
 	// Create a replacement room
 	let replacement_room = RoomId::new(services().globals.server_name());
-	services()
+
+	let _short_id = services()
 		.rooms
 		.short
 		.get_or_create_shortroomid(&replacement_room)?;
 
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(body.room_id.clone())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
+	let state_lock = services()
+		.globals
+		.roomid_mutex_state
+		.lock(&body.room_id)
+		.await;
 
 	// Send a m.room.tombstone event to the old room to indicate that it is not
 	// intended to be used any further Fail if the sender does not have the required
@@ -619,16 +605,11 @@ pub(crate) async fn upgrade_room_route(body: Ruma<upgrade_room::v3::Request>) ->
 
 	// Change lock to replacement room
 	drop(state_lock);
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(replacement_room.clone())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
+	let state_lock = services()
+		.globals
+		.roomid_mutex_state
+		.lock(&replacement_room)
+		.await;
 
 	// Get the old room creation event
 	let mut create_event_content = serde_json::from_str::<CanonicalJsonObject>(
@@ -172,18 +172,7 @@ async fn send_state_event_for_key_helper(
 	sender: &UserId, room_id: &RoomId, event_type: &StateEventType, json: &Raw<AnyStateEventContent>, state_key: String,
 ) -> Result<Arc<EventId>> {
 	allowed_to_send_state_event(room_id, event_type, json).await?;
-
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(room_id.to_owned())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
-
+	let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
 	let event_id = services()
 		.rooms
 		.timeline
@@ -1,5 +1,3 @@
-use std::sync::Arc;
-
 use ruma::{
 	api::{client::error::ErrorKind, federation::membership::prepare_join_event},
 	events::{
@@ -74,17 +72,11 @@ pub(crate) async fn create_join_event_template_route(
 		}
 	}
 
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(body.room_id.clone())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
-
+	let state_lock = services()
+		.globals
+		.roomid_mutex_state
+		.lock(&body.room_id)
+		.await;
 	let join_rules_event =
 		services()
 			.rooms
@@ -1,5 +1,3 @@
-use std::sync::Arc;
-
 use ruma::{
 	api::{client::error::ErrorKind, federation::membership::prepare_leave_event},
 	events::{
@@ -37,18 +35,11 @@ pub(crate) async fn create_leave_event_template_route(
 		.acl_check(origin, &body.room_id)?;
 
 	let room_version_id = services().rooms.state.get_room_version(&body.room_id)?;
-
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(body.room_id.clone())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
-
+	let state_lock = services()
+		.globals
+		.roomid_mutex_state
+		.lock(&body.room_id)
+		.await;
 	let content = to_raw_value(&RoomMemberEventContent {
 		avatar_url: None,
 		blurhash: None,
@@ -1,4 +1,4 @@
-use std::{collections::BTreeMap, sync::Arc};
+use std::collections::BTreeMap;
 
 use conduit::{Error, Result};
 use ruma::{
@@ -32,18 +32,9 @@ use crate::{pdu::PduBuilder, services};
 pub async fn create_admin_room() -> Result<()> {
 	let room_id = RoomId::new(services().globals.server_name());
 
-	services().rooms.short.get_or_create_shortroomid(&room_id)?;
+	let _short_id = services().rooms.short.get_or_create_shortroomid(&room_id)?;
 
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(room_id.clone())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
+	let state_lock = services().globals.roomid_mutex_state.lock(&room_id).await;
 
 	// Create a user for the server
 	let server_user = &services().globals.server_user;
@@ -1,4 +1,4 @@
-use std::{collections::BTreeMap, sync::Arc};
+use std::collections::BTreeMap;
 
 use conduit::Result;
 use ruma::{
@@ -22,16 +22,7 @@ use crate::{pdu::PduBuilder, services};
 /// In conduit, this is equivalent to granting admin privileges.
 pub async fn make_user_admin(user_id: &UserId, displayname: String) -> Result<()> {
 	if let Some(room_id) = Service::get_admin_room()? {
-		let mutex_state = Arc::clone(
-			services()
-				.globals
-				.roomid_mutex_state
-				.write()
-				.await
-				.entry(room_id.clone())
-				.or_default(),
-		);
-		let state_lock = mutex_state.lock().await;
+		let state_lock = services().globals.roomid_mutex_state.lock(&room_id).await;
 
 		// Use the server user to grant the new admin's power level
 		let server_user = &services().globals.server_user;
@@ -4,7 +4,7 @@ mod grant;
 
 use std::{future::Future, pin::Pin, sync::Arc};
 
-use conduit::{Error, Result};
+use conduit::{utils::mutex_map, Error, Result};
 pub use create::create_admin_room;
 pub use grant::make_user_admin;
 use ruma::{
@@ -15,10 +15,7 @@ use ruma::{
 	EventId, OwnedRoomId, RoomId, UserId,
 };
 use serde_json::value::to_raw_value;
-use tokio::{
-	sync::{Mutex, MutexGuard},
-	task::JoinHandle,
-};
+use tokio::{sync::Mutex, task::JoinHandle};
 use tracing::error;
 
 use crate::{pdu::PduBuilder, services, PduEvent};
@@ -218,17 +215,7 @@ async fn respond_to_room(content: &RoomMessageEventContent, room_id: &RoomId, us
 		"sender is not admin"
 	);
 
-	let mutex_state = Arc::clone(
-		services()
-			.globals
-			.roomid_mutex_state
-			.write()
-			.await
-			.entry(room_id.to_owned())
-			.or_default(),
-	);
-	let state_lock = mutex_state.lock().await;
-
+	let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
 	let response_pdu = PduBuilder {
 		event_type: TimelineEventType::RoomMessage,
 		content: to_raw_value(content).expect("event is valid, we just created it"),
@@ -250,7 +237,7 @@ async fn respond_to_room(content: &RoomMessageEventContent, room_id: &RoomId, us
 }
 
 async fn handle_response_error(
-	e: &Error, room_id: &RoomId, user_id: &UserId, state_lock: &MutexGuard<'_, ()>,
+	e: &Error, room_id: &RoomId, user_id: &UserId, state_lock: &mutex_map::Guard<()>,
 ) -> Result<()> {
 	error!("Failed to build and append admin room response PDU: \"{e}\"");
 	let error_room_message = RoomMessageEventContent::text_plain(format!(
@@ -55,7 +55,7 @@ pub struct Service {
 	pub bad_signature_ratelimiter: Arc<RwLock<HashMap<Vec<String>, RateLimitState>>>,
 	pub bad_query_ratelimiter: Arc<RwLock<HashMap<OwnedServerName, RateLimitState>>>,
 	pub roomid_mutex_insert: MutexMap<OwnedRoomId, ()>,
-	pub roomid_mutex_state: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
+	pub roomid_mutex_state: MutexMap<OwnedRoomId, ()>,
 	pub roomid_mutex_federation: MutexMap<OwnedRoomId, ()>,
 	pub roomid_federationhandletime: RwLock<HashMap<OwnedRoomId, (OwnedEventId, Instant)>>,
 	pub updates_handle: Mutex<Option<JoinHandle<()>>>,
@@ -116,7 +116,7 @@ impl Service {
 			bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
 			bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
 			bad_query_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
-			roomid_mutex_state: RwLock::new(HashMap::new()),
+			roomid_mutex_state: MutexMap::<OwnedRoomId, ()>::new(),
 			roomid_mutex_insert: MutexMap::<OwnedRoomId, ()>::new(),
 			roomid_mutex_federation: MutexMap::<OwnedRoomId, ()>::new(),
 			roomid_federationhandletime: RwLock::new(HashMap::new()),
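With the map stored directly on Service, each call site collapses to a single expression, and the returned guard is then threaded by reference into every state-mutating function (see the set_room_state and append_pdu signatures below), so holding the lock becomes a compile-time obligation rather than a convention. A sketch of the resulting pattern, with new_shortstatehash standing in for whatever value the caller computed:

    // Sketch of the post-change call-site pattern used throughout this commit.
    let state_lock = services().globals.roomid_mutex_state.lock(&room_id).await;

    // State mutations take &mutex_map::Guard<()> to prove the room is locked.
    services()
        .rooms
        .state
        .set_room_state(&room_id, new_shortstatehash, &state_lock)?;

    // Dropping the guard releases this room, e.g. before locking a different
    // room as upgrade_room_route does above.
    drop(state_lock);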
@@ -530,18 +530,8 @@ impl Service {
 		// 13. Use state resolution to find new room state
 
 		// We start looking at current room state now, so lets lock the room
-		let mutex_state = Arc::clone(
-			services()
-				.globals
-				.roomid_mutex_state
-				.write()
-				.await
-				.entry(room_id.to_owned())
-				.or_default(),
-		);
-
 		trace!("Locking the room");
-		let state_lock = mutex_state.lock().await;
+		let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
 
 		// Now we calculate the set of extremities this room has after the incoming
 		// event has been applied. We start with the previous extremities (aka leaves)
@@ -1,7 +1,7 @@
 use std::{collections::HashSet, sync::Arc};
 
+use conduit::utils::mutex_map;
 use ruma::{EventId, OwnedEventId, RoomId};
-use tokio::sync::MutexGuard;
 
 use crate::{utils, Error, KeyValueDatabase, Result};
 
@@ -14,7 +14,7 @@ pub trait Data: Send + Sync {
 		&self,
 		room_id: &RoomId,
 		new_shortstatehash: u64,
-		_mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+		_mutex_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<()>;
 
 	/// Associates a state with an event.
@@ -28,7 +28,7 @@ pub trait Data: Send + Sync {
 		&self,
 		room_id: &RoomId,
 		event_ids: Vec<OwnedEventId>,
-		_mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+		_mutex_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<()>;
 }
 
@@ -47,7 +47,7 @@ impl Data for KeyValueDatabase {
 		&self,
 		room_id: &RoomId,
 		new_shortstatehash: u64,
-		_mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+		_mutex_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<()> {
 		self.roomid_shortstatehash
 			.insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?;
@@ -80,7 +80,7 @@ impl Data for KeyValueDatabase {
 		&self,
 		room_id: &RoomId,
 		event_ids: Vec<OwnedEventId>,
-		_mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+		_mutex_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<()> {
 		let mut prefix = room_id.as_bytes().to_vec();
 		prefix.push(0xFF);
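The deliberately unused _mutex_lock parameter is a proof token: it is never read, but a caller cannot reach these writes without first taking the room's state lock. A condensed, hypothetical illustration of the idea (a free function rather than the actual trait, with RoomId, Result, and mutex_map as in the surrounding files):

    // Illustrative only: the unused guard turns lock discipline into a
    // compile-time obligation instead of a code-review convention.
    fn write_room_state(
        room_id: &RoomId,
        new_shortstatehash: u64,
        _mutex_lock: &mutex_map::Guard<()>, // proof that this room's state lock is held
    ) -> Result<()> {
        // Safe to write: no concurrent state mutation for room_id can occur
        // while the caller's guard is alive.
        Ok(())
    }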
@@ -4,6 +4,7 @@ use std::{
 	sync::Arc,
 };
 
+use conduit::utils::mutex_map;
 use data::Data;
 use ruma::{
 	api::client::error::ErrorKind,
@@ -15,7 +16,6 @@ use ruma::{
 	state_res::{self, StateMap},
 	EventId, OwnedEventId, RoomId, RoomVersionId, UserId,
 };
-use tokio::sync::MutexGuard;
 use tracing::warn;
 
 use super::state_compressor::CompressedStateEvent;
@@ -33,7 +33,7 @@ impl Service {
 		shortstatehash: u64,
 		statediffnew: Arc<HashSet<CompressedStateEvent>>,
 		_statediffremoved: Arc<HashSet<CompressedStateEvent>>,
-		state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+		state_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<()> {
 		for event_id in statediffnew.iter().filter_map(|new| {
 			services()
@@ -299,12 +299,12 @@ impl Service {
 	}
 
 	/// Set the state hash to a new version, but does not update state_cache.
-	#[tracing::instrument(skip(self))]
+	#[tracing::instrument(skip(self, mutex_lock))]
 	pub fn set_room_state(
 		&self,
 		room_id: &RoomId,
 		shortstatehash: u64,
-		mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+		mutex_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<()> {
 		self.db.set_room_state(room_id, shortstatehash, mutex_lock)
 	}
@@ -343,7 +343,7 @@ impl Service {
 		&self,
 		room_id: &RoomId,
 		event_ids: Vec<OwnedEventId>,
-		state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+		state_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<()> {
 		self.db
 			.set_forward_extremities(room_id, event_ids, state_lock)
@@ -4,6 +4,7 @@ use std::{
 	sync::{Arc, Mutex},
 };
 
+use conduit::utils::mutex_map;
 use data::Data;
 use lru_cache::LruCache;
 use ruma::{
@@ -22,7 +23,6 @@ use ruma::{
 	EventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
 };
 use serde_json::value::to_raw_value;
-use tokio::sync::MutexGuard;
 use tracing::{error, warn};
 
 use crate::{service::pdu::PduBuilder, services, Error, PduEvent, Result};
@@ -285,7 +285,7 @@ impl Service {
 	}
 
 	pub async fn user_can_invite(
-		&self, room_id: &RoomId, sender: &UserId, target_user: &UserId, state_lock: &MutexGuard<'_, ()>,
+		&self, room_id: &RoomId, sender: &UserId, target_user: &UserId, state_lock: &mutex_map::Guard<()>,
 	) -> Result<bool> {
 		let content = to_raw_value(&RoomMemberEventContent::new(MembershipState::Invite))
 			.expect("Event content always serializes");
@@ -30,7 +30,7 @@ use ruma::{
 };
 use serde::Deserialize;
 use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
-use tokio::sync::{Mutex, MutexGuard, RwLock};
+use tokio::sync::{Mutex, RwLock};
 use tracing::{debug, error, info, warn};
 
 use super::state_compressor::CompressedStateEvent;
@@ -44,7 +44,7 @@ use crate::{
 		rooms::event_handler::parse_incoming_pdu,
 	},
 	services,
-	utils::{self},
+	utils::{self, mutex_map},
 	Error,
 	PduCount,
 	PduEvent,
@@ -200,13 +200,13 @@ impl Service {
 	/// happens in `append_pdu`.
 	///
 	/// Returns pdu id
-	#[tracing::instrument(skip(self, pdu, pdu_json, leaves))]
+	#[tracing::instrument(skip_all)]
 	pub async fn append_pdu(
 		&self,
 		pdu: &PduEvent,
 		mut pdu_json: CanonicalJsonObject,
 		leaves: Vec<OwnedEventId>,
-		state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+		state_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<Vec<u8>> {
 		// Coalesce database writes for the remainder of this scope.
 		let _cork = services().globals.db.cork_and_flush();
@@ -581,7 +581,7 @@ impl Service {
 		pdu_builder: PduBuilder,
 		sender: &UserId,
 		room_id: &RoomId,
-		_mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+		_mutex_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<(PduEvent, CanonicalJsonObject)> {
 		let PduBuilder {
 			event_type,
@@ -768,7 +768,7 @@ impl Service {
 		pdu_builder: PduBuilder,
 		sender: &UserId,
 		room_id: &RoomId,
-		state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+		state_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<Arc<EventId>> {
 		let (pdu, pdu_json) = self.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)?;
 		if let Some(admin_room) = admin::Service::get_admin_room()? {
@@ -909,7 +909,7 @@ impl Service {
 		new_room_leaves: Vec<OwnedEventId>,
 		state_ids_compressed: Arc<HashSet<CompressedStateEvent>>,
 		soft_fail: bool,
-		state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
+		state_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<Option<Vec<u8>>> {
 		// We append to state before appending the pdu, so we don't have a moment in
 		// time with the pdu without it's state. This is okay because append_pdu can't