move mutex maps out of globals into respective service

Signed-off-by: Jason Volk <jason@zemos.net>

parent 2d251eb19c · commit 271f720286
23 changed files with 93 additions and 121 deletions
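The `MutexMap` used throughout this change is conduwuit's keyed-lock utility from `conduit::utils`: callers lock a per-room entry, so work on the same room serializes while different rooms proceed concurrently. A minimal sketch of the idea (illustrative only; the real `conduit::utils::MutexMap` differs in naming and cleanup details):

use std::{
	collections::HashMap,
	hash::Hash,
	sync::{Arc, Mutex},
};

use tokio::sync::{Mutex as AsyncMutex, OwnedMutexGuard};

/// Illustrative keyed mutex map: one async mutex per key, created on demand.
pub struct MutexMap<K: Eq + Hash + Clone> {
	map: Mutex<HashMap<K, Arc<AsyncMutex<()>>>>,
}

impl<K: Eq + Hash + Clone> MutexMap<K> {
	pub fn new() -> Self {
		Self {
			map: Mutex::new(HashMap::new()),
		}
	}

	/// Await the per-key lock; the guard releases it on drop.
	pub async fn lock(&self, key: &K) -> OwnedMutexGuard<()> {
		let entry = self
			.map
			.lock()
			.expect("locked")
			.entry(key.clone())
			.or_insert_with(|| Arc::new(AsyncMutex::new(())))
			.clone();

		entry.lock_owned().await
	}
}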
@@ -570,7 +570,7 @@ pub(super) async fn force_set_room_state_from_server(
 		.state_compressor
 		.save_state(room_id.clone().as_ref(), new_room_state)?;
 
-	let state_lock = services().globals.roomid_mutex_state.lock(&room_id).await;
+	let state_lock = services().rooms.state.mutex.lock(&room_id).await;
 	services()
 		.rooms
 		.state
@@ -16,8 +16,9 @@ pub(super) async fn enable_room(_body: Vec<&str>, room_id: Box<RoomId>) -> Resul
 
 pub(super) async fn incoming_federation(_body: Vec<&str>) -> Result<RoomMessageEventContent> {
 	let map = services()
-		.globals
-		.roomid_federationhandletime
+		.rooms
+		.event_handler
+		.federation_handletime
 		.read()
 		.expect("locked");
 	let mut msg = format!("Handling {} incoming pdus:\n", map.len());
@@ -40,8 +40,8 @@ use tokio::sync::RwLock;
 use crate::{
 	client::{update_avatar_url, update_displayname},
 	service::{
-		globals::RoomMutexGuard,
 		pdu::{gen_event_id_canonical_json, PduBuilder},
+		rooms::state::RoomMutexGuard,
 		sending::convert_to_outgoing_federation_event,
 		server_is_ours, user_is_local,
 	},
@@ -366,11 +366,7 @@ pub(crate) async fn invite_user_route(
 pub(crate) async fn kick_user_route(body: Ruma<kick_user::v3::Request>) -> Result<kick_user::v3::Response> {
 	let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 
-	let state_lock = services()
-		.globals
-		.roomid_mutex_state
-		.lock(&body.room_id)
-		.await;
+	let state_lock = services().rooms.state.mutex.lock(&body.room_id).await;
 
 	let mut event: RoomMemberEventContent = serde_json::from_str(
 		services()
@@ -417,11 +413,7 @@ pub(crate) async fn kick_user_route(body: Ruma<kick_user::v3::Request>) -> Resul
 pub(crate) async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<ban_user::v3::Response> {
 	let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 
-	let state_lock = services()
-		.globals
-		.roomid_mutex_state
-		.lock(&body.room_id)
-		.await;
+	let state_lock = services().rooms.state.mutex.lock(&body.room_id).await;
 
 	let event = services()
 		.rooms
@@ -481,11 +473,7 @@ pub(crate) async fn ban_user_route(body: Ruma<ban_user::v3::Request>) -> Result<
 pub(crate) async fn unban_user_route(body: Ruma<unban_user::v3::Request>) -> Result<unban_user::v3::Response> {
 	let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 
-	let state_lock = services()
-		.globals
-		.roomid_mutex_state
-		.lock(&body.room_id)
-		.await;
+	let state_lock = services().rooms.state.mutex.lock(&body.room_id).await;
 
 	let mut event: RoomMemberEventContent = serde_json::from_str(
 		services()
@@ -1399,7 +1387,7 @@ pub(crate) async fn invite_helper(
 
 	if !user_is_local(user_id) {
 		let (pdu, pdu_json, invite_room_state) = {
-			let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
+			let state_lock = services().rooms.state.mutex.lock(room_id).await;
 			let content = to_raw_value(&RoomMemberEventContent {
 				avatar_url: services().users.avatar_url(user_id)?,
 				displayname: None,
@@ -1511,7 +1499,7 @@ pub(crate) async fn invite_helper(
 		));
 	}
 
-	let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
+	let state_lock = services().rooms.state.mutex.lock(room_id).await;
 
 	services()
 		.rooms
@@ -1605,7 +1593,7 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId, reason: Option<Strin
 			true,
 		)?;
 	} else {
-		let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
+		let state_lock = services().rooms.state.mutex.lock(room_id).await;
 
 		let member_event =
 			services()
@@ -29,11 +29,7 @@ pub(crate) async fn send_message_event_route(
 	let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 	let sender_device = body.sender_device.as_deref();
 
-	let state_lock = services()
-		.globals
-		.roomid_mutex_state
-		.lock(&body.room_id)
-		.await;
+	let state_lock = services().rooms.state.mutex.lock(&body.room_id).await;
 
 	// Forbid m.room.encrypted if encryption is disabled
 	if MessageLikeEventType::RoomEncrypted == body.event_type && !services().globals.allow_encryption() {
@@ -353,7 +353,7 @@ pub async fn update_avatar_url(
 
 pub async fn update_all_rooms(all_joined_rooms: Vec<(PduBuilder, &OwnedRoomId)>, user_id: OwnedUserId) {
 	for (pdu_builder, room_id) in all_joined_rooms {
-		let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
+		let state_lock = services().rooms.state.mutex.lock(room_id).await;
 		if let Err(e) = services()
 			.rooms
 			.timeline
@@ -15,11 +15,7 @@ pub(crate) async fn redact_event_route(body: Ruma<redact_event::v3::Request>) ->
 	let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 	let body = body.body;
 
-	let state_lock = services()
-		.globals
-		.roomid_mutex_state
-		.lock(&body.room_id)
-		.await;
+	let state_lock = services().rooms.state.mutex.lock(&body.room_id).await;
 
 	let event_id = services()
 		.rooms
@@ -90,7 +90,7 @@ pub(crate) async fn create_room_route(body: Ruma<create_room::v3::Request>) -> R
 	}
 
 	let _short_id = services().rooms.short.get_or_create_shortroomid(&room_id)?;
-	let state_lock = services().globals.roomid_mutex_state.lock(&room_id).await;
+	let state_lock = services().rooms.state.mutex.lock(&room_id).await;
 
 	let alias: Option<OwnedRoomAliasId> = if let Some(alias) = &body.room_alias_name {
 		Some(room_alias_check(alias, &body.appservice_info).await?)
@@ -573,11 +573,7 @@ pub(crate) async fn upgrade_room_route(body: Ruma<upgrade_room::v3::Request>) ->
 		.short
 		.get_or_create_shortroomid(&replacement_room)?;
 
-	let state_lock = services()
-		.globals
-		.roomid_mutex_state
-		.lock(&body.room_id)
-		.await;
+	let state_lock = services().rooms.state.mutex.lock(&body.room_id).await;
 
 	// Send a m.room.tombstone event to the old room to indicate that it is not
 	// intended to be used any further Fail if the sender does not have the required
@@ -605,11 +601,7 @@ pub(crate) async fn upgrade_room_route(body: Ruma<upgrade_room::v3::Request>) ->
 
 	// Change lock to replacement room
 	drop(state_lock);
-	let state_lock = services()
-		.globals
-		.roomid_mutex_state
-		.lock(&replacement_room)
-		.await;
+	let state_lock = services().rooms.state.mutex.lock(&replacement_room).await;
 
 	// Get the old room creation event
 	let mut create_event_content = serde_json::from_str::<CanonicalJsonObject>(
@@ -170,7 +170,7 @@ async fn send_state_event_for_key_helper(
 	sender: &UserId, room_id: &RoomId, event_type: &StateEventType, json: &Raw<AnyStateEventContent>, state_key: String,
 ) -> Result<Arc<EventId>> {
 	allowed_to_send_state_event(room_id, event_type, json).await?;
-	let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
+	let state_lock = services().rooms.state.mutex.lock(room_id).await;
 	let event_id = services()
 		.rooms
 		.timeline
@@ -199,7 +199,7 @@ pub(crate) async fn sync_events_route(
 		let (room_id, invite_state_events) = result?;
 
 		// Get and drop the lock to wait for remaining operations to finish
-		let insert_lock = services().globals.roomid_mutex_insert.lock(&room_id).await;
+		let insert_lock = services().rooms.timeline.mutex_insert.lock(&room_id).await;
 		drop(insert_lock);
 
 		let invite_count = services()
@@ -317,7 +317,7 @@ async fn handle_left_room(
 	next_batch_string: &str, full_state: bool, lazy_load_enabled: bool,
 ) -> Result<()> {
 	// Get and drop the lock to wait for remaining operations to finish
-	let insert_lock = services().globals.roomid_mutex_insert.lock(room_id).await;
+	let insert_lock = services().rooms.timeline.mutex_insert.lock(room_id).await;
 	drop(insert_lock);
 
 	let left_count = services()
@@ -519,7 +519,7 @@ async fn load_joined_room(
 ) -> Result<JoinedRoom> {
 	// Get and drop the lock to wait for remaining operations to finish
 	// This will make sure the we have all events until next_batch
-	let insert_lock = services().globals.roomid_mutex_insert.lock(room_id).await;
+	let insert_lock = services().rooms.timeline.mutex_insert.lock(room_id).await;
 	drop(insert_lock);
 
 	let (timeline_pdus, limited) = load_timeline(sender_user, room_id, sincecount, 10)?;
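In the three sync hunks above, `mutex_insert` is used purely as a barrier: the handler takes the room's insert lock and immediately drops it, so any timeline insertion already in flight for that room finishes before counts are read. The relocated path keeps that pattern intact; sketched below (assuming the `services()` accessor from the surrounding code):

// Barrier: acquiring then dropping the insert lock waits out any
// concurrent timeline insertion for this room.
let insert_lock = services().rooms.timeline.mutex_insert.lock(&room_id).await;
drop(insert_lock);
// Reads below now observe a timeline consistent up to this point.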
@@ -71,11 +71,7 @@ pub(crate) async fn create_join_event_template_route(
 
 	let room_version_id = services().rooms.state.get_room_version(&body.room_id)?;
 
-	let state_lock = services()
-		.globals
-		.roomid_mutex_state
-		.lock(&body.room_id)
-		.await;
+	let state_lock = services().rooms.state.mutex.lock(&body.room_id).await;
 
 	let join_authorized_via_users_server = if (services()
 		.rooms
@@ -35,11 +35,7 @@ pub(crate) async fn create_leave_event_template_route(
 		.acl_check(origin, &body.room_id)?;
 
 	let room_version_id = services().rooms.state.get_room_version(&body.room_id)?;
-	let state_lock = services()
-		.globals
-		.roomid_mutex_state
-		.lock(&body.room_id)
-		.await;
+	let state_lock = services().rooms.state.mutex.lock(&body.room_id).await;
 	let content = to_raw_value(&RoomMemberEventContent {
 		avatar_url: None,
 		blurhash: None,
@@ -127,8 +127,9 @@ async fn handle_pdus(
 	for (event_id, value, room_id) in parsed_pdus {
 		let pdu_start_time = Instant::now();
 		let mutex_lock = services()
-			.globals
-			.roomid_mutex_federation
+			.rooms
+			.event_handler
+			.mutex_federation
 			.lock(&room_id)
 			.await;
 		resolved_map.insert(
@@ -156,8 +156,9 @@ async fn create_join_event(
 		.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "origin is not a server name."))?;
 
 	let mutex_lock = services()
-		.globals
-		.roomid_mutex_federation
+		.rooms
+		.event_handler
+		.mutex_federation
 		.lock(room_id)
 		.await;
 	let pdu_id: Vec<u8> = services()
@@ -152,8 +152,9 @@ async fn create_leave_event(origin: &ServerName, room_id: &RoomId, pdu: &RawJson
 		.await?;
 
 	let mutex_lock = services()
-		.globals
-		.roomid_mutex_federation
+		.rooms
+		.event_handler
+		.mutex_federation
 		.lock(room_id)
 		.await;
 	let pdu_id: Vec<u8> = services()
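These three federation endpoints all serialize PDU processing per room on what is now `rooms.event_handler.mutex_federation`. A hedged sketch of the guard scope (the `handle_incoming_pdu` call is a stand-in for the real processing path):

// Hold the per-room federation lock for the whole handling of one PDU so
// two events for the same room are never processed concurrently.
let mutex_lock = services()
	.rooms
	.event_handler
	.mutex_federation
	.lock(room_id)
	.await;
let result = handle_incoming_pdu(origin, room_id, &event_id, value).await; // stand-in
drop(mutex_lock);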
@@ -34,7 +34,7 @@ pub async fn create_admin_room() -> Result<()> {
 
 	let _short_id = services().rooms.short.get_or_create_shortroomid(&room_id)?;
 
-	let state_lock = services().globals.roomid_mutex_state.lock(&room_id).await;
+	let state_lock = services().rooms.state.mutex.lock(&room_id).await;
 
 	// Create a user for the server
 	let server_user = &services().globals.server_user;
@@ -22,7 +22,7 @@ use crate::{pdu::PduBuilder, services};
 /// In conduit, this is equivalent to granting admin privileges.
 pub async fn make_user_admin(user_id: &UserId, displayname: String) -> Result<()> {
 	if let Some(room_id) = Service::get_admin_room()? {
-		let state_lock = services().globals.roomid_mutex_state.lock(&room_id).await;
+		let state_lock = services().rooms.state.mutex.lock(&room_id).await;
 
 		// Use the server user to grant the new admin's power level
 		let server_user = &services().globals.server_user;
@@ -26,7 +26,7 @@ use tokio::{
 	task::JoinHandle,
 };
 
-use crate::{globals::RoomMutexGuard, pdu::PduBuilder, services, user_is_local, PduEvent};
+use crate::{pdu::PduBuilder, rooms::state::RoomMutexGuard, services, user_is_local, PduEvent};
 
 const COMMAND_QUEUE_LIMIT: usize = 512;
 
|
@ -248,7 +248,7 @@ async fn respond_to_room(content: RoomMessageEventContent, room_id: &RoomId, use
|
||||||
"sender is not admin"
|
"sender is not admin"
|
||||||
);
|
);
|
||||||
|
|
||||||
let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
|
let state_lock = services().rooms.state.mutex.lock(room_id).await;
|
||||||
let response_pdu = PduBuilder {
|
let response_pdu = PduBuilder {
|
||||||
event_type: TimelineEventType::RoomMessage,
|
event_type: TimelineEventType::RoomMessage,
|
||||||
content: to_raw_value(&content).expect("event is valid, we just created it"),
|
content: to_raw_value(&content).expect("event is valid, we just created it"),
|
||||||
|
|
|
@@ -12,19 +12,15 @@ use std::{
 	time::Instant,
 };
 
-use conduit::{
-	error, trace,
-	utils::{MutexMap, MutexMapGuard},
-	Config, Result,
-};
+use conduit::{error, trace, Config, Result};
 use data::Data;
 use ipaddress::IPAddress;
 use regex::RegexSet;
 use ruma::{
 	api::{client::discovery::discover_support::ContactRole, federation::discovery::VerifyKey},
 	serde::Base64,
-	DeviceId, OwnedEventId, OwnedRoomAliasId, OwnedRoomId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId,
-	RoomAliasId, RoomVersionId, ServerName, UserId,
+	DeviceId, OwnedEventId, OwnedRoomAliasId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomAliasId,
+	RoomVersionId, ServerName, UserId,
 };
 use tokio::{sync::Mutex, task::JoinHandle};
 use url::Url;
@@ -45,18 +41,12 @@ pub struct Service {
 	pub bad_event_ratelimiter: Arc<RwLock<HashMap<OwnedEventId, RateLimitState>>>,
 	pub bad_signature_ratelimiter: Arc<RwLock<HashMap<Vec<String>, RateLimitState>>>,
 	pub bad_query_ratelimiter: Arc<RwLock<HashMap<OwnedServerName, RateLimitState>>>,
-	pub roomid_mutex_insert: RoomMutexMap,
-	pub roomid_mutex_state: RoomMutexMap,
-	pub roomid_mutex_federation: RoomMutexMap,
-	pub roomid_federationhandletime: RwLock<HashMap<OwnedRoomId, (OwnedEventId, Instant)>>,
 	pub updates_handle: Mutex<Option<JoinHandle<()>>>,
 	pub stateres_mutex: Arc<Mutex<()>>,
 	pub server_user: OwnedUserId,
 	pub admin_alias: OwnedRoomAliasId,
 }
 
-pub type RoomMutexMap = MutexMap<OwnedRoomId, ()>;
-pub type RoomMutexGuard = MutexMapGuard<OwnedRoomId, ()>;
 type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries
 
 impl crate::Service for Service {
@@ -113,10 +103,6 @@ impl crate::Service for Service {
 			bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
 			bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
 			bad_query_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
-			roomid_mutex_state: MutexMap::<OwnedRoomId, ()>::new(),
-			roomid_mutex_insert: MutexMap::<OwnedRoomId, ()>::new(),
-			roomid_mutex_federation: MutexMap::<OwnedRoomId, ()>::new(),
-			roomid_federationhandletime: RwLock::new(HashMap::new()),
 			updates_handle: Mutex::new(None),
 			stateres_mutex: Arc::new(Mutex::new(())),
 			admin_alias: RoomAliasId::parse(format!("#admins:{}", &config.server_name))
@@ -4,13 +4,14 @@ mod signing_keys;
 use std::{
 	collections::{hash_map, BTreeMap, HashMap, HashSet},
 	pin::Pin,
-	sync::Arc,
+	sync::{Arc, RwLock as StdRwLock},
 	time::Instant,
 };
 
 use conduit::{
-	debug, debug_error, debug_info, error, info, trace, utils::math::continue_exponential_backoff_secs, warn, Error,
-	Result,
+	debug, debug_error, debug_info, error, info, trace,
+	utils::{math::continue_exponential_backoff_secs, MutexMap},
+	warn, Error, Result,
 };
 use futures_util::Future;
 pub use parse_incoming_pdu::parse_incoming_pdu;
|
@ -28,14 +29,21 @@ use ruma::{
|
||||||
int,
|
int,
|
||||||
serde::Base64,
|
serde::Base64,
|
||||||
state_res::{self, RoomVersion, StateMap},
|
state_res::{self, RoomVersion, StateMap},
|
||||||
uint, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedUserId, RoomId, RoomVersionId, ServerName,
|
uint, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId,
|
||||||
|
RoomVersionId, ServerName,
|
||||||
};
|
};
|
||||||
use tokio::sync::RwLock;
|
use tokio::sync::RwLock;
|
||||||
|
|
||||||
use super::state_compressor::CompressedStateEvent;
|
use super::state_compressor::CompressedStateEvent;
|
||||||
use crate::{pdu, services, PduEvent};
|
use crate::{pdu, services, PduEvent};
|
||||||
|
|
||||||
pub struct Service;
|
pub struct Service {
|
||||||
|
pub federation_handletime: StdRwLock<HandleTimeMap>,
|
||||||
|
pub mutex_federation: RoomMutexMap,
|
||||||
|
}
|
||||||
|
|
||||||
|
type RoomMutexMap = MutexMap<OwnedRoomId, ()>;
|
||||||
|
type HandleTimeMap = HashMap<OwnedRoomId, (OwnedEventId, Instant)>;
|
||||||
|
|
||||||
// We use some AsyncRecursiveType hacks here so we can call async funtion
|
// We use some AsyncRecursiveType hacks here so we can call async funtion
|
||||||
// recursively.
|
// recursively.
|
||||||
|
@@ -46,7 +54,12 @@ type AsyncRecursiveCanonicalJsonResult<'a> =
 	AsyncRecursiveType<'a, Result<(Arc<PduEvent>, BTreeMap<String, CanonicalJsonValue>)>>;
 
 impl crate::Service for Service {
-	fn build(_args: crate::Args<'_>) -> Result<Arc<Self>> { Ok(Arc::new(Self {})) }
+	fn build(_args: crate::Args<'_>) -> Result<Arc<Self>> {
+		Ok(Arc::new(Self {
+			federation_handletime: HandleTimeMap::new().into(),
+			mutex_federation: RoomMutexMap::new(),
+		}))
+	}
 
 	fn name(&self) -> &str { crate::service::make_name(std::module_path!()) }
 }
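With the map owned by the event handler, the timing bookkeeping in the hunks below goes through `self` instead of reaching into `services().globals`. A simplified, self-contained version of that insert/remove pattern (types narrowed to plain `String` stand-ins):

use std::{collections::HashMap, sync::RwLock, time::Instant};

type HandleTimeMap = HashMap<String, (String, Instant)>; // room id -> (event id, start)

struct Service {
	federation_handletime: RwLock<HandleTimeMap>,
}

impl Service {
	// Mark the room as currently handling an event (mirrors the .insert hunks).
	fn begin(&self, room_id: &str, event_id: &str) {
		self.federation_handletime
			.write()
			.expect("locked")
			.insert(room_id.to_owned(), (event_id.to_owned(), Instant::now()));
	}

	// Clear the marker once handling completes (mirrors the .remove hunks).
	fn finish(&self, room_id: &str) {
		self.federation_handletime
			.write()
			.expect("locked")
			.remove(room_id);
	}
}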
@@ -200,9 +213,7 @@ impl Service {
 
 		// Done with prev events, now handling the incoming event
 		let start_time = Instant::now();
-		services()
-			.globals
-			.roomid_federationhandletime
+		self.federation_handletime
 			.write()
 			.expect("locked")
 			.insert(room_id.to_owned(), (event_id.to_owned(), start_time));
@@ -211,9 +222,7 @@ impl Service {
 			.upgrade_outlier_to_timeline_pdu(incoming_pdu, val, &create_event, origin, room_id, pub_key_map)
 			.await;
 
-		services()
-			.globals
-			.roomid_federationhandletime
+		self.federation_handletime
 			.write()
 			.expect("locked")
 			.remove(&room_id.to_owned());
@@ -272,9 +281,7 @@ impl Service {
 				}
 
 				let start_time = Instant::now();
-				services()
-					.globals
-					.roomid_federationhandletime
+				self.federation_handletime
 					.write()
 					.expect("locked")
 					.insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time));
@@ -282,9 +289,7 @@ impl Service {
 				self.upgrade_outlier_to_timeline_pdu(pdu, json, create_event, origin, room_id, pub_key_map)
 					.await?;
 
-				services()
-					.globals
-					.roomid_federationhandletime
+				self.federation_handletime
 					.write()
 					.expect("locked")
 					.remove(&room_id.to_owned());
@@ -579,7 +584,7 @@ impl Service {
 
 		// We start looking at current room state now, so lets lock the room
 		trace!("Locking the room");
-		let state_lock = services().globals.roomid_mutex_state.lock(room_id).await;
+		let state_lock = services().rooms.state.mutex.lock(room_id).await;
 
 		// Now we calculate the set of extremities this room has after the incoming
 		// event has been applied. We start with the previous extremities (aka leaves)
@@ -4,7 +4,7 @@ use conduit::{utils, Error, Result};
 use database::{Database, Map};
 use ruma::{EventId, OwnedEventId, RoomId};
 
-use crate::globals::RoomMutexGuard;
+use super::RoomMutexGuard;
 
 pub(super) struct Data {
 	shorteventid_shortstatehash: Arc<Map>,
@@ -5,7 +5,10 @@ use std::{
 	sync::Arc,
 };
 
-use conduit::{utils::calculate_hash, warn, Error, Result};
+use conduit::{
+	utils::{calculate_hash, MutexMap, MutexMapGuard},
+	warn, Error, Result,
+};
 use data::Data;
 use ruma::{
 	api::client::error::ErrorKind,
@@ -15,20 +18,25 @@ use ruma::{
 	},
 	serde::Raw,
 	state_res::{self, StateMap},
-	EventId, OwnedEventId, RoomId, RoomVersionId, UserId,
+	EventId, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, UserId,
 };
 
 use super::state_compressor::CompressedStateEvent;
-use crate::{globals::RoomMutexGuard, services, PduEvent};
+use crate::{services, PduEvent};
 
 pub struct Service {
 	db: Data,
+	pub mutex: RoomMutexMap,
 }
 
+type RoomMutexMap = MutexMap<OwnedRoomId, ()>;
+pub type RoomMutexGuard = MutexMapGuard<OwnedRoomId, ()>;
+
 impl crate::Service for Service {
 	fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
 		Ok(Arc::new(Self {
 			db: Data::new(args.db),
+			mutex: RoomMutexMap::new(),
 		}))
 	}
 
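Because the guard type is re-exported here as `rooms::state::RoomMutexGuard`, call sites that thread the lock into the timeline keep their signatures; they only change where the lock is acquired. A sketch of the new call-site shape (the `build_and_append_pdu` signature is approximated from the call sites in this diff):

let state_lock = services().rooms.state.mutex.lock(&room_id).await;
services()
	.rooms
	.timeline
	.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
	.await?;
drop(state_lock); // release the room's state lock once the event is appended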
@@ -33,7 +33,7 @@ use ruma::{
 };
 use serde_json::value::to_raw_value;
 
-use crate::{globals::RoomMutexGuard, pdu::PduBuilder, services, PduEvent};
+use crate::{pdu::PduBuilder, rooms::state::RoomMutexGuard, services, PduEvent};
 
 pub struct Service {
 	db: Data,
@@ -6,7 +6,11 @@ use std::{
 	sync::Arc,
 };
 
-use conduit::{debug, error, info, utils, validated, warn, Error, Result};
+use conduit::{
+	debug, error, info, utils,
+	utils::{MutexMap, MutexMapGuard},
+	validated, warn, Error, Result,
+};
 use data::Data;
 use itertools::Itertools;
 use ruma::{
@@ -26,8 +30,8 @@ use ruma::{
 	push::{Action, Ruleset, Tweak},
 	serde::Base64,
 	state_res::{self, Event, RoomVersion},
-	uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedServerName, RoomId,
-	RoomVersionId, ServerName, UserId,
+	uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName,
+	RoomId, RoomVersionId, ServerName, UserId,
 };
 use serde::Deserialize;
 use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
|
||||||
use crate::{
|
use crate::{
|
||||||
admin,
|
admin,
|
||||||
appservice::NamespaceRegex,
|
appservice::NamespaceRegex,
|
||||||
globals::RoomMutexGuard,
|
|
||||||
pdu::{EventHash, PduBuilder},
|
pdu::{EventHash, PduBuilder},
|
||||||
rooms::{event_handler::parse_incoming_pdu, state_compressor::CompressedStateEvent},
|
rooms::{event_handler::parse_incoming_pdu, state_compressor::CompressedStateEvent},
|
||||||
server_is_ours, services, PduCount, PduEvent,
|
server_is_ours, services, PduCount, PduEvent,
|
||||||
|
@ -66,12 +69,17 @@ struct ExtractBody {
|
||||||
|
|
||||||
pub struct Service {
|
pub struct Service {
|
||||||
db: Data,
|
db: Data,
|
||||||
|
pub mutex_insert: RoomMutexMap,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type RoomMutexMap = MutexMap<OwnedRoomId, ()>;
|
||||||
|
pub type RoomMutexGuard = MutexMapGuard<OwnedRoomId, ()>;
|
||||||
|
|
||||||
impl crate::Service for Service {
|
impl crate::Service for Service {
|
||||||
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
|
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
|
||||||
Ok(Arc::new(Self {
|
Ok(Arc::new(Self {
|
||||||
db: Data::new(args.db),
|
db: Data::new(args.db),
|
||||||
|
mutex_insert: RoomMutexMap::new(),
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -269,11 +277,7 @@ impl Service {
 			.state
 			.set_forward_extremities(&pdu.room_id, leaves, state_lock)?;
 
-		let insert_lock = services()
-			.globals
-			.roomid_mutex_insert
-			.lock(&pdu.room_id)
-			.await;
+		let insert_lock = self.mutex_insert.lock(&pdu.room_id).await;
 
 		let count1 = services().globals.next_count()?;
 		// Mark as read first so the sending client doesn't get a notification even if
@@ -1154,8 +1158,9 @@ impl Service {
 
 		// Lock so we cannot backfill the same pdu twice at the same time
 		let mutex_lock = services()
-			.globals
-			.roomid_mutex_federation
+			.rooms
+			.event_handler
+			.mutex_federation
 			.lock(&room_id)
 			.await;
 
|
@ -1187,7 +1192,7 @@ impl Service {
|
||||||
.get_shortroomid(&room_id)?
|
.get_shortroomid(&room_id)?
|
||||||
.expect("room exists");
|
.expect("room exists");
|
||||||
|
|
||||||
let insert_lock = services().globals.roomid_mutex_insert.lock(&room_id).await;
|
let insert_lock = self.mutex_insert.lock(&room_id).await;
|
||||||
|
|
||||||
let max = u64::MAX;
|
let max = u64::MAX;
|
||||||
let count = services().globals.next_count()?;
|
let count = services().globals.next_count()?;
|
||||||
|
|