cleanup on drop for utils::mutex_map.

Signed-off-by: Jason Volk <jason@zemos.net>

parent 01b2928d55
commit 2d251eb19c

10 changed files with 131 additions and 54 deletions
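
The change this commit is named for: guards handed out by `utils::mutex_map` now remove their key's entry from the map when dropped, and call sites move from the raw `mutex_map::Guard<()>` type to new `RoomMutexMap`/`RoomMutexGuard` aliases defined in `globals`. Below is a minimal sketch of the cleanup-on-drop idea, assuming a tokio async mutex and illustrative internals; it is not conduit's actual implementation.

use std::{
	collections::HashMap,
	hash::Hash,
	sync::{Arc, Mutex as StdMutex},
};

use tokio::sync::{Mutex, OwnedMutexGuard};

/// Map of named mutexes; a key's entry exists only while some guard
/// (or waiter) for that key is alive.
pub struct MutexMap<Key, Val> {
	map: Arc<StdMutex<HashMap<Key, Arc<Mutex<Val>>>>>,
}

/// Guard for one key; dropping the last guard for a key removes the
/// key's entry from the map (the cleanup on drop).
pub struct MutexMapGuard<Key: Eq + Hash, Val> {
	map: Arc<StdMutex<HashMap<Key, Arc<Mutex<Val>>>>>,
	key: Key,
	_guard: OwnedMutexGuard<Val>,
}

impl<Key: Eq + Hash + Clone, Val: Default> MutexMap<Key, Val> {
	pub fn new() -> Self {
		Self {
			map: Arc::new(StdMutex::new(HashMap::new())),
		}
	}

	/// Lock the mutex for `key`, inserting an entry on first use.
	pub async fn lock(&self, key: &Key) -> MutexMapGuard<Key, Val> {
		let entry = self
			.map
			.lock()
			.expect("map lock poisoned")
			.entry(key.clone())
			.or_default()
			.clone();

		MutexMapGuard {
			map: Arc::clone(&self.map),
			key: key.clone(),
			_guard: entry.lock_owned().await,
		}
	}
}

impl<Key: Eq + Hash, Val> Drop for MutexMapGuard<Key, Val> {
	fn drop(&mut self) {
		let mut map = self.map.lock().expect("map lock poisoned");
		// Two strong refs remain when we are the sole holder: the map's
		// copy and the one inside our OwnedMutexGuard. More than two
		// means another guard or waiter exists, so the entry stays.
		let last = map
			.get(&self.key)
			.is_some_and(|entry| Arc::strong_count(entry) <= 2);
		if last {
			map.remove(&self.key);
		}
	}
}

Without this, maps like `roomid_mutex_state` retain one entry per room ever locked; with it, an entry lives only as long as its guards, which is what the commit title means by cleanup on drop.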

@@ -9,7 +9,7 @@ use std::{
 };
 
 use async_trait::async_trait;
-use conduit::{error, utils::mutex_map, Error, Result};
+use conduit::{error, Error, Result};
 pub use create::create_admin_room;
 pub use grant::make_user_admin;
 use loole::{Receiver, Sender};

@@ -26,7 +26,7 @@ use tokio::{
 	task::JoinHandle,
 };
 
-use crate::{pdu::PduBuilder, services, user_is_local, PduEvent};
+use crate::{globals::RoomMutexGuard, pdu::PduBuilder, services, user_is_local, PduEvent};
 
 const COMMAND_QUEUE_LIMIT: usize = 512;

@@ -270,7 +270,7 @@ async fn respond_to_room(content: RoomMessageEventContent, room_id: &RoomId, use
 }
 
 async fn handle_response_error(
-	e: &Error, room_id: &RoomId, user_id: &UserId, state_lock: &mutex_map::Guard<()>,
+	e: &Error, room_id: &RoomId, user_id: &UserId, state_lock: &RoomMutexGuard,
 ) -> Result<()> {
 	error!("Failed to build and append admin room response PDU: \"{e}\"");
 	let error_room_message = RoomMessageEventContent::text_plain(format!(

@@ -12,7 +12,11 @@ use std::{
 	time::Instant,
 };
 
-use conduit::{error, trace, utils::MutexMap, Config, Result};
+use conduit::{
+	error, trace,
+	utils::{MutexMap, MutexMapGuard},
+	Config, Result,
+};
 use data::Data;
 use ipaddress::IPAddress;
 use regex::RegexSet;

@@ -27,8 +31,6 @@ use url::Url;
 
 use crate::services;
 
-type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries
-
 pub struct Service {
 	pub db: Data,

@@ -43,9 +45,9 @@ pub struct Service {
 	pub bad_event_ratelimiter: Arc<RwLock<HashMap<OwnedEventId, RateLimitState>>>,
 	pub bad_signature_ratelimiter: Arc<RwLock<HashMap<Vec<String>, RateLimitState>>>,
 	pub bad_query_ratelimiter: Arc<RwLock<HashMap<OwnedServerName, RateLimitState>>>,
-	pub roomid_mutex_insert: MutexMap<OwnedRoomId, ()>,
-	pub roomid_mutex_state: MutexMap<OwnedRoomId, ()>,
-	pub roomid_mutex_federation: MutexMap<OwnedRoomId, ()>,
+	pub roomid_mutex_insert: RoomMutexMap,
+	pub roomid_mutex_state: RoomMutexMap,
+	pub roomid_mutex_federation: RoomMutexMap,
 	pub roomid_federationhandletime: RwLock<HashMap<OwnedRoomId, (OwnedEventId, Instant)>>,
 	pub updates_handle: Mutex<Option<JoinHandle<()>>>,
 	pub stateres_mutex: Arc<Mutex<()>>,

@@ -53,6 +55,10 @@ pub struct Service {
 	pub admin_alias: OwnedRoomAliasId,
 }
 
+pub type RoomMutexMap = MutexMap<OwnedRoomId, ()>;
+pub type RoomMutexGuard = MutexMapGuard<OwnedRoomId, ()>;
+type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries
+
 impl crate::Service for Service {
 	fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
 		let config = &args.server.config;
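
With the aliases above, call sites can name the room mutex types directly instead of spelling out `MutexMap<OwnedRoomId, ()>` and `mutex_map::Guard<()>`. A hypothetical call site in the style of the code in this diff (the exact `lock` signature is assumed):

// Hypothetical: take the room's state mutex, then thread the guard through
// the call chain as `&RoomMutexGuard`; its map entry is cleaned up on drop.
let state_lock: RoomMutexGuard = services()
	.globals
	.roomid_mutex_state
	.lock(&room_id)
	.await;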

@@ -3,7 +3,8 @@ use std::{collections::HashSet, sync::Arc};
 use conduit::{utils, Error, Result};
 use database::{Database, Map};
 use ruma::{EventId, OwnedEventId, RoomId};
-use utils::mutex_map;
+
+use crate::globals::RoomMutexGuard;
 
 pub(super) struct Data {
 	shorteventid_shortstatehash: Arc<Map>,

@@ -35,7 +36,7 @@ impl Data {
 		&self,
 		room_id: &RoomId,
 		new_shortstatehash: u64,
-		_mutex_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
+		_mutex_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<()> {
 		self.roomid_shortstatehash
 			.insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?;

@@ -68,7 +69,7 @@ impl Data {
 		&self,
 		room_id: &RoomId,
 		event_ids: Vec<OwnedEventId>,
-		_mutex_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
+		_mutex_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<()> {
 		let mut prefix = room_id.as_bytes().to_vec();
 		prefix.push(0xFF);
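
Note that the renamed `_mutex_lock`/`state_lock` parameters are never read: as the inline comments say, they exist so a caller cannot reach these functions without first taking the room's state mutex. A reduced sketch of that lock-witness pattern (names hypothetical):

// A &RoomMutexGuard can only be obtained by locking the room's state mutex,
// so requiring one as a parameter proves the lock is held for the write below.
fn write_room_state(room_id: &RoomId, shortstatehash: u64, _proof: &RoomMutexGuard) -> Result<()> {
	// ... guarded database write ...
	Ok(())
}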

@@ -5,10 +5,7 @@ use std::{
 	sync::Arc,
 };
 
-use conduit::{
-	utils::{calculate_hash, mutex_map},
-	warn, Error, Result,
-};
+use conduit::{utils::calculate_hash, warn, Error, Result};
 use data::Data;
 use ruma::{
 	api::client::error::ErrorKind,

@@ -22,7 +19,7 @@ use ruma::{
 };
 
 use super::state_compressor::CompressedStateEvent;
-use crate::{services, PduEvent};
+use crate::{globals::RoomMutexGuard, services, PduEvent};
 
 pub struct Service {
 	db: Data,

@@ -46,7 +43,7 @@ impl Service {
 		shortstatehash: u64,
 		statediffnew: Arc<HashSet<CompressedStateEvent>>,
 		_statediffremoved: Arc<HashSet<CompressedStateEvent>>,
-		state_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
+		state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<()> {
 		for event_id in statediffnew.iter().filter_map(|new| {
 			services()

@@ -318,7 +315,7 @@ impl Service {
 		&self,
 		room_id: &RoomId,
 		shortstatehash: u64,
-		mutex_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
+		mutex_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<()> {
 		self.db.set_room_state(room_id, shortstatehash, mutex_lock)
 	}

@@ -358,7 +355,7 @@ impl Service {
 		&self,
 		room_id: &RoomId,
 		event_ids: Vec<OwnedEventId>,
-		state_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
+		state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<()> {
 		self.db
 			.set_forward_extremities(room_id, event_ids, state_lock)

@@ -6,11 +6,7 @@ use std::{
 	sync::{Arc, Mutex as StdMutex, Mutex},
 };
 
-use conduit::{
-	error,
-	utils::{math::usize_from_f64, mutex_map},
-	warn, Error, Result,
-};
+use conduit::{error, utils::math::usize_from_f64, warn, Error, Result};
 use data::Data;
 use lru_cache::LruCache;
 use ruma::{

@@ -37,7 +33,7 @@ use ruma::{
 };
 use serde_json::value::to_raw_value;
 
-use crate::{pdu::PduBuilder, services, PduEvent};
+use crate::{globals::RoomMutexGuard, pdu::PduBuilder, services, PduEvent};
 
 pub struct Service {
 	db: Data,

@@ -333,7 +329,7 @@ impl Service {
 	}
 
 	pub fn user_can_invite(
-		&self, room_id: &RoomId, sender: &UserId, target_user: &UserId, state_lock: &mutex_map::Guard<()>,
+		&self, room_id: &RoomId, sender: &UserId, target_user: &UserId, state_lock: &RoomMutexGuard,
 	) -> Result<bool> {
 		let content = to_raw_value(&RoomMemberEventContent::new(MembershipState::Invite))
 			.expect("Event content always serializes");

@@ -6,7 +6,7 @@ use std::{
 	sync::Arc,
 };
 
-use conduit::{debug, error, info, utils, utils::mutex_map, validated, warn, Error, Result};
+use conduit::{debug, error, info, utils, validated, warn, Error, Result};
 use data::Data;
 use itertools::Itertools;
 use ruma::{

@@ -36,6 +36,7 @@ use tokio::sync::RwLock;
 use crate::{
 	admin,
 	appservice::NamespaceRegex,
+	globals::RoomMutexGuard,
 	pdu::{EventHash, PduBuilder},
 	rooms::{event_handler::parse_incoming_pdu, state_compressor::CompressedStateEvent},
 	server_is_ours, services, PduCount, PduEvent,

@@ -203,7 +204,7 @@ impl Service {
 		pdu: &PduEvent,
 		mut pdu_json: CanonicalJsonObject,
 		leaves: Vec<OwnedEventId>,
-		state_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
+		state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<Vec<u8>> {
 		// Coalesce database writes for the remainder of this scope.
 		let _cork = services().db.cork_and_flush();

@@ -593,7 +594,7 @@ impl Service {
 		pdu_builder: PduBuilder,
 		sender: &UserId,
 		room_id: &RoomId,
-		_mutex_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
+		_mutex_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<(PduEvent, CanonicalJsonObject)> {
 		let PduBuilder {
 			event_type,

@@ -780,7 +781,7 @@ impl Service {
 		pdu_builder: PduBuilder,
 		sender: &UserId,
 		room_id: &RoomId,
-		state_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
+		state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<Arc<EventId>> {
 		let (pdu, pdu_json) = self.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)?;
 		if let Some(admin_room) = admin::Service::get_admin_room()? {

@@ -963,7 +964,7 @@ impl Service {
 		new_room_leaves: Vec<OwnedEventId>,
 		state_ids_compressed: Arc<HashSet<CompressedStateEvent>>,
 		soft_fail: bool,
-		state_lock: &mutex_map::Guard<()>, // Take mutex guard to make sure users get the room state mutex
+		state_lock: &RoomMutexGuard, // Take mutex guard to make sure users get the room state mutex
 	) -> Result<Option<Vec<u8>>> {
 		// We append to state before appending the pdu, so we don't have a moment in
 		// time with the pdu without it's state. This is okay because append_pdu can't