optimize state compressor I/O w/ batch operation

Signed-off-by: Jason Volk <jason@zemos.net>
Jason Volk 2024-11-25 23:27:16 +00:00
parent 527494a34b
commit dd8c646b63
3 changed files with 69 additions and 35 deletions
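
The commit replaces per-event awaited calls into the state compressor with batched stream operations. For orientation, here is a minimal, self-contained sketch of that pattern using the futures crate; the types and the compress_one helper are hypothetical stand-ins rather than the project's actual service API, and the concurrency limit of 64 is an arbitrary illustration.

// Minimal sketch of the batching pattern behind this commit (hypothetical
// helper and types, not the project's actual service API).
use std::collections::HashSet;

use futures::{stream, StreamExt};

type ShortStateKey = u64;
type CompressedStateEvent = [u8; 16];

// Hypothetical stand-in for a per-event lookup that hits the database.
async fn compress_one(key: ShortStateKey, event_id: &str) -> CompressedStateEvent {
    let _ = (key, event_id);
    [0u8; 16]
}

// Before: one awaited round-trip per state event.
async fn compress_sequential(state: &[(ShortStateKey, String)]) -> HashSet<CompressedStateEvent> {
    let mut out = HashSet::new();
    for (key, event_id) in state {
        out.insert(compress_one(*key, event_id).await);
    }
    out
}

// After: drive the lookups through a stream and collect the results,
// allowing up to 64 requests in flight at once (limit chosen arbitrarily).
async fn compress_batched(state: &[(ShortStateKey, String)]) -> HashSet<CompressedStateEvent> {
    stream::iter(state)
        .map(|(key, event_id)| compress_one(*key, event_id))
        .buffer_unordered(64)
        .collect()
        .await
}

The sequential version pays a full round-trip per state event; the stream version lets the backend overlap or coalesce those lookups, which is where the I/O win comes from.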


@@ -79,23 +79,30 @@ pub async fn resolve_state(
     drop(lock);
 
-    debug!("State resolution done. Compressing state");
-    let mut new_room_state = HashSet::new();
-    for ((event_type, state_key), event_id) in state {
-        let shortstatekey = self
-            .services
-            .short
-            .get_or_create_shortstatekey(&event_type.to_string().into(), &state_key)
-            .await;
+    debug!("State resolution done.");
+    let state_events: Vec<_> = state
+        .iter()
+        .stream()
+        .then(|((event_type, state_key), event_id)| {
+            self.services
+                .short
+                .get_or_create_shortstatekey(event_type, state_key)
+                .map(move |shortstatekey| (shortstatekey, event_id))
+        })
+        .collect()
+        .await;
 
-        let compressed = self
-            .services
-            .state_compressor
-            .compress_state_event(shortstatekey, &event_id)
-            .await;
-
-        new_room_state.insert(compressed);
-    }
+    debug!("Compressing state...");
+    let new_room_state: HashSet<_> = self
+        .services
+        .state_compressor
+        .compress_state_events(
+            state_events
+                .iter()
+                .map(|(ref ssk, eid)| (ssk, (*eid).borrow())),
+        )
+        .collect()
+        .await;
 
     Ok(Arc::new(new_room_state))
 }


@@ -1,4 +1,5 @@
 use std::{
+    borrow::Borrow,
     collections::{BTreeMap, HashSet},
     sync::Arc,
     time::Instant,
@@ -193,15 +194,16 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu(
     extremities.retain(|id| retained.contains(id));
 
     debug!("Retained {} extremities. Compressing state", extremities.len());
 
-    let mut state_ids_compressed = HashSet::new();
-    for (shortstatekey, id) in &state_at_incoming_event {
-        state_ids_compressed.insert(
-            self.services
-                .state_compressor
-                .compress_state_event(*shortstatekey, id)
-                .await,
-        );
-    }
+    let state_ids_compressed: HashSet<_> = self
+        .services
+        .state_compressor
+        .compress_state_events(
+            state_at_incoming_event
+                .iter()
+                .map(|(ssk, eid)| (ssk, eid.borrow())),
+        )
+        .collect()
+        .await;
 
     let state_ids_compressed = Arc::new(state_ids_compressed);
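
Both hunks depend on the compressor exposing a batch entry point, compress_state_events, that accepts an iterator of (shortstatekey, event id) pairs and yields results the caller collects. As a hedged sketch of what such an iterator-in API can look like on the provider side, assuming a hypothetical Db::multi_get_short_event_ids backend call and a simplified Vec return instead of the stream the real method yields:

// Hedged sketch of an iterator-in batch API (hypothetical backend types;
// the real service wires this into its own database layer and returns a
// stream the caller collects).
type ShortStateKey = u64;
type ShortEventId = u64;
type CompressedStateEvent = [u8; 16];

struct Db;

impl Db {
    // Hypothetical multi-get: resolve many event ids in one round-trip.
    async fn multi_get_short_event_ids(&self, event_ids: &[String]) -> Vec<ShortEventId> {
        event_ids.iter().map(|_| 0).collect()
    }
}

struct StateCompressor {
    db: Db,
}

impl StateCompressor {
    // Accepts any iterator of (short state key, event id) pairs and
    // compresses them after a single batched lookup.
    async fn compress_state_events<'a, I>(&self, pairs: I) -> Vec<CompressedStateEvent>
    where
        I: Iterator<Item = (ShortStateKey, &'a str)>,
    {
        // Split the pairs so all event ids can be looked up at once.
        let (keys, ids): (Vec<ShortStateKey>, Vec<String>) =
            pairs.map(|(key, id)| (key, id.to_owned())).unzip();

        // One batched lookup instead of one await per state event.
        let shorteventids = self.db.multi_get_short_event_ids(&ids).await;

        // Pack each (shortstatekey, shorteventid) pair into a fixed-size entry.
        keys.into_iter()
            .zip(shorteventids)
            .map(|(key, sid)| {
                let mut entry = [0u8; 16];
                entry[..8].copy_from_slice(&key.to_be_bytes());
                entry[8..].copy_from_slice(&sid.to_be_bytes());
                entry
            })
            .collect()
    }
}

Collecting all keys up front is what enables the single multi-get; the trade-off is holding the full key list in memory for the duration of the call.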