delete unnecessary real_users_cache, fix overwriting push_target iter, add proper function for getting local active users in room
This `real_users_cache` cache seems odd, and I have no idea what prompted its creation upstream. Perhaps it was added because SQLite was very slow and their RocksDB setup is poorly tuned, so the "solution" was to stick member counts in memory. Slow iterators, scanning, etc. do not apply to conduwuit, where our RocksDB is extremely tuned, and I seriously doubt something like this has any real-world net-positive performance impact. There is also suspicious logic where we overwrite the entire push target collection. Both of these could be a potential cause of receiving notifications in rooms we've left.

Signed-off-by: strawberry <strawberry@puppygock.gay>
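For readers unfamiliar with the new call, here is a minimal sketch of what a helper like `active_local_users_in_room` could look like; the `joined_members` and `server_name` parameters and the return type are assumptions for illustration, not the actual conduwuit API:

```rust
// Illustrative sketch only, NOT the real conduwuit implementation.
// Assumed inputs: a list of joined members and our own server name.
use ruma::{OwnedUserId, RoomId, ServerName};

/// Hypothetical helper: joined members of `room_id` that belong to this homeserver.
fn active_local_users_in_room(
	_room_id: &RoomId,
	joined_members: &[OwnedUserId], // assumed: already-fetched joined members of the room
	server_name: &ServerName,       // assumed: this homeserver's name
) -> Vec<OwnedUserId> {
	joined_members
		.iter()
		.filter(|user| user.server_name() == server_name)
		.cloned()
		.collect()
}
```

The key point, visible in the diff below, is that the caller receives an owned collection it can mutate directly instead of an `Arc`-wrapped cache entry.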
parent c1227340b3
commit c738c119f8
6 changed files with 30 additions and 58 deletions
```diff
@@ -309,21 +309,19 @@ impl Service {
 		let mut push_target = services()
 			.rooms
 			.state_cache
-			.get_our_real_users(&pdu.room_id)?;
+			.active_local_users_in_room(&pdu.room_id);
 
 		if pdu.kind == TimelineEventType::RoomMember {
 			if let Some(state_key) = &pdu.state_key {
 				let target_user_id = UserId::parse(state_key.clone()).expect("This state_key was previously validated");
 
 				if !push_target.contains(&target_user_id) {
-					let mut target = push_target.as_ref().clone();
-					target.insert(target_user_id);
-					push_target = Arc::new(target);
+					push_target.push(target_user_id);
 				}
 			}
 		}
 
-		for user in push_target.iter() {
+		for user in &push_target {
 			// Don't notify the user of their own events
 			if user == &pdu.sender {
 				continue;
```
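To summarize the fixed flow in the hunk above, here is a self-contained, illustrative sketch (simplified stand-in names and types, not the real `Service` code): the membership target is appended in place instead of cloning the collection and re-wrapping it in an `Arc`, and the sender is filtered out so users are never notified of their own events.

```rust
// Illustrative stand-in for the logic in the hunk above, not the real Service
// method; the function name and parameter types are assumptions.
use ruma::OwnedUserId;

fn users_to_notify(
	mut push_target: Vec<OwnedUserId>,      // assumed output of active_local_users_in_room
	membership_target: Option<OwnedUserId>, // state_key of an m.room.member PDU, if any
	sender: &OwnedUserId,                   // the PDU sender
) -> Vec<OwnedUserId> {
	// Ensure the member event's target is in the push-target list without
	// rebuilding the whole collection.
	if let Some(target_user_id) = membership_target {
		if !push_target.contains(&target_user_id) {
			push_target.push(target_user_id);
		}
	}

	// Don't notify the user of their own events.
	push_target.retain(|user| user != sender);
	push_target
}
```

Appending in place avoids the clone-and-replace dance on a shared `Arc` collection that the old code needed, and keeps the push-target list scoped to the single PDU being processed rather than a long-lived cache.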