2023-07-06 10:32:25 +02:00
|
|
|
use std::{
|
2024-08-31 18:58:39 +02:00
|
|
|
cmp::{self, Ordering},
|
2023-07-06 10:32:25 +02:00
|
|
|
collections::{hash_map::Entry, BTreeMap, BTreeSet, HashMap, HashSet},
|
|
|
|
time::Duration,
|
|
|
|
};
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-07-16 08:05:25 +00:00
|
|
|
use axum::extract::State;
|
2024-07-07 06:17:58 +00:00
|
|
|
use conduit::{
|
2024-10-04 17:07:31 +00:00
|
|
|
debug, err, error, is_equal_to,
|
|
|
|
result::{FlatOk, IntoIsOk},
|
2024-08-08 17:18:30 +00:00
|
|
|
utils::{
|
|
|
|
math::{ruma_from_u64, ruma_from_usize, usize_from_ruma, usize_from_u64_truncated},
|
2024-10-01 02:47:39 +00:00
|
|
|
BoolExt, IterStream, ReadyExt, TryFutureExtExt,
|
2024-08-08 17:18:30 +00:00
|
|
|
},
|
|
|
|
warn, PduCount,
|
2024-07-07 06:17:58 +00:00
|
|
|
};
|
2024-10-03 09:38:10 +00:00
|
|
|
use futures::{future::OptionFuture, pin_mut, FutureExt, StreamExt, TryFutureExt};
|
2020-07-30 18:14:47 +02:00
|
|
|
use ruma::{
|
2022-02-18 15:33:14 +01:00
|
|
|
api::client::{
|
2024-08-24 06:57:31 +02:00
|
|
|
error::ErrorKind,
|
2022-12-14 13:09:10 +01:00
|
|
|
filter::{FilterDefinition, LazyLoadOptions},
|
2023-03-13 10:39:02 +01:00
|
|
|
sync::sync_events::{
|
|
|
|
self,
|
|
|
|
v3::{
|
|
|
|
Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom, LeftRoom, Presence,
|
2024-07-16 08:05:25 +00:00
|
|
|
RoomAccountData, RoomSummary, Rooms, State as RoomState, Timeline, ToDevice,
|
2023-03-13 10:39:02 +01:00
|
|
|
},
|
2024-08-21 19:47:39 -04:00
|
|
|
v4::{SlidingOp, SlidingSyncRoomHero},
|
2023-03-13 10:39:02 +01:00
|
|
|
DeviceLists, UnreadNotificationsCount,
|
|
|
|
},
|
2022-01-05 18:15:00 +01:00
|
|
|
uiaa::UiaaResponse,
|
2024-08-30 10:31:08 +02:00
|
|
|
},
|
2024-09-07 18:09:21 +02:00
|
|
|
directory::RoomTypeFilter,
|
2024-08-30 10:31:08 +02:00
|
|
|
events::{
|
|
|
|
presence::PresenceEvent,
|
|
|
|
room::member::{MembershipState, RoomMemberEventContent},
|
2024-09-29 20:57:33 -04:00
|
|
|
AnyRawAccountDataEvent, StateEventType,
|
|
|
|
TimelineEventType::{self, *},
|
2024-08-30 10:31:08 +02:00
|
|
|
},
|
|
|
|
serde::Raw,
|
|
|
|
state_res::Event,
|
|
|
|
uint, DeviceId, EventId, MilliSecondsSinceUnixEpoch, OwnedRoomId, OwnedUserId, RoomId, UInt, UserId,
|
2020-07-30 18:14:47 +02:00
|
|
|
};
|
2024-08-30 18:23:42 +02:00
|
|
|
use service::rooms::read_receipt::pack_receipts;
|
2024-07-07 06:17:58 +00:00
|
|
|
use tracing::{Instrument as _, Span};
|
2021-06-30 09:52:01 +02:00
|
|
|
|
2024-07-16 08:05:25 +00:00
|
|
|
use crate::{
|
|
|
|
service::{pdu::EventHash, Services},
|
|
|
|
utils, Error, PduEvent, Result, Ruma, RumaResponse,
|
|
|
|
};
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-24 06:45:33 +02:00
|
|
|
// Key/tag for a client's single shared sync connection state.
// NOTE(review): the consumer of this constant is not visible in this chunk —
// presumably the sliding-sync (v4) connection cache; confirm against the rest
// of the file before relying on this description.
const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync";
|
2024-09-29 20:57:33 -04:00
|
|
|
// Timeline event types that are considered "activity" by default.
// NOTE(review): usage is not visible in this chunk — presumably the default
// `bump_event_types` for sliding sync room ordering; confirm at the use site.
const DEFAULT_BUMP_TYPES: &[TimelineEventType; 6] =
	&[RoomMessage, RoomEncrypted, Sticker, CallInvite, PollStart, Beacon];
|
2024-08-24 06:45:33 +02:00
|
|
|
|
2024-08-30 10:31:08 +02:00
|
|
|
/// Extract the payload of a single-field enum variant.
///
/// Evaluates to `Some(payload)` when `$e` matches `$variant(payload)`, and to
/// `None` for any other variant.
macro_rules! extract_variant {
	($e:expr, $variant:path) => {
		if let $variant(value) = $e {
			Some(value)
		} else {
			None
		}
	};
}
|
|
|
|
|
2020-08-22 21:38:01 +02:00
|
|
|
/// # `GET /_matrix/client/r0/sync`
|
|
|
|
///
|
|
|
|
/// Synchronize the client's state with the latest state on the server.
|
|
|
|
///
|
|
|
|
/// - This endpoint takes a `since` parameter which should be the `next_batch`
|
2024-06-16 00:36:49 +00:00
|
|
|
/// value from a previous request for incremental syncs.
|
2021-08-31 19:14:37 +02:00
|
|
|
///
|
|
|
|
/// Calling this endpoint without a `since` parameter returns:
|
|
|
|
/// - Some of the most recent events of each timeline
|
|
|
|
/// - Notification counts for each room
|
|
|
|
/// - Joined and invited member counts, heroes
|
|
|
|
/// - All state events
|
|
|
|
///
|
|
|
|
/// Calling this endpoint with a `since` parameter from a previous `next_batch`
|
|
|
|
/// returns: For joined rooms:
|
|
|
|
/// - Some of the most recent events of each timeline that happened after since
|
2022-01-09 13:42:25 +01:00
|
|
|
/// - If user joined the room after since: All state events (unless lazy loading
|
2024-06-16 00:36:49 +00:00
|
|
|
/// is activated) and all device list updates in that room
|
2021-08-31 19:14:37 +02:00
|
|
|
/// - If the user was already in the room: A list of all events that are in the
|
2024-06-16 00:36:49 +00:00
|
|
|
/// state now, but were not in the state at `since`
|
2021-08-31 19:14:37 +02:00
|
|
|
/// - If the state we send contains a member event: Joined and invited member
|
|
|
|
/// counts, heroes
|
|
|
|
/// - Device list updates that happened after `since`
|
|
|
|
/// - If there are events in the timeline we send or the user send updated his
|
|
|
|
/// read mark: Notification counts
|
|
|
|
/// - EDUs that are active now (read receipts, typing updates, presence)
|
2022-01-09 13:42:25 +01:00
|
|
|
/// - TODO: Allow multiple sync streams to support Pantalaimon
|
2021-08-31 19:14:37 +02:00
|
|
|
///
|
|
|
|
/// For invited rooms:
|
|
|
|
/// - If the user was invited after `since`: A subset of the state of the room
|
|
|
|
/// at the point of the invite
|
|
|
|
///
|
|
|
|
/// For left rooms:
|
2024-03-22 22:44:31 -04:00
|
|
|
/// - If the user left after `since`: `prev_batch` token, empty state (TODO:
|
2021-08-31 19:14:37 +02:00
|
|
|
/// subset of the state at the point of the leave)
|
2024-04-22 23:48:57 -04:00
|
|
|
pub(crate) async fn sync_events_route(
	State(services): State<crate::State>, body: Ruma<sync_events::v3::Request>,
) -> Result<sync_events::v3::Response, RumaResponse<UiaaResponse>> {
	// Authenticated endpoint: the extractor guarantees both identifiers exist.
	let sender_user = body.sender_user.expect("user is authenticated");
	let sender_device = body.sender_device.expect("user is authenticated");
	let body = body.body;

	// Presence update: record that this user is online/syncing, honoring the
	// `set_presence` field of the request.
	if services.globals.allow_local_presence() {
		services
			.presence
			.ping_presence(&sender_user, &body.set_presence)
			.await?;
	}

	// Setup watchers, so if there's no response, we can wait for them.
	// Created *before* reading the current count so no change is missed
	// between snapshotting and waiting.
	let watcher = services.globals.watch(&sender_user, &sender_device);

	// Snapshot the global change counter; everything up to this point is
	// included in this sync and the stringified value becomes `next_batch`.
	let next_batch = services.globals.current_count()?;
	let next_batchcount = PduCount::Normal(next_batch);
	let next_batch_string = next_batch.to_string();

	// Load filter: inline definition, stored filter looked up by id (falling
	// back to the default filter if the id is unknown), or the default.
	let filter = match body.filter {
		None => FilterDefinition::default(),
		Some(Filter::FilterDefinition(filter)) => filter,
		Some(Filter::FilterId(filter_id)) => services
			.users
			.get_filter(&sender_user, &filter_id)
			.await
			.unwrap_or_default(),
	};

	// some clients, at least element, seem to require knowledge of redundant
	// members for "inline" profiles on the timeline to work properly
	let (lazy_load_enabled, lazy_load_send_redundant) = match filter.room.state.lazy_load_options {
		LazyLoadOptions::Enabled {
			include_redundant_members,
		} => (true, include_redundant_members),
		LazyLoadOptions::Disabled => (false, cfg!(feature = "element_hacks")),
	};

	let full_state = body.full_state;

	let mut joined_rooms = BTreeMap::new();
	// An absent or unparseable `since` token degrades to 0, i.e. initial sync.
	let since = body
		.since
		.as_ref()
		.and_then(|string| string.parse().ok())
		.unwrap_or(0);
	let sincecount = PduCount::Normal(since);

	let mut presence_updates = HashMap::new();
	let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in
	let mut device_list_updates = HashSet::new();
	let mut device_list_left = HashSet::new();

	// Look for device list updates of this account
	device_list_updates.extend(
		services
			.users
			.keys_changed(sender_user.as_ref(), since, None)
			.map(ToOwned::to_owned)
			.collect::<Vec<_>>()
			.await,
	);

	// Collect presence changes from users this user can see (see helper).
	if services.globals.allow_local_presence() {
		process_presence_updates(&services, &mut presence_updates, since, &sender_user).await?;
	}

	let all_joined_rooms: Vec<_> = services
		.rooms
		.state_cache
		.rooms_joined(&sender_user)
		.map(ToOwned::to_owned)
		.collect()
		.await;

	// Coalesce database writes for the remainder of this scope.
	let _cork = services.db.cork_and_flush();

	for room_id in all_joined_rooms {
		// NOTE(review): errors from load_joined_room are silently dropped and
		// the room simply omitted from the response — intentional best-effort
		// behavior; a failing room does not fail the whole sync.
		if let Ok(joined_room) = load_joined_room(
			&services,
			&sender_user,
			&sender_device,
			&room_id,
			since,
			sincecount,
			next_batch,
			next_batchcount,
			lazy_load_enabled,
			lazy_load_send_redundant,
			full_state,
			&mut device_list_updates,
			&mut left_encrypted_users,
		)
		.await
		{
			if !joined_room.is_empty() {
				joined_rooms.insert(room_id.clone(), joined_room);
			}
		}
	}

	let mut left_rooms = BTreeMap::new();
	let all_left_rooms: Vec<_> = services
		.rooms
		.state_cache
		.rooms_left(&sender_user)
		.collect()
		.await;

	for result in all_left_rooms {
		// result.0 is the room id; the per-room work is instrumented under the
		// current span so the `left_room` span nests correctly.
		handle_left_room(
			&services,
			since,
			&result.0,
			&sender_user,
			&mut left_rooms,
			&next_batch_string,
			full_state,
			lazy_load_enabled,
		)
		.instrument(Span::current())
		.await?;
	}

	let mut invited_rooms = BTreeMap::new();
	let all_invited_rooms: Vec<_> = services
		.rooms
		.state_cache
		.rooms_invited(&sender_user)
		.collect()
		.await;

	for (room_id, invite_state_events) in all_invited_rooms {
		// Get and drop the lock to wait for remaining operations to finish
		let insert_lock = services.rooms.timeline.mutex_insert.lock(&room_id).await;
		drop(insert_lock);

		let invite_count = services
			.rooms
			.state_cache
			.get_invite_count(&room_id, &sender_user)
			.await
			.ok();

		// Invited before last sync
		// (None compares less than Some(_), so a missing count also skips.)
		if Some(since) >= invite_count {
			continue;
		}

		invited_rooms.insert(
			room_id.clone(),
			InvitedRoom {
				invite_state: InviteState {
					events: invite_state_events,
				},
			},
		);
	}

	for user_id in left_encrypted_users {
		let dont_share_encrypted_room = !share_encrypted_room(&services, &sender_user, &user_id, None).await;

		// If the user doesn't share an encrypted room with the target anymore, we need
		// to tell them
		if dont_share_encrypted_room {
			device_list_left.insert(user_id);
		}
	}

	// Remove all to-device events the device received *last time*
	services
		.users
		.remove_to_device_events(&sender_user, &sender_device, since)
		.await;

	// Assemble the final response from everything gathered above.
	let response = sync_events::v3::Response {
		next_batch: next_batch_string,
		rooms: Rooms {
			leave: left_rooms,
			join: joined_rooms,
			invite: invited_rooms,
			knock: BTreeMap::new(), // TODO
		},
		presence: Presence {
			events: presence_updates
				.into_values()
				.map(|v| Raw::new(&v).expect("PresenceEvent always serializes successfully"))
				.collect(),
		},
		account_data: GlobalAccountData {
			events: services
				.account_data
				.changes_since(None, &sender_user, since)
				.await?
				.into_iter()
				.filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global))
				.collect(),
		},
		device_lists: DeviceLists {
			changed: device_list_updates.into_iter().collect(),
			left: device_list_left.into_iter().collect(),
		},
		device_one_time_keys_count: services
			.users
			.count_one_time_keys(&sender_user, &sender_device)
			.await,
		to_device: ToDevice {
			events: services
				.users
				.get_to_device_events(&sender_user, &sender_device)
				.collect()
				.await,
		},
		// Fallback keys are not yet supported
		device_unused_fallback_key_types: None,
	};

	// TODO: Retry the endpoint instead of returning
	if !full_state
		&& response.rooms.is_empty()
		&& response.presence.is_empty()
		&& response.account_data.is_empty()
		&& response.device_lists.is_empty()
		&& response.to_device.is_empty()
	{
		// Hang a few seconds so requests are not spammed
		// Stop hanging if new info arrives
		// (client timeout is clamped to at most 30 seconds)
		let default = Duration::from_secs(30);
		let duration = cmp::min(body.timeout.unwrap_or(default), default);
		_ = tokio::time::timeout(duration, watcher).await;
	}

	Ok(response)
}
|
|
|
|
|
2024-07-16 08:05:25 +00:00
|
|
|
/// Build the `LeftRoom` entry for one room the user has left and insert it
/// into `left_rooms`.
///
/// Skips rooms left before `since`. For rooms the server has no metadata for
/// (a rejected invite), a synthetic leave membership event is fabricated so
/// clients still see the departure. Otherwise the state delta between the
/// `since` snapshot and the state at the user's leave event is computed,
/// honoring `full_state` and lazy-loading of member events.
#[allow(clippy::too_many_arguments)]
#[tracing::instrument(skip_all, fields(user_id = %sender_user, room_id = %room_id), name = "left_room")]
async fn handle_left_room(
	services: &Services, since: u64, room_id: &RoomId, sender_user: &UserId,
	left_rooms: &mut BTreeMap<OwnedRoomId, LeftRoom>, next_batch_string: &str, full_state: bool,
	lazy_load_enabled: bool,
) -> Result<()> {
	// Get and drop the lock to wait for remaining operations to finish
	let insert_lock = services.rooms.timeline.mutex_insert.lock(room_id).await;
	drop(insert_lock);

	let left_count = services
		.rooms
		.state_cache
		.get_left_count(room_id, sender_user)
		.await
		.ok();

	// Left before last sync
	// (None compares less than Some(_), so a missing count also returns.)
	if Some(since) >= left_count {
		return Ok(());
	}

	if !services.rooms.metadata.exists(room_id).await {
		// This is just a rejected invite, not a room we know
		// Insert a leave event anyways
		let event = PduEvent {
			event_id: EventId::new(services.globals.server_name()).into(),
			sender: sender_user.to_owned(),
			origin: None,
			origin_server_ts: utils::millis_since_unix_epoch()
				.try_into()
				.expect("Timestamp is valid js_int value"),
			kind: RoomMember,
			content: serde_json::from_str(r#"{"membership":"leave"}"#).expect("this is valid JSON"),
			state_key: Some(sender_user.to_string()),
			unsigned: None,
			// The following keys are dropped on conversion
			room_id: room_id.to_owned(),
			prev_events: vec![],
			depth: uint!(1),
			auth_events: vec![],
			redacts: None,
			hashes: EventHash {
				sha256: String::new(),
			},
			signatures: None,
		};

		left_rooms.insert(
			room_id.to_owned(),
			LeftRoom {
				account_data: RoomAccountData {
					events: Vec::new(),
				},
				timeline: Timeline {
					limited: false,
					prev_batch: Some(next_batch_string.to_owned()),
					events: Vec::new(),
				},
				state: RoomState {
					events: vec![event.to_sync_state_event()],
				},
			},
		);
		return Ok(());
	}

	let mut left_state_events = Vec::new();

	// State snapshot the client saw at `since`; empty map when there is no
	// snapshot (so every current state event counts as changed below).
	let since_shortstatehash = services
		.rooms
		.user
		.get_token_shortstatehash(room_id, since)
		.await;

	let since_state_ids = match since_shortstatehash {
		Ok(s) => services.rooms.state_accessor.state_full_ids(s).await?,
		Err(_) => HashMap::new(),
	};

	let Ok(left_event_id) = services
		.rooms
		.state_accessor
		.room_state_get_id(room_id, &StateEventType::RoomMember, sender_user.as_str())
		.await
	else {
		// Inconsistent database state; bail without an entry rather than erroring.
		error!("Left room but no left state event");
		return Ok(());
	};

	let Ok(left_shortstatehash) = services
		.rooms
		.state_accessor
		.pdu_shortstatehash(&left_event_id)
		.await
	else {
		error!(event_id = %left_event_id, "Leave event has no state");
		return Ok(());
	};

	// Full state at the moment of the leave; the leave event itself is added
	// on top since the stored snapshot is the state *before* the leave.
	let mut left_state_ids = services
		.rooms
		.state_accessor
		.state_full_ids(left_shortstatehash)
		.await?;

	let leave_shortstatekey = services
		.rooms
		.short
		.get_or_create_shortstatekey(&StateEventType::RoomMember, sender_user.as_str())
		.await;

	left_state_ids.insert(leave_shortstatekey, left_event_id);

	let mut i: u8 = 0;
	for (key, id) in left_state_ids {
		// Only events that changed since `since` (or everything on full_state).
		if full_state || since_state_ids.get(&key) != Some(&id) {
			let (event_type, state_key) = services.rooms.short.get_statekey_from_short(key).await?;

			// Member events are withheld under lazy loading (the `|| full_state`
			// here is redundant with the outer check but harmless).
			if !lazy_load_enabled
				|| event_type != StateEventType::RoomMember
				|| full_state
				// TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565
				|| (cfg!(feature = "element_hacks") && *sender_user == state_key)
			{
				let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else {
					error!("Pdu in state not found: {}", id);
					continue;
				};

				left_state_events.push(pdu.to_sync_state_event());

				// Yield periodically so a huge state dump doesn't starve the
				// executor.
				i = i.wrapping_add(1);
				if i % 100 == 0 {
					tokio::task::yield_now().await;
				}
			}
		}
	}

	left_rooms.insert(
		room_id.to_owned(),
		LeftRoom {
			account_data: RoomAccountData {
				events: Vec::new(),
			},
			timeline: Timeline {
				limited: false,
				prev_batch: Some(next_batch_string.to_owned()),
				events: Vec::new(),
			},
			state: RoomState {
				events: left_state_events,
			},
		},
	);
	Ok(())
}
|
|
|
|
|
2024-04-01 20:48:40 -07:00
|
|
|
/// Collect presence changes since `since` into `presence_updates`, keyed by
/// user id.
///
/// Only updates from users visible to `syncing_user` (per the state cache's
/// `user_sees_user`) are kept. When multiple updates exist for the same user,
/// fields are merged so that newer non-empty values win while older values are
/// retained as fallback.
async fn process_presence_updates(
	services: &Services, presence_updates: &mut HashMap<OwnedUserId, PresenceEvent>, since: u64, syncing_user: &UserId,
) -> Result<()> {
	let presence_since = services.presence.presence_since(since);

	// Take presence updates
	pin_mut!(presence_since);
	while let Some((user_id, _, presence_bytes)) = presence_since.next().await {
		// Skip users the syncing user can't see.
		if !services
			.rooms
			.state_cache
			.user_sees_user(syncing_user, &user_id)
			.await
		{
			continue;
		}

		let presence_event = services
			.presence
			.from_json_bytes_to_event(&presence_bytes, &user_id)
			.await?;

		match presence_updates.entry(user_id) {
			Entry::Vacant(slot) => {
				slot.insert(presence_event);
			},
			Entry::Occupied(mut slot) => {
				let curr_event = slot.get_mut();
				let curr_content = &mut curr_event.content;
				let new_content = presence_event.content;

				// Update existing presence event with more info
				// (presence state is always taken from the newer event; the
				// optional fields fall back to the previously stored values)
				curr_content.presence = new_content.presence;
				curr_content.status_msg = new_content
					.status_msg
					.or_else(|| curr_content.status_msg.take());
				curr_content.last_active_ago = new_content.last_active_ago.or(curr_content.last_active_ago);
				curr_content.displayname = new_content
					.displayname
					.or_else(|| curr_content.displayname.take());
				curr_content.avatar_url = new_content
					.avatar_url
					.or_else(|| curr_content.avatar_url.take());
				curr_content.currently_active = new_content
					.currently_active
					.or(curr_content.currently_active);
			},
		}
	}

	Ok(())
}
|
|
|
|
|
2023-11-27 00:39:50 -05:00
|
|
|
#[allow(clippy::too_many_arguments)]
|
2023-03-13 10:39:02 +01:00
|
|
|
async fn load_joined_room(
|
2024-07-16 08:05:25 +00:00
|
|
|
services: &Services, sender_user: &UserId, sender_device: &DeviceId, room_id: &RoomId, since: u64,
|
|
|
|
sincecount: PduCount, next_batch: u64, next_batchcount: PduCount, lazy_load_enabled: bool,
|
|
|
|
lazy_load_send_redundant: bool, full_state: bool, device_list_updates: &mut HashSet<OwnedUserId>,
|
|
|
|
left_encrypted_users: &mut HashSet<OwnedUserId>,
|
2023-03-13 10:39:02 +01:00
|
|
|
) -> Result<JoinedRoom> {
|
2024-06-14 21:28:00 +00:00
|
|
|
// Get and drop the lock to wait for remaining operations to finish
|
|
|
|
// This will make sure the we have all events until next_batch
|
2024-07-16 08:05:25 +00:00
|
|
|
let insert_lock = services.rooms.timeline.mutex_insert.lock(room_id).await;
|
2024-06-14 21:28:00 +00:00
|
|
|
drop(insert_lock);
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
let (timeline_pdus, limited) = load_timeline(services, sender_user, room_id, sincecount, 10).await?;
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-03-25 17:05:11 -04:00
|
|
|
let send_notification_counts = !timeline_pdus.is_empty()
|
2024-07-16 08:05:25 +00:00
|
|
|
|| services
|
2024-03-25 17:05:11 -04:00
|
|
|
.rooms
|
|
|
|
.user
|
2024-08-08 17:18:30 +00:00
|
|
|
.last_notification_read(sender_user, room_id)
|
|
|
|
.await > since;
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-03-13 10:39:02 +01:00
|
|
|
let mut timeline_users = HashSet::new();
|
|
|
|
for (_, event) in &timeline_pdus {
|
|
|
|
timeline_users.insert(event.sender.as_str().to_owned());
|
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-07-16 08:05:25 +00:00
|
|
|
services
|
2024-03-25 17:05:11 -04:00
|
|
|
.rooms
|
|
|
|
.lazy_loading
|
2024-08-08 17:18:30 +00:00
|
|
|
.lazy_load_confirm_delivery(sender_user, sender_device, room_id, sincecount);
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-03-13 10:39:02 +01:00
|
|
|
// Database queries:
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
let current_shortstatehash = services
|
|
|
|
.rooms
|
|
|
|
.state
|
|
|
|
.get_room_shortstatehash(room_id)
|
|
|
|
.await
|
|
|
|
.map_err(|_| err!(Database(error!("Room {room_id} has no state"))))?;
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-07-16 08:05:25 +00:00
|
|
|
let since_shortstatehash = services
|
2024-03-25 17:05:11 -04:00
|
|
|
.rooms
|
|
|
|
.user
|
2024-08-08 17:18:30 +00:00
|
|
|
.get_token_shortstatehash(room_id, since)
|
|
|
|
.await
|
|
|
|
.ok();
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) = if timeline_pdus
|
|
|
|
.is_empty()
|
|
|
|
&& (since_shortstatehash.is_none() || since_shortstatehash.is_some_and(is_equal_to!(current_shortstatehash)))
|
|
|
|
{
|
|
|
|
// No state changes
|
|
|
|
(Vec::new(), None, None, false, Vec::new())
|
|
|
|
} else {
|
|
|
|
// Calculates joined_member_count, invited_member_count and heroes
|
|
|
|
let calculate_counts = || async {
|
|
|
|
let joined_member_count = services
|
|
|
|
.rooms
|
|
|
|
.state_cache
|
|
|
|
.room_joined_count(room_id)
|
|
|
|
.await
|
|
|
|
.unwrap_or(0);
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
let invited_member_count = services
|
|
|
|
.rooms
|
|
|
|
.state_cache
|
|
|
|
.room_invited_count(room_id)
|
|
|
|
.await
|
|
|
|
.unwrap_or(0);
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
if joined_member_count.saturating_add(invited_member_count) > 5 {
|
|
|
|
return Ok::<_, Error>((Some(joined_member_count), Some(invited_member_count), Vec::new()));
|
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
// Go through all PDUs and for each member event, check if the user is still
|
|
|
|
// joined or invited until we have 5 or we reach the end
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
// Recalculate heroes (first 5 members)
|
|
|
|
let heroes = services
|
|
|
|
.rooms
|
|
|
|
.timeline
|
|
|
|
.all_pdus(sender_user, room_id)
|
|
|
|
.await?
|
2024-09-29 20:57:33 -04:00
|
|
|
.ready_filter(|(_, pdu)| pdu.kind == RoomMember)
|
2024-08-08 17:18:30 +00:00
|
|
|
.filter_map(|(_, pdu)| async move {
|
|
|
|
let Ok(content) = serde_json::from_str::<RoomMemberEventContent>(pdu.content.get()) else {
|
|
|
|
return None;
|
|
|
|
};
|
|
|
|
|
|
|
|
let Some(state_key) = &pdu.state_key else {
|
|
|
|
return None;
|
|
|
|
};
|
|
|
|
|
|
|
|
let Ok(user_id) = UserId::parse(state_key) else {
|
|
|
|
return None;
|
|
|
|
};
|
|
|
|
|
|
|
|
if user_id == sender_user {
|
|
|
|
return None;
|
2024-03-25 17:05:11 -04:00
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
// The membership was and still is invite or join
|
|
|
|
if !matches!(content.membership, MembershipState::Join | MembershipState::Invite) {
|
|
|
|
return None;
|
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
if !services
|
2024-03-25 17:05:11 -04:00
|
|
|
.rooms
|
2024-08-08 17:18:30 +00:00
|
|
|
.state_cache
|
|
|
|
.is_joined(&user_id, room_id)
|
|
|
|
.await && services
|
|
|
|
.rooms
|
|
|
|
.state_cache
|
|
|
|
.is_invited(&user_id, room_id)
|
|
|
|
.await
|
|
|
|
{
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
|
|
|
|
Some(user_id)
|
2024-03-25 17:05:11 -04:00
|
|
|
})
|
2024-08-08 17:18:30 +00:00
|
|
|
.collect::<HashSet<_>>()
|
|
|
|
.await;
|
|
|
|
|
|
|
|
Ok::<_, Error>((
|
|
|
|
Some(joined_member_count),
|
|
|
|
Some(invited_member_count),
|
|
|
|
heroes.into_iter().collect::<Vec<_>>(),
|
|
|
|
))
|
|
|
|
};
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-10-03 09:38:10 +00:00
|
|
|
let get_sender_member_content = |short| {
|
2024-08-08 17:18:30 +00:00
|
|
|
services
|
|
|
|
.rooms
|
|
|
|
.state_accessor
|
2024-10-03 09:38:10 +00:00
|
|
|
.state_get_content(short, &StateEventType::RoomMember, sender_user.as_str())
|
2024-08-08 17:18:30 +00:00
|
|
|
.ok()
|
|
|
|
};
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-10-03 09:38:10 +00:00
|
|
|
let since_sender_member: OptionFuture<_> = since_shortstatehash.map(get_sender_member_content).into();
|
|
|
|
|
|
|
|
let joined_since_last_sync = since_sender_member
|
|
|
|
.await
|
|
|
|
.flatten()
|
|
|
|
.map_or(true, |content: RoomMemberEventContent| {
|
|
|
|
content.membership != MembershipState::Join
|
|
|
|
});
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
if since_shortstatehash.is_none() || joined_since_last_sync {
|
|
|
|
// Probably since = 0, we will do an initial sync
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
let (joined_member_count, invited_member_count, heroes) = calculate_counts().await?;
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
let current_state_ids = services
|
|
|
|
.rooms
|
|
|
|
.state_accessor
|
|
|
|
.state_full_ids(current_shortstatehash)
|
|
|
|
.await?;
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
let mut state_events = Vec::new();
|
|
|
|
let mut lazy_loaded = HashSet::new();
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
let mut i: u8 = 0;
|
|
|
|
for (shortstatekey, id) in current_state_ids {
|
|
|
|
let (event_type, state_key) = services
|
|
|
|
.rooms
|
|
|
|
.short
|
|
|
|
.get_statekey_from_short(shortstatekey)
|
|
|
|
.await?;
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
if event_type != StateEventType::RoomMember {
|
|
|
|
let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else {
|
|
|
|
error!("Pdu in state not found: {id}");
|
|
|
|
continue;
|
|
|
|
};
|
|
|
|
state_events.push(pdu);
|
|
|
|
|
|
|
|
i = i.wrapping_add(1);
|
|
|
|
if i % 100 == 0 {
|
|
|
|
tokio::task::yield_now().await;
|
|
|
|
}
|
|
|
|
} else if !lazy_load_enabled
|
2023-07-03 19:37:54 +02:00
|
|
|
|| full_state
|
|
|
|
|| timeline_users.contains(&state_key)
|
|
|
|
// TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565
|
2024-03-23 23:40:09 -04:00
|
|
|
|| (cfg!(feature = "element_hacks") && *sender_user == state_key)
|
2024-08-08 17:18:30 +00:00
|
|
|
{
|
|
|
|
let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else {
|
|
|
|
error!("Pdu in state not found: {id}");
|
|
|
|
continue;
|
|
|
|
};
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
// This check is in case a bad user ID made it into the database
|
|
|
|
if let Ok(uid) = UserId::parse(&state_key) {
|
|
|
|
lazy_loaded.insert(uid);
|
|
|
|
}
|
|
|
|
state_events.push(pdu);
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
i = i.wrapping_add(1);
|
|
|
|
if i % 100 == 0 {
|
|
|
|
tokio::task::yield_now().await;
|
2022-01-06 00:15:34 +01:00
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
}
|
2024-08-08 17:18:30 +00:00
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
// Reset lazy loading because this is an initial sync
|
|
|
|
services
|
|
|
|
.rooms
|
|
|
|
.lazy_loading
|
|
|
|
.lazy_load_reset(sender_user, sender_device, room_id)
|
|
|
|
.await;
|
|
|
|
|
|
|
|
// The state_events above should contain all timeline_users, let's mark them as
|
|
|
|
// lazy loaded.
|
|
|
|
services.rooms.lazy_loading.lazy_load_mark_sent(
|
|
|
|
sender_user,
|
|
|
|
sender_device,
|
|
|
|
room_id,
|
|
|
|
lazy_loaded,
|
|
|
|
next_batchcount,
|
|
|
|
);
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
(heroes, joined_member_count, invited_member_count, true, state_events)
|
|
|
|
} else {
|
|
|
|
// Incremental /sync
|
|
|
|
let since_shortstatehash = since_shortstatehash.expect("missing since_shortstatehash on incremental sync");
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
let mut delta_state_events = Vec::new();
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
if since_shortstatehash != current_shortstatehash {
|
|
|
|
let current_state_ids = services
|
|
|
|
.rooms
|
|
|
|
.state_accessor
|
|
|
|
.state_full_ids(current_shortstatehash)
|
|
|
|
.await?;
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
let since_state_ids = services
|
|
|
|
.rooms
|
|
|
|
.state_accessor
|
|
|
|
.state_full_ids(since_shortstatehash)
|
|
|
|
.await?;
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
for (key, id) in current_state_ids {
|
|
|
|
if full_state || since_state_ids.get(&key) != Some(&id) {
|
|
|
|
let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else {
|
|
|
|
error!("Pdu in state not found: {id}");
|
|
|
|
continue;
|
|
|
|
};
|
2024-03-25 17:05:11 -04:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
delta_state_events.push(pdu);
|
|
|
|
tokio::task::yield_now().await;
|
2024-03-05 19:48:54 -05:00
|
|
|
}
|
|
|
|
}
|
2024-08-08 17:18:30 +00:00
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
let encrypted_room = services
|
|
|
|
.rooms
|
|
|
|
.state_accessor
|
|
|
|
.state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")
|
|
|
|
.await
|
|
|
|
.is_ok();
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
let since_encryption = services
|
|
|
|
.rooms
|
|
|
|
.state_accessor
|
|
|
|
.state_get(since_shortstatehash, &StateEventType::RoomEncryption, "")
|
|
|
|
.await;
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
// Calculations:
|
|
|
|
let new_encrypted_room = encrypted_room && since_encryption.is_err();
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
let send_member_count = delta_state_events
|
|
|
|
.iter()
|
2024-09-29 20:57:33 -04:00
|
|
|
.any(|event| event.kind == RoomMember);
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
if encrypted_room {
|
|
|
|
for state_event in &delta_state_events {
|
2024-09-29 20:57:33 -04:00
|
|
|
if state_event.kind != RoomMember {
|
2024-08-08 17:18:30 +00:00
|
|
|
continue;
|
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
if let Some(state_key) = &state_event.state_key {
|
|
|
|
let user_id = UserId::parse(state_key.clone())
|
|
|
|
.map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?;
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
if user_id == sender_user {
|
|
|
|
continue;
|
|
|
|
}
|
2024-03-25 17:05:11 -04:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
let new_membership = serde_json::from_str::<RoomMemberEventContent>(state_event.content.get())
|
|
|
|
.map_err(|_| Error::bad_database("Invalid PDU in database."))?
|
|
|
|
.membership;
|
2024-03-25 17:05:11 -04:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
match new_membership {
|
|
|
|
MembershipState::Join => {
|
|
|
|
// A new user joined an encrypted room
|
|
|
|
if !share_encrypted_room(services, sender_user, &user_id, Some(room_id)).await {
|
|
|
|
device_list_updates.insert(user_id);
|
|
|
|
}
|
|
|
|
},
|
|
|
|
MembershipState::Leave => {
|
|
|
|
// Write down users that have left encrypted rooms we are in
|
|
|
|
left_encrypted_users.insert(user_id);
|
|
|
|
},
|
|
|
|
_ => {},
|
2024-03-05 19:48:54 -05:00
|
|
|
}
|
|
|
|
}
|
2023-07-03 19:37:54 +02:00
|
|
|
}
|
2024-08-08 17:18:30 +00:00
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
if joined_since_last_sync && encrypted_room || new_encrypted_room {
|
|
|
|
// If the user is in a new encrypted room, give them all joined users
|
|
|
|
device_list_updates.extend(
|
|
|
|
services
|
|
|
|
.rooms
|
|
|
|
.state_cache
|
|
|
|
.room_members(room_id)
|
2024-10-01 02:47:39 +00:00
|
|
|
// Don't send key updates from the sender to the sender
|
|
|
|
.ready_filter(|user_id| sender_user != *user_id)
|
|
|
|
// Only send keys if the sender doesn't share an encrypted room with the target
|
|
|
|
// already
|
|
|
|
.filter_map(|user_id| {
|
|
|
|
share_encrypted_room(services, sender_user, user_id, Some(room_id))
|
|
|
|
.map(|res| res.or_some(user_id.to_owned()))
|
2024-08-08 17:18:30 +00:00
|
|
|
})
|
|
|
|
.collect::<Vec<_>>()
|
|
|
|
.await,
|
|
|
|
);
|
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
let (joined_member_count, invited_member_count, heroes) = if send_member_count {
|
|
|
|
calculate_counts().await?
|
|
|
|
} else {
|
|
|
|
(None, None, Vec::new())
|
|
|
|
};
|
2024-03-25 17:05:11 -04:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
let mut state_events = delta_state_events;
|
|
|
|
let mut lazy_loaded = HashSet::new();
|
|
|
|
|
|
|
|
// Mark all member events we're returning as lazy-loaded
|
|
|
|
for pdu in &state_events {
|
2024-09-29 20:57:33 -04:00
|
|
|
if pdu.kind == RoomMember {
|
2024-08-08 17:18:30 +00:00
|
|
|
match UserId::parse(
|
|
|
|
pdu.state_key
|
|
|
|
.as_ref()
|
|
|
|
.expect("State event has state key")
|
|
|
|
.clone(),
|
|
|
|
) {
|
|
|
|
Ok(state_key_userid) => {
|
|
|
|
lazy_loaded.insert(state_key_userid);
|
|
|
|
},
|
|
|
|
Err(e) => error!("Invalid state key for member event: {}", e),
|
don't return extra member count or e2ee device updates from sync
Previously, we were returning redundant member count updates or encrypted
device updates from the /sync endpoint in some cases. The extra member
count updates are spec-compliant, but unnecessary, while the extra
encrypted device updates violate the spec.
The refactor necessary to fix this bug is also necessary to support
filtering on state events in sync.
Details:
Joined room incremental sync needs to examine state events for four
purposes:
1. determining whether we need to return an update to room member counts
2. determining the set of left/joined devices for encrypted rooms
(returned in `device_lists`)
3. returning state events to the client (in `rooms.joined.*.state`)
4. tracking which member events we have sent to the client, so they can
be omitted on future requests when lazy-loading is enabled.
The state events that we need to examine for the first two cases is member
events in the delta between `since` and the end of `timeline`. For the
second two cases, we need the delta between `since` and the start of
`timeline`, plus contextual member events for any senders that occur in
`timeline`. The second list is subject to filtering, while the first is
not.
Before this change, we were using the same set of state events that we are
returning to the client (cases 3/4) to do the analysis for cases 1/2.
In a compliant implementation, this would result in us missing some
relevant member events in 1/2 in addition to seeing redundant member
events. In current conduwuit this is not the case because the set of
events that we return to the client is always a superset of the set that
is needed for cases 1/2. This is because we don't support filtering, and
we have an existing bug[1] where we are returning the delta between
`since` and the end of `timeline` rather than the start.
[1]: https://github.com/girlbossceo/conduwuit/issues/361
Fixing this is necessary to implement filtering because otherwise
we would start missing some member events for member count or encrypted
device updates if the relevant member events are rejected by the filter.
This would be much worse than our current behavior.
2024-05-10 17:17:50 -07:00
|
|
|
}
|
|
|
|
}
|
2024-08-08 17:18:30 +00:00
|
|
|
}
|
don't return extra member count or e2ee device updates from sync
Previously, we were returning redundant member count updates or encrypted
device updates from the /sync endpoint in some cases. The extra member
count updates are spec-compliant, but unnecessary, while the extra
encrypted device updates violate the spec.
The refactor necessary to fix this bug is also necessary to support
filtering on state events in sync.
Details:
Joined room incremental sync needs to examine state events for four
purposes:
1. determining whether we need to return an update to room member counts
2. determining the set of left/joined devices for encrypted rooms
(returned in `device_lists`)
3. returning state events to the client (in `rooms.joined.*.state`)
4. tracking which member events we have sent to the client, so they can
be omitted on future requests when lazy-loading is enabled.
The state events that we need to examine for the first two cases is member
events in the delta between `since` and the end of `timeline`. For the
second two cases, we need the delta between `since` and the start of
`timeline`, plus contextual member events for any senders that occur in
`timeline`. The second list is subject to filtering, while the first is
not.
Before this change, we were using the same set of state events that we are
returning to the client (cases 3/4) to do the analysis for cases 1/2.
In a compliant implementation, this would result in us missing some
relevant member events in 1/2 in addition to seeing redundant member
events. In current conduwuit this is not the case because the set of
events that we return to the client is always a superset of the set that
is needed for cases 1/2. This is because we don't support filtering, and
we have an existing bug[1] where we are returning the delta between
`since` and the end of `timeline` rather than the start.
[1]: https://github.com/girlbossceo/conduwuit/issues/361
Fixing this is necessary to implement filtering because otherwise
we would start missing some member events for member count or encrypted
device updates if the relevant member events are rejected by the filter.
This would be much worse than our current behavior.
2024-05-10 17:17:50 -07:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
// Fetch contextual member state events for events from the timeline, and
|
|
|
|
// mark them as lazy-loaded as well.
|
|
|
|
for (_, event) in &timeline_pdus {
|
|
|
|
if lazy_loaded.contains(&event.sender) {
|
|
|
|
continue;
|
|
|
|
}
|
don't return extra member count or e2ee device updates from sync
Previously, we were returning redundant member count updates or encrypted
device updates from the /sync endpoint in some cases. The extra member
count updates are spec-compliant, but unnecessary, while the extra
encrypted device updates violate the spec.
The refactor necessary to fix this bug is also necessary to support
filtering on state events in sync.
Details:
Joined room incremental sync needs to examine state events for four
purposes:
1. determining whether we need to return an update to room member counts
2. determining the set of left/joined devices for encrypted rooms
(returned in `device_lists`)
3. returning state events to the client (in `rooms.joined.*.state`)
4. tracking which member events we have sent to the client, so they can
be omitted on future requests when lazy-loading is enabled.
The state events that we need to examine for the first two cases is member
events in the delta between `since` and the end of `timeline`. For the
second two cases, we need the delta between `since` and the start of
`timeline`, plus contextual member events for any senders that occur in
`timeline`. The second list is subject to filtering, while the first is
not.
Before this change, we were using the same set of state events that we are
returning to the client (cases 3/4) to do the analysis for cases 1/2.
In a compliant implementation, this would result in us missing some
relevant member events in 1/2 in addition to seeing redundant member
events. In current conduwuit this is not the case because the set of
events that we return to the client is always a superset of the set that
is needed for cases 1/2. This is because we don't support filtering, and
we have an existing bug[1] where we are returning the delta between
`since` and the end of `timeline` rather than the start.
[1]: https://github.com/girlbossceo/conduwuit/issues/361
Fixing this is necessary to implement filtering because otherwise
we would start missing some member events for member count or encrypted
device updates if the relevant member events are rejected by the filter.
This would be much worse than our current behavior.
2024-05-10 17:17:50 -07:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
if !services
|
|
|
|
.rooms
|
|
|
|
.lazy_loading
|
|
|
|
.lazy_load_was_sent_before(sender_user, sender_device, room_id, &event.sender)
|
|
|
|
.await || lazy_load_send_redundant
|
|
|
|
{
|
|
|
|
if let Ok(member_event) = services
|
|
|
|
.rooms
|
|
|
|
.state_accessor
|
|
|
|
.room_state_get(room_id, &StateEventType::RoomMember, event.sender.as_str())
|
|
|
|
.await
|
don't return extra member count or e2ee device updates from sync
Previously, we were returning redundant member count updates or encrypted
device updates from the /sync endpoint in some cases. The extra member
count updates are spec-compliant, but unnecessary, while the extra
encrypted device updates violate the spec.
The refactor necessary to fix this bug is also necessary to support
filtering on state events in sync.
Details:
Joined room incremental sync needs to examine state events for four
purposes:
1. determining whether we need to return an update to room member counts
2. determining the set of left/joined devices for encrypted rooms
(returned in `device_lists`)
3. returning state events to the client (in `rooms.joined.*.state`)
4. tracking which member events we have sent to the client, so they can
be omitted on future requests when lazy-loading is enabled.
The state events that we need to examine for the first two cases is member
events in the delta between `since` and the end of `timeline`. For the
second two cases, we need the delta between `since` and the start of
`timeline`, plus contextual member events for any senders that occur in
`timeline`. The second list is subject to filtering, while the first is
not.
Before this change, we were using the same set of state events that we are
returning to the client (cases 3/4) to do the analysis for cases 1/2.
In a compliant implementation, this would result in us missing some
relevant member events in 1/2 in addition to seeing redundant member
events. In current conduwuit this is not the case because the set of
events that we return to the client is always a superset of the set that
is needed for cases 1/2. This is because we don't support filtering, and
we have an existing bug[1] where we are returning the delta between
`since` and the end of `timeline` rather than the start.
[1]: https://github.com/girlbossceo/conduwuit/issues/361
Fixing this is necessary to implement filtering because otherwise
we would start missing some member events for member count or encrypted
device updates if the relevant member events are rejected by the filter.
This would be much worse than our current behavior.
2024-05-10 17:17:50 -07:00
|
|
|
{
|
2024-08-08 17:18:30 +00:00
|
|
|
lazy_loaded.insert(event.sender.clone());
|
|
|
|
state_events.push(member_event);
|
don't return extra member count or e2ee device updates from sync
Previously, we were returning redundant member count updates or encrypted
device updates from the /sync endpoint in some cases. The extra member
count updates are spec-compliant, but unnecessary, while the extra
encrypted device updates violate the spec.
The refactor necessary to fix this bug is also necessary to support
filtering on state events in sync.
Details:
Joined room incremental sync needs to examine state events for four
purposes:
1. determining whether we need to return an update to room member counts
2. determining the set of left/joined devices for encrypted rooms
(returned in `device_lists`)
3. returning state events to the client (in `rooms.joined.*.state`)
4. tracking which member events we have sent to the client, so they can
be omitted on future requests when lazy-loading is enabled.
The state events that we need to examine for the first two cases is member
events in the delta between `since` and the end of `timeline`. For the
second two cases, we need the delta between `since` and the start of
`timeline`, plus contextual member events for any senders that occur in
`timeline`. The second list is subject to filtering, while the first is
not.
Before this change, we were using the same set of state events that we are
returning to the client (cases 3/4) to do the analysis for cases 1/2.
In a compliant implementation, this would result in us missing some
relevant member events in 1/2 in addition to seeing redundant member
events. In current conduwuit this is not the case because the set of
events that we return to the client is always a superset of the set that
is needed for cases 1/2. This is because we don't support filtering, and
we have an existing bug[1] where we are returning the delta between
`since` and the end of `timeline` rather than the start.
[1]: https://github.com/girlbossceo/conduwuit/issues/361
Fixing this is necessary to implement filtering because otherwise
we would start missing some member events for member count or encrypted
device updates if the relevant member events are rejected by the filter.
This would be much worse than our current behavior.
2024-05-10 17:17:50 -07:00
|
|
|
}
|
|
|
|
}
|
2024-08-08 17:18:30 +00:00
|
|
|
}
|
don't return extra member count or e2ee device updates from sync
Previously, we were returning redundant member count updates or encrypted
device updates from the /sync endpoint in some cases. The extra member
count updates are spec-compliant, but unnecessary, while the extra
encrypted device updates violate the spec.
The refactor necessary to fix this bug is also necessary to support
filtering on state events in sync.
Details:
Joined room incremental sync needs to examine state events for four
purposes:
1. determining whether we need to return an update to room member counts
2. determining the set of left/joined devices for encrypted rooms
(returned in `device_lists`)
3. returning state events to the client (in `rooms.joined.*.state`)
4. tracking which member events we have sent to the client, so they can
be omitted on future requests when lazy-loading is enabled.
The state events that we need to examine for the first two cases is member
events in the delta between `since` and the end of `timeline`. For the
second two cases, we need the delta between `since` and the start of
`timeline`, plus contextual member events for any senders that occur in
`timeline`. The second list is subject to filtering, while the first is
not.
Before this change, we were using the same set of state events that we are
returning to the client (cases 3/4) to do the analysis for cases 1/2.
In a compliant implementation, this would result in us missing some
relevant member events in 1/2 in addition to seeing redundant member
events. In current conduwuit this is not the case because the set of
events that we return to the client is always a superset of the set that
is needed for cases 1/2. This is because we don't support filtering, and
we have an existing bug[1] where we are returning the delta between
`since` and the end of `timeline` rather than the start.
[1]: https://github.com/girlbossceo/conduwuit/issues/361
Fixing this is necessary to implement filtering because otherwise
we would start missing some member events for member count or encrypted
device updates if the relevant member events are rejected by the filter.
This would be much worse than our current behavior.
2024-05-10 17:17:50 -07:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
services.rooms.lazy_loading.lazy_load_mark_sent(
|
|
|
|
sender_user,
|
|
|
|
sender_device,
|
|
|
|
room_id,
|
|
|
|
lazy_loaded,
|
|
|
|
next_batchcount,
|
|
|
|
);
|
don't return extra member count or e2ee device updates from sync
Previously, we were returning redundant member count updates or encrypted
device updates from the /sync endpoint in some cases. The extra member
count updates are spec-compliant, but unnecessary, while the extra
encrypted device updates violate the spec.
The refactor necessary to fix this bug is also necessary to support
filtering on state events in sync.
Details:
Joined room incremental sync needs to examine state events for four
purposes:
1. determining whether we need to return an update to room member counts
2. determining the set of left/joined devices for encrypted rooms
(returned in `device_lists`)
3. returning state events to the client (in `rooms.joined.*.state`)
4. tracking which member events we have sent to the client, so they can
be omitted on future requests when lazy-loading is enabled.
The state events that we need to examine for the first two cases is member
events in the delta between `since` and the end of `timeline`. For the
second two cases, we need the delta between `since` and the start of
`timeline`, plus contextual member events for any senders that occur in
`timeline`. The second list is subject to filtering, while the first is
not.
Before this change, we were using the same set of state events that we are
returning to the client (cases 3/4) to do the analysis for cases 1/2.
In a compliant implementation, this would result in us missing some
relevant member events in 1/2 in addition to seeing redundant member
events. In current conduwuit this is not the case because the set of
events that we return to the client is always a superset of the set that
is needed for cases 1/2. This is because we don't support filtering, and
we have an existing bug[1] where we are returning the delta between
`since` and the end of `timeline` rather than the start.
[1]: https://github.com/girlbossceo/conduwuit/issues/361
Fixing this is necessary to implement filtering because otherwise
we would start missing some member events for member count or encrypted
device updates if the relevant member events are rejected by the filter.
This would be much worse than our current behavior.
2024-05-10 17:17:50 -07:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
(
|
|
|
|
heroes,
|
|
|
|
joined_member_count,
|
|
|
|
invited_member_count,
|
|
|
|
joined_since_last_sync,
|
|
|
|
state_events,
|
|
|
|
)
|
|
|
|
}
|
|
|
|
};
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-03-13 10:39:02 +01:00
|
|
|
// Look for device list updates in this room
|
2024-03-25 17:05:11 -04:00
|
|
|
device_list_updates.extend(
|
2024-07-16 08:05:25 +00:00
|
|
|
services
|
2024-03-25 17:05:11 -04:00
|
|
|
.users
|
|
|
|
.keys_changed(room_id.as_ref(), since, None)
|
2024-08-08 17:18:30 +00:00
|
|
|
.map(ToOwned::to_owned)
|
|
|
|
.collect::<Vec<_>>()
|
|
|
|
.await,
|
2024-03-25 17:05:11 -04:00
|
|
|
);
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-03-13 10:39:02 +01:00
|
|
|
let notification_count = if send_notification_counts {
|
|
|
|
Some(
|
2024-07-16 08:05:25 +00:00
|
|
|
services
|
2023-03-13 10:39:02 +01:00
|
|
|
.rooms
|
|
|
|
.user
|
2024-08-08 17:18:30 +00:00
|
|
|
.notification_count(sender_user, room_id)
|
|
|
|
.await
|
2023-03-13 10:39:02 +01:00
|
|
|
.try_into()
|
|
|
|
.expect("notification count can't go that high"),
|
|
|
|
)
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
};
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-03-13 10:39:02 +01:00
|
|
|
let highlight_count = if send_notification_counts {
|
|
|
|
Some(
|
2024-07-16 08:05:25 +00:00
|
|
|
services
|
2023-03-13 10:39:02 +01:00
|
|
|
.rooms
|
|
|
|
.user
|
2024-08-08 17:18:30 +00:00
|
|
|
.highlight_count(sender_user, room_id)
|
|
|
|
.await
|
2023-03-13 10:39:02 +01:00
|
|
|
.try_into()
|
|
|
|
.expect("highlight count can't go that high"),
|
|
|
|
)
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
};
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-03-25 17:05:11 -04:00
|
|
|
let prev_batch = timeline_pdus
|
|
|
|
.first()
|
|
|
|
.map_or(Ok::<_, Error>(None), |(pdu_count, _)| {
|
|
|
|
Ok(Some(match pdu_count {
|
|
|
|
PduCount::Backfilled(_) => {
|
|
|
|
error!("timeline in backfill state?!");
|
|
|
|
"0".to_owned()
|
|
|
|
},
|
|
|
|
PduCount::Normal(c) => c.to_string(),
|
|
|
|
}))
|
|
|
|
})?;
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-03-25 17:05:11 -04:00
|
|
|
let room_events: Vec<_> = timeline_pdus
|
|
|
|
.iter()
|
2024-09-28 22:12:17 -04:00
|
|
|
.stream()
|
|
|
|
.filter_map(|(_, pdu)| async move {
|
|
|
|
// list of safe and common non-state events to ignore
|
|
|
|
if matches!(
|
|
|
|
&pdu.kind,
|
|
|
|
RoomMessage
|
|
|
|
| Sticker | CallInvite
|
|
|
|
| CallNotify | RoomEncrypted
|
|
|
|
| Image | File | Audio
|
|
|
|
| Voice | Video | UnstablePollStart
|
|
|
|
| PollStart | KeyVerificationStart
|
|
|
|
| Reaction | Emote
|
|
|
|
| Location
|
|
|
|
) && services
|
|
|
|
.users
|
|
|
|
.user_is_ignored(&pdu.sender, sender_user)
|
|
|
|
.await
|
|
|
|
{
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
|
|
|
|
Some(pdu.to_sync_room_event())
|
|
|
|
})
|
|
|
|
.collect()
|
|
|
|
.await;
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-07-16 08:05:25 +00:00
|
|
|
let mut edus: Vec<_> = services
|
2023-03-13 10:39:02 +01:00
|
|
|
.rooms
|
|
|
|
.read_receipt
|
2023-11-27 00:39:50 -05:00
|
|
|
.readreceipts_since(room_id, since)
|
2024-09-28 21:44:38 -04:00
|
|
|
.filter_map(|(read_user, _, v)| async move {
|
|
|
|
(!services
|
|
|
|
.users
|
|
|
|
.user_is_ignored(&read_user, sender_user)
|
|
|
|
.await)
|
|
|
|
.then_some(v)
|
|
|
|
})
|
2024-08-08 17:18:30 +00:00
|
|
|
.collect()
|
|
|
|
.await;
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-07-16 08:05:25 +00:00
|
|
|
if services.rooms.typing.last_typing_update(room_id).await? > since {
|
2023-03-13 10:39:02 +01:00
|
|
|
edus.push(
|
|
|
|
serde_json::from_str(
|
2024-09-28 21:44:38 -04:00
|
|
|
&serde_json::to_string(
|
|
|
|
&services
|
|
|
|
.rooms
|
|
|
|
.typing
|
|
|
|
.typings_all(room_id, sender_user)
|
|
|
|
.await?,
|
|
|
|
)
|
|
|
|
.expect("event is valid, we just created it"),
|
2024-03-05 19:48:54 -05:00
|
|
|
)
|
2023-03-13 10:39:02 +01:00
|
|
|
.expect("event is valid, we just created it"),
|
|
|
|
);
|
2020-08-21 21:22:59 +02:00
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-03-13 10:39:02 +01:00
|
|
|
// Save the state after this sync so we can send the correct state diff next
|
|
|
|
// sync
|
2024-07-16 08:05:25 +00:00
|
|
|
services
|
2024-03-25 17:05:11 -04:00
|
|
|
.rooms
|
|
|
|
.user
|
2024-08-08 17:18:30 +00:00
|
|
|
.associate_token_shortstatehash(room_id, next_batch, current_shortstatehash)
|
|
|
|
.await;
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-03-13 10:39:02 +01:00
|
|
|
Ok(JoinedRoom {
|
|
|
|
account_data: RoomAccountData {
|
2024-07-16 08:05:25 +00:00
|
|
|
events: services
|
2020-07-30 18:14:47 +02:00
|
|
|
.account_data
|
2024-08-08 17:18:30 +00:00
|
|
|
.changes_since(Some(room_id), sender_user, since)
|
|
|
|
.await?
|
2020-07-30 18:14:47 +02:00
|
|
|
.into_iter()
|
2024-08-30 10:31:08 +02:00
|
|
|
.filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room))
|
2021-10-13 11:51:30 +02:00
|
|
|
.collect(),
|
2020-07-30 18:14:47 +02:00
|
|
|
},
|
2023-03-13 10:39:02 +01:00
|
|
|
summary: RoomSummary {
|
|
|
|
heroes,
|
2024-07-07 06:17:58 +00:00
|
|
|
joined_member_count: joined_member_count.map(ruma_from_u64),
|
|
|
|
invited_member_count: invited_member_count.map(ruma_from_u64),
|
2020-07-30 18:14:47 +02:00
|
|
|
},
|
2023-03-13 10:39:02 +01:00
|
|
|
unread_notifications: UnreadNotificationsCount {
|
|
|
|
highlight_count,
|
|
|
|
notification_count,
|
2020-07-30 18:14:47 +02:00
|
|
|
},
|
2023-03-13 10:39:02 +01:00
|
|
|
timeline: Timeline {
|
|
|
|
limited: limited || joined_since_last_sync,
|
|
|
|
prev_batch,
|
|
|
|
events: room_events,
|
|
|
|
},
|
2024-07-16 08:05:25 +00:00
|
|
|
state: RoomState {
|
2024-03-25 17:05:11 -04:00
|
|
|
events: state_events
|
|
|
|
.iter()
|
|
|
|
.map(|pdu| pdu.to_sync_state_event())
|
|
|
|
.collect(),
|
2023-03-13 10:39:02 +01:00
|
|
|
},
|
|
|
|
ephemeral: Ephemeral {
|
|
|
|
events: edus,
|
|
|
|
},
|
|
|
|
unread_thread_notifications: BTreeMap::new(),
|
|
|
|
})
|
2020-07-30 18:14:47 +02:00
|
|
|
}
|
2020-08-21 21:22:59 +02:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
/// Loads the newest timeline events of `room_id` that arrived strictly after
/// `roomsincecount`, returning at most `limit` of them.
///
/// The returned `Vec` is in chronological (oldest-first) order. The second
/// element of the tuple is the `limited` flag: `true` when at least one more
/// matching event exists beyond the `limit` that was returned, meaning the
/// client's timeline gap was not fully filled.
async fn load_timeline(
	services: &Services, sender_user: &UserId, room_id: &RoomId, roomsincecount: PduCount, limit: u64,
) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> {
	let timeline_pdus;
	// Only walk the timeline at all if something newer than `roomsincecount`
	// exists; otherwise return an empty, non-limited result.
	let limited = if services
		.rooms
		.timeline
		.last_timeline_count(sender_user, room_id)
		.await?
		> roomsincecount
	{
		// Stream PDUs backwards from the newest, stopping once we reach events
		// at or before the client's since-count.
		let mut non_timeline_pdus = services
			.rooms
			.timeline
			.pdus_until(sender_user, room_id, PduCount::max())
			.await?
			.ready_take_while(|(pducount, _)| pducount > &roomsincecount);

		// Take the last events for the timeline. `by_ref` keeps the stream
		// alive so we can probe it again below for the `limited` flag; `rev`
		// restores chronological order after the backwards walk.
		timeline_pdus = non_timeline_pdus
			.by_ref()
			.take(usize_from_u64_truncated(limit))
			.collect::<Vec<_>>()
			.await
			.into_iter()
			.rev()
			.collect::<Vec<_>>();

		// The /sync response doesn't always return all messages, so we say the output
		// is limited unless there are events in non_timeline_pdus
		non_timeline_pdus.next().await.is_some()
	} else {
		timeline_pdus = Vec::new();
		false
	};

	Ok((timeline_pdus, limited))
}
|
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
async fn share_encrypted_room(
|
|
|
|
services: &Services, sender_user: &UserId, user_id: &UserId, ignore_room: Option<&RoomId>,
|
|
|
|
) -> bool {
|
|
|
|
services
|
2021-06-08 18:10:00 +02:00
|
|
|
.rooms
|
2022-10-05 09:34:25 +02:00
|
|
|
.user
|
2024-08-08 17:18:30 +00:00
|
|
|
.get_shared_rooms(sender_user, user_id)
|
|
|
|
.ready_filter(|&room_id| Some(room_id) != ignore_room)
|
2024-10-01 02:47:39 +00:00
|
|
|
.any(|other_room_id| {
|
2024-08-08 17:18:30 +00:00
|
|
|
services
|
|
|
|
.rooms
|
|
|
|
.state_accessor
|
|
|
|
.room_state_get(other_room_id, &StateEventType::RoomEncryption, "")
|
2024-10-01 02:47:39 +00:00
|
|
|
.map(Result::into_is_ok)
|
2020-08-21 21:22:59 +02:00
|
|
|
})
|
2024-08-08 17:18:30 +00:00
|
|
|
.await
|
2020-08-21 21:22:59 +02:00
|
|
|
}
|
2023-07-06 10:32:25 +02:00
|
|
|
|
2024-03-23 23:13:40 -04:00
|
|
|
/// POST `/_matrix/client/unstable/org.matrix.msc3575/sync`
|
|
|
|
///
|
|
|
|
/// Sliding Sync endpoint (future endpoint: `/_matrix/client/v4/sync`)
|
2024-04-22 23:48:57 -04:00
|
|
|
pub(crate) async fn sync_events_v4_route(
|
2024-07-16 08:05:25 +00:00
|
|
|
State(services): State<crate::State>, body: Ruma<sync_events::v4::Request>,
|
2024-08-24 06:57:31 +02:00
|
|
|
) -> Result<sync_events::v4::Response> {
|
2024-09-28 22:12:17 -04:00
|
|
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
2023-07-06 10:32:25 +02:00
|
|
|
let sender_device = body.sender_device.expect("user is authenticated");
|
2023-08-09 18:27:30 +02:00
|
|
|
let mut body = body.body;
|
2023-07-06 10:32:25 +02:00
|
|
|
// Setup watchers, so if there's no response, we can wait for them
|
2024-09-28 22:12:17 -04:00
|
|
|
let watcher = services.globals.watch(sender_user, &sender_device);
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-07-16 08:05:25 +00:00
|
|
|
let next_batch = services.globals.next_count()?;
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-24 06:45:33 +02:00
|
|
|
let conn_id = body
|
|
|
|
.conn_id
|
|
|
|
.clone()
|
|
|
|
.unwrap_or_else(|| SINGLE_CONNECTION_SYNC.to_owned());
|
|
|
|
|
2024-03-25 17:05:11 -04:00
|
|
|
let globalsince = body
|
|
|
|
.pos
|
|
|
|
.as_ref()
|
|
|
|
.and_then(|string| string.parse().ok())
|
|
|
|
.unwrap_or(0);
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-24 06:57:31 +02:00
|
|
|
if globalsince != 0
|
|
|
|
&& !services
|
2024-08-08 17:18:30 +00:00
|
|
|
.sync
|
2024-08-24 06:57:31 +02:00
|
|
|
.remembered(sender_user.clone(), sender_device.clone(), conn_id.clone())
|
|
|
|
{
|
|
|
|
debug!("Restarting sync stream because it was gone from the database");
|
|
|
|
return Err(Error::Request(
|
|
|
|
ErrorKind::UnknownPos,
|
|
|
|
"Connection data lost since last time".into(),
|
|
|
|
http::StatusCode::BAD_REQUEST,
|
|
|
|
));
|
|
|
|
}
|
|
|
|
|
2023-09-13 20:54:53 +02:00
|
|
|
if globalsince == 0 {
|
2024-08-24 06:45:33 +02:00
|
|
|
services
|
2024-08-08 17:18:30 +00:00
|
|
|
.sync
|
2024-08-24 06:45:33 +02:00
|
|
|
.forget_sync_request_connection(sender_user.clone(), sender_device.clone(), conn_id.clone());
|
2023-07-23 21:57:11 +02:00
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-07-24 10:41:50 +02:00
|
|
|
// Get sticky parameters from cache
|
2023-07-23 21:57:11 +02:00
|
|
|
let known_rooms =
|
2024-07-16 08:05:25 +00:00
|
|
|
services
|
2024-08-08 17:18:30 +00:00
|
|
|
.sync
|
2024-03-25 17:05:11 -04:00
|
|
|
.update_sync_request_with_cache(sender_user.clone(), sender_device.clone(), &mut body);
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-10-01 02:47:39 +00:00
|
|
|
let all_joined_rooms: Vec<_> = services
|
2024-03-25 17:05:11 -04:00
|
|
|
.rooms
|
|
|
|
.state_cache
|
2024-09-28 22:12:17 -04:00
|
|
|
.rooms_joined(sender_user)
|
2024-08-08 17:18:30 +00:00
|
|
|
.map(ToOwned::to_owned)
|
2024-10-01 02:47:39 +00:00
|
|
|
.collect()
|
2024-08-08 17:18:30 +00:00
|
|
|
.await;
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-10-01 02:47:39 +00:00
|
|
|
let all_invited_rooms: Vec<_> = services
|
2024-08-24 06:44:32 +02:00
|
|
|
.rooms
|
|
|
|
.state_cache
|
2024-09-28 22:12:17 -04:00
|
|
|
.rooms_invited(sender_user)
|
2024-08-24 06:44:32 +02:00
|
|
|
.map(|r| r.0)
|
2024-10-01 02:47:39 +00:00
|
|
|
.collect()
|
2024-08-08 17:18:30 +00:00
|
|
|
.await;
|
2024-08-24 06:44:32 +02:00
|
|
|
|
|
|
|
let all_rooms = all_joined_rooms
|
|
|
|
.iter()
|
2024-08-08 17:18:30 +00:00
|
|
|
.chain(all_invited_rooms.iter())
|
|
|
|
.map(Clone::clone)
|
2024-08-24 06:44:32 +02:00
|
|
|
.collect();
|
|
|
|
|
2023-07-24 10:41:50 +02:00
|
|
|
if body.extensions.to_device.enabled.unwrap_or(false) {
|
2024-07-16 08:05:25 +00:00
|
|
|
services
|
2024-03-25 17:05:11 -04:00
|
|
|
.users
|
2024-09-28 22:12:17 -04:00
|
|
|
.remove_to_device_events(sender_user, &sender_device, globalsince)
|
2024-08-08 17:18:30 +00:00
|
|
|
.await;
|
2023-07-24 10:41:50 +02:00
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-07-24 10:41:50 +02:00
|
|
|
let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in
|
|
|
|
let mut device_list_changes = HashSet::new();
|
|
|
|
let mut device_list_left = HashSet::new();
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-30 18:23:42 +02:00
|
|
|
let mut receipts = sync_events::v4::Receipts {
|
|
|
|
rooms: BTreeMap::new(),
|
|
|
|
};
|
|
|
|
|
2024-08-30 10:31:08 +02:00
|
|
|
let mut account_data = sync_events::v4::AccountData {
|
|
|
|
global: Vec::new(),
|
|
|
|
rooms: BTreeMap::new(),
|
|
|
|
};
|
|
|
|
if body.extensions.account_data.enabled.unwrap_or(false) {
|
|
|
|
account_data.global = services
|
|
|
|
.account_data
|
2024-09-28 22:12:17 -04:00
|
|
|
.changes_since(None, sender_user, globalsince)
|
2024-08-08 17:18:30 +00:00
|
|
|
.await?
|
2024-08-30 10:31:08 +02:00
|
|
|
.into_iter()
|
|
|
|
.filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Global))
|
|
|
|
.collect();
|
|
|
|
|
|
|
|
if let Some(rooms) = body.extensions.account_data.rooms {
|
|
|
|
for room in rooms {
|
|
|
|
account_data.rooms.insert(
|
|
|
|
room.clone(),
|
|
|
|
services
|
|
|
|
.account_data
|
2024-09-28 22:12:17 -04:00
|
|
|
.changes_since(Some(&room), sender_user, globalsince)
|
2024-08-08 17:18:30 +00:00
|
|
|
.await?
|
2024-08-30 10:31:08 +02:00
|
|
|
.into_iter()
|
|
|
|
.filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room))
|
|
|
|
.collect(),
|
|
|
|
);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-07-24 10:41:50 +02:00
|
|
|
if body.extensions.e2ee.enabled.unwrap_or(false) {
|
|
|
|
// Look for device list updates of this account
|
2024-03-25 17:05:11 -04:00
|
|
|
device_list_changes.extend(
|
2024-07-16 08:05:25 +00:00
|
|
|
services
|
2024-03-25 17:05:11 -04:00
|
|
|
.users
|
|
|
|
.keys_changed(sender_user.as_ref(), globalsince, None)
|
2024-08-08 17:18:30 +00:00
|
|
|
.map(ToOwned::to_owned)
|
|
|
|
.collect::<Vec<_>>()
|
|
|
|
.await,
|
2024-03-25 17:05:11 -04:00
|
|
|
);
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-07-24 10:41:50 +02:00
|
|
|
for room_id in &all_joined_rooms {
|
2024-08-08 17:18:30 +00:00
|
|
|
let Ok(current_shortstatehash) = services.rooms.state.get_room_shortstatehash(room_id).await else {
|
|
|
|
error!("Room {room_id} has no state");
|
2023-07-24 10:41:50 +02:00
|
|
|
continue;
|
|
|
|
};
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-07-16 08:05:25 +00:00
|
|
|
let since_shortstatehash = services
|
2024-03-25 17:05:11 -04:00
|
|
|
.rooms
|
|
|
|
.user
|
2024-08-08 17:18:30 +00:00
|
|
|
.get_token_shortstatehash(room_id, globalsince)
|
|
|
|
.await
|
|
|
|
.ok();
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-07-16 08:05:25 +00:00
|
|
|
let encrypted_room = services
|
2023-07-24 10:41:50 +02:00
|
|
|
.rooms
|
|
|
|
.state_accessor
|
2024-08-08 17:18:30 +00:00
|
|
|
.state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")
|
|
|
|
.await
|
|
|
|
.is_ok();
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-07-24 10:41:50 +02:00
|
|
|
if let Some(since_shortstatehash) = since_shortstatehash {
|
|
|
|
// Skip if there are only timeline changes
|
|
|
|
if since_shortstatehash == current_shortstatehash {
|
|
|
|
continue;
|
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
let since_encryption = services
|
|
|
|
.rooms
|
|
|
|
.state_accessor
|
|
|
|
.state_get(since_shortstatehash, &StateEventType::RoomEncryption, "")
|
|
|
|
.await;
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-10-03 09:38:10 +00:00
|
|
|
let since_sender_member: Option<RoomMemberEventContent> = services
|
|
|
|
.rooms
|
|
|
|
.state_accessor
|
|
|
|
.state_get_content(since_shortstatehash, &StateEventType::RoomMember, sender_user.as_str())
|
|
|
|
.ok()
|
|
|
|
.await;
|
|
|
|
|
2023-07-24 10:41:50 +02:00
|
|
|
let joined_since_last_sync =
|
|
|
|
since_sender_member.map_or(true, |member| member.membership != MembershipState::Join);
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
let new_encrypted_room = encrypted_room && since_encryption.is_err();
|
|
|
|
|
2023-07-24 10:41:50 +02:00
|
|
|
if encrypted_room {
|
2024-07-16 08:05:25 +00:00
|
|
|
let current_state_ids = services
|
2024-03-25 17:05:11 -04:00
|
|
|
.rooms
|
|
|
|
.state_accessor
|
|
|
|
.state_full_ids(current_shortstatehash)
|
|
|
|
.await?;
|
2024-08-08 17:18:30 +00:00
|
|
|
|
2024-07-16 08:05:25 +00:00
|
|
|
let since_state_ids = services
|
2024-03-25 17:05:11 -04:00
|
|
|
.rooms
|
|
|
|
.state_accessor
|
|
|
|
.state_full_ids(since_shortstatehash)
|
|
|
|
.await?;
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-07-24 10:41:50 +02:00
|
|
|
for (key, id) in current_state_ids {
|
|
|
|
if since_state_ids.get(&key) != Some(&id) {
|
2024-08-08 17:18:30 +00:00
|
|
|
let Ok(pdu) = services.rooms.timeline.get_pdu(&id).await else {
|
|
|
|
error!("Pdu in state not found: {id}");
|
2024-03-27 16:33:09 -04:00
|
|
|
continue;
|
2023-07-24 10:41:50 +02:00
|
|
|
};
|
2024-09-29 20:57:33 -04:00
|
|
|
if pdu.kind == RoomMember {
|
2023-07-24 10:41:50 +02:00
|
|
|
if let Some(state_key) = &pdu.state_key {
|
|
|
|
let user_id = UserId::parse(state_key.clone())
|
|
|
|
.map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?;
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-09-28 22:12:17 -04:00
|
|
|
if user_id == *sender_user {
|
2023-07-24 10:41:50 +02:00
|
|
|
continue;
|
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-07-24 10:41:50 +02:00
|
|
|
let new_membership =
|
|
|
|
serde_json::from_str::<RoomMemberEventContent>(pdu.content.get())
|
|
|
|
.map_err(|_| Error::bad_database("Invalid PDU in database."))?
|
|
|
|
.membership;
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-07-24 10:41:50 +02:00
|
|
|
match new_membership {
|
|
|
|
MembershipState::Join => {
|
|
|
|
// A new user joined an encrypted room
|
2024-09-28 22:12:17 -04:00
|
|
|
if !share_encrypted_room(&services, sender_user, &user_id, Some(room_id))
|
2024-08-08 17:18:30 +00:00
|
|
|
.await
|
|
|
|
{
|
2023-07-24 10:41:50 +02:00
|
|
|
device_list_changes.insert(user_id);
|
2024-03-05 19:48:54 -05:00
|
|
|
}
|
2023-07-24 10:41:50 +02:00
|
|
|
},
|
|
|
|
MembershipState::Leave => {
|
|
|
|
// Write down users that have left encrypted rooms we are in
|
|
|
|
left_encrypted_users.insert(user_id);
|
|
|
|
},
|
|
|
|
_ => {},
|
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2023-07-24 10:41:50 +02:00
|
|
|
}
|
|
|
|
if joined_since_last_sync || new_encrypted_room {
|
|
|
|
// If the user is in a new encrypted room, give them all joined users
|
|
|
|
device_list_changes.extend(
|
2024-07-16 08:05:25 +00:00
|
|
|
services
|
2023-07-24 10:41:50 +02:00
|
|
|
.rooms
|
|
|
|
.state_cache
|
2023-11-27 00:39:50 -05:00
|
|
|
.room_members(room_id)
|
2024-10-01 02:47:39 +00:00
|
|
|
// Don't send key updates from the sender to the sender
|
|
|
|
.ready_filter(|user_id| sender_user != user_id)
|
|
|
|
// Only send keys if the sender doesn't share an encrypted room with the target
|
|
|
|
// already
|
|
|
|
.filter_map(|user_id| {
|
|
|
|
share_encrypted_room(&services, sender_user, user_id, Some(room_id))
|
|
|
|
.map(|res| res.or_some(user_id.to_owned()))
|
2024-08-08 17:18:30 +00:00
|
|
|
})
|
|
|
|
.collect::<Vec<_>>()
|
|
|
|
.await,
|
2023-07-24 10:41:50 +02:00
|
|
|
);
|
2024-03-05 19:48:54 -05:00
|
|
|
}
|
|
|
|
}
|
2023-07-24 10:41:50 +02:00
|
|
|
}
|
|
|
|
// Look for device list updates in this room
|
2024-03-25 17:05:11 -04:00
|
|
|
device_list_changes.extend(
|
2024-07-16 08:05:25 +00:00
|
|
|
services
|
2024-03-25 17:05:11 -04:00
|
|
|
.users
|
|
|
|
.keys_changed(room_id.as_ref(), globalsince, None)
|
2024-08-08 17:18:30 +00:00
|
|
|
.map(ToOwned::to_owned)
|
|
|
|
.collect::<Vec<_>>()
|
|
|
|
.await,
|
2024-03-25 17:05:11 -04:00
|
|
|
);
|
2023-07-24 10:41:50 +02:00
|
|
|
}
|
2024-08-08 17:18:30 +00:00
|
|
|
|
2023-07-24 10:41:50 +02:00
|
|
|
for user_id in left_encrypted_users {
|
2024-09-28 22:12:17 -04:00
|
|
|
let dont_share_encrypted_room = !share_encrypted_room(&services, sender_user, &user_id, None).await;
|
2024-08-08 17:18:30 +00:00
|
|
|
|
2023-07-24 10:41:50 +02:00
|
|
|
// If the user doesn't share an encrypted room with the target anymore, we need
|
|
|
|
// to tell them
|
|
|
|
if dont_share_encrypted_room {
|
|
|
|
device_list_left.insert(user_id);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-07-06 10:32:25 +02:00
|
|
|
let mut lists = BTreeMap::new();
|
|
|
|
let mut todo_rooms = BTreeMap::new(); // and required state
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
for (list_id, list) in &body.lists {
|
2024-08-24 06:44:32 +02:00
|
|
|
let active_rooms = match list.filters.clone().and_then(|f| f.is_invite) {
|
|
|
|
Some(true) => &all_invited_rooms,
|
|
|
|
Some(false) => &all_joined_rooms,
|
|
|
|
None => &all_rooms,
|
|
|
|
};
|
|
|
|
|
|
|
|
let active_rooms = match list.filters.clone().map(|f| f.not_room_types) {
|
|
|
|
Some(filter) if filter.is_empty() => active_rooms.clone(),
|
2024-08-08 17:18:30 +00:00
|
|
|
Some(value) => filter_rooms(active_rooms, State(services), &value, true).await,
|
2024-08-24 06:44:32 +02:00
|
|
|
None => active_rooms.clone(),
|
|
|
|
};
|
|
|
|
|
|
|
|
let active_rooms = match list.filters.clone().map(|f| f.room_types) {
|
|
|
|
Some(filter) if filter.is_empty() => active_rooms.clone(),
|
2024-08-08 17:18:30 +00:00
|
|
|
Some(value) => filter_rooms(&active_rooms, State(services), &value, false).await,
|
2024-08-24 06:44:32 +02:00
|
|
|
None => active_rooms,
|
|
|
|
};
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-09-13 20:54:53 +02:00
|
|
|
let mut new_known_rooms = BTreeSet::new();
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
let ranges = list.ranges.clone();
|
2023-07-06 10:32:25 +02:00
|
|
|
lists.insert(
|
2023-07-23 21:57:11 +02:00
|
|
|
list_id.clone(),
|
2023-07-06 10:32:25 +02:00
|
|
|
sync_events::v4::SyncList {
|
2024-08-08 17:18:30 +00:00
|
|
|
ops: ranges
|
2023-07-06 10:32:25 +02:00
|
|
|
.into_iter()
|
|
|
|
.map(|mut r| {
|
2024-05-03 21:42:47 -04:00
|
|
|
r.0 = r.0.clamp(
|
|
|
|
uint!(0),
|
2024-08-24 06:44:32 +02:00
|
|
|
UInt::try_from(active_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX),
|
2024-05-03 21:42:47 -04:00
|
|
|
);
|
2024-08-24 06:44:32 +02:00
|
|
|
r.1 =
|
|
|
|
r.1.clamp(r.0, UInt::try_from(active_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX));
|
2024-08-08 17:18:30 +00:00
|
|
|
|
2024-08-24 06:44:32 +02:00
|
|
|
let room_ids = if !active_rooms.is_empty() {
|
|
|
|
active_rooms[usize_from_ruma(r.0)..=usize_from_ruma(r.1)].to_vec()
|
2024-08-24 05:23:51 +02:00
|
|
|
} else {
|
|
|
|
Vec::new()
|
|
|
|
};
|
2024-08-08 17:18:30 +00:00
|
|
|
|
2023-09-13 20:54:53 +02:00
|
|
|
new_known_rooms.extend(room_ids.iter().cloned());
|
2023-07-23 21:57:11 +02:00
|
|
|
for room_id in &room_ids {
|
2024-03-25 17:05:11 -04:00
|
|
|
let todo_room = todo_rooms
|
|
|
|
.entry(room_id.clone())
|
|
|
|
.or_insert((BTreeSet::new(), 0, u64::MAX));
|
2024-08-08 17:18:30 +00:00
|
|
|
|
2024-03-25 17:05:11 -04:00
|
|
|
let limit = list
|
|
|
|
.room_details
|
|
|
|
.timeline_limit
|
|
|
|
.map_or(10, u64::from)
|
|
|
|
.min(100);
|
2024-08-08 17:18:30 +00:00
|
|
|
|
2024-03-25 17:05:11 -04:00
|
|
|
todo_room
|
|
|
|
.0
|
|
|
|
.extend(list.room_details.required_state.iter().cloned());
|
2024-08-08 17:18:30 +00:00
|
|
|
|
2023-07-24 10:41:50 +02:00
|
|
|
todo_room.1 = todo_room.1.max(limit);
|
2023-09-13 20:54:53 +02:00
|
|
|
// 0 means unknown because it got out of date
|
2024-03-25 17:05:11 -04:00
|
|
|
todo_room.2 = todo_room.2.min(
|
|
|
|
known_rooms
|
2024-08-08 17:18:30 +00:00
|
|
|
.get(list_id.as_str())
|
2024-03-25 17:05:11 -04:00
|
|
|
.and_then(|k| k.get(room_id))
|
|
|
|
.copied()
|
|
|
|
.unwrap_or(0),
|
|
|
|
);
|
2023-07-23 21:57:11 +02:00
|
|
|
}
|
2023-07-06 10:32:25 +02:00
|
|
|
sync_events::v4::SyncOp {
|
|
|
|
op: SlidingOp::Sync,
|
2023-11-27 00:39:50 -05:00
|
|
|
range: Some(r),
|
2023-07-06 10:32:25 +02:00
|
|
|
index: None,
|
|
|
|
room_ids,
|
|
|
|
room_id: None,
|
|
|
|
}
|
|
|
|
})
|
|
|
|
.collect(),
|
2024-08-30 09:30:50 +02:00
|
|
|
count: ruma_from_usize(active_rooms.len()),
|
2023-07-06 10:32:25 +02:00
|
|
|
},
|
|
|
|
);
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-07-23 21:57:11 +02:00
|
|
|
if let Some(conn_id) = &body.conn_id {
|
2024-08-08 17:18:30 +00:00
|
|
|
services.sync.update_sync_known_rooms(
|
2023-07-23 21:57:11 +02:00
|
|
|
sender_user.clone(),
|
|
|
|
sender_device.clone(),
|
|
|
|
conn_id.clone(),
|
2024-08-08 17:18:30 +00:00
|
|
|
list_id.clone(),
|
2023-07-23 21:57:11 +02:00
|
|
|
new_known_rooms,
|
2023-09-13 20:54:53 +02:00
|
|
|
globalsince,
|
2023-07-23 21:57:11 +02:00
|
|
|
);
|
2024-03-05 19:48:54 -05:00
|
|
|
}
|
2023-07-23 21:57:11 +02:00
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-09-13 20:54:53 +02:00
|
|
|
let mut known_subscription_rooms = BTreeSet::new();
|
2023-08-09 18:27:30 +02:00
|
|
|
for (room_id, room) in &body.room_subscriptions {
|
2024-08-08 17:18:30 +00:00
|
|
|
if !services.rooms.metadata.exists(room_id).await {
|
2024-02-28 16:24:26 +01:00
|
|
|
continue;
|
|
|
|
}
|
2024-03-25 17:05:11 -04:00
|
|
|
let todo_room = todo_rooms
|
|
|
|
.entry(room_id.clone())
|
|
|
|
.or_insert((BTreeSet::new(), 0, u64::MAX));
|
2023-07-23 21:57:11 +02:00
|
|
|
let limit = room.timeline_limit.map_or(10, u64::from).min(100);
|
|
|
|
todo_room.0.extend(room.required_state.iter().cloned());
|
2023-07-24 10:41:50 +02:00
|
|
|
todo_room.1 = todo_room.1.max(limit);
|
2023-09-13 20:54:53 +02:00
|
|
|
// 0 means unknown because it got out of date
|
2024-03-25 17:05:11 -04:00
|
|
|
todo_room.2 = todo_room.2.min(
|
|
|
|
known_rooms
|
|
|
|
.get("subscriptions")
|
|
|
|
.and_then(|k| k.get(room_id))
|
|
|
|
.copied()
|
|
|
|
.unwrap_or(0),
|
|
|
|
);
|
2023-09-13 20:54:53 +02:00
|
|
|
known_subscription_rooms.insert(room_id.clone());
|
2023-07-24 10:41:50 +02:00
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-07-24 10:41:50 +02:00
|
|
|
for r in body.unsubscribe_rooms {
|
|
|
|
known_subscription_rooms.remove(&r);
|
|
|
|
body.room_subscriptions.remove(&r);
|
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-07-24 10:41:50 +02:00
|
|
|
if let Some(conn_id) = &body.conn_id {
|
2024-08-08 17:18:30 +00:00
|
|
|
services.sync.update_sync_known_rooms(
|
2023-07-24 10:41:50 +02:00
|
|
|
sender_user.clone(),
|
|
|
|
sender_device.clone(),
|
|
|
|
conn_id.clone(),
|
|
|
|
"subscriptions".to_owned(),
|
|
|
|
known_subscription_rooms,
|
2023-09-13 20:54:53 +02:00
|
|
|
globalsince,
|
2023-07-24 10:41:50 +02:00
|
|
|
);
|
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-07-24 10:41:50 +02:00
|
|
|
if let Some(conn_id) = &body.conn_id {
|
2024-08-08 17:18:30 +00:00
|
|
|
services.sync.update_sync_subscriptions(
|
2023-07-24 10:41:50 +02:00
|
|
|
sender_user.clone(),
|
|
|
|
sender_device.clone(),
|
|
|
|
conn_id.clone(),
|
|
|
|
body.room_subscriptions,
|
|
|
|
);
|
2023-07-06 10:32:25 +02:00
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-07-06 10:32:25 +02:00
|
|
|
let mut rooms = BTreeMap::new();
|
2023-09-13 20:54:53 +02:00
|
|
|
for (room_id, (required_state_request, timeline_limit, roomsince)) in &todo_rooms {
|
|
|
|
let roomsincecount = PduCount::Normal(*roomsince);
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-31 18:58:39 +02:00
|
|
|
let mut timestamp: Option<_> = None;
|
|
|
|
let mut invite_state = None;
|
|
|
|
let (timeline_pdus, limited);
|
|
|
|
if all_invited_rooms.contains(room_id) {
|
|
|
|
// TODO: figure out a timestamp we can use for remote invites
|
|
|
|
invite_state = services
|
|
|
|
.rooms
|
|
|
|
.state_cache
|
2024-09-28 22:12:17 -04:00
|
|
|
.invite_state(sender_user, room_id)
|
2024-08-08 17:18:30 +00:00
|
|
|
.await
|
|
|
|
.ok();
|
2024-08-31 18:58:39 +02:00
|
|
|
|
|
|
|
(timeline_pdus, limited) = (Vec::new(), true);
|
|
|
|
} else {
|
|
|
|
(timeline_pdus, limited) =
|
2024-09-28 22:12:17 -04:00
|
|
|
match load_timeline(&services, sender_user, room_id, roomsincecount, *timeline_limit).await {
|
2024-08-31 18:58:39 +02:00
|
|
|
Ok(value) => value,
|
|
|
|
Err(err) => {
|
|
|
|
warn!("Encountered missing timeline in {}, error {}", room_id, err);
|
|
|
|
continue;
|
|
|
|
},
|
|
|
|
};
|
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-30 10:31:08 +02:00
|
|
|
account_data.rooms.insert(
|
|
|
|
room_id.clone(),
|
|
|
|
services
|
|
|
|
.account_data
|
2024-09-28 22:12:17 -04:00
|
|
|
.changes_since(Some(room_id), sender_user, *roomsince)
|
2024-08-08 17:18:30 +00:00
|
|
|
.await?
|
2024-08-30 10:31:08 +02:00
|
|
|
.into_iter()
|
|
|
|
.filter_map(|e| extract_variant!(e, AnyRawAccountDataEvent::Room))
|
|
|
|
.collect(),
|
|
|
|
);
|
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
let vector: Vec<_> = services
|
2024-08-30 18:23:42 +02:00
|
|
|
.rooms
|
|
|
|
.read_receipt
|
2024-08-08 17:18:30 +00:00
|
|
|
.readreceipts_since(room_id, *roomsince)
|
2024-09-28 21:44:38 -04:00
|
|
|
.filter_map(|(read_user, ts, v)| async move {
|
|
|
|
(!services
|
|
|
|
.users
|
|
|
|
.user_is_ignored(&read_user, sender_user)
|
|
|
|
.await)
|
|
|
|
.then_some((read_user, ts, v))
|
|
|
|
})
|
2024-08-08 17:18:30 +00:00
|
|
|
.collect()
|
|
|
|
.await;
|
|
|
|
|
2024-08-30 18:23:42 +02:00
|
|
|
let receipt_size = vector.len();
|
|
|
|
receipts
|
|
|
|
.rooms
|
|
|
|
.insert(room_id.clone(), pack_receipts(Box::new(vector.into_iter())));
|
|
|
|
|
|
|
|
if roomsince != &0
|
|
|
|
&& timeline_pdus.is_empty()
|
|
|
|
&& account_data.rooms.get(room_id).is_some_and(Vec::is_empty)
|
|
|
|
&& receipt_size == 0
|
|
|
|
{
|
2023-07-23 21:57:11 +02:00
|
|
|
continue;
|
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-07-10 16:25:33 +02:00
|
|
|
let prev_batch = timeline_pdus
|
|
|
|
.first()
|
|
|
|
.map_or(Ok::<_, Error>(None), |(pdu_count, _)| {
|
|
|
|
Ok(Some(match pdu_count {
|
|
|
|
PduCount::Backfilled(_) => {
|
|
|
|
error!("timeline in backfill state?!");
|
|
|
|
"0".to_owned()
|
|
|
|
},
|
|
|
|
PduCount::Normal(c) => c.to_string(),
|
|
|
|
}))
|
2023-07-23 21:57:11 +02:00
|
|
|
})?
|
|
|
|
.or_else(|| {
|
2023-09-13 20:54:53 +02:00
|
|
|
if roomsince != &0 {
|
|
|
|
Some(roomsince.to_string())
|
2023-07-23 21:57:11 +02:00
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
});
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-03-25 17:05:11 -04:00
|
|
|
let room_events: Vec<_> = timeline_pdus
|
|
|
|
.iter()
|
2024-09-28 22:12:17 -04:00
|
|
|
.stream()
|
|
|
|
.filter_map(|(_, pdu)| async move {
|
|
|
|
// list of safe and common non-state events to ignore
|
|
|
|
if matches!(
|
|
|
|
&pdu.kind,
|
|
|
|
RoomMessage
|
|
|
|
| Sticker | CallInvite
|
|
|
|
| CallNotify | RoomEncrypted
|
|
|
|
| Image | File | Audio
|
|
|
|
| Voice | Video | UnstablePollStart
|
|
|
|
| PollStart | KeyVerificationStart
|
|
|
|
| Reaction | Emote | Location
|
|
|
|
) && services
|
|
|
|
.users
|
|
|
|
.user_is_ignored(&pdu.sender, sender_user)
|
|
|
|
.await
|
|
|
|
{
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
|
|
|
|
Some(pdu.to_sync_room_event())
|
|
|
|
})
|
|
|
|
.collect()
|
|
|
|
.await;
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-08-24 14:52:48 +02:00
|
|
|
for (_, pdu) in timeline_pdus {
|
2024-08-30 18:20:18 +02:00
|
|
|
let ts = MilliSecondsSinceUnixEpoch(pdu.origin_server_ts);
|
|
|
|
if DEFAULT_BUMP_TYPES.contains(pdu.event_type()) && !timestamp.is_some_and(|time| time > ts) {
|
|
|
|
timestamp = Some(ts);
|
2024-08-24 14:52:48 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-07-06 10:32:25 +02:00
|
|
|
let required_state = required_state_request
|
|
|
|
.iter()
|
2024-08-08 17:18:30 +00:00
|
|
|
.stream()
|
|
|
|
.filter_map(|state| async move {
|
2024-07-16 08:05:25 +00:00
|
|
|
services
|
2024-03-25 17:05:11 -04:00
|
|
|
.rooms
|
|
|
|
.state_accessor
|
|
|
|
.room_state_get(room_id, &state.0, &state.1)
|
2024-08-08 17:18:30 +00:00
|
|
|
.await
|
|
|
|
.map(|s| s.to_sync_state_event())
|
|
|
|
.ok()
|
2024-03-25 17:05:11 -04:00
|
|
|
})
|
2024-08-08 17:18:30 +00:00
|
|
|
.collect()
|
|
|
|
.await;
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-08-07 17:54:08 +02:00
|
|
|
// Heroes
|
2024-10-01 02:47:39 +00:00
|
|
|
let heroes: Vec<_> = services
|
2023-08-07 17:54:08 +02:00
|
|
|
.rooms
|
|
|
|
.state_cache
|
2023-11-27 00:39:50 -05:00
|
|
|
.room_members(room_id)
|
2024-09-28 22:12:17 -04:00
|
|
|
.ready_filter(|member| member != sender_user)
|
2024-10-01 02:47:39 +00:00
|
|
|
.filter_map(|user_id| {
|
2024-08-08 17:18:30 +00:00
|
|
|
services
|
|
|
|
.rooms
|
|
|
|
.state_accessor
|
2024-10-01 02:47:39 +00:00
|
|
|
.get_member(room_id, user_id)
|
|
|
|
.map_ok(|memberevent| SlidingSyncRoomHero {
|
|
|
|
user_id: user_id.into(),
|
2024-08-08 17:18:30 +00:00
|
|
|
name: memberevent.displayname,
|
|
|
|
avatar: memberevent.avatar_url,
|
|
|
|
})
|
|
|
|
.ok()
|
2023-08-07 17:54:08 +02:00
|
|
|
})
|
|
|
|
.take(5)
|
2024-10-01 02:47:39 +00:00
|
|
|
.collect()
|
2024-08-08 17:18:30 +00:00
|
|
|
.await;
|
|
|
|
|
2023-11-27 00:39:50 -05:00
|
|
|
let name = match heroes.len().cmp(&(1_usize)) {
|
|
|
|
Ordering::Greater => {
|
2024-05-03 21:42:47 -04:00
|
|
|
let firsts = heroes[1..]
|
|
|
|
.iter()
|
2024-08-21 19:47:39 -04:00
|
|
|
.map(|h| h.name.clone().unwrap_or_else(|| h.user_id.to_string()))
|
2024-05-03 21:42:47 -04:00
|
|
|
.collect::<Vec<_>>()
|
|
|
|
.join(", ");
|
2024-08-08 17:18:30 +00:00
|
|
|
|
2024-08-21 19:47:39 -04:00
|
|
|
let last = heroes[0]
|
|
|
|
.name
|
|
|
|
.clone()
|
|
|
|
.unwrap_or_else(|| heroes[0].user_id.to_string());
|
2024-08-08 17:18:30 +00:00
|
|
|
|
2024-05-03 21:42:47 -04:00
|
|
|
Some(format!("{firsts} and {last}"))
|
2023-11-27 00:39:50 -05:00
|
|
|
},
|
2024-08-21 19:47:39 -04:00
|
|
|
Ordering::Equal => Some(
|
|
|
|
heroes[0]
|
|
|
|
.name
|
|
|
|
.clone()
|
|
|
|
.unwrap_or_else(|| heroes[0].user_id.to_string()),
|
|
|
|
),
|
2023-11-27 00:39:50 -05:00
|
|
|
Ordering::Less => None,
|
2023-08-07 17:54:08 +02:00
|
|
|
};
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-01-07 21:21:13 -05:00
|
|
|
let heroes_avatar = if heroes.len() == 1 {
|
2024-08-21 19:47:39 -04:00
|
|
|
heroes[0].avatar.clone()
|
2023-08-07 17:54:08 +02:00
|
|
|
} else {
|
|
|
|
None
|
|
|
|
};
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-07-06 10:32:25 +02:00
|
|
|
rooms.insert(
|
|
|
|
room_id.clone(),
|
|
|
|
sync_events::v4::SlidingSyncRoom {
|
2024-08-08 17:18:30 +00:00
|
|
|
name: services
|
|
|
|
.rooms
|
|
|
|
.state_accessor
|
|
|
|
.get_name(room_id)
|
|
|
|
.await
|
|
|
|
.ok()
|
|
|
|
.or(name),
|
2024-01-07 21:21:13 -05:00
|
|
|
avatar: if let Some(heroes_avatar) = heroes_avatar {
|
|
|
|
ruma::JsOption::Some(heroes_avatar)
|
|
|
|
} else {
|
2024-08-08 17:18:30 +00:00
|
|
|
match services.rooms.state_accessor.get_avatar(room_id).await {
|
2024-03-20 17:41:16 -04:00
|
|
|
ruma::JsOption::Some(avatar) => ruma::JsOption::from_option(avatar.url),
|
2024-01-07 21:21:13 -05:00
|
|
|
ruma::JsOption::Null => ruma::JsOption::Null,
|
|
|
|
ruma::JsOption::Undefined => ruma::JsOption::Undefined,
|
|
|
|
}
|
|
|
|
},
|
2023-09-13 20:54:53 +02:00
|
|
|
initial: Some(roomsince == &0),
|
2023-07-06 10:32:25 +02:00
|
|
|
is_dm: None,
|
2024-08-24 07:33:07 +02:00
|
|
|
invite_state,
|
2023-07-06 10:32:25 +02:00
|
|
|
unread_notifications: UnreadNotificationsCount {
|
2023-07-24 10:41:50 +02:00
|
|
|
highlight_count: Some(
|
2024-07-16 08:05:25 +00:00
|
|
|
services
|
2023-07-24 10:41:50 +02:00
|
|
|
.rooms
|
|
|
|
.user
|
2024-09-28 22:12:17 -04:00
|
|
|
.highlight_count(sender_user, room_id)
|
2024-08-08 17:18:30 +00:00
|
|
|
.await
|
2023-07-24 10:41:50 +02:00
|
|
|
.try_into()
|
|
|
|
.expect("notification count can't go that high"),
|
|
|
|
),
|
|
|
|
notification_count: Some(
|
2024-07-16 08:05:25 +00:00
|
|
|
services
|
2023-07-24 10:41:50 +02:00
|
|
|
.rooms
|
|
|
|
.user
|
2024-09-28 22:12:17 -04:00
|
|
|
.notification_count(sender_user, room_id)
|
2024-08-08 17:18:30 +00:00
|
|
|
.await
|
2023-07-24 10:41:50 +02:00
|
|
|
.try_into()
|
|
|
|
.expect("notification count can't go that high"),
|
|
|
|
),
|
2023-07-06 10:32:25 +02:00
|
|
|
},
|
|
|
|
timeline: room_events,
|
|
|
|
required_state,
|
2023-07-10 16:25:33 +02:00
|
|
|
prev_batch,
|
2023-07-06 10:32:25 +02:00
|
|
|
limited,
|
|
|
|
joined_count: Some(
|
2024-07-16 08:05:25 +00:00
|
|
|
services
|
2024-03-25 17:05:11 -04:00
|
|
|
.rooms
|
|
|
|
.state_cache
|
2024-08-08 17:18:30 +00:00
|
|
|
.room_joined_count(room_id)
|
|
|
|
.await
|
2024-07-07 06:17:58 +00:00
|
|
|
.unwrap_or(0)
|
|
|
|
.try_into()
|
|
|
|
.unwrap_or_else(|_| uint!(0)),
|
2023-07-06 10:32:25 +02:00
|
|
|
),
|
|
|
|
invited_count: Some(
|
2024-07-16 08:05:25 +00:00
|
|
|
services
|
2024-03-25 17:05:11 -04:00
|
|
|
.rooms
|
|
|
|
.state_cache
|
2024-08-08 17:18:30 +00:00
|
|
|
.room_invited_count(room_id)
|
|
|
|
.await
|
2024-07-07 06:17:58 +00:00
|
|
|
.unwrap_or(0)
|
|
|
|
.try_into()
|
|
|
|
.unwrap_or_else(|_| uint!(0)),
|
2023-07-06 10:32:25 +02:00
|
|
|
),
|
2023-07-24 10:41:50 +02:00
|
|
|
num_live: None, // Count events in timeline greater than global sync counter
|
2024-08-24 14:52:48 +02:00
|
|
|
timestamp,
|
2024-08-21 19:47:39 -04:00
|
|
|
heroes: Some(heroes),
|
2023-07-06 10:32:25 +02:00
|
|
|
},
|
|
|
|
);
|
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2024-03-25 17:05:11 -04:00
|
|
|
if rooms
|
|
|
|
.iter()
|
|
|
|
.all(|(_, r)| r.timeline.is_empty() && r.required_state.is_empty())
|
|
|
|
{
|
2023-07-06 10:32:25 +02:00
|
|
|
// Hang a few seconds so requests are not spammed
|
|
|
|
// Stop hanging if new info arrives
|
2024-07-08 16:13:31 +00:00
|
|
|
let default = Duration::from_secs(30);
|
|
|
|
let duration = cmp::min(body.timeout.unwrap_or(default), default);
|
2024-07-08 16:10:18 +00:00
|
|
|
_ = tokio::time::timeout(duration, watcher).await;
|
2023-07-06 10:32:25 +02:00
|
|
|
}
|
2024-03-05 19:48:54 -05:00
|
|
|
|
2023-09-24 10:17:26 -04:00
|
|
|
Ok(sync_events::v4::Response {
|
2023-09-13 20:54:53 +02:00
|
|
|
initial: globalsince == 0,
|
2023-07-06 10:32:25 +02:00
|
|
|
txn_id: body.txn_id.clone(),
|
|
|
|
pos: next_batch.to_string(),
|
|
|
|
lists,
|
|
|
|
rooms,
|
|
|
|
extensions: sync_events::v4::Extensions {
|
2023-07-24 10:41:50 +02:00
|
|
|
to_device: if body.extensions.to_device.enabled.unwrap_or(false) {
|
|
|
|
Some(sync_events::v4::ToDevice {
|
2024-07-16 08:05:25 +00:00
|
|
|
events: services
|
2024-03-25 17:05:11 -04:00
|
|
|
.users
|
2024-09-28 22:12:17 -04:00
|
|
|
.get_to_device_events(sender_user, &sender_device)
|
2024-08-08 17:18:30 +00:00
|
|
|
.collect()
|
|
|
|
.await,
|
2023-07-24 10:41:50 +02:00
|
|
|
next_batch: next_batch.to_string(),
|
|
|
|
})
|
|
|
|
} else {
|
|
|
|
None
|
|
|
|
},
|
2023-07-06 10:32:25 +02:00
|
|
|
e2ee: sync_events::v4::E2EE {
|
|
|
|
device_lists: DeviceLists {
|
2023-07-24 10:41:50 +02:00
|
|
|
changed: device_list_changes.into_iter().collect(),
|
|
|
|
left: device_list_left.into_iter().collect(),
|
2023-07-06 10:32:25 +02:00
|
|
|
},
|
2024-07-16 08:05:25 +00:00
|
|
|
device_one_time_keys_count: services
|
2024-03-25 17:05:11 -04:00
|
|
|
.users
|
2024-09-28 22:12:17 -04:00
|
|
|
.count_one_time_keys(sender_user, &sender_device)
|
2024-08-08 17:18:30 +00:00
|
|
|
.await,
|
2023-07-24 10:41:50 +02:00
|
|
|
// Fallback keys are not yet supported
|
2023-07-06 10:32:25 +02:00
|
|
|
device_unused_fallback_key_types: None,
|
|
|
|
},
|
2024-08-30 10:31:08 +02:00
|
|
|
account_data,
|
2024-08-30 18:23:42 +02:00
|
|
|
receipts,
|
2023-07-06 10:32:25 +02:00
|
|
|
typing: sync_events::v4::Typing {
|
|
|
|
rooms: BTreeMap::new(),
|
2024-03-05 19:48:54 -05:00
|
|
|
},
|
2023-07-06 10:32:25 +02:00
|
|
|
},
|
|
|
|
delta_token: None,
|
2023-09-24 10:17:26 -04:00
|
|
|
})
|
2023-07-06 10:32:25 +02:00
|
|
|
}
|
2024-08-24 06:44:32 +02:00
|
|
|
|
2024-08-08 17:18:30 +00:00
|
|
|
async fn filter_rooms(
|
2024-09-07 18:09:21 +02:00
|
|
|
rooms: &[OwnedRoomId], State(services): State<crate::State>, filter: &[RoomTypeFilter], negate: bool,
|
2024-08-24 06:44:32 +02:00
|
|
|
) -> Vec<OwnedRoomId> {
|
2024-08-08 17:18:30 +00:00
|
|
|
rooms
|
2024-08-24 06:44:32 +02:00
|
|
|
.iter()
|
2024-08-08 17:18:30 +00:00
|
|
|
.stream()
|
|
|
|
.filter_map(|r| async move {
|
2024-10-05 12:30:05 -04:00
|
|
|
let room_type = services.rooms.state_accessor.get_room_type(r).await;
|
|
|
|
|
2024-10-04 17:07:31 +00:00
|
|
|
if room_type.as_ref().is_err_and(|e| !e.is_not_found()) {
|
2024-10-05 12:30:05 -04:00
|
|
|
return None;
|
2024-08-08 17:18:30 +00:00
|
|
|
}
|
2024-10-05 12:30:05 -04:00
|
|
|
|
|
|
|
let room_type_filter = RoomTypeFilter::from(room_type.ok());
|
|
|
|
|
|
|
|
let include = if negate {
|
|
|
|
!filter.contains(&room_type_filter)
|
|
|
|
} else {
|
|
|
|
filter.is_empty() || filter.contains(&room_type_filter)
|
|
|
|
};
|
|
|
|
|
|
|
|
include.then_some(r.to_owned())
|
2024-08-24 06:44:32 +02:00
|
|
|
})
|
2024-08-08 17:18:30 +00:00
|
|
|
.collect()
|
|
|
|
.await
|
2024-08-24 06:44:32 +02:00
|
|
|
}
|