// conduit/src/service/rooms/timeline/mod.rs
mod data;
use std::{
cmp::Ordering,
collections::{BTreeMap, HashMap},
};
use std::{
collections::HashSet,
sync::{Arc, Mutex, RwLock},
};
pub use data::Data;
use regex::Regex;
use ruma::{
api::{client::error::ErrorKind, federation},
canonical_json::to_canonical_value,
events::{
push_rules::PushRulesEvent,
room::{
create::RoomCreateEventContent, member::MembershipState,
power_levels::RoomPowerLevelsEventContent,
},
GlobalAccountDataEventType, StateEventType, TimelineEventType,
},
push::{Action, Ruleset, Tweak},
serde::Base64,
state_res,
state_res::{Event, RoomVersion},
uint, user_id, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId,
OwnedServerName, RoomAliasId, RoomId, ServerName, UserId,
};
use serde::Deserialize;
use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
use tokio::sync::MutexGuard;
use tracing::{error, info, warn};
use crate::{
api::server_server,
service::pdu::{EventHash, PduBuilder},
services, utils, Error, PduEvent, Result,
};
use super::state_compressor::CompressedStateEvent;
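/// A position in a room's timeline. `Normal` counts grow forwards from the
/// room's creation, while `Backfilled` counts grow towards older events pulled
/// in via backfill; in pagination tokens the latter are prefixed with `-`
/// (see `stringify` and `try_from_string`).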
#[derive(Hash, PartialEq, Eq, Clone, Copy, Debug)]
pub enum PduCount {
Backfilled(u64),
Normal(u64),
}
impl PduCount {
pub fn min() -> Self {
Self::Backfilled(u64::MAX)
}
pub fn max() -> Self {
Self::Normal(u64::MAX)
}
pub fn try_from_string(token: &str) -> Result<Self> {
if token.starts_with('-') {
token[1..].parse().map(PduCount::Backfilled)
} else {
token.parse().map(PduCount::Normal)
}
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid pagination token."))
}
pub fn stringify(&self) -> String {
match self {
PduCount::Backfilled(x) => format!("-{x}"),
PduCount::Normal(x) => x.to_string(),
}
}
}
impl PartialOrd for PduCount {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl Ord for PduCount {
fn cmp(&self, other: &Self) -> Ordering {
match (self, other) {
(PduCount::Normal(s), PduCount::Normal(o)) => s.cmp(o),
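// Backfilled counts grow towards older events, so a larger backfill
// index means an earlier position and the comparison is reversed.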
(PduCount::Backfilled(s), PduCount::Backfilled(o)) => o.cmp(s),
(PduCount::Normal(_), PduCount::Backfilled(_)) => Ordering::Greater,
(PduCount::Backfilled(_), PduCount::Normal(_)) => Ordering::Less,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn comparisons() {
assert!(PduCount::Normal(1) < PduCount::Normal(2));
assert!(PduCount::Backfilled(2) < PduCount::Backfilled(1));
assert!(PduCount::Normal(1) > PduCount::Backfilled(1));
assert!(PduCount::Backfilled(1) < PduCount::Normal(1));
}
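// Round-trip sketch for the pagination token format (a companion to `comparisons`):
// normal counts serialize as plain integers, backfilled counts get a leading '-'.
#[test]
fn pagination_token_round_trip() {
assert_eq!(PduCount::Normal(5).stringify(), "5");
assert_eq!(PduCount::Backfilled(3).stringify(), "-3");
assert_eq!(PduCount::try_from_string("5").unwrap(), PduCount::Normal(5));
assert_eq!(PduCount::try_from_string("-3").unwrap(), PduCount::Backfilled(3));
assert!(PduCount::try_from_string("not a token").is_err());
}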
}
pub struct Service {
pub db: &'static dyn Data,
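/// Per-room cache of the most recent timeline count.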
pub lasttimelinecount_cache: Mutex<HashMap<OwnedRoomId, PduCount>>,
}
impl Service {
#[tracing::instrument(skip(self))]
pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result<Option<Arc<PduEvent>>> {
self.all_pdus(&user_id!("@doesntmatter:conduit.rs"), &room_id)?
.next()
.map(|o| o.map(|(_, p)| Arc::new(p)))
.transpose()
}
#[tracing::instrument(skip(self))]
pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result<PduCount> {
self.db.last_timeline_count(sender_user, room_id)
}
/// Returns the `count` of this pdu's id.
pub fn get_pdu_count(&self, event_id: &EventId) -> Result<Option<PduCount>> {
self.db.get_pdu_count(event_id)
}
// TODO Is this the same as the function above?
/*
#[tracing::instrument(skip(self))]
pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result<u64> {
let prefix = self
.get_shortroomid(room_id)?
.expect("room exists")
.to_be_bytes()
.to_vec();
let mut last_possible_key = prefix.clone();
last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes());
self.pduid_pdu
.iter_from(&last_possible_key, true)
.take_while(move |(k, _)| k.starts_with(&prefix))
.next()
.map(|b| self.pdu_count(&b.0))
.transpose()
.map(|op| op.unwrap_or_default())
}
*/
/// Returns the json of a pdu.
pub fn get_pdu_json(&self, event_id: &EventId) -> Result<Option<CanonicalJsonObject>> {
self.db.get_pdu_json(event_id)
}
/// Returns the json of a non-outlier pdu.
pub fn get_non_outlier_pdu_json(
&self,
event_id: &EventId,
) -> Result<Option<CanonicalJsonObject>> {
self.db.get_non_outlier_pdu_json(event_id)
}
/// Returns the pdu's id.
pub fn get_pdu_id(&self, event_id: &EventId) -> Result<Option<Vec<u8>>> {
self.db.get_pdu_id(event_id)
}
/// Returns the pdu.
///
/// Checks the `eventid_outlierpdu` Tree if not found in the timeline.
pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result<Option<PduEvent>> {
self.db.get_non_outlier_pdu(event_id)
}
/// Returns the pdu.
///
/// Checks the `eventid_outlierpdu` Tree if not found in the timeline.
pub fn get_pdu(&self, event_id: &EventId) -> Result<Option<Arc<PduEvent>>> {
self.db.get_pdu(event_id)
}
/// Returns the pdu.
///
/// This does __NOT__ check the outliers `Tree`.
pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result<Option<PduEvent>> {
self.db.get_pdu_from_id(pdu_id)
}
/// Returns the pdu as a `BTreeMap<String, CanonicalJsonValue>`.
pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result<Option<CanonicalJsonObject>> {
self.db.get_pdu_json_from_id(pdu_id)
}
/// Removes a pdu and creates a new one with the same id.
#[tracing::instrument(skip(self))]
fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> {
self.db.replace_pdu(pdu_id, pdu)
}
/// Creates a new persisted data unit and adds it to a room.
///
/// By this point the incoming event should be fully authenticated, no auth happens
/// in `append_pdu`.
///
/// Returns pdu id
#[tracing::instrument(skip(self, pdu, pdu_json, leaves))]
pub fn append_pdu<'a>(
&self,
pdu: &PduEvent,
mut pdu_json: CanonicalJsonObject,
leaves: Vec<OwnedEventId>,
state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
) -> Result<Vec<u8>> {
let shortroomid = services()
.rooms
.short
.get_shortroomid(&pdu.room_id)?
.expect("room exists");
// Make unsigned fields correct. This is not properly documented in the spec, but state
// events need to have previous content in the unsigned field, so clients can easily
// interpret things like membership changes
if let Some(state_key) = &pdu.state_key {
if let CanonicalJsonValue::Object(unsigned) = pdu_json
.entry("unsigned".to_owned())
.or_insert_with(|| CanonicalJsonValue::Object(Default::default()))
{
if let Some(shortstatehash) = services()
.rooms
.state_accessor
.pdu_shortstatehash(&pdu.event_id)
.unwrap()
{
if let Some(prev_state) = services()
.rooms
.state_accessor
.state_get(shortstatehash, &pdu.kind.to_string().into(), state_key)
.unwrap()
{
unsigned.insert(
"prev_content".to_owned(),
CanonicalJsonValue::Object(
utils::to_canonical_object(prev_state.content.clone())
.expect("event is valid, we just created it"),
),
);
}
}
} else {
error!("Invalid unsigned type in pdu.");
}
}
// We must keep track of all events that have been referenced.
services()
.rooms
.pdu_metadata
.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?;
services()
.rooms
.state
.set_forward_extremities(&pdu.room_id, leaves, state_lock)?;
let mutex_insert = Arc::clone(
services()
.globals
.roomid_mutex_insert
.write()
.unwrap()
.entry(pdu.room_id.clone())
.or_default(),
);
let insert_lock = mutex_insert.lock().unwrap();
let count1 = services().globals.next_count()?;
// Mark as read first so the sending client doesn't get a notification even if appending
// fails
services()
.rooms
.edus
.read_receipt
.private_read_set(&pdu.room_id, &pdu.sender, count1)?;
services()
.rooms
.user
.reset_notification_counts(&pdu.sender, &pdu.room_id)?;
let count2 = services().globals.next_count()?;
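// A pdu_id is the room's shortroomid followed by the global count, both
// big-endian, so ids within a room sort in insertion order.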
let mut pdu_id = shortroomid.to_be_bytes().to_vec();
pdu_id.extend_from_slice(&count2.to_be_bytes());
// Insert pdu
self.db.append_pdu(&pdu_id, pdu, &pdu_json, count2)?;
drop(insert_lock);
// See if the event matches any known pushers
let power_levels: RoomPowerLevelsEventContent = services()
.rooms
.state_accessor
.room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")?
.map(|ev| {
serde_json::from_str(ev.content.get())
.map_err(|_| Error::bad_database("invalid m.room.power_levels event"))
})
.transpose()?
.unwrap_or_default();
let sync_pdu = pdu.to_sync_room_event();
let mut notifies = Vec::new();
let mut highlights = Vec::new();
for user in services()
.rooms
.state_cache
.get_our_real_users(&pdu.room_id)?
.iter()
{
// Don't notify the user of their own events
if user == &pdu.sender {
continue;
}
let rules_for_user = services()
.account_data
.get(
None,
user,
GlobalAccountDataEventType::PushRules.to_string().into(),
)?
.map(|event| {
serde_json::from_str::<PushRulesEvent>(event.get())
.map_err(|_| Error::bad_database("Invalid push rules event in db."))
})
.transpose()?
.map(|ev: PushRulesEvent| ev.content.global)
.unwrap_or_else(|| Ruleset::server_default(user));
let mut highlight = false;
let mut notify = false;
for action in services().pusher.get_actions(
user,
&rules_for_user,
&power_levels,
&sync_pdu,
&pdu.room_id,
)? {
match action {
Action::DontNotify => notify = false,
// TODO: Implement proper support for coalesce
Action::Notify | Action::Coalesce => notify = true,
Action::SetTweak(Tweak::Highlight(true)) => {
highlight = true;
}
_ => {}
};
}
if notify {
notifies.push(user.clone());
}
if highlight {
highlights.push(user.clone());
}
for push_key in services().pusher.get_pushkeys(user) {
services().sending.send_push_pdu(&pdu_id, user, push_key?)?;
}
}
self.db
.increment_notification_counts(&pdu.room_id, notifies, highlights)?;
match pdu.kind {
TimelineEventType::RoomRedaction => {
if let Some(redact_id) = &pdu.redacts {
self.redact_pdu(redact_id, pdu)?;
}
}
TimelineEventType::RoomMember => {
if let Some(state_key) = &pdu.state_key {
#[derive(Deserialize)]
struct ExtractMembership {
membership: MembershipState,
}
// The state_key of an m.room.member event is the id of the user the
// membership change applies to; it has already been validated.
let target_user_id = UserId::parse(state_key.clone())
.expect("This state_key was previously validated");
let content = serde_json::from_str::<ExtractMembership>(pdu.content.get())
.map_err(|_| Error::bad_database("Invalid content in pdu."))?;
let invite_state = match content.membership {
MembershipState::Invite => {
let state = services().rooms.state.calculate_invite_state(pdu)?;
Some(state)
}
_ => None,
};
// Update our membership info; we do this here in case a user is invited
// and immediately leaves, because we need the DB to record the invite event for auth
services().rooms.state_cache.update_membership(
&pdu.room_id,
&target_user_id,
content.membership,
&pdu.sender,
invite_state,
true,
)?;
}
}
TimelineEventType::RoomMessage => {
#[derive(Deserialize)]
struct ExtractBody {
body: Option<String>,
}
let content = serde_json::from_str::<ExtractBody>(pdu.content.get())
.map_err(|_| Error::bad_database("Invalid content in pdu."))?;
if let Some(body) = content.body {
services()
.rooms
.search
.index_pdu(shortroomid, &pdu_id, &body)?;
let admin_room = services().rooms.alias.resolve_local_alias(
<&RoomAliasId>::try_from(
format!("#admins:{}", services().globals.server_name()).as_str(),
)
.expect("#admins:server_name is a valid room alias"),
)?;
let server_user = format!("@conduit:{}", services().globals.server_name());
let to_conduit = body.starts_with(&format!("{server_user}: "))
|| body.starts_with(&format!("{server_user} "));
// This will evaluate to false if the emergency password is set up so that
// the administrator can execute commands as conduit
let from_conduit = pdu.sender == server_user
&& services().globals.emergency_password().is_none();
if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) {
services().admin.process_message(body);
}
}
}
_ => {}
}
for appservice in services().appservice.all()? {
if services()
.rooms
.state_cache
.appservice_in_room(&pdu.room_id, &appservice)?
{
services()
.sending
.send_pdu_appservice(appservice.0, pdu_id.clone())?;
continue;
}
// If the RoomMember event has a non-empty state_key, it is targeted at someone.
// If it is our appservice user, we send this PDU to it.
if pdu.kind == TimelineEventType::RoomMember {
if let Some(state_key_uid) = &pdu
.state_key
.as_ref()
.and_then(|state_key| UserId::parse(state_key.as_str()).ok())
{
if let Some(appservice_uid) = appservice
.1
.get("sender_localpart")
.and_then(|string| string.as_str())
.and_then(|string| {
UserId::parse_with_server_name(string, services().globals.server_name())
.ok()
})
{
if state_key_uid == &appservice_uid {
services()
.sending
.send_pdu_appservice(appservice.0, pdu_id.clone())?;
continue;
}
}
}
}
if let Some(namespaces) = appservice.1.get("namespaces") {
let users = namespaces
.get("users")
.and_then(|users| users.as_sequence())
.map_or_else(Vec::new, |users| {
users
.iter()
.filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok())
.collect::<Vec<_>>()
});
let aliases = namespaces
.get("aliases")
.and_then(|aliases| aliases.as_sequence())
.map_or_else(Vec::new, |aliases| {
aliases
.iter()
.filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok())
.collect::<Vec<_>>()
});
let rooms = namespaces
.get("rooms")
.and_then(|rooms| rooms.as_sequence());
let matching_users = |users: &Regex| {
users.is_match(pdu.sender.as_str())
|| pdu.kind == TimelineEventType::RoomMember
&& pdu
.state_key
.as_ref()
.map_or(false, |state_key| users.is_match(state_key))
};
let matching_aliases = |aliases: &Regex| {
services()
.rooms
.alias
.local_aliases_for_room(&pdu.room_id)
.filter_map(|r| r.ok())
.any(|room_alias| aliases.is_match(room_alias.as_str()))
};
if aliases.iter().any(matching_aliases)
|| rooms.map_or(false, |rooms| rooms.contains(&pdu.room_id.as_str().into()))
|| users.iter().any(matching_users)
{
services()
.sending
.send_pdu_appservice(appservice.0, pdu_id.clone())?;
}
}
}
Ok(pdu_id)
}
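/// Builds a PDU from a `PduBuilder`: fills in prev_events, depth, auth_events
/// and the `unsigned` prev_content/prev_sender, runs the auth check, then
/// hashes and signs the event and derives its event id. The event is returned
/// together with its canonical JSON, but is not yet appended to the timeline.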
pub fn create_hash_and_sign_event(
&self,
pdu_builder: PduBuilder,
sender: &UserId,
room_id: &RoomId,
_mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
) -> Result<(PduEvent, CanonicalJsonObject)> {
let PduBuilder {
event_type,
content,
unsigned,
state_key,
redacts,
} = pdu_builder;
let prev_events: Vec<_> = services()
.rooms
.state
.get_forward_extremities(room_id)?
.into_iter()
.take(20)
.collect();
let create_event = services().rooms.state_accessor.room_state_get(
room_id,
&StateEventType::RoomCreate,
"",
)?;
let create_event_content: Option<RoomCreateEventContent> = create_event
.as_ref()
.map(|create_event| {
serde_json::from_str(create_event.content.get()).map_err(|e| {
warn!("Invalid create event: {}", e);
Error::bad_database("Invalid create event in db.")
})
})
.transpose()?;
// If there was no create event yet, assume we are creating a room with the default
// version right now
let room_version_id = create_event_content
.map_or(services().globals.default_room_version(), |create_event| {
create_event.room_version
});
let room_version = RoomVersion::new(&room_version_id).expect("room version is supported");
let auth_events = services().rooms.state.get_auth_events(
room_id,
&event_type,
sender,
state_key.as_deref(),
&content,
)?;
// Our depth is the maximum depth of prev_events + 1
let depth = prev_events
.iter()
.filter_map(|event_id| Some(services().rooms.timeline.get_pdu(event_id).ok()??.depth))
.max()
.unwrap_or_else(|| uint!(0))
+ uint!(1);
let mut unsigned = unsigned.unwrap_or_default();
if let Some(state_key) = &state_key {
if let Some(prev_pdu) = services().rooms.state_accessor.room_state_get(
room_id,
&event_type.to_string().into(),
state_key,
)? {
unsigned.insert(
"prev_content".to_owned(),
serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"),
);
unsigned.insert(
"prev_sender".to_owned(),
serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"),
);
}
}
let mut pdu = PduEvent {
event_id: ruma::event_id!("$thiswillbefilledinlater").into(),
room_id: room_id.to_owned(),
sender: sender.to_owned(),
origin_server_ts: utils::millis_since_unix_epoch()
.try_into()
.expect("time is valid"),
kind: event_type,
content,
state_key,
prev_events,
depth,
auth_events: auth_events
.values()
.map(|pdu| pdu.event_id.clone())
.collect(),
redacts,
unsigned: if unsigned.is_empty() {
None
} else {
Some(to_raw_value(&unsigned).expect("to_raw_value always works"))
},
hashes: EventHash {
sha256: "aaa".to_owned(),
},
signatures: None,
};
let auth_check = state_res::auth_check(
&room_version,
&pdu,
None::<PduEvent>, // TODO: third_party_invite
|k, s| auth_events.get(&(k.clone(), s.to_owned())),
)
.map_err(|e| {
error!("{:?}", e);
Error::bad_database("Auth check failed.")
})?;
if !auth_check {
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Event is not authorized.",
));
}
// Hash and sign
let mut pdu_json =
utils::to_canonical_object(&pdu).expect("event is valid, we just created it");
pdu_json.remove("event_id");
// Add origin because synapse likes that (and it's required in the spec)
pdu_json.insert(
"origin".to_owned(),
to_canonical_value(services().globals.server_name())
.expect("server name is a valid CanonicalJsonValue"),
);
match ruma::signatures::hash_and_sign_event(
services().globals.server_name().as_str(),
services().globals.keypair(),
&mut pdu_json,
&room_version_id,
) {
Ok(_) => {}
Err(e) => {
return match e {
ruma::signatures::Error::PduSize => Err(Error::BadRequest(
ErrorKind::TooLarge,
"Message is too long",
)),
_ => Err(Error::BadRequest(
ErrorKind::Unknown,
"Signing event failed",
)),
}
}
}
// Generate event id
pdu.event_id = EventId::parse_arc(format!(
"${}",
ruma::signatures::reference_hash(&pdu_json, &room_version_id)
.expect("ruma can calculate reference hashes")
))
.expect("ruma's reference hashes are valid event ids");
pdu_json.insert(
"event_id".to_owned(),
CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()),
);
// Generate short event id
let _shorteventid = services()
.rooms
.short
.get_or_create_shorteventid(&pdu.event_id)?;
Ok((pdu, pdu_json))
}
/// Creates a new persisted data unit and adds it to a room. This function takes a
/// roomid_mutex_state, meaning that only this function is able to mutate the room state.
#[tracing::instrument(skip(self, state_lock))]
pub fn build_and_append_pdu(
&self,
pdu_builder: PduBuilder,
sender: &UserId,
room_id: &RoomId,
state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
) -> Result<Arc<EventId>> {
let (pdu, pdu_json) =
self.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)?;
let admin_room = services().rooms.alias.resolve_local_alias(
<&RoomAliasId>::try_from(
format!("#admins:{}", services().globals.server_name()).as_str(),
)
.expect("#admins:server_name is a valid room alias"),
)?;
if admin_room.filter(|v| v == room_id).is_some() {
match pdu.event_type() {
TimelineEventType::RoomEncryption => {
warn!("Encryption is not allowed in the admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Encryption is not allowed in the admins room.",
));
}
TimelineEventType::RoomMember => {
#[derive(Deserialize)]
struct ExtractMembership {
membership: MembershipState,
}
let target = pdu
.state_key()
.filter(|v| v.starts_with("@"))
.unwrap_or(sender.as_str());
let server_name = services().globals.server_name();
let server_user = format!("@conduit:{}", server_name);
let content = serde_json::from_str::<ExtractMembership>(pdu.content.get())
.map_err(|_| Error::bad_database("Invalid content in pdu."))?;
if content.membership == MembershipState::Leave {
if target == &server_user {
warn!("Conduit user cannot leave from admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Conduit user cannot leave from admins room.",
));
}
let count = services()
.rooms
.state_cache
.room_members(room_id)
.filter_map(|m| m.ok())
.filter(|m| m.server_name() == server_name)
.filter(|m| m != target)
.count();
if count < 2 {
warn!("Last admin cannot leave from admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Last admin cannot leave from admins room.",
));
}
}
if content.membership == MembershipState::Ban && pdu.state_key().is_some() {
if target == &server_user {
warn!("Conduit user cannot be banned in admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Conduit user cannot be banned in admins room.",
));
}
let count = services()
.rooms
.state_cache
.room_members(room_id)
.filter_map(|m| m.ok())
.filter(|m| m.server_name() == server_name)
.filter(|m| m != target)
.count();
if count < 2 {
warn!("Last admin cannot be banned in admins room");
return Err(Error::BadRequest(
ErrorKind::Forbidden,
"Last admin cannot be banned in admins room.",
));
}
}
}
_ => {}
}
}
// We append to state before appending the pdu, so we don't have a moment in time with the
// pdu without its state. This is okay because append_pdu can't fail.
let statehashid = services().rooms.state.append_to_state(&pdu)?;
let pdu_id = self.append_pdu(
&pdu,
pdu_json,
// Since this PDU references all pdu_leaves we can update the leaves
// of the room
vec![(*pdu.event_id).to_owned()],
state_lock,
)?;
// We set the room state after inserting the pdu, so that we never have a moment in time
// where events in the current room state do not exist
services()
.rooms
.state
.set_room_state(room_id, statehashid, state_lock)?;
let mut servers: HashSet<OwnedServerName> = services()
.rooms
.state_cache
.room_servers(room_id)
.filter_map(|r| r.ok())
.collect();
// In case we are kicking or banning a user, we need to inform their server of the change
if pdu.kind == TimelineEventType::RoomMember {
if let Some(state_key_uid) = &pdu
.state_key
.as_ref()
.and_then(|state_key| UserId::parse(state_key.as_str()).ok())
{
servers.insert(state_key_uid.server_name().to_owned());
}
}
// Remove our own server from the list; room_servers() and/or the check above may have added it
servers.remove(services().globals.server_name());
services().sending.send_pdu(servers.into_iter(), &pdu_id)?;
Ok(pdu.event_id)
}
/// Append the incoming event setting the state snapshot to the state from the
/// server that sent the event.
#[tracing::instrument(skip_all)]
pub fn append_incoming_pdu<'a>(
&self,
pdu: &PduEvent,
pdu_json: CanonicalJsonObject,
new_room_leaves: Vec<OwnedEventId>,
state_ids_compressed: HashSet<CompressedStateEvent>,
soft_fail: bool,
state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
) -> Result<Option<Vec<u8>>> {
// We append to state before appending the pdu, so we don't have a moment in time with the
// pdu without its state. This is okay because append_pdu can't fail.
services().rooms.state.set_event_state(
&pdu.event_id,
&pdu.room_id,
state_ids_compressed,
)?;
if soft_fail {
services()
.rooms
.pdu_metadata
.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?;
services().rooms.state.set_forward_extremities(
&pdu.room_id,
new_room_leaves,
state_lock,
)?;
return Ok(None);
}
let pdu_id =
services()
.rooms
.timeline
.append_pdu(pdu, pdu_json, new_room_leaves, state_lock)?;
Ok(Some(pdu_id))
}
/// Returns an iterator over all PDUs in a room.
pub fn all_pdus<'a>(
&'a self,
user_id: &UserId,
room_id: &RoomId,
) -> Result<impl Iterator<Item = Result<(PduCount, PduEvent)>> + 'a> {
self.pdus_after(user_id, room_id, PduCount::min())
}
/// Returns an iterator over all events and their tokens in a room that happened before the
/// event with id `until` in reverse-chronological order.
#[tracing::instrument(skip(self))]
pub fn pdus_until<'a>(
&'a self,
user_id: &UserId,
room_id: &RoomId,
until: PduCount,
) -> Result<impl Iterator<Item = Result<(PduCount, PduEvent)>> + 'a> {
self.db.pdus_until(user_id, room_id, until)
}
/// Returns an iterator over all events and their token in a room that happened after the event
/// with id `from` in chronological order.
#[tracing::instrument(skip(self))]
pub fn pdus_after<'a>(
&'a self,
user_id: &UserId,
room_id: &RoomId,
from: PduCount,
) -> Result<impl Iterator<Item = Result<(PduCount, PduEvent)>> + 'a> {
self.db.pdus_after(user_id, room_id, from)
}
/// Replace a PDU with the redacted form.
#[tracing::instrument(skip(self, reason))]
pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> {
if let Some(pdu_id) = self.get_pdu_id(event_id)? {
let mut pdu = self
.get_pdu_from_id(&pdu_id)?
.ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?;
pdu.redact(reason)?;
self.replace_pdu(&pdu_id, &pdu)?;
}
// If the event does not exist, this is a no-op
Ok(())
}
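/// Requests backfill over federation when the oldest PDU we have is not older
/// than `from`. Servers of users whose power level is above the room default
/// are asked for up to 100 events preceding our oldest PDU.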
#[tracing::instrument(skip(self, room_id))]
pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> {
let first_pdu = self
.all_pdus(&user_id!("@doesntmatter:conduit.rs"), &room_id)?
.next()
.expect("Room is not empty")?;
if first_pdu.0 < from {
// No backfill required, there are still events between them
return Ok(());
}
let power_levels: RoomPowerLevelsEventContent = services()
.rooms
.state_accessor
.room_state_get(&room_id, &StateEventType::RoomPowerLevels, "")?
.map(|ev| {
serde_json::from_str(ev.content.get())
.map_err(|_| Error::bad_database("invalid m.room.power_levels event"))
})
.transpose()?
.unwrap_or_default();
let mut admin_servers = power_levels
.users
.iter()
.filter(|(_, level)| **level > power_levels.users_default)
.map(|(user_id, _)| user_id.server_name())
.collect::<HashSet<_>>();
admin_servers.remove(services().globals.server_name());
// Request backfill
for backfill_server in admin_servers {
info!("Asking {backfill_server} for backfill");
let response = services()
.sending
.send_federation_request(
backfill_server,
federation::backfill::get_backfill::v1::Request {
room_id: room_id.to_owned(),
v: vec![first_pdu.1.event_id.as_ref().to_owned()],
limit: uint!(100),
},
)
.await;
match response {
Ok(response) => {
let mut pub_key_map = RwLock::new(BTreeMap::new());
for pdu in response.pdus {
if let Err(e) = self
.backfill_pdu(backfill_server, pdu, &mut pub_key_map)
.await
{
warn!("Failed to add backfilled pdu: {e}");
}
}
return Ok(());
}
Err(e) => {
warn!("{backfill_server} could not provide backfill: {e}");
}
}
}
info!("No servers could backfill");
Ok(())
}
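/// Validates a single backfilled PDU via the event handler and prepends it to
/// the room's timeline, indexing `m.room.message` bodies for search.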
#[tracing::instrument(skip(self, pdu))]
pub async fn backfill_pdu(
&self,
origin: &ServerName,
pdu: Box<RawJsonValue>,
pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
) -> Result<()> {
let (event_id, value, room_id) = server_server::parse_incoming_pdu(&pdu)?;
// Lock so we cannot backfill the same pdu twice at the same time
let mutex = Arc::clone(
services()
.globals
.roomid_mutex_federation
.write()
.unwrap()
.entry(room_id.to_owned())
.or_default(),
);
let mutex_lock = mutex.lock().await;
// Skip the PDU if we already have it as a timeline event
if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(&event_id)? {
info!("We already know {event_id} at {pdu_id:?}");
return Ok(());
}
services()
.rooms
.event_handler
.handle_incoming_pdu(origin, &event_id, &room_id, value, false, &pub_key_map)
.await?;
let value = self.get_pdu_json(&event_id)?.expect("We just created it");
let pdu = self.get_pdu(&event_id)?.expect("We just created it");
let shortroomid = services()
.rooms
.short
.get_shortroomid(&room_id)?
.expect("room exists");
let mutex_insert = Arc::clone(
services()
.globals
.roomid_mutex_insert
.write()
.unwrap()
.entry(room_id.clone())
.or_default(),
);
let insert_lock = mutex_insert.lock().unwrap();
let count = services().globals.next_count()?;
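// Backfilled pdu_ids use a zero marker and the inverted count so they sort
// before all regular timeline entries while preserving room order.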
let mut pdu_id = shortroomid.to_be_bytes().to_vec();
pdu_id.extend_from_slice(&0_u64.to_be_bytes());
pdu_id.extend_from_slice(&(u64::MAX - count).to_be_bytes());
// Insert pdu
self.db.prepend_backfill_pdu(&pdu_id, &event_id, &value)?;
drop(insert_lock);
match pdu.kind {
TimelineEventType::RoomMessage => {
#[derive(Deserialize)]
struct ExtractBody {
body: Option<String>,
}
let content = serde_json::from_str::<ExtractBody>(pdu.content.get())
.map_err(|_| Error::bad_database("Invalid content in pdu."))?;
if let Some(body) = content.body {
services()
.rooms
.search
.index_pdu(shortroomid, &pdu_id, &body)?;
}
}
_ => {}
}
drop(mutex_lock);
info!("Prepended backfill pdu");
Ok(())
}
}