mirror of
https://forgejo.ellis.link/continuwuation/continuwuity.git
synced 2025-10-15 19:21:57 +00:00
Implement room v12 (#943)
**Does not yet work!** Currently, state resolution does not correctly resolve conflicting states. Everything else appears to work as expected, so stateres will be fixed soon; then we should be clear for takeoff.

Also: a lot of things currently accept a nullable room ID that really just don't need to. This will need tidying up before merge. Some authentication checks have also been disabled temporarily, but nothing important. A lot of things are tagged with `TODO(hydra)`; those need resolving before merge.

External contributors should PR to the `hydra/public` branch, *not* `main`.

---

This PR should be squash merged.

Reviewed-on: https://forgejo.ellis.link/continuwuation/continuwuity/pulls/943
Co-authored-by: nexy7574 <git@nexy7574.co.uk>
Co-committed-by: nexy7574 <git@nexy7574.co.uk>
This commit is contained in:
parent 51423c9d7d · commit 7e4071c117
63 changed files with 1190 additions and 477 deletions
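For reviewers new to the v12 ("hydra") room model: the central change this PR threads through the codebase is that a v12 `m.room.create` event carries no `room_id`, and the room's ID is instead derived from the create event's own event ID. A minimal sketch of that derivation, mirroring the `room_ids_as_hashes` branch added to `parse_incoming_pdu` below (the event ID is made up, and this assumes the patched ruma this PR builds against, whose room-ID parsing accepts server-less v12 IDs):

```rust
use ruma::OwnedRoomId;

/// Sketch: a v12 room ID is the create event's ID with the `$` sigil
/// swapped for `!`, exactly as parse_incoming_pdu does in this diff.
fn derive_v12_room_id(create_event_id: &str) -> Option<OwnedRoomId> {
    OwnedRoomId::parse(create_event_id.replace('$', "!")).ok()
}

fn main() {
    // Hypothetical create event ID; real ones are base64 content hashes.
    let room_id = derive_v12_room_id("$31hnApxJ5ThDBVMq5vkm2NSKGMHMjQrrTdFqnHdjhc").unwrap();
    assert_eq!(room_id.as_str(), "!31hnApxJ5ThDBVMq5vkm2NSKGMHMjQrrTdFqnHdjhc");
}
```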

@@ -1,6 +1,6 @@
 use std::collections::BTreeMap;
 
-use conduwuit::{Result, pdu::PduBuilder};
+use conduwuit::{Result, info, pdu::PduBuilder};
 use futures::FutureExt;
 use ruma::{
 	RoomId, RoomVersionId,
@@ -26,7 +26,7 @@ use crate::Services;
 /// used to issue admin commands by talking to the server user inside it.
 pub async fn create_admin_room(services: &Services) -> Result {
 	let room_id = RoomId::new(services.globals.server_name());
-	let room_version = &services.config.default_room_version;
+	let room_version = &RoomVersionId::V11;
 
 	let _short_id = services
 		.rooms
@@ -45,10 +45,13 @@ pub async fn create_admin_room(services: &Services) -> Result {
 		match room_version {
 			| V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 =>
 				RoomCreateEventContent::new_v1(server_user.into()),
-			| _ => RoomCreateEventContent::new_v11(),
+			| V11 => RoomCreateEventContent::new_v11(),
+			| _ => RoomCreateEventContent::new_v12(),
 		}
 	};
 
+	info!("Creating admin room {} with version {}", room_id, room_version);
+
 	// 1. The room create event
 	services
 		.rooms
@@ -61,7 +64,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
 				..create_content
 			}),
 			server_user,
-			&room_id,
+			Some(&room_id),
 			&state_lock,
 		)
 		.boxed()
@@ -77,7 +80,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
 				&RoomMemberEventContent::new(MembershipState::Join),
 			),
 			server_user,
-			&room_id,
+			Some(&room_id),
 			&state_lock,
 		)
 		.boxed()
@@ -95,7 +98,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
 				..Default::default()
 			}),
 			server_user,
-			&room_id,
+			Some(&room_id),
 			&state_lock,
 		)
 		.boxed()
@@ -108,7 +111,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
 		.build_and_append_pdu(
 			PduBuilder::state(String::new(), &RoomJoinRulesEventContent::new(JoinRule::Invite)),
 			server_user,
-			&room_id,
+			Some(&room_id),
 			&state_lock,
 		)
 		.boxed()
@@ -124,7 +127,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
 				&RoomHistoryVisibilityEventContent::new(HistoryVisibility::Shared),
 			),
 			server_user,
-			&room_id,
+			Some(&room_id),
 			&state_lock,
 		)
 		.boxed()
@@ -140,7 +143,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
 				&RoomGuestAccessEventContent::new(GuestAccess::Forbidden),
 			),
 			server_user,
-			&room_id,
+			Some(&room_id),
 			&state_lock,
 		)
 		.boxed()
@@ -154,7 +157,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
 		.build_and_append_pdu(
 			PduBuilder::state(String::new(), &RoomNameEventContent::new(room_name)),
 			server_user,
-			&room_id,
+			Some(&room_id),
 			&state_lock,
 		)
 		.boxed()
@@ -168,7 +171,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
 				topic: format!("Manage {} | Run commands prefixed with `!admin` | Run `!admin -h` for help | Documentation: https://continuwuity.org/", services.config.server_name),
 			}),
 			server_user,
-			&room_id,
+			Some(&room_id),
 			&state_lock,
 		)
 		.boxed()
@@ -186,7 +189,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
 				alt_aliases: Vec::new(),
 			}),
 			server_user,
-			&room_id,
+			Some(&room_id),
 			&state_lock,
 		)
 		.boxed()
@@ -204,7 +207,7 @@ pub async fn create_admin_room(services: &Services) -> Result {
 		.build_and_append_pdu(
 			PduBuilder::state(String::new(), &RoomPreviewUrlsEventContent { disabled: true }),
 			server_user,
-			&room_id,
+			Some(&room_id),
 			&state_lock,
 		)
 		.boxed()

@@ -55,7 +55,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result {
 				&RoomMemberEventContent::new(MembershipState::Invite),
 			),
 			server_user,
-			&room_id,
+			Some(&room_id),
 			&state_lock,
 		)
 		.await?;
@@ -69,7 +69,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result {
 				&RoomMemberEventContent::new(MembershipState::Join),
 			),
 			user_id,
-			&room_id,
+			Some(&room_id),
 			&state_lock,
 		)
 		.await?;
@@ -83,7 +83,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result {
 				&RoomMemberEventContent::new(MembershipState::Invite),
 			),
 			server_user,
-			&room_id,
+			Some(&room_id),
 			&state_lock,
 		)
 		.await?;
@@ -111,7 +111,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result {
 		.build_and_append_pdu(
 			PduBuilder::state(String::new(), &room_power_levels),
 			server_user,
-			&room_id,
+			Some(&room_id),
 			&state_lock,
 		)
 		.await?;
@@ -135,7 +135,7 @@ pub async fn make_user_admin(&self, user_id: &UserId) -> Result {
 		.build_and_append_pdu(
 			PduBuilder::timeline(&RoomMessageEventContent::text_markdown(welcome_message)),
 			server_user,
-			&room_id,
+			Some(&room_id),
 			&state_lock,
 		)
 		.await?;
@@ -218,7 +218,7 @@ pub async fn revoke_admin(&self, user_id: &UserId) -> Result {
 				..event
 			}),
 			self.services.globals.server_user.as_ref(),
-			&room_id,
+			Some(&room_id),
 			&state_lock,
 		)
 		.await

@@ -393,13 +393,13 @@ impl Service {
 			return Ok(());
 		};
 
-		let response_sender = if self.is_admin_room(pdu.room_id()).await {
+		let response_sender = if self.is_admin_room(pdu.room_id().unwrap()).await {
 			&self.services.globals.server_user
 		} else {
 			pdu.sender()
 		};
 
-		self.respond_to_room(content, pdu.room_id(), response_sender)
+		self.respond_to_room(content, pdu.room_id().unwrap(), response_sender)
 			.boxed()
 			.await
 	}
@@ -419,12 +419,13 @@ impl Service {
 			.build_and_append_pdu(
 				PduBuilder::timeline(&self.text_or_file(content).await),
 				user_id,
-				room_id,
+				Some(room_id),
 				&state_lock,
 			)
 			.await
 		{
 			self.handle_response_error(e, room_id, user_id, &state_lock)
 				.boxed()
 				.await
 				.unwrap_or_else(default_log);
 		}
@@ -447,7 +448,12 @@ impl Service {
 
 		self.services
 			.timeline
-			.build_and_append_pdu(PduBuilder::timeline(&content), user_id, room_id, state_lock)
+			.build_and_append_pdu(
+				PduBuilder::timeline(&content),
+				user_id,
+				Some(room_id),
+				state_lock,
+			)
 			.await?;
 
 		Ok(())
@@ -484,7 +490,10 @@ impl Service {
 		}
 
 		// Prevent unescaped !admin from being used outside of the admin room
-		if is_public_prefix && !self.is_admin_room(event.room_id()).await {
+		if event.room_id().is_some()
+			&& is_public_prefix
+			&& !self.is_admin_room(event.room_id().unwrap()).await
+		{
 			return false;
 		}
 
@@ -497,7 +506,7 @@ impl Service {
 		// the administrator can execute commands as the server user
 		let emergency_password_set = self.services.server.config.emergency_password.is_some();
 		let from_server = event.sender() == server_user && !emergency_password_set;
-		if from_server && self.is_admin_room(event.room_id()).await {
+		if from_server && self.is_admin_room(event.room_id().unwrap()).await {
 			return false;
 		}

@@ -27,7 +27,7 @@ use crate::{Services, media};
 /// - If database is opened at lesser version we apply migrations up to this.
 ///   Note that named-feature migrations may also be performed when opening at
 ///   equal or lesser version. These are expected to be backward-compatible.
-pub(crate) const DATABASE_VERSION: u64 = 17;
+pub(crate) const DATABASE_VERSION: u64 = 18;
 
 pub(crate) async fn migrations(services: &Services) -> Result<()> {
 	let users_count = services.users.count().await;
@@ -138,6 +138,11 @@ async fn migrate(services: &Services) -> Result<()> {
 		info!("Migration: Bumped database version to 17");
 	}
 
+	if services.globals.db.database_version().await < 18 {
+		services.globals.db.bump_database_version(18);
+		info!("Migration: Bumped database version to 18");
+	}
+
 	assert_eq!(
 		services.globals.db.database_version().await,
 		DATABASE_VERSION,

@@ -287,18 +287,22 @@ impl Service {
 	{
 		let mut notify = None;
 		let mut tweaks = Vec::new();
+		if event.room_id().is_none() {
+			// TODO(hydra): does this matter?
+			return Ok(());
+		}
 
 		let power_levels: RoomPowerLevelsEventContent = self
 			.services
 			.state_accessor
-			.room_state_get(event.room_id(), &StateEventType::RoomPowerLevels, "")
+			.room_state_get(event.room_id().unwrap(), &StateEventType::RoomPowerLevels, "")
 			.await
 			.and_then(|event| event.get_content())
 			.unwrap_or_default();
 
 		let serialized = event.to_format();
 		for action in self
-			.get_actions(user, &ruleset, &power_levels, &serialized, event.room_id())
+			.get_actions(user, &ruleset, &power_levels, &serialized, event.room_id().unwrap())
 			.await
 		{
 			let n = match action {
@@ -426,7 +430,7 @@ impl Service {
 		let mut notifi = Notification::new(d);
 
 		notifi.event_id = Some(event.event_id().to_owned());
-		notifi.room_id = Some(event.room_id().to_owned());
+		notifi.room_id = Some(event.room_id().unwrap().to_owned());
 		if http
 			.data
 			.get("org.matrix.msc4076.disable_badge_count")
@@ -464,14 +468,14 @@ impl Service {
 		notifi.room_name = self
 			.services
 			.state_accessor
-			.get_name(event.room_id())
+			.get_name(event.room_id().unwrap())
 			.await
 			.ok();
 
 		notifi.room_alias = self
 			.services
 			.state_accessor
-			.get_canonical_alias(event.room_id())
+			.get_canonical_alias(event.room_id().unwrap())
 			.await
 			.ok();
 	}

@@ -195,13 +195,15 @@ async fn get_auth_chain_inner(
 				debug_error!(?event_id, ?e, "Could not find pdu mentioned in auth events");
 			},
 			| Ok(pdu) => {
-				if pdu.room_id != room_id {
-					return Err!(Request(Forbidden(error!(
-						?event_id,
-						?room_id,
-						wrong_room_id = ?pdu.room_id,
-						"auth event for incorrect room"
-					))));
+				if let Some(claimed_room_id) = pdu.room_id.clone() {
+					if claimed_room_id != *room_id {
+						return Err!(Request(Forbidden(error!(
+							?event_id,
+							?room_id,
+							wrong_room_id = ?pdu.room_id.unwrap(),
+							"auth event for incorrect room"
+						))));
+					}
 				}
 
 				for auth_event in &pdu.auth_events {

@@ -139,6 +139,7 @@ where
 			&pdu_event,
 			None, // TODO: third party invite
 			state_fetch,
+			create_event.as_pdu(),
 		)
 		.await
 		.map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?;

@@ -99,7 +99,10 @@ impl Service {
 }
 
 fn check_room_id<Pdu: Event>(room_id: &RoomId, pdu: &Pdu) -> Result {
-	if pdu.room_id() != room_id {
+	if pdu
+		.room_id()
+		.is_some_and(|claimed_room_id| claimed_room_id != room_id)
+	{
 		return Err!(Request(InvalidParam(error!(
 			pdu_event_id = ?pdu.event_id(),
 			pdu_room_id = ?pdu.room_id(),

@@ -1,7 +1,8 @@
 use conduwuit::{
-	Result, err, implement, matrix::event::gen_event_id_canonical_json, result::FlatOk,
+	Result, RoomVersion, err, implement, matrix::event::gen_event_id_canonical_json,
+	result::FlatOk,
 };
-use ruma::{CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId};
+use ruma::{CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, RoomVersionId};
 use serde_json::value::RawValue as RawJsonValue;
 
 type Parsed = (OwnedRoomId, OwnedEventId, CanonicalJsonObject);
@@ -11,12 +12,44 @@ pub async fn parse_incoming_pdu(&self, pdu: &RawJsonValue) -> Result<Parsed> {
 	let value = serde_json::from_str::<CanonicalJsonObject>(pdu.get()).map_err(|e| {
 		err!(BadServerResponse(debug_warn!("Error parsing incoming event {e:?}")))
 	})?;
 
-	let room_id: OwnedRoomId = value
-		.get("room_id")
+	let event_type = value
+		.get("type")
 		.and_then(CanonicalJsonValue::as_str)
-		.map(OwnedRoomId::parse)
-		.flat_ok_or(err!(Request(InvalidParam("Invalid room_id in pdu"))))?;
+		.ok_or_else(|| err!(Request(InvalidParam("Missing or invalid type in pdu"))))?;
+
+	let room_id: OwnedRoomId = if event_type != "m.room.create" {
+		value
+			.get("room_id")
+			.and_then(CanonicalJsonValue::as_str)
+			.map(OwnedRoomId::parse)
+			.flat_ok_or(err!(Request(InvalidParam("Invalid room_id in pdu"))))?
+	} else {
+		// v12 rooms might have no room_id in the create event. We'll need to check the
+		// content.room_version
+		let content = value
+			.get("content")
+			.and_then(CanonicalJsonValue::as_object)
+			.ok_or_else(|| err!(Request(InvalidParam("Missing or invalid content in pdu"))))?;
+		let room_version = content
+			.get("room_version")
+			.and_then(CanonicalJsonValue::as_str)
+			.unwrap_or("1");
+		let vi = RoomVersionId::try_from(room_version).unwrap_or(RoomVersionId::V1);
+		let vf = RoomVersion::new(&vi).expect("supported room version");
+		if vf.room_ids_as_hashes {
+			let (event_id, _) = gen_event_id_canonical_json(pdu, &vi).map_err(|e| {
+				err!(Request(InvalidParam("Could not convert event to canonical json: {e}")))
+			})?;
+			OwnedRoomId::parse(event_id.as_str().replace('$', "!")).expect("valid room ID")
+		} else {
+			// V11 or below room, room_id must be present
+			value
+				.get("room_id")
+				.and_then(CanonicalJsonValue::as_str)
+				.map(OwnedRoomId::parse)
+				.flat_ok_or(err!(Request(InvalidParam("Invalid or missing room_id in pdu"))))?
+		}
+	};
 
 	let room_version_id = self
 		.services
@@ -24,10 +57,8 @@ pub async fn parse_incoming_pdu(&self, pdu: &RawJsonValue) -> Result<Parsed> {
 		.get_room_version(&room_id)
 		.await
 		.map_err(|_| err!("Server is not in room {room_id}"))?;
 
 	let (event_id, value) = gen_event_id_canonical_json(pdu, &room_version_id).map_err(|e| {
 		err!(Request(InvalidParam("Could not convert event to canonical json: {e}")))
 	})?;
 
 	Ok((room_id, event_id, value))
 }
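A worked example of the parsing branch above (IDs made up): an inbound PDU with `"type": "m.room.create"`, no `room_id` field, and `"content": {"room_version": "12"}` is hashed to some event ID `$abc…`; because v12 sets `room_ids_as_hashes`, the derived room ID is `!abc…`. Any non-create PDU arriving without a `room_id`, or a v11-or-earlier create event missing one, is still rejected with `InvalidParam`.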

@@ -102,6 +102,7 @@ where
 			&incoming_pdu,
 			None, // TODO: third party invite
 			|ty, sk| state_fetch(ty.clone(), sk.into()),
+			create_event.as_pdu(),
 		)
 		.await
 		.map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?;
@@ -123,6 +124,7 @@ where
 			incoming_pdu.sender(),
 			incoming_pdu.state_key(),
 			incoming_pdu.content(),
+			&room_version,
 		)
 		.await?;
 
@@ -140,6 +142,7 @@ where
 			&incoming_pdu,
 			None, // third-party invite
 			state_fetch,
+			create_event.as_pdu(),
 		)
 		.await
 		.map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?;
@@ -156,7 +159,7 @@ where
 			!self
 				.services
 				.state_accessor
-				.user_can_redact(&redact_id, incoming_pdu.sender(), incoming_pdu.room_id(), true)
+				.user_can_redact(&redact_id, incoming_pdu.sender(), room_id, true)
 				.await?,
 	};
 
@@ -172,7 +175,7 @@ where
 	// Now we calculate the set of extremities this room has after the incoming
 	// event has been applied. We start with the previous extremities (aka leaves)
 	trace!("Calculating extremities");
-	let extremities: Vec<_> = self
+	let mut extremities: Vec<_> = self
 		.services
 		.state
 		.get_forward_extremities(room_id)
@@ -192,6 +195,7 @@ where
 		})
 		.collect()
 		.await;
+	extremities.push(incoming_pdu.event_id().to_owned());
 
 	debug!(
 		"Retained {} extremities checked against {} prev_events",
@@ -303,6 +307,7 @@ where
 		);
 		// assert!(extremities.is_empty(), "soft_fail extremities empty");
 		let extremities = extremities.iter().map(Borrow::borrow);
+		debug_assert!(extremities.clone().count() > 0, "extremities not empty");
 
 		self.services
 			.timeline
@@ -313,6 +318,7 @@ where
 				state_ids_compressed,
 				soft_fail,
 				&state_lock,
+				room_id,
 			)
 			.await?;
 
@@ -336,6 +342,7 @@ where
 		.iter()
 		.map(Borrow::borrow)
 		.chain(once(incoming_pdu.event_id()));
+	debug_assert!(extremities.clone().count() > 0, "extremities not empty");
 
 	let pdu_id = self
 		.services
@@ -347,6 +354,7 @@ where
 			state_ids_compressed,
 			soft_fail,
 			&state_lock,
+			room_id,
 		)
 		.await?;

@@ -124,7 +124,7 @@ pub async fn search_pdus<'a>(
 		.wide_filter_map(move |pdu| async move {
 			self.services
 				.state_accessor
-				.user_can_see_event(query.user_id?, pdu.room_id(), pdu.event_id())
+				.user_can_see_event(query.user_id?, pdu.room_id().unwrap(), pdu.event_id())
 				.await
 				.then_some(pdu)
 		})

@@ -1,6 +1,7 @@
 use std::{collections::HashMap, fmt::Write, iter::once, sync::Arc};
 
 use async_trait::async_trait;
+use conduwuit::{RoomVersion, debug};
 use conduwuit_core::{
 	Event, PduEvent, Result, err,
 	result::FlatOk,
@@ -148,7 +149,7 @@ impl Service {
 					.roomid_spacehierarchy_cache
 					.lock()
 					.await
-					.remove(&pdu.room_id);
+					.remove(room_id);
 			},
 			| _ => continue,
 		}
@@ -239,7 +240,7 @@ impl Service {
 	/// This adds all current state events (not including the incoming event)
 	/// to `stateid_pduid` and adds the incoming event to `eventid_statehash`.
 	#[tracing::instrument(skip(self, new_pdu), level = "debug")]
-	pub async fn append_to_state(&self, new_pdu: &PduEvent) -> Result<u64> {
+	pub async fn append_to_state(&self, new_pdu: &PduEvent, room_id: &RoomId) -> Result<u64> {
 		const BUFSIZE: usize = size_of::<u64>();
 
 		let shorteventid = self
@@ -248,7 +249,7 @@ impl Service {
 			.get_or_create_shorteventid(&new_pdu.event_id)
 			.await;
 
-		let previous_shortstatehash = self.get_room_shortstatehash(&new_pdu.room_id).await;
+		let previous_shortstatehash = self.get_room_shortstatehash(room_id).await;
 
 		if let Ok(p) = previous_shortstatehash {
 			self.db
@@ -319,7 +320,11 @@ impl Service {
 	}
 
 	#[tracing::instrument(skip_all, level = "debug")]
-	pub async fn summary_stripped<'a, E>(&self, event: &'a E) -> Vec<Raw<AnyStrippedStateEvent>>
+	pub async fn summary_stripped<'a, E>(
+		&self,
+		event: &'a E,
+		room_id: &RoomId,
+	) -> Vec<Raw<AnyStrippedStateEvent>>
 	where
 		E: Event + Send + Sync,
 		&'a E: Event + Send,
@@ -338,7 +343,7 @@ impl Service {
 		let fetches = cells.into_iter().map(|(event_type, state_key)| {
 			self.services
 				.state_accessor
-				.room_state_get(event.room_id(), event_type, state_key)
+				.room_state_get(room_id, event_type, state_key)
 		});
 
 		join_all(fetches)
@@ -421,7 +426,7 @@ impl Service {
 	}
 
 	/// This fetches auth events from the current state.
-	#[tracing::instrument(skip(self, content), level = "debug")]
+	#[tracing::instrument(skip(self, content, room_version), level = "trace")]
 	pub async fn get_auth_events(
 		&self,
 		room_id: &RoomId,
@@ -429,13 +434,15 @@ impl Service {
 		sender: &UserId,
 		state_key: Option<&str>,
 		content: &serde_json::value::RawValue,
+		room_version: &RoomVersion,
 	) -> Result<StateMap<PduEvent>> {
 		let Ok(shortstatehash) = self.get_room_shortstatehash(room_id).await else {
 			return Ok(HashMap::new());
 		};
 
-		let auth_types = state_res::auth_types_for_event(kind, sender, state_key, content)?;
+		let auth_types =
+			state_res::auth_types_for_event(kind, sender, state_key, content, room_version)?;
+		debug!(?auth_types, "Auth types for event");
 		let sauthevents: HashMap<_, _> = auth_types
 			.iter()
 			.stream()
@@ -448,6 +455,7 @@ impl Service {
 			})
 			.collect()
 			.await;
+		debug!(?sauthevents, "Auth events to fetch");
 
 		let (state_keys, event_ids): (Vec<_>, Vec<_>) = self
 			.services
@@ -461,7 +469,7 @@ impl Service {
 			})
 			.unzip()
 			.await;
-
+		debug!(?state_keys, ?event_ids, "Auth events found in state");
 		self.services
 			.short
 			.multi_get_eventid_from_short(event_ids.into_iter().stream())
@@ -473,6 +481,7 @@ impl Service {
 			.get_pdu(&event_id)
 			.await
 			.map(move |pdu| (((*ty).clone(), (*sk).clone()), pdu))
+			.inspect_err(|e| warn!("Failed to get auth event {event_id}: {e:?}"))
 			.ok()
 		})
 		.collect()

@@ -161,7 +161,7 @@ pub async fn user_can_invite(
 				&RoomMemberEventContent::new(MembershipState::Invite),
 			),
 			sender,
-			room_id,
+			Some(room_id),
 			state_lock,
 		)
 		.await

@@ -3,6 +3,7 @@ use std::{
 	sync::Arc,
 };
 
+use conduwuit::trace;
 use conduwuit_core::{
 	Result, err, error, implement,
 	matrix::{
@@ -34,6 +35,7 @@ use crate::{appservice::NamespaceRegex, rooms::state_compressor::CompressedState
 /// the server that sent the event.
 #[implement(super::Service)]
 #[tracing::instrument(level = "debug", skip_all)]
+#[allow(clippy::too_many_arguments)]
 pub async fn append_incoming_pdu<'a, Leaves>(
 	&'a self,
 	pdu: &'a PduEvent,
@@ -42,6 +44,7 @@ pub async fn append_incoming_pdu<'a, Leaves>(
 	state_ids_compressed: Arc<CompressedState>,
 	soft_fail: bool,
 	state_lock: &'a RoomMutexGuard,
+	room_id: &'a ruma::RoomId,
 ) -> Result<Option<RawPduId>>
 where
 	Leaves: Iterator<Item = &'a EventId> + Send + 'a,
@@ -51,24 +54,24 @@ where
 	// fail.
 	self.services
 		.state
-		.set_event_state(&pdu.event_id, &pdu.room_id, state_ids_compressed)
+		.set_event_state(&pdu.event_id, room_id, state_ids_compressed)
 		.await?;
 
 	if soft_fail {
 		self.services
 			.pdu_metadata
-			.mark_as_referenced(&pdu.room_id, pdu.prev_events.iter().map(AsRef::as_ref));
+			.mark_as_referenced(room_id, pdu.prev_events.iter().map(AsRef::as_ref));
 
-		self.services
-			.state
-			.set_forward_extremities(&pdu.room_id, new_room_leaves, state_lock)
-			.await;
+		// self.services
+		// 	.state
+		// 	.set_forward_extremities(room_id, new_room_leaves, state_lock)
+		// 	.await;
 
 		return Ok(None);
 	}
 
 	let pdu_id = self
-		.append_pdu(pdu, pdu_json, new_room_leaves, state_lock)
+		.append_pdu(pdu, pdu_json, new_room_leaves, state_lock, room_id)
 		.await?;
 
 	Ok(Some(pdu_id))
@@ -88,6 +91,7 @@ pub async fn append_pdu<'a, Leaves>(
 	mut pdu_json: CanonicalJsonObject,
 	leaves: Leaves,
 	state_lock: &'a RoomMutexGuard,
+	room_id: &'a ruma::RoomId,
 ) -> Result<RawPduId>
 where
 	Leaves: Iterator<Item = &'a EventId> + Send + 'a,
@@ -98,7 +102,7 @@ where
 	let shortroomid = self
 		.services
 		.short
-		.get_shortroomid(pdu.room_id())
+		.get_shortroomid(room_id)
 		.await
 		.map_err(|_| err!(Database("Room does not exist")))?;
 
@@ -151,14 +155,15 @@ where
 	// We must keep track of all events that have been referenced.
 	self.services
 		.pdu_metadata
-		.mark_as_referenced(pdu.room_id(), pdu.prev_events().map(AsRef::as_ref));
+		.mark_as_referenced(room_id, pdu.prev_events().map(AsRef::as_ref));
 
+	trace!("setting forward extremities");
 	self.services
 		.state
-		.set_forward_extremities(pdu.room_id(), leaves, state_lock)
+		.set_forward_extremities(room_id, leaves, state_lock)
 		.await;
 
-	let insert_lock = self.mutex_insert.lock(pdu.room_id()).await;
+	let insert_lock = self.mutex_insert.lock(room_id).await;
 
 	let count1 = self.services.globals.next_count().unwrap();
 
@@ -166,11 +171,11 @@ where
 	// appending fails
 	self.services
 		.read_receipt
-		.private_read_set(pdu.room_id(), pdu.sender(), count1);
+		.private_read_set(room_id, pdu.sender(), count1);
 
 	self.services
 		.user
-		.reset_notification_counts(pdu.sender(), pdu.room_id());
+		.reset_notification_counts(pdu.sender(), room_id);
 
 	let count2 = PduCount::Normal(self.services.globals.next_count().unwrap());
 	let pdu_id: RawPduId = PduId { shortroomid, shorteventid: count2 }.into();
@@ -184,14 +189,14 @@ where
 	let power_levels: RoomPowerLevelsEventContent = self
 		.services
 		.state_accessor
-		.room_state_get_content(pdu.room_id(), &StateEventType::RoomPowerLevels, "")
+		.room_state_get_content(room_id, &StateEventType::RoomPowerLevels, "")
 		.await
 		.unwrap_or_default();
 
 	let mut push_target: HashSet<_> = self
 		.services
 		.state_cache
-		.active_local_users_in_room(pdu.room_id())
+		.active_local_users_in_room(room_id)
 		.map(ToOwned::to_owned)
 		// Don't notify the sender of their own events, and dont send from ignored users
 		.ready_filter(|user| *user != pdu.sender())
@@ -230,7 +235,7 @@ where
 		for action in self
 			.services
 			.pusher
-			.get_actions(user, &rules_for_user, &power_levels, &serialized, pdu.room_id())
+			.get_actions(user, &rules_for_user, &power_levels, &serialized, room_id)
 			.await
 		{
 			match action {
@@ -268,20 +273,20 @@ where
 	}
 
 	self.db
-		.increment_notification_counts(pdu.room_id(), notifies, highlights);
+		.increment_notification_counts(room_id, notifies, highlights);
 
 	match *pdu.kind() {
 		| TimelineEventType::RoomRedaction => {
 			use RoomVersionId::*;
 
-			let room_version_id = self.services.state.get_room_version(pdu.room_id()).await?;
+			let room_version_id = self.services.state.get_room_version(room_id).await?;
 			match room_version_id {
 				| V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => {
 					if let Some(redact_id) = pdu.redacts() {
 						if self
 							.services
 							.state_accessor
-							.user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false)
+							.user_can_redact(redact_id, pdu.sender(), room_id, false)
 							.await?
 						{
 							self.redact_pdu(redact_id, pdu, shortroomid).await?;
@@ -294,7 +299,7 @@ where
 						if self
 							.services
 							.state_accessor
-							.user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false)
+							.user_can_redact(redact_id, pdu.sender(), room_id, false)
 							.await?
 						{
 							self.redact_pdu(redact_id, pdu, shortroomid).await?;
@@ -310,7 +315,7 @@ where
 				.roomid_spacehierarchy_cache
 				.lock()
 				.await
-				.remove(pdu.room_id());
+				.remove(room_id);
 		},
 		| TimelineEventType::RoomMember => {
 			if let Some(state_key) = pdu.state_key() {
@@ -320,8 +325,12 @@ where
 
 				let content: RoomMemberEventContent = pdu.get_content()?;
 				let stripped_state = match content.membership {
-					| MembershipState::Invite | MembershipState::Knock =>
-						self.services.state.summary_stripped(pdu).await.into(),
+					| MembershipState::Invite | MembershipState::Knock => self
+						.services
+						.state
+						.summary_stripped(pdu, room_id)
+						.await
+						.into(),
 					| _ => None,
 				};
 
@@ -331,7 +340,7 @@ where
 				self.services
 					.state_cache
 					.update_membership(
-						pdu.room_id(),
+						room_id,
 						target_user_id,
 						content,
 						pdu.sender(),
@@ -392,7 +401,7 @@ where
 		if self
 			.services
 			.state_cache
-			.appservice_in_room(pdu.room_id(), appservice)
+			.appservice_in_room(room_id, appservice)
 			.await
 		{
 			self.services
@@ -430,12 +439,12 @@ where
 	let matching_aliases = |aliases: NamespaceRegex| {
 		self.services
 			.alias
-			.local_aliases_for_room(pdu.room_id())
+			.local_aliases_for_room(room_id)
 			.ready_any(move |room_alias| aliases.is_match(room_alias.as_str()))
 	};
 
 	if matching_aliases(appservice.aliases.clone()).await
-		|| appservice.rooms.is_match(pdu.room_id().as_str())
+		|| appservice.rooms.is_match(room_id.as_str())
 		|| matching_users(&appservice.users)
 	{
 		self.services

@@ -1,6 +1,6 @@
 use std::iter::once;
 
-use conduwuit::{Err, PduEvent};
+use conduwuit::{Err, PduEvent, RoomVersion};
 use conduwuit_core::{
 	Result, debug, debug_warn, err, implement, info,
 	matrix::{
@@ -12,10 +12,11 @@ use conduwuit_core::{
 };
 use futures::{FutureExt, StreamExt};
 use ruma::{
-	CanonicalJsonObject, EventId, RoomId, ServerName,
+	CanonicalJsonObject, EventId, Int, RoomId, ServerName,
 	api::federation,
 	events::{
-		StateEventType, TimelineEventType, room::power_levels::RoomPowerLevelsEventContent,
+		StateEventType, TimelineEventType,
+		room::{create::RoomCreateEventContent, power_levels::RoomPowerLevelsEventContent},
 	},
 	uint,
 };
@@ -24,7 +25,7 @@ use serde_json::value::RawValue as RawJsonValue;
 use super::ExtractBody;
 
 #[implement(super::Service)]
-#[tracing::instrument(name = "backfill", level = "debug", skip(self))]
+#[tracing::instrument(name = "backfill", level = "trace", skip(self))]
 pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> {
 	if self
 		.services
@@ -39,6 +40,7 @@ pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> {
 		.await
 	{
 		// Room is empty (1 user or none), there is no one that can backfill
+		debug_warn!("Room {room_id} is empty, skipping backfill");
 		return Ok(());
 	}
 
@@ -49,6 +51,7 @@ pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> {
 
 	if first_pdu.0 < from {
 		// No backfill required, there are still events between them
+		debug!("No backfill required in room {room_id}, {:?} < {from}", first_pdu.0);
 		return Ok(());
 	}
 
@@ -58,11 +61,47 @@ pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> {
 		.room_state_get_content(room_id, &StateEventType::RoomPowerLevels, "")
 		.await
 		.unwrap_or_default();
+	let create_event_content: RoomCreateEventContent = self
+		.services
+		.state_accessor
+		.room_state_get_content(room_id, &StateEventType::RoomCreate, "")
+		.await?;
+	let create_event = self
+		.services
+		.state_accessor
+		.room_state_get(room_id, &StateEventType::RoomCreate, "")
+		.await?;
 
+	let room_version =
+		RoomVersion::new(&create_event_content.room_version).expect("supported room version");
+	let mut users = power_levels.users.clone();
+	if room_version.explicitly_privilege_room_creators {
+		users.insert(create_event.sender().to_owned(), Int::MAX);
+		if let Some(additional_creators) = &create_event_content.additional_creators {
+			for user_id in additional_creators {
+				users.insert(user_id.to_owned(), Int::MAX);
+			}
+		}
+	}
+
-	let room_mods = power_levels.users.iter().filter_map(|(user_id, level)| {
-		if level > &power_levels.users_default && !self.services.globals.user_is_local(user_id) {
+	let room_mods = users.iter().filter_map(|(user_id, level)| {
+		let remote_powered =
+			level > &power_levels.users_default && !self.services.globals.user_is_local(user_id);
+		let creator = if room_version.explicitly_privilege_room_creators {
+			create_event.sender() == user_id
+				|| create_event_content
+					.additional_creators
+					.as_ref()
+					.is_some_and(|c| c.contains(user_id))
+		} else {
+			false
+		};
+
+		if remote_powered || creator {
+			debug!(%remote_powered, %creator, "User {user_id} can backfill in room {room_id}");
 			Some(user_id.server_name())
+		} else {
+			debug!(%remote_powered, %creator, "User {user_id} cannot backfill in room {room_id}");
+			None
+		}
 	});
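The backfill selection above leans on v12's creator privileging: when `RoomVersion::explicitly_privilege_room_creators` is set, the create event's sender and anyone in `content.additional_creators` count as infinitely powerful (`Int::MAX`), regardless of `m.room.power_levels`. A self-contained sketch of that effective-power rule (the helper and plain types here are ours for illustration, not the codebase's):

```rust
use std::collections::BTreeMap;

/// Illustrative only: the effective power level of `user` once v12
/// creator privileging is applied. `creators` holds the create event's
/// sender plus `content.additional_creators`; `users` / `users_default`
/// come from m.room.power_levels.
fn effective_power(
    user: &str,
    creators: &[&str],
    users: &BTreeMap<&str, i64>,
    users_default: i64,
    privilege_creators: bool, // RoomVersion::explicitly_privilege_room_creators
) -> i64 {
    if privilege_creators && creators.contains(&user) {
        return i64::MAX; // the diff uses ruma's Int::MAX for the same idea
    }
    users.get(user).copied().unwrap_or(users_default)
}

fn main() {
    let users = BTreeMap::from([("@mod:remote.example", 50)]);
    let creators = ["@creator:remote.example"];
    // In a v12 room the creator outranks everyone, even with no PL entry.
    assert_eq!(effective_power("@creator:remote.example", &creators, &users, 0, true), i64::MAX);
    assert_eq!(effective_power("@mod:remote.example", &[], &users, 0, true), 50);
}
```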

@@ -1,5 +1,6 @@
 use std::{collections::HashSet, iter::once};
 
+use conduwuit::trace;
 use conduwuit_core::{
 	Err, Result, implement,
 	matrix::{event::Event, pdu::PduBuilder},
@@ -23,32 +24,34 @@ use super::RoomMutexGuard;
 /// takes a roomid_mutex_state, meaning that only this function is able to
 /// mutate the room state.
 #[implement(super::Service)]
-#[tracing::instrument(skip(self, state_lock), level = "debug")]
+#[tracing::instrument(skip(self, state_lock, pdu_builder), level = "trace")]
 pub async fn build_and_append_pdu(
 	&self,
 	pdu_builder: PduBuilder,
 	sender: &UserId,
-	room_id: &RoomId,
+	room_id: Option<&RoomId>,
 	state_lock: &RoomMutexGuard,
 ) -> Result<OwnedEventId> {
 	let (pdu, pdu_json) = self
 		.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)
 		.await?;
 
-	if self.services.admin.is_admin_room(pdu.room_id()).await {
+	let room_id = pdu.room_id_or_hash();
+	if self.services.admin.is_admin_room(&room_id).await {
 		self.check_pdu_for_admin_room(&pdu, sender).boxed().await?;
 	}
 
 	// If redaction event is not authorized, do not append it to the timeline
 	if *pdu.kind() == TimelineEventType::RoomRedaction {
 		use RoomVersionId::*;
-		match self.services.state.get_room_version(pdu.room_id()).await? {
+		trace!("Running redaction checks for room {room_id}");
+		match self.services.state.get_room_version(&room_id).await? {
 			| V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => {
 				if let Some(redact_id) = pdu.redacts() {
 					if !self
 						.services
 						.state_accessor
-						.user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false)
+						.user_can_redact(redact_id, pdu.sender(), &room_id, false)
 						.await?
 					{
 						return Err!(Request(Forbidden("User cannot redact this event.")));
@@ -61,7 +64,7 @@ pub async fn build_and_append_pdu(
 				if !self
 					.services
 					.state_accessor
-					.user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false)
+					.user_can_redact(redact_id, pdu.sender(), &room_id, false)
 					.await?
 				{
 					return Err!(Request(Forbidden("User cannot redact this event.")));
@@ -72,6 +75,7 @@ pub async fn build_and_append_pdu(
 	}
 
 	if *pdu.kind() == TimelineEventType::RoomMember {
+		trace!("Running room member checks for room {room_id}");
 		let content: RoomMemberEventContent = pdu.get_content()?;
 
 		if content.join_authorized_via_users_server.is_some()
@@ -93,12 +97,22 @@ pub async fn build_and_append_pdu(
 			)));
 		}
 	}
+	if *pdu.kind() == TimelineEventType::RoomCreate {
+		trace!("Creating shortroomid for {room_id}");
+		self.services
+			.short
+			.get_or_create_shortroomid(&room_id)
+			.await;
+	}
 
 	// We append to state before appending the pdu, so we don't have a moment in
 	// time with the pdu without it's state. This is okay because append_pdu can't
 	// fail.
-	let statehashid = self.services.state.append_to_state(&pdu).await?;
+	trace!("Appending {} state for room {room_id}", pdu.event_id());
+	let statehashid = self.services.state.append_to_state(&pdu, &room_id).await?;
+	trace!("State hash ID for {room_id}: {statehashid:?}");
 
+	trace!("Generating raw ID for PDU {}", pdu.event_id());
 	let pdu_id = self
 		.append_pdu(
 			&pdu,
@@ -107,20 +121,22 @@ pub async fn build_and_append_pdu(
 			// of the room
 			once(pdu.event_id()),
 			state_lock,
+			&room_id,
 		)
 		.boxed()
 		.await?;
 
 	// We set the room state after inserting the pdu, so that we never have a moment
 	// in time where events in the current room state do not exist
+	trace!("Setting room state for room {room_id}");
 	self.services
 		.state
-		.set_room_state(pdu.room_id(), statehashid, state_lock);
+		.set_room_state(&room_id, statehashid, state_lock);
 
 	let mut servers: HashSet<OwnedServerName> = self
 		.services
 		.state_cache
-		.room_servers(pdu.room_id())
+		.room_servers(&room_id)
 		.map(ToOwned::to_owned)
 		.collect()
 		.await;
@@ -141,11 +157,13 @@ pub async fn build_and_append_pdu(
 	// room_servers() and/or the if statement above
 	servers.remove(self.services.globals.server_name());
 
+	trace!("Sending PDU {} to {} servers", pdu.event_id(), servers.len());
 	self.services
 		.sending
 		.send_pdu_servers(servers.iter().map(AsRef::as_ref).stream(), &pdu_id)
 		.await?;
 
+	trace!("Event {} in room {:?} has been appended", pdu.event_id(), room_id);
 	Ok(pdu.event_id().to_owned())
 }
 
@@ -179,7 +197,7 @@ where
 	let count = self
 		.services
 		.state_cache
-		.room_members(pdu.room_id())
+		.room_members(&pdu.room_id_or_hash())
 		.ready_filter(|user| self.services.globals.user_is_local(user))
 		.ready_filter(|user| *user != target)
 		.boxed()
@@ -203,7 +221,7 @@ where
 	let count = self
 		.services
 		.state_cache
-		.room_members(pdu.room_id())
+		.room_members(&pdu.room_id_or_hash())
 		.ready_filter(|user| self.services.globals.user_is_local(user))
 		.ready_filter(|user| *user != target)
 		.boxed()

@@ -1,5 +1,6 @@
-use std::cmp;
+use std::{cmp, collections::HashMap};
 
 use conduwuit::{smallstr::SmallString, trace};
 use conduwuit_core::{
 	Err, Error, Result, err, implement,
 	matrix::{
@@ -11,12 +12,13 @@ use conduwuit_core::{
 };
 use futures::{StreamExt, TryStreamExt, future, future::ready};
 use ruma::{
-	CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomId, RoomVersionId, UserId,
+	CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, RoomId, RoomVersionId,
+	UserId,
 	canonical_json::to_canonical_value,
 	events::{StateEventType, TimelineEventType, room::create::RoomCreateEventContent},
 	uint,
 };
-use serde_json::value::to_raw_value;
+use serde_json::value::{RawValue, to_raw_value};
 use tracing::warn;
 
 use super::RoomMutexGuard;
@@ -26,10 +28,26 @@ pub async fn create_hash_and_sign_event(
 	&self,
 	pdu_builder: PduBuilder,
 	sender: &UserId,
-	room_id: &RoomId,
+	room_id: Option<&RoomId>,
 	_mutex_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room
 	                               * state mutex */
 ) -> Result<(PduEvent, CanonicalJsonObject)> {
+	#[allow(clippy::boxed_local)]
+	fn from_evt(
+		room_id: OwnedRoomId,
+		event_type: &TimelineEventType,
+		content: &RawValue,
+	) -> Result<RoomVersionId> {
+		if event_type == &TimelineEventType::RoomCreate {
+			let content: RoomCreateEventContent = serde_json::from_str(content.get())?;
+			Ok(content.room_version)
+		} else {
+			Err(Error::InconsistentRoomState(
+				"non-create event for room of unknown version",
+				room_id,
+			))
+		}
+	}
 	let PduBuilder {
 		event_type,
 		content,
@@ -38,86 +56,114 @@ pub async fn create_hash_and_sign_event(
 		redacts,
 		timestamp,
 	} = pdu_builder;
 
-	let prev_events: Vec<OwnedEventId> = self
-		.services
-		.state
-		.get_forward_extremities(room_id)
-		.take(20)
-		.map(Into::into)
-		.collect()
-		.await;
-
-	// If there was no create event yet, assume we are creating a room
-	let room_version_id = self
-		.services
-		.state
-		.get_room_version(room_id)
-		.await
-		.or_else(|_| {
-			if event_type == TimelineEventType::RoomCreate {
-				let content: RoomCreateEventContent = serde_json::from_str(content.get())?;
-				Ok(content.room_version)
-			} else {
-				Err(Error::InconsistentRoomState(
-					"non-create event for room of unknown version",
-					room_id.to_owned(),
-				))
-			}
-		})?;
+	trace!(
+		"Creating event of type {} in room {}",
+		event_type,
+		room_id.as_ref().map_or("None", |id| id.as_str())
+	);
+	let room_version_id = match room_id {
+		| Some(room_id) => {
+			trace!(%room_id, "Looking up existing room ID");
+			self.services
+				.state
+				.get_room_version(room_id)
+				.await
+				.or_else(|_| {
+					from_evt(room_id.to_owned(), &event_type.clone(), &content.clone())
+				})?
+		},
+		| None => {
+			trace!("No room ID, assuming room creation");
+			from_evt(
+				RoomId::new(self.services.globals.server_name()),
+				&event_type.clone(),
+				&content.clone(),
+			)?
+		},
+	};
 
 	let room_version = RoomVersion::new(&room_version_id).expect("room version is supported");
 
-	let auth_events = self
-		.services
-		.state
-		.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)
-		.await?;
+	let prev_events: Vec<OwnedEventId> = match room_id {
+		| Some(room_id) =>
+			self.services
+				.state
+				.get_forward_extremities(room_id)
+				.take(20)
+				.map(Into::into)
+				.collect()
+				.await,
+		| None => Vec::new(),
+	};
+
+	let auth_events: HashMap<(StateEventType, SmallString<[u8; 48]>), PduEvent> = match room_id {
+		| Some(room_id) =>
+			self.services
+				.state
+				.get_auth_events(
+					room_id,
+					&event_type,
+					sender,
+					state_key.as_deref(),
+					&content,
+					&room_version,
+				)
+				.await?,
+		| None => HashMap::new(),
+	};
 	// Our depth is the maximum depth of prev_events + 1
-	let depth = prev_events
-		.iter()
-		.stream()
-		.map(Ok)
-		.and_then(|event_id| self.get_pdu(event_id))
-		.and_then(|pdu| future::ok(pdu.depth))
-		.ignore_err()
-		.ready_fold(uint!(0), cmp::max)
-		.await
-		.saturating_add(uint!(1));
+	let depth = match room_id {
+		| Some(_) => prev_events
+			.iter()
+			.stream()
+			.map(Ok)
+			.and_then(|event_id| self.get_pdu(event_id))
+			.and_then(|pdu| future::ok(pdu.depth))
+			.ignore_err()
+			.ready_fold(uint!(0), cmp::max)
+			.await
+			.saturating_add(uint!(1)),
+		| None => uint!(1),
+	};
 
 	let mut unsigned = unsigned.unwrap_or_default();
 
-	if let Some(state_key) = &state_key {
-		if let Ok(prev_pdu) = self
-			.services
-			.state_accessor
-			.room_state_get(room_id, &event_type.to_string().into(), state_key)
-			.await
-		{
-			unsigned.insert("prev_content".to_owned(), prev_pdu.get_content_as_value());
-			unsigned.insert("prev_sender".to_owned(), serde_json::to_value(prev_pdu.sender())?);
-			unsigned
-				.insert("replaces_state".to_owned(), serde_json::to_value(prev_pdu.event_id())?);
+	if let Some(room_id) = room_id {
+		if let Some(state_key) = &state_key {
+			if let Ok(prev_pdu) = self
+				.services
+				.state_accessor
+				.room_state_get(room_id, &event_type.clone().to_string().into(), state_key)
+				.await
+			{
+				unsigned.insert("prev_content".to_owned(), prev_pdu.get_content_as_value());
+				unsigned
+					.insert("prev_sender".to_owned(), serde_json::to_value(prev_pdu.sender())?);
+				unsigned.insert(
+					"replaces_state".to_owned(),
+					serde_json::to_value(prev_pdu.event_id())?,
+				);
+			}
 		}
 	}
 
-	if event_type != TimelineEventType::RoomCreate && prev_events.is_empty() {
-		return Err!(Request(Unknown("Event incorrectly had zero prev_events.")));
-	}
-	if state_key.is_none() && depth.lt(&uint!(2)) {
-		// The first two events in a room are always m.room.create and m.room.member,
-		// so any other events with that same depth are illegal.
-		warn!(
-			"Had unsafe depth {depth} when creating non-state event in {room_id}. Cowardly \
-			 aborting"
-		);
-		return Err!(Request(Unknown("Unsafe depth for non-state event.")));
-	}
+	// if event_type != TimelineEventType::RoomCreate && prev_events.is_empty() {
+	// 	return Err!(Request(Unknown("Event incorrectly had zero prev_events.")));
+	// }
+	// if state_key.is_none() && depth.lt(&uint!(2)) {
+	// 	// The first two events in a room are always m.room.create and
+	// 	// m.room.member, so any other events with that same depth are illegal.
+	// 	warn!(
+	// 		"Had unsafe depth {depth} when creating non-state event in {}. Cowardly
+	// 		 aborting", room_id.expect("room_id is Some here").as_str()
+	// 	);
+	// 	return Err!(Request(Unknown("Unsafe depth for non-state event.")));
	// }
 
 	let mut pdu = PduEvent {
 		event_id: ruma::event_id!("$thiswillbefilledinlater").into(),
-		room_id: room_id.to_owned(),
+		room_id: room_id.map(ToOwned::to_owned),
 		sender: sender.to_owned(),
 		origin: None,
 		origin_server_ts: timestamp.map_or_else(
@@ -152,11 +198,30 @@ pub async fn create_hash_and_sign_event(
 		ready(auth_events.get(&key).map(ToOwned::to_owned))
 	};
 
+	let room_id_or_hash = pdu.room_id_or_hash();
+	let create_pdu = match &pdu.kind {
+		| TimelineEventType::RoomCreate => None,
+		| _ => Some(
+			self.services
+				.state_accessor
+				.room_state_get(&room_id_or_hash, &StateEventType::RoomCreate, "")
+				.await
+				.map_err(|e| {
+					err!(Request(Forbidden(warn!("Failed to fetch room create event: {e}"))))
+				})?,
+		),
+	};
+	let create_event = match &pdu.kind {
+		| TimelineEventType::RoomCreate => &pdu,
+		| _ => create_pdu.as_ref().unwrap().as_pdu(),
+	};
+
 	let auth_check = state_res::auth_check(
 		&room_version,
 		&pdu,
 		None, // TODO: third_party_invite
 		auth_fetch,
+		create_event,
 	)
 	.await
 	.map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?;
@@ -164,6 +229,11 @@ pub async fn create_hash_and_sign_event(
 	if !auth_check {
 		return Err!(Request(Forbidden("Event is not authorized.")));
 	}
+	trace!(
+		"Event {} in room {} is authorized",
+		pdu.event_id,
+		pdu.room_id.as_ref().map_or("None", |id| id.as_str())
+	);
 
 	// Hash and sign
 	let mut pdu_json = utils::to_canonical_object(&pdu).map_err(|e| {
@@ -178,13 +248,13 @@ pub async fn create_hash_and_sign_event(
 		},
 	}
 
 	// Add origin because synapse likes that (and it's required in the spec)
 	pdu_json.insert(
 		"origin".to_owned(),
 		to_canonical_value(self.services.globals.server_name())
 			.expect("server name is a valid CanonicalJsonValue"),
 	);
 
+	trace!("hashing and signing event {}", pdu.event_id);
 	if let Err(e) = self
 		.services
 		.server_keys
@@ -204,30 +274,45 @@ pub async fn create_hash_and_sign_event(
 	pdu_json.insert("event_id".into(), CanonicalJsonValue::String(pdu.event_id.clone().into()));
 
 	// Check with the policy server
-	match self
-		.services
-		.event_handler
-		.ask_policy_server(&pdu, room_id)
-		.await
-	{
-		| Ok(true) => {},
-		| Ok(false) => {
-			return Err!(Request(Forbidden(debug_warn!(
-				"Policy server marked this event as spam"
-			))));
-		},
-		| Err(e) => {
-			// fail open
-			warn!("Failed to check event with policy server: {e}");
-		},
-	}
+	// TODO(hydra): Skip this check for create events (why didnt we do this
+	// already?)
+	if room_id.is_some() {
+		trace!(
+			"Checking event {} in room {} with policy server",
+			pdu.event_id,
+			pdu.room_id.as_ref().map_or("None", |id| id.as_str())
+		);
+		match self
+			.services
+			.event_handler
+			.ask_policy_server(&pdu, &pdu.room_id_or_hash())
+			.await
+		{
+			| Ok(true) => {},
+			| Ok(false) => {
+				return Err!(Request(Forbidden(debug_warn!(
+					"Policy server marked this event as spam"
+				))));
+			},
+			| Err(e) => {
+				// fail open
+				warn!("Failed to check event with policy server: {e}");
+			},
		}
	}
 
 	// Generate short event id
+	trace!(
+		"Generating short event ID for {} in room {}",
+		pdu.event_id,
+		pdu.room_id.as_ref().map_or("None", |id| id.as_str())
+	);
 	let _shorteventid = self
 		.services
 		.short
 		.get_or_create_shorteventid(&pdu.event_id)
 		.await;
 
+	trace!("New PDU created: {pdu:?}");
 	Ok((pdu, pdu_json))
 }
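`pdu.room_id_or_hash()` is used throughout this file but its body isn't part of the diff; presumably it returns the stored room ID when one exists and otherwise derives it from the event ID (which, for a create event, is the event whose hash names the room). A guess at its shape, for orientation only:

```rust
use ruma::{EventId, OwnedRoomId, RoomId};

/// Not the real implementation -- a sketch of what `PduEvent::room_id_or_hash`
/// plausibly does, based on how the diff uses it.
fn room_id_or_hash(room_id: Option<&RoomId>, event_id: &EventId) -> OwnedRoomId {
    match room_id {
        | Some(id) => id.to_owned(),
        // v12 create events: room ID = event ID with `$` swapped for `!`
        | None => OwnedRoomId::parse(event_id.as_str().replace('$', "!"))
            .expect("event ID maps to a valid room ID"),
    }
}
```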

@@ -39,7 +39,11 @@ pub async fn redact_pdu<Pdu: Event + Send + Sync>(
 		}
 	}
 
-	let room_version_id = self.services.state.get_room_version(pdu.room_id()).await?;
+	let room_version_id = self
+		.services
+		.state
+		.get_room_version(&pdu.room_id_or_hash())
+		.await?;
 
 	pdu.redact(&room_version_id, reason.to_value())?;

@@ -798,7 +798,7 @@ impl Service {
 		let unread: UInt = self
 			.services
 			.user
-			.notification_count(&user_id, pdu.room_id())
+			.notification_count(&user_id, &pdu.room_id_or_hash())
 			.await
 			.try_into()
 			.expect("notification count can't go that high");

@@ -1,6 +1,6 @@
 use std::borrow::Borrow;
 
-use conduwuit::{Err, Result, implement};
+use conduwuit::{Err, Result, debug_error, implement, trace};
 use ruma::{
 	CanonicalJsonObject, RoomVersionId, ServerName, ServerSigningKeyId,
 	api::federation::discovery::VerifyKey,
@@ -19,9 +19,11 @@ pub async fn get_event_keys(
 	let required = match required_keys(object, version) {
 		| Ok(required) => required,
 		| Err(e) => {
+			debug_error!("Failed to determine keys required to verify: {e}");
 			return Err!(BadServerResponse("Failed to determine keys required to verify: {e}"));
 		},
 	};
+	trace!(?required, "Keys required to verify event");
 
 	let batch = required
 		.iter()
@@ -61,6 +63,7 @@ where
 }
 
 #[implement(super::Service)]
+#[tracing::instrument(skip(self))]
 pub async fn get_verify_key(
 	&self,
 	origin: &ServerName,
@@ -70,6 +73,7 @@ pub async fn get_verify_key(
 	let notary_only = self.services.server.config.only_query_trusted_key_servers;
 
 	if let Some(result) = self.verify_keys_for(origin).await.remove(key_id) {
+		trace!("Found key in cache");
 		return Ok(result);
 	}

@@ -8,7 +8,7 @@ mod verify;
 use std::{collections::BTreeMap, sync::Arc, time::Duration};
 
 use conduwuit::{
-	Result, Server, implement,
+	Result, Server, debug_error, debug_warn, implement, trace,
 	utils::{IterStream, timepoint_from_now},
 };
 use database::{Deserialized, Json, Map};
@@ -112,6 +112,7 @@ async fn add_signing_keys(&self, new_keys: ServerSigningKeys) {
 }
 
 #[implement(Service)]
+#[tracing::instrument(skip(self, object))]
 pub async fn required_keys_exist(
 	&self,
 	object: &CanonicalJsonObject,
@@ -119,10 +120,12 @@ pub async fn required_keys_exist(
 ) -> bool {
 	use ruma::signatures::required_keys;
 
+	trace!(?object, "Checking required keys exist");
 	let Ok(required_keys) = required_keys(object, version) else {
+		debug_error!("Failed to determine required keys");
 		return false;
 	};
 
+	trace!(?required_keys, "Required keys to verify event");
 	required_keys
 		.iter()
 		.flat_map(|(server, key_ids)| key_ids.iter().map(move |key_id| (server, key_id)))
@@ -132,6 +135,7 @@ pub async fn required_keys_exist(
 }
 
 #[implement(Service)]
+#[tracing::instrument(skip(self))]
 pub async fn verify_key_exists(&self, origin: &ServerName, key_id: &ServerSigningKeyId) -> bool {
 	type KeysMap<'a> = BTreeMap<&'a ServerSigningKeyId, &'a RawJsonValue>;
 
@@ -142,6 +146,7 @@ pub async fn verify_key_exists(&self, origin: &ServerName, key_id: &ServerSigningKeyId) -> bool {
 		.await
 		.deserialized::<Raw<ServerSigningKeys>>()
 	else {
+		debug_warn!("No known signing keys found for {origin}");
 		return false;
 	};
 
@@ -157,6 +162,7 @@ pub async fn verify_key_exists(&self, origin: &ServerName, key_id: &ServerSigningKeyId) -> bool {
 		}
 	}
 
+	debug_warn!("Key {key_id} not found for {origin}");
 	false
 }

@@ -1,4 +1,6 @@
-use conduwuit::{Err, Result, implement, matrix::event::gen_event_id_canonical_json};
+use conduwuit::{
+	Err, Result, debug_warn, implement, matrix::event::gen_event_id_canonical_json, trace,
+};
 use ruma::{
 	CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomVersionId, signatures::Verified,
 };
@@ -28,18 +30,25 @@ pub async fn validate_and_add_event_id_no_fetch(
 	pdu: &RawJsonValue,
 	room_version: &RoomVersionId,
 ) -> Result<(OwnedEventId, CanonicalJsonObject)> {
+	trace!(?pdu, "Validating PDU without fetching keys");
 	let (event_id, mut value) = gen_event_id_canonical_json(pdu, room_version)?;
+	trace!(event_id = event_id.as_str(), "Generated event ID, checking required keys");
 	if !self.required_keys_exist(&value, room_version).await {
+		debug_warn!(
+			"Event {event_id} is missing required keys, cannot verify without fetching keys"
+		);
 		return Err!(BadServerResponse(debug_warn!(
 			"Event {event_id} cannot be verified: missing keys."
 		)));
 	}
 
+	trace!("All required keys exist, verifying event");
 	if let Err(e) = self.verify_event(&value, Some(room_version)).await {
+		debug_warn!("Event verification failed");
 		return Err!(BadServerResponse(debug_error!(
 			"Event {event_id} failed verification: {e:?}"
 		)));
 	}
+	trace!("Event verified successfully");
 
 	value.insert("event_id".into(), CanonicalJsonValue::String(event_id.as_str().into()));
 
@@ -52,7 +61,7 @@ pub async fn verify_event(
 	event: &CanonicalJsonObject,
 	room_version: Option<&RoomVersionId>,
 ) -> Result<Verified> {
-	let room_version = room_version.unwrap_or(&RoomVersionId::V11);
+	let room_version = room_version.unwrap_or(&RoomVersionId::V12);
 	let keys = self.get_event_keys(event, room_version).await?;
 	ruma::signatures::verify_event(&keys, event, room_version).map_err(Into::into)
 }
@@ -63,7 +72,7 @@ pub async fn verify_json(
 	event: &CanonicalJsonObject,
 	room_version: Option<&RoomVersionId>,
 ) -> Result {
-	let room_version = room_version.unwrap_or(&RoomVersionId::V11);
+	let room_version = room_version.unwrap_or(&RoomVersionId::V12);
 	let keys = self.get_event_keys(event, room_version).await?;
 	ruma::signatures::verify_json(&keys, event.clone()).map_err(Into::into)
 }