
Merge branch 'release-0.10.5' into 'master'

Release 0.10.5

See merge request famedly/conduit!758
This commit is contained in:
Matthias Ahouansou 2025-06-23 14:23:32 +00:00
commit 7b1f8f8ee2
27 changed files with 539 additions and 421 deletions

Cargo.lock (generated): 551 changes

File diff suppressed because it is too large

View file

@@ -16,10 +16,10 @@ license = "Apache-2.0"
 name = "conduit"
 readme = "README.md"
 repository = "https://gitlab.com/famedly/conduit"
-version = "0.10.4"
+version = "0.10.5"
 # See also `rust-toolchain.toml`
-rust-version = "1.83.0"
+rust-version = "1.85.0"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -28,18 +28,17 @@ workspace = true
 [dependencies]
 # Web framework
-# Can't bump until https://github.com/ruma/ruma/pull/1846 is merged or superseded
-axum = { version = "0.7", default-features = false, features = [
+axum = { version = "0.8", default-features = false, features = [
 "form",
 "http1",
 "http2",
 "json",
 "matched-path",
 ], optional = true }
-axum-extra = { version = "0.9", features = ["typed-header"] }
+axum-extra = { version = "0.10", features = ["typed-header"] }
-axum-server = { version = "0.6", features = ["tls-rustls"] }
+axum-server = { version = "0.7", features = ["tls-rustls"] }
-tower = { version = "0.4", features = ["util"] }
+tower = { version = "0.5", features = ["util"] }
-tower-http = { version = "0.5", features = [
+tower-http = { version = "0.6", features = [
 "add-extension",
 "cors",
 "sensitive-headers",

View file

@@ -59,7 +59,7 @@
 file = ./rust-toolchain.toml;
 # See also `rust-toolchain.toml`
-sha256 = "sha256-s1RPtyvDGJaX/BisLT+ifVfuhDT1nZkZ1NcK8sbwELM=";
+sha256 = "sha256-AJ6LX/Q/Er9kS15bn9iflkUwcgYqRQxiOIL2ToVAXaU=";
 };
 });
 in

View file

@@ -9,7 +9,7 @@
 # If you're having trouble making the relevant changes, bug a maintainer.
 [toolchain]
-channel = "1.83.0"
+channel = "1.85.0"
 components = [
 # For rust-analyzer
 "rust-src",

View file

@@ -1,4 +1,4 @@
-use crate::{services, utils, Error, Result, MATRIX_VERSIONS};
+use crate::{services, utils, Error, Result, SUPPORTED_VERSIONS};
 use bytes::BytesMut;
 use ruma::api::{appservice::Registration, IncomingResponse, OutgoingRequest, SendAccessToken};
 use std::{fmt::Debug, mem, time::Duration};
@@ -28,7 +28,7 @@ where
 .try_into_http_request::<BytesMut>(
 &destination,
 SendAccessToken::IfRequired(hs_token),
-MATRIX_VERSIONS,
+&SUPPORTED_VERSIONS,
 )
 .unwrap()
 .map(|body| body.freeze());

View file

@@ -25,7 +25,7 @@ const RANDOM_USER_ID_LENGTH: usize = 10;
 /// Checks if a username is valid and available on this server.
 ///
 /// Conditions for returning true:
-/// - The user id is not historical
+/// - The user id must be valid according to the strict grammar
 /// - The server name of the user id matches this server
 /// - No user or appservice on this server already claimed this username
 ///
@@ -40,7 +40,8 @@ pub async fn get_register_available_route(
 )
 .ok()
 .filter(|user_id| {
-!user_id.is_historical() && user_id.server_name() == services().globals.server_name()
+user_id.validate_strict().is_ok()
+&& user_id.server_name() == services().globals.server_name()
 })
 .ok_or(Error::BadRequest(
 ErrorKind::InvalidUsername,
@@ -92,7 +93,7 @@ pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<registe
 )
 .ok()
 .filter(|user_id| {
-!user_id.is_historical()
+user_id.validate_strict().is_ok()
 && user_id.server_name() == services().globals.server_name()
 })
 .ok_or(Error::BadRequest(
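
A note on the check above: the strict user ID grammar only accepts lowercase localparts (letters, digits, and a few symbols), while plain parsing still accepts historical user IDs. A standalone sketch of that distinction, assuming ruma's `UserId::parse` stays lenient; the example IDs are made up for illustration:

    use ruma::UserId;

    fn main() {
        // Conforms to the strict localpart grammar.
        let strict = UserId::parse("@alice:example.com").unwrap();
        assert!(strict.validate_strict().is_ok());

        // Parses as a historical user ID, but the uppercase letter in the
        // localpart fails the strict grammar used by the registration check.
        let historical = UserId::parse("@Alice:example.com").unwrap();
        assert!(historical.validate_strict().is_err());
    }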

View file

@@ -1,6 +1,7 @@
 use crate::{services, Result, Ruma};
 use ruma::api::client::discovery::get_capabilities::{
-self, Capabilities, RoomVersionStability, RoomVersionsCapability,
+self,
+v3::{Capabilities, RoomVersionStability, RoomVersionsCapability},
 };
 use std::collections::BTreeMap;

View file

@@ -656,7 +656,7 @@ pub async fn joined_members_route(
 Ok(joined_members::v3::Response { joined })
 }
-pub(crate) async fn invite_helper<'a>(
+pub(crate) async fn invite_helper(
 sender_user: &UserId,
 user_id: &UserId,
 room_id: &RoomId,

View file

@@ -182,11 +182,11 @@ pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Re
 let token = utils::random_string(TOKEN_LENGTH);
 // Determine if device_id was provided and exists in the db for this user
-let device_exists = body.device_id.as_ref().map_or(false, |device_id| {
+let device_exists = body.device_id.as_ref().is_some_and(|device_id| {
 services()
 .users
 .all_device_ids(&user_id)
-.any(|x| x.as_ref().map_or(false, |v| v == device_id))
+.any(|x| x.as_ref().is_ok_and(|v| v == device_id))
 });
 if device_exists {
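
The `map_or(false, …)` and `map_or(true, …)` rewrites here and throughout this release are plain standard-library combinators; `is_none_or` in particular is a fairly recent addition to std, which fits the toolchain bump to 1.85. A standalone sketch of the equivalences with made-up values:

    fn main() {
        let device: Option<u32> = Some(7);
        let lookup: Result<u32, ()> = Ok(7);
        let missing: Option<u32> = None;

        // Option::map_or(false, f)  ->  Option::is_some_and(f)
        assert_eq!(device.map_or(false, |v| v == 7), device.is_some_and(|v| v == 7));
        // Result::map_or(false, f)  ->  Result::is_ok_and(f)
        assert_eq!(lookup.map_or(false, |v| v == 7), lookup.is_ok_and(|v| v == 7));
        // Option::map_or(true, f)   ->  Option::is_none_or(f)
        assert_eq!(missing.map_or(true, |v| v == 7), missing.is_none_or(|v| v == 7));
    }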

View file

@@ -807,8 +807,8 @@ async fn load_joined_room(
 .ok()
 });
-let joined_since_last_sync = since_sender_member
-.map_or(true, |member| member.membership != MembershipState::Join);
+let joined_since_last_sync =
+since_sender_member.is_none_or(|member| member.membership != MembershipState::Join);
 if since_shortstatehash.is_none() || joined_since_last_sync {
 // Probably since = 0, we will do an initial sync
@@ -1380,7 +1380,7 @@ pub async fn sync_events_v5_route(
 )?;
 let joined_since_last_sync = since_sender_member
-.map_or(true, |member| member.membership != MembershipState::Join);
+.is_none_or(|member| member.membership != MembershipState::Join);
 let new_encrypted_room = encrypted_room && since_encryption.is_none();
 if encrypted_room {
@@ -1509,10 +1509,13 @@ pub async fn sync_events_v5_route(
 let mut new_known_rooms = BTreeSet::new();
 for (mut start, mut end) in list.ranges {
-start = start.clamp(uint!(0), UInt::from(all_joined_rooms.len() as u32 - 1));
-end = end.clamp(start, UInt::from(all_joined_rooms.len() as u32 - 1));
+start = start.clamp(
+uint!(0),
+UInt::from(all_joined_rooms.len().saturating_sub(1) as u32),
+);
+end = end.clamp(start, UInt::from(all_joined_rooms.len() as u32));
 let room_ids =
-all_joined_rooms[(u64::from(start) as usize)..=(u64::from(end) as usize)].to_vec();
+all_joined_rooms[(u64::from(start) as usize)..(u64::from(end) as usize)].to_vec();
 new_known_rooms.extend(room_ids.iter().cloned());
 for room_id in &room_ids {
 let todo_room =
@@ -1754,16 +1757,7 @@ pub async fn sync_events_v5_route(
 ),
 num_live: None, // Count events in timeline greater than global sync counter
 bump_stamp,
-heroes: if body
-.room_subscriptions
-.get(room_id)
-.map(|sub| sub.include_heroes.unwrap_or_default())
-.unwrap_or_default()
-{
-Some(heroes)
-} else {
-None
-},
+heroes: Some(heroes),
 },
 );
 }
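
The clamp rewrite above also swaps `all_joined_rooms.len() as u32 - 1` for `len().saturating_sub(1)`, which sidesteps the unsigned underflow a bare `- 1` would hit when the joined-room list is empty. A standalone illustration, unrelated to the actual room data:

    fn main() {
        let len: usize = 0;

        // Saturating subtraction stops at zero.
        assert_eq!(len.saturating_sub(1), 0);

        // A bare `0u32 - 1` panics in debug builds and wraps in release builds;
        // checked_sub makes the failure explicit instead.
        assert_eq!((len as u32).checked_sub(1), None);
    }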

View file

@@ -61,12 +61,11 @@ pub async fn search_users_route(
 .rooms
 .state_accessor
 .room_state_get(&room, &StateEventType::RoomJoinRules, "")
-.map_or(false, |event| {
-event.map_or(false, |event| {
-serde_json::from_str(event.content.get())
-.map_or(false, |r: RoomJoinRulesEventContent| {
-r.join_rule == JoinRule::Public
-})
+.is_ok_and(|event| {
+event.is_some_and(|event| {
+serde_json::from_str(event.content.get()).is_ok_and(
+|r: RoomJoinRulesEventContent| r.join_rule == JoinRule::Public,
+)
 })
 })
 });

View file

@@ -1,7 +1,6 @@
 use std::{collections::BTreeMap, iter::FromIterator, str};
 use axum::{
-async_trait,
 body::Body,
 extract::{FromRequest, Path},
 response::{IntoResponse, Response},
@@ -34,10 +33,10 @@ enum Token {
 None,
 }
-#[async_trait]
 impl<T, S> FromRequest<S> for Ruma<T>
 where
 T: IncomingRequest,
+S: Sync,
 {
 type Rejection = Error;
@@ -65,7 +64,13 @@ where
 };
 let metadata = T::METADATA;
-let auth_header: Option<TypedHeader<Authorization<Bearer>>> = parts.extract().await?;
+let auth_header: Option<TypedHeader<Authorization<Bearer>>> =
+// If X-Matrix signatures are used, it causes this extraction to fail with an error
+if metadata.authentication != AuthScheme::ServerSignatures {
+parts.extract().await?
+} else {
+None
+};
 let path_params: Path<Vec<String>> = parts.extract().await?;
 let query = parts.uri.query().unwrap_or_default();

View file

@@ -7,7 +7,7 @@ use crate::{
 media::FileMeta,
 pdu::{gen_event_id_canonical_json, PduBuilder},
 },
-services, utils, Error, PduEvent, Result, Ruma, MATRIX_VERSIONS,
+services, utils, Error, PduEvent, Result, Ruma, SUPPORTED_VERSIONS,
 };
 use axum::{response::IntoResponse, Json};
 use axum_extra::headers::{CacheControl, Header};
@@ -214,7 +214,7 @@ where
 .try_into_http_request::<Vec<u8>>(
 &actual_destination_str,
 SendAccessToken::IfRequired(""),
-MATRIX_VERSIONS,
+&SUPPORTED_VERSIONS,
 )
 .map_err(|e| {
 warn!(

View file

@@ -22,10 +22,7 @@ impl service::pusher::Data for KeyValueDatabase {
 let mut key = sender.as_bytes().to_vec();
 key.push(0xff);
 key.extend_from_slice(ids.pushkey.as_bytes());
-self.senderkey_pusher
-.remove(&key)
-.map(|_| ())
-.map_err(Into::into)
+self.senderkey_pusher.remove(&key).map(|_| ())
 }
 }
 }

View file

@@ -221,9 +221,9 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
 .ok();
 let in_room = bridge_user_id
-.map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false))
+.is_some_and(|id| self.is_joined(&id, room_id).unwrap_or(false))
 || self.room_members(room_id).any(|userid| {
-userid.map_or(false, |userid| appservice.users.is_match(userid.as_str()))
+userid.is_ok_and(|userid| appservice.users.is_match(userid.as_str()))
 });
 self.appservice_in_room_cache
@@ -273,7 +273,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase {
 }
 #[tracing::instrument(skip(self))]
-fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result<bool> {
+fn server_in_room(&self, server: &ServerName, room_id: &RoomId) -> Result<bool> {
 let mut key = server.as_bytes().to_vec();
 key.push(0xff);
 key.extend_from_slice(room_id.as_bytes());

View file

@@ -491,7 +491,7 @@ impl KeyValueDatabase {
 for (userid, password) in db.userid_password.iter() {
 let password = utils::string_from_bytes(&password);
-let empty_hashed_password = password.map_or(false, |password| {
+let empty_hashed_password = password.is_ok_and(|password| {
 argon2::verify_encoded(&password, b"").unwrap_or(false)
 });

View file

@@ -8,17 +8,23 @@ mod utils;
 // Not async due to services() being used in many closures, and async closures are not stable as of writing
 // This is the case for every other occurrence of sync Mutex/RwLock, except for database related ones, where
 // the current maintainer (Timo) has asked to not modify those
-use std::sync::RwLock;
+use std::{
+collections::BTreeSet,
+sync::{LazyLock, RwLock},
+};
 pub use api::ruma_wrapper::{Ruma, RumaResponse};
 pub use config::Config;
 pub use database::KeyValueDatabase;
-use ruma::api::MatrixVersion;
+use ruma::api::{MatrixVersion, SupportedVersions};
 pub use service::{pdu::PduEvent, Services};
 pub use utils::error::{Error, Result};
 pub static SERVICES: RwLock<Option<&'static Services>> = RwLock::new(None);
-pub const MATRIX_VERSIONS: &[MatrixVersion] = &[MatrixVersion::V1_13];
+pub static SUPPORTED_VERSIONS: LazyLock<SupportedVersions> = LazyLock::new(|| SupportedVersions {
+versions: BTreeSet::from_iter([MatrixVersion::V1_13]),
+features: Vec::new(),
+});
 pub fn services() -> &'static Services {
 SERVICES
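
The new static relies on `std::sync::LazyLock`, so the `SupportedVersions` value is built once on first access; call sites pass `&SUPPORTED_VERSIONS` and deref coercion turns that into a plain reference to the inner value. A standalone sketch of the pattern with made-up types, not the Conduit definitions:

    use std::{collections::BTreeSet, sync::LazyLock};

    // Initialized lazily on first access, then shared for the rest of the process.
    static SUPPORTED_CODES: LazyLock<BTreeSet<u32>> =
        LazyLock::new(|| BTreeSet::from_iter([200, 404, 503]));

    fn takes_ref(codes: &BTreeSet<u32>) -> usize {
        codes.len()
    }

    fn main() {
        // `&SUPPORTED_CODES` coerces from `&LazyLock<BTreeSet<u32>>` to `&BTreeSet<u32>`.
        assert_eq!(takes_ref(&SUPPORTED_CODES), 3);
    }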

View file

@@ -401,23 +401,23 @@ fn routes(config: &Config) -> Router {
 // Ruma doesn't have support for multiple paths for a single endpoint yet, and these routes
 // share one Ruma request / response type pair with {get,send}_state_event_for_key_route
 .route(
-"/_matrix/client/r0/rooms/:room_id/state/:event_type",
+"/_matrix/client/r0/rooms/{room_id}/state/{event_type}",
 get(client_server::get_state_events_for_empty_key_route)
 .put(client_server::send_state_event_for_empty_key_route),
 )
 .route(
-"/_matrix/client/v3/rooms/:room_id/state/:event_type",
+"/_matrix/client/v3/rooms/{room_id}/state/{event_type}",
 get(client_server::get_state_events_for_empty_key_route)
 .put(client_server::send_state_event_for_empty_key_route),
 )
 // These two endpoints allow trailing slashes
 .route(
-"/_matrix/client/r0/rooms/:room_id/state/:event_type/",
+"/_matrix/client/r0/rooms/{room_id}/state/{event_type}/",
 get(client_server::get_state_events_for_empty_key_route)
 .put(client_server::send_state_event_for_empty_key_route),
 )
 .route(
-"/_matrix/client/v3/rooms/:room_id/state/:event_type/",
+"/_matrix/client/v3/rooms/{room_id}/state/{event_type}/",
 get(client_server::get_state_events_for_empty_key_route)
 .put(client_server::send_state_event_for_empty_key_route),
 )
@@ -459,11 +459,11 @@ fn routes(config: &Config) -> Router {
 .ruma_route(client_server::get_hierarchy_route)
 .ruma_route(client_server::well_known_client)
 .route(
-"/_matrix/client/r0/rooms/:room_id/initialSync",
+"/_matrix/client/r0/rooms/{room_id}/initialSync",
 get(initial_sync),
 )
 .route(
-"/_matrix/client/v3/rooms/:room_id/initialSync",
+"/_matrix/client/v3/rooms/{room_id}/initialSync",
 get(initial_sync),
 )
 .route("/", get(it_works))
@@ -477,7 +477,7 @@ fn routes(config: &Config) -> Router {
 get(server_server::get_server_keys_route),
 )
 .route(
-"/_matrix/key/v2/server/:key_id",
+"/_matrix/key/v2/server/{key_id}",
 get(server_server::get_server_keys_deprecated_route),
 )
 .ruma_route(server_server::get_public_rooms_route)
@@ -509,8 +509,8 @@ fn routes(config: &Config) -> Router {
 .ruma_route(server_server::well_known_server)
 } else {
 router
-.route("/_matrix/federation/*path", any(federation_disabled))
+.route("/_matrix/federation/{*path}", any(federation_disabled))
-.route("/_matrix/key/*path", any(federation_disabled))
+.route("/_matrix/key/{*path}", any(federation_disabled))
 .route("/.well-known/matrix/server", any(federation_disabled))
 }
 }
@@ -595,12 +595,11 @@ pub trait RumaHandler<T> {
 macro_rules! impl_ruma_handler {
 ( $($ty:ident),* $(,)? ) => {
-#[axum::async_trait]
 #[allow(non_snake_case)]
 impl<Req, E, F, Fut, $($ty,)*> RumaHandler<($($ty,)* Ruma<Req>,)> for F
 where
 Req: IncomingRequest + Send + 'static,
-F: FnOnce($($ty,)* Ruma<Req>) -> Fut + Clone + Send + 'static,
+F: FnOnce($($ty,)* Ruma<Req>) -> Fut + Clone + Send + Sync + 'static,
 Fut: Future<Output = Result<Req::OutgoingResponse, E>>
 + Send,
 E: IntoResponse,
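
The route-string changes track axum 0.8, which replaced the `:param` and `*wildcard` path syntax with `{param}` and `{*wildcard}`. A minimal standalone router using the new syntax; the paths and handlers below are illustrative only, not Conduit's:

    use axum::{extract::Path, routing::{any, get}, Router};

    async fn show_room(Path(room_id): Path<String>) -> String {
        format!("room: {room_id}")
    }

    async fn federation_disabled() -> &'static str {
        "Federation is disabled."
    }

    fn router() -> Router {
        Router::new()
            // `{room_id}` replaces the old `:room_id` capture.
            .route("/rooms/{room_id}", get(show_room))
            // `{*path}` replaces the old `*path` catch-all.
            .route("/federation/{*path}", any(federation_disabled))
    }

    fn main() {
        // Registering the routes is enough to validate the patterns;
        // axum panics here on malformed path syntax.
        let _app = router();
    }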

View file

@@ -408,6 +408,13 @@ impl state_res::Event for PduEvent {
 fn redacts(&self) -> Option<&Self::Id> {
 self.redacts.as_ref()
 }
+// We currently don't store rejected events (see steps 6-8 of `handle_incoming_pdu`), even
+// though we should according to the spec:
+// https://spec.matrix.org/v1.14/rooms/v11/#rejected-events
+fn rejected(&self) -> bool {
+false
+}
 }
 // These impl's allow us to dedup state snapshots when resolving state

View file

@@ -2,7 +2,7 @@ mod data;
 pub use data::Data;
 use ruma::{events::AnySyncTimelineEvent, push::PushConditionPowerLevelsCtx};
-use crate::{services, Error, PduEvent, Result, MATRIX_VERSIONS};
+use crate::{services, Error, PduEvent, Result, SUPPORTED_VERSIONS};
 use bytes::BytesMut;
 use ruma::{
 api::{
@@ -58,7 +58,7 @@ impl Service {
 .try_into_http_request::<BytesMut>(
 &destination,
 SendAccessToken::IfRequired(""),
-MATRIX_VERSIONS,
+&SUPPORTED_VERSIONS,
 )
 .map_err(|e| {
 warn!("Failed to find destination {}: {}", destination, e);

View file

@@ -51,25 +51,26 @@ impl Service {
 /// 0. Check the server is in the room
 /// 1. Skip the PDU if we already know about it
 /// 1.1. Remove unsigned field
-/// 2. Check signatures, otherwise drop
-/// 3. Check content hash, redact if doesn't match
-/// 4. Fetch any missing auth events doing all checks listed here starting at 1. These are not
+/// 2. Check event is valid, otherwise drop
+/// 3. Check signatures, otherwise drop
+/// 4. Check content hash, redact if doesn't match
+/// 5. Fetch any missing auth events doing all checks listed here starting at 1. These are not
 /// timeline events
-/// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are
+/// 6. Reject "due to auth events" if can't get all the auth events or some of the auth events are
 /// also rejected "due to auth events"
-/// 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events
+/// 7. Reject "due to auth events" if the event doesn't pass auth based on the auth events
-/// 7. Persist this event as an outlier
+/// 8. Persist this event as an outlier
-/// 8. If not timeline event: stop
+/// 9. If not timeline event: stop
-/// 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline
+/// 10. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline
 /// events
-/// 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities
+/// 11. Fetch missing state and auth chain events by calling /state_ids at backwards extremities
 /// doing all the checks in this list starting at 1. These are not timeline events
-/// 11. Check the auth of the event passes based on the state of the event
+/// 12. Check the auth of the event passes based on the state of the event
-/// 12. Ensure that the state is derived from the previous current state (i.e. we calculated by
+/// 13. Ensure that the state is derived from the previous current state (i.e. we calculated by
 /// doing state res where one of the inputs was a previously trusted set of state, don't just
 /// trust a set of state we got from a remote)
-/// 13. Use state resolution to find new room state
+/// 14. Use state resolution to find new room state
-/// 14. Check if the event passes auth based on the "current state" of the room, if not soft fail it
+/// 15. Check if the event passes auth based on the "current state" of the room, if not soft fail it
 // We use some AsyncRecursiveType hacks here so we can call this async function recursively
 #[tracing::instrument(skip(self, value, is_timeline_event, pub_key_map))]
 pub(crate) async fn handle_incoming_pdu<'a>(
@@ -135,7 +136,7 @@ impl Service {
 .await?;
 self.check_room_id(room_id, &incoming_pdu)?;
-// 8. if not timeline event: stop
+// 9. if not timeline event: stop
 if !is_timeline_event {
 return Ok(None);
 }
@@ -145,7 +146,7 @@ impl Service {
 return Ok(None);
 }
-// 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events
+// 10. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events
 let (sorted_prev_events, mut eventid_info) = self
 .fetch_unknown_prev_events(
 origin,
@@ -313,8 +314,9 @@ impl Service {
 // 1.1. Remove unsigned field
 value.remove("unsigned");
-// 2. Check signatures, otherwise drop
-// 3. check content hash, redact if doesn't match
+// 2. Check event is valid, otherwise drop
+// 3. Check signatures, otherwise drop
+// 4. check content hash, redact if doesn't match
 let create_event_content: RoomCreateEventContent =
 serde_json::from_str(create_event.content.get()).map_err(|e| {
 error!("Invalid create event: {}", e);
@@ -326,6 +328,15 @@ impl Service {
 .rules()
 .expect("Supported room version has rules");
+debug!("Checking format of join event PDU");
+if let Err(e) = state_res::check_pdu_format(&value, &room_version_rules.event_format) {
+warn!("Invalid PDU with event ID {event_id} received: {e}");
+return Err(Error::BadRequest(
+ErrorKind::InvalidParam,
+"Received Invalid PDU",
+));
+}
 // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json
 // We go through all the signatures we see on the value and fetch the corresponding signing
@@ -421,8 +432,8 @@ impl Service {
 self.check_room_id(room_id, &incoming_pdu)?;
 if !auth_events_known {
-// 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events
-// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events"
+// 5. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events
+// 6. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events"
 // NOTE: Step 5 is not applied anymore because it failed too often
 debug!(event_id = ?incoming_pdu.event_id, "Fetching auth events");
 self.fetch_and_handle_outliers(
@@ -440,7 +451,7 @@ impl Service {
 .await;
 }
-// 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events
+// 7. Reject "due to auth events" if the event doesn't pass auth based on the auth events
 debug!(
 "Auth check for {} based on auth events",
 incoming_pdu.event_id
@@ -448,6 +459,7 @@ impl Service {
 // Build map of auth events
 let mut auth_events = HashMap::new();
+let mut auth_events_by_event_id = HashMap::new();
 for id in &incoming_pdu.auth_events {
 let auth_event = match services().rooms.timeline.get_pdu(id)? {
 Some(e) => e,
@@ -457,46 +469,33 @@ impl Service {
 }
 };
-self.check_room_id(room_id, &auth_event)?;
-match auth_events.entry((
-auth_event.kind.to_string().into(),
+auth_events_by_event_id.insert(auth_event.event_id.clone(), auth_event.clone());
+auth_events.insert(
+(
+StateEventType::from(auth_event.kind.to_string()),
 auth_event
 .state_key
 .clone()
 .expect("all auth events have state keys"),
-)) {
-hash_map::Entry::Vacant(v) => {
-v.insert(auth_event);
-}
-hash_map::Entry::Occupied(_) => {
-return Err(Error::BadRequest(
-ErrorKind::InvalidParam,
-"Auth event's type and state_key combination exists multiple times.",
-));
-}
-}
+),
+auth_event,
+);
 }
-// The original create event must be in the auth events
-if !matches!(
-auth_events
-.get(&(StateEventType::RoomCreate, "".to_owned()))
-.map(|a| a.as_ref()),
-Some(_) | None
-) {
-return Err(Error::BadRequest(
-ErrorKind::InvalidParam,
-"Incoming event refers to wrong create event.",
-));
-}
-if state_res::event_auth::auth_check(
+// first time we are doing any sort of auth check, so we check state-independent
+// auth rules in addition to the state-dependent ones.
+if state_res::check_state_independent_auth_rules(
 &room_version_rules.authorization,
 &incoming_pdu,
-|k, s| auth_events.get(&(k.to_string().into(), s.to_owned())),
+|event_id| auth_events_by_event_id.get(event_id),
 )
 .is_err()
+|| state_res::check_state_dependent_auth_rules(
+&room_version_rules.authorization,
+&incoming_pdu,
+|k, s| auth_events.get(&(k.to_string().into(), s.to_owned())),
+)
+.is_err()
 {
 return Err(Error::BadRequest(
 ErrorKind::InvalidParam,
@@ -506,7 +505,7 @@ impl Service {
 debug!("Validation successful.");
-// 7. Persist the event as an outlier.
+// 8. Persist the event as an outlier.
 services()
 .rooms
 .outlier
@@ -557,7 +556,7 @@ impl Service {
 .rules()
 .expect("Supported room version has rules");
-// 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities
+// 11. Fetch missing state and auth chain events by calling /state_ids at backwards extremities
 // doing all the checks in this list starting at 1. These are not timeline events.
 // TODO: if we know the prev_events of the incoming event we can avoid the request and build
@@ -807,8 +806,8 @@ impl Service {
 state_at_incoming_event.expect("we always set this to some above");
 debug!("Starting auth check");
-// 11. Check the auth of the event passes based on the state of the event
-if state_res::event_auth::auth_check(
+// 12. Check the auth of the event passes based on the state of the event
+if state_res::check_state_dependent_auth_rules(
 &room_version_rules.authorization,
 &incoming_pdu,
 |k, s| {
@@ -840,7 +839,7 @@ impl Service {
 &room_version_rules.authorization,
 )?;
-let soft_fail = state_res::event_auth::auth_check(
+let soft_fail = state_res::check_state_dependent_auth_rules(
 &room_version_rules.authorization,
 &incoming_pdu,
 |k, s| auth_events.get(&(k.clone(), s.to_owned())),
@@ -891,7 +890,7 @@ impl Service {
 }
 };
-// 13. Use state resolution to find new room state
+// 14. Use state resolution to find new room state
 // We start looking at current room state now, so lets lock the room
 let mutex_state = Arc::clone(
@@ -974,7 +973,7 @@ impl Service {
 .await?;
 }
-// 14. Check if the event passes auth based on the "current state" of the room, if not soft fail it
+// 15. Check if the event passes auth based on the "current state" of the room, if not soft fail it
 debug!("Starting soft fail auth check");
 if soft_fail {
@@ -1399,7 +1398,7 @@ impl Service {
 }
 }
-let sorted = state_res::lexicographical_topological_sort(&graph, |event_id| {
+let sorted = state_res::reverse_topological_power_sort(&graph, |event_id| {
 // This return value is the key used for sorting events,
 // events are then sorted by power level, time,
 // and lexically by event_id.

View file

@@ -169,6 +169,18 @@ impl Service {
 }
 }
+let rules = room_version_id
+.rules()
+.expect("Supported room version has rules");
+debug!("Checking format of join event PDU");
+if let Err(e) = state_res::check_pdu_format(&join_event, &rules.event_format) {
+warn!(
+"Invalid PDU with event ID {event_id} received from `/send_join` response: {e}"
+);
+return Err(Error::BadServerResponse("Received Invalid PDU"));
+}
 services().rooms.short.get_or_create_shortroomid(room_id)?;
 info!("Parsing join event");
@@ -240,28 +252,32 @@ impl Service {
 }
 info!("Running send_join auth check");
-if let Err(e) = state_res::event_auth::auth_check(
-&room_version_id
-.rules()
-.expect("Supported room version has rules")
-.authorization,
-&parsed_join_pdu,
-|k, s| {
-services()
-.rooms
-.timeline
-.get_pdu(
-state.get(
-&services()
-.rooms
-.short
-.get_or_create_shortstatekey(&k.to_string().into(), s)
-.ok()?,
-)?,
-)
-.ok()?
-},
-) {
+if let Err(e) = state_res::check_state_independent_auth_rules(
+&rules.authorization,
+&parsed_join_pdu,
+|event_id| services().rooms.timeline.get_pdu(event_id).ok().flatten(),
+)
+.and_then(|_| {
+state_res::check_state_dependent_auth_rules(
+&rules.authorization,
+&parsed_join_pdu,
+|k, s| {
+services()
+.rooms
+.timeline
+.get_pdu(
+state.get(
+&services()
+.rooms
+.short
+.get_or_create_shortstatekey(&k.to_string().into(), s)
+.ok()?,
+)?,
+)
+.ok()?
+},
+)
+}) {
 warn!("Auth check failed: {e}");
 return Err(Error::BadRequest(
 ErrorKind::InvalidParam,
@@ -674,6 +690,15 @@ async fn validate_and_add_event_id(
 }
 }
+let rules = &room_version
+.rules()
+.expect("Supported room version has rules");
+if let Err(e) = state_res::check_pdu_format(&value, &rules.event_format) {
+warn!("Invalid PDU with event ID {event_id} received from `/send_join` response: {e}");
+return Err(Error::BadServerResponse("Received Invalid PDU"));
+}
 let origin_server_ts = value.get("origin_server_ts").ok_or_else(|| {
 error!("Invalid PDU, no origin_server_ts field");
 Error::BadRequest(
@@ -702,13 +727,7 @@ async fn validate_and_add_event_id(
 .globals
 .filter_keys_server_map(unfiltered_keys, origin_server_ts, room_version);
-if let Err(e) = ruma::signatures::verify_event(
-&keys,
-&value,
-&room_version
-.rules()
-.expect("Supported room version has rules"),
-) {
+if let Err(e) = ruma::signatures::verify_event(&keys, &value, rules) {
 warn!("Event {} failed verification {:?} {}", event_id, pdu, e);
 back_off(event_id).await;
 return Err(Error::BadServerResponse("Event failed verification."));

View file

@@ -87,13 +87,13 @@ impl Service {
 let events_after: Vec<_> = relations_until // TODO: should be relations_after
 .iter()
 .filter(|(_, pdu)| {
-filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
+filter_event_type.as_ref().is_none_or(|t| &pdu.kind == t)
 && if let Ok(content) =
 serde_json::from_str::<ExtractRelatesToEventId>(pdu.content.get())
 {
 filter_rel_type
 .as_ref()
-.map_or(true, |r| &content.relates_to.rel_type == r)
+.is_none_or(|r| &content.relates_to.rel_type == r)
 } else {
 false
 }
@@ -135,13 +135,13 @@ impl Service {
 let events_before: Vec<_> = relations_until
 .iter()
 .filter(|(_, pdu)| {
-filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
+filter_event_type.as_ref().is_none_or(|t| &pdu.kind == t)
 && if let Ok(content) =
 serde_json::from_str::<ExtractRelatesToEventId>(pdu.content.get())
 {
 filter_rel_type
 .as_ref()
-.map_or(true, |r| &content.relates_to.rel_type == r)
+.is_none_or(|r| &content.relates_to.rel_type == r)
 } else {
 false
 }

View file

@@ -11,17 +11,12 @@ pub struct Service {
 impl Service {
 #[tracing::instrument(skip(self))]
-pub fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> {
+pub fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> {
 self.db.index_pdu(shortroomid, pdu_id, message_body)
 }
 #[tracing::instrument(skip(self))]
-pub fn deindex_pdu<'a>(
-&self,
-shortroomid: u64,
-pdu_id: &[u8],
-message_body: &str,
-) -> Result<()> {
+pub fn deindex_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> {
 self.db.deindex_pdu(shortroomid, pdu_id, message_body)
 }

View file

@@ -629,7 +629,7 @@ fn next_room_to_traverse(
 stack: &mut Vec<Vec<(OwnedRoomId, Vec<OwnedServerName>)>>,
 parents: &mut VecDeque<OwnedRoomId>,
 ) -> Option<(OwnedRoomId, Vec<OwnedServerName>)> {
-while stack.last().map_or(false, |s| s.is_empty()) {
+while stack.last().is_some_and(|s| s.is_empty()) {
 stack.pop();
 parents.pop_back();
 }
@@ -663,7 +663,7 @@ async fn get_stripped_space_child_events(
 if serde_json::from_str::<SpaceChildEventContent>(pdu.content.get())
 .ok()
 .map(|c| c.via)
-.map_or(true, |v| v.is_empty())
+.is_none_or(|v| v.is_empty())
 {
 continue;
 }

View file

@@ -171,7 +171,7 @@ impl Service {
 })
 })
 .transpose()?
-.map_or(false, |ignored| {
+.is_some_and(|ignored| {
 ignored
 .content
 .ignored_users
@@ -236,7 +236,7 @@ impl Service {
 }
 #[tracing::instrument(skip(self))]
-pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result<bool> {
+pub fn server_in_room(&self, server: &ServerName, room_id: &RoomId) -> Result<bool> {
 self.db.server_in_room(server, room_id)
 }

View file

@@ -199,7 +199,7 @@ impl Service {
 ///
 /// Returns pdu id
 #[tracing::instrument(skip(self, pdu, pdu_json, leaves))]
-pub async fn append_pdu<'a>(
+pub async fn append_pdu(
 &self,
 pdu: &PduEvent,
 mut pdu_json: CanonicalJsonObject,
@@ -612,8 +612,8 @@ impl Service {
 services().globals.server_name() == pdu.sender.server_name()
 && appservice.is_user_match(&pdu.sender)
 || pdu.kind == TimelineEventType::RoomMember
-&& pdu.state_key.as_ref().map_or(false, |state_key| {
-UserId::parse(state_key).map_or(false, |user_id| {
+&& pdu.state_key.as_ref().is_some_and(|state_key| {
+UserId::parse(state_key).is_ok_and(|user_id| {
 services().globals.server_name() == user_id.server_name()
 && appservice.is_user_match(&user_id)
 })
@@ -633,8 +633,8 @@ impl Service {
 "",
 ) {
 serde_json::from_str::<RoomCanonicalAliasEventContent>(pdu.content.get())
-.map_or(false, |content| {
-content.alias.map_or(false, |alias| {
+.is_ok_and(|content| {
+content.alias.is_some_and(|alias| {
 appservice.aliases.is_match(alias.as_str())
 }) || content
 .alt_aliases
@@ -713,6 +713,10 @@ impl Service {
 &content,
 &room_version_rules.authorization,
 )?;
+let mut auth_events_by_event_id = HashMap::new();
+for event in auth_events.values() {
+auth_events_by_event_id.insert(event.event_id.clone(), event.clone());
+}
 // Our depth is the maximum depth of prev_events + 1
 let depth = prev_events
@@ -769,10 +773,18 @@ impl Service {
 signatures: None,
 };
-if state_res::auth_check(&room_version_rules.authorization, &pdu, |k, s| {
-auth_events.get(&(k.clone(), s.to_owned()))
-})
+if state_res::check_state_independent_auth_rules(
+&room_version_rules.authorization,
+&pdu,
+|event_id| auth_events_by_event_id.get(event_id),
+)
 .is_err()
+|| state_res::check_state_dependent_auth_rules(
+&room_version_rules.authorization,
+&pdu,
+|k, s| auth_events.get(&(k.clone(), s.to_owned())),
+)
+.is_err()
 {
 return Err(Error::BadRequest(
 ErrorKind::forbidden(),
@@ -814,6 +826,14 @@ impl Service {
 }
 }
+if let Err(e) = state_res::check_pdu_format(&pdu_json, &room_version_rules.event_format) {
+warn!("locally constructed event is not a valid PDU: {e}");
+return Err(Error::BadRequest(
+ErrorKind::InvalidParam,
+"Event is invalid",
+));
+}
 // Generate event id
 pdu.event_id = EventId::parse_arc(format!(
 "${}",
@@ -1044,7 +1064,7 @@ impl Service {
 /// Append the incoming event setting the state snapshot to the state from the
 /// server that sent the event.
 #[tracing::instrument(skip_all)]
-pub async fn append_incoming_pdu<'a>(
+pub async fn append_incoming_pdu(
 &self,
 pdu: &PduEvent,
 pdu_json: CanonicalJsonObject,