use std::{
    collections::{BTreeMap, HashMap, HashSet},
    convert::{TryFrom, TryInto},
    mem::size_of,
    sync::{Arc, Mutex},
};

use lru_cache::LruCache;
use member::MembershipState;
use regex::Regex;
use ring::digest;
use rocket::http::RawStr;
use ruma::{
    api::{client::error::ErrorKind, federation},
    events::{
        ignored_user_list, push_rules,
        room::{
            create::CreateEventContent, member, message, power_levels::PowerLevelsEventContent,
        },
        AnyStrippedStateEvent, AnySyncStateEvent, EventType,
    },
    push::{self, Action, Tweak},
    serde::{CanonicalJsonObject, CanonicalJsonValue, Raw},
    state_res::{self, RoomVersion, StateMap},
    uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
};
use tokio::sync::MutexGuard;
use tracing::{error, warn};

pub use edus::RoomEdus;

use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result};

use super::{abstraction::Tree, admin::AdminCommand, pusher};

mod edus;

/// The unique identifier of each state group.
///
/// This is created when a state group is added to the database by
/// hashing the entire state.
pub type StateHashId = Vec<u8>;
pub type CompressedStateEvent = [u8; 2 * size_of::<u64>()];
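
// Illustrative sketch (not part of the original code): a `CompressedStateEvent`
// is just the big-endian `shortstatekey` followed by the big-endian
// `shorteventid`, each `size_of::<u64>()` bytes wide. The helper name below is
// hypothetical and only exists to document this layout.
#[allow(dead_code)]
fn compress_state_event_example(shortstatekey: u64, shorteventid: u64) -> CompressedStateEvent {
    let mut bytes = [0u8; 2 * size_of::<u64>()];
    bytes[..size_of::<u64>()].copy_from_slice(&shortstatekey.to_be_bytes());
    bytes[size_of::<u64>()..].copy_from_slice(&shorteventid.to_be_bytes());
    bytes
}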

pub struct Rooms {
    pub edus: edus::RoomEdus,
    pub(super) pduid_pdu: Arc<dyn Tree>, // PduId = ShortRoomId + Count
    pub(super) eventid_pduid: Arc<dyn Tree>,
    pub(super) roomid_pduleaves: Arc<dyn Tree>,
    pub(super) alias_roomid: Arc<dyn Tree>,
    pub(super) aliasid_alias: Arc<dyn Tree>, // AliasId = RoomId + Count
    pub(super) publicroomids: Arc<dyn Tree>,

    pub(super) tokenids: Arc<dyn Tree>, // TokenId = ShortRoomId + Token + PduIdCount

    /// Participating servers in a room.
    pub(super) roomserverids: Arc<dyn Tree>, // RoomServerId = RoomId + ServerName
    pub(super) serverroomids: Arc<dyn Tree>, // ServerRoomId = ServerName + RoomId

    pub(super) userroomid_joined: Arc<dyn Tree>,
    pub(super) roomuserid_joined: Arc<dyn Tree>,
    pub(super) roomid_joinedcount: Arc<dyn Tree>,
    pub(super) roomuseroncejoinedids: Arc<dyn Tree>,
    pub(super) userroomid_invitestate: Arc<dyn Tree>, // InviteState = Vec<Raw<Pdu>>
    pub(super) roomuserid_invitecount: Arc<dyn Tree>, // InviteCount = Count
    pub(super) userroomid_leftstate: Arc<dyn Tree>,
    pub(super) roomuserid_leftcount: Arc<dyn Tree>,

    pub(super) userroomid_notificationcount: Arc<dyn Tree>, // NotifyCount = u64
    pub(super) userroomid_highlightcount: Arc<dyn Tree>,    // HighlightCount = u64

    /// Remember the current state hash of a room.
    pub(super) roomid_shortstatehash: Arc<dyn Tree>,
    /// Remember the state hash at events in the past.
    pub(super) shorteventid_shortstatehash: Arc<dyn Tree>,
    /// StateKey = EventType + StateKey, ShortStateKey = Count
    pub(super) statekey_shortstatekey: Arc<dyn Tree>,
    pub(super) shortstatekey_statekey: Arc<dyn Tree>,

    pub(super) roomid_shortroomid: Arc<dyn Tree>,

    pub(super) shorteventid_eventid: Arc<dyn Tree>,
    pub(super) eventid_shorteventid: Arc<dyn Tree>,

    pub(super) statehash_shortstatehash: Arc<dyn Tree>,
    // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--),
    // see the encoding sketch after this struct.
    pub(super) shortstatehash_statediff: Arc<dyn Tree>,

    /// EventId -> outlier PDU.
    /// Any PDU that has passed steps 1-8 of the checks for incoming events over
    /// federation (`/federation/send/{txnId}`) but is not part of the timeline yet.
    pub(super) eventid_outlierpdu: Arc<dyn Tree>,

    /// RoomId + EventId of an event that has been referenced as a `prev_event`.
    pub(super) referencedevents: Arc<dyn Tree>,

    pub(super) pdu_cache: Mutex<LruCache<EventId, Arc<PduEvent>>>,
    pub(super) auth_chain_cache: Mutex<LruCache<Vec<u64>, HashSet<u64>>>,
    pub(super) shorteventid_cache: Mutex<LruCache<u64, EventId>>,
    pub(super) eventidshort_cache: Mutex<LruCache<EventId, u64>>,
    pub(super) statekeyshort_cache: Mutex<LruCache<(EventType, String), u64>>,
    pub(super) shortstatekey_cache: Mutex<LruCache<u64, (EventType, String)>>,
    pub(super) stateinfo_cache: Mutex<
        LruCache<
            u64,
            Vec<(
                u64,                           // sstatehash
                HashSet<CompressedStateEvent>, // full state
                HashSet<CompressedStateEvent>, // added
                HashSet<CompressedStateEvent>, // removed
            )>,
        >,
    >,
}
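
// Illustrative sketch (not part of the original code) of the `StateDiff` value
// layout referenced in the struct above: the parent shortstatehash (or 0) in
// big-endian, then the added compressed events, a 0_u64 separator, then the
// removed compressed events. The function name is hypothetical and only
// documents the encoding that `save_state_from_diff` writes and
// `load_shortstatehash_info` parses.
#[allow(dead_code)]
fn encode_statediff_example(
    parent: u64,
    added: &[CompressedStateEvent],
    removed: &[CompressedStateEvent],
) -> Vec<u8> {
    let mut value = parent.to_be_bytes().to_vec();
    for new in added {
        value.extend_from_slice(&new[..]);
    }
    if !removed.is_empty() {
        value.extend_from_slice(&0_u64.to_be_bytes());
        for r in removed {
            value.extend_from_slice(&r[..]);
        }
    }
    value
}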
impl Rooms {
    /// Builds a StateMap by iterating over all keys that start with `state_hash`,
    /// giving the full state for the given `state_hash`.
    #[tracing::instrument(skip(self))]
    pub fn state_full_ids(&self, shortstatehash: u64) -> Result<BTreeMap<u64, EventId>> {
        let full_state = self
            .load_shortstatehash_info(shortstatehash)?
            .pop()
            .expect("there is always one layer")
            .1;
        full_state
            .into_iter()
            .map(|compressed| self.parse_compressed_state_event(compressed))
            .collect()
    }

    #[tracing::instrument(skip(self))]
    pub fn state_full(
        &self,
        shortstatehash: u64,
    ) -> Result<HashMap<(EventType, String), Arc<PduEvent>>> {
        let full_state = self
            .load_shortstatehash_info(shortstatehash)?
            .pop()
            .expect("there is always one layer")
            .1;
        Ok(full_state
            .into_iter()
            .map(|compressed| self.parse_compressed_state_event(compressed))
            .filter_map(|r| r.ok())
            .map(|(_, eventid)| self.get_pdu(&eventid))
            .filter_map(|r| r.ok().flatten())
            .map(|pdu| {
                Ok::<_, Error>((
                    (
                        pdu.kind.clone(),
                        pdu.state_key
                            .as_ref()
                            .ok_or_else(|| Error::bad_database("State event has no state key."))?
                            .clone(),
                    ),
                    pdu,
                ))
            })
            .filter_map(|r| r.ok())
            .collect())
    }

    /// Returns the id of a single event in the state of `shortstatehash` with key
    /// (`event_type`, `state_key`).
    #[tracing::instrument(skip(self))]
    pub fn state_get_id(
        &self,
        shortstatehash: u64,
        event_type: &EventType,
        state_key: &str,
    ) -> Result<Option<EventId>> {
        let shortstatekey = match self.get_shortstatekey(event_type, state_key)? {
            Some(s) => s,
            None => return Ok(None),
        };
        let full_state = self
            .load_shortstatehash_info(shortstatehash)?
            .pop()
            .expect("there is always one layer")
            .1;
        Ok(full_state
            .into_iter()
            .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes()))
            .and_then(|compressed| {
                self.parse_compressed_state_event(compressed)
                    .ok()
                    .map(|(_, id)| id)
            }))
    }

    /// Returns a single PDU from the state of `shortstatehash` with key
    /// (`event_type`, `state_key`).
    #[tracing::instrument(skip(self))]
    pub fn state_get(
        &self,
        shortstatehash: u64,
        event_type: &EventType,
        state_key: &str,
    ) -> Result<Option<Arc<PduEvent>>> {
        self.state_get_id(shortstatehash, event_type, state_key)?
            .map_or(Ok(None), |event_id| self.get_pdu(&event_id))
    }

    /// Returns the state hash for this pdu.
    #[tracing::instrument(skip(self))]
    pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result<Option<u64>> {
        self.eventid_shorteventid
            .get(event_id.as_bytes())?
            .map_or(Ok(None), |shorteventid| {
                self.shorteventid_shortstatehash.get(&shorteventid)?.map_or(
                    Ok::<_, Error>(None),
                    |bytes| {
                        Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| {
                            Error::bad_database(
                                "Invalid shortstatehash bytes in shorteventid_shortstatehash",
                            )
                        })?))
                    },
                )
            })
    }

    /// Returns the last state hash key added to the db for the given room.
    #[tracing::instrument(skip(self))]
    pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result<Option<u64>> {
        self.roomid_shortstatehash
            .get(room_id.as_bytes())?
            .map_or(Ok(None), |bytes| {
                Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| {
                    Error::bad_database("Invalid shortstatehash in roomid_shortstatehash")
                })?))
            })
    }

    /// This fetches auth events from the current state.
    #[tracing::instrument(skip(self))]
    pub fn get_auth_events(
        &self,
        room_id: &RoomId,
        kind: &EventType,
        sender: &UserId,
        state_key: Option<&str>,
        content: &serde_json::Value,
    ) -> Result<StateMap<Arc<PduEvent>>> {
        let shortstatehash =
            if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? {
                current_shortstatehash
            } else {
                return Ok(HashMap::new());
            };

        let auth_events = state_res::auth_types_for_event(
            kind,
            sender,
            state_key.map(|s| s.to_string()),
            content.clone(),
        );

        let mut sauthevents = auth_events
            .into_iter()
            .filter_map(|(event_type, state_key)| {
                self.get_shortstatekey(&event_type, &state_key)
                    .ok()
                    .flatten()
                    .map(|s| (s, (event_type, state_key)))
            })
            .collect::<HashMap<_, _>>();

        let full_state = self
            .load_shortstatehash_info(shortstatehash)?
            .pop()
            .expect("there is always one layer")
            .1;

        Ok(full_state
            .into_iter()
            .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok())
            .filter_map(|(shortstatekey, event_id)| {
                sauthevents.remove(&shortstatekey).map(|k| (k, event_id))
            })
            .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu)))
            .collect())
    }

    /// Generate a new StateHash.
    ///
    /// A unique hash made from hashing all PDU ids of the state joined with 0xff.
    fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId {
        // We only hash the pdu's event ids, not the whole pdu
        let bytes = bytes_list.join(&0xff);
        let hash = digest::digest(&digest::SHA256, &bytes);
        hash.as_ref().into()
    }
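
    // For example, hashing a state of two events concatenates their event id
    // bytes with a single 0xff separator between them and takes the SHA-256
    // digest of the result; the digest bytes become the `StateHashId`.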

    /// Checks if a room exists.
    #[tracing::instrument(skip(self))]
    pub fn exists(&self, room_id: &RoomId) -> Result<bool> {
        let prefix = match self.get_shortroomid(room_id)? {
            Some(b) => b.to_be_bytes().to_vec(),
            None => return Ok(false),
        };

        // Look for PDUs in that room.
        Ok(self
            .pduid_pdu
            .iter_from(&prefix, false)
            .next()
            .filter(|(k, _)| k.starts_with(&prefix))
            .is_some())
    }

    /// Returns the first PDU in the room, if there is one.
    #[tracing::instrument(skip(self))]
    pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result<Option<Arc<PduEvent>>> {
        let prefix = self
            .get_shortroomid(room_id)?
            .expect("room exists")
            .to_be_bytes()
            .to_vec();

        // Look for PDUs in that room.
        self.pduid_pdu
            .iter_from(&prefix, false)
            .filter(|(k, _)| k.starts_with(&prefix))
            .map(|(_, pdu)| {
                serde_json::from_slice(&pdu)
                    .map_err(|_| Error::bad_database("Invalid first PDU in db."))
                    .map(Arc::new)
            })
            .next()
            .transpose()
    }

    /// Force the creation of a new StateHash and insert it into the db.
    ///
    /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot.
    #[tracing::instrument(skip(self, new_state_ids_compressed, db))]
    pub fn force_state(
        &self,
        room_id: &RoomId,
        new_state_ids_compressed: HashSet<CompressedStateEvent>,
        db: &Database,
    ) -> Result<()> {
        let previous_shortstatehash = self.current_shortstatehash(&room_id)?;

        let state_hash = self.calculate_hash(
            &new_state_ids_compressed
                .iter()
                .map(|bytes| &bytes[..])
                .collect::<Vec<_>>(),
        );

        let (new_shortstatehash, already_existed) =
            self.get_or_create_shortstatehash(&state_hash, &db.globals)?;

        if Some(new_shortstatehash) == previous_shortstatehash {
            return Ok(());
        }

        let states_parents = previous_shortstatehash
            .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?;

        let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last()
        {
            let statediffnew = new_state_ids_compressed
                .difference(&parent_stateinfo.1)
                .cloned()
                .collect::<HashSet<_>>();

            let statediffremoved = parent_stateinfo
                .1
                .difference(&new_state_ids_compressed)
                .cloned()
                .collect::<HashSet<_>>();

            (statediffnew, statediffremoved)
        } else {
            (new_state_ids_compressed, HashSet::new())
        };

        if !already_existed {
            self.save_state_from_diff(
                new_shortstatehash,
                statediffnew.clone(),
                statediffremoved.clone(),
                2, // every state change is 2 event changes on average
                states_parents,
            )?;
        };

        for event_id in statediffnew.into_iter().filter_map(|new| {
            self.parse_compressed_state_event(new)
                .ok()
                .map(|(_, id)| id)
        }) {
            if let Some(pdu) = self.get_pdu_json(&event_id)? {
                if pdu.get("type").and_then(|val| val.as_str()) == Some("m.room.member") {
                    if let Ok(pdu) = serde_json::from_value::<PduEvent>(
                        serde_json::to_value(&pdu).expect("CanonicalJsonObj is a valid JsonValue"),
                    ) {
                        if let Some(membership) =
                            pdu.content.get("membership").and_then(|membership| {
                                serde_json::from_value::<member::MembershipState>(
                                    membership.clone(),
                                )
                                .ok()
                            })
                        {
                            if let Some(state_key) = pdu
                                .state_key
                                .and_then(|state_key| UserId::try_from(state_key).ok())
                            {
                                self.update_membership(
                                    room_id,
                                    &state_key,
                                    membership,
                                    &pdu.sender,
                                    None,
                                    db,
                                    false,
                                )?;
                            }
                        }
                    }
                }
            }
        }

        self.update_joined_count(room_id)?;

        self.roomid_shortstatehash
            .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?;

        Ok(())
    }
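
    // Note: `force_state` only persists a new diff layer when the calculated
    // state hash has not been seen before; membership rows and the joined
    // member count are refreshed from the new snapshot either way.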
    /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer.
    #[tracing::instrument(skip(self))]
    pub fn load_shortstatehash_info(
        &self,
        shortstatehash: u64,
    ) -> Result<
        Vec<(
            u64,                           // sstatehash
            HashSet<CompressedStateEvent>, // full state
            HashSet<CompressedStateEvent>, // added
            HashSet<CompressedStateEvent>, // removed
        )>,
    > {
        if let Some(r) = self
            .stateinfo_cache
            .lock()
            .unwrap()
            .get_mut(&shortstatehash)
        {
            return Ok(r.clone());
        }

        let value = self
            .shortstatehash_statediff
            .get(&shortstatehash.to_be_bytes())?
            .ok_or_else(|| Error::bad_database("State hash does not exist"))?;
        let parent =
            utils::u64_from_bytes(&value[0..size_of::<u64>()]).expect("bytes have right length");

        let mut add_mode = true;
        let mut added = HashSet::new();
        let mut removed = HashSet::new();

        let mut i = size_of::<u64>();
        while let Some(v) = value.get(i..i + 2 * size_of::<u64>()) {
            if add_mode && v.starts_with(&0_u64.to_be_bytes()) {
                add_mode = false;
                i += size_of::<u64>();
                continue;
            }
            if add_mode {
                added.insert(v.try_into().expect("we checked the size above"));
            } else {
                removed.insert(v.try_into().expect("we checked the size above"));
            }
            i += 2 * size_of::<u64>();
        }

        if parent != 0_u64 {
            let mut response = self.load_shortstatehash_info(parent)?;
            let mut state = response.last().unwrap().1.clone();
            state.extend(added.iter().cloned());
            for r in &removed {
                state.remove(r);
            }

            response.push((shortstatehash, state, added, removed));

            Ok(response)
        } else {
            let mut response = Vec::new();
            response.push((shortstatehash, added.clone(), added, removed));
            self.stateinfo_cache
                .lock()
                .unwrap()
                .insert(shortstatehash, response.clone());
            Ok(response)
        }
    }

    #[tracing::instrument(skip(self, globals))]
    pub fn compress_state_event(
        &self,
        shortstatekey: u64,
        event_id: &EventId,
        globals: &super::globals::Globals,
    ) -> Result<CompressedStateEvent> {
        let mut v = shortstatekey.to_be_bytes().to_vec();
        v.extend_from_slice(
            &self
                .get_or_create_shorteventid(event_id, globals)?
                .to_be_bytes(),
        );
        Ok(v.try_into().expect("we checked the size above"))
    }

    /// Returns shortstatekey, event id
    #[tracing::instrument(skip(self, compressed_event))]
    pub fn parse_compressed_state_event(
        &self,
        compressed_event: CompressedStateEvent,
    ) -> Result<(u64, EventId)> {
        Ok((
            utils::u64_from_bytes(&compressed_event[0..size_of::<u64>()])
                .expect("bytes have right length"),
            self.get_eventid_from_short(
                utils::u64_from_bytes(&compressed_event[size_of::<u64>()..])
                    .expect("bytes have right length"),
            )?,
        ))
    }

    /// Creates a new shortstatehash that often is just a diff to an already existing
    /// shortstatehash and therefore very efficient.
    ///
    /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer
    /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0
    /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's
    /// based on layer n-2. If that layer is also too big, it recursively fixes the layers above it too.
    ///
    /// * `shortstatehash` - Shortstatehash of this state
    /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid
    /// * `statediffremoved` - Removed from base. Each vec is shortstatekey+shorteventid
    /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer
    /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer
    #[tracing::instrument(skip(
        self,
        statediffnew,
        statediffremoved,
        diff_to_sibling,
        parent_states
    ))]
    pub fn save_state_from_diff(
        &self,
        shortstatehash: u64,
        statediffnew: HashSet<CompressedStateEvent>,
        statediffremoved: HashSet<CompressedStateEvent>,
        diff_to_sibling: usize,
        mut parent_states: Vec<(
            u64,                           // sstatehash
            HashSet<CompressedStateEvent>, // full state
            HashSet<CompressedStateEvent>, // added
            HashSet<CompressedStateEvent>, // removed
        )>,
    ) -> Result<()> {
        let diffsum = statediffnew.len() + statediffremoved.len();

        if parent_states.len() > 3 {
            // Number of layers
            // Too many layers, we have to go deeper
            let parent = parent_states.pop().unwrap();

            let mut parent_new = parent.2;
            let mut parent_removed = parent.3;

            for removed in statediffremoved {
                if !parent_new.remove(&removed) {
                    // It was not added in the parent and we removed it
                    parent_removed.insert(removed);
                }
                // Else it was added in the parent and we removed it again. We can forget this change
            }

            for new in statediffnew {
                if !parent_removed.remove(&new) {
                    // It was not touched in the parent and we added it
                    parent_new.insert(new);
                }
                // Else it was removed in the parent and we added it again. We can forget this change
            }

            self.save_state_from_diff(
                shortstatehash,
                parent_new,
                parent_removed,
                diffsum,
                parent_states,
            )?;

            return Ok(());
        }

        if parent_states.is_empty() {
            // There is no parent layer, create a new state
            let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent
            for new in &statediffnew {
                value.extend_from_slice(&new[..]);
            }

            if !statediffremoved.is_empty() {
                warn!("Tried to create new state with removals");
            }

            self.shortstatehash_statediff
                .insert(&shortstatehash.to_be_bytes(), &value)?;

            return Ok(());
        };

        // Else we have two options.
        // 1. We add the current diff on top of the parent layer.
        // 2. We replace a layer above

        let parent = parent_states.pop().unwrap();
        let parent_diff = parent.2.len() + parent.3.len();

        if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff {
            // Diff too big, we replace above layer(s)
            let mut parent_new = parent.2;
            let mut parent_removed = parent.3;

            for removed in statediffremoved {
                if !parent_new.remove(&removed) {
                    // It was not added in the parent and we removed it
                    parent_removed.insert(removed);
                }
                // Else it was added in the parent and we removed it again. We can forget this change
            }

            for new in statediffnew {
                if !parent_removed.remove(&new) {
                    // It was not touched in the parent and we added it
                    parent_new.insert(new);
                }
                // Else it was removed in the parent and we added it again. We can forget this change
            }

            self.save_state_from_diff(
                shortstatehash,
                parent_new,
                parent_removed,
                diffsum,
                parent_states,
            )?;
        } else {
            // Diff small enough, we add diff as layer on top of parent
            let mut value = parent.0.to_be_bytes().to_vec();
            for new in &statediffnew {
                value.extend_from_slice(&new[..]);
            }

            if !statediffremoved.is_empty() {
                value.extend_from_slice(&0_u64.to_be_bytes());
                for removed in &statediffremoved {
                    value.extend_from_slice(&removed[..]);
                }
            }

            self.shortstatehash_statediff
                .insert(&shortstatehash.to_be_bytes(), &value)?;
        }

        Ok(())
    }
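
    /// Illustrative sketch (not part of the original code) of the compaction
    /// heuristic used above: a diff is merged into its parent layer once its
    /// squared size outgrows the parent diff, weighted by how fast diffs on
    /// this layer have been growing. The function name is hypothetical.
    #[allow(dead_code)]
    fn diff_should_replace_parent(
        diffsum: usize,
        diff_to_sibling: usize,
        parent_diff: usize,
    ) -> bool {
        diffsum * diffsum >= 2 * diff_to_sibling * parent_diff
    }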

    /// Returns (shortstatehash, already_existed)
    #[tracing::instrument(skip(self, globals))]
    fn get_or_create_shortstatehash(
        &self,
        state_hash: &StateHashId,
        globals: &super::globals::Globals,
    ) -> Result<(u64, bool)> {
        Ok(match self.statehash_shortstatehash.get(&state_hash)? {
            Some(shortstatehash) => (
                utils::u64_from_bytes(&shortstatehash)
                    .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?,
                true,
            ),
            None => {
                let shortstatehash = globals.next_count()?;
                self.statehash_shortstatehash
                    .insert(&state_hash, &shortstatehash.to_be_bytes())?;
                (shortstatehash, false)
            }
        })
    }

    #[tracing::instrument(skip(self, globals))]
    pub fn get_or_create_shorteventid(
        &self,
        event_id: &EventId,
        globals: &super::globals::Globals,
    ) -> Result<u64> {
        if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(&event_id) {
            return Ok(*short);
        }

        let short = match self.eventid_shorteventid.get(event_id.as_bytes())? {
            Some(shorteventid) => utils::u64_from_bytes(&shorteventid)
                .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?,
            None => {
                let shorteventid = globals.next_count()?;
                self.eventid_shorteventid
                    .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?;
                self.shorteventid_eventid
                    .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?;
                shorteventid
            }
        };

        self.eventidshort_cache
            .lock()
            .unwrap()
            .insert(event_id.clone(), short);

        Ok(short)
    }

    #[tracing::instrument(skip(self))]
    pub fn get_shortroomid(&self, room_id: &RoomId) -> Result<Option<u64>> {
        self.roomid_shortroomid
            .get(&room_id.as_bytes())?
            .map(|bytes| {
                utils::u64_from_bytes(&bytes)
                    .map_err(|_| Error::bad_database("Invalid shortroomid in db."))
            })
            .transpose()
    }

    #[tracing::instrument(skip(self))]
    pub fn get_shortstatekey(
        &self,
        event_type: &EventType,
        state_key: &str,
    ) -> Result<Option<u64>> {
        if let Some(short) = self
            .statekeyshort_cache
            .lock()
            .unwrap()
            .get_mut(&(event_type.clone(), state_key.to_owned()))
        {
            return Ok(Some(*short));
        }

        let mut statekey = event_type.as_ref().as_bytes().to_vec();
        statekey.push(0xff);
        statekey.extend_from_slice(&state_key.as_bytes());

        let short = self
            .statekey_shortstatekey
            .get(&statekey)?
            .map(|shortstatekey| {
                utils::u64_from_bytes(&shortstatekey)
                    .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))
            })
            .transpose()?;

        if let Some(s) = short {
            self.statekeyshort_cache
                .lock()
                .unwrap()
                .insert((event_type.clone(), state_key.to_owned()), s);
        }

        Ok(short)
    }

    #[tracing::instrument(skip(self, globals))]
    pub fn get_or_create_shortroomid(
        &self,
        room_id: &RoomId,
        globals: &super::globals::Globals,
    ) -> Result<u64> {
        Ok(match self.roomid_shortroomid.get(&room_id.as_bytes())? {
            Some(short) => utils::u64_from_bytes(&short)
                .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?,
            None => {
                let short = globals.next_count()?;
                self.roomid_shortroomid
                    .insert(&room_id.as_bytes(), &short.to_be_bytes())?;
                short
            }
        })
    }

    #[tracing::instrument(skip(self, globals))]
    pub fn get_or_create_shortstatekey(
        &self,
        event_type: &EventType,
        state_key: &str,
        globals: &super::globals::Globals,
    ) -> Result<u64> {
        if let Some(short) = self
            .statekeyshort_cache
            .lock()
            .unwrap()
            .get_mut(&(event_type.clone(), state_key.to_owned()))
        {
            return Ok(*short);
        }

        let mut statekey = event_type.as_ref().as_bytes().to_vec();
        statekey.push(0xff);
        statekey.extend_from_slice(&state_key.as_bytes());

        let short = match self.statekey_shortstatekey.get(&statekey)? {
            Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey)
                .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?,
            None => {
                let shortstatekey = globals.next_count()?;
                self.statekey_shortstatekey
                    .insert(&statekey, &shortstatekey.to_be_bytes())?;
                self.shortstatekey_statekey
                    .insert(&shortstatekey.to_be_bytes(), &statekey)?;
                shortstatekey
            }
        };

        self.statekeyshort_cache
            .lock()
            .unwrap()
            .insert((event_type.clone(), state_key.to_owned()), short);

        Ok(short)
    }

    #[tracing::instrument(skip(self))]
    pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result<EventId> {
        if let Some(id) = self
            .shorteventid_cache
            .lock()
            .unwrap()
            .get_mut(&shorteventid)
        {
            return Ok(id.clone());
        }

        let bytes = self
            .shorteventid_eventid
            .get(&shorteventid.to_be_bytes())?
            .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?;

        let event_id = EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| {
            Error::bad_database("EventID in shorteventid_eventid is invalid unicode.")
        })?)
        .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?;

        self.shorteventid_cache
            .lock()
            .unwrap()
            .insert(shorteventid, event_id.clone());

        Ok(event_id)
    }

    #[tracing::instrument(skip(self))]
    pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(EventType, String)> {
        if let Some(id) = self
            .shortstatekey_cache
            .lock()
            .unwrap()
            .get_mut(&shortstatekey)
        {
            return Ok(id.clone());
        }

        let bytes = self
            .shortstatekey_statekey
            .get(&shortstatekey.to_be_bytes())?
            .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?;

        let mut parts = bytes.splitn(2, |&b| b == 0xff);
        let eventtype_bytes = parts.next().expect("split always returns one entry");
        let statekey_bytes = parts
            .next()
            .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?;

        let event_type =
            EventType::try_from(utils::string_from_bytes(&eventtype_bytes).map_err(|_| {
                Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.")
            })?)
            .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?;

        let state_key = utils::string_from_bytes(&statekey_bytes).map_err(|_| {
            Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.")
        })?;

        let result = (event_type, state_key);

        self.shortstatekey_cache
            .lock()
            .unwrap()
            .insert(shortstatekey, result.clone());

        Ok(result)
    }

    /// Returns the full room state.
    #[tracing::instrument(skip(self))]
    pub fn room_state_full(
        &self,
        room_id: &RoomId,
    ) -> Result<HashMap<(EventType, String), Arc<PduEvent>>> {
        if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? {
            self.state_full(current_shortstatehash)
        } else {
            Ok(HashMap::new())
        }
    }

    /// Returns the id of a single event in the current state of `room_id` with key
    /// (`event_type`, `state_key`).
    #[tracing::instrument(skip(self))]
    pub fn room_state_get_id(
        &self,
        room_id: &RoomId,
        event_type: &EventType,
        state_key: &str,
    ) -> Result<Option<EventId>> {
        if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? {
            self.state_get_id(current_shortstatehash, event_type, state_key)
        } else {
            Ok(None)
        }
    }

    /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`).
    #[tracing::instrument(skip(self))]
    pub fn room_state_get(
        &self,
        room_id: &RoomId,
        event_type: &EventType,
        state_key: &str,
    ) -> Result<Option<Arc<PduEvent>>> {
        if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? {
            self.state_get(current_shortstatehash, event_type, state_key)
        } else {
            Ok(None)
        }
    }

    /// Returns the `count` of this pdu's id.
    #[tracing::instrument(skip(self))]
    pub fn pdu_count(&self, pdu_id: &[u8]) -> Result<u64> {
        Ok(
            utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::<u64>()..])
                .map_err(|_| Error::bad_database("PDU has invalid count bytes."))?,
        )
    }
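
    /// Illustrative sketch (not part of the original code): a `PduId` is the
    /// room's big-endian `shortroomid` followed by a big-endian counter, which
    /// is the layout `pdu_count` reads back out of the last eight bytes. The
    /// function name is hypothetical.
    #[allow(dead_code)]
    fn pdu_id_example(shortroomid: u64, count: u64) -> Vec<u8> {
        let mut pdu_id = shortroomid.to_be_bytes().to_vec();
        pdu_id.extend_from_slice(&count.to_be_bytes());
        pdu_id
    }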

    /// Returns the `count` of this pdu's id.
    #[tracing::instrument(skip(self))]
    pub fn get_pdu_count(&self, event_id: &EventId) -> Result<Option<u64>> {
        self.eventid_pduid
            .get(event_id.as_bytes())?
            .map_or(Ok(None), |pdu_id| self.pdu_count(&pdu_id).map(Some))
    }

    #[tracing::instrument(skip(self))]
    pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result<u64> {
        let prefix = self
            .get_shortroomid(room_id)?
            .expect("room exists")
            .to_be_bytes()
            .to_vec();

        let mut last_possible_key = prefix.clone();
        last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes());

        self.pduid_pdu
            .iter_from(&last_possible_key, true)
            .take_while(move |(k, _)| k.starts_with(&prefix))
            .next()
            .map(|b| self.pdu_count(&b.0))
            .transpose()
            .map(|op| op.unwrap_or_default())
    }

    /// Returns the json of a pdu.
    #[tracing::instrument(skip(self))]
    pub fn get_pdu_json(&self, event_id: &EventId) -> Result<Option<CanonicalJsonObject>> {
        self.eventid_pduid
            .get(event_id.as_bytes())?
            .map_or_else::<Result<_>, _, _>(
                || self.eventid_outlierpdu.get(event_id.as_bytes()),
                |pduid| {
                    Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| {
                        Error::bad_database("Invalid pduid in eventid_pduid.")
                    })?))
                },
            )?
            .map(|pdu| {
                serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))
            })
            .transpose()
    }

    /// Returns the json of a pdu from the outlier tree.
    #[tracing::instrument(skip(self))]
    pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result<Option<CanonicalJsonObject>> {
        self.eventid_outlierpdu
            .get(event_id.as_bytes())?
            .map(|pdu| {
                serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))
            })
            .transpose()
    }

    /// Returns the json of a pdu from the timeline, ignoring outliers.
    #[tracing::instrument(skip(self))]
    pub fn get_non_outlier_pdu_json(
        &self,
        event_id: &EventId,
    ) -> Result<Option<CanonicalJsonObject>> {
        self.eventid_pduid
            .get(event_id.as_bytes())?
            .map_or_else::<Result<_>, _, _>(
                || Ok(None),
                |pduid| {
                    Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| {
                        Error::bad_database("Invalid pduid in eventid_pduid.")
                    })?))
                },
            )?
            .map(|pdu| {
                serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))
            })
            .transpose()
    }

    /// Returns the pdu's id.
    #[tracing::instrument(skip(self))]
    pub fn get_pdu_id(&self, event_id: &EventId) -> Result<Option<Vec<u8>>> {
        self.eventid_pduid
            .get(event_id.as_bytes())?
            .map_or(Ok(None), |pdu_id| Ok(Some(pdu_id)))
    }

    /// Returns the pdu from the timeline.
    ///
    /// This does __NOT__ check the outliers `Tree`.
    #[tracing::instrument(skip(self))]
    pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result<Option<PduEvent>> {
        self.eventid_pduid
            .get(event_id.as_bytes())?
            .map_or_else::<Result<_>, _, _>(
                || Ok(None),
                |pduid| {
                    Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| {
                        Error::bad_database("Invalid pduid in eventid_pduid.")
                    })?))
                },
            )?
            .map(|pdu| {
                serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))
            })
            .transpose()
    }

    /// Returns the pdu.
    ///
    /// Checks the `eventid_outlierpdu` Tree if not found in the timeline.
    #[tracing::instrument(skip(self))]
    pub fn get_pdu(&self, event_id: &EventId) -> Result<Option<Arc<PduEvent>>> {
        if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(&event_id) {
            return Ok(Some(Arc::clone(p)));
        }

        if let Some(pdu) = self
            .eventid_pduid
            .get(event_id.as_bytes())?
            .map_or_else::<Result<_>, _, _>(
                || self.eventid_outlierpdu.get(event_id.as_bytes()),
                |pduid| {
                    Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| {
                        Error::bad_database("Invalid pduid in eventid_pduid.")
                    })?))
                },
            )?
            .map(|pdu| {
                serde_json::from_slice(&pdu)
                    .map_err(|_| Error::bad_database("Invalid PDU in db."))
                    .map(Arc::new)
            })
            .transpose()?
        {
            self.pdu_cache
                .lock()
                .unwrap()
                .insert(event_id.clone(), Arc::clone(&pdu));
            Ok(Some(pdu))
        } else {
            Ok(None)
        }
    }

    /// Returns the pdu.
    ///
    /// This does __NOT__ check the outliers `Tree`.
    #[tracing::instrument(skip(self))]
    pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result<Option<PduEvent>> {
        self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| {
            Ok(Some(
                serde_json::from_slice(&pdu)
                    .map_err(|_| Error::bad_database("Invalid PDU in db."))?,
            ))
        })
    }

    /// Returns the pdu as a `BTreeMap<String, CanonicalJsonValue>`.
    #[tracing::instrument(skip(self))]
    pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result<Option<CanonicalJsonObject>> {
        self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| {
            Ok(Some(
                serde_json::from_slice(&pdu)
                    .map_err(|_| Error::bad_database("Invalid PDU in db."))?,
            ))
        })
    }

    /// Removes a pdu and creates a new one with the same id.
    #[tracing::instrument(skip(self))]
    fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> {
        if self.pduid_pdu.get(&pdu_id)?.is_some() {
            self.pduid_pdu.insert(
                &pdu_id,
                &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"),
            )?;
            Ok(())
        } else {
            Err(Error::BadRequest(
                ErrorKind::NotFound,
                "PDU does not exist.",
            ))
        }
    }

    /// Returns the leaf pdus of a room.
    #[tracing::instrument(skip(self))]
    pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result<HashSet<EventId>> {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        self.roomid_pduleaves
            .scan_prefix(prefix)
            .map(|(_, bytes)| {
                EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| {
                    Error::bad_database("EventID in roomid_pduleaves is invalid unicode.")
                })?)
                .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid."))
            })
            .collect()
    }

    #[tracing::instrument(skip(self, room_id, event_ids))]
    pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> {
        for prev in event_ids {
            let mut key = room_id.as_bytes().to_vec();
            key.extend_from_slice(prev.as_bytes());
            self.referencedevents.insert(&key, &[])?;
        }

        Ok(())
    }
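
    // Note: `referencedevents` keys are the raw room id bytes immediately
    // followed by the referenced event id bytes; `is_event_referenced` below
    // builds the same key to check for a marker.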

    /// Replace the leaves of a room.
    ///
    /// The provided `event_ids` become the new leaves, which allows a room to have multiple
    /// `prev_events`.
    #[tracing::instrument(skip(self))]
    pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) {
            self.roomid_pduleaves.remove(&key)?;
        }

        for event_id in event_ids {
            let mut key = prefix.to_owned();
            key.extend_from_slice(event_id.as_bytes());
            self.roomid_pduleaves.insert(&key, event_id.as_bytes())?;
        }

        Ok(())
    }

    #[tracing::instrument(skip(self))]
    pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result<bool> {
        let mut key = room_id.as_bytes().to_vec();
        key.extend_from_slice(event_id.as_bytes());
        Ok(self.referencedevents.get(&key)?.is_some())
    }

    /// Returns the pdu from the outlier tree.
    #[tracing::instrument(skip(self))]
    pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result<Option<PduEvent>> {
        self.eventid_outlierpdu
            .get(event_id.as_bytes())?
            .map_or(Ok(None), |pdu| {
                serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))
            })
    }

    /// Append the PDU as an outlier.
    ///
    /// Any event given to this will be processed (state-res) on another thread.
    #[tracing::instrument(skip(self, pdu))]
    pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> {
        self.eventid_outlierpdu.insert(
            &event_id.as_bytes(),
            &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"),
        )?;

        Ok(())
    }

    /// Creates a new persisted data unit and adds it to a room.
    ///
    /// By this point the incoming event should be fully authenticated; no auth happens
    /// in `append_pdu`.
    ///
    /// Returns the pdu id.
    #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))]
    pub fn append_pdu(
        &self,
        pdu: &PduEvent,
        mut pdu_json: CanonicalJsonObject,
        leaves: &[EventId],
        db: &Database,
    ) -> Result<Vec<u8>> {
        let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists");

        // Make unsigned fields correct. This is not properly documented in the spec, but state
        // events need to have previous content in the unsigned field, so clients can easily
        // interpret things like membership changes.
        if let Some(state_key) = &pdu.state_key {
            if let CanonicalJsonValue::Object(unsigned) = pdu_json
                .entry("unsigned".to_owned())
                .or_insert_with(|| CanonicalJsonValue::Object(Default::default()))
            {
                if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() {
                    if let Some(prev_state) = self
                        .state_get(shortstatehash, &pdu.kind, &state_key)
                        .unwrap()
                    {
                        unsigned.insert(
                            "prev_content".to_owned(),
                            CanonicalJsonValue::Object(
                                utils::to_canonical_object(prev_state.content.clone())
                                    .expect("event is valid, we just created it"),
                            ),
                        );
                    }
                }
            } else {
                error!("Invalid unsigned type in pdu.");
            }
        }

        // We must keep track of all events that have been referenced.
        self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?;
        self.replace_pdu_leaves(&pdu.room_id, leaves)?;

        let mutex_insert = Arc::clone(
            db.globals
                .roomid_mutex_insert
                .write()
                .unwrap()
                .entry(pdu.room_id.clone())
                .or_default(),
        );
        let insert_lock = mutex_insert.lock().unwrap();

        let count1 = db.globals.next_count()?;
        // Mark as read first so the sending client doesn't get a notification even if appending
        // fails
        self.edus
            .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?;
        self.reset_notification_counts(&pdu.sender, &pdu.room_id)?;

        let count2 = db.globals.next_count()?;
        let mut pdu_id = shortroomid.to_be_bytes().to_vec();
        pdu_id.extend_from_slice(&count2.to_be_bytes());

        // There's a brief moment of time here where the count is updated but the pdu does not
        // exist. This could theoretically lead to dropped pdus, but it's extremely rare.
        //
        // Update: We fixed this using insert_lock

        self.pduid_pdu.insert(
            &pdu_id,
            &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always valid"),
        )?;

        self.eventid_pduid
            .insert(pdu.event_id.as_bytes(), &pdu_id)?;
        self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?;

        drop(insert_lock);

        // See if the event matches any known pushers
        let power_levels: PowerLevelsEventContent = db
            .rooms
            .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")?
            .map(|ev| {
                serde_json::from_value(ev.content.clone())
                    .map_err(|_| Error::bad_database("invalid m.room.power_levels event"))
            })
            .transpose()?
            .unwrap_or_default();

        let sync_pdu = pdu.to_sync_room_event();

        let mut notifies = Vec::new();
        let mut highlights = Vec::new();

        for user in db
            .rooms
            .room_members(&pdu.room_id)
            .filter_map(|r| r.ok())
            .filter(|user_id| user_id.server_name() == db.globals.server_name())
            .filter(|user_id| !db.users.is_deactivated(user_id).unwrap_or(true))
        {
            // Don't notify the user of their own events
            if user == pdu.sender {
                continue;
            }

            let rules_for_user = db
                .account_data
                .get::<push_rules::PushRulesEvent>(None, &user, EventType::PushRules)?
                .map(|ev| ev.content.global)
                .unwrap_or_else(|| push::Ruleset::server_default(&user));

            let mut highlight = false;
            let mut notify = false;

            for action in pusher::get_actions(
                &user,
                &rules_for_user,
                &power_levels,
                &sync_pdu,
                &pdu.room_id,
                db,
            )? {
                match action {
                    Action::DontNotify => notify = false,
                    // TODO: Implement proper support for coalesce
                    Action::Notify | Action::Coalesce => notify = true,
                    Action::SetTweak(Tweak::Highlight(true)) => {
                        highlight = true;
                    }
                    _ => {}
                };
            }

            let mut userroom_id = user.as_bytes().to_vec();
            userroom_id.push(0xff);
            userroom_id.extend_from_slice(pdu.room_id.as_bytes());

            if notify {
                notifies.push(userroom_id.clone());
            }

            if highlight {
                highlights.push(userroom_id);
            }

            for senderkey in db.pusher.get_pusher_senderkeys(&user) {
                db.sending.send_push_pdu(&*pdu_id, senderkey)?;
            }
        }

        self.userroomid_notificationcount
            .increment_batch(&mut notifies.into_iter())?;
        self.userroomid_highlightcount
            .increment_batch(&mut highlights.into_iter())?;

        match pdu.kind {
            EventType::RoomRedaction => {
                if let Some(redact_id) = &pdu.redacts {
                    self.redact_pdu(&redact_id, &pdu)?;
                }
            }
            EventType::RoomMember => {
                if let Some(state_key) = &pdu.state_key {
                    // if the state_key fails
                    let target_user_id = UserId::try_from(state_key.clone())
                        .expect("This state_key was previously validated");

                    let membership = serde_json::from_value::<member::MembershipState>(
                        pdu.content
                            .get("membership")
                            .ok_or(Error::BadRequest(
                                ErrorKind::InvalidParam,
                                "Invalid member event content",
                            ))?
                            .clone(),
                    )
                    .map_err(|_| {
                        Error::BadRequest(
                            ErrorKind::InvalidParam,
                            "Invalid membership state content.",
                        )
                    })?;

                    let invite_state = match membership {
                        member::MembershipState::Invite => {
                            let state = self.calculate_invite_state(pdu)?;
                            Some(state)
                        }
                        _ => None,
                    };

                    // Update our membership info. We do this here in case a user is invited
                    // and immediately leaves: the DB needs to record the invite event for auth.
                    self.update_membership(
                        &pdu.room_id,
                        &target_user_id,
                        membership,
                        &pdu.sender,
                        invite_state,
                        db,
                        true,
                    )?;
                }
            }
            EventType::RoomMessage => {
                if let Some(body) = pdu.content.get("body").and_then(|b| b.as_str()) {
2021-08-04 12:54:26 +02:00
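// Index the message body for server-side search: each lowercased word (at most 50
// characters, split on non-alphanumeric characters) becomes a token key of the form
// shortroomid + word + 0xff + pdu_id.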
let mut batch = body
2020-08-19 18:26:39 +02:00
. split_terminator ( | c : char | ! c . is_alphanumeric ( ) )
2021-08-19 14:05:23 +02:00
. filter ( | s | ! s . is_empty ( ) )
2021-07-29 20:17:47 +02:00
. filter ( | word | word . len ( ) < = 50 )
2020-08-19 18:26:39 +02:00
. map ( str ::to_lowercase )
2021-08-04 12:54:26 +02:00
. map ( | word | {
2021-08-12 23:04:00 +02:00
let mut key = shortroomid . to_be_bytes ( ) . to_vec ( ) ;
2021-08-04 12:54:26 +02:00
key . extend_from_slice ( word . as_bytes ( ) ) ;
key . push ( 0xff ) ;
key . extend_from_slice ( & pdu_id ) ;
( key , Vec ::new ( ) )
} ) ;
self . tokenids . insert_batch ( & mut batch ) ? ;
2020-10-05 22:19:22 +02:00
2021-01-15 11:05:57 -05:00
if body . starts_with ( & format! ( " @conduit: {} : " , db . globals . server_name ( ) ) )
2020-10-05 22:19:22 +02:00
& & self
. id_from_alias (
2021-01-15 11:05:57 -05:00
& format! ( " #admins: {} " , db . globals . server_name ( ) )
2020-10-05 22:19:22 +02:00
. try_into ( )
. expect ( " #admins:server_name is a valid room alias " ) ,
) ?
. as_ref ( )
= = Some ( & pdu . room_id )
{
2020-12-08 10:33:44 +01:00
let mut lines = body . lines ( ) ;
let command_line = lines . next ( ) . expect ( " each string has at least one line " ) ;
let body = lines . collect ::< Vec < _ > > ( ) ;
let mut parts = command_line . split_whitespace ( ) . skip ( 1 ) ;
2020-10-05 22:19:22 +02:00
if let Some ( command ) = parts . next ( ) {
let args = parts . collect ::< Vec < _ > > ( ) ;
2020-12-08 10:33:44 +01:00
match command {
" register_appservice " = > {
if body . len ( ) > 2
& & body [ 0 ] . trim ( ) = = " ``` "
& & body . last ( ) . unwrap ( ) . trim ( ) = = " ``` "
{
let appservice_config = body [ 1 .. body . len ( ) - 1 ] . join ( " \n " ) ;
let parsed_config = serde_yaml ::from_str ::< serde_yaml ::Value > (
& appservice_config ,
) ;
match parsed_config {
Ok ( yaml ) = > {
2021-01-15 11:05:57 -05:00
db . admin
. send ( AdminCommand ::RegisterAppservice ( yaml ) ) ;
2020-12-08 10:33:44 +01:00
}
Err ( e ) = > {
2021-01-15 11:05:57 -05:00
db . admin . send ( AdminCommand ::SendMessage (
2020-12-08 10:33:44 +01:00
message ::MessageEventContent ::text_plain (
format! (
" Could not parse appservice config: {} " ,
e
) ,
) ,
) ) ;
}
}
} else {
2021-01-15 11:05:57 -05:00
db . admin . send ( AdminCommand ::SendMessage (
2020-12-08 10:33:44 +01:00
message ::MessageEventContent ::text_plain (
" Expected code block in command body. " ,
) ,
) ) ;
}
}
" list_appservices " = > {
2021-01-15 11:05:57 -05:00
db . admin . send ( AdminCommand ::ListAppservices ) ;
2020-12-08 10:33:44 +01:00
}
2021-07-15 19:54:04 +02:00
" get_pdu " = > {
if args . len ( ) = = 1 {
if let Ok ( event_id ) = EventId ::try_from ( args [ 0 ] ) {
let mut outlier = false ;
let mut pdu_json =
db . rooms . get_non_outlier_pdu_json ( & event_id ) ? ;
if pdu_json . is_none ( ) {
outlier = true ;
pdu_json = db . rooms . get_pdu_json ( & event_id ) ? ;
}
match pdu_json {
Some ( json ) = > {
2021-08-21 14:38:00 +02:00
let json_text =
serde_json ::to_string_pretty ( & json )
. expect ( " canonical json is valid json " ) ;
2021-07-15 19:54:04 +02:00
db . admin . send ( AdminCommand ::SendMessage (
message ::MessageEventContent ::text_html (
2021-08-21 14:38:00 +02:00
format! ( " {} \n ```json \n {} \n ``` " ,
2021-07-15 19:54:04 +02:00
if outlier {
" PDU is outlier "
2021-08-21 14:38:00 +02:00
} else { " PDU was accepted " } , json_text ) ,
2021-07-15 19:54:04 +02:00
format! ( " <p> {} </p> \n <pre><code class= \" language-json \" > {} \n </code></pre> \n " ,
if outlier {
" PDU is outlier "
2021-08-21 14:38:00 +02:00
} else { " PDU was accepted " } , RawStr ::new ( & json_text ) . html_escape ( ) )
2021-07-15 19:54:04 +02:00
) ,
) ) ;
}
None = > {
db . admin . send ( AdminCommand ::SendMessage (
message ::MessageEventContent ::text_plain (
" PDU not found. " ,
) ,
) ) ;
}
}
} else {
db . admin . send ( AdminCommand ::SendMessage (
message ::MessageEventContent ::text_plain (
" Event ID could not be parsed. " ,
) ,
) ) ;
}
} else {
db . admin . send ( AdminCommand ::SendMessage (
message ::MessageEventContent ::text_plain (
" Usage: get_pdu <eventid> " ,
) ,
) ) ;
}
}
2020-12-08 10:33:44 +01:00
_ = > {
2021-01-15 11:05:57 -05:00
db . admin . send ( AdminCommand ::SendMessage (
2020-12-08 10:33:44 +01:00
message ::MessageEventContent ::text_plain ( format! (
2021-07-15 19:54:04 +02:00
" Unrecognized command: {} " ,
command
2020-12-08 10:33:44 +01:00
) ) ,
) ) ;
}
}
2020-10-05 22:19:22 +02:00
}
}
2020-08-19 18:26:39 +02:00
}
}
2020-08-18 16:26:03 -04:00
_ = > { }
}
2020-09-12 22:13:53 +02:00
2021-07-01 11:06:05 +02:00
Ok ( pdu_id )
2020-08-18 16:26:03 -04:00
}
2021-07-29 08:36:01 +02:00
#[ tracing::instrument(skip(self)) ]
2021-04-12 12:40:16 +02:00
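/// Resets the unread notification and highlight counts for this user in this room to zero.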
pub fn reset_notification_counts ( & self , user_id : & UserId , room_id : & RoomId ) -> Result < ( ) > {
let mut userroom_id = user_id . as_bytes ( ) . to_vec ( ) ;
userroom_id . push ( 0xff ) ;
userroom_id . extend_from_slice ( room_id . as_bytes ( ) ) ;
self . userroomid_notificationcount
. insert ( & userroom_id , & 0_ u64 . to_be_bytes ( ) ) ? ;
self . userroomid_highlightcount
. insert ( & userroom_id , & 0_ u64 . to_be_bytes ( ) ) ? ;
Ok ( ( ) )
}
2021-07-29 08:36:01 +02:00
#[ tracing::instrument(skip(self)) ]
2021-04-12 12:40:16 +02:00
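/// Returns the number of unread notifications for this user in this room.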
pub fn notification_count ( & self , user_id : & UserId , room_id : & RoomId ) -> Result < u64 > {
let mut userroom_id = user_id . as_bytes ( ) . to_vec ( ) ;
userroom_id . push ( 0xff ) ;
userroom_id . extend_from_slice ( room_id . as_bytes ( ) ) ;
self . userroomid_notificationcount
. get ( & userroom_id ) ?
. map ( | bytes | {
utils ::u64_from_bytes ( & bytes )
. map_err ( | _ | Error ::bad_database ( " Invalid notification count in db. " ) )
} )
. unwrap_or ( Ok ( 0 ) )
}
2021-07-29 08:36:01 +02:00
#[ tracing::instrument(skip(self)) ]
2021-04-12 12:40:16 +02:00
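/// Returns the number of unread highlights for this user in this room.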
pub fn highlight_count ( & self , user_id : & UserId , room_id : & RoomId ) -> Result < u64 > {
let mut userroom_id = user_id . as_bytes ( ) . to_vec ( ) ;
userroom_id . push ( 0xff ) ;
userroom_id . extend_from_slice ( room_id . as_bytes ( ) ) ;
self . userroomid_highlightcount
. get ( & userroom_id ) ?
. map ( | bytes | {
utils ::u64_from_bytes ( & bytes )
. map_err ( | _ | Error ::bad_database ( " Invalid highlight count in db. " ) )
} )
. unwrap_or ( Ok ( 0 ) )
}
2021-03-25 23:55:40 +01:00
/// Generates a new StateHash and associates it with the incoming event.
///
/// The full set of compressed state events is stored as a diff against the room's
/// current state, and the incoming event is mapped to the resulting shortstatehash.
2021-08-24 19:10:31 +02:00
#[ tracing::instrument(skip(self, state_ids_compressed, globals)) ]
2021-03-25 23:55:40 +01:00
pub fn set_event_state (
& self ,
event_id : & EventId ,
2021-08-12 23:04:00 +02:00
room_id : & RoomId ,
2021-08-24 19:10:31 +02:00
state_ids_compressed : HashSet < CompressedStateEvent > ,
2021-03-25 23:55:40 +01:00
globals : & super ::globals ::Globals ,
) -> Result < ( ) > {
2021-08-11 19:15:38 +02:00
let shorteventid = self . get_or_create_shorteventid ( & event_id , globals ) ? ;
2021-03-25 23:55:40 +01:00
2021-08-12 23:04:00 +02:00
let previous_shortstatehash = self . current_shortstatehash ( & room_id ) ? ;
2021-03-25 23:55:40 +01:00
let state_hash = self . calculate_hash (
2021-08-24 19:10:31 +02:00
& state_ids_compressed
. iter ( )
. map ( | s | & s [ .. ] )
2021-03-25 23:55:40 +01:00
. collect ::< Vec < _ > > ( ) ,
) ;
2021-08-11 19:15:38 +02:00
let ( shortstatehash , already_existed ) =
self . get_or_create_shortstatehash ( & state_hash , globals ) ? ;
2021-03-25 23:55:40 +01:00
2021-08-11 19:15:38 +02:00
if ! already_existed {
2021-08-12 23:04:00 +02:00
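// This state is new: store only the difference (events added and removed) against
// the closest parent layer, i.e. the room's current state.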
let states_parents = previous_shortstatehash
. map_or_else ( | | Ok ( Vec ::new ( ) ) , | p | self . load_shortstatehash_info ( p ) ) ? ;
let ( statediffnew , statediffremoved ) =
if let Some ( parent_stateinfo ) = states_parents . last ( ) {
let statediffnew = state_ids_compressed
. difference ( & parent_stateinfo . 1 )
. cloned ( )
. collect ::< HashSet < _ > > ( ) ;
let statediffremoved = parent_stateinfo
. 1
. difference ( & state_ids_compressed )
. cloned ( )
. collect ::< HashSet < _ > > ( ) ;
( statediffnew , statediffremoved )
} else {
( state_ids_compressed , HashSet ::new ( ) )
} ;
self . save_state_from_diff (
shortstatehash ,
statediffnew . clone ( ) ,
statediffremoved . clone ( ) ,
1_000_000 , // high number because no state will be based on this one
states_parents ,
) ? ;
2021-08-11 19:15:38 +02:00
}
2021-03-25 23:55:40 +01:00
self . shorteventid_shortstatehash
2021-08-11 19:15:38 +02:00
. insert ( & shorteventid . to_be_bytes ( ) , & shortstatehash . to_be_bytes ( ) ) ? ;
2021-03-25 23:55:40 +01:00
Ok ( ( ) )
}
2020-08-19 17:27:24 -04:00
/// Appends the incoming event to the room's state.
///
/// For state events this creates a new shortstatehash layered on top of the previous
/// state (replacing an existing event with the same state key, if any); for all other
/// events the previous state is reused. Returns the shortstatehash the room state
/// should be set to once the PDU has been appended.
2021-07-29 08:36:01 +02:00
#[ tracing::instrument(skip(self, new_pdu, globals)) ]
2020-12-19 16:00:11 +01:00
pub fn append_to_state (
& self ,
new_pdu : & PduEvent ,
globals : & super ::globals ::Globals ,
2021-03-17 22:30:25 +01:00
) -> Result < u64 > {
2021-08-11 19:15:38 +02:00
let shorteventid = self . get_or_create_shorteventid ( & new_pdu . event_id , globals ) ? ;
2021-08-12 23:04:00 +02:00
let previous_shortstatehash = self . current_shortstatehash ( & new_pdu . room_id ) ? ;
2021-08-11 19:15:38 +02:00
2021-08-12 23:04:00 +02:00
if let Some ( p ) = previous_shortstatehash {
self . shorteventid_shortstatehash
. insert ( & shorteventid . to_be_bytes ( ) , & p . to_be_bytes ( ) ) ? ;
}
2021-03-17 22:30:25 +01:00
2020-09-12 21:30:07 +02:00
if let Some ( state_key ) = & new_pdu . state_key {
2021-08-12 23:04:00 +02:00
let states_parents = previous_shortstatehash
. map_or_else ( | | Ok ( Vec ::new ( ) ) , | p | self . load_shortstatehash_info ( p ) ) ? ;
2020-12-19 16:00:11 +01:00
2021-08-12 23:04:00 +02:00
let shortstatekey =
self . get_or_create_shortstatekey ( & new_pdu . kind , & state_key , globals ) ? ;
2020-09-12 21:30:07 +02:00
2021-08-16 23:24:52 +02:00
let new = self . compress_state_event ( shortstatekey , & new_pdu . event_id , globals ) ? ;
2021-08-12 23:04:00 +02:00
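// Find the compressed state event this one replaces: the entry in the current state
// that starts with the same shortstatekey.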
let replaces = states_parents
. last ( )
. map ( | info | {
info . 1
. iter ( )
. find ( | bytes | bytes . starts_with ( & shortstatekey . to_be_bytes ( ) ) )
} )
. unwrap_or_default ( ) ;
2020-08-19 17:27:24 -04:00
2021-08-16 23:24:52 +02:00
if Some ( & new ) = = replaces {
return Ok ( previous_shortstatehash . expect ( " must exist " ) ) ;
}
2021-08-12 23:04:00 +02:00
// TODO: statehash with deterministic inputs
let shortstatehash = globals . next_count ( ) ? ;
2020-08-19 17:27:24 -04:00
2021-08-12 23:04:00 +02:00
let mut statediffnew = HashSet ::new ( ) ;
statediffnew . insert ( new ) ;
let mut statediffremoved = HashSet ::new ( ) ;
if let Some ( replaces ) = replaces {
statediffremoved . insert ( replaces . clone ( ) ) ;
}
2021-08-03 16:14:07 +02:00
2021-08-12 23:04:00 +02:00
self . save_state_from_diff (
shortstatehash ,
statediffnew ,
statediffremoved ,
2 ,
states_parents ,
) ? ;
2020-08-19 17:27:24 -04:00
2021-03-17 22:30:25 +01:00
Ok ( shortstatehash )
2020-09-12 21:30:07 +02:00
} else {
2021-08-12 23:04:00 +02:00
Ok ( previous_shortstatehash . expect ( " first event in room must be a state event " ) )
2020-09-12 21:30:07 +02:00
}
2020-08-19 17:27:24 -04:00
}
2021-07-29 08:36:01 +02:00
#[ tracing::instrument(skip(self, invite_event)) ]
2021-04-25 14:10:07 +02:00
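/// Builds the stripped state that accompanies an invite: the create event, join rules,
/// canonical alias, avatar, name, the sender's membership and the invite event itself.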
pub fn calculate_invite_state (
& self ,
invite_event : & PduEvent ,
) -> Result < Vec < Raw < AnyStrippedStateEvent > > > {
let mut state = Vec ::new ( ) ;
// Add recommended events
2021-05-21 22:22:05 +02:00
if let Some ( e ) = self . room_state_get ( & invite_event . room_id , & EventType ::RoomCreate , " " ) ? {
state . push ( e . to_stripped_state_event ( ) ) ;
}
2021-04-25 14:10:07 +02:00
if let Some ( e ) =
self . room_state_get ( & invite_event . room_id , & EventType ::RoomJoinRules , " " ) ?
{
state . push ( e . to_stripped_state_event ( ) ) ;
}
if let Some ( e ) =
self . room_state_get ( & invite_event . room_id , & EventType ::RoomCanonicalAlias , " " ) ?
{
state . push ( e . to_stripped_state_event ( ) ) ;
}
if let Some ( e ) = self . room_state_get ( & invite_event . room_id , & EventType ::RoomAvatar , " " ) ? {
state . push ( e . to_stripped_state_event ( ) ) ;
}
if let Some ( e ) = self . room_state_get ( & invite_event . room_id , & EventType ::RoomName , " " ) ? {
state . push ( e . to_stripped_state_event ( ) ) ;
}
if let Some ( e ) = self . room_state_get (
& invite_event . room_id ,
& EventType ::RoomMember ,
invite_event . sender . as_str ( ) ,
) ? {
state . push ( e . to_stripped_state_event ( ) ) ;
}
state . push ( invite_event . to_stripped_state_event ( ) ) ;
Ok ( state )
}
2021-07-29 08:36:01 +02:00
#[ tracing::instrument(skip(self)) ]
2021-03-17 22:30:25 +01:00
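/// Makes the state identified by `shortstatehash` the current state of the room.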
pub fn set_room_state ( & self , room_id : & RoomId , shortstatehash : u64 ) -> Result < ( ) > {
self . roomid_shortstatehash
. insert ( room_id . as_bytes ( ) , & shortstatehash . to_be_bytes ( ) ) ? ;
2020-12-31 14:52:08 +01:00
Ok ( ( ) )
}
2020-08-18 16:26:03 -04:00
/// Creates a new persisted data unit and adds it to a room.
2021-07-29 08:36:01 +02:00
#[ tracing::instrument(skip(self, db, _mutex_lock)) ]
2020-10-05 22:19:22 +02:00
pub fn build_and_append_pdu (
2020-05-03 17:25:31 +02:00
& self ,
2020-07-28 09:00:23 -04:00
pdu_builder : PduBuilder ,
2020-09-12 21:30:07 +02:00
sender : & UserId ,
room_id : & RoomId ,
2021-01-15 11:05:57 -05:00
db : & Database ,
2021-07-13 15:44:25 +02:00
_mutex_lock : & MutexGuard < '_ , ( ) > , // Take mutex guard to make sure users get the room mutex
2020-05-03 17:25:31 +02:00
) -> Result < EventId > {
2020-07-28 09:00:23 -04:00
let PduBuilder {
event_type ,
content ,
unsigned ,
state_key ,
redacts ,
} = pdu_builder ;
2021-07-01 19:55:26 +02:00
2021-03-25 23:55:40 +01:00
let prev_events = self
. get_pdu_leaves ( & room_id ) ?
. into_iter ( )
. take ( 20 )
. collect ::< Vec < _ > > ( ) ;
let create_event = self . room_state_get ( & room_id , & EventType ::RoomCreate , " " ) ? ;
let create_event_content = create_event
. as_ref ( )
. map ( | create_event | {
2021-06-17 20:34:14 +02:00
serde_json ::from_value ::< Raw < CreateEventContent > > ( create_event . content . clone ( ) )
. expect ( " Raw::from_value always works. " )
. deserialize ( )
2021-08-22 13:00:36 +02:00
. map_err ( | e | {
warn! ( " Invalid create event: {} " , e ) ;
Error ::bad_database ( " Invalid create event in db. " )
} )
2021-03-25 23:55:40 +01:00
} )
. transpose ( ) ? ;
let create_prev_event = if prev_events . len ( ) = = 1
& & Some ( & prev_events [ 0 ] ) = = create_event . as_ref ( ) . map ( | c | & c . event_id )
{
2021-06-30 09:52:01 +02:00
create_event
2021-03-25 23:55:40 +01:00
} else {
None
} ;
2021-03-26 11:10:45 +01:00
// If there was no create event yet, assume we are creating a version 6 room right now
2021-05-08 01:54:24 +02:00
let room_version_id = create_event_content
. map_or ( RoomVersionId ::Version6 , | create_event | {
create_event . room_version
} ) ;
let room_version = RoomVersion ::new ( & room_version_id ) . expect ( " room version is supported " ) ;
2020-05-24 18:25:52 +02:00
2020-08-06 08:29:59 -04:00
let auth_events = self . get_auth_events (
& room_id ,
& event_type ,
& sender ,
state_key . as_deref ( ) ,
2021-04-16 18:18:29 +02:00
& content ,
2020-08-06 08:29:59 -04:00
) ? ;
2020-05-03 17:25:31 +02:00
// Our depth is the maximum depth of prev_events + 1
let depth = prev_events
. iter ( )
2021-04-11 21:01:27 +02:00
. filter_map ( | event_id | Some ( self . get_pdu ( event_id ) . ok ( ) ? ? . depth ) )
2020-05-03 17:25:31 +02:00
. max ( )
2021-04-14 10:43:31 +02:00
. unwrap_or_else ( | | uint! ( 0 ) )
2021-04-11 21:01:27 +02:00
+ uint! ( 1 ) ;
2020-05-03 17:25:31 +02:00
let mut unsigned = unsigned . unwrap_or_default ( ) ;
if let Some ( state_key ) = & state_key {
2021-03-17 22:30:25 +01:00
if let Some ( prev_pdu ) = self . room_state_get ( & room_id , & event_type , & state_key ) ? {
2021-06-30 09:52:01 +02:00
unsigned . insert ( " prev_content " . to_owned ( ) , prev_pdu . content . clone ( ) ) ;
2020-06-26 10:07:02 +02:00
unsigned . insert (
" prev_sender " . to_owned ( ) ,
2021-06-30 09:52:01 +02:00
serde_json ::to_value ( & prev_pdu . sender ) . expect ( " UserId::to_value always works " ) ,
2020-06-26 10:07:02 +02:00
) ;
2020-05-03 17:25:31 +02:00
}
}
let mut pdu = PduEvent {
2020-10-27 19:10:09 -04:00
event_id : ruma ::event_id! ( " $thiswillbefilledinlater " ) ,
2020-09-12 21:30:07 +02:00
room_id : room_id . clone ( ) ,
sender : sender . clone ( ) ,
2020-05-03 17:25:31 +02:00
origin_server_ts : utils ::millis_since_unix_epoch ( )
. try_into ( )
2020-06-09 15:13:17 +02:00
. expect ( " time is valid " ) ,
2020-08-18 16:26:03 -04:00
kind : event_type ,
content ,
state_key ,
2020-05-03 17:25:31 +02:00
prev_events ,
2021-04-11 21:01:27 +02:00
depth ,
2020-08-06 08:29:59 -04:00
auth_events : auth_events
2021-03-25 23:55:40 +01:00
. iter ( )
. map ( | ( _ , pdu ) | pdu . event_id . clone ( ) )
2020-08-06 08:29:59 -04:00
. collect ( ) ,
2020-08-18 16:26:03 -04:00
redacts ,
2020-05-03 17:25:31 +02:00
unsigned ,
2020-06-21 15:58:42 -04:00
hashes : ruma ::events ::pdu ::EventHash {
2020-05-03 17:25:31 +02:00
sha256 : " aaa " . to_owned ( ) ,
} ,
2020-09-12 21:30:07 +02:00
signatures : BTreeMap ::new ( ) ,
2020-05-03 17:25:31 +02:00
} ;
2021-03-26 13:41:05 +01:00
let auth_check = state_res ::auth_check (
2021-03-25 23:55:40 +01:00
& room_version ,
& Arc ::new ( pdu . clone ( ) ) ,
create_prev_event ,
None , // TODO: third_party_invite
2021-08-24 19:10:31 +02:00
| k , s | auth_events . get ( & ( k . clone ( ) , s . to_owned ( ) ) ) . map ( Arc ::clone ) ,
2021-03-25 23:55:40 +01:00
)
. map_err ( | e | {
error! ( " {:?} " , e ) ;
Error ::bad_database ( " Auth check failed. " )
2021-03-26 13:41:05 +01:00
} ) ? ;
if ! auth_check {
2021-03-25 23:55:40 +01:00
return Err ( Error ::BadRequest (
2021-05-21 18:12:02 +02:00
ErrorKind ::Forbidden ,
2021-03-25 23:55:40 +01:00
" Event is not authorized. " ,
) ) ;
}
2020-09-14 20:23:19 +02:00
// Hash and sign
2020-11-18 08:36:12 -05:00
let mut pdu_json =
utils ::to_canonical_object ( & pdu ) . expect ( " event is valid, we just created it " ) ;
2020-10-27 19:10:09 -04:00
pdu_json . remove ( " event_id " ) ;
2020-09-14 20:23:19 +02:00
2020-09-16 10:49:54 +02:00
// Add origin because synapse likes that (and it's required in the spec)
2020-10-27 19:10:09 -04:00
pdu_json . insert (
" origin " . to_owned ( ) ,
2021-04-26 18:20:20 +02:00
CanonicalJsonValue ::String ( db . globals . server_name ( ) . as_ref ( ) . to_owned ( ) ) ,
2020-10-27 19:10:09 -04:00
) ;
2020-09-16 10:49:54 +02:00
2020-09-14 20:23:19 +02:00
ruma ::signatures ::hash_and_sign_event (
2021-01-15 11:05:57 -05:00
db . globals . server_name ( ) . as_str ( ) ,
db . globals . keypair ( ) ,
2020-09-14 20:23:19 +02:00
& mut pdu_json ,
2021-05-08 01:54:24 +02:00
& room_version_id ,
2020-09-14 20:23:19 +02:00
)
. expect ( " event is valid, we just created it " ) ;
2020-05-03 17:25:31 +02:00
// Generate event id
pdu . event_id = EventId ::try_from ( & * format! (
" ${} " ,
2021-05-08 01:54:24 +02:00
ruma ::signatures ::reference_hash ( & pdu_json , & room_version_id )
2020-09-14 20:23:19 +02:00
. expect ( " ruma can calculate reference hashes " )
2020-05-03 17:25:31 +02:00
) )
2020-06-09 15:13:17 +02:00
. expect ( " ruma's reference hashes are valid event ids " ) ;
2020-05-03 17:25:31 +02:00
2020-10-27 19:10:09 -04:00
pdu_json . insert (
" event_id " . to_owned ( ) ,
2021-04-26 18:20:20 +02:00
CanonicalJsonValue ::String ( pdu . event_id . as_str ( ) . to_owned ( ) ) ,
2020-10-27 19:10:09 -04:00
) ;
2020-09-14 20:23:19 +02:00
2021-07-29 12:33:44 +02:00
// Generate short event id
2021-08-11 19:15:38 +02:00
let _shorteventid = self . get_or_create_shorteventid ( & pdu . event_id , & db . globals ) ? ;
2021-07-29 12:33:44 +02:00
2020-10-18 08:56:21 +02:00
// We append to state before appending the pdu, so we don't have a moment in time with the
// pdu without its state. This is okay because append_pdu can't fail.
2021-03-17 22:30:25 +01:00
let statehashid = self . append_to_state ( & pdu , & db . globals ) ? ;
2021-01-15 11:05:57 -05:00
2021-07-01 11:06:05 +02:00
let pdu_id = self . append_pdu (
2021-01-18 19:41:38 -05:00
& pdu ,
pdu_json ,
// Since this PDU references all pdu_leaves we can update the leaves
// of the room
& [ pdu . event_id . clone ( ) ] ,
db ,
) ? ;
2020-10-18 08:56:21 +02:00
2020-12-31 14:52:08 +01:00
// We set the room state after inserting the pdu, so that we never have a moment in time
// where events in the current room state do not exist
2021-03-17 22:30:25 +01:00
self . set_room_state ( & room_id , statehashid ) ? ;
2020-12-31 14:52:08 +01:00
2020-09-15 16:13:54 +02:00
for server in self
. room_servers ( room_id )
. filter_map ( | r | r . ok ( ) )
2021-01-15 11:05:57 -05:00
. filter ( | server | & * * server ! = db . globals . server_name ( ) )
2020-09-15 16:13:54 +02:00
{
2021-01-15 11:05:57 -05:00
db . sending . send_pdu ( & server , & pdu_id ) ? ;
2020-09-15 08:16:20 +02:00
}
2020-09-14 20:23:19 +02:00
2021-08-02 10:13:34 +02:00
for appservice in db . appservice . all ( ) ? {
2020-12-23 19:41:54 +01:00
if let Some ( namespaces ) = appservice . 1. get ( " namespaces " ) {
let users = namespaces
. get ( " users " )
. and_then ( | users | users . as_sequence ( ) )
2021-01-15 11:05:57 -05:00
. map_or_else ( Vec ::new , | users | {
users
. iter ( )
2021-06-17 20:37:07 +02:00
. filter_map ( | users | Regex ::new ( users . get ( " regex " ) ? . as_str ( ) ? ) . ok ( ) )
2021-01-15 11:05:57 -05:00
. collect ::< Vec < _ > > ( )
} ) ;
2020-12-23 19:41:54 +01:00
let aliases = namespaces
. get ( " aliases " )
2021-03-18 12:03:15 +01:00
. and_then ( | aliases | aliases . as_sequence ( ) )
. map_or_else ( Vec ::new , | aliases | {
aliases
. iter ( )
2021-06-17 20:37:07 +02:00
. filter_map ( | aliases | Regex ::new ( aliases . get ( " regex " ) ? . as_str ( ) ? ) . ok ( ) )
2021-03-18 12:03:15 +01:00
. collect ::< Vec < _ > > ( )
} ) ;
2020-12-23 19:41:54 +01:00
let rooms = namespaces
. get ( " rooms " )
. and_then ( | rooms | rooms . as_sequence ( ) ) ;
let bridge_user_id = appservice
. 1
. get ( " sender_localpart " )
. and_then ( | string | string . as_str ( ) )
. and_then ( | string | {
2021-01-15 11:05:57 -05:00
UserId ::parse_with_server_name ( string , db . globals . server_name ( ) ) . ok ( )
2020-12-23 19:41:54 +01:00
} ) ;
2021-01-15 11:05:57 -05:00
let user_is_joined =
| bridge_user_id | self . is_joined ( & bridge_user_id , room_id ) . unwrap_or ( false ) ;
2021-03-04 08:39:16 -05:00
2021-01-15 11:05:57 -05:00
let matching_users = | users : & Regex | {
2020-12-31 14:52:08 +01:00
users . is_match ( pdu . sender . as_str ( ) )
| | pdu . kind = = EventType ::RoomMember
& & pdu
. state_key
. as_ref ( )
. map_or ( false , | state_key | users . is_match ( & state_key ) )
2021-04-13 15:00:45 +02:00
| | self . room_members ( & room_id ) . any ( | userid | {
2021-03-23 19:46:54 +01:00
userid . map_or ( false , | userid | users . is_match ( userid . as_str ( ) ) )
} )
2021-01-15 11:05:57 -05:00
} ;
2021-03-18 12:03:15 +01:00
let matching_aliases = | aliases : & Regex | {
self . room_aliases ( & room_id )
2020-12-23 19:41:54 +01:00
. filter_map ( | r | r . ok ( ) )
. any ( | room_alias | aliases . is_match ( room_alias . as_str ( ) ) )
2021-01-15 11:05:57 -05:00
} ;
if bridge_user_id . map_or ( false , user_is_joined )
2021-03-18 12:03:15 +01:00
| | aliases . iter ( ) . any ( matching_aliases )
2021-01-15 11:05:57 -05:00
| | rooms . map_or ( false , | rooms | rooms . contains ( & room_id . as_str ( ) . into ( ) ) )
2021-03-23 19:46:54 +01:00
| | users . iter ( ) . any ( matching_users )
2020-12-23 19:41:54 +01:00
{
2021-01-15 11:05:57 -05:00
db . sending . send_pdu_appservice ( & appservice . 0 , & pdu_id ) ? ;
2020-12-23 19:41:54 +01:00
}
}
2020-12-08 10:33:44 +01:00
}
2020-09-13 22:24:36 +02:00
Ok ( pdu . event_id )
2020-05-03 17:25:31 +02:00
}
/// Returns an iterator over all PDUs in a room.
2021-02-28 12:41:03 +01:00
#[ tracing::instrument(skip(self)) ]
2021-06-08 18:10:00 +02:00
pub fn all_pdus < ' a > (
& ' a self ,
2020-06-16 12:11:38 +02:00
user_id : & UserId ,
room_id : & RoomId ,
2021-08-12 23:04:00 +02:00
) -> Result < impl Iterator < Item = Result < ( Vec < u8 > , PduEvent ) > > + ' a > {
2020-06-16 12:11:38 +02:00
self . pdus_since ( user_id , room_id , 0 )
2020-05-03 17:25:31 +02:00
}
2021-06-08 18:10:00 +02:00
/// Returns an iterator over all events in a room that happened after the event with id `since`
2020-07-29 17:37:26 +02:00
/// in chronological order.
2021-02-28 12:41:03 +01:00
#[ tracing::instrument(skip(self)) ]
2021-06-08 18:10:00 +02:00
pub fn pdus_since < ' a > (
& ' a self ,
2020-06-16 12:11:38 +02:00
user_id : & UserId ,
2020-05-03 17:25:31 +02:00
room_id : & RoomId ,
since : u64 ,
2021-08-12 23:04:00 +02:00
) -> Result < impl Iterator < Item = Result < ( Vec < u8 > , PduEvent ) > > + ' a > {
2021-08-14 19:47:49 +02:00
let prefix = self
. get_shortroomid ( room_id ) ?
. expect ( " room exists " )
. to_be_bytes ( )
. to_vec ( ) ;
2020-05-03 17:25:31 +02:00
2020-07-29 17:03:04 +02:00
// Skip the first pdu if it's exactly at since, because we sent that last time
let mut first_pdu_id = prefix . clone ( ) ;
2020-07-29 17:37:26 +02:00
first_pdu_id . extend_from_slice ( & ( since + 1 ) . to_be_bytes ( ) ) ;
2020-07-29 17:03:04 +02:00
2020-06-16 12:11:38 +02:00
let user_id = user_id . clone ( ) ;
2021-08-12 23:04:00 +02:00
Ok ( self
. pduid_pdu
2021-06-08 18:10:00 +02:00
. iter_from ( & first_pdu_id , false )
. take_while ( move | ( k , _ ) | k . starts_with ( & prefix ) )
2020-09-16 21:11:38 +02:00
. map ( move | ( pdu_id , v ) | {
2020-06-16 12:11:38 +02:00
let mut pdu = serde_json ::from_slice ::< PduEvent > ( & v )
. map_err ( | _ | Error ::bad_database ( " PDU in db is invalid. " ) ) ? ;
if pdu . sender ! = user_id {
pdu . unsigned . remove ( " transaction_id " ) ;
}
2020-09-16 21:11:38 +02:00
Ok ( ( pdu_id , pdu ) )
2021-08-12 23:04:00 +02:00
} ) )
2020-05-03 17:25:31 +02:00
}
2020-07-26 17:34:12 +02:00
/// Returns an iterator over all events and their tokens in a room that happened before the
/// event with id `until` in reverse-chronological order.
2021-07-29 08:36:01 +02:00
#[ tracing::instrument(skip(self)) ]
2021-06-08 18:10:00 +02:00
pub fn pdus_until < ' a > (
& ' a self ,
2020-06-16 12:11:38 +02:00
user_id : & UserId ,
2020-05-03 17:25:31 +02:00
room_id : & RoomId ,
until : u64 ,
2021-08-12 23:04:00 +02:00
) -> Result < impl Iterator < Item = Result < ( Vec < u8 > , PduEvent ) > > + ' a > {
2020-05-03 17:25:31 +02:00
// Create the first part of the full pdu id
2021-08-14 19:47:49 +02:00
let prefix = self
. get_shortroomid ( room_id ) ?
. expect ( " room exists " )
. to_be_bytes ( )
. to_vec ( ) ;
2020-05-03 17:25:31 +02:00
let mut current = prefix . clone ( ) ;
2021-06-30 20:31:51 +02:00
current . extend_from_slice ( & ( until . saturating_sub ( 1 ) ) . to_be_bytes ( ) ) ; // -1 because we don't want event at `until`
2020-05-03 17:25:31 +02:00
let current : & [ u8 ] = & current ;
2020-06-16 12:11:38 +02:00
let user_id = user_id . clone ( ) ;
2021-08-12 23:04:00 +02:00
Ok ( self
. pduid_pdu
2021-06-08 18:10:00 +02:00
. iter_from ( current , true )
2020-05-03 17:25:31 +02:00
. take_while ( move | ( k , _ ) | k . starts_with ( & prefix ) )
2020-09-17 14:44:47 +02:00
. map ( move | ( pdu_id , v ) | {
2020-06-16 12:11:38 +02:00
let mut pdu = serde_json ::from_slice ::< PduEvent > ( & v )
. map_err ( | _ | Error ::bad_database ( " PDU in db is invalid. " ) ) ? ;
if pdu . sender ! = user_id {
pdu . unsigned . remove ( " transaction_id " ) ;
}
2020-09-17 14:44:47 +02:00
Ok ( ( pdu_id , pdu ) )
2021-08-12 23:04:00 +02:00
} ) )
2020-05-03 17:25:31 +02:00
}
2020-07-26 17:34:12 +02:00
/// Returns an iterator over all events and their tokens in a room that happened after the event
/// with id `from` in chronological order.
2021-02-28 12:41:03 +01:00
#[ tracing::instrument(skip(self)) ]
2021-06-08 18:10:00 +02:00
pub fn pdus_after < ' a > (
& ' a self ,
2020-06-16 12:11:38 +02:00
user_id : & UserId ,
2020-06-04 13:58:55 +02:00
room_id : & RoomId ,
from : u64 ,
2021-08-12 23:04:00 +02:00
) -> Result < impl Iterator < Item = Result < ( Vec < u8 > , PduEvent ) > > + ' a > {
2020-06-04 13:58:55 +02:00
// Create the first part of the full pdu id
2021-08-14 19:47:49 +02:00
let prefix = self
. get_shortroomid ( room_id ) ?
. expect ( " room exists " )
. to_be_bytes ( )
. to_vec ( ) ;
2020-06-04 13:58:55 +02:00
let mut current = prefix . clone ( ) ;
current . extend_from_slice ( & ( from + 1 ) . to_be_bytes ( ) ) ; // +1 so we don't send the base event
let current : & [ u8 ] = & current ;
2020-06-16 12:11:38 +02:00
let user_id = user_id . clone ( ) ;
2021-08-12 23:04:00 +02:00
Ok ( self
. pduid_pdu
2021-06-08 18:10:00 +02:00
. iter_from ( current , false )
2020-06-04 13:58:55 +02:00
. take_while ( move | ( k , _ ) | k . starts_with ( & prefix ) )
2020-09-17 14:44:47 +02:00
. map ( move | ( pdu_id , v ) | {
2020-06-16 12:11:38 +02:00
let mut pdu = serde_json ::from_slice ::< PduEvent > ( & v )
. map_err ( | _ | Error ::bad_database ( " PDU in db is invalid. " ) ) ? ;
if pdu . sender ! = user_id {
pdu . unsigned . remove ( " transaction_id " ) ;
}
2020-09-17 14:44:47 +02:00
Ok ( ( pdu_id , pdu ) )
2021-08-12 23:04:00 +02:00
} ) )
2020-06-04 13:58:55 +02:00
}
2020-05-26 10:27:51 +02:00
/// Replace a PDU with the redacted form.
2021-07-29 08:36:01 +02:00
#[ tracing::instrument(skip(self, reason)) ]
2020-08-23 16:47:27 +02:00
pub fn redact_pdu ( & self , event_id : & EventId , reason : & PduEvent ) -> Result < ( ) > {
2020-05-26 10:27:51 +02:00
if let Some ( pdu_id ) = self . get_pdu_id ( event_id ) ? {
let mut pdu = self
. get_pdu_from_id ( & pdu_id ) ?
2020-06-11 10:03:08 +02:00
. ok_or_else ( | | Error ::bad_database ( " PDU ID points to invalid PDU. " ) ) ? ;
2020-08-23 16:47:27 +02:00
pdu . redact ( & reason ) ? ;
2020-05-26 10:27:51 +02:00
self . replace_pdu ( & pdu_id , & pdu ) ? ;
Ok ( ( ) )
} else {
2020-06-09 15:13:17 +02:00
Err ( Error ::BadRequest (
ErrorKind ::NotFound ,
" Event ID does not exist. " ,
) )
2020-05-26 10:27:51 +02:00
}
}
/// Update current membership data.
2021-07-29 08:36:01 +02:00
#[ tracing::instrument(skip(self, last_state, db)) ]
2021-03-13 16:30:12 +01:00
pub fn update_membership (
2020-05-03 17:25:31 +02:00
& self ,
room_id : & RoomId ,
user_id : & UserId ,
2021-04-09 21:38:16 +02:00
membership : member ::MembershipState ,
2020-07-29 13:47:50 -04:00
sender : & UserId ,
2021-04-13 15:00:45 +02:00
last_state : Option < Vec < Raw < AnyStrippedStateEvent > > > ,
2021-04-14 10:43:31 +02:00
db : & Database ,
2021-08-17 00:22:52 +02:00
update_joined_count : bool ,
2020-05-03 17:25:31 +02:00
) -> Result < ( ) > {
2021-06-12 18:40:33 +02:00
// Keep track of which remote users exist by adding them as "deactivated" users
if user_id . server_name ( ) ! = db . globals . server_name ( ) {
db . users . create ( user_id , None ) ? ;
// TODO: displayname, avatar url
}
2020-09-14 20:23:19 +02:00
let mut roomserver_id = room_id . as_bytes ( ) . to_vec ( ) ;
roomserver_id . push ( 0xff ) ;
roomserver_id . extend_from_slice ( user_id . server_name ( ) . as_bytes ( ) ) ;
2021-05-17 10:25:27 +02:00
let mut serverroom_id = user_id . server_name ( ) . as_bytes ( ) . to_vec ( ) ;
serverroom_id . push ( 0xff ) ;
serverroom_id . extend_from_slice ( room_id . as_bytes ( ) ) ;
2020-08-26 11:15:52 -04:00
let mut userroom_id = user_id . as_bytes ( ) . to_vec ( ) ;
2020-05-03 17:25:31 +02:00
userroom_id . push ( 0xff ) ;
2020-08-26 11:15:52 -04:00
userroom_id . extend_from_slice ( room_id . as_bytes ( ) ) ;
2020-05-03 17:25:31 +02:00
2020-08-26 11:15:52 -04:00
let mut roomuser_id = room_id . as_bytes ( ) . to_vec ( ) ;
2020-05-03 17:25:31 +02:00
roomuser_id . push ( 0xff ) ;
2020-08-26 11:15:52 -04:00
roomuser_id . extend_from_slice ( user_id . as_bytes ( ) ) ;
2020-05-03 17:25:31 +02:00
2020-05-24 18:25:52 +02:00
match & membership {
member ::MembershipState ::Join = > {
2020-08-06 13:21:53 +02:00
// Check if the user never joined this room
if ! self . once_joined ( & user_id , & room_id ) ? {
// Remember that this user has joined the room at least once
self . roomuseroncejoinedids . insert ( & userroom_id , & [ ] ) ? ;
// Check if the room has a predecessor
2020-09-13 22:24:36 +02:00
if let Some ( predecessor ) = self
. room_state_get ( & room_id , & EventType ::RoomCreate , " " ) ?
2021-03-17 22:30:25 +01:00
. and_then ( | create | {
2020-09-13 22:24:36 +02:00
serde_json ::from_value ::<
Raw < ruma ::events ::room ::create ::CreateEventContent > ,
2021-06-30 09:52:01 +02:00
> ( create . content . clone ( ) )
2020-09-13 22:24:36 +02:00
. expect ( " Raw::from_value always works " )
. deserialize ( )
. ok ( )
} )
. and_then ( | content | content . predecessor )
2020-08-06 13:21:53 +02:00
{
// Copy user settings from predecessor to the current room:
// - Push rules
//
// TODO: finish this once push rules are implemented.
//
// let mut push_rules_event_content = account_data
// .get::<ruma::events::push_rules::PushRulesEvent>(
// None,
// user_id,
// EventType::PushRules,
// )?;
//
// NOTE: find where `predecessor.room_id` matches
// and update it to `room_id`.
//
// account_data
// .update(
// None,
// user_id,
// EventType::PushRules,
// &push_rules_event_content,
// globals,
// )
// .ok();
2020-09-01 13:07:32 +02:00
// Copy old tags to new room
2021-04-14 10:43:31 +02:00
if let Some ( tag_event ) =
db . account_data . get ::< ruma ::events ::tag ::TagEvent > (
Some ( & predecessor . room_id ) ,
user_id ,
EventType ::Tag ,
) ?
{
db . account_data
. update (
Some ( room_id ) ,
user_id ,
EventType ::Tag ,
& tag_event ,
& db . globals ,
)
2020-08-06 13:21:53 +02:00
. ok ( ) ;
} ;
2020-09-01 13:07:32 +02:00
// Copy direct chat flag
2021-04-14 10:43:31 +02:00
if let Some ( mut direct_event ) =
db . account_data . get ::< ruma ::events ::direct ::DirectEvent > (
None ,
user_id ,
EventType ::Direct ,
) ?
{
2020-08-06 13:21:53 +02:00
let mut room_ids_updated = false ;
2020-09-01 13:07:32 +02:00
for room_ids in direct_event . content . 0. values_mut ( ) {
2020-08-06 13:21:53 +02:00
if room_ids . iter ( ) . any ( | r | r = = & predecessor . room_id ) {
room_ids . push ( room_id . clone ( ) ) ;
room_ids_updated = true ;
}
}
if room_ids_updated {
2021-04-14 10:43:31 +02:00
db . account_data . update (
2020-09-01 13:07:32 +02:00
None ,
user_id ,
EventType ::Direct ,
& direct_event ,
2021-04-14 10:43:31 +02:00
& db . globals ,
2020-09-01 13:07:32 +02:00
) ? ;
2020-08-06 13:21:53 +02:00
}
} ;
}
}
2021-08-17 16:06:09 +02:00
if update_joined_count {
self . roomserverids . insert ( & roomserver_id , & [ ] ) ? ;
self . serverroomids . insert ( & serverroom_id , & [ ] ) ? ;
}
2020-05-24 18:25:52 +02:00
self . userroomid_joined . insert ( & userroom_id , & [ ] ) ? ;
self . roomuserid_joined . insert ( & roomuser_id , & [ ] ) ? ;
2021-04-11 21:01:27 +02:00
self . userroomid_invitestate . remove ( & userroom_id ) ? ;
self . roomuserid_invitecount . remove ( & roomuser_id ) ? ;
2021-04-13 15:00:45 +02:00
self . userroomid_leftstate . remove ( & userroom_id ) ? ;
self . roomuserid_leftcount . remove ( & roomuser_id ) ? ;
2020-05-24 18:25:52 +02:00
}
member ::MembershipState ::Invite = > {
2020-07-29 13:47:50 -04:00
// We want to know if the sender is ignored by the receiver
2021-04-14 10:43:31 +02:00
let is_ignored = db
. account_data
2020-07-29 17:07:12 -04:00
. get ::< ignored_user_list ::IgnoredUserListEvent > (
2020-07-29 13:47:50 -04:00
None , // Ignored users are in global account data
& user_id , // Receiver
EventType ::IgnoredUserList ,
) ?
2020-07-29 17:07:12 -04:00
. map_or ( false , | ignored | {
ignored . content . ignored_users . contains ( & sender )
} ) ;
2020-07-29 13:47:50 -04:00
if is_ignored {
2020-07-30 08:43:51 -04:00
return Ok ( ( ) ) ;
2020-07-29 13:47:50 -04:00
}
2020-09-14 20:23:19 +02:00
2021-08-17 16:06:09 +02:00
if update_joined_count {
self . roomserverids . insert ( & roomserver_id , & [ ] ) ? ;
self . serverroomids . insert ( & serverroom_id , & [ ] ) ? ;
}
2021-04-11 21:01:27 +02:00
self . userroomid_invitestate . insert (
& userroom_id ,
2021-06-08 18:10:00 +02:00
& serde_json ::to_vec ( & last_state . unwrap_or_default ( ) )
2021-04-11 21:01:27 +02:00
. expect ( " state to bytes always works " ) ,
) ? ;
self . roomuserid_invitecount
2021-04-14 10:43:31 +02:00
. insert ( & roomuser_id , & db . globals . next_count ( ) ? . to_be_bytes ( ) ) ? ;
2020-05-24 18:25:52 +02:00
self . userroomid_joined . remove ( & userroom_id ) ? ;
self . roomuserid_joined . remove ( & roomuser_id ) ? ;
2021-04-13 15:00:45 +02:00
self . userroomid_leftstate . remove ( & userroom_id ) ? ;
self . roomuserid_leftcount . remove ( & roomuser_id ) ? ;
2020-05-24 18:25:52 +02:00
}
member ::MembershipState ::Leave | member ::MembershipState ::Ban = > {
2021-08-17 16:06:09 +02:00
if update_joined_count {
if self
. room_members ( room_id )
. chain ( self . room_members_invited ( room_id ) )
. filter_map ( | r | r . ok ( ) )
. all ( | u | u . server_name ( ) ! = user_id . server_name ( ) )
{
self . roomserverids . remove ( & roomserver_id ) ? ;
self . serverroomids . remove ( & serverroom_id ) ? ;
}
2020-09-14 20:23:19 +02:00
}
2021-04-13 15:00:45 +02:00
self . userroomid_leftstate . insert (
& userroom_id ,
2021-06-08 18:10:00 +02:00
& serde_json ::to_vec ( & Vec ::< Raw < AnySyncStateEvent > > ::new ( ) ) . unwrap ( ) ,
2021-04-13 15:00:45 +02:00
) ? ; // TODO
self . roomuserid_leftcount
2021-04-14 10:43:31 +02:00
. insert ( & roomuser_id , & db . globals . next_count ( ) ? . to_be_bytes ( ) ) ? ;
2020-05-24 18:25:52 +02:00
self . userroomid_joined . remove ( & userroom_id ) ? ;
self . roomuserid_joined . remove ( & roomuser_id ) ? ;
2021-04-11 21:01:27 +02:00
self . userroomid_invitestate . remove ( & userroom_id ) ? ;
self . roomuserid_invitecount . remove ( & roomuser_id ) ? ;
2020-05-24 18:25:52 +02:00
}
_ = > { }
2020-05-03 17:25:31 +02:00
}
2021-08-17 00:22:52 +02:00
if update_joined_count {
self . update_joined_count ( room_id ) ? ;
}
2021-08-04 21:15:01 +02:00
2020-05-03 17:25:31 +02:00
Ok ( ( ) )
}
2021-08-19 11:01:18 +02:00
#[ tracing::instrument(skip(self)) ]
2021-08-04 21:15:01 +02:00
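/// Recounts the joined members of the room, caches the result in `roomid_joinedcount`
/// and syncs `roomserverids`/`serverroomids` with the servers that currently have
/// joined or invited members in the room.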
pub fn update_joined_count ( & self , room_id : & RoomId ) -> Result < ( ) > {
2021-08-17 16:06:09 +02:00
let mut joinedcount = 0_ u64 ;
let mut joined_servers = HashSet ::new ( ) ;
for joined in self . room_members ( & room_id ) . filter_map ( | r | r . ok ( ) ) {
joined_servers . insert ( joined . server_name ( ) . to_owned ( ) ) ;
joinedcount + = 1 ;
}
for invited in self . room_members_invited ( & room_id ) . filter_map ( | r | r . ok ( ) ) {
joined_servers . insert ( invited . server_name ( ) . to_owned ( ) ) ;
}
self . roomid_joinedcount
. insert ( room_id . as_bytes ( ) , & joinedcount . to_be_bytes ( ) ) ? ;
for old_joined_server in self . room_servers ( room_id ) . filter_map ( | r | r . ok ( ) ) {
if ! joined_servers . remove ( & old_joined_server ) {
// Server not in room anymore
let mut roomserver_id = room_id . as_bytes ( ) . to_vec ( ) ;
roomserver_id . push ( 0xff ) ;
roomserver_id . extend_from_slice ( old_joined_server . as_bytes ( ) ) ;
let mut serverroom_id = old_joined_server . as_bytes ( ) . to_vec ( ) ;
serverroom_id . push ( 0xff ) ;
serverroom_id . extend_from_slice ( room_id . as_bytes ( ) ) ;
self . roomserverids . remove ( & roomserver_id ) ? ;
self . serverroomids . remove ( & serverroom_id ) ? ;
}
}
// Only servers that are new to the room remain in joined_servers now
for server in joined_servers {
let mut roomserver_id = room_id . as_bytes ( ) . to_vec ( ) ;
roomserver_id . push ( 0xff ) ;
roomserver_id . extend_from_slice ( server . as_bytes ( ) ) ;
let mut serverroom_id = server . as_bytes ( ) . to_vec ( ) ;
serverroom_id . push ( 0xff ) ;
serverroom_id . extend_from_slice ( room_id . as_bytes ( ) ) ;
self . roomserverids . insert ( & roomserver_id , & [ ] ) ? ;
self . serverroomids . insert ( & serverroom_id , & [ ] ) ? ;
}
Ok ( ( ) )
2021-08-04 21:15:01 +02:00
}
2021-08-19 11:01:18 +02:00
#[ tracing::instrument(skip(self, db)) ]
2021-04-13 15:00:45 +02:00
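/// Makes the user leave the room. If we don't know the room and it is hosted on
/// another server, the leave is attempted over federation and the stored invite is
/// dropped; otherwise a leave membership event is built and appended to the room.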
pub async fn leave_room (
& self ,
user_id : & UserId ,
room_id : & RoomId ,
db : & Database ,
) -> Result < ( ) > {
// Ask a remote server if we don't have this room
if ! self . exists ( room_id ) ? & & room_id . server_name ( ) ! = db . globals . server_name ( ) {
if let Err ( e ) = self . remote_leave_room ( user_id , room_id , db ) . await {
warn! ( " Failed to leave room {} remotely: {} " , user_id , e ) ;
// Don't tell the client about this error
}
let last_state = self
. invite_state ( user_id , room_id ) ?
. map_or_else ( | | self . left_state ( user_id , room_id ) , | s | Ok ( Some ( s ) ) ) ? ;
// We always drop the invite; we can't rely on other servers to do it
self . update_membership (
room_id ,
user_id ,
MembershipState ::Leave ,
user_id ,
last_state ,
2021-04-14 10:43:31 +02:00
db ,
2021-08-17 00:22:52 +02:00
true ,
2021-04-13 15:00:45 +02:00
) ? ;
} else {
2021-08-03 11:10:58 +02:00
let mutex_state = Arc ::clone (
2021-07-13 15:44:25 +02:00
db . globals
2021-08-03 11:10:58 +02:00
. roomid_mutex_state
2021-07-13 15:44:25 +02:00
. write ( )
. unwrap ( )
. entry ( room_id . clone ( ) )
. or_default ( ) ,
) ;
2021-08-03 11:10:58 +02:00
let state_lock = mutex_state . lock ( ) . await ;
2021-07-13 15:44:25 +02:00
2021-04-13 15:00:45 +02:00
let mut event = serde_json ::from_value ::< Raw < member ::MemberEventContent > > (
self . room_state_get ( room_id , & EventType ::RoomMember , & user_id . to_string ( ) ) ?
. ok_or ( Error ::BadRequest (
ErrorKind ::BadState ,
" Cannot leave a room you are not a member of. " ,
) ) ?
2021-06-30 09:52:01 +02:00
. content
. clone ( ) ,
2021-04-13 15:00:45 +02:00
)
. expect ( " from_value::<Raw<..>> can never fail " )
. deserialize ( )
. map_err ( | _ | Error ::bad_database ( " Invalid member event in database. " ) ) ? ;
event . membership = member ::MembershipState ::Leave ;
self . build_and_append_pdu (
PduBuilder {
event_type : EventType ::RoomMember ,
content : serde_json ::to_value ( event )
. expect ( " event is valid, we just created it " ) ,
unsigned : None ,
state_key : Some ( user_id . to_string ( ) ) ,
redacts : None ,
} ,
user_id ,
room_id ,
db ,
2021-08-03 11:10:58 +02:00
& state_lock ,
2021-04-13 15:00:45 +02:00
) ? ;
}
Ok ( ( ) )
}
2021-08-19 11:01:18 +02:00
#[ tracing::instrument(skip(self, db)) ]
2021-04-13 15:00:45 +02:00
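/// Asks the servers found in our stripped invite state for a make_leave template,
/// signs the resulting stub and sends the finished leave event back over federation.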
async fn remote_leave_room (
& self ,
user_id : & UserId ,
room_id : & RoomId ,
db : & Database ,
) -> Result < ( ) > {
let mut make_leave_response_and_server = Err ( Error ::BadServerResponse (
" No server available to assist in leaving. " ,
) ) ;
let invite_state = db
. rooms
. invite_state ( user_id , room_id ) ?
. ok_or ( Error ::BadRequest (
ErrorKind ::BadState ,
" User is not invited. " ,
) ) ? ;
let servers = invite_state
. iter ( )
. filter_map ( | event | {
serde_json ::from_str ::< serde_json ::Value > ( & event . json ( ) . to_string ( ) ) . ok ( )
} )
. filter_map ( | event | event . get ( " sender " ) . cloned ( ) )
. filter_map ( | sender | sender . as_str ( ) . map ( | s | s . to_owned ( ) ) )
. filter_map ( | sender | UserId ::try_from ( sender ) . ok ( ) )
2021-04-14 14:41:39 +02:00
. map ( | user | user . server_name ( ) . to_owned ( ) )
. collect ::< HashSet < _ > > ( ) ;
2021-04-13 15:00:45 +02:00
for remote_server in servers {
let make_leave_response = db
. sending
. send_federation_request (
& db . globals ,
& remote_server ,
federation ::membership ::get_leave_event ::v1 ::Request { room_id , user_id } ,
)
. await ;
make_leave_response_and_server = make_leave_response . map ( | r | ( r , remote_server ) ) ;
if make_leave_response_and_server . is_ok ( ) {
break ;
}
}
let ( make_leave_response , remote_server ) = make_leave_response_and_server ? ;
2021-05-08 01:54:24 +02:00
let room_version_id = match make_leave_response . room_version {
2021-07-21 11:29:13 +02:00
Some ( version )
if version = = RoomVersionId ::Version5 | | version = = RoomVersionId ::Version6 = >
{
version
}
2021-04-13 15:00:45 +02:00
_ = > return Err ( Error ::BadServerResponse ( " Room version is not supported " ) ) ,
} ;
let mut leave_event_stub =
serde_json ::from_str ::< CanonicalJsonObject > ( make_leave_response . event . json ( ) . get ( ) )
. map_err ( | _ | {
Error ::BadServerResponse ( " Invalid make_leave event json received from server. " )
} ) ? ;
// TODO: Is origin needed?
leave_event_stub . insert (
" origin " . to_owned ( ) ,
2021-04-26 18:20:20 +02:00
CanonicalJsonValue ::String ( db . globals . server_name ( ) . as_str ( ) . to_owned ( ) ) ,
2021-04-13 15:00:45 +02:00
) ;
leave_event_stub . insert (
" origin_server_ts " . to_owned ( ) ,
2021-04-26 18:20:20 +02:00
CanonicalJsonValue ::Integer (
utils ::millis_since_unix_epoch ( )
. try_into ( )
. expect ( " Timestamp is valid js_int value " ) ,
) ,
2021-04-13 15:00:45 +02:00
) ;
// We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms
leave_event_stub . remove ( " event_id " ) ;
// In order to create a compatible ref hash (EventID) the `hashes` field needs to be present
ruma ::signatures ::hash_and_sign_event (
db . globals . server_name ( ) . as_str ( ) ,
db . globals . keypair ( ) ,
& mut leave_event_stub ,
2021-05-08 01:54:24 +02:00
& room_version_id ,
2021-04-13 15:00:45 +02:00
)
. expect ( " event is valid, we just created it " ) ;
// Generate event id
let event_id = EventId ::try_from ( & * format! (
" ${} " ,
2021-05-08 01:54:24 +02:00
ruma ::signatures ::reference_hash ( & leave_event_stub , & room_version_id )
2021-04-13 15:00:45 +02:00
. expect ( " ruma can calculate reference hashes " )
) )
. expect ( " ruma's reference hashes are valid event ids " ) ;
// Add event_id back
leave_event_stub . insert (
" event_id " . to_owned ( ) ,
2021-04-26 18:20:20 +02:00
CanonicalJsonValue ::String ( event_id . as_str ( ) . to_owned ( ) ) ,
2021-04-13 15:00:45 +02:00
) ;
// It has enough fields to be called a proper event now
let leave_event = leave_event_stub ;
db . sending
. send_federation_request (
& db . globals ,
& remote_server ,
federation ::membership ::create_leave_event ::v2 ::Request {
room_id ,
event_id : & event_id ,
pdu : PduEvent ::convert_to_outgoing_federation_event ( leave_event . clone ( ) ) ,
} ,
)
. await ? ;
Ok ( ( ) )
}
2020-05-03 17:25:31 +02:00
/// Makes a user forget a room.
2021-07-29 08:36:01 +02:00
#[ tracing::instrument(skip(self)) ]
2020-05-03 17:25:31 +02:00
pub fn forget ( & self , room_id : & RoomId , user_id : & UserId ) -> Result < ( ) > {
2020-08-26 11:15:52 -04:00
let mut userroom_id = user_id . as_bytes ( ) . to_vec ( ) ;
2020-05-03 17:25:31 +02:00
userroom_id . push ( 0xff ) ;
2020-08-26 11:15:52 -04:00
userroom_id . extend_from_slice ( room_id . as_bytes ( ) ) ;
2020-05-03 17:25:31 +02:00
2021-04-13 15:00:45 +02:00
let mut roomuser_id = room_id . as_bytes ( ) . to_vec ( ) ;
roomuser_id . push ( 0xff ) ;
roomuser_id . extend_from_slice ( user_id . as_bytes ( ) ) ;
2021-06-08 18:10:00 +02:00
self . userroomid_leftstate . remove ( & userroom_id ) ? ;
self . roomuserid_leftcount . remove ( & roomuser_id ) ? ;
2020-05-03 17:25:31 +02:00
Ok ( ( ) )
}
2021-07-29 08:36:01 +02:00
#[ tracing::instrument(skip(self, globals)) ]
2020-05-25 23:24:13 +02:00
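/// Points the alias at the given room, or removes the alias when `room_id` is `None`.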
pub fn set_alias (
& self ,
alias : & RoomAliasId ,
room_id : Option < & RoomId > ,
globals : & super ::globals ::Globals ,
) -> Result < ( ) > {
if let Some ( room_id ) = room_id {
2020-06-03 20:55:11 +02:00
// New alias
2020-05-25 23:24:13 +02:00
self . alias_roomid
2021-06-08 18:10:00 +02:00
. insert ( & alias . alias ( ) . as_bytes ( ) , room_id . as_bytes ( ) ) ? ;
2020-08-26 11:15:52 -04:00
let mut aliasid = room_id . as_bytes ( ) . to_vec ( ) ;
2021-02-11 13:16:14 +01:00
aliasid . push ( 0xff ) ;
2020-05-25 23:24:13 +02:00
aliasid . extend_from_slice ( & globals . next_count ( ) ? . to_be_bytes ( ) ) ;
2021-06-08 18:10:00 +02:00
self . aliasid_alias . insert ( & aliasid , & * alias . as_bytes ( ) ) ? ;
2020-05-25 23:24:13 +02:00
} else {
2020-06-03 20:55:11 +02:00
// room_id=None means remove alias
2021-06-08 18:10:00 +02:00
if let Some ( room_id ) = self . alias_roomid . get ( & alias . alias ( ) . as_bytes ( ) ) ? {
let mut prefix = room_id . to_vec ( ) ;
prefix . push ( 0xff ) ;
for ( key , _ ) in self . aliasid_alias . scan_prefix ( prefix ) {
self . aliasid_alias . remove ( & key ) ? ;
}
self . alias_roomid . remove ( & alias . alias ( ) . as_bytes ( ) ) ? ;
} else {
return Err ( Error ::BadRequest (
2020-06-09 15:13:17 +02:00
ErrorKind ::NotFound ,
" Alias does not exist. " ,
2021-06-08 18:10:00 +02:00
) ) ;
2020-05-25 23:24:13 +02:00
}
}
Ok ( ( ) )
}
2021-07-29 08:36:01 +02:00
#[ tracing::instrument(skip(self)) ]
2020-05-25 23:24:13 +02:00
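/// Resolves a room alias to the room id it points to, if any.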
pub fn id_from_alias ( & self , alias : & RoomAliasId ) -> Result < Option < RoomId > > {
self . alias_roomid
2021-06-08 18:10:00 +02:00
. get ( alias . alias ( ) . as_bytes ( ) ) ?
2020-05-25 23:24:13 +02:00
. map_or ( Ok ( None ) , | bytes | {
2020-06-11 10:03:08 +02:00
Ok ( Some (
RoomId ::try_from ( utils ::string_from_bytes ( & bytes ) . map_err ( | _ | {
Error ::bad_database ( " Room ID in alias_roomid is invalid unicode. " )
} ) ? )
. map_err ( | _ | Error ::bad_database ( " Room ID in alias_roomid is invalid. " ) ) ? ,
) )
2020-05-25 23:24:13 +02:00
} )
}
2021-07-29 08:36:01 +02:00
#[ tracing::instrument(skip(self)) ]
2021-06-08 18:10:00 +02:00
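/// Returns an iterator over all aliases that point to the given room.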
pub fn room_aliases < ' a > (
& ' a self ,
room_id : & RoomId ,
) -> impl Iterator < Item = Result < RoomAliasId > > + ' a {
2020-08-26 11:15:52 -04:00
let mut prefix = room_id . as_bytes ( ) . to_vec ( ) ;
2020-05-25 23:24:13 +02:00
prefix . push ( 0xff ) ;
2021-06-08 18:10:00 +02:00
self . aliasid_alias . scan_prefix ( prefix ) . map ( | ( _ , bytes ) | {
2021-06-17 20:34:14 +02:00
utils ::string_from_bytes ( & bytes )
2021-06-08 18:10:00 +02:00
. map_err ( | _ | Error ::bad_database ( " Invalid alias bytes in aliasid_alias. " ) ) ?
. try_into ( )
2021-06-17 20:34:14 +02:00
. map_err ( | _ | Error ::bad_database ( " Invalid alias in aliasid_alias. " ) )
2021-06-08 18:10:00 +02:00
} )
2020-05-25 23:24:13 +02:00
}
2021-07-29 08:36:01 +02:00
#[ tracing::instrument(skip(self)) ]
2020-05-25 23:24:13 +02:00
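/// Adds the room to or removes it from the public room directory.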
pub fn set_public ( & self , room_id : & RoomId , public : bool ) -> Result < ( ) > {
if public {
2020-08-26 11:15:52 -04:00
self . publicroomids . insert ( room_id . as_bytes ( ) , & [ ] ) ? ;
2020-05-25 23:24:13 +02:00
} else {
2020-08-26 11:15:52 -04:00
self . publicroomids . remove ( room_id . as_bytes ( ) ) ? ;
2020-05-24 08:30:57 +02:00
}
2020-05-25 23:24:13 +02:00
Ok ( ( ) )
}
2021-07-29 08:36:01 +02:00
#[ tracing::instrument(skip(self)) ]
2020-05-25 23:24:13 +02:00
pub fn is_public_room ( & self , room_id : & RoomId ) -> Result < bool > {
2021-06-08 18:10:00 +02:00
Ok ( self . publicroomids . get ( room_id . as_bytes ( ) ) ? . is_some ( ) )
2020-05-25 23:24:13 +02:00
}
2021-07-29 08:36:01 +02:00
#[ tracing::instrument(skip(self)) ]
2021-06-17 20:34:14 +02:00
pub fn public_rooms ( & self ) -> impl Iterator < Item = Result < RoomId > > + '_ {
2021-06-08 18:10:00 +02:00
self . publicroomids . iter ( ) . map ( | ( bytes , _ ) | {
2021-06-17 20:34:14 +02:00
RoomId ::try_from (
utils ::string_from_bytes ( & bytes ) . map_err ( | _ | {
2020-06-11 10:03:08 +02:00
Error ::bad_database ( " Room ID in publicroomids is invalid unicode. " )
2021-06-17 20:34:14 +02:00
} ) ? ,
2020-06-11 10:03:08 +02:00
)
2021-06-17 20:34:14 +02:00
. map_err ( | _ | Error ::bad_database ( " Room ID in publicroomids is invalid. " ) )
2020-06-09 15:13:17 +02:00
} )
2020-05-24 08:30:57 +02:00
}
2021-08-19 11:01:18 +02:00
#[ tracing::instrument(skip(self)) ]
2020-08-21 21:22:59 +02:00
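/// Searches the room's token index for every word in `search_string` and returns an
/// iterator over the PDU ids that contain all of them (newest first), together with
/// the list of words that were searched for.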
pub fn search_pdus < ' a > (
& ' a self ,
2020-08-18 12:15:27 +02:00
room_id : & RoomId ,
search_string : & str ,
2021-04-09 21:38:16 +02:00
) -> Result < ( impl Iterator < Item = Vec < u8 > > + ' a , Vec < String > ) > {
2021-08-14 19:47:49 +02:00
let prefix = self
. get_shortroomid ( room_id ) ?
. expect ( " room exists " )
. to_be_bytes ( )
. to_vec ( ) ;
2021-08-12 23:04:00 +02:00
let prefix_clone = prefix . clone ( ) ;
2020-08-18 12:15:27 +02:00
let words = search_string
. split_terminator ( | c : char | ! c . is_alphanumeric ( ) )
2021-08-19 14:05:23 +02:00
. filter ( | s | ! s . is_empty ( ) )
2020-08-18 12:15:27 +02:00
. map ( str ::to_lowercase )
. collect ::< Vec < _ > > ( ) ;
2021-06-17 20:44:29 +02:00
let iterators = words . clone ( ) . into_iter ( ) . map ( move | word | {
let mut prefix2 = prefix . clone ( ) ;
prefix2 . extend_from_slice ( word . as_bytes ( ) ) ;
prefix2 . push ( 0xff ) ;
let mut last_possible_id = prefix2 . clone ( ) ;
last_possible_id . extend_from_slice ( & u64 ::MAX . to_be_bytes ( ) ) ;
self . tokenids
. iter_from ( & last_possible_id , true ) // Newest pdus first
. take_while ( move | ( k , _ ) | k . starts_with ( & prefix2 ) )
. map ( | ( key , _ ) | {
2021-08-12 23:04:00 +02:00
let pdu_id = key [ key . len ( ) - size_of ::< u64 > ( ) .. ] . to_vec ( ) ;
2021-06-17 20:44:29 +02:00
Ok ::< _ , Error > ( pdu_id )
} )
. filter_map ( | r | r . ok ( ) )
} ) ;
2020-08-18 12:15:27 +02:00
2020-08-22 23:09:53 +02:00
Ok ( (
utils ::common_elements ( iterators , | a , b | {
// We compare b with a because we reversed the iterator earlier
b . cmp ( a )
} )
2021-08-12 23:04:00 +02:00
. unwrap ( )
. map ( move | id | {
let mut pduid = prefix_clone . clone ( ) ;
pduid . extend_from_slice ( & id ) ;
pduid
} ) ,
2020-08-22 23:09:53 +02:00
words ,
) )
2020-08-21 21:22:59 +02:00
}
2020-08-18 12:15:27 +02:00
2021-02-28 12:41:03 +01:00
#[ tracing::instrument(skip(self)) ]
2020-08-21 21:22:59 +02:00
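/// Returns an iterator over all rooms that every user in `users` has joined.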
pub fn get_shared_rooms < ' a > (
& ' a self ,
users : Vec < UserId > ,
2021-06-08 18:10:00 +02:00
) -> Result < impl Iterator < Item = Result < RoomId > > + ' a > {
2021-06-17 20:34:14 +02:00
let iterators = users . into_iter ( ) . map ( move | user_id | {
let mut prefix = user_id . as_bytes ( ) . to_vec ( ) ;
prefix . push ( 0xff ) ;
self . userroomid_joined
. scan_prefix ( prefix )
. map ( | ( key , _ ) | {
let roomid_index = key
. iter ( )
. enumerate ( )
. find ( | ( _ , & b ) | b = = 0xff )
. ok_or_else ( | | Error ::bad_database ( " Invalid userroomid_joined in db. " ) ) ?
. 0
+ 1 ; // +1 because the room id starts AFTER the separator
let room_id = key [ roomid_index .. ] . to_vec ( ) ;
Ok ::< _ , Error > ( room_id )
} )
. filter_map ( | r | r . ok ( ) )
} ) ;
2020-08-21 21:22:59 +02:00
2020-08-22 23:09:53 +02:00
// We use the default compare function because keys are sorted correctly (not reversed)
2021-06-08 18:10:00 +02:00
Ok ( utils ::common_elements ( iterators , Ord ::cmp )
2020-08-21 21:22:59 +02:00
. expect ( " users is not empty " )
. map ( | bytes | {
RoomId ::try_from ( utils ::string_from_bytes ( & * bytes ) . map_err ( | _ | {
Error ::bad_database ( " Invalid RoomId bytes in userroomid_joined " )
} ) ? )
. map_err ( | _ | Error ::bad_database ( " Invalid RoomId in userroomid_joined. " ) )
2021-06-08 18:10:00 +02:00
} ) )
2020-08-18 12:15:27 +02:00
}
2021-03-01 08:23:28 -05:00
/// Returns an iterator of all servers participating in this room.
2021-07-29 08:36:01 +02:00
#[ tracing::instrument(skip(self)) ]
2021-06-08 18:10:00 +02:00
pub fn room_servers < ' a > (
& ' a self ,
room_id : & RoomId ,
) -> impl Iterator < Item = Result < Box < ServerName > > > + ' a {
2020-09-14 20:23:19 +02:00
let mut prefix = room_id . as_bytes ( ) . to_vec ( ) ;
prefix . push ( 0xff ) ;
2021-06-08 18:10:00 +02:00
self . roomserverids . scan_prefix ( prefix ) . map ( | ( key , _ ) | {
2021-06-17 20:34:14 +02:00
Box ::< ServerName > ::try_from (
2020-09-14 20:23:19 +02:00
utils ::string_from_bytes (
2021-06-08 18:10:00 +02:00
& key . rsplit ( | & b | b = = 0xff )
2020-09-14 20:23:19 +02:00
. next ( )
. expect ( " rsplit always returns an element " ) ,
)
. map_err ( | _ | {
Error ::bad_database ( " Server name in roomserverids is invalid unicode. " )
} ) ? ,
)
2021-06-17 20:34:14 +02:00
. map_err ( | _ | Error ::bad_database ( " Server name in roomserverids is invalid. " ) )
2020-09-14 20:23:19 +02:00
} )
}

    /// Returns an iterator of all rooms a server participates in (as far as we know).
    #[tracing::instrument(skip(self))]
    pub fn server_rooms<'a>(
        &'a self,
        server: &ServerName,
    ) -> impl Iterator<Item = Result<RoomId>> + 'a {
        let mut prefix = server.as_bytes().to_vec();
        prefix.push(0xff);

        self.serverroomids.scan_prefix(prefix).map(|(key, _)| {
            RoomId::try_from(
                utils::string_from_bytes(
                    &key.rsplit(|&b| b == 0xff)
                        .next()
                        .expect("rsplit always returns an element"),
                )
                .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?,
            )
            .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid."))
        })
    }

    /// Returns an iterator over all joined members of a room.
    #[tracing::instrument(skip(self))]
    pub fn room_members<'a>(
        &'a self,
        room_id: &RoomId,
    ) -> impl Iterator<Item = Result<UserId>> + 'a {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| {
            UserId::try_from(
                utils::string_from_bytes(
                    &key.rsplit(|&b| b == 0xff)
                        .next()
                        .expect("rsplit always returns an element"),
                )
                .map_err(|_| {
                    Error::bad_database("User ID in roomuserid_joined is invalid unicode.")
                })?,
            )
            .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid."))
        })
    }

    #[tracing::instrument(skip(self))]
    pub fn room_joined_count(&self, room_id: &RoomId) -> Result<Option<u64>> {
        Ok(self
            .roomid_joinedcount
            .get(room_id.as_bytes())?
            .map(|b| {
                utils::u64_from_bytes(&b)
                    .map_err(|_| Error::bad_database("Invalid joinedcount in db."))
            })
            .transpose()?)
    }
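
    // A minimal sketch (hypothetical caller): the stored counter avoids walking
    // `roomuserid_joined` when only the number of joined members is needed,
    // e.g. for a room directory entry:
    //
    //     let num_joined_members = db.rooms.room_joined_count(&room_id)?.unwrap_or(0);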

    /// Returns an iterator over all User IDs who ever joined a room.
    #[tracing::instrument(skip(self))]
    pub fn room_useroncejoined<'a>(
        &'a self,
        room_id: &RoomId,
    ) -> impl Iterator<Item = Result<UserId>> + 'a {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        self.roomuseroncejoinedids
            .scan_prefix(prefix)
            .map(|(key, _)| {
                UserId::try_from(
                    utils::string_from_bytes(
                        &key.rsplit(|&b| b == 0xff)
                            .next()
                            .expect("rsplit always returns an element"),
                    )
                    .map_err(|_| {
                        Error::bad_database("User ID in room_useroncejoined is invalid unicode.")
                    })?,
                )
                .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid."))
            })
    }

    /// Returns an iterator over all invited members of a room.
    #[tracing::instrument(skip(self))]
    pub fn room_members_invited<'a>(
        &'a self,
        room_id: &RoomId,
    ) -> impl Iterator<Item = Result<UserId>> + 'a {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        self.roomuserid_invitecount
            .scan_prefix(prefix)
            .map(|(key, _)| {
                UserId::try_from(
                    utils::string_from_bytes(
                        &key.rsplit(|&b| b == 0xff)
                            .next()
                            .expect("rsplit always returns an element"),
                    )
                    .map_err(|_| {
                        Error::bad_database("User ID in roomuserid_invited is invalid unicode.")
                    })?,
                )
                .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid."))
            })
    }

    #[tracing::instrument(skip(self))]
    pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result<Option<u64>> {
        let mut key = room_id.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(user_id.as_bytes());

        self.roomuserid_invitecount
            .get(&key)?
            .map_or(Ok(None), |bytes| {
                Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| {
                    Error::bad_database("Invalid invitecount in db.")
                })?))
            })
    }

    #[tracing::instrument(skip(self))]
    pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result<Option<u64>> {
        let mut key = room_id.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(user_id.as_bytes());

        self.roomuserid_leftcount
            .get(&key)?
            .map_or(Ok(None), |bytes| {
                Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| {
                    Error::bad_database("Invalid leftcount in db.")
                })?))
            })
    }
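
    // A sketch of the intended use (an assumption, not asserted by this file): the
    // invite/left counts store the global counter at the time the membership changed,
    // so an incremental caller could compare them against its `since` token:
    //
    //     let left_since_last_sync = db
    //         .rooms
    //         .get_left_count(&room_id, &user_id)?
    //         .map_or(false, |count| count > since);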

    /// Returns an iterator over all rooms this user joined.
    #[tracing::instrument(skip(self))]
    pub fn rooms_joined<'a>(
        &'a self,
        user_id: &UserId,
    ) -> impl Iterator<Item = Result<RoomId>> + 'a {
        self.userroomid_joined
            .scan_prefix(user_id.as_bytes().to_vec())
            .map(|(key, _)| {
                RoomId::try_from(
                    utils::string_from_bytes(
                        &key.rsplit(|&b| b == 0xff)
                            .next()
                            .expect("rsplit always returns an element"),
                    )
                    .map_err(|_| {
                        Error::bad_database("Room ID in userroomid_joined is invalid unicode.")
                    })?,
                )
                .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid."))
            })
    }

    /// Returns an iterator over all rooms a user was invited to.
    #[tracing::instrument(skip(self))]
    pub fn rooms_invited<'a>(
        &'a self,
        user_id: &UserId,
    ) -> impl Iterator<Item = Result<(RoomId, Vec<Raw<AnyStrippedStateEvent>>)>> + 'a {
        let mut prefix = user_id.as_bytes().to_vec();
        prefix.push(0xff);

        self.userroomid_invitestate
            .scan_prefix(prefix)
            .map(|(key, state)| {
                let room_id = RoomId::try_from(
                    utils::string_from_bytes(
                        &key.rsplit(|&b| b == 0xff)
                            .next()
                            .expect("rsplit always returns an element"),
                    )
                    .map_err(|_| {
                        Error::bad_database("Room ID in userroomid_invited is invalid unicode.")
                    })?,
                )
                .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?;

                let state = serde_json::from_slice(&state)
                    .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?;

                Ok((room_id, state))
            })
    }
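
    // A minimal usage sketch (hypothetical caller), pairing each invited room with the
    // stripped state stored alongside it, e.g. when assembling the invite section of a
    // sync response:
    //
    //     for entry in db.rooms.rooms_invited(&user_id) {
    //         let (room_id, invite_state) = entry?;
    //         // invite_state: Vec<Raw<AnyStrippedStateEvent>>
    //     }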

    #[tracing::instrument(skip(self))]
    pub fn invite_state(
        &self,
        user_id: &UserId,
        room_id: &RoomId,
    ) -> Result<Option<Vec<Raw<AnyStrippedStateEvent>>>> {
        let mut key = user_id.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(room_id.as_bytes());

        self.userroomid_invitestate
            .get(&key)?
            .map(|state| {
                let state = serde_json::from_slice(&state)
                    .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?;

                Ok(state)
            })
            .transpose()
    }

    #[tracing::instrument(skip(self))]
    pub fn left_state(
        &self,
        user_id: &UserId,
        room_id: &RoomId,
    ) -> Result<Option<Vec<Raw<AnyStrippedStateEvent>>>> {
        let mut key = user_id.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(room_id.as_bytes());

        self.userroomid_leftstate
            .get(&key)?
            .map(|state| {
                let state = serde_json::from_slice(&state)
                    .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?;

                Ok(state)
            })
            .transpose()
    }

    /// Returns an iterator over all rooms a user left.
    #[tracing::instrument(skip(self))]
    pub fn rooms_left<'a>(
        &'a self,
        user_id: &UserId,
    ) -> impl Iterator<Item = Result<(RoomId, Vec<Raw<AnySyncStateEvent>>)>> + 'a {
        let mut prefix = user_id.as_bytes().to_vec();
        prefix.push(0xff);

        self.userroomid_leftstate
            .scan_prefix(prefix)
            .map(|(key, state)| {
                let room_id = RoomId::try_from(
                    utils::string_from_bytes(
                        &key.rsplit(|&b| b == 0xff)
                            .next()
                            .expect("rsplit always returns an element"),
                    )
                    .map_err(|_| {
                        Error::bad_database("Room ID in userroomid_leftstate is invalid unicode.")
                    })?,
                )
                .map_err(|_| Error::bad_database("Room ID in userroomid_leftstate is invalid."))?;

                let state = serde_json::from_slice(&state)
                    .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?;

                Ok((room_id, state))
            })
    }

    #[tracing::instrument(skip(self))]
    pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool> {
        let mut userroom_id = user_id.as_bytes().to_vec();
        userroom_id.push(0xff);
        userroom_id.extend_from_slice(room_id.as_bytes());

        Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some())
    }

    #[tracing::instrument(skip(self))]
    pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool> {
        let mut userroom_id = user_id.as_bytes().to_vec();
        userroom_id.push(0xff);
        userroom_id.extend_from_slice(room_id.as_bytes());

        Ok(self.userroomid_joined.get(&userroom_id)?.is_some())
    }

    #[tracing::instrument(skip(self))]
    pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool> {
        let mut userroom_id = user_id.as_bytes().to_vec();
        userroom_id.push(0xff);
        userroom_id.extend_from_slice(room_id.as_bytes());

        Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some())
    }

    #[tracing::instrument(skip(self))]
    pub fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool> {
        let mut userroom_id = user_id.as_bytes().to_vec();
        userroom_id.push(0xff);
        userroom_id.extend_from_slice(room_id.as_bytes());

        Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some())
    }
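
    // A minimal sketch (hypothetical caller), assuming the three membership trees are
    // kept mutually exclusive per (user, room) pair by the write path:
    //
    //     let membership = if db.rooms.is_joined(&user_id, &room_id)? {
    //         Some(MembershipState::Join)
    //     } else if db.rooms.is_invited(&user_id, &room_id)? {
    //         Some(MembershipState::Invite)
    //     } else if db.rooms.is_left(&user_id, &room_id)? {
    //         Some(MembershipState::Leave)
    //     } else {
    //         None
    //     };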

    #[tracing::instrument(skip(self))]
    pub fn auth_chain_cache(&self) -> std::sync::MutexGuard<'_, LruCache<Vec<u64>, HashSet<u64>>> {
        self.auth_chain_cache.lock().unwrap()
    }
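
    // A minimal usage sketch (hypothetical caller): the accessor returns the lock guard
    // itself, so it should be held only for the lookup or insert and dropped before any
    // slow work:
    //
    //     let mut cache = db.rooms.auth_chain_cache();
    //     if let Some(auth_chain) = cache.get_mut(&key) {
    //         // reuse the cached HashSet<u64> of short event ids
    //     }
    //     drop(cache);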
2020-05-03 17:25:31 +02:00
}

#[test]
fn test_size_of_caches() {
    // Declared only to document the shape of the state info cache; its size is not
    // estimated below.
    let stateinfo_cache: LruCache<
        u64,
        Vec<(
            u64,                           // sstatehash
            HashSet<CompressedStateEvent>, // full state
            HashSet<CompressedStateEvent>, // added
            HashSet<CompressedStateEvent>, // removed
        )>,
    > = LruCache::new(1_000);

    let mut total_max_cache_size: i32 = 0;

    {
        let pdu_cache: LruCache<EventId, Arc<PduEvent>> = LruCache::new(100_000);
        let content_json = r#"
        {
        "body": "> <@fusetim:matrix.org> with my toaster, sure xd\n\nFor people with smaller machines, I can recomend gitpod. It is integrated with GitLab and gives you a quite powerful machine (see screenshot below) with vscode in the browser.",
        "format": "org.matrix.custom.html",
        "formatted_body": "<mx-reply><blockquote><a href=\"https://matrix.to/#/!pWCROeqlZcGggueJLt:fachschaften.org/$2Slj7obNBkOLVOiF5nhNLIRI7-gWLY6lc0F75ERJAmI?via=conduit.rs&via=matrix.org&via=privacytools.io\">In reply to</a> <a href=\"https://matrix.to/#/@fusetim:matrix.org\">@fusetim:matrix.org</a><br>with my toaster, sure xd</blockquote></mx-reply>For people with smaller machines, I can recomend gitpod. It is integrated with GitLab and gives you a quite powerful machine (see screenshot below) with vscode in the browser.",
        "m.relates_to": {
            "m.in_reply_to": {
                "event_id": "$2Slj7obNBkOLVOiF5nhNLIRI7-gWLY6lc0F75ERJAmI"
            }
        },
        "msgtype": "m.text"
        }"#;
        let pdu_event = PduEvent {
            event_id: EventId::try_from(format!("$acR1l0raoZnm60CBwAVgqbZqoO/mYU81xysh1u7XcJk"))
                .unwrap(),
            room_id: RoomId::try_from("!n8f893n9:example.com").unwrap(),
            sender: UserId::try_from("@carl:example.com").unwrap(),
            origin_server_ts: uint!(400),
            kind: EventType::Dummy,
            content: serde_json::from_str(content_json).unwrap(),
            auth_events: Vec::new(),
            state_key: Some("#################################".to_string()),
            prev_events: Vec::new(),
            depth: uint!(400),
            redacts: None,
            unsigned: BTreeMap::default(),
            hashes: ruma::events::pdu::EventHash::new(
                "################################################################".to_string(),
            ),
            signatures: BTreeMap::default(),
        };
        let pdu_event_size = std::mem::size_of_val(&pdu_event);
        let event_id =
            EventId::try_from(format!("$acR1l0raoZnm60CBwAVgqbZqoO/mYU81xysh1u7XcJk")).unwrap();
        let event_id_size = std::mem::size_of_val(&event_id);
        let total_pdu_mem = pdu_cache.capacity() * (pdu_event_size + event_id_size);
        // Rough conversion: bytes / 1_000_000 ~= MB.
        let pdu_cache_size_in_mb =
            (std::mem::size_of_val(&pdu_cache) + total_pdu_mem) as i32 / 1_000_000;

        total_max_cache_size += pdu_cache_size_in_mb;

        println!(
            "size of filled ({}) pdu_cache is {} MB",
            pdu_cache.capacity(),
            pdu_cache_size_in_mb
        );
    }
    {
        let auth_chain_cache: LruCache<u64, HashSet<u64>> = LruCache::new(100_000);
        let mut a_hash_set: HashSet<u64> = HashSet::new();
        a_hash_set.insert(100_000_u64);
        a_hash_set.insert(200_000_u64);
        a_hash_set.insert(300_000_u64);
        let entry_size = std::mem::size_of::<u64>() + std::mem::size_of_val(&a_hash_set);
        let all_entry_sizes = auth_chain_cache.capacity() * entry_size;
        let auth_chain_cache_size_in_mb =
            (std::mem::size_of_val(&auth_chain_cache) + all_entry_sizes) as i32 / 1_000_000;

        total_max_cache_size += auth_chain_cache_size_in_mb;

        println!(
            "size of filled ({}) auth_chain_cache is {} MB",
            auth_chain_cache.capacity(),
            auth_chain_cache_size_in_mb
        );
    }
    {
        let shorteventid_cache: LruCache<u64, EventId> = LruCache::new(1_000_000);
        let event_id =
            EventId::try_from(format!("$acR1l0raoZnm60CBwAVgqbZqoO/mYU81xysh1u7XcJk")).unwrap();
        let event_id_size = std::mem::size_of_val(&event_id);
        let entry_size = std::mem::size_of::<u64>() + event_id_size;
        let all_entry_sizes = shorteventid_cache.capacity() * entry_size;
        let shorteventid_cache_size_in_mb =
            (std::mem::size_of_val(&shorteventid_cache) + all_entry_sizes) as i32 / 1_000_000;

        // Counted twice: shorteventid_cache and eventidshort_cache have the same shape.
        total_max_cache_size += shorteventid_cache_size_in_mb * 2;

        println!(
            "size of filled ({}) shorteventid_cache / eventidshort_cache is {} MB",
            shorteventid_cache.capacity(),
            shorteventid_cache_size_in_mb
        );
    }
    {
        let statekeyshort_cache: LruCache<(EventType, String), u64> = LruCache::new(1_000_000);
        let a_key = (EventType::Dummy, "###################################".to_string());
        let a_value = "#####################################".to_string();
        let entry_size = std::mem::size_of_val(&a_key) + std::mem::size_of_val(&a_value);
        let all_entry_sizes = statekeyshort_cache.capacity() * entry_size;
        let statekeyshort_cache_size_in_mb =
            (std::mem::size_of_val(&statekeyshort_cache) + all_entry_sizes) as i32 / 1_000_000;

        // Counted twice: statekeyshort_cache and shortstatekey_cache have the same shape.
        total_max_cache_size += statekeyshort_cache_size_in_mb * 2;

        println!(
            "size of filled ({}) statekeyshort_cache / shortstatekey_cache is {} MB",
            statekeyshort_cache.capacity(),
            statekeyshort_cache_size_in_mb
        );
    }

    println!("All caches sum up to {} MB", total_max_cache_size);
}