1
0
Fork 0
mirror of https://gitlab.com/famedly/conduit.git synced 2025-06-27 16:35:59 +00:00

Fully revert "Use Arc<EventId> in place of most EventIds"

This reverts commit 9bff276fa9.
This commit is contained in:
Jonas Platte 2021-08-31 20:47:59 +02:00
parent 5536bfdca5
commit 3b5853043f
No known key found for this signature in database
GPG key ID: CC154DE0E30B7C67
3 changed files with 54 additions and 76 deletions

8
Cargo.lock generated
View file

@@ -325,9 +325,9 @@ checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b"
[[package]] [[package]]
name = "cpufeatures" name = "cpufeatures"
version = "0.2.1" version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" checksum = "66c99696f6c9dd7f35d486b9d04d7e6e202aa3e8c40d553f2fdf5e7e0c6a71ef"
dependencies = [ dependencies = [
"libc", "libc",
] ]
@@ -2476,9 +2476,9 @@ checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d"
[[package]] [[package]]
name = "sha2" name = "sha2"
version = "0.9.6" version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9204c41a1597a8c5af23c82d1c921cb01ec0a4c59e07a9c7306062829a3903f3" checksum = "b362ae5752fd2137731f9fa25fd4d9058af34666ca1966fb969119cc35719f12"
dependencies = [ dependencies = [
"block-buffer", "block-buffer",
"cfg-if 1.0.0", "cfg-if 1.0.0",

View file

@@ -97,8 +97,8 @@ pub struct Rooms {
pub(super) referencedevents: Arc<dyn Tree>, pub(super) referencedevents: Arc<dyn Tree>,
pub(super) pdu_cache: Mutex<LruCache<EventId, Arc<PduEvent>>>, pub(super) pdu_cache: Mutex<LruCache<EventId, Arc<PduEvent>>>,
pub(super) shorteventid_cache: Mutex<LruCache<u64, Arc<EventId>>>,
pub(super) auth_chain_cache: Mutex<LruCache<Vec<u64>, Arc<HashSet<u64>>>>, pub(super) auth_chain_cache: Mutex<LruCache<Vec<u64>, Arc<HashSet<u64>>>>,
pub(super) shorteventid_cache: Mutex<LruCache<u64, EventId>>,
pub(super) eventidshort_cache: Mutex<LruCache<EventId, u64>>, pub(super) eventidshort_cache: Mutex<LruCache<EventId, u64>>,
pub(super) statekeyshort_cache: Mutex<LruCache<(EventType, String), u64>>, pub(super) statekeyshort_cache: Mutex<LruCache<(EventType, String), u64>>,
pub(super) shortstatekey_cache: Mutex<LruCache<u64, (EventType, String)>>, pub(super) shortstatekey_cache: Mutex<LruCache<u64, (EventType, String)>>,
@@ -121,7 +121,7 @@ impl Rooms {
/// Builds a StateMap by iterating over all keys that start /// Builds a StateMap by iterating over all keys that start
/// with state_hash, this gives the full state for the given state_hash. /// with state_hash, this gives the full state for the given state_hash.
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn state_full_ids(&self, shortstatehash: u64) -> Result<BTreeMap<u64, Arc<EventId>>> { pub fn state_full_ids(&self, shortstatehash: u64) -> Result<BTreeMap<u64, EventId>> {
let full_state = self let full_state = self
.load_shortstatehash_info(shortstatehash)? .load_shortstatehash_info(shortstatehash)?
.pop() .pop()
@@ -172,7 +172,7 @@ impl Rooms {
shortstatehash: u64, shortstatehash: u64,
event_type: &EventType, event_type: &EventType,
state_key: &str, state_key: &str,
) -> Result<Option<Arc<EventId>>> { ) -> Result<Option<EventId>> {
let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { let shortstatekey = match self.get_shortstatekey(event_type, state_key)? {
Some(s) => s, Some(s) => s,
None => return Ok(None), None => return Ok(None),
@@ -527,7 +527,7 @@ impl Rooms {
pub fn parse_compressed_state_event( pub fn parse_compressed_state_event(
&self, &self,
compressed_event: CompressedStateEvent, compressed_event: CompressedStateEvent,
) -> Result<(u64, Arc<EventId>)> { ) -> Result<(u64, EventId)> {
Ok(( Ok((
utils::u64_from_bytes(&compressed_event[0..size_of::<u64>()]) utils::u64_from_bytes(&compressed_event[0..size_of::<u64>()])
.expect("bytes have right length"), .expect("bytes have right length"),
@@ -843,14 +843,14 @@ impl Rooms {
} }
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result<Arc<EventId>> { pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result<EventId> {
if let Some(id) = self if let Some(id) = self
.shorteventid_cache .shorteventid_cache
.lock() .lock()
.unwrap() .unwrap()
.get_mut(&shorteventid) .get_mut(&shorteventid)
{ {
return Ok(Arc::clone(id)); return Ok(id.clone());
} }
let bytes = self let bytes = self
@@ -858,17 +858,15 @@ impl Rooms {
.get(&shorteventid.to_be_bytes())? .get(&shorteventid.to_be_bytes())?
.ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?;
let event_id = Arc::new( let event_id = EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| {
EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| {
Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") Error::bad_database("EventID in shorteventid_eventid is invalid unicode.")
})?) })?)
.map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?, .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?;
);
self.shorteventid_cache self.shorteventid_cache
.lock() .lock()
.unwrap() .unwrap()
.insert(shorteventid, Arc::clone(&event_id)); .insert(shorteventid, event_id.clone());
Ok(event_id) Ok(event_id)
} }
@@ -935,7 +933,7 @@ impl Rooms {
room_id: &RoomId, room_id: &RoomId,
event_type: &EventType, event_type: &EventType,
state_key: &str, state_key: &str,
) -> Result<Option<Arc<EventId>>> { ) -> Result<Option<EventId>> {
if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? {
self.state_get_id(current_shortstatehash, event_type, state_key) self.state_get_id(current_shortstatehash, event_type, state_key)
} else { } else {
@@ -1543,7 +1541,7 @@ impl Rooms {
let start = Instant::now(); let start = Instant::now();
let count = server_server::get_auth_chain( let count = server_server::get_auth_chain(
&room_id, &room_id,
vec![Arc::new(event_id)], vec![event_id],
db, db,
)? )?
.count(); .count();

View file

@@ -1006,12 +1006,7 @@ pub(crate) async fn handle_incoming_pdu<'a>(
// 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events
let mut graph = HashMap::new(); let mut graph = HashMap::new();
let mut eventid_info = HashMap::new(); let mut eventid_info = HashMap::new();
let mut todo_outlier_stack = incoming_pdu let mut todo_outlier_stack = incoming_pdu.prev_events.clone();
.prev_events
.iter()
.cloned()
.map(Arc::new)
.collect::<Vec<_>>();
let mut amount = 0; let mut amount = 0;
@@ -1030,7 +1025,7 @@ pub(crate) async fn handle_incoming_pdu<'a>(
if amount > 100 { if amount > 100 {
// Max limit reached // Max limit reached
warn!("Max prev event limit reached!"); warn!("Max prev event limit reached!");
graph.insert((*prev_event_id).clone(), HashSet::new()); graph.insert(prev_event_id.clone(), HashSet::new());
continue; continue;
} }
@@ -1047,27 +1042,27 @@ pub(crate) async fn handle_incoming_pdu<'a>(
amount += 1; amount += 1;
for prev_prev in &pdu.prev_events { for prev_prev in &pdu.prev_events {
if !graph.contains_key(prev_prev) { if !graph.contains_key(prev_prev) {
todo_outlier_stack.push(dbg!(Arc::new(prev_prev.clone()))); todo_outlier_stack.push(dbg!(prev_prev.clone()));
} }
} }
graph.insert( graph.insert(
(*prev_event_id).clone(), prev_event_id.clone(),
pdu.prev_events.iter().cloned().collect(), pdu.prev_events.iter().cloned().collect(),
); );
eventid_info.insert(prev_event_id.clone(), (pdu, json)); eventid_info.insert(prev_event_id.clone(), (pdu, json));
} else { } else {
// Time based check failed // Time based check failed
graph.insert((*prev_event_id).clone(), HashSet::new()); graph.insert(prev_event_id.clone(), HashSet::new());
eventid_info.insert(prev_event_id.clone(), (pdu, json)); eventid_info.insert(prev_event_id.clone(), (pdu, json));
} }
} else { } else {
// Get json failed // Get json failed
graph.insert((*prev_event_id).clone(), HashSet::new()); graph.insert(prev_event_id.clone(), HashSet::new());
} }
} else { } else {
// Fetch and handle failed // Fetch and handle failed
graph.insert((*prev_event_id).clone(), HashSet::new()); graph.insert(prev_event_id.clone(), HashSet::new());
} }
} }
@@ -1082,7 +1077,7 @@ pub(crate) async fn handle_incoming_pdu<'a>(
MilliSecondsSinceUnixEpoch( MilliSecondsSinceUnixEpoch(
eventid_info eventid_info
.get(event_id) .get(event_id)
.map_or_else(|| uint!(0), |info| info.0.origin_server_ts), .map_or_else(|| uint!(0), |info| info.0.origin_server_ts.clone()),
), ),
ruma::event_id!("$notimportant"), ruma::event_id!("$notimportant"),
)) ))
@@ -1207,12 +1202,7 @@ fn handle_outlier_pdu<'a>(
fetch_and_handle_outliers( fetch_and_handle_outliers(
db, db,
origin, origin,
&incoming_pdu &incoming_pdu.auth_events,
.auth_events
.iter()
.cloned()
.map(Arc::new)
.collect::<Vec<_>>(),
&create_event, &create_event,
&room_id, &room_id,
pub_key_map, pub_key_map,
@@ -1281,7 +1271,7 @@ fn handle_outlier_pdu<'a>(
if !state_res::event_auth::auth_check( if !state_res::event_auth::auth_check(
&room_version, &room_version,
&incoming_pdu, &incoming_pdu,
previous_create, previous_create.clone(),
None, // TODO: third party invite None, // TODO: third party invite
|k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone), |k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone),
) )
@@ -1368,7 +1358,7 @@ async fn upgrade_outlier_to_timeline_pdu(
.get_or_create_shortstatekey(&prev_pdu.kind, state_key, &db.globals) .get_or_create_shortstatekey(&prev_pdu.kind, state_key, &db.globals)
.map_err(|_| "Failed to create shortstatekey.".to_owned())?; .map_err(|_| "Failed to create shortstatekey.".to_owned())?;
state.insert(shortstatekey, Arc::new(prev_event.clone())); state.insert(shortstatekey, prev_event.clone());
// Now it's the state after the pdu // Now it's the state after the pdu
} }
@@ -1411,7 +1401,7 @@ async fn upgrade_outlier_to_timeline_pdu(
.rooms .rooms
.get_or_create_shortstatekey(&prev_event.kind, state_key, &db.globals) .get_or_create_shortstatekey(&prev_event.kind, state_key, &db.globals)
.map_err(|_| "Failed to create shortstatekey.".to_owned())?; .map_err(|_| "Failed to create shortstatekey.".to_owned())?;
leaf_state.insert(shortstatekey, Arc::new(prev_event.event_id.clone())); leaf_state.insert(shortstatekey, prev_event.event_id.clone());
// Now it's the state after the pdu // Now it's the state after the pdu
} }
@@ -1432,14 +1422,14 @@ async fn upgrade_outlier_to_timeline_pdu(
db, db,
) )
.map_err(|_| "Failed to load auth chain.".to_owned())? .map_err(|_| "Failed to load auth chain.".to_owned())?
.map(|event_id| (*event_id).clone()) .map(|event_id| event_id.clone())
.collect(), .collect(),
); );
} }
let fork_states = &fork_states let fork_states = &fork_states
.into_iter() .into_iter()
.map(|map| map.into_iter().map(|(k, id)| (k, (*id).clone())).collect()) .map(|map| map.into_iter().map(|(k, id)| (k, id.clone())).collect())
.collect::<Vec<_>>(); .collect::<Vec<_>>();
state_at_incoming_event = match state_res::StateResolution::resolve( state_at_incoming_event = match state_res::StateResolution::resolve(
@@ -1463,7 +1453,7 @@ async fn upgrade_outlier_to_timeline_pdu(
.rooms .rooms
.get_or_create_shortstatekey(&event_type, &state_key, &db.globals) .get_or_create_shortstatekey(&event_type, &state_key, &db.globals)
.map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?;
Ok((shortstatekey, Arc::new(event_id))) Ok((shortstatekey, event_id))
}) })
.collect::<StdResult<_, String>>()?, .collect::<StdResult<_, String>>()?,
), ),
@@ -1496,11 +1486,7 @@ async fn upgrade_outlier_to_timeline_pdu(
let state_vec = fetch_and_handle_outliers( let state_vec = fetch_and_handle_outliers(
&db, &db,
origin, origin,
&res.pdu_ids &res.pdu_ids,
.iter()
.cloned()
.map(Arc::new)
.collect::<Vec<_>>(),
&create_event, &create_event,
&room_id, &room_id,
pub_key_map, pub_key_map,
@@ -1521,7 +1507,7 @@ async fn upgrade_outlier_to_timeline_pdu(
match state.entry(shortstatekey) { match state.entry(shortstatekey) {
btree_map::Entry::Vacant(v) => { btree_map::Entry::Vacant(v) => {
v.insert(Arc::new(pdu.event_id.clone())); v.insert(pdu.event_id.clone());
} }
btree_map::Entry::Occupied(_) => return Err( btree_map::Entry::Occupied(_) => return Err(
"State event's type and state_key combination exists multiple times." "State event's type and state_key combination exists multiple times."
@@ -1537,9 +1523,7 @@ async fn upgrade_outlier_to_timeline_pdu(
.map_err(|_| "Failed to talk to db.")? .map_err(|_| "Failed to talk to db.")?
.expect("Room exists"); .expect("Room exists");
if state.get(&create_shortstatekey).map(|id| id.as_ref()) if state.get(&create_shortstatekey) != Some(&create_event.event_id) {
!= Some(&create_event.event_id)
{
return Err("Incoming event refers to wrong create event.".to_owned()); return Err("Incoming event refers to wrong create event.".to_owned());
} }
@@ -1734,7 +1718,7 @@ async fn upgrade_outlier_to_timeline_pdu(
.rooms .rooms
.get_or_create_shortstatekey(&leaf_pdu.kind, state_key, &db.globals) .get_or_create_shortstatekey(&leaf_pdu.kind, state_key, &db.globals)
.map_err(|_| "Failed to create shortstatekey.".to_owned())?; .map_err(|_| "Failed to create shortstatekey.".to_owned())?;
leaf_state.insert(shortstatekey, Arc::new(leaf_pdu.event_id.clone())); leaf_state.insert(shortstatekey, leaf_pdu.event_id.clone());
// Now it's the state after the pdu // Now it's the state after the pdu
} }
@@ -1749,9 +1733,9 @@ async fn upgrade_outlier_to_timeline_pdu(
.get_or_create_shortstatekey(&incoming_pdu.kind, state_key, &db.globals) .get_or_create_shortstatekey(&incoming_pdu.kind, state_key, &db.globals)
.map_err(|_| "Failed to create shortstatekey.".to_owned())?; .map_err(|_| "Failed to create shortstatekey.".to_owned())?;
state_after.insert(shortstatekey, Arc::new(incoming_pdu.event_id.clone())); state_after.insert(shortstatekey, incoming_pdu.event_id.clone());
} }
fork_states.push(state_after); fork_states.push(state_after.clone());
let mut update_state = false; let mut update_state = false;
// 14. Use state resolution to find new room state // 14. Use state resolution to find new room state
@@ -1781,7 +1765,7 @@ async fn upgrade_outlier_to_timeline_pdu(
db, db,
) )
.map_err(|_| "Failed to load auth chain.".to_owned())? .map_err(|_| "Failed to load auth chain.".to_owned())?
.map(|event_id| (*event_id).clone()) .map(|event_id| event_id.clone())
.collect(), .collect(),
); );
} }
@@ -1790,11 +1774,7 @@ async fn upgrade_outlier_to_timeline_pdu(
.into_iter() .into_iter()
.map(|map| { .map(|map| {
map.into_iter() map.into_iter()
.map(|(k, id)| { .map(|(k, id)| db.rooms.get_statekey_from_short(k).map(|k| (k, id.clone())))
db.rooms
.get_statekey_from_short(k)
.map(|k| (k, (*id).clone()))
})
.collect::<Result<StateMap<_>>>() .collect::<Result<StateMap<_>>>()
}) })
.collect::<Result<Vec<_>>>() .collect::<Result<Vec<_>>>()
@@ -1879,7 +1859,7 @@ async fn upgrade_outlier_to_timeline_pdu(
pub(crate) fn fetch_and_handle_outliers<'a>( pub(crate) fn fetch_and_handle_outliers<'a>(
db: &'a Database, db: &'a Database,
origin: &'a ServerName, origin: &'a ServerName,
events: &'a [Arc<EventId>], events: &'a [EventId],
create_event: &'a PduEvent, create_event: &'a PduEvent,
room_id: &'a RoomId, room_id: &'a RoomId,
pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, String>>>, pub_key_map: &'a RwLock<BTreeMap<String, BTreeMap<String, String>>>,
@@ -1934,12 +1914,12 @@ pub(crate) fn fetch_and_handle_outliers<'a>(
match crate::pdu::gen_event_id_canonical_json(&res.pdu) { match crate::pdu::gen_event_id_canonical_json(&res.pdu) {
Ok(t) => t, Ok(t) => t,
Err(_) => { Err(_) => {
back_off((**id).clone()); back_off(id.clone());
continue; continue;
} }
}; };
if calculated_event_id != **id { if calculated_event_id != *id {
warn!("Server didn't return event id we requested: requested: {}, we got {}. Event: {:?}", warn!("Server didn't return event id we requested: requested: {}, we got {}. Event: {:?}",
id, calculated_event_id, &res.pdu); id, calculated_event_id, &res.pdu);
} }
@@ -1959,14 +1939,14 @@ pub(crate) fn fetch_and_handle_outliers<'a>(
Ok((pdu, json)) => (pdu, Some(json)), Ok((pdu, json)) => (pdu, Some(json)),
Err(e) => { Err(e) => {
warn!("Authentication of event {} failed: {:?}", id, e); warn!("Authentication of event {} failed: {:?}", id, e);
back_off((**id).clone()); back_off(id.clone());
continue; continue;
} }
} }
} }
Err(_) => { Err(_) => {
warn!("Failed to fetch event: {}", id); warn!("Failed to fetch event: {}", id);
back_off((**id).clone()); back_off(id.clone());
continue; continue;
} }
} }
@@ -2238,9 +2218,9 @@ fn append_incoming_pdu(
#[tracing::instrument(skip(starting_events, db))] #[tracing::instrument(skip(starting_events, db))]
pub(crate) fn get_auth_chain<'a>( pub(crate) fn get_auth_chain<'a>(
room_id: &RoomId, room_id: &RoomId,
starting_events: Vec<Arc<EventId>>, starting_events: Vec<EventId>,
db: &'a Database, db: &'a Database,
) -> Result<impl Iterator<Item = Arc<EventId>> + 'a> { ) -> Result<impl Iterator<Item = EventId> + 'a> {
const NUM_BUCKETS: usize = 50; const NUM_BUCKETS: usize = 50;
let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS]; let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS];
@@ -2516,12 +2496,12 @@ pub fn get_event_authorization_route(
return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found.")); return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found."));
} }
let auth_chain_ids = get_auth_chain(&room_id, vec![Arc::new(body.event_id.clone())], &db)?; let auth_chain_ids = get_auth_chain(&room_id, vec![body.event_id.clone()], &db)?;
Ok(get_event_authorization::v1::Response { Ok(get_event_authorization::v1::Response {
auth_chain: auth_chain_ids auth_chain: auth_chain_ids
.filter_map(|id| db.rooms.get_pdu_json(&id).ok()?) .filter_map(|id| Some(db.rooms.get_pdu_json(&id).ok()??))
.map(PduEvent::convert_to_outgoing_federation_event) .map(|event| PduEvent::convert_to_outgoing_federation_event(event))
.collect(), .collect(),
} }
.into()) .into())
@@ -2574,7 +2554,7 @@ pub fn get_room_state_route(
}) })
.collect(); .collect();
let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::new(body.event_id.clone())], &db)?; let auth_chain_ids = get_auth_chain(&body.room_id, vec![body.event_id.clone()], &db)?;
Ok(get_room_state::v1::Response { Ok(get_room_state::v1::Response {
auth_chain: auth_chain_ids auth_chain: auth_chain_ids
@@ -2630,13 +2610,13 @@ pub fn get_room_state_ids_route(
.rooms .rooms
.state_full_ids(shortstatehash)? .state_full_ids(shortstatehash)?
.into_iter() .into_iter()
.map(|(_, id)| (*id).clone()) .map(|(_, id)| id)
.collect(); .collect();
let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::new(body.event_id.clone())], &db)?; let auth_chain_ids = get_auth_chain(&body.room_id, vec![body.event_id.clone()], &db)?;
Ok(get_room_state_ids::v1::Response { Ok(get_room_state_ids::v1::Response {
auth_chain_ids: auth_chain_ids.map(|id| (*id).clone()).collect(), auth_chain_ids: auth_chain_ids.collect(),
pdu_ids, pdu_ids,
} }
.into()) .into())