
Fix formatting.

Signed-off-by: Jonas Zohren <git-pbkyr@jzohren.de>
Jonas Zohren 2021-08-26 15:36:33 +02:00
parent e8140cda33
commit dd14270a6e
3 changed files with 77 additions and 77 deletions


@@ -1,3 +1,4 @@
+use std::hash::Hash;
 use std::{
     collections::{BTreeMap, HashMap, HashSet},
     convert::{TryFrom, TryInto},
@@ -8,13 +9,12 @@ use std::{
     path::Path,
     sync::{Arc, Mutex, RwLock},
 };
-use std::hash::Hash;
 
 use directories::ProjectDirs;
 use lru_cache::LruCache;
 use rocket::{
     futures::{channel::mpsc, stream::FuturesUnordered, StreamExt},
-    outcome::{IntoOutcome, try_outcome},
+    outcome::{try_outcome, IntoOutcome},
     request::{FromRequest, Request},
     Shutdown, State,
 };
@@ -25,7 +25,7 @@ use tracing::{debug, error, warn};
 
 use abstraction::DatabaseEngine;
 
-use crate::{Error, Result, utils};
+use crate::{utils, Error, Result};
 
 use self::proxy::ProxyConfig;
@@ -178,27 +178,27 @@ impl Database {
     fn check_sled_or_sqlite_db(config: &Config) -> Result<()> {
         #[cfg(feature = "backend_sqlite")]
         {
             let path = Path::new(&config.database_path);
 
             let sled_exists = path.join("db").exists();
             let sqlite_exists = path.join("conduit.db").exists();
             if sled_exists {
                 if sqlite_exists {
                     // most likely an in-place directory, only warn
                     warn!("Both sled and sqlite databases are detected in database directory");
                     warn!("Currently running from the sqlite database, but consider removing sled database files to free up space")
                 } else {
                     error!(
                         "Sled database detected, conduit now uses sqlite for database operations"
                     );
                     error!("This database must be converted to sqlite, go to https://github.com/ShadowJonathan/conduit_toolbox#conduit_sled_to_sqlite");
                     return Err(Error::bad_config(
                         "sled database detected, migrate to sqlite",
                     ));
-                }
                 }
             }
+        }
 
         Ok(())
     }
@@ -752,9 +752,9 @@ impl Database {
         drop(guard);
 
         #[cfg(feature = "sqlite")]
         {
             Self::start_wal_clean_task(Arc::clone(&db), &config).await;
         }
 
         Ok(db)
     }
@@ -905,7 +905,7 @@ impl Database {
         tokio::spawn(async move {
             let mut i = interval(timer_interval);
             #[cfg(unix)]
             let mut s = signal(SignalKind::hangup()).unwrap();
 
             loop {
                 #[cfg(unix)]
@@ -916,13 +916,12 @@ impl Database {
                     _ = s.recv() => {
                         info!("wal-trunc: Received SIGHUP");
                     }
-                }
-                ;
+                };
                 #[cfg(not(unix))]
                 {
                     i.tick().await;
                     info!("wal-trunc: Timer ticked")
                 }
 
                 let start = Instant::now();
                 if let Err(e) = db.read().await.flush_wal() {
@@ -936,7 +935,9 @@ impl Database {
 
     /// Measures memory usage in bytes and how full the caches are in percent for all caches in the Database struct.
     pub fn get_cache_usage(&mut self) -> Result<CacheUsageStatistics> {
-        fn memory_usage_of_locked_cache<K: Eq + Hash, V>(cache: &mut Mutex<LruCache<K, V>>) -> usize {
+        fn memory_usage_of_locked_cache<K: Eq + Hash, V>(
+            cache: &mut Mutex<LruCache<K, V>>,
+        ) -> usize {
             let raw_cache = cache.lock().unwrap();
             let mut cache_items_size_sum: usize = 0;
             for cache_item in raw_cache.iter() {
@@ -953,44 +954,43 @@ impl Database {
             cache.lock().unwrap().capacity()
         }
 
-        return
-            Ok(CacheUsageStatistics {
-                pdu_cache: (
-                    memory_usage_of_locked_cache(&mut self.rooms.pdu_cache),
-                    items_in_locked_cache(&mut self.rooms.pdu_cache),
-                    capacity_of_locked_cache(&mut self.rooms.pdu_cache)
-                ),
-                auth_chain_cache: (
-                    memory_usage_of_locked_cache(&mut self.rooms.auth_chain_cache),
-                    items_in_locked_cache(&mut self.rooms.auth_chain_cache),
-                    capacity_of_locked_cache(&mut self.rooms.auth_chain_cache)
-                ),
-                shorteventid_cache: (
-                    memory_usage_of_locked_cache(&mut self.rooms.shorteventid_cache),
-                    items_in_locked_cache(&mut self.rooms.shorteventid_cache),
-                    capacity_of_locked_cache(&mut self.rooms.shorteventid_cache)
-                ),
-                eventidshort_cache: (
-                    memory_usage_of_locked_cache(&mut self.rooms.eventidshort_cache),
-                    items_in_locked_cache(&mut self.rooms.eventidshort_cache),
-                    capacity_of_locked_cache(&mut self.rooms.eventidshort_cache)
-                ),
-                statekeyshort_cache: (
-                    memory_usage_of_locked_cache(&mut self.rooms.statekeyshort_cache),
-                    items_in_locked_cache(&mut self.rooms.statekeyshort_cache),
-                    capacity_of_locked_cache(&mut self.rooms.statekeyshort_cache)
-                ),
-                shortstatekey_cache: (
-                    memory_usage_of_locked_cache(&mut self.rooms.shortstatekey_cache),
-                    items_in_locked_cache(&mut self.rooms.shortstatekey_cache),
-                    capacity_of_locked_cache(&mut self.rooms.shortstatekey_cache)
-                ),
-                stateinfo_cache: (
-                    memory_usage_of_locked_cache(&mut self.rooms.stateinfo_cache),
-                    items_in_locked_cache(&mut self.rooms.stateinfo_cache),
-                    capacity_of_locked_cache(&mut self.rooms.stateinfo_cache)
-                ),
-            });
+        return Ok(CacheUsageStatistics {
+            pdu_cache: (
+                memory_usage_of_locked_cache(&mut self.rooms.pdu_cache),
+                items_in_locked_cache(&mut self.rooms.pdu_cache),
+                capacity_of_locked_cache(&mut self.rooms.pdu_cache),
+            ),
+            auth_chain_cache: (
+                memory_usage_of_locked_cache(&mut self.rooms.auth_chain_cache),
+                items_in_locked_cache(&mut self.rooms.auth_chain_cache),
+                capacity_of_locked_cache(&mut self.rooms.auth_chain_cache),
+            ),
+            shorteventid_cache: (
+                memory_usage_of_locked_cache(&mut self.rooms.shorteventid_cache),
+                items_in_locked_cache(&mut self.rooms.shorteventid_cache),
+                capacity_of_locked_cache(&mut self.rooms.shorteventid_cache),
+            ),
+            eventidshort_cache: (
+                memory_usage_of_locked_cache(&mut self.rooms.eventidshort_cache),
+                items_in_locked_cache(&mut self.rooms.eventidshort_cache),
+                capacity_of_locked_cache(&mut self.rooms.eventidshort_cache),
+            ),
+            statekeyshort_cache: (
+                memory_usage_of_locked_cache(&mut self.rooms.statekeyshort_cache),
+                items_in_locked_cache(&mut self.rooms.statekeyshort_cache),
+                capacity_of_locked_cache(&mut self.rooms.statekeyshort_cache),
+            ),
+            shortstatekey_cache: (
+                memory_usage_of_locked_cache(&mut self.rooms.shortstatekey_cache),
+                items_in_locked_cache(&mut self.rooms.shortstatekey_cache),
+                capacity_of_locked_cache(&mut self.rooms.shortstatekey_cache),
+            ),
+            stateinfo_cache: (
+                memory_usage_of_locked_cache(&mut self.rooms.stateinfo_cache),
+                items_in_locked_cache(&mut self.rooms.stateinfo_cache),
+                capacity_of_locked_cache(&mut self.rooms.stateinfo_cache),
+            ),
+        });
     }
 }


@@ -5,13 +5,13 @@ use std::{
 use rocket::futures::{channel::mpsc, stream::StreamExt};
 use ruma::{
-    events::{EventType, room::message},
+    events::{room::message, EventType},
     UserId,
 };
 use tokio::sync::{MutexGuard, RwLock, RwLockWriteGuard};
 use tracing::warn;
 
-use crate::{Database, pdu::PduBuilder};
+use crate::{pdu::PduBuilder, Database};
 
 pub enum AdminCommand {
     RegisterAppservice(serde_yaml::Value),


@@ -12,24 +12,24 @@ use ring::digest;
 use rocket::http::RawStr;
 use ruma::{
     api::{client::error::ErrorKind, federation},
-    EventId,
     events::{
-        AnyStrippedStateEvent, AnySyncStateEvent,
-        EventType,
-        ignored_user_list, push_rules, room::{
+        ignored_user_list, push_rules,
+        room::{
             create::CreateEventContent, member, message, power_levels::PowerLevelsEventContent,
         },
+        AnyStrippedStateEvent, AnySyncStateEvent, EventType,
     },
     push::{self, Action, Tweak},
-    RoomAliasId,
-    RoomId, RoomVersionId, serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, ServerName, state_res::{self, RoomVersion, StateMap}, uint, UserId,
+    serde::{CanonicalJsonObject, CanonicalJsonValue, Raw},
+    state_res::{self, RoomVersion, StateMap},
+    uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
 };
 use tokio::sync::MutexGuard;
 use tracing::{error, warn};
 
 pub use edus::RoomEdus;
 
-use crate::{Database, Error, pdu::PduBuilder, PduEvent, Result, utils};
+use crate::{pdu::PduBuilder, utils, Database, Error, PduEvent, Result};
 
 use super::{abstraction::Tree, admin::AdminCommand, pusher};