mirror of https://gitlab.com/famedly/conduit.git synced 2025-06-27 16:35:59 +00:00

chutulu is my copilot

Jonathan de Jong 2021-07-03 21:19:49 +02:00
parent 22e3416745
commit 0753076e94
7 changed files with 262 additions and 55 deletions

Cargo.lock (generated)

@@ -255,6 +255,7 @@ dependencies = [
  "image",
  "jsonwebtoken",
  "log",
+ "num_cpus",
  "lru-cache",
  "opentelemetry",
  "opentelemetry-jaeger",

Cargo.toml

@@ -77,13 +77,14 @@ lru-cache = "0.1.2"
 rusqlite = { version = "0.25.3", optional = true }
 parking_lot = { version = "0.11.1", optional = true }
 crossbeam = { version = "0.8.1", optional = true }
+num_cpus = { version = "1.13.0", optional = true }
 
 [features]
 default = ["conduit_bin", "backend_sqlite"]
 backend_sled = ["sled"]
 backend_rocksdb = ["rocksdb"]
 backend_sqlite = ["sqlite"]
-sqlite = ["rusqlite", "parking_lot", "crossbeam"]
+sqlite = ["rusqlite", "parking_lot", "crossbeam", "num_cpus"]
 conduit_bin = [] # TODO: add rocket to this when it is optional
 
 [[bin]]

src/database.rs

@@ -93,6 +93,7 @@ pub type Engine = abstraction::rocksdb::RocksDbEngine;
 pub type Engine = abstraction::sqlite::SqliteEngine;
 
 pub struct Database {
+    _db: Arc<Engine>,
     pub globals: globals::Globals,
     pub users: users::Users,
     pub uiaa: uiaa::Uiaa,
@@ -132,6 +133,7 @@ impl Database {
         let (sending_sender, sending_receiver) = mpsc::unbounded();
 
         let db = Arc::new(Self {
+            _db: builder.clone(),
            users: users::Users {
                userid_password: builder.open_tree("userid_password")?,
                userid_displayname: builder.open_tree("userid_displayname")?,
@@ -421,8 +423,12 @@ impl Database {
     }
 
     pub async fn flush(&self) -> Result<()> {
-        // noop while we don't use sled 1.0
-        //self._db.flush_async().await?;
-        Ok(())
+        let start = std::time::Instant::now();
+
+        let res = self._db.flush();
+
+        log::debug!("flush: took {:?}", start.elapsed());
+
+        res
     }
 }

src/database/abstraction.rs

@@ -15,6 +15,7 @@ pub mod sqlite;
 pub trait DatabaseEngine: Sized {
     fn open(config: &Config) -> Result<Arc<Self>>;
     fn open_tree(self: &Arc<Self>, name: &'static str) -> Result<Arc<dyn Tree>>;
+    fn flush(self: &Arc<Self>) -> Result<()>;
 }
 
 pub trait Tree: Send + Sync {
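
Note: every backend now has to provide flush, and Database::flush (in the src/database.rs hunk above) just times the call and delegates to it. A minimal, self-contained sketch of that wiring, with dummy types and names rather than Conduit's real ones:

use std::sync::Arc;
use std::time::Instant;

type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;

trait DatabaseEngine: Sized {
    fn flush(self: &Arc<Self>) -> Result<()>;
}

struct DummyEngine;

impl DatabaseEngine for DummyEngine {
    fn flush(self: &Arc<Self>) -> Result<()> {
        // A real backend would force pending writes to disk here.
        Ok(())
    }
}

struct Database<E: DatabaseEngine> {
    _db: Arc<E>,
}

impl<E: DatabaseEngine> Database<E> {
    fn flush(&self) -> Result<()> {
        let start = Instant::now();
        let res = self._db.flush();
        println!("flush: took {:?}", start.elapsed());
        res
    }
}

fn main() -> Result<()> {
    let db = Database { _db: Arc::new(DummyEngine) };
    db.flush()
}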

src/database/abstraction/sqlite.rs

@ -1,19 +1,27 @@
use std::{future::Future, pin::Pin, sync::Arc, thread}; use std::{
future::Future,
ops::Deref,
path::{Path, PathBuf},
pin::Pin,
sync::{Arc, Weak},
thread,
time::{Duration, Instant},
};
use crate::{database::Config, Result}; use crate::{database::Config, Result};
use super::{DatabaseEngine, Tree}; use super::{DatabaseEngine, Tree};
use std::{collections::BTreeMap, sync::RwLock}; use std::collections::BTreeMap;
use log::debug;
use crossbeam::channel::{bounded, Sender as ChannelSender}; use crossbeam::channel::{bounded, Sender as ChannelSender};
use parking_lot::{Mutex, MutexGuard}; use parking_lot::{Mutex, MutexGuard, RwLock};
use rusqlite::{params, Connection, OptionalExtension}; use rusqlite::{params, Connection, DatabaseName::Main, OptionalExtension};
use tokio::sync::oneshot::Sender; use tokio::sync::oneshot::Sender;
type SqliteHandle = Arc<Mutex<Connection>>;
// const SQL_CREATE_TABLE: &str = // const SQL_CREATE_TABLE: &str =
// "CREATE TABLE IF NOT EXISTS {} {{ \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL }}"; // "CREATE TABLE IF NOT EXISTS {} {{ \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL }}";
// const SQL_SELECT: &str = "SELECT value FROM {} WHERE key = ?"; // const SQL_SELECT: &str = "SELECT value FROM {} WHERE key = ?";
@@ -25,23 +33,137 @@ type SqliteHandle = Arc<Mutex<Connection>>;
 // const SQL_SELECT_ITER_FROM_BACKWARDS: &str =
 //     "SELECT key, value FROM {} WHERE key <= ? ORDER BY DESC";
 
+struct Pool {
+    writer: Mutex<Connection>,
+    readers: Vec<Mutex<Connection>>,
+    reader_rwlock: RwLock<()>,
+    path: PathBuf,
+}
+
+pub const MILLI: Duration = Duration::from_millis(1);
+
+enum HoldingConn<'a> {
+    FromGuard(MutexGuard<'a, Connection>),
+    FromOwned(Connection),
+}
+
+impl<'a> Deref for HoldingConn<'a> {
+    type Target = Connection;
+
+    fn deref(&self) -> &Self::Target {
+        match self {
+            HoldingConn::FromGuard(guard) => guard.deref(),
+            HoldingConn::FromOwned(conn) => conn,
+        }
+    }
+}
+
+impl Pool {
+    fn new<P: AsRef<Path>>(path: P, num_readers: usize) -> Result<Self> {
+        let writer = Mutex::new(Self::prepare_conn(&path)?);
+
+        let mut readers = Vec::new();
+
+        for _ in 0..num_readers {
+            readers.push(Mutex::new(Self::prepare_conn(&path)?))
+        }
+
+        Ok(Self {
+            writer,
+            readers,
+            reader_rwlock: RwLock::new(()),
+            path: path.as_ref().to_path_buf(),
+        })
+    }
+
+    fn prepare_conn<P: AsRef<Path>>(path: P) -> Result<Connection> {
+        let conn = Connection::open(path)?;
+
+        conn.pragma_update(Some(Main), "journal_mode", &"WAL".to_owned())?;
+        // conn.pragma_update(Some(Main), "wal_autocheckpoint", &250)?;
+        // conn.pragma_update(Some(Main), "wal_checkpoint", &"FULL".to_owned())?;
+        conn.pragma_update(Some(Main), "synchronous", &"OFF".to_owned())?;
+
+        Ok(conn)
+    }
+
+    fn write_lock(&self) -> MutexGuard<'_, Connection> {
+        self.writer.lock()
+    }
+
+    fn read_lock(&self) -> HoldingConn<'_> {
+        let _guard = self.reader_rwlock.read();
+
+        for r in &self.readers {
+            if let Some(reader) = r.try_lock() {
+                return HoldingConn::FromGuard(reader);
+            }
+        }
+
+        drop(_guard);
+
+        log::warn!("all readers locked, creating spillover reader...");
+
+        let spilled = Self::prepare_conn(&self.path).unwrap();
+
+        return HoldingConn::FromOwned(spilled);
+    }
+}
+
 pub struct SqliteEngine {
-    handle: SqliteHandle,
+    pool: Pool,
+    iterator_lock: RwLock<()>,
 }
 
 impl DatabaseEngine for SqliteEngine {
     fn open(config: &Config) -> Result<Arc<Self>> {
-        let conn = Connection::open(format!("{}/conduit.db", &config.database_path))?;
+        let pool = Pool::new(
+            format!("{}/conduit.db", &config.database_path),
+            num_cpus::get(),
+        )?;
 
-        conn.pragma_update(None, "journal_mode", &"WAL".to_owned())?;
+        pool.write_lock()
+            .execute("CREATE TABLE IF NOT EXISTS _noop (\"key\" INT)", params![])?;
 
-        let handle = Arc::new(Mutex::new(conn));
+        let arc = Arc::new(SqliteEngine {
+            pool,
+            iterator_lock: RwLock::new(()),
+        });
 
-        Ok(Arc::new(SqliteEngine { handle }))
+        let weak: Weak<SqliteEngine> = Arc::downgrade(&arc);
+
+        thread::spawn(move || {
+            let r = crossbeam::channel::tick(Duration::from_secs(60));
+
+            let weak = weak;
+
+            loop {
+                let _ = r.recv();
+
+                if let Some(arc) = Weak::upgrade(&weak) {
+                    log::warn!("wal-trunc: locking...");
+                    let iterator_guard = arc.iterator_lock.write();
+                    let read_guard = arc.pool.reader_rwlock.write();
+                    log::warn!("wal-trunc: locked, flushing...");
+                    let start = Instant::now();
+                    arc.flush_wal().unwrap();
+                    log::warn!("wal-trunc: locked, flushed in {:?}", start.elapsed());
+                    drop(read_guard);
+                    drop(iterator_guard);
+                } else {
+                    break;
+                }
+            }
+        });
+
+        Ok(arc)
     }
 
     fn open_tree(self: &Arc<Self>, name: &str) -> Result<Arc<dyn Tree>> {
-        self.handle.lock().execute(format!("CREATE TABLE IF NOT EXISTS {} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )", name).as_str(), [])?;
+        self.pool.write_lock().execute(format!("CREATE TABLE IF NOT EXISTS {} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )", name).as_str(), [])?;
 
         Ok(Arc::new(SqliteTable {
             engine: Arc::clone(self),
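
Note: the background thread spawned in open() above only keeps a Weak reference to the engine, so it stops on its next tick once every strong Arc has been dropped. A self-contained sketch of that shutdown pattern (simplified, illustrative names, sleep in place of crossbeam's tick channel):

use std::sync::{Arc, Weak};
use std::thread;
use std::time::Duration;

struct Engine;

impl Engine {
    fn maintenance(&self) {
        println!("running periodic maintenance");
    }
}

fn spawn_maintenance(engine: &Arc<Engine>) -> thread::JoinHandle<()> {
    // Hold only a Weak reference so the thread does not keep the engine alive.
    let weak: Weak<Engine> = Arc::downgrade(engine);
    thread::spawn(move || loop {
        thread::sleep(Duration::from_millis(100));
        match Weak::upgrade(&weak) {
            // Engine still alive: do the periodic work.
            Some(engine) => engine.maintenance(),
            // All strong references gone: stop the thread.
            None => break,
        }
    })
}

fn main() {
    let engine = Arc::new(Engine);
    let handle = spawn_maintenance(&engine);
    thread::sleep(Duration::from_millis(350));
    drop(engine); // the maintenance thread notices this on its next tick
    handle.join().unwrap();
}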
@@ -49,6 +171,40 @@ impl DatabaseEngine for SqliteEngine {
             watchers: RwLock::new(BTreeMap::new()),
         }))
     }
+
+    fn flush(self: &Arc<Self>) -> Result<()> {
+        self.pool
+            .write_lock()
+            .execute_batch(
+                "
+                PRAGMA synchronous=FULL;
+                BEGIN;
+                DELETE FROM _noop;
+                INSERT INTO _noop VALUES (1);
+                COMMIT;
+                PRAGMA synchronous=OFF;
+                ",
+            )
+            .map_err(Into::into)
+    }
+}
+
+impl SqliteEngine {
+    fn flush_wal(self: &Arc<Self>) -> Result<()> {
+        self.pool
+            .write_lock()
+            .execute_batch(
+                "
+                PRAGMA synchronous=FULL; PRAGMA wal_checkpoint=TRUNCATE;
+                BEGIN;
+                DELETE FROM _noop;
+                INSERT INTO _noop VALUES (1);
+                COMMIT;
+                PRAGMA wal_checkpoint=PASSIVE; PRAGMA synchronous=OFF;
+                ",
+            )
+            .map_err(Into::into)
+    }
 }
 
 pub struct SqliteTable {
@@ -60,26 +216,17 @@ pub struct SqliteTable {
 
 type TupleOfBytes = (Vec<u8>, Vec<u8>);
 
 impl SqliteTable {
-    fn get_with_guard(
-        &self,
-        guard: &MutexGuard<'_, Connection>,
-        key: &[u8],
-    ) -> Result<Option<Vec<u8>>> {
+    fn get_with_guard(&self, guard: &Connection, key: &[u8]) -> Result<Option<Vec<u8>>> {
         Ok(guard
             .prepare(format!("SELECT value FROM {} WHERE key = ?", self.name).as_str())?
             .query_row([key], |row| row.get(0))
             .optional()?)
     }
 
-    fn insert_with_guard(
-        &self,
-        guard: &MutexGuard<'_, Connection>,
-        key: &[u8],
-        value: &[u8],
-    ) -> Result<()> {
+    fn insert_with_guard(&self, guard: &Connection, key: &[u8], value: &[u8]) -> Result<()> {
         guard.execute(
             format!(
-                "INSERT OR REPLACE INTO {} (key, value) VALUES (?, ?)",
+                "INSERT INTO {} (key, value) VALUES (?, ?) ON CONFLICT(key) DO UPDATE SET value = excluded.value",
                self.name
            )
            .as_str(),
@@ -88,18 +235,18 @@ impl SqliteTable {
         Ok(())
     }
 
-    fn _iter_from_thread<F>(
-        &self,
-        mutex: Arc<Mutex<Connection>>,
-        f: F,
-    ) -> Box<dyn Iterator<Item = TupleOfBytes> + Send>
+    fn _iter_from_thread<F>(&self, f: F) -> Box<dyn Iterator<Item = TupleOfBytes> + Send>
     where
-        F: (FnOnce(MutexGuard<'_, Connection>, ChannelSender<TupleOfBytes>)) + Send + 'static,
+        F: (for<'a> FnOnce(&'a Connection, ChannelSender<TupleOfBytes>)) + Send + 'static,
     {
         let (s, r) = bounded::<TupleOfBytes>(5);
 
+        let engine = self.engine.clone();
+
         thread::spawn(move || {
-            let _ = f(mutex.lock(), s);
+            let guard = engine.iterator_lock.read();
+            let _ = f(&engine.pool.read_lock(), s);
+            drop(guard);
         });
 
         Box::new(r.into_iter())
@@ -108,7 +255,7 @@ impl SqliteTable {
 
 macro_rules! iter_from_thread {
     ($self:expr, $sql:expr, $param:expr) => {
-        $self._iter_from_thread($self.engine.handle.clone(), move |guard, s| {
+        $self._iter_from_thread(move |guard, s| {
             let _ = guard
                 .prepare($sql)
                 .unwrap()
@@ -122,13 +269,33 @@ macro_rules! iter_from_thread {
 
 impl Tree for SqliteTable {
     fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
-        self.get_with_guard(&self.engine.handle.lock(), key)
+        let guard = self.engine.pool.read_lock();
+
+        // let start = Instant::now();
+
+        let val = self.get_with_guard(&guard, key);
+
+        // debug!("get: took {:?}", start.elapsed());
+        // debug!("get key: {:?}", &key)
+
+        val
     }
 
     fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> {
-        self.insert_with_guard(&self.engine.handle.lock(), key, value)?;
+        {
+            let guard = self.engine.pool.write_lock();
 
-        let watchers = self.watchers.read().unwrap();
+            let start = Instant::now();
+
+            self.insert_with_guard(&guard, key, value)?;
+
+            let elapsed = start.elapsed();
+
+            if elapsed > MILLI {
+                debug!("insert: took {:012?} : {}", elapsed, &self.name);
+            }
+        }
+
+        let watchers = self.watchers.read();
         let mut triggered = Vec::new();
 
         for length in 0..=key.len() {
@@ -140,7 +307,7 @@ impl Tree for SqliteTable {
         drop(watchers);
 
         if !triggered.is_empty() {
-            let mut watchers = self.watchers.write().unwrap();
+            let mut watchers = self.watchers.write();
             for prefix in triggered {
                 if let Some(txs) = watchers.remove(prefix) {
                     for tx in txs {
@@ -154,10 +321,22 @@ impl Tree for SqliteTable {
     }
 
     fn remove(&self, key: &[u8]) -> Result<()> {
-        self.engine.handle.lock().execute(
+        let guard = self.engine.pool.write_lock();
+
+        let start = Instant::now();
+
+        guard.execute(
             format!("DELETE FROM {} WHERE key = ?", self.name).as_str(),
             [key],
         )?;
+
+        let elapsed = start.elapsed();
+
+        if elapsed > MILLI {
+            debug!("remove: took {:012?} : {}", elapsed, &self.name);
+        }
+
+        // debug!("remove key: {:?}", &key);
+
         Ok(())
     }
@@ -201,7 +380,9 @@ impl Tree for SqliteTable {
     }
 
     fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
-        let guard = self.engine.handle.lock();
+        let guard = self.engine.pool.write_lock();
+
+        let start = Instant::now();
 
         let old = self.get_with_guard(&guard, key)?;
@@ -210,11 +391,16 @@ impl Tree for SqliteTable {
 
         self.insert_with_guard(&guard, key, &new)?;
 
+        let elapsed = start.elapsed();
+
+        if elapsed > MILLI {
+            debug!("increment: took {:012?} : {}", elapsed, &self.name);
+        }
+
+        // debug!("increment key: {:?}", &key);
+
         Ok(new)
     }
 
-    // TODO: make this use take_while
     fn scan_prefix<'a>(
         &'a self,
         prefix: Vec<u8>,
@@ -229,7 +415,10 @@ impl Tree for SqliteTable {
         //     .as_str(),
         //     [prefix]
         // )
-        Box::new(self.iter_from(&prefix, false).take_while(move |(key, _)| key.starts_with(&prefix)))
+        Box::new(
+            self.iter_from(&prefix, false)
+                .take_while(move |(key, _)| key.starts_with(&prefix)),
+        )
     }
 
     fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
@@ -237,7 +426,6 @@ impl Tree for SqliteTable {
         self.watchers
             .write()
-            .unwrap()
             .entry(prefix.to_vec())
             .or_default()
             .push(tx);
@@ -249,10 +437,12 @@ impl Tree for SqliteTable {
     }
 
     fn clear(&self) -> Result<()> {
-        self.engine.handle.lock().execute(
-            format!("DELETE FROM {}", self.name).as_str(),
-            [],
-        )?;
+        debug!("clear: running");
+        self.engine
+            .pool
+            .write_lock()
+            .execute(format!("DELETE FROM {}", self.name).as_str(), [])?;
+        debug!("clear: ran");
         Ok(())
     }
 }
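
Note: the core of the new backend is the Pool above: one writer connection behind a Mutex, a fixed set of readers probed with try_lock, and a one-off "spillover" connection when every reader is busy. A stripped-down, self-contained sketch of that selection logic (no SQLite, assumes the parking_lot crate; names are illustrative, not Conduit's):

use parking_lot::{Mutex, MutexGuard};

// The single writer would sit in its own Mutex; this sketch covers the readers.
struct ReaderPool<T> {
    readers: Vec<Mutex<T>>,
    make_reader: fn() -> T,
}

enum Reader<'a, T> {
    Pooled(MutexGuard<'a, T>),
    Spilled(T),
}

impl<T> ReaderPool<T> {
    fn get(&self) -> Reader<'_, T> {
        // Probe the fixed readers without blocking.
        for r in &self.readers {
            if let Some(guard) = r.try_lock() {
                return Reader::Pooled(guard);
            }
        }
        // All pooled readers are busy: open a one-off spillover reader.
        Reader::Spilled((self.make_reader)())
    }
}

fn new_reader() -> u32 {
    0 // stands in for Pool::prepare_conn opening another SQLite connection
}

fn main() {
    let pool = ReaderPool {
        readers: vec![Mutex::new(new_reader()), Mutex::new(new_reader())],
        make_reader: new_reader,
    };

    let _a = pool.get(); // pooled
    let _b = pool.get(); // pooled
    let c = pool.get(); // both readers are held above, so this spills over

    match c {
        Reader::Pooled(_) => println!("got a pooled reader"),
        Reader::Spilled(_) => println!("got a spillover reader"),
    }
}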

src/database/media.rs

@@ -189,7 +189,10 @@ impl Media {
         original_prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail
         original_prefix.push(0xff);
 
-        if let Some((key, _)) = self.mediaid_file.scan_prefix(thumbnail_prefix).next() {
+        if let Some((key, _)) = {
+            /* scoped to explicitly drop iterator */
+            self.mediaid_file.scan_prefix(thumbnail_prefix).next()
+        } {
             // Using saved thumbnail
             let path = globals.get_media_file(&key);
             let mut file = Vec::new();
@@ -224,7 +227,10 @@ impl Media {
                 content_type,
                 file: file.to_vec(),
             }))
-        } else if let Some((key, _)) = self.mediaid_file.scan_prefix(original_prefix).next() {
+        } else if let Some((key, _)) = {
+            /* scoped to explicitly drop iterator */
+            self.mediaid_file.scan_prefix(original_prefix).next()
+        } {
             // Generate a thumbnail
             let path = globals.get_media_file(&key);
             let mut file = Vec::new();

src/database/rooms.rs

@@ -733,6 +733,8 @@ impl Rooms {
             .filter(|user_id| user_id.server_name() == db.globals.server_name())
             .filter(|user_id| !db.users.is_deactivated(user_id).unwrap_or(false))
             .filter(|user_id| self.is_joined(&user_id, &pdu.room_id).unwrap_or(false))
+            .collect::<Vec<_>>()
+        /* to consume iterator */
         {
             // Don't notify the user of their own events
             if user == pdu.sender {
@@ -1209,13 +1211,13 @@ impl Rooms {
             redacts,
         } = pdu_builder;
         // TODO: Make sure this isn't called twice in parallel
-        let prev_events = dbg!(self
+        let prev_events = self
             .get_pdu_leaves(&room_id)?
             .into_iter()
             .take(20)
-            .collect::<Vec<_>>());
+            .collect::<Vec<_>>();
 
-        let create_event = dbg!(self.room_state_get(&room_id, &EventType::RoomCreate, ""))?;
+        let create_event = self.room_state_get(&room_id, &EventType::RoomCreate, "")?;
 
         let create_event_content = create_event
             .as_ref()
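
Note: the added .collect::<Vec<_>>() (marked "to consume iterator" in the hunk above) matters with the new sqlite backend, whose iterators are fed by a worker thread that, presumably, keeps a reader connection and the iterator read-lock until the query is exhausted. Collecting drains the membership list up front instead of while the loop body performs writes. A self-contained sketch of that "drain the channel-backed iterator first" idea (std only, illustrative):

use std::sync::mpsc::sync_channel;
use std::thread;

fn main() {
    // Stand-in for the sqlite iterator: a worker thread that "holds a read
    // lock" while producing rows through a small bounded channel.
    let (tx, rx) = sync_channel::<u32>(5);
    let producer = thread::spawn(move || {
        for row in 0..20 {
            if tx.send(row).is_err() {
                break;
            }
        }
        // the read lock would be released here, once the query is exhausted
    });

    // Draining the iterator first (the `.collect::<Vec<_>>()` above) lets the
    // producer finish before any write-heavy work starts.
    let rows: Vec<u32> = rx.into_iter().collect();
    producer.join().unwrap();

    for row in rows {
        // ... per-user work and writes happen here, with no reader held ...
        println!("processing row {}", row);
    }
}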