#![warn(missing_docs)]
#![doc = include_str!("../README.md")]
use sqlx::sqlite;
use std::{str::FromStr, time::Duration};
use tokio::try_join;
pub mod config;
mod r#impl;
pub mod types;
use config::SqliteConfig;
/// Handle to the application's SQLite database.
///
/// Holds two connection pools over the same database: a `reader` pool for
/// queries and a single-connection `writer` pool, since SQLite allows only
/// one writer at a time. Cloning is cheap — pools are shared handles.
#[derive(Clone, Debug)]
pub struct Db {
    /// Pool used for read queries.
    pub reader: sqlx::Pool<sqlx::Sqlite>,
    /// Pool (capped at one connection by [`Db::open`]) used for all writes.
    pub writer: sqlx::Pool<sqlx::Sqlite>,
}
impl Db {
    /// Opens the database described by `config` and prepares it for use.
    ///
    /// Connects a single-connection `writer` pool and a `reader` pool, runs
    /// the migrations in `./schema` on the writer, and seeds a sentinel
    /// `batch` row (id 0) stamped with `as_of` if one is not already present.
    ///
    /// When `config.database_path` is `None`, an in-memory database is used.
    /// In that case the reader shares the writer's pool: every fresh
    /// `:memory:` connection opens its own private, empty database, so a
    /// separate reader pool would never observe the migrated schema or any
    /// written data.
    ///
    /// # Errors
    ///
    /// Returns any [`sqlx::Error`] raised while connecting, migrating, or
    /// executing the seed insert.
    pub async fn open(config: &SqliteConfig, as_of: types::DateTime) -> Result<Self, sqlx::Error> {
        let db_path = config
            .database_path
            .as_ref()
            .map(|p| p.to_string_lossy().into_owned());
        // Pragmas follow the common "SQLite for server workloads" tuning:
        // WAL for concurrent readers, NORMAL sync (safe under WAL), plus a
        // large page cache and mmap window.
        let options =
            sqlite::SqliteConnectOptions::from_str(db_path.as_deref().unwrap_or(":memory:"))?
                .busy_timeout(Duration::from_secs(5))
                .foreign_keys(true)
                .journal_mode(sqlite::SqliteJournalMode::Wal)
                .synchronous(sqlite::SqliteSynchronous::Normal)
                .pragma("cache_size", "1000000000")
                .pragma("journal_size_limit", "27103364")
                .pragma("mmap_size", "134217728")
                .pragma("temp_store", "memory")
                .create_if_missing(config.create_if_missing);
        // SQLite permits one writer at a time; capping the pool at a single
        // connection serializes writes in the pool rather than via SQLITE_BUSY.
        let writer_pool = sqlite::SqlitePoolOptions::new()
            .max_connections(1)
            .connect_with(options.clone());
        let (reader, writer) = if db_path.is_some() {
            // File-backed: connect both pools concurrently.
            let reader_pool = sqlite::SqlitePoolOptions::new().connect_with(options);
            try_join!(reader_pool, writer_pool)?
        } else {
            // In-memory: each `:memory:` connection is an independent, empty
            // database (see "In-Memory Databases", sqlite.org), so a second
            // pool would read from a different database than the one migrated
            // below. Share the writer pool handle instead.
            let writer = writer_pool.await?;
            (writer.clone(), writer)
        };
        sqlx::migrate!("./schema").run(&writer).await?;
        // Seed the sentinel batch row exactly once; reopening is a no-op
        // thanks to `on conflict do nothing`.
        sqlx::query!(
            r#"
insert into
batch (id, as_of, portfolio_outcomes, product_outcomes)
values
(0, $1, jsonb('{}'), jsonb('{}'))
on conflict
do nothing
"#,
            as_of
        )
        .execute(&writer)
        .await?;
        Ok(Self { reader, writer })
    }
}