use flate2::read::ZlibDecoder;
use flate2::write::ZlibEncoder;
use flate2::Compression;
use rusqlite::OptionalExtension;
use std::io::Read;
use std::io::Write;
use std::os::raw::c_char;
use crate::db::{DbError, DbResult, DucConnection};
use crate::parse::{decompress_duc_bytes, is_sqlite_header};
use crate::types::{
Checkpoint, Delta, SchemaMigration, VersionBase, VersionChain, VersionGraph,
VersionGraphMetadata,
};
/// Schema version compiled in at build time; the build script emits the
/// literal into `$OUT_DIR/schema_user_version.rs`.
pub const CURRENT_SCHEMA_VERSION: i32 = include!(concat!(env!("OUT_DIR"), "/schema_user_version.rs"));
/// Opens an in-memory SQLite connection seeded from a raw `.duc` byte buffer.
///
/// The buffer is copied into SQLite-owned memory and handed to
/// `sqlite3_deserialize`, so `buf` need not outlive the connection.
///
/// # Errors
/// `DbError::Bootstrap` when the buffer is empty, allocation fails, or
/// deserialization is rejected; `DbError::Rusqlite` when the in-memory
/// connection cannot be created.
pub fn open_duc_bytes(buf: &[u8]) -> DbResult<DucConnection> {
    use rusqlite::Connection;
    let conn = Connection::open_in_memory()
        .map_err(DbError::Rusqlite)?;
    let n = buf.len();
    if n == 0 {
        return Err(DbError::Bootstrap("empty .duc buffer".into()));
    }
    let db_name = b"main\0";
    // The copy must live in SQLite-allocated memory because
    // SQLITE_DESERIALIZE_FREEONCLOSE tells SQLite to sqlite3_free() it.
    let mem = unsafe { rusqlite::ffi::sqlite3_malloc64(n as u64) as *mut u8 };
    if mem.is_null() {
        return Err(DbError::Bootstrap("sqlite3_malloc64 failed".into()));
    }
    // SAFETY: `mem` is a fresh allocation of exactly `n` bytes, `buf` is a
    // valid slice of `n` bytes, and the two regions cannot overlap.
    unsafe {
        std::ptr::copy_nonoverlapping(buf.as_ptr(), mem, n);
    }
    let flags = rusqlite::ffi::SQLITE_DESERIALIZE_FREEONCLOSE as u32;
    // SAFETY: the handle, NUL-terminated name, buffer and sizes satisfy the
    // sqlite3_deserialize() contract; ownership of `mem` transfers to SQLite.
    let rc = unsafe {
        rusqlite::ffi::sqlite3_deserialize(
            conn.handle(),
            db_name.as_ptr() as *const c_char,
            mem,
            n as i64,
            n as i64,
            flags,
        )
    };
    if rc != rusqlite::ffi::SQLITE_OK {
        // Do NOT free `mem` here: with SQLITE_DESERIALIZE_FREEONCLOSE set,
        // sqlite3_deserialize() invokes sqlite3_free() on the buffer itself
        // before returning an error, so freeing again is a double free.
        return Err(DbError::Bootstrap(format!(
            "sqlite3_deserialize failed with code {rc}"
        )));
    }
    conn.execute_batch("PRAGMA journal_mode = MEMORY; PRAGMA foreign_keys = ON;")
        .map_err(|e| DbError::Bootstrap(format!("pragma apply failed: {e}")))?;
    Ok(DucConnection::from_inner(conn))
}
/// A fully materialized version payload returned by the restore APIs.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RestoredVersion {
    /// Version number within the graph.
    pub version_number: i64,
    /// Schema version the data was written under.
    pub schema_version: i32,
    /// Reconstructed blob: checkpoint data, or base data + applied delta.
    pub data: Vec<u8>,
    /// True when served directly from a checkpoint row (no delta applied).
    pub from_checkpoint: bool,
}
/// Lightweight listing row describing one version (checkpoint or delta)
/// without its payload; produced by `VersionControl::list_versions`.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct VersionEntry {
    /// Row id in its source table.
    pub id: String,
    pub version_number: i64,
    pub schema_version: i32,
    // Timestamp units as stored; writes in this file record epoch millis.
    pub timestamp: i64,
    pub description: Option<String>,
    pub is_manual_save: bool,
    pub user_id: Option<String>,
    /// `"checkpoint"` or `"delta"`, depending on the source table.
    pub version_type: String,
    /// Stored size; zero when the column is NULL.
    pub size_bytes: i64,
}
/// Version-control operations over an open `.duc` connection.
/// Borrows the connection, so instances are cheap and short-lived.
pub struct VersionControl<'a> {
    conn: &'a DucConnection,
}
impl<'a> VersionControl<'a> {
/// Crate-internal constructor used by the connection wrapper.
pub(crate) fn new(conn: &'a DucConnection) -> Self {
    Self { conn }
}
/// Public constructor for callers that already hold a connection.
pub fn from_connection(conn: &'a DucConnection) -> Self {
    Self { conn }
}
/// Reconstructs the full data blob for `version_number`.
///
/// Fast path: when the version is materialized as a checkpoint its data is
/// returned directly. Otherwise the version must be a delta; its base
/// checkpoint is loaded and the stored changeset (which is complete
/// relative to that base) is applied on top.
///
/// # Errors
/// Fails when the version exists in neither table, or when the delta's
/// base checkpoint or changeset cannot be read or applied.
pub fn restore_version(&self, version_number: i64) -> DbResult<RestoredVersion> {
    self.conn.with(|c| {
        // Fast path: version is stored as a checkpoint.
        let direct: Option<(Vec<u8>, i32)> = c
            .query_row(
                "SELECT data, schema_version FROM checkpoints
                 WHERE version_number = ?1",
                [version_number],
                |row| Ok((row.get(0)?, row.get(1)?)),
            )
            .optional()
            .map_err(DbError::from)?;
        if let Some((data, schema_version)) = direct {
            return Ok(RestoredVersion {
                version_number,
                schema_version,
                data,
                from_checkpoint: true,
            });
        }
        // Delta path: fetch everything about the delta in ONE query (the
        // previous code read the same row twice in separate statements).
        let (target_sv, target_base_cp_id, target_changeset): (i32, String, Vec<u8>) = c
            .query_row(
                "SELECT schema_version, base_checkpoint_id, changeset FROM deltas
                 WHERE version_number = ?1",
                [version_number],
                |row| Ok((row.get(0)?, row.get(1)?, row.get(2)?)),
            )
            .map_err(DbError::from)?;
        let base_data: Vec<u8> = c
            .query_row(
                "SELECT data FROM checkpoints WHERE id = ?1",
                [&target_base_cp_id],
                |row| row.get(0),
            )
            .map_err(DbError::from)?;
        let final_data = apply_delta_changeset(&base_data, &target_changeset)?;
        Ok(RestoredVersion {
            version_number,
            schema_version: target_sv,
            data: final_data,
            from_checkpoint: false,
        })
    })
}
/// Loads a checkpoint row by its id and returns it as a restored version.
pub fn restore_checkpoint(&self, checkpoint_id: &str) -> DbResult<RestoredVersion> {
    self.conn.with(|conn| {
        let row = conn
            .query_row(
                "SELECT data, version_number, schema_version FROM checkpoints WHERE id = ?1",
                [checkpoint_id],
                |r| {
                    let data: Vec<u8> = r.get(0)?;
                    let version_number: i64 = r.get(1)?;
                    let schema_version: i32 = r.get(2)?;
                    Ok((data, version_number, schema_version))
                },
            )
            .map_err(DbError::from)?;
        let (data, version_number, schema_version) = row;
        Ok(RestoredVersion {
            version_number,
            schema_version,
            data,
            from_checkpoint: true,
        })
    })
}
/// Reads the whole version graph, or `None` when the file carries no
/// version-control tables / row.
pub fn read_version_graph(&self) -> DbResult<Option<VersionGraph>> {
    self.conn.with(|conn| read_version_graph_inner(conn))
}
pub fn list_versions(&self) -> DbResult<Vec<VersionEntry>> {
self.conn.with(|c| {
let mut entries = Vec::new();
let mut cp_stmt = c
.prepare(
"SELECT id, version_number, schema_version, timestamp,
description, is_manual_save, user_id, size_bytes
FROM checkpoints ORDER BY version_number",
)
.map_err(DbError::from)?;
let cp_iter = cp_stmt
.query_map([], |row| {
Ok(VersionEntry {
id: row.get(0)?,
version_number: row.get(1)?,
schema_version: row.get(2)?,
timestamp: row.get(3)?,
description: row.get(4)?,
is_manual_save: row.get::<_, i32>(5)? != 0,
user_id: row.get(6)?,
version_type: "checkpoint".into(),
size_bytes: row.get::<_, Option<i64>>(7)?.unwrap_or(0),
})
})
.map_err(DbError::from)?;
for entry in cp_iter {
entries.push(entry.map_err(DbError::from)?);
}
let mut d_stmt = c
.prepare(
"SELECT id, version_number, schema_version, timestamp,
description, is_manual_save, user_id, size_bytes
FROM deltas ORDER BY version_number",
)
.map_err(DbError::from)?;
let d_iter = d_stmt
.query_map([], |row| {
Ok(VersionEntry {
id: row.get(0)?,
version_number: row.get(1)?,
schema_version: row.get(2)?,
timestamp: row.get(3)?,
description: row.get(4)?,
is_manual_save: row.get::<_, i32>(5)? != 0,
user_id: row.get(6)?,
version_type: "delta".into(),
size_bytes: row.get::<_, Option<i64>>(7)?.unwrap_or(0),
})
})
.map_err(DbError::from)?;
for entry in d_iter {
entries.push(entry.map_err(DbError::from)?);
}
entries.sort_by_key(|e| e.version_number);
Ok(entries)
})
}
/// Reads the singleton metadata row of the version graph, if present.
pub fn get_metadata(&self) -> DbResult<Option<VersionGraphMetadata>> {
    self.conn.with(|conn| {
        let result = conn.query_row(
            "SELECT current_version, current_schema_version, chain_count,
             total_size
             FROM version_graph WHERE id = 1",
            [],
            |row| {
                // NULL total_size is normalized to zero.
                let total_size = row.get::<_, Option<i64>>(3)?.unwrap_or(0);
                Ok(VersionGraphMetadata {
                    current_version: row.get(0)?,
                    current_schema_version: row.get(1)?,
                    chain_count: row.get(2)?,
                    total_size,
                })
            },
        );
        result.optional().map_err(DbError::from)
    })
}
/// Persists a full checkpoint and advances the graph pointer.
///
/// Side effects, in order: closes old chains if `schema_version` is newer
/// than the one currently recorded, resolves (or creates) the open chain
/// for this schema version, upserts the checkpoint row, then bumps the
/// graph pointer and the cached total size.
pub fn create_checkpoint(&self, checkpoint: &Checkpoint) -> DbResult<()> {
    self.conn.with(|c| {
        // Migration bookkeeping first so the chain lookup below sees the
        // correct chain state for this schema version.
        self.maybe_migrate_schema(c, checkpoint.schema_version)?;
        let chain_id = self.resolve_chain_id(c, checkpoint.schema_version)?;
        // INSERT OR REPLACE keeps the call idempotent for a given id.
        c.execute(
            "INSERT OR REPLACE INTO checkpoints
            (id, parent_id, chain_id, version_number, schema_version,
            timestamp, description, is_manual_save, is_schema_boundary,
            user_id, data, size_bytes)
            VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12)",
            rusqlite::params![
                checkpoint.base.id,
                checkpoint.base.parent_id,
                chain_id,
                checkpoint.version_number,
                checkpoint.schema_version,
                checkpoint.base.timestamp,
                checkpoint.base.description,
                // Booleans are stored as INTEGER 0/1.
                checkpoint.base.is_manual_save as i32,
                checkpoint.is_schema_boundary as i32,
                checkpoint.base.user_id,
                checkpoint.data,
                checkpoint.size_bytes,
            ],
        )
        .map_err(DbError::from)?;
        self.update_version_graph_pointer(
            c,
            &checkpoint.base.id,
            checkpoint.version_number,
            checkpoint.schema_version,
        )?;
        Ok(())
    })
}
/// Persists a delta version and advances the graph pointer.
///
/// `delta.payload` must carry the FULL data of the new version; only a
/// changeset computed here against the base checkpoint's data is stored,
/// and the recorded `size_bytes` is the stored changeset's size (the
/// incoming `delta.size_bytes` field is not written).
pub fn create_delta(&self, delta: &Delta) -> DbResult<()> {
    self.conn.with(|c| {
        // Migration/chain bookkeeping first, mirroring create_checkpoint.
        self.maybe_migrate_schema(c, delta.schema_version)?;
        let chain_id = self.resolve_chain_id(c, delta.schema_version)?;
        // Next sequence number among deltas sharing this base checkpoint.
        let delta_sequence: i64 = c
            .query_row(
                "SELECT COALESCE(MAX(delta_sequence), 0) + 1
                FROM deltas WHERE base_checkpoint_id = ?1",
                [&delta.base_checkpoint_id],
                |row| row.get(0),
            )
            .map_err(DbError::from)?;
        // Errors out when the base checkpoint does not exist — a delta
        // is meaningless without one.
        let base_data: Vec<u8> = c
            .query_row(
                "SELECT data FROM checkpoints WHERE id = ?1",
                [&delta.base_checkpoint_id],
                |row| row.get(0),
            )
            .map_err(DbError::from)?;
        let changeset = create_bsdiff_changeset(&base_data, &delta.payload)?;
        let stored_size = changeset.len() as i64;
        // INSERT OR REPLACE keeps the call idempotent for a given id.
        c.execute(
            "INSERT OR REPLACE INTO deltas
            (id, parent_id, base_checkpoint_id, chain_id, delta_sequence,
            version_number, schema_version, timestamp, description,
            is_manual_save, user_id, changeset, size_bytes)
            VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)",
            rusqlite::params![
                delta.base.id,
                delta.base.parent_id,
                delta.base_checkpoint_id,
                chain_id,
                delta_sequence,
                delta.version_number,
                delta.schema_version,
                delta.base.timestamp,
                delta.base.description,
                // Booleans are stored as INTEGER 0/1.
                delta.base.is_manual_save as i32,
                delta.base.user_id,
                changeset,
                stored_size,
            ],
        )
        .map_err(DbError::from)?;
        self.update_version_graph_pointer(
            c,
            &delta.base.id,
            delta.version_number,
            delta.schema_version,
        )?;
        Ok(())
    })
}
/// Records `version_id` as the user-designated checkpoint on the graph row.
pub fn set_user_checkpoint(&self, version_id: &str) -> DbResult<()> {
    self.conn.with(|conn| {
        let result = conn.execute(
            "UPDATE version_graph SET user_checkpoint_version_id = ?1 WHERE id = 1",
            [version_id],
        );
        match result {
            Ok(_) => Ok(()),
            Err(e) => Err(DbError::from(e)),
        }
    })
}
/// Reverts the store to `target_version`, discarding everything newer.
///
/// The target's data is reconstructed first; then all newer checkpoints
/// and deltas are deleted and the graph pointer — including the schema
/// version of the restored state — is rewound.
///
/// # Errors
/// Fails when the target cannot be restored, is missing from both tables,
/// or on any underlying SQL error.
/// NOTE(review): the delete/update sequence is not wrapped in an explicit
/// transaction, so a mid-sequence failure can leave partial state —
/// confirm whether `conn.with` already provides one.
pub fn revert_to_version(&self, target_version: i64) -> DbResult<RestoredVersion> {
    let restored = self.restore_version(target_version)?;
    let restored_schema_version = restored.schema_version;
    self.conn.with(|c| -> DbResult<()> {
        // Resolve the target's id BEFORE deleting anything, so a missing
        // row aborts the revert without having destroyed newer versions
        // (the previous code deleted first and looked up afterwards).
        let version_id: String = c
            .query_row(
                "SELECT id FROM checkpoints WHERE version_number = ?1
                 UNION ALL
                 SELECT id FROM deltas WHERE version_number = ?1
                 LIMIT 1",
                [target_version],
                |row| row.get(0),
            )
            .map_err(DbError::from)?;
        c.execute(
            "DELETE FROM deltas WHERE version_number > ?1",
            [target_version],
        )
        .map_err(DbError::from)?;
        c.execute(
            "DELETE FROM checkpoints WHERE version_number > ?1",
            [target_version],
        )
        .map_err(DbError::from)?;
        // Rewind the pointer and restore the schema version too, so a later
        // maybe_migrate_schema() compares against the right value (the
        // previous code left a possibly-newer schema version behind).
        c.execute(
            "UPDATE version_graph
             SET current_version = ?1,
                 current_schema_version = ?2,
                 latest_version_id = ?3
             WHERE id = 1",
            rusqlite::params![target_version, restored_schema_version, version_id],
        )
        .map_err(DbError::from)?;
        self.recalculate_total_size(c)?;
        Ok(())
    })?;
    Ok(restored)
}
/// Advances the version-graph pointer after a successful write and then
/// refreshes the cached total size. `current_version` only ever moves
/// forward (scalar two-argument MAX in SQLite).
fn update_version_graph_pointer(
    &self,
    c: &rusqlite::Connection,
    version_id: &str,
    version_number: i64,
    schema_version: i32,
) -> DbResult<()> {
    let update = c.execute(
        "UPDATE version_graph
         SET current_version = MAX(current_version, ?1),
             current_schema_version = ?2,
             latest_version_id = ?3
         WHERE id = 1",
        rusqlite::params![version_number, schema_version, version_id],
    );
    match update {
        Ok(_) => self.recalculate_total_size(c),
        Err(e) => Err(DbError::from(e)),
    }
}
/// Returns the id of the open chain (`end_version IS NULL`) for
/// `schema_version`, creating a new chain — and bumping the graph's chain
/// count — when none exists.
fn resolve_chain_id(
    &self,
    c: &rusqlite::Connection,
    schema_version: i32,
) -> DbResult<String> {
    let existing: Option<String> = c
        .query_row(
            "SELECT id FROM version_chains
             WHERE schema_version = ?1 AND end_version IS NULL
             ORDER BY start_version DESC LIMIT 1",
            [schema_version],
            |row| row.get(0),
        )
        .optional()
        .map_err(DbError::from)?;
    if let Some(id) = existing {
        return Ok(id);
    }
    let new_id = nanoid();
    // Highest version number across BOTH tables. The previous query was a
    // bare UNION ALL whose first row (checkpoints only) was read, silently
    // ignoring the deltas table; wrap in MAX(v) as maybe_migrate_schema does.
    let start_version: i64 = c
        .query_row(
            "SELECT MAX(v) FROM (
             SELECT COALESCE(MAX(version_number), 0) AS v FROM checkpoints
             UNION ALL
             SELECT COALESCE(MAX(version_number), 0) AS v FROM deltas
             )",
            [],
            |row| row.get(0),
        )
        // Best-effort fallback, preserving the original's behavior on error.
        .unwrap_or(0);
    c.execute(
        "INSERT INTO version_chains (id, schema_version, start_version)
         VALUES (?1, ?2, ?3)",
        rusqlite::params![new_id, schema_version, start_version],
    )
    .map_err(DbError::from)?;
    c.execute(
        "UPDATE version_graph SET chain_count = chain_count + 1 WHERE id = 1",
        [],
    )
    .map_err(DbError::from)?;
    Ok(new_id)
}
/// Recomputes `version_graph.total_size` as the sum of `size_bytes` over
/// both the checkpoints and deltas tables (NULL sizes count as zero).
fn recalculate_total_size(&self, c: &rusqlite::Connection) -> DbResult<()> {
    let sum_sql = "SELECT COALESCE(
         (SELECT SUM(COALESCE(size_bytes, 0)) FROM checkpoints), 0
         ) + COALESCE(
         (SELECT SUM(COALESCE(size_bytes, 0)) FROM deltas), 0
         )";
    let total: i64 = c
        .query_row(sum_sql, [], |row| row.get(0))
        .map_err(DbError::from)?;
    c.execute(
        "UPDATE version_graph SET total_size = ?1 WHERE id = 1",
        [total],
    )
    .map_err(DbError::from)
    .map(|_| ())
}
/// Bookkeeping performed when a write arrives with a NEWER schema version
/// than the graph currently records.
///
/// Closes every still-open chain of the old schema version (stamping the
/// current max version number as `end_version`) and records a row in
/// `schema_migrations`. No-op when `new_schema_version` is not newer.
/// `version_graph.current_schema_version` itself is advanced afterwards by
/// `update_version_graph_pointer`, not here.
fn maybe_migrate_schema(
    &self,
    c: &rusqlite::Connection,
    new_schema_version: i32,
) -> DbResult<()> {
    let current_sv: i32 = c
        .query_row(
            "SELECT current_schema_version FROM version_graph WHERE id = 1",
            [],
            |row| row.get(0),
        )
        .map_err(DbError::from)?;
    // Same or older schema: nothing to migrate.
    if new_schema_version <= current_sv {
        return Ok(());
    }
    // Highest version number across both tables; query errors fall back
    // to 0 (best-effort — an empty store stamps end_version = 0).
    let current_max_version: i64 = c
        .query_row(
            "SELECT MAX(v) FROM (
            SELECT COALESCE(MAX(version_number), 0) AS v FROM checkpoints
            UNION ALL
            SELECT COALESCE(MAX(version_number), 0) AS v FROM deltas
            )",
            [],
            |row| row.get(0),
        )
        .unwrap_or(0);
    // Close all open chains belonging to the outgoing schema version.
    c.execute(
        "UPDATE version_chains
        SET end_version = ?1
        WHERE schema_version = ?2 AND end_version IS NULL",
        rusqlite::params![current_max_version, current_sv],
    )
    .map_err(DbError::from)?;
    // Epoch milliseconds; clock errors degrade to 0 rather than failing.
    let now_ms = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_millis() as i64;
    // OR IGNORE — presumably (from, to) is unique in the schema so a repeat
    // migration is skipped silently; confirm against the table DDL.
    c.execute(
        "INSERT OR IGNORE INTO schema_migrations
        (from_schema_version, to_schema_version, migration_name, applied_at)
        VALUES (?1, ?2, ?3, ?4)",
        rusqlite::params![
            current_sv,
            new_schema_version,
            format!("auto_migration_v{}_to_v{}", current_sv, new_schema_version),
            now_ms,
        ],
    )
    .map_err(DbError::from)?;
    log::info!(
        "Schema migration: {} → {} (closed old chains, recorded migration)",
        current_sv,
        new_schema_version
    );
    Ok(())
}
}
/// Loads the complete version graph from an open connection.
///
/// Returns `Ok(None)` when the `version_graph` table does not exist (a
/// file written before version control) or exists but has no row with
/// id = 1.
pub(crate) fn read_version_graph_inner(
    conn: &rusqlite::Connection,
) -> DbResult<Option<VersionGraph>> {
    use std::collections::HashMap;
    // Probe sqlite_master first: a missing table must read as "no graph",
    // not as an error; probe failures also collapse to "no graph".
    let has_table: bool = conn
        .prepare("SELECT count(*) FROM sqlite_master WHERE type='table' AND name='version_graph'")
        .and_then(|mut s| s.query_row([], |row| row.get::<_, i32>(0)))
        .unwrap_or(0)
        > 0;
    if !has_table {
        return Ok(None);
    }
    // Singleton metadata row (id = 1).
    let mut vg_stmt = conn
        .prepare(
            "SELECT current_version, current_schema_version, user_checkpoint_version_id,
            latest_version_id, chain_count, total_size
            FROM version_graph WHERE id = 1",
        )
        .map_err(DbError::from)?;
    let (metadata, user_cp_id, latest_id) = match vg_stmt.query_row([], |row| {
        Ok((
            VersionGraphMetadata {
                current_version: row.get(0)?,
                current_schema_version: row.get(1)?,
                chain_count: row.get(4)?,
                total_size: row.get::<_, Option<i64>>(5)?.unwrap_or(0),
            },
            // NULL ids are normalized to empty strings.
            row.get::<_, Option<String>>(2)?.unwrap_or_default(),
            row.get::<_, Option<String>>(3)?.unwrap_or_default(),
        ))
    }) {
        Ok(v) => v,
        // Table exists but holds no row: still "no graph".
        Err(rusqlite::Error::QueryReturnedNoRows) => return Ok(None),
        Err(e) => return Err(DbError::from(e)),
    };
    // Schema migrations, keyed by row id so chains can resolve theirs below.
    let mut m_stmt = conn
        .prepare(
            "SELECT id, from_schema_version, to_schema_version, migration_name,
            migration_checksum, applied_at, boundary_checkpoint_id
            FROM schema_migrations",
        )
        .map_err(DbError::from)?;
    let migrations: HashMap<i64, SchemaMigration> = m_stmt
        .query_map([], |row| {
            let id: i64 = row.get(0)?;
            Ok((
                id,
                SchemaMigration {
                    from_schema_version: row.get(1)?,
                    to_schema_version: row.get(2)?,
                    migration_name: row.get(3)?,
                    migration_checksum: row.get(4)?,
                    applied_at: row.get(5)?,
                    boundary_checkpoint_id: row.get(6)?,
                },
            ))
        })
        .map_err(DbError::from)?
        // NOTE(review): row-decode errors are swallowed here (empty-map
        // fallback), unlike every other collect in this function which
        // propagates — presumably best-effort for older files; confirm
        // before changing.
        .collect::<Result<HashMap<_, _>, _>>()
        .unwrap_or_default();
    // Chains, oldest first; each resolves its optional migration record.
    let mut ch_stmt = conn
        .prepare(
            "SELECT id, schema_version, start_version, end_version, migration_id, root_checkpoint_id
            FROM version_chains ORDER BY start_version",
        )
        .map_err(DbError::from)?;
    let chains: Vec<VersionChain> = ch_stmt
        .query_map([], |row| {
            let mig_id: Option<i64> = row.get(4)?;
            Ok(VersionChain {
                id: row.get(0)?,
                schema_version: row.get(1)?,
                start_version: row.get(2)?,
                end_version: row.get(3)?,
                // A dangling migration_id silently becomes None.
                migration: mig_id.and_then(|mid| migrations.get(&mid).cloned()),
                root_checkpoint_id: row.get(5)?,
            })
        })
        .map_err(DbError::from)?
        .collect::<Result<Vec<_>, _>>()
        .map_err(DbError::from)?;
    // Checkpoints including full data payloads (NULL data → empty vec).
    let mut cp_stmt = conn
        .prepare(
            "SELECT id, parent_id, version_number, schema_version, timestamp,
            description, is_manual_save, is_schema_boundary, user_id, data, size_bytes
            FROM checkpoints ORDER BY version_number",
        )
        .map_err(DbError::from)?;
    let checkpoints: Vec<Checkpoint> = cp_stmt
        .query_map([], |row| {
            Ok(Checkpoint {
                base: VersionBase {
                    id: row.get(0)?,
                    parent_id: row.get(1)?,
                    timestamp: row.get(4)?,
                    description: row.get(5)?,
                    is_manual_save: row.get::<_, i32>(6)? != 0,
                    user_id: row.get(8)?,
                },
                version_number: row.get(2)?,
                schema_version: row.get(3)?,
                is_schema_boundary: row.get::<_, i32>(7)? != 0,
                data: row.get::<_, Option<Vec<u8>>>(9)?.unwrap_or_default(),
                size_bytes: row.get::<_, Option<i64>>(10)?.unwrap_or(0),
            })
        })
        .map_err(DbError::from)?
        .collect::<Result<Vec<_>, _>>()
        .map_err(DbError::from)?;
    // Deltas; `payload` carries the stored changeset bytes (column
    // `changeset`), NOT the expanded version data.
    let mut d_stmt = conn
        .prepare(
            "SELECT id, parent_id, base_checkpoint_id, version_number, schema_version,
            timestamp, description, is_manual_save, user_id, changeset, size_bytes
            FROM deltas ORDER BY version_number",
        )
        .map_err(DbError::from)?;
    let deltas: Vec<Delta> = d_stmt
        .query_map([], |row| {
            Ok(Delta {
                base: VersionBase {
                    id: row.get(0)?,
                    parent_id: row.get(1)?,
                    timestamp: row.get(5)?,
                    description: row.get(6)?,
                    is_manual_save: row.get::<_, i32>(7)? != 0,
                    user_id: row.get(8)?,
                },
                base_checkpoint_id: row.get(2)?,
                version_number: row.get(3)?,
                schema_version: row.get(4)?,
                payload: row.get::<_, Option<Vec<u8>>>(9)?.unwrap_or_default(),
                size_bytes: row.get::<_, Option<i64>>(10)?.unwrap_or(0),
            })
        })
        .map_err(DbError::from)?
        .collect::<Result<Vec<_>, _>>()
        .map_err(DbError::from)?;
    Ok(Some(VersionGraph {
        user_checkpoint_version_id: user_cp_id,
        latest_version_id: latest_id,
        chains,
        checkpoints,
        deltas,
        metadata,
    }))
}
/// Inflates a zlib-compressed buffer into a fresh `Vec<u8>`.
fn decompress_zlib(compressed: &[u8]) -> DbResult<Vec<u8>> {
    let mut out = Vec::new();
    ZlibDecoder::new(compressed)
        .read_to_end(&mut out)
        .map_err(|e| DbError::Bootstrap(format!("zlib decompression failed: {e}")))?;
    Ok(out)
}
/// Deflates a raw buffer with zlib at the default compression level.
fn compress_zlib(raw: &[u8]) -> DbResult<Vec<u8>> {
    let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default());
    if let Err(e) = encoder.write_all(raw) {
        return Err(DbError::Bootstrap(format!("zlib compression failed: {e}")));
    }
    encoder
        .finish()
        .map_err(|e| DbError::Bootstrap(format!("zlib finalize failed: {e}")))
}
/// Normalizes a blob to raw SQLite bytes: a raw database passes through
/// untouched (borrowed), while a compressed `.duc` blob is decompressed
/// and validated (owned).
fn ensure_raw_sqlite(buf: &[u8]) -> DbResult<std::borrow::Cow<'_, [u8]>> {
    use std::borrow::Cow;
    if is_sqlite_header(buf) {
        return Ok(Cow::Borrowed(buf));
    }
    let raw = decompress_duc_bytes(buf)
        .map_err(|e| DbError::Bootstrap(format!("failed to decompress .duc blob: {e}")))?;
    if is_sqlite_header(&raw) {
        Ok(Cow::Owned(raw))
    } else {
        Err(DbError::Bootstrap(
            "decompressed blob is not a valid SQLite database".into(),
        ))
    }
}
// Stored changesets come in two flavors: a fossil-delta envelope (magic
// "DF", format byte 5, a little-endian u32 length, zlib payload) or a plain
// zlib snapshot. These constants describe the envelope header.
const DELTA_MAGIC_FOSSIL: [u8; 2] = [0x44, 0x46];
const DELTA_FORMAT_V5: u8 = 5;
const FOSSIL_HEADER_SIZE: usize = 2 + 1 + 4;
/// Returns true when `changeset` carries a complete fossil-envelope header.
fn is_fossil_format(changeset: &[u8]) -> bool {
    let header = [DELTA_MAGIC_FOSSIL[0], DELTA_MAGIC_FOSSIL[1], DELTA_FORMAT_V5];
    changeset.len() >= FOSSIL_HEADER_SIZE && changeset.starts_with(&header)
}
/// Encodes `current` as a stored changeset against `base`.
///
/// Output is either (a) a fossil envelope — "DF" magic, format byte 5, the
/// raw length of `current` as little-endian u32, then the zlib-compressed
/// fossil delta — or (b) a plain zlib snapshot of `current`, whichever is
/// smaller. `apply_delta_changeset` distinguishes them via the magic
/// header (a zlib stream's first byte is a CMF byte such as 0x78, not 'D').
pub fn create_bsdiff_changeset(base: &[u8], current: &[u8]) -> DbResult<Vec<u8>> {
    // Inputs may arrive compressed; normalize both to raw SQLite bytes so
    // the delta is computed over stable content.
    let raw_base = ensure_raw_sqlite(base)?;
    let raw_current = ensure_raw_sqlite(current)?;
    // NOTE(review): argument order here (current first, base second) must
    // pair with apply(&raw_base, ..) in apply_fossil_changeset for the
    // round-trip to work — confirm against the fossil_delta crate docs.
    let raw_delta = fossil_delta::delta(&raw_current, &raw_base);
    let compressed_delta = compress_zlib(&raw_delta)?;
    // Recorded for information only; apply_fossil_changeset skips it.
    let new_len = raw_current.len() as u32;
    let mut encoded = Vec::with_capacity(FOSSIL_HEADER_SIZE + compressed_delta.len());
    encoded.extend_from_slice(&DELTA_MAGIC_FOSSIL);
    encoded.push(DELTA_FORMAT_V5);
    encoded.extend_from_slice(&new_len.to_le_bytes());
    encoded.extend_from_slice(&compressed_delta);
    // Fall back to a full snapshot when the envelope is not smaller.
    let snapshot = compress_zlib(&raw_current)?;
    if encoded.len() < snapshot.len() {
        Ok(encoded)
    } else {
        Ok(snapshot)
    }
}
/// Applies a fossil-envelope changeset to `base`.
///
/// Precondition: `changeset` passed `is_fossil_format`, which guarantees
/// at least `FOSSIL_HEADER_SIZE` bytes — the slice below panics otherwise.
/// The u32 length field in the header is skipped, not validated.
fn apply_fossil_changeset(base: &[u8], changeset: &[u8]) -> DbResult<Vec<u8>> {
    let raw_base = ensure_raw_sqlite(base)?;
    let compressed_delta = &changeset[FOSSIL_HEADER_SIZE..];
    let raw_delta = decompress_zlib(compressed_delta)?;
    fossil_delta::apply(&raw_base, &raw_delta)
        .map_err(|e| DbError::Bootstrap(format!("fossil delta apply failed: {e:?}")))
}
/// Reconstructs a full blob from a stored changeset: fossil-envelope
/// changesets are applied against `base_data`, while plain zlib snapshots
/// (the fallback produced by `create_bsdiff_changeset`) ignore the base.
pub fn apply_delta_changeset(base_data: &[u8], changeset: &[u8]) -> DbResult<Vec<u8>> {
    if !is_fossil_format(changeset) {
        return decompress_zlib(changeset);
    }
    apply_fossil_changeset(base_data, changeset)
}
/// Generates a 21-character identifier from the nanoid alphabet.
///
/// Ids are derived from the wall clock MIXED with a process-wide atomic
/// counter, so consecutive calls yield distinct ids even within one clock
/// tick (the previous version was purely time-based and returned identical
/// ids for same-nanosecond calls). This is NOT cryptographically random,
/// unlike a true nanoid.
fn nanoid() -> String {
    use std::sync::atomic::{AtomicU64, Ordering};
    use std::time::{SystemTime, UNIX_EPOCH};
    // Per-process uniqueness counter; wrapping is harmless.
    static COUNTER: AtomicU64 = AtomicU64::new(0);
    let seq = COUNTER.fetch_add(1, Ordering::Relaxed);
    let mixed = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_nanos()
        ^ u128::from(seq);
    let charset: &[u8] = b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_-";
    let mut id = String::with_capacity(21);
    // First 10 characters: base-64 digits of the counter-mixed timestamp.
    let mut val = mixed;
    for _ in 0..10 {
        id.push(charset[(val % 64) as usize] as char);
        val /= 64;
    }
    // Remaining 11 characters: an LCG-scrambled copy for extra spread.
    val = mixed.wrapping_mul(6364136223846793005).wrapping_add(1442695040888963407);
    for _ in 0..11 {
        id.push(charset[(val % 64) as usize] as char);
        val /= 64;
    }
    id
}