pub struct Aurora {
pub pubsub: PubSubSystem,
pub workers: Option<Arc<WorkerSystem>>,
pub computed: Arc<RwLock<ComputedFields>>,
/* private fields */
}
The main database engine
Aurora combines a tiered storage architecture with document-oriented database features:
- Hot tier: In-memory cache for frequently accessed data
- Cold tier: Persistent disk storage for durability
- Primary indices: Fast key-based access
- Secondary indices: Fast field-based queries
§Examples
// Open a database (creates if doesn't exist)
let db = Aurora::open("my_app.db")?;
// Insert a document
let doc_id = db.insert_into("users", vec![
("name", Value::String("Alice".to_string())),
("age", Value::Int(32)),
]).await?;
// Retrieve a document
let user = db.get_document("users", &doc_id)?;
Fields§
pubsub: PubSubSystem
workers: Option<Arc<WorkerSystem>>
computed: Arc<RwLock<ComputedFields>>
Implementations§
impl Aurora
Sourcepub async fn execute<I: ToExecParams>(
&self,
input: I,
) -> Result<ExecutionResult>
pub async fn execute<I: ToExecParams>( &self, input: I, ) -> Result<ExecutionResult>
Execute AQL query (variables are optional)
Supports two forms:
db.execute("query").await
db.execute(("query", vars)).await
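§Examples
A minimal sketch of both call forms; the AQL text and the shape of the variables value are illustrative assumptions, not taken from these docs:
// Form 1: query string only (AQL text is illustrative)
let result = db.execute("query { users { id name } }").await?;
// Form 2: query string plus variables (any type accepted by ToExecParams)
let vars = my_vars; // hypothetical variables value
let result = db.execute(("query { users(where: { age: { gte: $min } }) { id } }", vars)).await?;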
Sourcepub async fn stream(&self, aql: &str) -> Result<ChangeListener>
pub async fn stream(&self, aql: &str) -> Result<ChangeListener>
Stream real-time changes using AQL subscription syntax
This is a convenience method that extracts the ChangeListener from
an AQL subscription query, providing a cleaner API than using execute() directly.
§Example
// Stream changes from active products
let mut listener = db.stream(r#"
subscription {
products(where: { active: { eq: true } }) {
id
name
}
}
"#).await?;
// Receive real-time events
while let Ok(event) = listener.recv().await {
println!("Change: {:?} on {}", event.change_type, event.id);
}
Sourcepub async fn explain<I: ToExecParams>(&self, input: I) -> Result<ExecutionPlan>
pub async fn explain<I: ToExecParams>(&self, input: I) -> Result<ExecutionPlan>
Explain AQL query execution plan
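§Examples
A brief usage sketch; printing the plan assumes ExecutionPlan implements Debug, which these docs do not state:
// Inspect how a query would be executed without running it
let plan = db.explain("query { users(where: { active: { eq: true } }) { id } }").await?;
println!("Plan: {:?}", plan);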
Sourcepub async fn analyze_execution_plan(
&self,
doc: &Document,
) -> Result<ExecutionPlan>
pub async fn analyze_execution_plan( &self, doc: &Document, ) -> Result<ExecutionPlan>
Analyze execution plan for a parsed query
Sourcepub fn remove_stale_lock<P: AsRef<Path>>(path: P) -> Result<bool>
pub fn remove_stale_lock<P: AsRef<Path>>(path: P) -> Result<bool>
Remove stale lock files from a database directory
If Aurora crashes or is forcefully terminated, it may leave behind lock files that prevent the database from being reopened. This method safely removes those lock files.
§Safety
Only call this when you’re certain no other Aurora instance is using the database. Removing lock files while another process is running could cause data corruption.
§Example
use aurora_db::Aurora;
// If you get "Access denied" error when opening:
if let Err(e) = Aurora::open("my_db") {
eprintln!("Failed to open: {}", e);
// Try removing stale lock
if Aurora::remove_stale_lock("my_db").unwrap_or(false) {
println!("Removed stale lock, try opening again");
let db = Aurora::open("my_db")?;
}
}
Sourcepub fn open<P: AsRef<Path>>(path: P) -> Result<Self>
pub fn open<P: AsRef<Path>>(path: P) -> Result<Self>
Open or create a database at the specified location
§Arguments
- path - Path to the database file or directory
  - Absolute paths (like /data/myapp.db) are used as-is
  - Relative paths (like ./data/myapp.db) are resolved relative to the current directory
  - Simple names (like myapp.db) use the current directory
§Returns
An initialized Aurora database instance
§Examples
use aurora_db::Aurora;
let db = Aurora::open("./data/my_application.db")?;
// Or use a relative path
let db = Aurora::open("customer_data.db")?;
Sourcepub fn with_config(config: AuroraConfig) -> Result<Self>
pub fn with_config(config: AuroraConfig) -> Result<Self>
Open a database with custom configuration
§Arguments
config- Database configuration settings
§Examples
use aurora_db::{Aurora, types::AuroraConfig};
use std::time::Duration;
let config = AuroraConfig {
db_path: "my_data.db".into(),
hot_cache_size_mb: 512, // 512 MB cache
enable_write_buffering: true, // Batch writes for speed
enable_wal: true, // Durability
auto_compact: true, // Background compaction
compact_interval_mins: 60, // Compact every hour
..Default::default()
};
let db = Aurora::with_config(config)?;
pub async fn ensure_indices_initialized(&self) -> Result<()>
Sourcepub fn get(&self, key: &str) -> Result<Option<Vec<u8>>>
pub fn get(&self, key: &str) -> Result<Option<Vec<u8>>>
Get a value by key (low-level key-value access)
This is the low-level method. For document access, use get_document() instead.
Checks hot cache first, then falls back to cold storage for maximum performance.
§Performance
- Hot cache hit: ~1M reads/sec (instant)
- Cold storage: ~500K reads/sec (disk I/O)
- Cache hit rate: typically 95%+ at scale
§Examples
// Low-level key-value access
let data = db.get("users:12345")?;
if let Some(bytes) = data {
let doc: Document = serde_json::from_slice(&bytes)?;
println!("Found: {:?}", doc);
}
// Better: use get_document() for documents
let user = db.get_document("users", "12345")?;
Sourcepub fn get_hot_ref(&self, key: &str) -> Option<Arc<Vec<u8>>>
pub fn get_hot_ref(&self, key: &str) -> Option<Arc<Vec<u8>>>
Get a value as a zero-copy Arc reference (10-100x faster than get()!)
Only checks the hot cache - returns None if the value is not cached
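§Examples
A minimal sketch of the zero-copy fast path with a fallback to get(); the "collection:id" key format follows the other key-value examples on this page:
// Fast path: borrow the cached bytes without copying
if let Some(bytes) = db.get_hot_ref("users:12345") {
    println!("Cached value is {} bytes", bytes.len());
} else {
    // Not in the hot cache - fall back to the regular lookup (also checks cold storage)
    if let Some(bytes) = db.get("users:12345")? {
        println!("Loaded {} bytes from storage", bytes.len());
    }
}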
Sourcepub fn get_cache_stats(&self) -> CacheStats
pub fn get_cache_stats(&self) -> CacheStats
Get cache statistics
Returns detailed metrics about cache performance including hit/miss rates, memory usage, and access patterns. Useful for monitoring, optimization, and understanding database performance characteristics.
§Returns
CacheStats struct containing:
- hits: Number of cache hits (data found in memory)
- misses: Number of cache misses (had to read from disk)
- hit_rate: Percentage of requests served from cache (0.0-1.0)
- size: Current number of entries in cache
- capacity: Maximum cache capacity
- evictions: Number of entries evicted due to capacity
§Examples
use aurora_db::Aurora;

let db = Aurora::open("mydb.db")?;
let stats = db.get_cache_stats();
println!("Cache hit rate: {:.1}%", stats.hit_rate * 100.0);
println!("Cache size: {} / {} entries", stats.size, stats.capacity);
println!("Total hits: {}, misses: {}", stats.hits, stats.misses);

// Monitor performance during operations
let before = db.get_cache_stats();

// Perform many reads
for i in 0..1000 {
    db.get_document("users", &format!("user-{}", i))?;
}

let after = db.get_cache_stats();
let hit_rate = (after.hits - before.hits) as f64 / 1000.0;
println!("Read hit rate: {:.1}%", hit_rate * 100.0);

// Performance tuning
let stats = db.get_cache_stats();
if stats.hit_rate < 0.80 {
    println!("Low cache hit rate! Consider:");
    println!("- Increasing cache size in config");
    println!("- Prewarming cache with prewarm_cache()");
    println!("- Reviewing query patterns");
}

if stats.evictions > stats.size {
    println!("High eviction rate! Cache may be too small.");
    println!("Consider increasing cache capacity.");
}

// Production monitoring
use std::time::Duration;
use std::thread;

loop {
    let stats = db.get_cache_stats();

    // Log to monitoring system
    if stats.hit_rate < 0.90 {
        eprintln!("Warning: Cache hit rate dropped to {:.1}%",
            stats.hit_rate * 100.0);
    }

    thread::sleep(Duration::from_secs(60));
}
- **Excellent**: 95%+ hit rate (most reads from memory)
- **Good**: 80-95% hit rate (acceptable performance)
- **Poor**: <80% hit rate (consider cache tuning)
- `prewarm_cache()` to improve hit rates by preloading data
- `Aurora::with_config()` to adjust cache capacity
pub fn has_index(&self, collection: &str, field: &str) -> bool
pub fn get_ids_from_index( &self, collection: &str, field: &str, value: &Value, ) -> Vec<String>
Sourcepub async fn register_computed_field(
&self,
collection: &str,
field: &str,
expression: ComputedExpression,
) -> Result<()>
pub async fn register_computed_field( &self, collection: &str, field: &str, expression: ComputedExpression, ) -> Result<()>
Register a computed field definition
Sourcepub fn listen(&self, collection: impl Into<String>) -> ChangeListener
pub fn listen(&self, collection: impl Into<String>) -> ChangeListener
Listen for real-time changes in a collection
Returns a stream of change events (inserts, updates, deletes) that you can subscribe to. Perfect for building reactive UIs, cache invalidation, audit logging, webhooks, and data synchronization systems.
§Performance
- Zero overhead when no listeners are active
- Events are broadcast to all listeners asynchronously
- Non-blocking - doesn’t slow down write operations
- Multiple listeners can watch the same collection
§Examples
use aurora_db::{Aurora, types::Value};
let db = Aurora::open("mydb.db")?;

// Basic listener
let mut listener = db.listen("users");

tokio::spawn(async move {
    while let Ok(event) = listener.recv().await {
        match event.change_type {
            ChangeType::Insert => println!("New user: {:?}", event.document),
            ChangeType::Update => println!("Updated user: {:?}", event.document),
            ChangeType::Delete => println!("Deleted user ID: {}", event.id),
        }
    }
});

// Now any insert/update/delete will trigger the listener
db.insert_into("users", vec![("name", Value::String("Alice".into()))]).await?;
**Cache Invalidation:**
use std::sync::Arc;
use tokio::sync::RwLock;
use std::collections::HashMap;
let cache = Arc::new(RwLock::new(HashMap::new()));
let cache_clone = Arc::clone(&cache);
let mut listener = db.listen("products");
tokio::spawn(async move {
while let Ok(event) = listener.recv().await {
// Invalidate cache entry when product changes
cache_clone.write().await.remove(&event.id);
println!("Cache invalidated for product: {}", event.id);
}
});
Webhook Notifications:
let mut listener = db.listen("orders");
tokio::spawn(async move {
while let Ok(event) = listener.recv().await {
if event.change_type == ChangeType::Insert {
// Send webhook for new orders
send_webhook("https://api.example.com/webhooks/order", &event).await;
}
}
});
Audit Logging:
let mut listener = db.listen("sensitive_data");
tokio::spawn(async move {
while let Ok(event) = listener.recv().await {
// Log all changes to audit trail
db.insert_into("audit_log", vec![
("collection", Value::String("sensitive_data".into())),
("action", Value::String(format!("{:?}", event.change_type))),
("document_id", Value::String(event.id.clone())),
("timestamp", Value::String(chrono::Utc::now().to_rfc3339())),
]).await?;
}
});
Data Synchronization:
let mut listener = db.listen("users");
tokio::spawn(async move {
while let Ok(event) = listener.recv().await {
// Sync changes to external system
match event.change_type {
ChangeType::Insert | ChangeType::Update => {
if let Some(doc) = event.document {
external_api.upsert_user(&doc).await?;
}
},
ChangeType::Delete => {
external_api.delete_user(&event.id).await?;
},
}
}
});
Real-Time Notifications:
let mut listener = db.listen("messages");
tokio::spawn(async move {
while let Ok(event) = listener.recv().await {
if event.change_type == ChangeType::Insert {
if let Some(msg) = event.document {
// Push notification to connected websockets
if let Some(recipient) = msg.data.get("recipient_id") {
websocket_manager.send_to_user(recipient, &msg).await;
}
}
}
}
});
Filtered Listener:
use aurora_db::pubsub::EventFilter;
// Only listen for inserts
let mut listener = db.listen("users")
.filter(EventFilter::ChangeType(ChangeType::Insert));
// Only listen for documents with specific field value
let mut listener = db.listen("users")
    .filter(EventFilter::FieldEquals("role".to_string(), Value::String("admin".into())));
§Important Notes
- Listener stays active until dropped
- Events are delivered in order
- Each listener has its own event stream
- Use filters to reduce unnecessary event processing
- Listeners don’t affect write performance
§See Also
- listen_all() to listen to all collections
- ChangeListener::filter() to filter events
- query().watch() for reactive queries with filtering
Sourcepub fn listen_all(&self) -> ChangeListener
pub fn listen_all(&self) -> ChangeListener
Listen for all changes across all collections
Returns a stream of change events for every insert, update, and delete operation across the entire database. Useful for global audit logging, replication, and monitoring systems.
§Performance
- Same performance as single collection listener
- Filter events by collection in your handler
- Consider using listen(collection) if only watching specific collections
§Examples
use aurora_db::Aurora;
let db = Aurora::open("mydb.db")?;
// Listen to everything
let mut listener = db.listen_all();
tokio::spawn(async move {
while let Ok(event) = listener.recv().await {
println!("Change in {}: {:?}", event.collection, event.change_type);
}
});
§Real-World Use Cases
Global Audit Trail:
let mut listener = db.listen_all();
tokio::spawn(async move {
while let Ok(event) = listener.recv().await {
// Log every database change
audit_logger.log(AuditEntry {
timestamp: chrono::Utc::now(),
collection: event.collection,
action: event.change_type,
document_id: event.id,
user_id: get_current_user_id(),
}).await;
}
});
Database Replication:
let mut listener = db.listen_all();
tokio::spawn(async move {
while let Ok(event) = listener.recv().await {
// Replicate to secondary database
replica_db.apply_change(event).await?;
}
});
Change Data Capture (CDC):
let mut listener = db.listen_all();
tokio::spawn(async move {
while let Ok(event) = listener.recv().await {
// Stream changes to Kafka/RabbitMQ
kafka_producer.send(
&format!("cdc.{}", event.collection),
serde_json::to_string(&event)?
).await?;
}
});
Monitoring & Metrics:
use std::sync::atomic::{AtomicUsize, Ordering};
let write_counter = Arc::new(AtomicUsize::new(0));
let counter_clone = Arc::clone(&write_counter);
let mut listener = db.listen_all();
tokio::spawn(async move {
while let Ok(_event) = listener.recv().await {
counter_clone.fetch_add(1, Ordering::Relaxed);
}
});
// Report metrics every 60 seconds
tokio::spawn(async move {
loop {
tokio::time::sleep(Duration::from_secs(60)).await;
let count = write_counter.swap(0, Ordering::Relaxed);
println!("Writes per minute: {}", count);
}
});
Selective Processing:
let mut listener = db.listen_all();
tokio::spawn(async move {
while let Ok(event) = listener.recv().await {
// Handle different collections differently
match event.collection.as_str() {
"users" => handle_user_change(event).await,
"orders" => handle_order_change(event).await,
"payments" => handle_payment_change(event).await,
_ => {} // Ignore others
}
}
});
§When to Use
- Global audit logging
- Database replication
- Change data capture (CDC)
- Monitoring and metrics
- Event sourcing systems
§When NOT to Use
- Only need to watch 1-2 collections → Use listen(collection) instead
- High write volume with selective interest → Use collection-specific listeners
- Need complex filtering → Use query().watch() instead
§See Also
- listen() for single-collection listening
- listener_count() to check active listeners
- query().watch() for filtered reactive queries
Sourcepub fn listener_count(&self, collection: &str) -> usize
pub fn listener_count(&self, collection: &str) -> usize
Get the number of active listeners for a collection
Sourcepub fn total_listeners(&self) -> usize
pub fn total_listeners(&self) -> usize
Get total number of active listeners
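§Examples
A small sketch combining the two listener counters documented above:
let users_listeners = db.listener_count("users");
let all_listeners = db.total_listeners();
println!("{} listeners on 'users', {} in total", users_listeners, all_listeners);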
Sourcepub fn flush(&self) -> Result<()>
pub fn flush(&self) -> Result<()>
Flushes all buffered writes to disk to ensure durability.
This method forces all pending writes from:
- Write buffer (if enabled)
- Cold storage internal buffers
- Write-ahead log (if enabled)
Call this when you need to ensure data persistence before a critical operation or shutdown. After flush() completes, all data is guaranteed to be on disk even if power fails.
§Performance
- Flush time: ~10-50ms depending on buffered data
- Triggers OS-level fsync() for durability guarantee
- Truncates WAL after successful flush
- Not needed for every write (WAL provides durability)
§Examples
use aurora_db::Aurora;
let db = Aurora::open("mydb.db")?;
// Basic flush after critical write
db.insert_into("users", data).await?;
db.flush()?; // Ensure data is persisted to disk
// Graceful shutdown pattern
fn shutdown(db: &Aurora) -> Result<()> {
println!("Flushing pending writes...");
db.flush()?;
println!("Shutdown complete - all data persisted");
Ok(())
}
// Periodic checkpoint pattern
use std::time::Duration;
use std::thread;
let db = db.clone();
thread::spawn(move || {
loop {
thread::sleep(Duration::from_secs(60));
if let Err(e) = db.flush() {
eprintln!("Flush error: {}", e);
} else {
println!("Checkpoint: data flushed to disk");
}
}
});
// Critical transaction pattern
let tx_id = db.begin_transaction();
// Multiple operations
db.insert_into("orders", order_data).await?;
db.update_document("inventory", product_id, updates).await?;
db.insert_into("audit_log", audit_data).await?;
// Commit and flush immediately
db.commit_transaction(tx_id)?;
db.flush()?; // Critical: ensure transaction is on disk
// Backup preparation
println!("Preparing backup...");
db.flush()?; // Ensure all data is written
std::fs::copy("mydb.db", "backup.db")?;
println!("Backup complete");§When to Use
- Before graceful shutdown
- After critical transactions
- Before creating backups
- Periodic checkpoints (every 30-60 seconds)
- Before risky operations
§When NOT to Use
- After every single write (too slow, WAL provides durability)
- In high-throughput loops (batch instead)
- When durability mode is already Immediate
§Important Notes
- WAL provides durability even without explicit flush()
- flush() adds latency (~10-50ms) so use strategically
- Automatic flush happens during graceful shutdown
- After flush(), WAL is truncated (data is in main storage)
§See Also
- Aurora::with_config() to set durability mode
- WAL (Write-Ahead Log) provides durability without explicit flushes
Sourcepub async fn put(
&self,
key: String,
value: Vec<u8>,
ttl: Option<Duration>,
) -> Result<()>
pub async fn put( &self, key: String, value: Vec<u8>, ttl: Option<Duration>, ) -> Result<()>
Store a key-value pair (low-level storage)
This is the low-level method. For documents, use insert_into() instead.
Writes are buffered and batched for performance.
§Arguments
- key - Unique key (format: "collection:id" for documents)
- value - Raw bytes to store
- ttl - Optional time-to-live (None = permanent)
§Performance
- Buffered writes: ~15-30K docs/sec
- Batching improves throughput significantly
- Call flush() to ensure data is persisted
§Examples
use std::time::Duration;
// Permanent storage
let data = serde_json::to_vec(&my_struct)?;
db.put("mykey".to_string(), data, None)?;
// With TTL (expires after 1 hour)
db.put("session:abc".to_string(), session_data, Some(Duration::from_secs(3600)))?;
// Better: use insert_into() for documents
db.insert_into("users", vec![("name", Value::String("Alice".into()))])?;Sourcepub fn scan_and_filter<F>(
&self,
collection: &str,
filter: F,
limit: Option<usize>,
) -> Result<Vec<Document>>
pub fn scan_and_filter<F>( &self, collection: &str, filter: F, limit: Option<usize>, ) -> Result<Vec<Document>>
Scan a collection with a filter and early-termination support. Used by QueryBuilder for optimized queries with LIMIT.
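§Examples
A minimal sketch; the bound on the filter closure is not shown in this signature, so Fn(&Document) -> bool is an assumption here:
// Scan "users" and stop after the first 10 matches
let adults = db.scan_and_filter("users", |doc| {
    // hypothetical predicate over the document's fields
    matches!(doc.data.get("age"), Some(Value::Int(age)) if *age >= 18)
}, Some(10))?;
println!("Found {} adults", adults.len());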
pub async fn put_blob(&self, key: String, file_path: &Path) -> Result<()>
Sourcepub async fn new_collection<F: IntoFieldDefinition>(
&self,
name: &str,
fields: Vec<F>,
) -> Result<()>
pub async fn new_collection<F: IntoFieldDefinition>( &self, name: &str, fields: Vec<F>, ) -> Result<()>
Create a new collection with schema definition
Collections are like tables in SQL - they define the structure of your documents. The third boolean parameter indicates if the field should be indexed for fast lookups.
§Arguments
- name - Collection name
- fields - Vector of (field_name, field_type, indexed) tuples:
  - Field name (accepts both &str and String)
  - Field type (String, Int, Float, Bool, etc.)
  - Indexed: true for fast lookups, false for no index
§Performance
- Indexed fields: Fast equality queries (O(1) lookup)
- Non-indexed fields: Full scan required for queries
- Unique fields are automatically indexed
§Examples
use aurora_db::{Aurora, types::FieldType};
let db = Aurora::open("mydb.db")?;
// Create a users collection
db.new_collection("users", vec![
("name", FieldType::String, false), // Not indexed
("email", FieldType::String, true), // Indexed - fast lookups
("age", FieldType::Int, false),
("active", FieldType::Bool, true), // Indexed
("score", FieldType::Float, false),
]).await?;
// Idempotent - calling again is safe
db.new_collection("users", vec![/* ... */]).await?; // OK!
Sourcepub async fn insert_into(
&self,
collection: &str,
data: Vec<(&str, Value)>,
) -> Result<String>
pub async fn insert_into( &self, collection: &str, data: Vec<(&str, Value)>, ) -> Result<String>
Insert a document into a collection
Automatically generates a UUID for the document and validates against collection schema and unique constraints. Returns the generated document ID.
§Performance
- Single insert: ~15,000 docs/sec
- Bulk insert: Use batch_insert() for 10+ documents (~50,000 docs/sec)
- Triggers PubSub events for real-time listeners
§Arguments
- collection - Name of the collection to insert into
- data - Document fields and values to insert
§Returns
The auto-generated ID of the inserted document or an error
§Errors
- CollectionNotFound: Collection doesn't exist
- ValidationError: Data violates schema or unique constraints
- SerializationError: Invalid data format
§Examples
use aurora_db::{Aurora, types::Value};
let db = Aurora::open("mydb.db")?;

// Basic insertion
let user_id = db.insert_into("users", vec![
    ("name", Value::String("Alice Smith".to_string())),
    ("email", Value::String("alice@example.com".to_string())),
    ("age", Value::Int(28)),
    ("active", Value::Bool(true)),
]).await?;

println!("Created user with ID: {}", user_id);

// Inserting with nested data
let order_id = db.insert_into("orders", vec![
    ("user_id", Value::String(user_id.clone())),
    ("total", Value::Float(99.99)),
    ("status", Value::String("pending".to_string())),
    ("items", Value::Array(vec![
        Value::String("item-123".to_string()),
        Value::String("item-456".to_string()),
    ])),
]).await?;

// Error handling - unique constraint violation
match db.insert_into("users", vec![
    ("email", Value::String("alice@example.com".to_string())), // Duplicate!
    ("name", Value::String("Alice Clone".to_string())),
]).await {
    Ok(id) => println!("Inserted: {}", id),
    Err(e) => println!("Failed: {} (email already exists)", e),
}

// For bulk inserts (10+ documents), use batch_insert() instead
let users = vec![
    HashMap::from([
        ("name".to_string(), Value::String("Bob".to_string())),
        ("email".to_string(), Value::String("bob@example.com".to_string())),
    ]),
    HashMap::from([
        ("name".to_string(), Value::String("Carol".to_string())),
        ("email".to_string(), Value::String("carol@example.com".to_string())),
    ]),
    // … more documents
];
let ids = db.batch_insert("users", users).await?; // 3x faster!
println!("Inserted {} users", ids.len());
pub async fn insert_map( &self, collection: &str, data: HashMap<String, Value>, ) -> Result<String>
Sourcepub async fn batch_insert(
&self,
collection: &str,
documents: Vec<HashMap<String, Value>>,
) -> Result<Vec<String>>
pub async fn batch_insert( &self, collection: &str, documents: Vec<HashMap<String, Value>>, ) -> Result<Vec<String>>
Batch insert multiple documents with optimized write path
Inserts multiple documents in a single optimized operation, bypassing the write buffer for better performance. Ideal for bulk data loading, migrations, or initial database seeding. 3x faster than individual inserts.
§Performance
- Insert speed: ~50,000 docs/sec (vs ~15,000 for single inserts)
- Batch writes to WAL and storage
- Validates all unique constraints
- Use for 10+ documents minimum
§Arguments
- collection - Name of the collection to insert into
- documents - Vector of document data as HashMaps
§Returns
Vector of auto-generated document IDs or an error
§Examples
use aurora_db::{Aurora, types::Value};
use std::collections::HashMap;

let db = Aurora::open("mydb.db")?;

// Bulk user import
let users = vec![
    HashMap::from([
        ("name".to_string(), Value::String("Alice".into())),
        ("email".to_string(), Value::String("alice@example.com".into())),
        ("age".to_string(), Value::Int(28)),
    ]),
    HashMap::from([
        ("name".to_string(), Value::String("Bob".into())),
        ("email".to_string(), Value::String("bob@example.com".into())),
        ("age".to_string(), Value::Int(32)),
    ]),
    HashMap::from([
        ("name".to_string(), Value::String("Carol".into())),
        ("email".to_string(), Value::String("carol@example.com".into())),
        ("age".to_string(), Value::Int(25)),
    ]),
];

let ids = db.batch_insert("users", users).await?;
println!("Inserted {} users", ids.len());

// Seeding test data
let test_products: Vec<HashMap<String, Value>> = (0..1000)
    .map(|i| HashMap::from([
        ("sku".to_string(), Value::String(format!("PROD-{:04}", i))),
        ("price".to_string(), Value::Float(9.99 + i as f64)),
        ("stock".to_string(), Value::Int(100)),
    ]))
    .collect();

let ids = db.batch_insert("products", test_products).await?;
// Much faster than 1000 individual insert_into() calls!

// Migration from CSV data
let mut csv_reader = csv::Reader::from_path("data.csv")?;
let mut batch = Vec::new();

for result in csv_reader.records() {
    let record = result?;
    let doc = HashMap::from([
        ("field1".to_string(), Value::String(record[0].to_string())),
        ("field2".to_string(), Value::String(record[1].to_string())),
    ]);
    batch.push(doc);

    // Insert in batches of 1000
    if batch.len() >= 1000 {
        db.batch_insert("imported_data", batch.clone()).await?;
        batch.clear();
    }
}

// Insert remaining
if !batch.is_empty() {
    db.batch_insert("imported_data", batch).await?;
}
- `ValidationError`: Unique constraint violation on any document
- `CollectionNotFound`: Collection doesn't exist
- `IoError`: Storage write failure
- All inserts are atomic - if one fails, none are inserted
- UUIDs are auto-generated for all documents
- PubSub events are published for each insert
- For 10+ documents, this is 3x faster than individual inserts
- For < 10 documents, use `insert_into()` instead
- `insert_into()` for single document inserts
- `import_from_json()` for file-based bulk imports
- `batch_write()` for low-level batch operations
Sourcepub async fn update_document(
&self,
collection: &str,
doc_id: &str,
updates: Vec<(&str, Value)>,
) -> Result<()>
pub async fn update_document( &self, collection: &str, doc_id: &str, updates: Vec<(&str, Value)>, ) -> Result<()>
Update a document by ID
§Arguments
- collection - Collection name
- doc_id - Document ID to update
- updates - New field values to set
§Returns
Ok(()) on success, or an error if the document doesn’t exist
§Examples
db.update_document("users", &user_id, vec![
("status", Value::String("active".to_string())),
("last_login", Value::String(chrono::Utc::now().to_rfc3339())),
]).await?;
pub async fn get_all_collection( &self, collection: &str, ) -> Result<Vec<Document>>
pub fn get_data_by_pattern( &self, pattern: &str, ) -> Result<Vec<(String, DataInfo)>>
Sourcepub fn begin_transaction(&self) -> TransactionId
pub fn begin_transaction(&self) -> TransactionId
Begin a transaction
All operations after beginning a transaction will be part of the transaction until either commit_transaction() or rollback_transaction() is called.
§Returns
The TransactionId to pass to commit_transaction() or rollback_transaction()
§Examples
// Start a transaction for atomic operations
let tx_id = db.begin_transaction();

// Perform multiple operations
db.insert_into("accounts", vec![("user_id", Value::String(user_id)), ("balance", Value::Float(100.0))]).await?;
db.insert_into("audit_log", vec![("action", Value::String("account_created".to_string()))]).await?;

// Commit all changes, or roll back if there's an error
if all_ok {
    db.commit_transaction(tx_id)?;
} else {
    db.rollback_transaction(tx_id)?;
}
Begin a new transaction for atomic operations
Transactions ensure all-or-nothing execution: either all operations succeed,
or none of them are applied. Perfect for maintaining data consistency.
use aurora_db::{Aurora, types::Value};
let db = Aurora::open("mydb.db")?;
// Start transaction
let tx_id = db.begin_transaction();
// Perform multiple operations
db.insert_into("accounts", vec![
("user_id", Value::String("alice".into())),
("balance", Value::Int(1000)),
]).await?;
db.insert_into("accounts", vec![
("user_id", Value::String("bob".into())),
("balance", Value::Int(500)),
]).await?;
// Commit if all succeeded
db.commit_transaction(tx_id)?;
// Or rollback on error
// db.rollback_transaction(tx_id)?;
Sourcepub fn commit_transaction(&self, tx_id: TransactionId) -> Result<()>
pub fn commit_transaction(&self, tx_id: TransactionId) -> Result<()>
Commit a transaction, making all changes permanent
All operations within the transaction are atomically applied to the database. If any operation fails, none are applied.
§Arguments
tx_id- Transaction ID returned from begin_transaction()
§Examples
use aurora_db::{Aurora, types::Value};
let db = Aurora::open("mydb.db")?;

// Transfer money between accounts
let tx_id = db.begin_transaction();

// Deduct from Alice
db.update_document("accounts", "alice", vec![
    ("balance", Value::Int(900)), // Was 1000
]).await?;

// Add to Bob
db.update_document("accounts", "bob", vec![
    ("balance", Value::Int(600)), // Was 500
]).await?;

// Both updates succeed - commit them
db.commit_transaction(tx_id)?;

println!("Transfer completed!");
Sourcepub fn rollback_transaction(&self, tx_id: TransactionId) -> Result<()>
pub fn rollback_transaction(&self, tx_id: TransactionId) -> Result<()>
Roll back a transaction, discarding all changes
All operations within the transaction are discarded. The database state remains unchanged. Use this when an error occurs during transaction processing.
§Arguments
tx_id- Transaction ID returned from begin_transaction()
§Examples
use aurora_db::{Aurora, types::Value};
let db = Aurora::open("mydb.db")?;

// Attempt a transfer with validation
let tx_id = db.begin_transaction();

let result = async {
    // Deduct from Alice
    let alice = db.get_document("accounts", "alice")?;
    let balance = alice.as_ref().and_then(|doc| doc.data.get("balance"));
if let Some(Value::Int(bal)) = balance {
if *bal < 100 {
return Err("Insufficient funds");
}
db.update_document("accounts", "alice", vec![
("balance", Value::Int(bal - 100)),
]).await?;
db.update_document("accounts", "bob", vec![
("balance", Value::Int(600)),
]).await?;
Ok(())
} else {
Err("Account not found")
    }
}.await;
match result {
    Ok(_) => {
        db.commit_transaction(tx_id)?;
        println!("Transfer completed");
    }
    Err(e) => {
        db.rollback_transaction(tx_id)?;
        println!("Transfer failed: {}, changes rolled back", e);
    }
}
Sourcepub async fn create_index(&self, collection: &str, field: &str) -> Result<()>
pub async fn create_index(&self, collection: &str, field: &str) -> Result<()>
Create a secondary index on a field for faster queries
Indexes dramatically improve query performance for frequently accessed fields, trading increased memory usage and slower writes for much faster reads.
§When to Create Indexes
- Frequent queries: Fields used in 80%+ of your queries
- High cardinality: Fields with many unique values (user_id, email)
- Sorting/filtering: Fields used in ORDER BY or WHERE clauses
- Large collections: Most beneficial with 10,000+ documents
§When NOT to Index
- Low cardinality fields (e.g., boolean flags, small enums)
- Rarely queried fields
- Fields that change frequently (write-heavy workloads)
- Small collections (<1,000 documents) - full scans are fast enough
§Performance Characteristics
- Query speedup: O(n) → O(1) for equality filters
- Memory cost: ~100-200 bytes per document per index
- Write slowdown: ~20-30% longer insert/update times
- Build time: ~5,000 docs/sec for initial indexing
§Arguments
- collection - Name of the collection
- field - Name of the field to index
§Important Notes
- Unique fields (declared in the collection schema) are automatically indexed
- Non-unique fields can be indexed separately using create_index()
§Examples
db.new_collection("users", vec![
    ("email", FieldType::String, true),   // Unique - automatically indexed
    ("age", FieldType::Int, false),
    ("active", FieldType::Bool, false),
]).await?;
// Fast lookups on the indexed 'email' field
let user = db.query("users")
    .filter(|f| f.eq("email", "alice@example.com"))
    .first_one()
    .await?;
// DON'T index 'active' - low cardinality (only 2 values: true/false)
// A full scan is fast enough for boolean fields
// DO index 'age' if you frequently query age ranges
db.create_index("users", "age").await?;
let young_users = db.query("users")
.filter(|f| f.lt("age", 30))
.collect()
    .await?;
§Real-World Example: E-commerce Orders
// Orders collection: 1 million documents
db.new_collection("orders", vec![
("user_id", FieldType::String), // High cardinality
("status", FieldType::String), // Low cardinality (pending, shipped, delivered)
("created_at", FieldType::String),
("total", FieldType::Float),
]).await?;
// Index user_id - queries like "show me my orders" are common
db.create_index("orders", "user_id").await?; // Good choice
// Query speedup: 2.5s → 0.001s
let my_orders = db.query("orders")
.filter(|f| f.eq("user_id", user_id))
.collect()
.await?;
// DON'T index 'status' - only 3 possible values
// Scanning 1M docs takes ~100ms, indexing won't help much
// Index created_at if you frequently query recent orders
db.create_index("orders", "created_at").await?; // Good for time-based queriesSourcepub fn query<'a>(&'a self, collection: &str) -> QueryBuilder<'a>
pub fn query<'a>(&'a self, collection: &str) -> QueryBuilder<'a>
Query documents in a collection with filtering, sorting, and pagination
Returns a QueryBuilder that allows fluent chaining of query operations.
Queries use early termination for LIMIT clauses, making them extremely fast
even on large collections (6,800x faster than naive implementations).
§Performance
- With LIMIT: O(k) where k = limit + offset (early termination!)
- Without LIMIT: O(n) where n = matching documents
- Uses secondary indices when available for equality filters
- Hot cache: ~1M reads/sec, Cold storage: ~500K reads/sec
§Examples
use aurora_db::{Aurora, types::Value};
let db = Aurora::open("mydb.db")?;
// Simple equality query
let active_users = db.query("users")
.filter(|f| f.eq("active", Value::Bool(true)))
.collect()
.await?;
// Range query with pagination (FAST - uses early termination!)
let top_scorers = db.query("users")
.filter(|f| f.gt("score", Value::Int(1000)))
.order_by("score", false) // descending
.limit(10)
.offset(20)
.collect()
.await?;
// Multiple filters
let premium_active = db.query("users")
.filter(|f| f.eq("tier", Value::String("premium".into())))
.filter(|f| f.eq("active", Value::Bool(true)))
.limit(100) // Only scans ~200 docs, not all million!
.collect()
.await?;
// Text search in a field
let matching = db.query("articles")
.filter(|f| f.contains("title", "rust"))
.collect()
    .await?;
Sourcepub fn search<'a>(&'a self, collection: &str) -> SearchBuilder<'a>
pub fn search<'a>(&'a self, collection: &str) -> SearchBuilder<'a>
Create a search builder for full-text search
§Arguments
collection- Name of the collection to search
§Returns
A SearchBuilder for configuring and executing searches
§Examples
// Search for documents containing text
let search_results = db.search("articles")
.field("content")
.matching("quantum computing")
.fuzzy(true) // Enable fuzzy matching for typo tolerance
.collect()
    .await?;
Sourcepub fn get_document(
&self,
collection: &str,
id: &str,
) -> Result<Option<Document>>
pub fn get_document( &self, collection: &str, id: &str, ) -> Result<Option<Document>>
Retrieve a document by ID
Fast direct lookup when you know the document ID. Significantly faster than querying with filters when ID is known.
§Performance
- Hot cache: ~1,000,000 reads/sec (instant)
- Cold storage: ~500,000 reads/sec (disk I/O)
- Complexity: O(1) - constant time lookup
- Much faster than .query().filter(|f| f.eq("id", ...)), which is O(n)
§Arguments
- collection - Name of the collection to query
- id - ID of the document to retrieve
§Returns
The document if found, None if not found, or an error
§Examples
use aurora_db::{Aurora, types::Value};
let db = Aurora::open("mydb.db")?;

// Basic retrieval
if let Some(user) = db.get_document("users", &user_id)? {
    println!("Found user: {}", user.id);

    // Access fields safely
    if let Some(Value::String(name)) = user.data.get("name") {
        println!("Name: {}", name);
    }
    if let Some(Value::Int(age)) = user.data.get("age") {
        println!("Age: {}", age);
    }
} else {
    println!("User not found");
}

// Idiomatic error handling
let user = db.get_document("users", &user_id)?
    .ok_or_else(|| AqlError::new(ErrorCode::NotFound, "User not found".into()))?;

// Checking existence before operations
if db.get_document("users", &user_id)?.is_some() {
    db.update_document("users", &user_id, vec![
        ("last_login", Value::String(chrono::Utc::now().to_rfc3339())),
    ]).await?;
}

// Batch retrieval (fetch multiple by ID)
let user_ids = vec!["user-1", "user-2", "user-3"];
let users: Vec<Document> = user_ids.iter()
    .filter_map(|id| db.get_document("users", id).ok().flatten())
    .collect();

println!("Found {} out of {} users", users.len(), user_ids.len());
- You know the document ID (from insert, previous query, or URL param)
- Need fastest possible lookup (1M reads/sec)
- Fetching a single document
- Searching by other fields → Use `query().filter()` instead
- Need multiple documents by criteria → Use `query().collect()` instead
- Don't know the ID → Use `find_by_field()` or `query()` instead
Sourcepub async fn delete(&self, key: &str) -> Result<()>
pub async fn delete(&self, key: &str) -> Result<()>
Delete a document by ID
Permanently removes a document from storage, cache, and all indices. Publishes a delete event for PubSub subscribers. This operation cannot be undone.
§Performance
- Delete speed: ~50,000 deletes/sec
- Cleans up hot cache, cold storage, primary + secondary indices
- Triggers PubSub events for listeners
§Arguments
key- Full key in format “collection:id” (e.g., “users:123”)
§Returns
Success or an error
§Errors
- InvalidOperation: Invalid key format (must be "collection:id")
- IoError: Storage deletion failed
§Examples
use aurora_db::Aurora;
let db = Aurora::open("mydb.db")?;
// Basic deletion (note: requires "collection:id" format)
db.delete("users:abc123").await?;
// Delete with existence check
let user_id = "user-456";
if db.get_document("users", user_id)?.is_some() {
db.delete(&format!("users:{}", user_id)).await?;
println!("User deleted");
} else {
println!("User not found");
}
// Error handling
match db.delete("users:nonexistent").await {
Ok(_) => println!("Deleted successfully"),
Err(e) => println!("Delete failed: {}", e),
}
// Batch deletion using query
let inactive_count = db.delete_by_query("users", |f| {
f.eq("active", Value::Bool(false))
}).await?;
println!("Deleted {} inactive users", inactive_count);
// Delete with cascading (manual cascade pattern)
let user_id = "user-123";
// Delete user's orders first
let orders = db.query("orders")
.filter(|f| f.eq("user_id", user_id))
.collect()
.await?;
for order in orders {
db.delete(&format!("orders:{}", order.id)).await?;
}
// Then delete the user
db.delete(&format!("users:{}", user_id)).await?;
println!("User and all orders deleted");§Alternative: Soft Delete Pattern
For recoverable deletions, use soft deletes instead:
// Soft delete - mark as deleted instead of removing
db.update_document("users", &user_id, vec![
("deleted", Value::Bool(true)),
("deleted_at", Value::String(chrono::Utc::now().to_rfc3339())),
]).await?;
// Query excludes soft-deleted items
let active_users = db.query("users")
.filter(|f| f.eq("deleted", Value::Bool(false)))
.collect()
.await?;
// Later: hard delete after retention period
let old_deletions = db.query("users")
.filter(|f| f.eq("deleted", Value::Bool(true)))
.filter(|f| f.lt("deleted_at", thirty_days_ago))
.collect()
.await?;
for user in old_deletions {
db.delete(&format!("users:{}", user.id)).await?;
}
§Important Notes
- Deletion is permanent - no undo/recovery
- Consider soft deletes for recoverable operations
- Use transactions for multi-document deletions
- PubSub subscribers will receive delete events
- All indices are automatically cleaned up
pub async fn delete_collection(&self, collection: &str) -> Result<()>
pub async fn search_text( &self, collection: &str, field: &str, query: &str, ) -> Result<Vec<Document>>
Sourcepub fn export_as_json(&self, collection: &str, output_path: &str) -> Result<()>
pub fn export_as_json(&self, collection: &str, output_path: &str) -> Result<()>
Export a collection to a JSON file
Creates a JSON file containing all documents in the collection.
Useful for backups, data migration, or sharing datasets.
Automatically appends .json extension if not present.
§Performance
- Export speed: ~10,000 docs/sec
- Scans entire collection from cold storage
- Memory efficient: streams documents to file
§Arguments
- collection - Name of the collection to export
- output_path - Path to the output JSON file (.json auto-appended)
§Returns
Success or an error
§Examples
use aurora_db::Aurora;
let db = Aurora::open("mydb.db")?;
// Basic export
db.export_as_json("users", "./backups/users_2024-01-15")?;
// Creates: ./backups/users_2024-01-15.json
// Timestamped backup
let timestamp = chrono::Utc::now().format("%Y%m%d_%H%M%S");
let backup_path = format!("./backups/users_{}", timestamp);
db.export_as_json("users", &backup_path)?;
// Export multiple collections
for collection in &["users", "orders", "products"] {
db.export_as_json(collection, &format!("./export/{}", collection))?;
}
§Output Format
The exported JSON has this structure:
{
"users": [
{ "id": "123", "name": "Alice", "email": "alice@example.com" },
{ "id": "456", "name": "Bob", "email": "bob@example.com" }
]
}
§See Also
- export_as_csv() for CSV format export
- import_from_json() to restore exported data
Sourcepub fn export_as_csv(&self, collection: &str, filename: &str) -> Result<()>
pub fn export_as_csv(&self, collection: &str, filename: &str) -> Result<()>
Export a collection to a CSV file
Creates a CSV file with headers from the first document and rows for each document.
Useful for spreadsheet analysis, data science workflows, or reporting.
Automatically appends .csv extension if not present.
§Performance
- Export speed: ~8,000 docs/sec
- Memory efficient: streams rows to file
- Headers determined from first document
§Arguments
- collection - Name of the collection to export
- filename - Path to the output CSV file (.csv auto-appended)
§Returns
Success or an error
§Examples
use aurora_db::Aurora;
let db = Aurora::open("mydb.db")?;
// Basic CSV export
db.export_as_csv("users", "./reports/users")?;
// Creates: ./reports/users.csv
// Export for analysis in Excel/Google Sheets
db.export_as_csv("orders", "./analytics/sales_data")?;
// Monthly report generation
let month = chrono::Utc::now().format("%Y-%m");
db.export_as_csv("transactions", &format!("./reports/transactions_{}", month))?;§Output Format
id,name,email,age
123,Alice,alice@example.com,28
456,Bob,bob@example.com,32
§Important Notes
- Headers are taken from the first document’s fields
- Documents with different fields will have empty values for missing fields
- Nested objects/arrays are converted to strings
- Best for flat document structures
§See Also
- export_as_json() for JSON format (better for nested data)
- For complex nested structures, use JSON export instead
pub fn find<'a>(&'a self, collection: &str) -> QueryBuilder<'a>
pub async fn find_by_id( &self, collection: &str, id: &str, ) -> Result<Option<Document>>
pub async fn find_one<F>( &self, collection: &str, filter_fn: F, ) -> Result<Option<Document>>
pub async fn find_by_field<T: Into<Value> + Clone + Send + Sync + 'static>( &self, collection: &str, field: &'static str, value: T, ) -> Result<Vec<Document>>
pub async fn find_by_fields( &self, collection: &str, fields: Vec<(&str, Value)>, ) -> Result<Vec<Document>>
pub async fn find_in_range<T: Into<Value> + Clone + Send + Sync + 'static>( &self, collection: &str, field: &'static str, min: T, max: T, ) -> Result<Vec<Document>>
pub async fn find_complex<'a>(&'a self, collection: &str) -> QueryBuilder<'a>
pub fn advanced_search<'a>(&'a self, collection: &str) -> SearchBuilder<'a>
pub async fn upsert( &self, collection: &str, id: &str, data: Vec<(&str, Value)>, ) -> Result<String>
pub async fn increment( &self, collection: &str, id: &str, field: &str, amount: i64, ) -> Result<i64>
pub async fn delete_by_query<F>( &self, collection: &str, filter_fn: F, ) -> Result<usize>
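The convenience helpers above are listed without examples; a brief sketch of upsert, increment, and delete_by_query, assuming they follow the same Value and filter conventions as the documented methods:
// Insert or overwrite a document with a known ID
let id = db.upsert("users", "user-123", vec![
    ("name", Value::String("Alice".into())),
    ("active", Value::Bool(true)),
]).await?;
// Atomically bump a numeric field and read the new value
let new_count = db.increment("users", "user-123", "login_count", 1).await?;
// Delete every document matching a filter, returning how many were removed
let removed = db.delete_by_query("users", |f| f.eq("active", Value::Bool(false))).await?;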
Sourcepub async fn import_from_json(
&self,
collection: &str,
filename: &str,
) -> Result<ImportStats>
pub async fn import_from_json( &self, collection: &str, filename: &str, ) -> Result<ImportStats>
Import documents from a JSON file into a collection
Validates each document against the collection schema, skips duplicates (by ID), and provides detailed statistics about the import operation. Useful for restoring backups, migrating data, or seeding development databases.
§Performance
- Import speed: ~5,000 docs/sec (with validation)
- Memory efficient: processes documents one at a time
- Validates schema and unique constraints
§Arguments
- collection - Name of the collection to import into
- filename - Path to the JSON file containing documents (array format)
§Returns
ImportStats containing counts of imported, skipped, and failed documents
§Examples
use aurora_db::Aurora;
let db = Aurora::open("mydb.db")?;
// Basic import
let stats = db.import_from_json("users", "./data/new_users.json").await?;
println!("Imported: {}, Skipped: {}, Failed: {}",
stats.imported, stats.skipped, stats.failed);
// Restore from backup
let backup_file = "./backups/users_2024-01-15.json";
let stats = db.import_from_json("users", backup_file).await?;
if stats.failed > 0 {
eprintln!("Warning: {} documents failed validation", stats.failed);
}
// Idempotent import - duplicates are skipped
let stats = db.import_from_json("users", "./data/users.json").await?;
// Running again will skip all existing documents
let stats2 = db.import_from_json("users", "./data/users.json").await?;
assert_eq!(stats2.skipped, stats.imported);
// Migration from another system
db.new_collection("products", vec![
("sku", FieldType::String),
("name", FieldType::String),
("price", FieldType::Float),
]).await?;
let stats = db.import_from_json("products", "./migration/products.json").await?;
println!("Migration complete: {} products imported", stats.imported);§Expected JSON Format
The JSON file should contain an array of document objects:
[
{ "id": "123", "name": "Alice", "email": "alice@example.com" },
{ "id": "456", "name": "Bob", "email": "bob@example.com" },
{ "name": "Carol", "email": "carol@example.com" }
]
§Behavior
- Documents with existing IDs are skipped (duplicate detection)
- Documents without IDs get auto-generated UUIDs
- Schema validation is performed on all fields
- Failed documents are counted but don’t stop the import
- Unique constraints are checked
§See Also
- export_as_json() to create compatible backup files
- batch_insert() for programmatic bulk inserts
Sourcepub fn get_collection_definition(&self, collection: &str) -> Result<Collection>
pub fn get_collection_definition(&self, collection: &str) -> Result<Collection>
Get collection definition
Sourcepub fn get_database_stats(&self) -> Result<DatabaseStats>
pub fn get_database_stats(&self) -> Result<DatabaseStats>
Get storage statistics and information about the database
Sourcepub fn is_in_hot_cache(&self, key: &str) -> bool
pub fn is_in_hot_cache(&self, key: &str) -> bool
Check if a key is currently stored in the hot cache
Sourcepub fn clear_hot_cache(&self)
pub fn clear_hot_cache(&self)
Clear the hot cache (useful when memory needs to be freed)
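§Examples
A small sketch tying these two cache helpers to get_cache_stats(); the "collection:id" key format follows the other key-value examples on this page:
// Check whether a specific key is currently cached
if db.is_in_hot_cache("users:12345") {
    println!("users:12345 is served from memory");
}
// Free memory by dropping all cached entries (they reload from cold storage on demand)
db.clear_hot_cache();
// size counts current cache entries, per the get_cache_stats() docs above
assert_eq!(db.get_cache_stats().size, 0);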
Sourcepub async fn prewarm_cache(
&self,
collection: &str,
limit: Option<usize>,
) -> Result<usize>
pub async fn prewarm_cache( &self, collection: &str, limit: Option<usize>, ) -> Result<usize>
Prewarm the cache by loading frequently accessed data from cold storage
Loads documents from a collection into memory cache to eliminate cold-start latency. Dramatically improves initial query performance after database startup by preloading the most commonly accessed data.
§Performance Impact
- Prewarming speed: ~20,000 docs/sec
- Improves subsequent read latency from ~2ms (disk) to ~0.001ms (memory)
- Cache hit rate jumps from 0% to 95%+ for prewarmed data
- Memory cost: ~500 bytes per document average
§Arguments
- collection - The collection to prewarm
- limit - Maximum number of documents to load (default: 1000, None = all)
§Returns
Number of documents loaded into cache
§Examples
use aurora_db::Aurora;
let db = Aurora::open("mydb.db")?;
// Prewarm frequently accessed collection
let loaded = db.prewarm_cache("users", Some(1000)).await?;
println!("Prewarmed {} user documents", loaded);
// Now queries are fast from the start
let stats_before = db.get_cache_stats();
let users = db.query("users").collect().await?;
let stats_after = db.get_cache_stats();
// High hit rate thanks to prewarming
assert!(stats_after.hit_rate > 0.95);
// Startup optimization pattern
async fn startup_prewarm(db: &Aurora) -> Result<()> {
println!("Prewarming caches...");
// Prewarm most frequently accessed collections
db.prewarm_cache("users", Some(5000)).await?;
db.prewarm_cache("sessions", Some(1000)).await?;
db.prewarm_cache("products", Some(500)).await?;
let stats = db.get_cache_stats();
println!("Cache prewarmed: {} entries loaded", stats.size);
Ok(())
}
// Web server startup
#[tokio::main]
async fn main() {
let db = Aurora::open("app.db").unwrap();
// Prewarm before accepting requests
db.prewarm_cache("users", Some(10000)).await.unwrap();
// Server is now ready with hot cache
start_web_server(db).await;
}
// Prewarm all documents (for small collections)
let all_loaded = db.prewarm_cache("config", None).await?;
// All config documents now in memory
// Selective prewarming based on access patterns
async fn smart_prewarm(db: &Aurora) -> Result<()> {
// Load recent users (they're accessed most)
db.prewarm_cache("users", Some(1000)).await?;
// Load active sessions only
let active_sessions = db.query("sessions")
.filter(|f| f.eq("active", Value::Bool(true)))
.limit(500)
.collect()
.await?;
// Manually populate cache with hot data
for session in active_sessions {
// Reading automatically caches
db.get_document("sessions", &session.id)?;
}
Ok(())
}
§Typical Prewarming Scenarios
Web Application Startup:
// Load user data, sessions, and active content
db.prewarm_cache("users", Some(5000)).await?;
db.prewarm_cache("sessions", Some(2000)).await?;
db.prewarm_cache("posts", Some(1000)).await?;E-commerce Site:
// Load products, categories, and user carts
db.prewarm_cache("products", Some(500)).await?;
db.prewarm_cache("categories", None).await?; // All categories
db.prewarm_cache("active_carts", Some(1000)).await?;API Server:
// Load authentication data and rate limits
db.prewarm_cache("api_keys", None).await?;
db.prewarm_cache("rate_limits", Some(10000)).await?;§When to Use
- At application startup to eliminate cold-start latency
- After cache clear operations
- Before high-traffic events (product launches, etc.)
- When deploying new instances (load balancer warm-up)
§Memory Considerations
- 1,000 docs ≈ 500 KB memory
- 10,000 docs ≈ 5 MB memory
- 100,000 docs ≈ 50 MB memory
- Stay within configured cache capacity
§See Also
- get_cache_stats() to monitor cache effectiveness
- prewarm_all_collections() to prewarm all collections
- Aurora::with_config() to adjust cache capacity
Sourcepub async fn prewarm_all_collections(
&self,
docs_per_collection: Option<usize>,
) -> Result<HashMap<String, usize>>
pub async fn prewarm_all_collections( &self, docs_per_collection: Option<usize>, ) -> Result<HashMap<String, usize>>
Prewarm cache for all collections
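§Examples
A minimal sketch; it relies only on the HashMap<String, usize> return type shown in the signature:
// Load up to 1,000 documents from every collection into the hot cache
let loaded = db.prewarm_all_collections(Some(1000)).await?;
for (collection, count) in &loaded {
    println!("Prewarmed {} documents from '{}'", count, collection);
}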
Sourcepub async fn batch_write(&self, pairs: Vec<(String, Vec<u8>)>) -> Result<()>
pub async fn batch_write(&self, pairs: Vec<(String, Vec<u8>)>) -> Result<()>
Store multiple key-value pairs efficiently in a single batch operation
Low-level batch write operation that bypasses document validation and writes raw byte data directly to storage. Useful for advanced use cases, custom serialization, or maximum performance scenarios.
§Performance
- Write speed: ~100,000 writes/sec
- Single disk fsync for entire batch
- No validation or schema checking
- Direct storage access
§Arguments
pairs- Vector of (key, value) tuples where value is raw bytes
§Returns
Success or an error
§Examples
use aurora_db::Aurora;
let db = Aurora::open("mydb.db")?;
// Low-level batch write
let pairs = vec![
("users:123".to_string(), b"raw data 1".to_vec()),
("users:456".to_string(), b"raw data 2".to_vec()),
("cache:key1".to_string(), b"cached value".to_vec()),
];
db.batch_write(pairs).await?;
// Custom binary serialization
use bincode;
#[derive(Serialize, Deserialize)]
struct CustomData {
id: u64,
payload: Vec<u8>,
}
let custom_data = vec![
CustomData { id: 1, payload: vec![1, 2, 3] },
CustomData { id: 2, payload: vec![4, 5, 6] },
];
let pairs: Vec<(String, Vec<u8>)> = custom_data
.iter()
.map(|data| {
let key = format!("binary:{}", data.id);
let value = bincode::serialize(data).unwrap();
(key, value)
})
.collect();
db.batch_write(pairs).await?;
// Bulk cache population
let cache_entries: Vec<(String, Vec<u8>)> = (0..10000)
.map(|i| {
let key = format!("cache:item_{}", i);
let value = format!("value_{}", i).into_bytes();
(key, value)
})
.collect();
db.batch_write(cache_entries).await?;
// Writes 10,000 entries in ~100ms
§Important Notes
- No schema validation performed
- No unique constraint checking
- No automatic indexing
- Keys must follow “collection:id” format for proper grouping
- Values are raw bytes - you handle serialization
- Use batch_insert() for validated document inserts
§When to Use
- Maximum write performance needed
- Custom serialization formats (bincode, msgpack, etc.)
- Cache population
- Low-level database operations
- You’re bypassing the document model
§When NOT to Use
- Regular document inserts → Use batch_insert() instead
- Need validation → Use batch_insert() instead
- Need indexing → Use batch_insert() instead
§See Also
- batch_insert() for validated document batch inserts
- put() for single key-value writes
Sourcepub fn scan_with_prefix(
&self,
prefix: &str,
) -> impl Iterator<Item = Result<(String, Vec<u8>)>> + '_
pub fn scan_with_prefix( &self, prefix: &str, ) -> impl Iterator<Item = Result<(String, Vec<u8>)>> + '_
Scan for keys with a specific prefix
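§Examples
A short sketch over the iterator of Result<(String, Vec<u8>)> items returned by this method:
// Iterate over all raw entries under the "users" collection prefix
for entry in db.scan_with_prefix("users:") {
    let (key, bytes) = entry?;
    println!("{} -> {} bytes", key, bytes.len());
}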
Sourcepub fn get_collection_stats(&self) -> Result<HashMap<String, CollectionStats>>
pub fn get_collection_stats(&self) -> Result<HashMap<String, CollectionStats>>
Get storage efficiency metrics for the database
Sourcepub fn search_by_value(
&self,
collection: &str,
field: &str,
value: &Value,
) -> Result<Vec<Document>>
pub fn search_by_value( &self, collection: &str, field: &str, value: &Value, ) -> Result<Vec<Document>>
Search for documents by exact value using an index
This method performs a fast lookup using a pre-created index
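§Examples
A brief sketch, assuming an index on the queried field has already been created with create_index():
// Fast indexed lookup by exact value
db.create_index("users", "email").await?;
let matches = db.search_by_value("users", "email", &Value::String("alice@example.com".into()))?;
println!("Found {} matching documents", matches.len());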
Sourcepub fn full_text_search(
&self,
collection: &str,
field: &str,
query: &str,
) -> Result<Vec<Document>>
pub fn full_text_search( &self, collection: &str, field: &str, query: &str, ) -> Result<Vec<Document>>
Perform a full-text search on an indexed text field
This provides more advanced text search capabilities including relevance ranking of results
Sourcepub async fn create_text_index(
&self,
collection: &str,
field: &str,
_enable_stop_words: bool,
) -> Result<()>
pub async fn create_text_index( &self, collection: &str, field: &str, _enable_stop_words: bool, ) -> Result<()>
Create a full-text search index on a text field
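§Examples
A minimal sketch pairing this method with full_text_search(); the meaning of the _enable_stop_words flag is an assumption based on its name:
// Build a text index on the 'content' field (stop-word filtering enabled)
db.create_text_index("articles", "content", true).await?;
// Relevance-ranked search over the indexed field
let results = db.full_text_search("articles", "content", "quantum computing")?;
println!("Found {} articles", results.len());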
pub async fn execute_simple_query( &self, builder: &SimpleQueryBuilder, ) -> Result<Vec<Document>>
pub async fn execute_dynamic_query( &self, collection: &str, payload: &QueryPayload, ) -> Result<Vec<Document>>
pub async fn process_network_request(&self, request: Request) -> Response
Sourcepub async fn create_indices(
&self,
collection: &str,
fields: &[&str],
) -> Result<()>
pub async fn create_indices( &self, collection: &str, fields: &[&str], ) -> Result<()>
Create indices for commonly queried fields automatically
This is a convenience method that creates indices for fields that are likely to be queried frequently, improving performance.
§Arguments
- collection - Name of the collection
- fields - List of field names to create indices for
§Examples
// Create indices for commonly queried fields
db.create_indices("users", &["email", "status", "created_at"]).await?;Sourcepub fn get_index_stats(&self, collection: &str) -> HashMap<String, IndexStats>
pub fn get_index_stats(&self, collection: &str) -> HashMap<String, IndexStats>
Get index statistics for a collection
This helps understand which indices exist and how effective they are.
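§Examples
A small sketch; it only debug-prints each IndexStats value (assuming it implements Debug) since its fields are not documented here:
let stats = db.get_index_stats("users");
for (field, index_stats) in &stats {
    println!("Index on '{}': {:?}", field, index_stats);
}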
Sourcepub async fn optimize_collection(&self, collection: &str) -> Result<()>
pub async fn optimize_collection(&self, collection: &str) -> Result<()>
Optimize a collection by creating indices for frequently filtered fields
This analyzes common query patterns and suggests/creates optimal indices.
Sourcepub async fn aql_get_all_collection(
&self,
collection: &str,
) -> Result<Vec<Document>>
pub async fn aql_get_all_collection( &self, collection: &str, ) -> Result<Vec<Document>>
Get all documents in a collection (AQL helper)
This is a wrapper around the internal query system optimized for bulk retrieval.
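A minimal sketch, assuming a "users" collection already exists.
// Fetch every document in the collection
let all_users = db.aql_get_all_collection("users").await?;
println!("Fetched {} documents", all_users.len());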
Sourcepub async fn aql_insert(
&self,
collection: &str,
data: HashMap<String, Value>,
) -> Result<Document>
pub async fn aql_insert( &self, collection: &str, data: HashMap<String, Value>, ) -> Result<Document>
Insert a document from a HashMap (AQL helper)
Returns the complete document (not just ID) for AQL executor
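A sketch of inserting from a HashMap; the field names and values are hypothetical, and the Value variants are assumed from earlier examples.
use std::collections::HashMap;

let mut data = HashMap::new();
data.insert("name".to_string(), Value::String("Alice".to_string()));
data.insert("age".to_string(), Value::Int(32));

// Returns the complete document rather than just its ID
let doc = db.aql_insert("users", data).await?;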
Sourcepub async fn aql_update_document(
&self,
collection: &str,
doc_id: &str,
updates: HashMap<String, Value>,
) -> Result<Document>
pub async fn aql_update_document( &self, collection: &str, doc_id: &str, updates: HashMap<String, Value>, ) -> Result<Document>
Update a document by ID with new data (AQL helper)
Merges new data with existing data and returns updated document
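A sketch of a partial update; doc_id is a hypothetical ID obtained from an earlier insert.
let mut updates = HashMap::new();
updates.insert("age".to_string(), Value::Int(33));

// Merges the new fields into the stored document and returns the result
let updated = db.aql_update_document("users", &doc_id, updates).await?;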
Sourcepub async fn aql_delete_document(
&self,
collection: &str,
doc_id: &str,
) -> Result<Document>
pub async fn aql_delete_document( &self, collection: &str, doc_id: &str, ) -> Result<Document>
Delete a document by ID (AQL helper)
Returns the deleted document
Sourcepub async fn aql_get_document(
&self,
collection: &str,
doc_id: &str,
) -> Result<Option<Document>>
pub async fn aql_get_document( &self, collection: &str, doc_id: &str, ) -> Result<Option<Document>>
Get a single document by ID (AQL helper)
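A sketch combining this helper with aql_delete_document above; doc_id is a hypothetical ID from an earlier insert.
// Look the document up first, then remove it if present
if let Some(_doc) = db.aql_get_document("users", &doc_id).await? {
    // Document exists; remove it and receive the deleted copy back
    let _deleted = db.aql_delete_document("users", &doc_id).await?;
}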
Sourcepub fn aql_begin_transaction(&self) -> Result<TransactionId>
pub fn aql_begin_transaction(&self) -> Result<TransactionId>
Begin a transaction (AQL helper) - returns transaction ID
Sourcepub async fn aql_commit_transaction(&self, tx_id: TransactionId) -> Result<()>
pub async fn aql_commit_transaction(&self, tx_id: TransactionId) -> Result<()>
Commit a transaction (AQL helper)
Sourcepub async fn aql_rollback_transaction(&self, tx_id: TransactionId) -> Result<()>
pub async fn aql_rollback_transaction(&self, tx_id: TransactionId) -> Result<()>
Rollback a transaction (AQL helper)
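A sketch wiring the three transaction helpers together; run_updates is a hypothetical function standing in for whatever AQL work happens inside the transaction.
let tx_id = db.aql_begin_transaction()?;
match run_updates(&db).await {
    // Commit on success, roll back on any error
    Ok(_) => db.aql_commit_transaction(tx_id).await?,
    Err(_) => db.aql_rollback_transaction(tx_id).await?,
}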
Sourcepub async fn create_collection_schema(
&self,
name: &str,
fields: HashMap<String, FieldDefinition>,
) -> Result<()>
pub async fn create_collection_schema( &self, name: &str, fields: HashMap<String, FieldDefinition>, ) -> Result<()>
Create a collection from AST schema definition
Sourcepub async fn add_field_to_schema(
&self,
collection_name: &str,
name: String,
definition: FieldDefinition,
) -> Result<()>
pub async fn add_field_to_schema( &self, collection_name: &str, name: String, definition: FieldDefinition, ) -> Result<()>
Add a field to an existing collection schema
Sourcepub async fn drop_field_from_schema(
&self,
collection_name: &str,
field_name: String,
) -> Result<()>
pub async fn drop_field_from_schema( &self, collection_name: &str, field_name: String, ) -> Result<()>
Drop a field from an existing collection schema
Sourcepub async fn rename_field_in_schema(
&self,
collection_name: &str,
from: String,
to: String,
) -> Result<()>
pub async fn rename_field_in_schema( &self, collection_name: &str, from: String, to: String, ) -> Result<()>
Rename a field in an existing collection schema
Sourcepub async fn modify_field_in_schema(
&self,
collection_name: &str,
name: String,
definition: FieldDefinition,
) -> Result<()>
pub async fn modify_field_in_schema( &self, collection_name: &str, name: String, definition: FieldDefinition, ) -> Result<()>
Modify a field in an existing collection schema
Sourcepub async fn drop_collection_schema(&self, collection_name: &str) -> Result<()>
pub async fn drop_collection_schema(&self, collection_name: &str) -> Result<()>
Drop an entire collection definition
Sourcepub async fn is_migration_applied(&self, version: &str) -> Result<bool>
pub async fn is_migration_applied(&self, version: &str) -> Result<bool>
Check if a migration version has been applied
Sourcepub async fn mark_migration_applied(&self, version: &str) -> Result<()>
pub async fn mark_migration_applied(&self, version: &str) -> Result<()>
Mark a migration version as applied
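A sketch of a simple version guard built from the two helpers above; the version string is hypothetical.
let version = "2024_06_add_status_field";
if !db.is_migration_applied(version).await? {
    // ... apply the schema changes for this version here ...
    db.mark_migration_applied(version).await?;
}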
Trait Implementations§
Auto Trait Implementations§
impl Freeze for Aurora
impl !RefUnwindSafe for Aurora
impl Send for Aurora
impl Sync for Aurora
impl Unpin for Aurora
impl !UnwindSafe for Aurora
Blanket Implementations§
Source§impl<T> ArchivePointee for T
impl<T> ArchivePointee for T
Source§type ArchivedMetadata = ()
type ArchivedMetadata = ()
Source§fn pointer_metadata(
_: &<T as ArchivePointee>::ArchivedMetadata,
) -> <T as Pointee>::Metadata
fn pointer_metadata( _: &<T as ArchivePointee>::ArchivedMetadata, ) -> <T as Pointee>::Metadata
Source§impl<T> BorrowMut<T> for Twhere
T: ?Sized,
impl<T> BorrowMut<T> for Twhere
T: ?Sized,
Source§fn borrow_mut(&mut self) -> &mut T
fn borrow_mut(&mut self) -> &mut T
Source§impl<T> CloneToUninit for Twhere
T: Clone,
impl<T> CloneToUninit for Twhere
T: Clone,
Source§impl<F, W, T, D> Deserialize<With<T, W>, D> for F
impl<F, W, T, D> Deserialize<With<T, W>, D> for F
Source§impl<T> Instrument for T
impl<T> Instrument for T
Source§fn instrument(self, span: Span) -> Instrumented<Self>
fn instrument(self, span: Span) -> Instrumented<Self>
Source§fn in_current_span(self) -> Instrumented<Self>
fn in_current_span(self) -> Instrumented<Self>
Source§impl<T> LayoutRaw for T
impl<T> LayoutRaw for T
Source§fn layout_raw(_: <T as Pointee>::Metadata) -> Result<Layout, LayoutError>
fn layout_raw(_: <T as Pointee>::Metadata) -> Result<Layout, LayoutError>
Source§impl<D> OwoColorize for D
impl<D> OwoColorize for D
Source§fn fg<C>(&self) -> FgColorDisplay<'_, C, Self>where
C: Color,
fn fg<C>(&self) -> FgColorDisplay<'_, C, Self>where
C: Color,
Source§fn bg<C>(&self) -> BgColorDisplay<'_, C, Self>where
C: Color,
fn bg<C>(&self) -> BgColorDisplay<'_, C, Self>where
C: Color,
Source§fn black(&self) -> FgColorDisplay<'_, Black, Self>
fn black(&self) -> FgColorDisplay<'_, Black, Self>
Source§fn on_black(&self) -> BgColorDisplay<'_, Black, Self>
fn on_black(&self) -> BgColorDisplay<'_, Black, Self>
Source§fn red(&self) -> FgColorDisplay<'_, Red, Self>
fn red(&self) -> FgColorDisplay<'_, Red, Self>
Source§fn on_red(&self) -> BgColorDisplay<'_, Red, Self>
fn on_red(&self) -> BgColorDisplay<'_, Red, Self>
Source§fn green(&self) -> FgColorDisplay<'_, Green, Self>
fn green(&self) -> FgColorDisplay<'_, Green, Self>
Source§fn on_green(&self) -> BgColorDisplay<'_, Green, Self>
fn on_green(&self) -> BgColorDisplay<'_, Green, Self>
Source§fn yellow(&self) -> FgColorDisplay<'_, Yellow, Self>
fn yellow(&self) -> FgColorDisplay<'_, Yellow, Self>
Source§fn on_yellow(&self) -> BgColorDisplay<'_, Yellow, Self>
fn on_yellow(&self) -> BgColorDisplay<'_, Yellow, Self>
Source§fn blue(&self) -> FgColorDisplay<'_, Blue, Self>
fn blue(&self) -> FgColorDisplay<'_, Blue, Self>
Source§fn on_blue(&self) -> BgColorDisplay<'_, Blue, Self>
fn on_blue(&self) -> BgColorDisplay<'_, Blue, Self>
Source§fn magenta(&self) -> FgColorDisplay<'_, Magenta, Self>
fn magenta(&self) -> FgColorDisplay<'_, Magenta, Self>
Source§fn on_magenta(&self) -> BgColorDisplay<'_, Magenta, Self>
fn on_magenta(&self) -> BgColorDisplay<'_, Magenta, Self>
Source§fn purple(&self) -> FgColorDisplay<'_, Magenta, Self>
fn purple(&self) -> FgColorDisplay<'_, Magenta, Self>
Source§fn on_purple(&self) -> BgColorDisplay<'_, Magenta, Self>
fn on_purple(&self) -> BgColorDisplay<'_, Magenta, Self>
Source§fn cyan(&self) -> FgColorDisplay<'_, Cyan, Self>
fn cyan(&self) -> FgColorDisplay<'_, Cyan, Self>
Source§fn on_cyan(&self) -> BgColorDisplay<'_, Cyan, Self>
fn on_cyan(&self) -> BgColorDisplay<'_, Cyan, Self>
Source§fn white(&self) -> FgColorDisplay<'_, White, Self>
fn white(&self) -> FgColorDisplay<'_, White, Self>
Source§fn on_white(&self) -> BgColorDisplay<'_, White, Self>
fn on_white(&self) -> BgColorDisplay<'_, White, Self>
Source§fn default_color(&self) -> FgColorDisplay<'_, Default, Self>
fn default_color(&self) -> FgColorDisplay<'_, Default, Self>
Source§fn on_default_color(&self) -> BgColorDisplay<'_, Default, Self>
fn on_default_color(&self) -> BgColorDisplay<'_, Default, Self>
Source§fn bright_black(&self) -> FgColorDisplay<'_, BrightBlack, Self>
fn bright_black(&self) -> FgColorDisplay<'_, BrightBlack, Self>
Source§fn on_bright_black(&self) -> BgColorDisplay<'_, BrightBlack, Self>
fn on_bright_black(&self) -> BgColorDisplay<'_, BrightBlack, Self>
Source§fn bright_red(&self) -> FgColorDisplay<'_, BrightRed, Self>
fn bright_red(&self) -> FgColorDisplay<'_, BrightRed, Self>
Source§fn on_bright_red(&self) -> BgColorDisplay<'_, BrightRed, Self>
fn on_bright_red(&self) -> BgColorDisplay<'_, BrightRed, Self>
Source§fn bright_green(&self) -> FgColorDisplay<'_, BrightGreen, Self>
fn bright_green(&self) -> FgColorDisplay<'_, BrightGreen, Self>
Source§fn on_bright_green(&self) -> BgColorDisplay<'_, BrightGreen, Self>
fn on_bright_green(&self) -> BgColorDisplay<'_, BrightGreen, Self>
Source§fn bright_yellow(&self) -> FgColorDisplay<'_, BrightYellow, Self>
fn bright_yellow(&self) -> FgColorDisplay<'_, BrightYellow, Self>
Source§fn on_bright_yellow(&self) -> BgColorDisplay<'_, BrightYellow, Self>
fn on_bright_yellow(&self) -> BgColorDisplay<'_, BrightYellow, Self>
Source§fn bright_blue(&self) -> FgColorDisplay<'_, BrightBlue, Self>
fn bright_blue(&self) -> FgColorDisplay<'_, BrightBlue, Self>
Source§fn on_bright_blue(&self) -> BgColorDisplay<'_, BrightBlue, Self>
fn on_bright_blue(&self) -> BgColorDisplay<'_, BrightBlue, Self>
Source§fn bright_magenta(&self) -> FgColorDisplay<'_, BrightMagenta, Self>
fn bright_magenta(&self) -> FgColorDisplay<'_, BrightMagenta, Self>
Source§fn on_bright_magenta(&self) -> BgColorDisplay<'_, BrightMagenta, Self>
fn on_bright_magenta(&self) -> BgColorDisplay<'_, BrightMagenta, Self>
Source§fn bright_purple(&self) -> FgColorDisplay<'_, BrightMagenta, Self>
fn bright_purple(&self) -> FgColorDisplay<'_, BrightMagenta, Self>
Source§fn on_bright_purple(&self) -> BgColorDisplay<'_, BrightMagenta, Self>
fn on_bright_purple(&self) -> BgColorDisplay<'_, BrightMagenta, Self>
Source§fn bright_cyan(&self) -> FgColorDisplay<'_, BrightCyan, Self>
fn bright_cyan(&self) -> FgColorDisplay<'_, BrightCyan, Self>
Source§fn on_bright_cyan(&self) -> BgColorDisplay<'_, BrightCyan, Self>
fn on_bright_cyan(&self) -> BgColorDisplay<'_, BrightCyan, Self>
Source§fn bright_white(&self) -> FgColorDisplay<'_, BrightWhite, Self>
fn bright_white(&self) -> FgColorDisplay<'_, BrightWhite, Self>
Source§fn on_bright_white(&self) -> BgColorDisplay<'_, BrightWhite, Self>
fn on_bright_white(&self) -> BgColorDisplay<'_, BrightWhite, Self>
Source§fn bold(&self) -> BoldDisplay<'_, Self>
fn bold(&self) -> BoldDisplay<'_, Self>
Source§fn dimmed(&self) -> DimDisplay<'_, Self>
fn dimmed(&self) -> DimDisplay<'_, Self>
Source§fn italic(&self) -> ItalicDisplay<'_, Self>
fn italic(&self) -> ItalicDisplay<'_, Self>
Source§fn underline(&self) -> UnderlineDisplay<'_, Self>
fn underline(&self) -> UnderlineDisplay<'_, Self>
Source§fn blink(&self) -> BlinkDisplay<'_, Self>
fn blink(&self) -> BlinkDisplay<'_, Self>
Source§fn blink_fast(&self) -> BlinkFastDisplay<'_, Self>
fn blink_fast(&self) -> BlinkFastDisplay<'_, Self>
Source§fn reversed(&self) -> ReversedDisplay<'_, Self>
fn reversed(&self) -> ReversedDisplay<'_, Self>
Source§fn strikethrough(&self) -> StrikeThroughDisplay<'_, Self>
fn strikethrough(&self) -> StrikeThroughDisplay<'_, Self>
Source§fn color<Color>(&self, color: Color) -> FgDynColorDisplay<'_, Color, Self>where
Color: DynColor,
fn color<Color>(&self, color: Color) -> FgDynColorDisplay<'_, Color, Self>where
Color: DynColor,
OwoColorize::fg or
a color-specific method, such as OwoColorize::green, Read more
Source§fn on_color<Color>(&self, color: Color) -> BgDynColorDisplay<'_, Color, Self>where
Color: DynColor,
fn on_color<Color>(&self, color: Color) -> BgDynColorDisplay<'_, Color, Self>where
Color: DynColor,
OwoColorize::bg or
a color-specific method, such as OwoColorize::on_yellow, Read more