Struct Aurora 

Source
pub struct Aurora {
    pub pubsub: PubSubSystem,
    pub workers: Option<Arc<WorkerSystem>>,
    pub computed: Arc<RwLock<ComputedFields>>,
    /* private fields */
}

The main database engine

Aurora combines a tiered storage architecture with document-oriented database features:

  • Hot tier: In-memory cache for frequently accessed data
  • Cold tier: Persistent disk storage for durability
  • Primary indices: Fast key-based access
  • Secondary indices: Fast field-based queries

§Examples

// Open a database (creates if doesn't exist)
let db = Aurora::open("my_app.db")?;

// Insert a document
let doc_id = db.insert_into("users", vec![
    ("name", Value::String("Alice".to_string())),
    ("age", Value::Int(32)),
]).await?;

// Retrieve a document
let user = db.get_document("users", &doc_id)?;

Fields§

§pubsub: PubSubSystem
§workers: Option<Arc<WorkerSystem>>
§computed: Arc<RwLock<ComputedFields>>

Implementations§

Source§

impl Aurora

Source

pub async fn execute<I: ToExecParams>( &self, input: I, ) -> Result<ExecutionResult>

Execute AQL query (variables are optional)

Supports two forms:

  1. db.execute("query").await
  2. db.execute(("query", vars)).await
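
A minimal sketch of both forms; the AQL text and the shape of the variables map are illustrative assumptions, not confirmed API:

use std::collections::HashMap;
use aurora_db::types::Value;

// Form 1: query string only
let result = db.execute("query { users { id name } }").await?;

// Form 2: query plus variables (map shape assumed for illustration)
let mut vars = HashMap::new();
vars.insert("min_age".to_string(), Value::Int(18));
let result = db.execute(("query { users { id } }", vars)).await?;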
Source

pub async fn stream(&self, aql: &str) -> Result<ChangeListener>

Stream real-time changes using AQL subscription syntax

This is a convenience method that extracts the ChangeListener from an AQL subscription query, providing a cleaner API than using execute() directly.

§Example
// Stream changes from active products
let mut listener = db.stream(r#"
    subscription {
        products(where: { active: { eq: true } }) {
            id
            name
        }
    }
"#).await?;

// Receive real-time events
while let Ok(event) = listener.recv().await {
    println!("Change: {:?} on {}", event.change_type, event.id);
}
Source

pub async fn explain<I: ToExecParams>(&self, input: I) -> Result<ExecutionPlan>

Explain AQL query execution plan
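
For example (a sketch, assuming the same AQL syntax as the examples above and that ExecutionPlan is Debug-printable):

// Inspect how a query will be executed before running it
let plan = db.explain(r#"
    query {
        users(where: { active: { eq: true } }) {
            id
        }
    }
"#).await?;
println!("{:#?}", plan);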

Source

pub async fn analyze_execution_plan( &self, doc: &Document, ) -> Result<ExecutionPlan>

Analyze execution plan for a parsed query

Source

pub fn remove_stale_lock<P: AsRef<Path>>(path: P) -> Result<bool>

Remove stale lock files from a database directory

If Aurora crashes or is forcefully terminated, it may leave behind lock files that prevent the database from being reopened. This method safely removes those lock files.

§Safety

Only call this when you’re certain no other Aurora instance is using the database. Removing lock files while another process is running could cause data corruption.

§Example
use aurora_db::Aurora;

// If you get "Access denied" error when opening:
if let Err(e) = Aurora::open("my_db") {
    eprintln!("Failed to open: {}", e);
    // Try removing stale lock
    if Aurora::remove_stale_lock("my_db").unwrap_or(false) {
        println!("Removed stale lock, try opening again");
        let db = Aurora::open("my_db")?;
    }
}
Source

pub fn open<P: AsRef<Path>>(path: P) -> Result<Self>

Open or create a database at the specified location

§Arguments
  • path - Path to the database file or directory
    • Absolute paths (like /data/myapp.db) are used as-is
    • Relative paths (like ./data/myapp.db) are resolved relative to the current directory
    • Simple names (like myapp.db) use the current directory
§Returns

An initialized Aurora database instance

§Examples

use aurora_db::Aurora;

let db = Aurora::open("./data/my_application.db")?;

// Or use a simple file name (resolved in the current directory)
let db = Aurora::open("customer_data.db")?;

Source

pub fn with_config(config: AuroraConfig) -> Result<Self>

Open a database with custom configuration

§Arguments
  • config - Database configuration settings
§Examples
use aurora_db::{Aurora, types::AuroraConfig};
use std::time::Duration;

let config = AuroraConfig {
    db_path: "my_data.db".into(),
    hot_cache_size_mb: 512,           // 512 MB cache
    enable_write_buffering: true,     // Batch writes for speed
    enable_wal: true,                 // Durability
    auto_compact: true,               // Background compaction
    compact_interval_mins: 60,        // Compact every hour
    ..Default::default()
};

let db = Aurora::with_config(config)?;
Source

pub async fn ensure_indices_initialized(&self) -> Result<()>

Source

pub fn get(&self, key: &str) -> Result<Option<Vec<u8>>>

Get a value by key (low-level key-value access)

This is the low-level method. For document access, use get_document() instead. Checks hot cache first, then falls back to cold storage for maximum performance.

§Performance
  • Hot cache hit: ~1M reads/sec (instant)
  • Cold storage: ~500K reads/sec (disk I/O)
  • Cache hit rate: typically 95%+ at scale
§Examples
// Low-level key-value access
let data = db.get("users:12345")?;
if let Some(bytes) = data {
    let doc: Document = serde_json::from_slice(&bytes)?;
    println!("Found: {:?}", doc);
}

// Better: use get_document() for documents
let user = db.get_document("users", "12345")?;
Source

pub fn get_hot_ref(&self, key: &str) -> Option<Arc<Vec<u8>>>

Get a value as a zero-copy Arc reference (10-100x faster than get()!)

Only checks the hot cache - returns None if the value is not cached.
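
A quick sketch of cache-only reads:

// Zero-copy read - succeeds only if the key is already in the hot cache
if let Some(bytes) = db.get_hot_ref("users:12345") {
    println!("cached value is {} bytes", bytes.len());
} else {
    // Fall back to the regular path, which also checks cold storage
    let _ = db.get("users:12345")?;
}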

Source

pub fn get_cache_stats(&self) -> CacheStats

Get cache statistics

Returns detailed metrics about cache performance including hit/miss rates, memory usage, and access patterns. Useful for monitoring, optimization, and understanding database performance characteristics.

§Returns

CacheStats struct containing:

  • hits: Number of cache hits (data found in memory)
  • misses: Number of cache misses (had to read from disk)
  • hit_rate: Percentage of requests served from cache (0.0-1.0)
  • size: Current number of entries in cache
  • capacity: Maximum cache capacity
  • evictions: Number of entries evicted due to capacity
§Examples

use aurora_db::Aurora;

let db = Aurora::open("mydb.db")?;
let stats = db.get_cache_stats();
println!("Cache hit rate: {:.1}%", stats.hit_rate * 100.0);
println!("Cache size: {} / {} entries", stats.size, stats.capacity);
println!("Total hits: {}, misses: {}", stats.hits, stats.misses);

// Monitor performance during operations
let before = db.get_cache_stats();

// Perform many reads
for i in 0..1000 {
    db.get_document("users", &format!("user-{}", i))?;
}

let after = db.get_cache_stats();
let hit_rate = (after.hits - before.hits) as f64 / 1000.0;
println!("Read hit rate: {:.1}%", hit_rate * 100.0);

// Performance tuning
let stats = db.get_cache_stats();
if stats.hit_rate < 0.80 {
    println!("Low cache hit rate! Consider:");
    println!("- Increasing cache size in config");
    println!("- Prewarming cache with prewarm_cache()");
    println!("- Reviewing query patterns");
}

if stats.evictions > stats.size {
    println!("High eviction rate! Cache may be too small.");
    println!("Consider increasing cache capacity.");
}

// Production monitoring
use std::time::Duration;
use std::thread;

loop {
    let stats = db.get_cache_stats();

    // Log to monitoring system
    if stats.hit_rate < 0.90 {
        eprintln!("Warning: Cache hit rate dropped to {:.1}%",
                  stats.hit_rate * 100.0);
    }

    thread::sleep(Duration::from_secs(60));
}


§Hit Rate Guidelines
  • Excellent: 95%+ hit rate (most reads from memory)
  • Good: 80-95% hit rate (acceptable performance)
  • Poor: <80% hit rate (consider cache tuning)
§See Also
  • prewarm_cache() to improve hit rates by preloading data
  • Aurora::with_config() to adjust cache capacity
Source

pub fn has_index(&self, collection: &str, field: &str) -> bool

Source

pub fn get_ids_from_index( &self, collection: &str, field: &str, value: &Value, ) -> Vec<String>
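
A short sketch combining has_index() with an index lookup:

use aurora_db::types::Value;

// Check for an index before relying on it
if db.has_index("users", "email") {
    let ids = db.get_ids_from_index(
        "users",
        "email",
        &Value::String("alice@example.com".into()),
    );
    println!("matching ids: {:?}", ids);
}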

Source

pub async fn register_computed_field( &self, collection: &str, field: &str, expression: ComputedExpression, ) -> Result<()>

Register a computed field definition

Source

pub fn listen(&self, collection: impl Into<String>) -> ChangeListener

Listen for real-time changes in a collection

Returns a stream of change events (inserts, updates, deletes) that you can subscribe to. Perfect for building reactive UIs, cache invalidation, audit logging, webhooks, and data synchronization systems.

§Performance
  • Zero overhead when no listeners are active
  • Events are broadcast to all listeners asynchronously
  • Non-blocking - doesn’t slow down write operations
  • Multiple listeners can watch the same collection
§Examples

use aurora_db::{Aurora, types::Value};

let db = Aurora::open("mydb.db")?;

// Basic listener
let mut listener = db.listen("users");

tokio::spawn(async move {
    while let Ok(event) = listener.recv().await {
        match event.change_type {
            ChangeType::Insert => println!("New user: {:?}", event.document),
            ChangeType::Update => println!("Updated user: {:?}", event.document),
            ChangeType::Delete => println!("Deleted user ID: {}", event.id),
        }
    }
});

// Now any insert/update/delete will trigger the listener
db.insert_into("users", vec![("name", Value::String("Alice".into()))]).await?;



§Real-World Use Cases

Cache Invalidation:
use std::sync::Arc;
use tokio::sync::RwLock;
use std::collections::HashMap;

let cache = Arc::new(RwLock::new(HashMap::new()));
let cache_clone = Arc::clone(&cache);

let mut listener = db.listen("products");

tokio::spawn(async move {
    while let Ok(event) = listener.recv().await {
        // Invalidate cache entry when product changes
        cache_clone.write().await.remove(&event.id);
        println!("Cache invalidated for product: {}", event.id);
    }
});

Webhook Notifications:

let mut listener = db.listen("orders");

tokio::spawn(async move {
    while let Ok(event) = listener.recv().await {
        if event.change_type == ChangeType::Insert {
            // Send webhook for new orders
            send_webhook("https://api.example.com/webhooks/order", &event).await;
        }
    }
});

Audit Logging:

let db_clone = db.clone();
let mut listener = db.listen("sensitive_data");

tokio::spawn(async move {
    while let Ok(event) = listener.recv().await {
        // Log all changes to the audit trail (logging errors ignored in this sketch)
        let _ = db_clone.insert_into("audit_log", vec![
            ("collection", Value::String("sensitive_data".into())),
            ("action", Value::String(format!("{:?}", event.change_type))),
            ("document_id", Value::String(event.id.clone())),
            ("timestamp", Value::String(chrono::Utc::now().to_rfc3339())),
        ]).await;
    }
});

Data Synchronization:

let mut listener = db.listen("users");

tokio::spawn(async move {
    while let Ok(event) = listener.recv().await {
        // Sync changes to an external system (errors ignored in this sketch)
        match event.change_type {
            ChangeType::Insert | ChangeType::Update => {
                if let Some(doc) = event.document {
                    let _ = external_api.upsert_user(&doc).await;
                }
            },
            ChangeType::Delete => {
                let _ = external_api.delete_user(&event.id).await;
            },
        }
    }
});

Real-Time Notifications:

let mut listener = db.listen("messages");

tokio::spawn(async move {
    while let Ok(event) = listener.recv().await {
        if event.change_type == ChangeType::Insert {
            if let Some(msg) = event.document {
                // Push notification to connected websockets
                if let Some(recipient) = msg.data.get("recipient_id") {
                    websocket_manager.send_to_user(recipient, &msg).await;
                }
            }
        }
    }
});

Filtered Listener:

use aurora_db::pubsub::EventFilter;

// Only listen for inserts
let mut listener = db.listen("users")
    .filter(EventFilter::ChangeType(ChangeType::Insert));

// Only listen for documents with specific field value
let mut listener = db.listen("users")
    .filter(EventFilter::FieldEquals("role".to_string(), Value::String("admin".into())));
§Important Notes
  • Listener stays active until dropped
  • Events are delivered in order
  • Each listener has its own event stream
  • Use filters to reduce unnecessary event processing
  • Listeners don’t affect write performance
§See Also
  • listen_all() to listen to all collections
  • ChangeListener::filter() to filter events
  • query().watch() for reactive queries with filtering
Source

pub fn listen_all(&self) -> ChangeListener

Listen for all changes across all collections

Returns a stream of change events for every insert, update, and delete operation across the entire database. Useful for global audit logging, replication, and monitoring systems.

§Performance
  • Same performance as single collection listener
  • Filter events by collection in your handler
  • Consider using listen(collection) if only watching specific collections
§Examples
use aurora_db::Aurora;

let db = Aurora::open("mydb.db")?;

// Listen to everything
let mut listener = db.listen_all();

tokio::spawn(async move {
    while let Ok(event) = listener.recv().await {
        println!("Change in {}: {:?}", event.collection, event.change_type);
    }
});
§Real-World Use Cases

Global Audit Trail:

let mut listener = db.listen_all();

tokio::spawn(async move {
    while let Ok(event) = listener.recv().await {
        // Log every database change
        audit_logger.log(AuditEntry {
            timestamp: chrono::Utc::now(),
            collection: event.collection,
            action: event.change_type,
            document_id: event.id,
            user_id: get_current_user_id(),
        }).await;
    }
});

Database Replication:

let mut listener = db.listen_all();

tokio::spawn(async move {
    while let Ok(event) = listener.recv().await {
        // Replicate to secondary database (errors ignored in this sketch)
        let _ = replica_db.apply_change(event).await;
    }
});

Change Data Capture (CDC):

let mut listener = db.listen_all();

tokio::spawn(async move {
    while let Ok(event) = listener.recv().await {
        // Stream changes to Kafka/RabbitMQ (errors ignored in this sketch)
        if let Ok(payload) = serde_json::to_string(&event) {
            let _ = kafka_producer.send(
                &format!("cdc.{}", event.collection),
                payload
            ).await;
        }
    }
});

Monitoring & Metrics:

use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Duration;

let write_counter = Arc::new(AtomicUsize::new(0));
let counter_clone = Arc::clone(&write_counter);

let mut listener = db.listen_all();

tokio::spawn(async move {
    while let Ok(_event) = listener.recv().await {
        counter_clone.fetch_add(1, Ordering::Relaxed);
    }
});

// Report metrics every 60 seconds
tokio::spawn(async move {
    loop {
        tokio::time::sleep(Duration::from_secs(60)).await;
        let count = write_counter.swap(0, Ordering::Relaxed);
        println!("Writes per minute: {}", count);
    }
});

Selective Processing:

let mut listener = db.listen_all();

tokio::spawn(async move {
    while let Ok(event) = listener.recv().await {
        // Handle different collections differently
        let collection = event.collection.clone();
        match collection.as_str() {
            "users" => handle_user_change(event).await,
            "orders" => handle_order_change(event).await,
            "payments" => handle_payment_change(event).await,
            _ => {} // Ignore others
        }
    }
});
§When to Use
  • Global audit logging
  • Database replication
  • Change data capture (CDC)
  • Monitoring and metrics
  • Event sourcing systems
§When NOT to Use
  • Only need to watch 1-2 collections → Use listen(collection) instead
  • High write volume with selective interest → Use collection-specific listeners
  • Need complex filtering → Use query().watch() instead
§See Also
  • listen() for single collection listening
  • listener_count() to check active listeners
  • query().watch() for filtered reactive queries
Source

pub fn listener_count(&self, collection: &str) -> usize

Get the number of active listeners for a collection

Source

pub fn total_listeners(&self) -> usize

Get total number of active listeners
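
For example:

let mut _listener = db.listen("users");
println!("listeners on users: {}", db.listener_count("users"));
println!("listeners overall: {}", db.total_listeners());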

Source

pub fn flush(&self) -> Result<()>

Flushes all buffered writes to disk to ensure durability.

This method forces all pending writes from:

  • Write buffer (if enabled)
  • Cold storage internal buffers
  • Write-ahead log (if enabled)

Call this when you need to ensure data persistence before a critical operation or shutdown. After flush() completes, all data is guaranteed to be on disk even if power fails.

§Performance
  • Flush time: ~10-50ms depending on buffered data
  • Triggers OS-level fsync() for durability guarantee
  • Truncates WAL after successful flush
  • Not needed for every write (WAL provides durability)
§Examples
use aurora_db::Aurora;

let db = Aurora::open("mydb.db")?;

// Basic flush after critical write
db.insert_into("users", data).await?;
db.flush()?;  // Ensure data is persisted to disk

// Graceful shutdown pattern
fn shutdown(db: &Aurora) -> Result<()> {
    println!("Flushing pending writes...");
    db.flush()?;
    println!("Shutdown complete - all data persisted");
    Ok(())
}

// Periodic checkpoint pattern
use std::time::Duration;
use std::thread;

let db = db.clone();
thread::spawn(move || {
    loop {
        thread::sleep(Duration::from_secs(60));
        if let Err(e) = db.flush() {
            eprintln!("Flush error: {}", e);
        } else {
            println!("Checkpoint: data flushed to disk");
        }
    }
});

// Critical transaction pattern
let tx_id = db.begin_transaction();

// Multiple operations
db.insert_into("orders", order_data).await?;
db.update_document("inventory", product_id, updates).await?;
db.insert_into("audit_log", audit_data).await?;

// Commit and flush immediately
db.commit_transaction(tx_id)?;
db.flush()?;  // Critical: ensure transaction is on disk

// Backup preparation
println!("Preparing backup...");
db.flush()?;  // Ensure all data is written
std::fs::copy("mydb.db", "backup.db")?;
println!("Backup complete");
§When to Use
  • Before graceful shutdown
  • After critical transactions
  • Before creating backups
  • Periodic checkpoints (every 30-60 seconds)
  • Before risky operations
§When NOT to Use
  • After every single write (too slow, WAL provides durability)
  • In high-throughput loops (batch instead)
  • When durability mode is already Immediate
§Important Notes
  • WAL provides durability even without explicit flush()
  • flush() adds latency (~10-50ms) so use strategically
  • Automatic flush happens during graceful shutdown
  • After flush(), WAL is truncated (data is in main storage)
§See Also
  • Aurora::with_config() to set durability mode
  • WAL (Write-Ahead Log) provides durability without explicit flushes
Source

pub async fn put( &self, key: String, value: Vec<u8>, ttl: Option<Duration>, ) -> Result<()>

Store a key-value pair (low-level storage)

This is the low-level method. For documents, use insert_into() instead. Writes are buffered and batched for performance.

§Arguments
  • key - Unique key (format: “collection:id” for documents)
  • value - Raw bytes to store
  • ttl - Optional time-to-live (None = permanent)
§Performance
  • Buffered writes: ~15-30K docs/sec
  • Batching improves throughput significantly
  • Call flush() to ensure data is persisted
§Examples
use std::time::Duration;

// Permanent storage
let data = serde_json::to_vec(&my_struct)?;
db.put("mykey".to_string(), data, None).await?;

// With TTL (expires after 1 hour)
db.put("session:abc".to_string(), session_data, Some(Duration::from_secs(3600))).await?;

// Better: use insert_into() for documents
db.insert_into("users", vec![("name", Value::String("Alice".into()))]).await?;
Source

pub fn scan_and_filter<F>( &self, collection: &str, filter: F, limit: Option<usize>, ) -> Result<Vec<Document>>
where F: Fn(&Document) -> bool,

Scan a collection with a filter and early-termination support.

Used by QueryBuilder for optimized queries with LIMIT.
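
A minimal sketch, assuming Document exposes its fields through a data map as in the examples above:

use aurora_db::types::Value;

// First 10 active users; scanning stops once the limit is reached
let docs = db.scan_and_filter(
    "users",
    |doc| matches!(doc.data.get("active"), Some(Value::Bool(true))),
    Some(10),
)?;
println!("found {} documents", docs.len());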

Source

pub async fn put_blob(&self, key: String, file_path: &Path) -> Result<()>
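
Undocumented above; a usage sketch based on the signature (the key and path are illustrative):

use std::path::Path;

// Store the contents of a file under a blob key
db.put_blob("blobs:avatar-123".to_string(), Path::new("./avatar.png")).await?;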

Source

pub async fn new_collection<F: IntoFieldDefinition>( &self, name: &str, fields: Vec<F>, ) -> Result<()>

Create a new collection with schema definition

Collections are like tables in SQL - they define the structure of your documents. The third boolean parameter indicates if the field should be indexed for fast lookups.

§Arguments
  • name - Collection name
  • fields - Vector of (field_name, field_type, indexed) tuples
    • Field name (accepts both &str and String)
    • Field type (String, Int, Float, Bool, etc.)
    • Indexed: true for fast lookups, false for no index
§Performance
  • Indexed fields: Fast equality queries (O(1) lookup)
  • Non-indexed fields: Full scan required for queries
  • Unique fields are automatically indexed
§Examples
use aurora_db::{Aurora, types::FieldType};

let db = Aurora::open("mydb.db")?;

// Create a users collection
db.new_collection("users", vec![
    ("name", FieldType::String, false),      // Not indexed
    ("email", FieldType::String, true),      // Indexed - fast lookups
    ("age", FieldType::Int, false),
    ("active", FieldType::Bool, true),       // Indexed
    ("score", FieldType::Float, false),
]).await?;

// Idempotent - calling again is safe
db.new_collection("users", vec![/* ... */]).await?; // OK!
Source

pub async fn insert_into( &self, collection: &str, data: Vec<(&str, Value)>, ) -> Result<String>

Insert a document into a collection

Automatically generates a UUID for the document and validates against collection schema and unique constraints. Returns the generated document ID.

§Performance
  • Single insert: ~15,000 docs/sec
  • Bulk insert: Use batch_insert() for 10+ documents (~50,000 docs/sec)
  • Triggers PubSub events for real-time listeners
§Arguments
  • collection - Name of the collection to insert into
  • data - Document fields and values to insert
§Returns

The auto-generated ID of the inserted document or an error

§Errors
  • CollectionNotFound: Collection doesn’t exist
  • ValidationError: Data violates schema or unique constraints
  • SerializationError: Invalid data format
§Examples

use aurora_db::{Aurora, types::Value};
use std::collections::HashMap;

let db = Aurora::open("mydb.db")?;

// Basic insertion
let user_id = db.insert_into("users", vec![
    ("name", Value::String("Alice Smith".to_string())),
    ("email", Value::String("alice@example.com".to_string())),
    ("age", Value::Int(28)),
    ("active", Value::Bool(true)),
]).await?;

println!("Created user with ID: {}", user_id);

// Inserting with nested data
let order_id = db.insert_into("orders", vec![
    ("user_id", Value::String(user_id.clone())),
    ("total", Value::Float(99.99)),
    ("status", Value::String("pending".to_string())),
    ("items", Value::Array(vec![
        Value::String("item-123".to_string()),
        Value::String("item-456".to_string()),
    ])),
]).await?;

// Error handling - unique constraint violation
match db.insert_into("users", vec![
    ("email", Value::String("alice@example.com".to_string())), // Duplicate!
    ("name", Value::String("Alice Clone".to_string())),
]).await {
    Ok(id) => println!("Inserted: {}", id),
    Err(e) => println!("Failed: {} (email already exists)", e),
}

// For bulk inserts (10+ documents), use batch_insert() instead
let users = vec![
    HashMap::from([
        ("name".to_string(), Value::String("Bob".to_string())),
        ("email".to_string(), Value::String("bob@example.com".to_string())),
    ]),
    HashMap::from([
        ("name".to_string(), Value::String("Carol".to_string())),
        ("email".to_string(), Value::String("carol@example.com".to_string())),
    ]),
    // … more documents
];
let ids = db.batch_insert("users", users).await?; // 3x faster!
println!("Inserted {} users", ids.len());

Source

pub async fn insert_map( &self, collection: &str, data: HashMap<String, Value>, ) -> Result<String>
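
A sketch of the HashMap-based variant of insert_into():

use std::collections::HashMap;
use aurora_db::types::Value;

let mut data = HashMap::new();
data.insert("name".to_string(), Value::String("Alice".into()));
data.insert("age".to_string(), Value::Int(32));
let id = db.insert_map("users", data).await?;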

Source

pub async fn batch_insert( &self, collection: &str, documents: Vec<HashMap<String, Value>>, ) -> Result<Vec<String>>

Batch insert multiple documents with optimized write path

Inserts multiple documents in a single optimized operation, bypassing the write buffer for better performance. Ideal for bulk data loading, migrations, or initial database seeding. 3x faster than individual inserts.

§Performance
  • Insert speed: ~50,000 docs/sec (vs ~15,000 for single inserts)
  • Batch writes to WAL and storage
  • Validates all unique constraints
  • Use for 10+ documents minimum
§Arguments
  • collection - Name of the collection to insert into
  • documents - Vector of document data as HashMaps
§Returns

Vector of auto-generated document IDs or an error

§Examples

use aurora_db::{Aurora, types::Value};
use std::collections::HashMap;

let db = Aurora::open("mydb.db")?;

// Bulk user import
let users = vec![
    HashMap::from([
        ("name".to_string(), Value::String("Alice".into())),
        ("email".to_string(), Value::String("alice@example.com".into())),
        ("age".to_string(), Value::Int(28)),
    ]),
    HashMap::from([
        ("name".to_string(), Value::String("Bob".into())),
        ("email".to_string(), Value::String("bob@example.com".into())),
        ("age".to_string(), Value::Int(32)),
    ]),
    HashMap::from([
        ("name".to_string(), Value::String("Carol".into())),
        ("email".to_string(), Value::String("carol@example.com".into())),
        ("age".to_string(), Value::Int(25)),
    ]),
];

let ids = db.batch_insert("users", users).await?;
println!("Inserted {} users", ids.len());

// Seeding test data
let test_products: Vec<HashMap<String, Value>> = (0..1000)
    .map(|i| HashMap::from([
        ("sku".to_string(), Value::String(format!("PROD-{:04}", i))),
        ("price".to_string(), Value::Float(9.99 + i as f64)),
        ("stock".to_string(), Value::Int(100)),
    ]))
    .collect();

let ids = db.batch_insert("products", test_products).await?;
// Much faster than 1000 individual insert_into() calls!

// Migration from CSV data
let mut csv_reader = csv::Reader::from_path("data.csv")?;
let mut batch = Vec::new();

for result in csv_reader.records() {
    let record = result?;
    let doc = HashMap::from([
        ("field1".to_string(), Value::String(record[0].to_string())),
        ("field2".to_string(), Value::String(record[1].to_string())),
    ]);
    batch.push(doc);

    // Insert in batches of 1000
    if batch.len() >= 1000 {
        db.batch_insert("imported_data", batch.clone()).await?;
        batch.clear();
    }
}

// Insert remaining
if !batch.is_empty() {
    db.batch_insert("imported_data", batch).await?;
}


§Errors
  • ValidationError: Unique constraint violation on any document
  • CollectionNotFound: Collection doesn't exist
  • IoError: Storage write failure
§Important Notes
  • All inserts are atomic - if one fails, none are inserted
  • UUIDs are auto-generated for all documents
  • PubSub events are published for each insert
  • For 10+ documents, this is 3x faster than individual inserts
  • For < 10 documents, use insert_into() instead
§See Also
  • insert_into() for single document inserts
  • import_from_json() for file-based bulk imports
  • batch_write() for low-level batch operations
Source

pub async fn update_document( &self, collection: &str, doc_id: &str, updates: Vec<(&str, Value)>, ) -> Result<()>

Update a document by ID

§Arguments
  • collection - Collection name
  • doc_id - Document ID to update
  • data - New field values to set
§Returns

Ok(()) on success, or an error if the document doesn’t exist

§Examples
db.update_document("users", &user_id, vec![
    ("status", Value::String("active".to_string())),
    ("last_login", Value::String(chrono::Utc::now().to_rfc3339())),
]).await?;
Source

pub async fn get_all_collection( &self, collection: &str, ) -> Result<Vec<Document>>

Source

pub fn get_data_by_pattern( &self, pattern: &str, ) -> Result<Vec<(String, DataInfo)>>

Source

pub fn begin_transaction(&self) -> TransactionId

Begin a new transaction for atomic operations

All operations performed after beginning a transaction are part of it until either commit_transaction() or rollback_transaction() is called. Transactions ensure all-or-nothing execution: either every operation succeeds, or none of them are applied. Perfect for maintaining data consistency.

§Returns

The TransactionId to pass to commit_transaction() or rollback_transaction()

§Examples

use aurora_db::{Aurora, types::Value};

let db = Aurora::open("mydb.db")?;

// Start transaction
let tx_id = db.begin_transaction();

// Perform multiple operations
db.insert_into("accounts", vec![
    ("user_id", Value::String("alice".into())),
    ("balance", Value::Int(1000)),
]).await?;

db.insert_into("accounts", vec![
    ("user_id", Value::String("bob".into())),
    ("balance", Value::Int(500)),
]).await?;

// Commit all changes if everything succeeded
db.commit_transaction(tx_id)?;

// Or roll back on error
// db.rollback_transaction(tx_id)?;
Source

pub fn commit_transaction(&self, tx_id: TransactionId) -> Result<()>

Commit a transaction, making all changes permanent

All operations within the transaction are atomically applied to the database. If any operation fails, none are applied.

§Arguments
  • tx_id - Transaction ID returned from begin_transaction()
§Examples

use aurora_db::{Aurora, types::Value};

let db = Aurora::open("mydb.db")?;

// Transfer money between accounts
let tx_id = db.begin_transaction();

// Deduct from Alice
db.update_document("accounts", "alice", vec![
    ("balance", Value::Int(900)), // Was 1000
]).await?;

// Add to Bob
db.update_document("accounts", "bob", vec![
    ("balance", Value::Int(600)), // Was 500
]).await?;

// Both updates succeed - commit them
db.commit_transaction(tx_id)?;

println!("Transfer completed!");

Source

pub fn rollback_transaction(&self, tx_id: TransactionId) -> Result<()>

Roll back a transaction, discarding all changes

All operations within the transaction are discarded. The database state remains unchanged. Use this when an error occurs during transaction processing.

§Arguments
  • tx_id - Transaction ID returned from begin_transaction()
§Examples

use aurora_db::{Aurora, types::Value};

let db = Aurora::open("mydb.db")?;

// Attempt a transfer with validation
let tx_id = db.begin_transaction();

let result = async {
    // Deduct from Alice
    let alice = db.get_document("accounts", "alice")
        .map_err(|e| e.to_string())?;
    let balance = alice.as_ref().and_then(|doc| doc.data.get("balance"));

    if let Some(Value::Int(bal)) = balance {
        if *bal < 100 {
            return Err("Insufficient funds".to_string());
        }

        db.update_document("accounts", "alice", vec![
            ("balance", Value::Int(bal - 100)),
        ]).await.map_err(|e| e.to_string())?;

        db.update_document("accounts", "bob", vec![
            ("balance", Value::Int(600)),
        ]).await.map_err(|e| e.to_string())?;

        Ok(())
    } else {
        Err("Account not found".to_string())
    }
}.await;

match result {
    Ok(_) => {
        db.commit_transaction(tx_id)?;
        println!("Transfer completed");
    }
    Err(e) => {
        db.rollback_transaction(tx_id)?;
        println!("Transfer failed: {}, changes rolled back", e);
    }
}

Source

pub async fn create_index(&self, collection: &str, field: &str) -> Result<()>

Create a secondary index on a field for faster queries

Indexes dramatically improve query performance for frequently accessed fields, trading increased memory usage and slower writes for much faster reads.

§When to Create Indexes
  • Frequent queries: Fields used in 80%+ of your queries
  • High cardinality: Fields with many unique values (user_id, email)
  • Sorting/filtering: Fields used in ORDER BY or WHERE clauses
  • Large collections: Most beneficial with 10,000+ documents
§When NOT to Index
  • Low cardinality fields (e.g., boolean flags, small enums)
  • Rarely queried fields
  • Fields that change frequently (write-heavy workloads)
  • Small collections (<1,000 documents) - full scans are fast enough
§Performance Characteristics
  • Query speedup: O(n) → O(1) for equality filters
  • Memory cost: ~100-200 bytes per document per index
  • Write slowdown: ~20-30% longer insert/update times
  • Build time: ~5,000 docs/sec for initial indexing
§Examples
use aurora_db::{Aurora, types::Value};

let db = Aurora::open("mydb.db")?;

// DO index 'email' - high cardinality, frequently queried
db.create_index("users", "email").await?;

let user = db.query("users")
    .filter(|f| f.eq("email", Value::String("alice@example.com".into())))
    .first_one()
    .await?;

// DON'T index 'active' - low cardinality (only 2 values: true/false)
// A full scan is fast enough for boolean fields

// DO index 'age' if you frequently query age ranges
db.create_index("users", "age").await?;

let young_users = db.query("users")
    .filter(|f| f.lt("age", 30))
    .collect()
    .await?;
§Real-World Example: E-commerce Orders
// Orders collection: 1 million documents
db.new_collection("orders", vec![
    ("user_id", FieldType::String),    // High cardinality
    ("status", FieldType::String),      // Low cardinality (pending, shipped, delivered)
    ("created_at", FieldType::String),
    ("total", FieldType::Float),
])?;

// Index user_id - queries like "show me my orders" are common
db.create_index("orders", "user_id").await?;  // Good choice

// Query speedup: 2.5s → 0.001s
let my_orders = db.query("orders")
    .filter(|f| f.eq("user_id", user_id))
    .collect()
    .await?;

// DON'T index 'status' - only 3 possible values
// Scanning 1M docs takes ~100ms, indexing won't help much

// Index created_at if you frequently query recent orders
db.create_index("orders", "created_at").await?;  // Good for time-based queries
Source

pub fn query<'a>(&'a self, collection: &str) -> QueryBuilder<'a>

Query documents in a collection with filtering, sorting, and pagination

Returns a QueryBuilder that allows fluent chaining of query operations. Queries use early termination for LIMIT clauses, making them extremely fast even on large collections (6,800x faster than naive implementations).

§Performance
  • With LIMIT: O(k) where k = limit + offset (early termination!)
  • Without LIMIT: O(n) where n = matching documents
  • Uses secondary indices when available for equality filters
  • Hot cache: ~1M reads/sec, Cold storage: ~500K reads/sec
§Examples
use aurora_db::{Aurora, types::Value};

let db = Aurora::open("mydb.db")?;

// Simple equality query
let active_users = db.query("users")
    .filter(|f| f.eq("active", Value::Bool(true)))
    .collect()
    .await?;

// Range query with pagination (FAST - uses early termination!)
let top_scorers = db.query("users")
    .filter(|f| f.gt("score", Value::Int(1000)))
    .order_by("score", false)  // descending
    .limit(10)
    .offset(20)
    .collect()
    .await?;

// Multiple filters
let premium_active = db.query("users")
    .filter(|f| f.eq("tier", Value::String("premium".into())))
    .filter(|f| f.eq("active", Value::Bool(true)))
    .limit(100)  // Only scans ~200 docs, not all million!
    .collect()
    .await?;

// Text search in a field
let matching = db.query("articles")
    .filter(|f| f.contains("title", "rust"))
    .collect()
    .await?;
Source

pub fn search<'a>(&'a self, collection: &str) -> SearchBuilder<'a>

Create a search builder for full-text search

§Arguments
  • collection - Name of the collection to search
§Returns

A SearchBuilder for configuring and executing searches

§Examples
// Search for documents containing text
let search_results = db.search("articles")
    .field("content")
    .matching("quantum computing")
    .fuzzy(true)  // Enable fuzzy matching for typo tolerance
    .collect()
    .await?;
Source

pub fn get_document( &self, collection: &str, id: &str, ) -> Result<Option<Document>>

Retrieve a document by ID

Fast direct lookup when you know the document ID. Significantly faster than querying with filters when ID is known.

§Performance
  • Hot cache: ~1,000,000 reads/sec (instant)
  • Cold storage: ~500,000 reads/sec (disk I/O)
  • Complexity: O(1) - constant time lookup
  • Much faster than .query().filter(|f| f.eq("id", ...)) which is O(n)
§Arguments
  • collection - Name of the collection to query
  • id - ID of the document to retrieve
§Returns

The document if found, None if not found, or an error

§Examples

use aurora_db::{Aurora, types::Value};

let db = Aurora::open("mydb.db")?;

// Basic retrieval
if let Some(user) = db.get_document("users", &user_id)? {
    println!("Found user: {}", user.id);

    // Access fields safely
    if let Some(Value::String(name)) = user.data.get("name") {
        println!("Name: {}", name);
    }

    if let Some(Value::Int(age)) = user.data.get("age") {
        println!("Age: {}", age);
    }
} else {
    println!("User not found");
}

// Idiomatic error handling
let user = db.get_document("users", &user_id)?
    .ok_or_else(|| AqlError::new(ErrorCode::NotFound, "User not found".into()))?;

// Checking existence before operations
if db.get_document("users", &user_id)?.is_some() {
    db.update_document("users", &user_id, vec![
        ("last_login", Value::String(chrono::Utc::now().to_rfc3339())),
    ]).await?;
}

// Batch retrieval (fetch multiple by ID)
let user_ids = vec!["user-1", "user-2", "user-3"];
let users: Vec<Document> = user_ids.iter()
    .filter_map(|id| db.get_document("users", id).ok().flatten())
    .collect();

println!("Found {} out of {} users", users.len(), user_ids.len());


§When to Use
  • You know the document ID (from insert, previous query, or URL param)
  • Need the fastest possible lookup (1M reads/sec)
  • Fetching a single document
§When NOT to Use
  • Searching by other fields → Use query().filter() instead
  • Need multiple documents by criteria → Use query().collect() instead
  • Don't know the ID → Use find_by_field() or query() instead
Source

pub async fn delete(&self, key: &str) -> Result<()>

Delete a document by ID

Permanently removes a document from storage, cache, and all indices. Publishes a delete event for PubSub subscribers. This operation cannot be undone.

§Performance
  • Delete speed: ~50,000 deletes/sec
  • Cleans up hot cache, cold storage, primary + secondary indices
  • Triggers PubSub events for listeners
§Arguments
  • key - Full key in format “collection:id” (e.g., “users:123”)
§Returns

Success or an error

§Errors
  • InvalidOperation: Invalid key format (must be “collection:id”)
  • IoError: Storage deletion failed
§Examples
use aurora_db::Aurora;

let db = Aurora::open("mydb.db")?;

// Basic deletion (note: requires "collection:id" format)
db.delete("users:abc123").await?;

// Delete with existence check
let user_id = "user-456";
if db.get_document("users", user_id)?.is_some() {
    db.delete(&format!("users:{}", user_id)).await?;
    println!("User deleted");
} else {
    println!("User not found");
}

// Error handling
match db.delete("users:nonexistent").await {
    Ok(_) => println!("Deleted successfully"),
    Err(e) => println!("Delete failed: {}", e),
}

// Batch deletion using a query filter
let inactive_count = db.delete_by_query("users", |f| {
    f.eq("active", Value::Bool(false))
}).await?;
println!("Deleted {} inactive users", inactive_count);

// Delete with cascading (manual cascade pattern)
let user_id = "user-123";

// Delete user's orders first
let orders = db.query("orders")
    .filter(|f| f.eq("user_id", user_id))
    .collect()
    .await?;

for order in orders {
    db.delete(&format!("orders:{}", order.id)).await?;
}

// Then delete the user
db.delete(&format!("users:{}", user_id)).await?;
println!("User and all orders deleted");
§Alternative: Soft Delete Pattern

For recoverable deletions, use soft deletes instead:

// Soft delete - mark as deleted instead of removing
db.update_document("users", &user_id, vec![
    ("deleted", Value::Bool(true)),
    ("deleted_at", Value::String(chrono::Utc::now().to_rfc3339())),
]).await?;

// Query excludes soft-deleted items
let active_users = db.query("users")
    .filter(|f| f.eq("deleted", Value::Bool(false)))
    .collect()
    .await?;

// Later: hard delete after retention period
let old_deletions = db.query("users")
    .filter(|f| f.eq("deleted", Value::Bool(true)))
    .filter(|f| f.lt("deleted_at", thirty_days_ago))
    .collect()
    .await?;

for user in old_deletions {
    db.delete(&format!("users:{}", user.id)).await?;
}
§Important Notes
  • Deletion is permanent - no undo/recovery
  • Consider soft deletes for recoverable operations
  • Use transactions for multi-document deletions
  • PubSub subscribers will receive delete events
  • All indices are automatically cleaned up
Source

pub async fn delete_collection(&self, collection: &str) -> Result<()>
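
Undocumented above; a minimal usage sketch:

// Remove an entire collection
db.delete_collection("temp_data").await?;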

Source

pub async fn search_text( &self, collection: &str, field: &str, query: &str, ) -> Result<Vec<Document>>

Perform a full-text search on an indexed text field

This provides more advanced text search capabilities, including relevance ranking of results.
Source

pub fn export_as_json(&self, collection: &str, output_path: &str) -> Result<()>

Export a collection to a JSON file

Creates a JSON file containing all documents in the collection. Useful for backups, data migration, or sharing datasets. Automatically appends .json extension if not present.

§Performance
  • Export speed: ~10,000 docs/sec
  • Scans entire collection from cold storage
  • Memory efficient: streams documents to file
§Arguments
  • collection - Name of the collection to export
  • output_path - Path to the output JSON file (.json auto-appended)
§Returns

Success or an error

§Examples
use aurora_db::Aurora;

let db = Aurora::open("mydb.db")?;

// Basic export
db.export_as_json("users", "./backups/users_2024-01-15")?;
// Creates: ./backups/users_2024-01-15.json

// Timestamped backup
let timestamp = chrono::Utc::now().format("%Y%m%d_%H%M%S");
let backup_path = format!("./backups/users_{}", timestamp);
db.export_as_json("users", &backup_path)?;

// Export multiple collections
for collection in &["users", "orders", "products"] {
    db.export_as_json(collection, &format!("./export/{}", collection))?;
}
§Output Format

The exported JSON has this structure:

{
  "users": [
    { "id": "123", "name": "Alice", "email": "alice@example.com" },
    { "id": "456", "name": "Bob", "email": "bob@example.com" }
  ]
}
§See Also
  • export_as_csv() for CSV format export
  • import_from_json() to restore exported data
Source

pub fn export_as_csv(&self, collection: &str, filename: &str) -> Result<()>

Export a collection to a CSV file

Creates a CSV file with headers from the first document and rows for each document. Useful for spreadsheet analysis, data science workflows, or reporting. Automatically appends .csv extension if not present.

§Performance
  • Export speed: ~8,000 docs/sec
  • Memory efficient: streams rows to file
  • Headers determined from first document
§Arguments
  • collection - Name of the collection to export
  • filename - Path to the output CSV file (.csv auto-appended)
§Returns

Success or an error

§Examples
use aurora_db::Aurora;

let db = Aurora::open("mydb.db")?;

// Basic CSV export
db.export_as_csv("users", "./reports/users")?;
// Creates: ./reports/users.csv

// Export for analysis in Excel/Google Sheets
db.export_as_csv("orders", "./analytics/sales_data")?;

// Monthly report generation
let month = chrono::Utc::now().format("%Y-%m");
db.export_as_csv("transactions", &format!("./reports/transactions_{}", month))?;
§Output Format
id,name,email,age
123,Alice,alice@example.com,28
456,Bob,bob@example.com,32
§Important Notes
  • Headers are taken from the first document’s fields
  • Documents with different fields will have empty values for missing fields
  • Nested objects/arrays are converted to strings
  • Best for flat document structures
§See Also
  • export_as_json() for JSON format (better for nested data)
  • For complex nested structures, use JSON export instead
Source

pub fn find<'a>(&'a self, collection: &str) -> QueryBuilder<'a>

Source

pub async fn find_by_id( &self, collection: &str, id: &str, ) -> Result<Option<Document>>

Source

pub async fn find_one<F>( &self, collection: &str, filter_fn: F, ) -> Result<Option<Document>>
where F: Fn(&FilterBuilder<'_, '_>) -> bool + Send + Sync + 'static,

Source

pub async fn find_by_field<T: Into<Value> + Clone + Send + Sync + 'static>( &self, collection: &str, field: &'static str, value: T, ) -> Result<Vec<Document>>

Source

pub async fn find_by_fields( &self, collection: &str, fields: Vec<(&str, Value)>, ) -> Result<Vec<Document>>

Source

pub async fn find_in_range<T: Into<Value> + Clone + Send + Sync + 'static>( &self, collection: &str, field: &'static str, min: T, max: T, ) -> Result<Vec<Document>>
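
These convenience finders are undocumented above; a usage sketch based on their signatures, assuming the usual Into&lt;Value&gt; conversions for strings and integers:

use aurora_db::types::Value;

// By ID
let doc = db.find_by_id("users", "user-123").await?;

// By a single field value
let admins = db.find_by_field("users", "role", "admin".to_string()).await?;

// By several field values at once
let premium = db.find_by_fields("users", vec![
    ("active", Value::Bool(true)),
    ("tier", Value::String("premium".into())),
]).await?;

// By numeric range
let adults = db.find_in_range("users", "age", 18, 65).await?;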

Source

pub async fn find_complex<'a>(&'a self, collection: &str) -> QueryBuilder<'a>

Source

pub async fn upsert( &self, collection: &str, id: &str, data: Vec<(&str, Value)>, ) -> Result<String>

Source

pub async fn increment( &self, collection: &str, id: &str, field: &str, amount: i64, ) -> Result<i64>
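
Undocumented above; a combined sketch for upsert() and increment() based on their signatures:

use aurora_db::types::Value;

// Insert-or-update a document with a known ID
let id = db.upsert("users", "user-123", vec![
    ("name", Value::String("Alice".into())),
    ("active", Value::Bool(true)),
]).await?;

// Atomically bump a counter field and get the new value
let views = db.increment("posts", "post-42", "views", 1).await?;
println!("post-42 now has {} views", views);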

Source

pub async fn delete_by_query<F>( &self, collection: &str, filter_fn: F, ) -> Result<usize>
where F: Fn(&FilterBuilder<'_, '_>) -> bool + Send + Sync + 'static,

Source

pub async fn import_from_json( &self, collection: &str, filename: &str, ) -> Result<ImportStats>

Import documents from a JSON file into a collection

Validates each document against the collection schema, skips duplicates (by ID), and provides detailed statistics about the import operation. Useful for restoring backups, migrating data, or seeding development databases.

§Performance
  • Import speed: ~5,000 docs/sec (with validation)
  • Memory efficient: processes documents one at a time
  • Validates schema and unique constraints
§Arguments
  • collection - Name of the collection to import into
  • filename - Path to the JSON file containing documents (array format)
§Returns

ImportStats containing counts of imported, skipped, and failed documents

§Examples
use aurora_db::Aurora;

let db = Aurora::open("mydb.db")?;

// Basic import
let stats = db.import_from_json("users", "./data/new_users.json").await?;
println!("Imported: {}, Skipped: {}, Failed: {}",
    stats.imported, stats.skipped, stats.failed);

// Restore from backup
let backup_file = "./backups/users_2024-01-15.json";
let stats = db.import_from_json("users", backup_file).await?;

if stats.failed > 0 {
    eprintln!("Warning: {} documents failed validation", stats.failed);
}

// Idempotent import - duplicates are skipped
let stats = db.import_from_json("users", "./data/users.json").await?;
// Running again will skip all existing documents
let stats2 = db.import_from_json("users", "./data/users.json").await?;
assert_eq!(stats2.skipped, stats.imported);

// Migration from another system
db.new_collection("products", vec![
    ("sku", FieldType::String),
    ("name", FieldType::String),
    ("price", FieldType::Float),
])?;

let stats = db.import_from_json("products", "./migration/products.json").await?;
println!("Migration complete: {} products imported", stats.imported);
§Expected JSON Format

The JSON file should contain an array of document objects:

[
  { "id": "123", "name": "Alice", "email": "alice@example.com" },
  { "id": "456", "name": "Bob", "email": "bob@example.com" },
  { "name": "Carol", "email": "carol@example.com" }
]
§Behavior
  • Documents with existing IDs are skipped (duplicate detection)
  • Documents without IDs get auto-generated UUIDs
  • Schema validation is performed on all fields
  • Failed documents are counted but don’t stop the import
  • Unique constraints are checked
§See Also
  • export_as_json() to create compatible backup files
  • batch_insert() for programmatic bulk inserts
Source

pub fn get_collection_definition(&self, collection: &str) -> Result<Collection>

Get collection definition

Source

pub fn get_database_stats(&self) -> Result<DatabaseStats>

Get storage statistics and information about the database

Source

pub fn is_in_hot_cache(&self, key: &str) -> bool

Check if a key is currently stored in the hot cache

Source

pub fn clear_hot_cache(&self)

Clear the hot cache (useful when memory needs to be freed)

Source

pub async fn prewarm_cache( &self, collection: &str, limit: Option<usize>, ) -> Result<usize>

Prewarm the cache by loading frequently accessed data from cold storage

Loads documents from a collection into memory cache to eliminate cold-start latency. Dramatically improves initial query performance after database startup by preloading the most commonly accessed data.

§Performance Impact
  • Prewarming speed: ~20,000 docs/sec
  • Improves subsequent read latency from ~2ms (disk) to ~0.001ms (memory)
  • Cache hit rate jumps from 0% to 95%+ for prewarmed data
  • Memory cost: ~500 bytes per document average
§Arguments
  • collection - The collection to prewarm
  • limit - Maximum number of documents to load (default: 1000, None = all)
§Returns

Number of documents loaded into cache

§Examples
use aurora_db::{Aurora, types::Value};

let db = Aurora::open("mydb.db")?;

// Prewarm frequently accessed collection
let loaded = db.prewarm_cache("users", Some(1000)).await?;
println!("Prewarmed {} user documents", loaded);

// Now queries are fast from the start
let stats_before = db.get_cache_stats();
let users = db.query("users").collect().await?;
let stats_after = db.get_cache_stats();

// High hit rate thanks to prewarming
assert!(stats_after.hit_rate > 0.95);

// Startup optimization pattern
async fn startup_prewarm(db: &Aurora) -> Result<()> {
    println!("Prewarming caches...");

    // Prewarm most frequently accessed collections
    db.prewarm_cache("users", Some(5000)).await?;
    db.prewarm_cache("sessions", Some(1000)).await?;
    db.prewarm_cache("products", Some(500)).await?;

    let stats = db.get_cache_stats();
    println!("Cache prewarmed: {} entries loaded", stats.size);

    Ok(())
}

// Web server startup
#[tokio::main]
async fn main() {
    let db = Aurora::open("app.db").unwrap();

    // Prewarm before accepting requests
    db.prewarm_cache("users", Some(10000)).await.unwrap();

    // Server is now ready with hot cache
    start_web_server(db).await;
}

// Prewarm all documents (for small collections)
let all_loaded = db.prewarm_cache("config", None).await?;
// All config documents now in memory

// Selective prewarming based on access patterns
async fn smart_prewarm(db: &Aurora) -> Result<()> {
    // Load recent users (they're accessed most)
    db.prewarm_cache("users", Some(1000)).await?;

    // Load active sessions only
    let active_sessions = db.query("sessions")
        .filter(|f| f.eq("active", Value::Bool(true)))
        .limit(500)
        .collect()
        .await?;

    // Manually populate cache with hot data
    for session in active_sessions {
        // Reading automatically caches
        db.get_document("sessions", &session.id)?;
    }

    Ok(())
}
§Typical Prewarming Scenarios

Web Application Startup:

// Load user data, sessions, and active content
db.prewarm_cache("users", Some(5000)).await?;
db.prewarm_cache("sessions", Some(2000)).await?;
db.prewarm_cache("posts", Some(1000)).await?;

E-commerce Site:

// Load products, categories, and user carts
db.prewarm_cache("products", Some(500)).await?;
db.prewarm_cache("categories", None).await?;  // All categories
db.prewarm_cache("active_carts", Some(1000)).await?;

API Server:

// Load authentication data and rate limits
db.prewarm_cache("api_keys", None).await?;
db.prewarm_cache("rate_limits", Some(10000)).await?;
§When to Use
  • At application startup to eliminate cold-start latency
  • After cache clear operations
  • Before high-traffic events (product launches, etc.)
  • When deploying new instances (load balancer warm-up)
§Memory Considerations
  • 1,000 docs ≈ 500 KB memory
  • 10,000 docs ≈ 5 MB memory
  • 100,000 docs ≈ 50 MB memory
  • Stay within configured cache capacity
§See Also
  • get_cache_stats() to monitor cache effectiveness
  • prewarm_all_collections() to prewarm all collections
  • Aurora::with_config() to adjust cache capacity
Source

pub async fn prewarm_all_collections( &self, docs_per_collection: Option<usize>, ) -> Result<HashMap<String, usize>>

Prewarm cache for all collections
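
For example:

// Load up to 500 documents from every collection into the hot cache
let loaded = db.prewarm_all_collections(Some(500)).await?;
for (collection, count) in &loaded {
    println!("{}: {} docs prewarmed", collection, count);
}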

Source

pub async fn batch_write(&self, pairs: Vec<(String, Vec<u8>)>) -> Result<()>

Store multiple key-value pairs efficiently in a single batch operation

Low-level batch write operation that bypasses document validation and writes raw byte data directly to storage. Useful for advanced use cases, custom serialization, or maximum performance scenarios.

§Performance
  • Write speed: ~100,000 writes/sec
  • Single disk fsync for entire batch
  • No validation or schema checking
  • Direct storage access
§Arguments
  • pairs - Vector of (key, value) tuples where value is raw bytes
§Returns

Success or an error

§Examples
use aurora_db::Aurora;

let db = Aurora::open("mydb.db")?;

// Low-level batch write
let pairs = vec![
    ("users:123".to_string(), b"raw data 1".to_vec()),
    ("users:456".to_string(), b"raw data 2".to_vec()),
    ("cache:key1".to_string(), b"cached value".to_vec()),
];

db.batch_write(pairs).await?;

// Custom binary serialization
use bincode;
use serde::{Serialize, Deserialize};

#[derive(Serialize, Deserialize)]
struct CustomData {
    id: u64,
    payload: Vec<u8>,
}

let custom_data = vec![
    CustomData { id: 1, payload: vec![1, 2, 3] },
    CustomData { id: 2, payload: vec![4, 5, 6] },
];

let pairs: Vec<(String, Vec<u8>)> = custom_data
    .iter()
    .map(|data| {
        let key = format!("binary:{}", data.id);
        let value = bincode::serialize(data).unwrap();
        (key, value)
    })
    .collect();

db.batch_write(pairs).await?;

// Bulk cache population
let cache_entries: Vec<(String, Vec<u8>)> = (0..10000)
    .map(|i| {
        let key = format!("cache:item_{}", i);
        let value = format!("value_{}", i).into_bytes();
        (key, value)
    })
    .collect();

db.batch_write(cache_entries).await?;
// Writes 10,000 entries in ~100ms
§Important Notes
  • No schema validation performed
  • No unique constraint checking
  • No automatic indexing
  • Keys must follow “collection:id” format for proper grouping
  • Values are raw bytes - you handle serialization
  • Use batch_insert() for validated document inserts
§When to Use
  • Maximum write performance needed
  • Custom serialization formats (bincode, msgpack, etc.)
  • Cache population
  • Low-level database operations
  • You’re bypassing the document model
§When NOT to Use
  • Regular document inserts → Use batch_insert() instead
  • Need validation → Use batch_insert() instead
  • Need indexing → Use batch_insert() instead
§See Also
  • batch_insert() for validated document batch inserts
  • put() for single key-value writes
Source

pub fn scan_with_prefix( &self, prefix: &str, ) -> impl Iterator<Item = Result<(String, Vec<u8>)>> + '_

Scan for keys with a specific prefix
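
For example:

// Iterate over every key in the users collection
for entry in db.scan_with_prefix("users:") {
    let (key, bytes) = entry?;
    println!("{}: {} bytes", key, bytes.len());
}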

Source

pub fn get_collection_stats(&self) -> Result<HashMap<String, CollectionStats>>

Get storage efficiency metrics for the database

Source

pub fn search_by_value( &self, collection: &str, field: &str, value: &Value, ) -> Result<Vec<Document>>

Search for documents by exact value using an index

This method performs a fast lookup using a pre-created index
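
For example:

use aurora_db::types::Value;

// Requires an index on the field (see create_index)
let matches = db.search_by_value(
    "users",
    "email",
    &Value::String("alice@example.com".into()),
)?;
println!("{} matching documents", matches.len());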


Source

pub async fn create_text_index( &self, collection: &str, field: &str, _enable_stop_words: bool, ) -> Result<()>

Create a full-text search index on a text field
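
For example, pairing it with search_text():

// Build the index once, then run ranked text searches against it
db.create_text_index("articles", "content", true).await?;
let results = db.search_text("articles", "content", "quantum computing").await?;
println!("{} matching articles", results.len());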

Source

pub async fn execute_simple_query( &self, builder: &SimpleQueryBuilder, ) -> Result<Vec<Document>>

Source

pub async fn execute_dynamic_query( &self, collection: &str, payload: &QueryPayload, ) -> Result<Vec<Document>>

Source

pub async fn process_network_request(&self, request: Request) -> Response

Source

pub async fn create_indices( &self, collection: &str, fields: &[&str], ) -> Result<()>

Create indices for commonly queried fields automatically

This is a convenience method that creates indices for fields that are likely to be queried frequently, improving performance.

§Arguments
  • collection - Name of the collection
  • fields - List of field names to create indices for
§Examples
// Create indices for commonly queried fields
db.create_indices("users", &["email", "status", "created_at"]).await?;
Source

pub fn get_index_stats(&self, collection: &str) -> HashMap<String, IndexStats>

Get index statistics for a collection

This helps understand which indices exist and how effective they are.
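
For example (assuming IndexStats is Debug-printable):

for (field, stats) in db.get_index_stats("users") {
    println!("index on {}: {:?}", field, stats);
}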

Source

pub async fn optimize_collection(&self, collection: &str) -> Result<()>

Optimize a collection by creating indices for frequently filtered fields

This analyzes common query patterns and suggests/creates optimal indices.

Source

pub async fn aql_get_all_collection( &self, collection: &str, ) -> Result<Vec<Document>>

Get all documents in a collection (AQL helper)

This is a wrapper around the internal query system optimized for bulk retrieval.

Source

pub async fn aql_insert( &self, collection: &str, data: HashMap<String, Value>, ) -> Result<Document>

Insert a document from a HashMap (AQL helper)

Returns the complete document (not just ID) for AQL executor

Source

pub async fn aql_update_document( &self, collection: &str, doc_id: &str, updates: HashMap<String, Value>, ) -> Result<Document>

Update a document by ID with new data (AQL helper)

Merges the new data with the existing data and returns the updated document

Source

pub async fn aql_delete_document( &self, collection: &str, doc_id: &str, ) -> Result<Document>

Delete a document by ID (AQL helper)

Returns the deleted document

Source

pub async fn aql_get_document( &self, collection: &str, doc_id: &str, ) -> Result<Option<Document>>

Get a single document by ID (AQL helper)
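§Example

A minimal sketch chaining the AQL helpers above; it assumes Document exposes its identifier as an id field, and the field names are illustrative:

use std::collections::HashMap;

// Insert a document and capture the returned Document
let mut data = HashMap::new();
data.insert("name".to_string(), Value::String("Alice".to_string()));
let doc = db.aql_insert("users", data).await?;

// Merge an update into the stored document
let mut updates = HashMap::new();
updates.insert("name".to_string(), Value::String("Alicia".to_string()));
db.aql_update_document("users", &doc.id, updates).await?;

// Delete it, then confirm the lookup returns None
db.aql_delete_document("users", &doc.id).await?;
assert!(db.aql_get_document("users", &doc.id).await?.is_none());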

Source

pub fn aql_begin_transaction(&self) -> Result<TransactionId>

Begin a transaction (AQL helper); returns the transaction ID

Source

pub async fn aql_commit_transaction(&self, tx_id: TransactionId) -> Result<()>

Commit a transaction (AQL helper)

Source

pub async fn aql_rollback_transaction(&self, tx_id: TransactionId) -> Result<()>

Rollback a transaction (AQL helper)
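§Example

A minimal sketch of the transaction lifecycle; all_good is a hypothetical flag standing in for your own success check:

let tx_id = db.aql_begin_transaction()?;
// ... perform the writes that should be atomic ...
if all_good {
    db.aql_commit_transaction(tx_id).await?;
} else {
    db.aql_rollback_transaction(tx_id).await?;
}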

Source

pub async fn create_collection_schema( &self, name: &str, fields: HashMap<String, FieldDefinition>, ) -> Result<()>

Create a collection from AST schema definition

Source

pub async fn add_field_to_schema( &self, collection_name: &str, name: String, definition: FieldDefinition, ) -> Result<()>

Add a field to an existing collection schema

Source

pub async fn drop_field_from_schema( &self, collection_name: &str, field_name: String, ) -> Result<()>

Drop a field from an existing collection schema

Source

pub async fn rename_field_in_schema( &self, collection_name: &str, from: String, to: String, ) -> Result<()>

Rename a field in an existing collection schema

Source

pub async fn modify_field_in_schema( &self, collection_name: &str, name: String, definition: FieldDefinition, ) -> Result<()>

Modify a field in an existing collection schema

Source

pub async fn drop_collection_schema(&self, collection_name: &str) -> Result<()>

Drop an entire collection definition

Source

pub async fn is_migration_applied(&self, version: &str) -> Result<bool>

Check if a migration version has been applied

Source

pub async fn mark_migration_applied(&self, version: &str) -> Result<()>

Mark a migration version as applied
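§Example

A minimal guard around a hand-written migration; the version string is illustrative:

let version = "2024_06_01_add_user_status";
if !db.is_migration_applied(version).await? {
    // ... apply the schema changes here ...
    db.mark_migration_applied(version).await?;
}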

Trait Implementations§

Source§

impl Clone for Aurora

Source§

fn clone(&self) -> Self

Returns a duplicate of the value. Read more
1.0.0 · Source§

fn clone_from(&mut self, source: &Self)

Performs copy-assignment from source. Read more
Source§

impl Debug for Aurora

Source§

fn fmt(&self, f: &mut Formatter<'_>) -> Result

Formats the value using the given formatter. Read more
Source§

impl Drop for Aurora

Source§

fn drop(&mut self)

Executes the destructor for this type. Read more

Auto Trait Implementations§

§

impl Freeze for Aurora

§

impl !RefUnwindSafe for Aurora

§

impl Send for Aurora

§

impl Sync for Aurora

§

impl Unpin for Aurora

§

impl !UnwindSafe for Aurora

Blanket Implementations§

Source§

impl<T> Any for T
where T: 'static + ?Sized,

Source§

fn type_id(&self) -> TypeId

Gets the TypeId of self. Read more
Source§

impl<T> ArchivePointee for T

Source§

type ArchivedMetadata = ()

The archived version of the pointer metadata for this type.
Source§

fn pointer_metadata( _: &<T as ArchivePointee>::ArchivedMetadata, ) -> <T as Pointee>::Metadata

Converts some archived metadata to the pointer metadata for itself.
Source§

impl<T> Borrow<T> for T
where T: ?Sized,

Source§

fn borrow(&self) -> &T

Immutably borrows from an owned value. Read more
Source§

impl<T> BorrowMut<T> for T
where T: ?Sized,

Source§

fn borrow_mut(&mut self) -> &mut T

Mutably borrows from an owned value. Read more
Source§

impl<T> CloneToUninit for T
where T: Clone,

Source§

unsafe fn clone_to_uninit(&self, dest: *mut u8)

🔬This is a nightly-only experimental API. (clone_to_uninit)
Performs copy-assignment from self to dest. Read more
Source§

impl<F, W, T, D> Deserialize<With<T, W>, D> for F
where W: DeserializeWith<F, T, D>, D: Fallible + ?Sized, F: ?Sized,

Source§

fn deserialize( &self, deserializer: &mut D, ) -> Result<With<T, W>, <D as Fallible>::Error>

Deserializes using the given deserializer
Source§

impl<T> From<T> for T

Source§

fn from(t: T) -> T

Returns the argument unchanged.

Source§

impl<T> Instrument for T

Source§

fn instrument(self, span: Span) -> Instrumented<Self>

Instruments this type with the provided Span, returning an Instrumented wrapper. Read more
Source§

fn in_current_span(self) -> Instrumented<Self>

Instruments this type with the current Span, returning an Instrumented wrapper. Read more
Source§

impl<T, U> Into<U> for T
where U: From<T>,

Source§

fn into(self) -> U

Calls U::from(self).

That is, this conversion is whatever the implementation of From<T> for U chooses to do.

Source§

impl<T> LayoutRaw for T

Source§

fn layout_raw(_: <T as Pointee>::Metadata) -> Result<Layout, LayoutError>

Gets the layout of the type.
Source§

impl<D> OwoColorize for D

Source§

fn fg<C>(&self) -> FgColorDisplay<'_, C, Self>
where C: Color,

Set the foreground color generically Read more
Source§

fn bg<C>(&self) -> BgColorDisplay<'_, C, Self>
where C: Color,

Set the background color generically. Read more
Source§

fn black(&self) -> FgColorDisplay<'_, Black, Self>

Change the foreground color to black
Source§

fn on_black(&self) -> BgColorDisplay<'_, Black, Self>

Change the background color to black
Source§

fn red(&self) -> FgColorDisplay<'_, Red, Self>

Change the foreground color to red
Source§

fn on_red(&self) -> BgColorDisplay<'_, Red, Self>

Change the background color to red
Source§

fn green(&self) -> FgColorDisplay<'_, Green, Self>

Change the foreground color to green
Source§

fn on_green(&self) -> BgColorDisplay<'_, Green, Self>

Change the background color to green
Source§

fn yellow(&self) -> FgColorDisplay<'_, Yellow, Self>

Change the foreground color to yellow
Source§

fn on_yellow(&self) -> BgColorDisplay<'_, Yellow, Self>

Change the background color to yellow
Source§

fn blue(&self) -> FgColorDisplay<'_, Blue, Self>

Change the foreground color to blue
Source§

fn on_blue(&self) -> BgColorDisplay<'_, Blue, Self>

Change the background color to blue
Source§

fn magenta(&self) -> FgColorDisplay<'_, Magenta, Self>

Change the foreground color to magenta
Source§

fn on_magenta(&self) -> BgColorDisplay<'_, Magenta, Self>

Change the background color to magenta
Source§

fn purple(&self) -> FgColorDisplay<'_, Magenta, Self>

Change the foreground color to purple
Source§

fn on_purple(&self) -> BgColorDisplay<'_, Magenta, Self>

Change the background color to purple
Source§

fn cyan(&self) -> FgColorDisplay<'_, Cyan, Self>

Change the foreground color to cyan
Source§

fn on_cyan(&self) -> BgColorDisplay<'_, Cyan, Self>

Change the background color to cyan
Source§

fn white(&self) -> FgColorDisplay<'_, White, Self>

Change the foreground color to white
Source§

fn on_white(&self) -> BgColorDisplay<'_, White, Self>

Change the background color to white
Source§

fn default_color(&self) -> FgColorDisplay<'_, Default, Self>

Change the foreground color to the terminal default
Source§

fn on_default_color(&self) -> BgColorDisplay<'_, Default, Self>

Change the background color to the terminal default
Source§

fn bright_black(&self) -> FgColorDisplay<'_, BrightBlack, Self>

Change the foreground color to bright black
Source§

fn on_bright_black(&self) -> BgColorDisplay<'_, BrightBlack, Self>

Change the background color to bright black
Source§

fn bright_red(&self) -> FgColorDisplay<'_, BrightRed, Self>

Change the foreground color to bright red
Source§

fn on_bright_red(&self) -> BgColorDisplay<'_, BrightRed, Self>

Change the background color to bright red
Source§

fn bright_green(&self) -> FgColorDisplay<'_, BrightGreen, Self>

Change the foreground color to bright green
Source§

fn on_bright_green(&self) -> BgColorDisplay<'_, BrightGreen, Self>

Change the background color to bright green
Source§

fn bright_yellow(&self) -> FgColorDisplay<'_, BrightYellow, Self>

Change the foreground color to bright yellow
Source§

fn on_bright_yellow(&self) -> BgColorDisplay<'_, BrightYellow, Self>

Change the background color to bright yellow
Source§

fn bright_blue(&self) -> FgColorDisplay<'_, BrightBlue, Self>

Change the foreground color to bright blue
Source§

fn on_bright_blue(&self) -> BgColorDisplay<'_, BrightBlue, Self>

Change the background color to bright blue
Source§

fn bright_magenta(&self) -> FgColorDisplay<'_, BrightMagenta, Self>

Change the foreground color to bright magenta
Source§

fn on_bright_magenta(&self) -> BgColorDisplay<'_, BrightMagenta, Self>

Change the background color to bright magenta
Source§

fn bright_purple(&self) -> FgColorDisplay<'_, BrightMagenta, Self>

Change the foreground color to bright purple
Source§

fn on_bright_purple(&self) -> BgColorDisplay<'_, BrightMagenta, Self>

Change the background color to bright purple
Source§

fn bright_cyan(&self) -> FgColorDisplay<'_, BrightCyan, Self>

Change the foreground color to bright cyan
Source§

fn on_bright_cyan(&self) -> BgColorDisplay<'_, BrightCyan, Self>

Change the background color to bright cyan
Source§

fn bright_white(&self) -> FgColorDisplay<'_, BrightWhite, Self>

Change the foreground color to bright white
Source§

fn on_bright_white(&self) -> BgColorDisplay<'_, BrightWhite, Self>

Change the background color to bright white
Source§

fn bold(&self) -> BoldDisplay<'_, Self>

Make the text bold
Source§

fn dimmed(&self) -> DimDisplay<'_, Self>

Make the text dim
Source§

fn italic(&self) -> ItalicDisplay<'_, Self>

Make the text italicized
Source§

fn underline(&self) -> UnderlineDisplay<'_, Self>

Make the text underlined
Source§

fn blink(&self) -> BlinkDisplay<'_, Self>

Make the text blink
Source§

fn blink_fast(&self) -> BlinkFastDisplay<'_, Self>

Make the text blink (but fast!)
Source§

fn reversed(&self) -> ReversedDisplay<'_, Self>

Swap the foreground and background colors
Source§

fn hidden(&self) -> HiddenDisplay<'_, Self>

Hide the text
Source§

fn strikethrough(&self) -> StrikeThroughDisplay<'_, Self>

Cross out the text
Source§

fn color<Color>(&self, color: Color) -> FgDynColorDisplay<'_, Color, Self>
where Color: DynColor,

Set the foreground color at runtime. Only use if you do not know which color will be used at compile-time. If the color is constant, use either OwoColorize::fg or a color-specific method, such as OwoColorize::green, Read more
Source§

fn on_color<Color>(&self, color: Color) -> BgDynColorDisplay<'_, Color, Self>
where Color: DynColor,

Set the background color at runtime. Only use if you do not know what color to use at compile-time. If the color is constant, use either OwoColorize::bg or a color-specific method, such as OwoColorize::on_yellow, Read more
Source§

fn fg_rgb<const R: u8, const G: u8, const B: u8>( &self, ) -> FgColorDisplay<'_, CustomColor<R, G, B>, Self>

Set the foreground color to a specific RGB value.
Source§

fn bg_rgb<const R: u8, const G: u8, const B: u8>( &self, ) -> BgColorDisplay<'_, CustomColor<R, G, B>, Self>

Set the background color to a specific RGB value.
Source§

fn truecolor(&self, r: u8, g: u8, b: u8) -> FgDynColorDisplay<'_, Rgb, Self>

Sets the foreground color to an RGB value.
Source§

fn on_truecolor(&self, r: u8, g: u8, b: u8) -> BgDynColorDisplay<'_, Rgb, Self>

Sets the background color to an RGB value.
Source§

fn style(&self, style: Style) -> Styled<&Self>

Apply a runtime-determined style
Source§

impl<T> Pointable for T

Source§

const ALIGN: usize

The alignment of the pointer.
Source§

type Init = T

The type for initializers.
Source§

unsafe fn init(init: <T as Pointable>::Init) -> usize

Initializes a pointer with the given initializer. Read more
Source§

unsafe fn deref<'a>(ptr: usize) -> &'a T

Dereferences the given pointer. Read more
Source§

unsafe fn deref_mut<'a>(ptr: usize) -> &'a mut T

Mutably dereferences the given pointer. Read more
Source§

unsafe fn drop(ptr: usize)

Drops the object pointed to by the given pointer. Read more
Source§

impl<T> Pointee for T

Source§

type Metadata = ()

The type for metadata in pointers and references to Self.
Source§

impl<T> Same for T

Source§

type Output = T

Should always be Self
Source§

impl<T> ToOwned for T
where T: Clone,

Source§

type Owned = T

The resulting type after obtaining ownership.
Source§

fn to_owned(&self) -> T

Creates owned data from borrowed data, usually by cloning. Read more
Source§

fn clone_into(&self, target: &mut T)

Uses borrowed data to replace owned data, usually by cloning. Read more
Source§

impl<T, U> TryFrom<U> for T
where U: Into<T>,

Source§

type Error = Infallible

The type returned in the event of a conversion error.
Source§

fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>

Performs the conversion.
Source§

impl<T, U> TryInto<U> for T
where U: TryFrom<T>,

Source§

type Error = <U as TryFrom<T>>::Error

The type returned in the event of a conversion error.
Source§

fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>

Performs the conversion.
Source§

impl<T> WithSubscriber for T

Source§

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self>
where S: Into<Dispatch>,

Attaches the provided Subscriber to this type, returning a WithDispatch wrapper. Read more
Source§

fn with_current_subscriber(self) -> WithDispatch<Self>

Attaches the current default Subscriber to this type, returning a WithDispatch wrapper. Read more