rust-job-queue-api-worker-system 0.1.0

A production-shaped Rust job queue: Axum API + async workers + Postgres SKIP LOCKED dequeue, retries with decorrelated jitter, idempotency, cooperative cancellation, OpenAPI, Prometheus metrics.
//! One-shot migration runner.
//!
//! Connects to `DATABASE_URL`, applies the embedded idempotent schema,
//! and exits 0. Designed to run as the `migrations` service in
//! [docker-compose.yml](../../docker-compose.yml); the api and worker
//! services then wait on `service_completed_successfully` before
//! starting, so they never observe a half-migrated schema.
//!
//! Re-running this against an already-migrated database is a no-op.
//! Running it against a database from a previous version of the crate
//! is safe as long as the schema remains compatible with the embedded
//! migration.
//!
//! Why a separate binary instead of bolting the migration into the
//! api/worker startup: with multiple worker pods, running migrations on
//! every startup adds unnecessary coordination. One dedicated container
//! runs exactly once per deployment.

use anyhow::Context;
use rust_job_queue_api_worker_system::{connect, migrate, PoolConfig};
use tracing_subscriber::EnvFilter;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    init_tracing();
    let database_url = std::env::var("DATABASE_URL").context("DATABASE_URL must be set")?;
    // Two connections is enough for migrate-then-quit; setting max=2
    // leaves a little headroom without consuming a large pool.
    let pool = connect(&PoolConfig::from_url(database_url).with_max_connections(2))
        .await
        .context("connecting to database")?;
    // Context here distinguishes "could not reach the database" from
    // "migration itself failed" in the container's exit logs.
    migrate(&pool).await.context("applying migrations")?;
    tracing::info!("migrations applied");
    Ok(())
}

/// Install the global `fmt` tracing subscriber.
///
/// Honors `RUST_LOG` when it is set and parseable; otherwise falls
/// back to the `info` level so the final "migrations applied" line is
/// visible without any extra configuration.
fn init_tracing() {
    let env_filter = match EnvFilter::try_from_default_env() {
        Ok(filter) => filter,
        Err(_) => EnvFilter::new("info"),
    };
    tracing_subscriber::fmt().with_env_filter(env_filter).init();
}