unigateway 1.2.1

Lightweight, local-first LLM gateway for developers. A single, stable binary that serves as the unified entry point for all your AI tools and models.
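
//! Request execution for unigateway's proxy endpoints (`/v1/chat/completions`,
//! `/v1/responses`, `/v1/messages`, `/v1/embeddings`). Every handler follows
//! the same two-path shape: when the caller presented a matching gateway key,
//! the request runs through that key's provider pool (the "core" path);
//! otherwise it falls back to an upstream API key resolved from the caller's
//! bearer token or the runtime's environment config (the "env" path).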
use std::sync::Arc;

use axum::response::Response;
use tracing::info;
use unigateway_core::{ProxyChatRequest, ProxyEmbeddingsRequest, ProxyResponsesRequest};
use unigateway_runtime::{
    core::{
        try_anthropic_chat_via_core, try_anthropic_chat_via_env_core, try_openai_chat_via_core,
        try_openai_chat_via_env_core, try_openai_embeddings_via_core,
        try_openai_embeddings_via_env_core, try_openai_responses_via_core,
        try_openai_responses_via_env_core,
    },
    flow::{
        RuntimeResponseResult, missing_upstream_api_key_response, prepare_anthropic_env_config,
        prepare_openai_env_config, resolve_core_only_runtime_flow,
    },
};

use crate::types::AppState;

use super::request_flow::PreparedGatewayRequest;
use super::response_flow::respond_prepared_runtime_result;

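/// Proxies an OpenAI-style chat completion (`/v1/chat/completions`), taking
/// the core path when a gateway key matched and the env fallback otherwise.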
pub(super) async fn execute_prepared_openai_chat(
    state: &Arc<AppState>,
    prepared: &PreparedGatewayRequest<'_>,
    request: &ProxyChatRequest,
) -> Response {
    let endpoint = "/v1/chat/completions";

    let result = if let Some(auth) = prepared.auth.as_ref() {
        resolve_core_only_runtime_flow(
            try_openai_chat_via_core(
                &prepared.runtime,
                &auth.key.service_id,
                prepared.hint.as_deref(),
                request.clone(),
            ),
            "no provider pool available for chat",
        )
        .await
    } else {
        execute_openai_chat_env(prepared, request).await
    };

    respond_prepared_runtime_result(state, prepared, endpoint, result).await
}

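/// Proxies an OpenAI Responses API call (`/v1/responses`) through the core
/// path or the env fallback.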
pub(super) async fn execute_prepared_openai_responses(
    state: &Arc<AppState>,
    prepared: &PreparedGatewayRequest<'_>,
    request: ProxyResponsesRequest,
) -> Response {
    let endpoint = "/v1/responses";

    let result = if let Some(auth) = prepared.auth.as_ref() {
        resolve_core_only_runtime_flow(
            try_openai_responses_via_core(
                &prepared.runtime,
                &auth.key.service_id,
                prepared.hint.as_deref(),
                // `request` is owned by this handler, so the core path can
                // consume it directly; the env branch below only borrows it.
                request,
            ),
            "no openai-compatible provider available for responses",
        )
        .await
    } else {
        execute_openai_responses_env(prepared, &request).await
    };

    respond_prepared_runtime_result(state, prepared, endpoint, result).await
}

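/// Proxies an Anthropic Messages call (`/v1/messages`). Unlike the other
/// handlers, it logs the authentication outcome and any env-key fallback
/// decision before dispatching.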
pub(super) async fn execute_prepared_anthropic_chat(
    state: &Arc<AppState>,
    prepared: &PreparedGatewayRequest<'_>,
    request: &ProxyChatRequest,
) -> Response {
    let endpoint = "/v1/messages";

    info!(
        endpoint,
        gateway_key_matched = prepared.auth.is_some(),
        token_present = !prepared.token.is_empty(),
        "anthropic request authentication result"
    );

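    // No gateway key matched: record which credential (caller token vs. the
    // configured env key) will drive the upstream fallback.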
    if prepared.auth.is_none() {
        info!(
            endpoint,
            token_present = !prepared.token.is_empty(),
            env_key_present = !prepared.runtime.config.anthropic_api_key.is_empty(),
            using_env_fallback =
                prepared.token.is_empty() && !prepared.runtime.config.anthropic_api_key.is_empty(),
            "anthropic request falling back to env upstream key"
        );
    }

    let result = if let Some(auth) = prepared.auth.as_ref() {
        resolve_core_only_runtime_flow(
            try_anthropic_chat_via_core(
                &prepared.runtime,
                &auth.key.service_id,
                prepared.hint.as_deref(),
                request.clone(),
                &request.model,
            ),
            "no provider pool available for messages",
        )
        .await
    } else {
        execute_anthropic_chat_env(prepared, request).await
    };

    respond_prepared_runtime_result(state, prepared, endpoint, result).await
}

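/// Proxies an OpenAI embeddings call (`/v1/embeddings`) through the core path
/// or the env fallback.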
pub(super) async fn execute_prepared_openai_embeddings(
    state: &Arc<AppState>,
    prepared: &PreparedGatewayRequest<'_>,
    request: &ProxyEmbeddingsRequest,
) -> Response {
    let endpoint = "/v1/embeddings";

    let result = if let Some(auth) = prepared.auth.as_ref() {
        resolve_core_only_runtime_flow(
            try_openai_embeddings_via_core(
                &prepared.runtime,
                &auth.key.service_id,
                prepared.hint.as_deref(),
                request.clone(),
            ),
            "no openai-compatible provider available for embeddings",
        )
        .await
    } else {
        execute_openai_embeddings_env(prepared, request).await
    };

    respond_prepared_runtime_result(state, prepared, endpoint, result).await
}

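/// Env-path chat fallback: builds an upstream config from the caller's token
/// and the runtime config, rejecting early when no upstream API key can be
/// resolved.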
async fn execute_openai_chat_env(
    prepared: &PreparedGatewayRequest<'_>,
    request: &ProxyChatRequest,
) -> RuntimeResponseResult {
    let env = match prepare_openai_env_config(&prepared.token, prepared.runtime.config) {
        Some(env) => env,
        None => return Err(missing_upstream_api_key_response()),
    };

    resolve_core_only_runtime_flow(
        try_openai_chat_via_env_core(
            &prepared.runtime,
            prepared.hint.as_deref(),
            request.clone(),
            env.base_url,
            &env.api_key,
        ),
        "no provider pool available for chat",
    )
    .await
}

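/// Env-path fallback for the Responses API; same key resolution as the chat
/// fallback above.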
async fn execute_openai_responses_env(
    prepared: &PreparedGatewayRequest<'_>,
    request: &ProxyResponsesRequest,
) -> RuntimeResponseResult {
    let env = match prepare_openai_env_config(&prepared.token, prepared.runtime.config) {
        Some(env) => env,
        None => return Err(missing_upstream_api_key_response()),
    };

    resolve_core_only_runtime_flow(
        try_openai_responses_via_env_core(
            &prepared.runtime,
            prepared.hint.as_deref(),
            request.clone(),
            env.base_url,
            &env.api_key,
        ),
        "no openai-compatible provider available for responses",
    )
    .await
}

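/// Env-path fallback for Anthropic messages; also forwards the requested
/// model name alongside the request body.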
async fn execute_anthropic_chat_env(
    prepared: &PreparedGatewayRequest<'_>,
    request: &ProxyChatRequest,
) -> RuntimeResponseResult {
    let env = match prepare_anthropic_env_config(&prepared.token, prepared.runtime.config) {
        Some(env) => env,
        None => return Err(missing_upstream_api_key_response()),
    };

    resolve_core_only_runtime_flow(
        try_anthropic_chat_via_env_core(
            &prepared.runtime,
            prepared.hint.as_deref(),
            request.clone(),
            &request.model,
            env.base_url,
            &env.api_key,
        ),
        "no provider pool available for messages",
    )
    .await
}

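/// Env-path fallback for embeddings.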
async fn execute_openai_embeddings_env(
    prepared: &PreparedGatewayRequest<'_>,
    request: &ProxyEmbeddingsRequest,
) -> RuntimeResponseResult {
    let env = match prepare_openai_env_config(&prepared.token, prepared.runtime.config) {
        Some(env) => env,
        None => return Err(missing_upstream_api_key_response()),
    };

    resolve_core_only_runtime_flow(
        try_openai_embeddings_via_env_core(
            &prepared.runtime,
            prepared.hint.as_deref(),
            request.clone(),
            env.base_url,
            &env.api_key,
        ),
        "no openai-compatible provider available for embeddings",
    )
    .await
}