systemprompt-ai 0.2.2

Provider-agnostic LLM integration for systemprompt.io AI governance — Anthropic, OpenAI, Gemini, and local models unified behind one governed pipeline with cost tracking and audit.
Documentation
use crate::models::providers::openai::OpenAiReasoningEffort;

/// Returns the default reasoning-effort configuration for `model`.
///
/// Reasoning-capable models (as classified by [`is_reasoning_model`])
/// receive [`OpenAiReasoningEffort::Medium`]; every other model gets
/// `None`, meaning no reasoning parameters are sent to the provider.
pub fn build_reasoning_config(model: &str) -> Option<OpenAiReasoningEffort> {
    // `then_some` maps the boolean directly onto the Option; the
    // `Medium` unit variant is free to construct, so eager evaluation
    // costs nothing.
    is_reasoning_model(model).then_some(OpenAiReasoningEffort::Medium)
}

/// Returns `true` if `model` names an OpenAI reasoning-capable model.
///
/// Matching is by model-name prefix over the known reasoning families:
/// `o1`, `o3`, and `o4` (e.g. `"o1"`, `"o1-mini"`, `"o3-mini"`,
/// `"o4-mini"`). The original check covered only `o1`/`o3`, so the
/// `o4` family was misclassified as non-reasoning and
/// `build_reasoning_config` returned `None` for it. Names like
/// `"gpt-4o"` do not match because the comparison is anchored at the
/// start of the string.
pub fn is_reasoning_model(model: &str) -> bool {
    // Prefix list kept in one place so a new reasoning family is a
    // one-entry addition. Extending this list is backward-compatible:
    // every model that matched before still matches.
    const REASONING_PREFIXES: [&str; 3] = ["o1", "o3", "o4"];
    REASONING_PREFIXES
        .iter()
        .any(|prefix| model.starts_with(prefix))
}