// src/llm/providers.rs
//! Concrete implementations of network APIs and local inference engines.
/// Local Llama.cpp inference engine implementation.
///
/// Compiled only when the `llama-cpp` cargo feature is enabled.
#[cfg(feature = "llama-cpp")]
pub mod llama_cpp;

/// OpenAI API and compatible network service implementations.
///
/// Compiled only when the `openai-api` cargo feature is enabled.
#[cfg(feature = "openai-api")]
pub mod openai_api;