# Generic live OpenAI-compatible endpoint configuration.
# Copy to `.env` and fill in the values for your environment.
#
# Supported protocol values:
# - openai_responses
# - openai_chat_completions
# - claude_messages
# - gemini_generate_content
# - openai_responses_compat
# - openai_chat_completions_compat
# - claude_messages_compat
# - gemini_generate_content_compat
#
# In `*_compat` mode, `OMNILLM_RESPONSES_BASE_URL` must be the full request URL
# (not just a base URL to which a path is appended).
OMNILLM_RESPONSES_BASE_URL=
OMNILLM_RESPONSES_API_KEY=
OMNILLM_RESPONSES_PROTOCOL=openai_responses
OMNILLM_RESPONSES_AUTH_SCHEME=bearer
OMNILLM_RESPONSES_AUTH_NAME=
OMNILLM_RESPONSES_EXTRA_HEADER_NAME=
OMNILLM_RESPONSES_EXTRA_HEADER_VALUE=
OMNILLM_RESPONSES_STREAM=false
OMNILLM_RESPONSES_MAX_OUTPUT_TOKENS=
OMNILLM_RESPONSES_VISION_MODEL=
OMNILLM_RESPONSES_TOOL_MODEL=
OMNILLM_RESPONSES_IMAGE_URL=
# Optional prompt overrides; the defaults below are used if left unchanged.
OMNILLM_RESPONSES_VISION_PROMPT=what is in this image?
OMNILLM_RESPONSES_TOOL_PROMPT=What is the weather like in Boston today?