loki-ai 0.1.2

An all-in-one, batteries-included LLM CLI Tool
# ---- LLM ----
model: openai:gpt-4o             # Specify the LLM to use
temperature: null                # Set default temperature parameter; the valid range is (0, 1) or (0, 2) depending on the model
top_p: null                      # Set default top-p (nucleus sampling) parameter, with a range of (0, 1)

# ---- Behavior ----
stream: true                     # Controls whether to use streaming APIs when requesting completions from LLM clients
save: true                       # Indicates whether to persist the conversation to messages.md for posterity
keybindings: emacs               # Choose keybinding style (emacs, vi)
editor: null                     # Specifies the editor used to edit the input buffer or session. (e.g. vim, emacs, nano, hx). Defaults to $EDITOR
wrap: no                         # Controls text wrapping (no, auto, <max-width>)
wrap_code: false                 # Enables or disables the wrapping of code blocks

# ---- Prelude ----
repl_prelude: null               # Set a default session or role for REPL mode to use (e.g. role:<name>, session:<name>, <session>:<role>)
cmd_prelude: null                # Set a default session or role for CMD mode to use (e.g. role:<name>, session:<name>, <session>:<role>)
agent_session: null              # Set a session to use when starting an agent (e.g. temp, default)
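# An illustrative (commented-out) example of the prelude syntax described above; 'coder' and 'work'
# are placeholder names for a role and session you would have defined yourself:
# repl_prelude: 'role:coder'     # Start REPL mode with the 'coder' role
# cmd_prelude: 'work:coder'      # Start CMD mode with the 'work' session and the 'coder' role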

# ---- Appearance ----
highlight: true                  # Controls syntax highlighting
light_theme: false               # Activates a light color theme when true. env: LOKI_LIGHT_THEME

# ---- Miscellaneous ----
user_agent: null                 # Set the User-Agent HTTP header; use `auto` for loki/<current-version>
save_shell_history: true         # Whether to save executed shell commands to the history file
sync_models_url: >               # URL to sync model changes from
  https://raw.githubusercontent.com/Dark-Alex-17/loki/refs/heads/main/models.yaml

# ---- REPL Prompt ----
# Custom REPL left/right prompts; see the [REPL Prompt Documentation](./docs/REPL-PROMPT.md) for more information
left_prompt:
  '{color.red}{model}){color.green}{?session {?agent {agent}>}{session}{?role /}}{!session {?agent {agent}>}}{role}{?rag @{rag}}{color.cyan}{?session )}{!session >}{color.reset} '
right_prompt:
  '{color.purple}{?session {?consume_tokens {consume_tokens}({consume_percent}%)}{!consume_tokens {consume_tokens}}}{color.reset}'
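# A minimal custom prompt, for illustration, built only from the template variables shown above:
# left_prompt: '{color.cyan}{model}{color.reset}> '
# right_prompt: ''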

# ---- Vault ----
# See the [Vault documentation](./docs/VAULT.md) for more information on the Loki vault
vault_password_file: null        # Path to a file containing the password for the Loki vault (cannot be a secret template)
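# Secrets stored in the vault can be injected elsewhere in this file with `{{SECRET_NAME}}`
# templates, as the api_key entries in the clients section below illustrate.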

# ---- Function Calling ----
# See the [Tools documentation](./docs/function-calling/TOOLS.md) for more details
function_calling: true           # Enables or disables function calling (globally).
mapping_tools:                   # Alias for a tool or toolset
  fs: 'fs_cat,fs_ls,fs_mkdir,fs_rm,fs_write'
enabled_tools: null              # Which tools to enable by default. (e.g. 'fs,web_search_loki')
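# For example, to enable a few tools by default (illustrative only; each tool, or every tool an
# alias such as `fs` expands to, must also be uncommented in 'visible_tools' below):
# enabled_tools: 'fs_cat,fs_ls,get_current_time'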
visible_tools:                   # Which tools are visible and will be compiled (and can therefore be listed in 'enabled_tools')
#  - demo_py.py
#  - demo_sh.sh
  - execute_command.sh
#  - execute_py_code.py
#  - execute_sql_code.sh
#  - fetch_url_via_curl.sh
#  - fetch_url_via_jina.sh
  - fs_cat.sh
  - fs_ls.sh
#  - fs_mkdir.sh
#  - fs_patch.sh
#  - fs_write.sh
  - get_current_time.sh
#  - get_current_weather.py
  - get_current_weather.sh
  - query_jira_issues.sh
#  - search_arxiv.sh
#  - search_wikipedia.sh
#  - search_wolframalpha.sh
#  - send_mail.sh
#  - send_twilio.sh
#  - web_search_loki.sh
#  - web_search_perplexity.sh
#  - web_search_tavily.sh

# ---- MCP Servers ----
# See the [MCP Servers documentation](./docs/MCP-SERVERS.md) for more details
mcp_server_support: true         # Enables or disables MCP servers (globally).
mapping_mcp_servers:             # Alias for an MCP server or set of servers
  git: github,gitmcp
enabled_mcp_servers: null        # Which MCP servers to enable by default (e.g. 'github,slack')
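# For example, to enable the `git` alias defined above together with a 'slack' server
# (illustrative; names must match servers defined in your MCP server configuration):
# enabled_mcp_servers: 'git,slack'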

# ---- Session ----
# See the [Session documentation](./docs/SESSIONS.md) for more information
save_session: null               # Controls the persistence of the session. If true, auto-save; if false, don't auto-save; if null, ask the user what to do
compression_threshold: 4000      # Compress the session when the token count reaches or exceeds this threshold
summarization_prompt:            # The text prompt used for creating a concise summary of the session messages
  'Summarize the discussion briefly in 200 words or less to use as a prompt for future context.'
summary_context_prompt:          # The text prompt used to include the summary of the entire session as context for the model
  'This is a summary of the chat history as a recap: '

# ---- RAG ----
# See the [RAG Docs](./docs/RAG.md) for more details.
rag_embedding_model: null        # Specifies the embedding model used for context retrieval
rag_reranker_model: null         # Specifies the reranker model used for sorting retrieved documents; Loki uses Reciprocal Rank Fusion by default
rag_top_k: 5                     # Specifies the number of documents to retrieve for answering queries
rag_chunk_size: null             # Defines the size of chunks for document processing in characters
rag_chunk_overlap: null          # Defines the overlap between chunks
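# An illustrative (commented-out) RAG setup; it assumes the `<client>:<model>` naming convention
# used for `model` at the top of this file and the ollama client defined in the clients section below:
# rag_embedding_model: ollama:nomic-embed-text
# rag_top_k: 3
# rag_chunk_size: 1500
# rag_chunk_overlap: 100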
# Defines the query structure using variables like __CONTEXT__ and __INPUT__ to tailor searches to specific needs
rag_template: |
  Answer the query based on the context while respecting the rules. (user query, some textual context and rules, all inside xml tags)

  <context>
  __CONTEXT__
  </context>

  <rules>
  - If you don't know, just say so.
  - If you are not sure, ask for clarification.
  - Answer in the same language as the user query.
  - If the context appears unreadable or of poor quality, tell the user, then answer as best you can.
  - If the answer is not in the context but you think you know the answer, explain that to the user, then answer from your own knowledge.
  - Answer directly and without using xml tags.
  </rules>

  <user_query>
  __INPUT__
  </user_query>
# Define document loaders to control how RAG and `.file`/`--file` load files of specific formats.
document_loaders:
  # You can add custom loaders using the following syntax:
  #   <file-extension>: <command-to-load-the-file>
  # Note: Use `$1` for input file and `$2` for output file. If `$2` is omitted, use stdout as output.
  pdf: 'pdftotext $1 -'                                                                 # Use pdftotext to convert a PDF file to text
  # (see https://poppler.freedesktop.org for details on how to install pdftotext)
  docx: 'pandoc --to plain $1'                                                          # Use pandoc to convert a .docx file to text
  # (see https://pandoc.org for details on how to install pandoc)
  jina: 'curl -fsSL https://r.jina.ai/$1 -H "Authorization: Bearer {{JINA_API_KEY}}"'   # Use Jina to convert a web page into text;
  # Requires a Jina API key to be added to the Loki vault
  git: >                                                                                # Use yek to load a git repository into the knowledgebase (https://github.com/bodo-run/yek)
    sh -c "yek $1 --json | jq 'map({ path: .filename, contents: .content })'"

# ---- Clients ----
# See the [Clients documentation](./docs/clients/CLIENTS.md) for more details
clients:
  # All clients have the following configuration:
  # - type: xxxx
  #   name: xxxx                                      # Optional; only used to distinguish multiple clients of the same type
  #   models:
  #     - name: xxxx                                  # Chat model
  #       max_input_tokens: 100000
  #       supports_vision: true
  #       supports_function_calling: true
  #     - name: xxxx                                  # Embedding model
  #       type: embedding
  #       default_chunk_size: 1500
  #       max_batch_size: 100
  #     - name: xxxx                                  # Reranker model
  #       type: reranker
  #   patch:                                          # Patch API calls
  #     chat_completions:                             # API type; Possible values: chat_completions, embeddings, and rerank
  #       <regex>:                                    # The regex to match model names, e.g. '.*' 'gpt-4o' 'gpt-4o|gpt-4-.*'
  #         url: ''                                   # Patch request URL
  #         body:                                     # Patch request body
  #           <json>
  #         headers:                                  # Patch request headers
  #           <key>: <value>
  #   extra:
  #     proxy: socks5://127.0.0.1:1080                # Set proxy
  #     connect_timeout: 10                           # Set the connection timeout, in seconds, for API requests
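  #
  # An illustrative (commented-out) entry following the schema above; all values are placeholders:
  # - type: openai-compatible
  #   name: my-local-gateway
  #   api_base: http://localhost:8080/v1
  #   extra:
  #     proxy: socks5://127.0.0.1:1080
  #     connect_timeout: 10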

  # See https://platform.openai.com/docs/quickstart
  - type: openai
    api_base: https://api.openai.com/v1               # Optional
    api_key: '{{OPENAI_API_KEY}}'                     # You can either hard-code or inject secrets from the Loki vault
    organization_id: org-xxx                          # Optional

  # For any platform compatible with OpenAI's API
  - type: openai-compatible
    name: ollama
    api_base: http://localhost:11434/v1
    api_key: '{{OLLAMA_API_KEY}}'                     # Optional; You can either hard-code or inject secrets from the Loki vault
    models:
      - name: deepseek-r1
        max_input_tokens: 131072
      - name: llama3.1
        max_input_tokens: 128000
        supports_function_calling: true
      - name: llama3.2-vision
        max_input_tokens: 131072
        supports_vision: true
      - name: nomic-embed-text
        type: embedding
        default_chunk_size: 1000
        max_batch_size: 50
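    # With this client defined, its models can be selected by prefixing them with the client
    # name, e.g. `model: ollama:llama3.1` (assuming the `<client>:<model>` convention used
    # for `model` at the top of this file).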

  # See https://ai.google.dev/docs
  - type: gemini
    api_base: https://generativelanguage.googleapis.com/v1beta
    api_key: '{{GEMINI_API_KEY}}'                       # You can either hard-code or inject secrets from the Loki vault
    patch:
      chat_completions:
        '.*':
          body:
            safetySettings:
              - category: HARM_CATEGORY_HARASSMENT
                threshold: BLOCK_NONE
              - category: HARM_CATEGORY_HATE_SPEECH
                threshold: BLOCK_NONE
              - category: HARM_CATEGORY_SEXUALLY_EXPLICIT
                threshold: BLOCK_NONE
              - category: HARM_CATEGORY_DANGEROUS_CONTENT
                threshold: BLOCK_NONE

  # See https://docs.anthropic.com/claude/reference/getting-started-with-the-api
  - type: claude
    api_base: https://api.anthropic.com/v1              # Optional
    api_key: '{{ANTHROPIC_API_KEY}}'                    # You can either hard-code or inject secrets from the Loki vault

  # See https://docs.mistral.ai/
  - type: openai-compatible
    name: mistral
    api_base: https://api.mistral.ai/v1
    api_key: '{{MISTRAL_API_KEY}}'                      # You can either hard-code or inject secrets from the Loki vault

  # See https://docs.x.ai/docs
  - type: openai-compatible
    name: xai
    api_base: https://api.x.ai/v1
    api_key: '{{XAI_API_KEY}}'                          # You can either hard-code or inject secrets from the Loki vault

  # See https://docs.ai21.com/docs/overview
  - type: openai-compatible
    name: ai21
    api_base: https://api.ai21.com/studio/v1
    api_key: '{{AI21_API_KEY}}'                         # You can either hard-code or inject secrets from the Loki vault

  # See https://docs.cohere.com/docs/the-cohere-platform
  - type: cohere
    api_base: https://api.cohere.ai/v2                  # Optional
    api_key: '{{COHERE_API_KEY}}'                       # You can either hard-code or inject secrets from the Loki vault

  # See https://docs.perplexity.ai/getting-started/overview
  - type: openai-compatible
    name: perplexity
    api_base: https://api.perplexity.ai
    api_key: '{{PERPLEXITY_API_KEY}}'                   # You can either hard-code or inject secrets from the Loki vault

  # See https://console.groq.com/docs/quickstart
  - type: openai-compatible
    name: groq
    api_base: https://api.groq.com/openai/v1
    api_key: '{{GROQ_API_KEY}}'                         # You can either hard-code or inject secrets from the Loki vault

  # See https://learn.microsoft.com/en-us/azure/ai-services/openai/chatgpt-quickstart
  - type: azure-openai
    api_base: https://{RESOURCE}.openai.azure.com
    api_key: '{{AZURE_OPENAI_API_KEY}}'                 # You can either hard-code or inject secrets from the Loki vault
    models:
      - name: gpt-4o                                    # Model deployment name
        max_input_tokens: 128000
        supports_vision: true
        supports_function_calling: true

  # See https://cloud.google.com/vertex-ai
  - type: vertexai
    project_id: xxx
    location: xxx
    # Specifies an Application Default Credentials (ADC) file
    # Run `gcloud auth application-default login` to initialize the ADC file
    # see https://cloud.google.com/docs/authentication/external/set-up-adc for more information
    adc_file: <gcloud-config-dir>/application_default_credentials.json  # Optional
    patch:
      chat_completions:
        'gemini-.*':
          body:
            safetySettings:
              - category: HARM_CATEGORY_HARASSMENT
                threshold: BLOCK_ONLY_HIGH
              - category: HARM_CATEGORY_HATE_SPEECH
                threshold: BLOCK_ONLY_HIGH
              - category: HARM_CATEGORY_SEXUALLY_EXPLICIT
                threshold: BLOCK_ONLY_HIGH
              - category: HARM_CATEGORY_DANGEROUS_CONTENT
                threshold: BLOCK_ONLY_HIGH

  # See https://docs.aws.amazon.com/bedrock/latest/userguide/
  - type: bedrock
    access_key_id: '{{AWS_ACCESS_KEY_ID}}'              # You can either hard-code or inject secrets from the Loki vault
    secret_access_key: '{{AWS_SECRET_ACCESS_KEY}}'      # You can either hard-code or inject secrets from the Loki vault
    region: xxx
    session_token: xxx                                  # Optional, only needed for temporary credentials

  # See https://developers.cloudflare.com/workers-ai/
  - type: openai-compatible
    name: cloudflare
    api_base: https://api.cloudflare.com/client/v4/accounts/{ACCOUNT_ID}/ai/v1
    api_key: '{{CLOUDFLARE_API_KEY}}'                    # You can either hard-code or inject secrets from the Loki vault

  # See https://cloud.baidu.com/doc/WENXINWORKSHOP/index.html
  - type: openai-compatible
    name: ernie
    api_base: https://qianfan.baidubce.com/v2
    api_key: '{{BAIDU_API_KEY}}'                        # You can either hard-code or inject secrets from the Loki vault

  # See https://dashscope.aliyun.com/
  - type: openai-compatible
    name: qianwen
    api_base: https://dashscope.aliyuncs.com/compatible-mode/v1
    api_key: '{{ALIYUN_API_KEY}}'                       # You can either hard-code or inject secrets from the Loki vault

  # See https://cloud.tencent.com/product/hunyuan
  - type: openai-compatible
    name: hunyuan
    api_base: https://api.hunyuan.cloud.tencent.com/v1
    api_key: '{{TENCENT_API_KEY}}'                      # You can either hard-code or inject secrets from the Loki vault

  # See https://platform.moonshot.cn/docs/intro
  - type: openai-compatible
    name: moonshot
    api_base: https://api.moonshot.cn/v1
    api_key: '{{MOONSHOT_API_KEY}}'                     # You can either hard-code or inject secrets from the Loki vault

  # See https://platform.deepseek.com/api-docs/
  - type: openai-compatible
    name: deepseek
    api_base: https://api.deepseek.com
    api_key: '{{DEEPSEEK_API_KEY}}'                     # You can either hard-code or inject secrets from the Loki vault

  # See https://open.bigmodel.cn/dev/howuse/introduction
  - type: openai-compatible
    name: zhipuai
    api_base: https://open.bigmodel.cn/api/paas/v4
    api_key: '{{ZHIPUAI_API_KEY}}'                      # You can either hard-code or inject secrets from the Loki vault

  # See https://platform.minimaxi.com/document/Fast%20access
  - type: openai-compatible
    name: minimax
    api_base: https://api.minimax.chat/v1
    api_key: '{{MINIMAX_API_KEY}}'                      # You can either hard-code or inject secrets from the Loki vault

  # See https://openrouter.ai/docs#quick-start
  - type: openai-compatible
    name: openrouter
    api_base: https://openrouter.ai/api/v1
    api_key: '{{OPENROUTER_API_KEY}}'                   # You can either hard-code or inject secrets from the Loki vault

  # See https://github.com/marketplace/models
  - type: openai-compatible
    name: github
    api_base: https://models.inference.ai.azure.com
    api_key: '{{GITHUB_API_KEY}}'                       # You can either hard-code or inject secrets from the Loki vault

  # See https://deepinfra.com/docs
  - type: openai-compatible
    name: deepinfra
    api_base: https://api.deepinfra.com/v1/openai
    api_key: '{{DEEPINFRA_API_KEY}}'                    # You can either hard-code or inject secrets from the Loki vault


  # ----- RAG dedicated -----

  # See https://jina.ai
  - type: openai-compatible
    name: jina
    api_base: https://api.jina.ai/v1
    api_key: '{{JINA_API_KEY}}'                         # You can either hard-code or inject secrets from the Loki vault

  # See https://docs.voyageai.com/docs/introduction
  - type: openai-compatible
    name: voyageai
    api_base: https://api.voyageai.com/v1
    api_key: '{{VOYAGEAI_API_KEY}}'                     # You can either hard-code or inject secrets from the Loki vault