# Slippy configuration file.
#
# Place as one of (searched in order per directory):
# slippy.toml
# .slippy.toml
# .slippy/config.toml
#
# Slippy searches the current directory and all parent directories.
# The innermost (closest to cwd) config file wins.
# Falls back to ~/.slippy/config.toml (%USERPROFILE%\.slippy\config.toml on
# Windows) if no project-level config is found.
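#
# For example, with the current directory at /home/you/project/crate (a
# hypothetical path, for illustration only), candidates are checked in this
# order:
#   /home/you/project/crate/slippy.toml
#   /home/you/project/crate/.slippy.toml
#   /home/you/project/crate/.slippy/config.toml
#   /home/you/project/slippy.toml ... (and so on for each parent directory)
#   ~/.slippy/config.toml (fallback)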
[]
= "forbid"
# ---------------------------------------------------------------------------
# LLM Provider
#
# Two sections are required: [llms.fast] and [llms.best].
# Each section independently selects a backend and model.
# This lets you mix providers — e.g. a local Ollama model for fast scanning
# and a cloud OpenAI model for best-quality analysis. A commented-out mixed
# example appears at the end of this file.
# ---------------------------------------------------------------------------
# --- Ollama (requires the "ollama" feature, default) -----------------------
# Model used for fast, low-latency inference. It handles general scanning
# that doesn't require high precision.
[llms.fast]
type = "Ollama"
url = "http://localhost:11434" # optional
model = "nemotron-3-nano:4b"
# Model used for highest-quality inference. It's used for detailed analysis
# of potential issues and for extracting token positions for annotations.
[llms.best]
type = "Ollama"
model = "glm-4.7-flash:q4_K_M"
# --- OpenAI (requires the "openai" feature) --------------------------------
# [llms.fast]
# type = "OpenAI"
# # Base URL of the OpenAI-compatible API.
# url = "https://api.openai.com" # optional, also works with compatible APIs
# # API key for authentication. Falls back to OPENAI_API_KEY env var.
# api_key = "sk-..."
# model = { model = "gpt-5.4-mini", effort = "none", service_tier = "flex" }
# [llms.best]
# type = "OpenAI"
# model = "gpt-5.4"
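# --- Mixed providers (illustrative sketch) ----------------------------------
# The mixed setup mentioned above, combining the two backends shown in this
# file. This assumes both the "ollama" and "openai" features are enabled;
# model names are the same placeholders used earlier.
# [llms.fast]
# type = "Ollama"
# url = "http://localhost:11434"
# model = "nemotron-3-nano:4b"
# [llms.best]
# type = "OpenAI"
# api_key = "sk-..." # or rely on the OPENAI_API_KEY env var
# model = "gpt-5.4"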