# slippy-cli 0.1.0
#
# AI Linter for Rust projects
# Slippy configuration file.
#
# Place as one of (searched in order per directory):
#   slippy.toml
#   .slippy.toml
#   .slippy/config.toml
#
# Slippy searches the current directory and all parent directories.
# The innermost (closest to cwd) config file wins.
# Falls back to ~/.slippy/config.toml (%USERPROFILE%\.slippy\config.toml on
# Windows) if no project-level config is found.

# Per-lint severity levels. "forbid" presumably makes this lint a hard
# error — TODO confirm the accepted level names against the slippy docs.
[lints]
unnecessary_contains_in_loop = "forbid"

# ---------------------------------------------------------------------------
# LLM Provider
#
# Two sections are required: [llms.fast] and [llms.best].
# Each section independently selects a backend and model.
# This lets you mix providers — e.g. a local Ollama model for fast scanning
# and a cloud OpenAI model for best-quality analysis.
# ---------------------------------------------------------------------------

# --- Ollama (requires the "ollama" feature, default) -----------------------

# Model used for fast, low-latency inference. It's used for general scanning,
# without requiring precision.
# Model used for fast, low-latency inference. It's used for general scanning,
# without requiring precision.
[llms.fast]
type = "Ollama"
url = "http://localhost:11434" # optional; this is the stock Ollama endpoint
model = "nemotron-3-nano:4b"   # any model name your Ollama install serves

# Model used for highest-quality inference. It's used for detailed analysis
# of potential issues, and extracting token positions for annotations.
# Model used for highest-quality inference. It's used for detailed analysis
# of potential issues, and extracting token positions for annotations.
# `url` is omitted here; presumably it defaults to the same local Ollama
# endpoint as above — TODO confirm the default against the slippy docs.
[llms.best]
type = "Ollama"
model = "glm-4.7-flash:q4_K_M"

# --- OpenAI (requires the "openai" feature) --------------------------------

# [llms.fast]
# # Base URL of the OpenAI-compatible API.
# url = "https://api.openai.com" # optional, also works with compatible APIs
# # API key for authentication. Falls back to OPENAI_API_KEY env var.
# api_key = "sk-..."
# type = "OpenAI"
# model = { model = "gpt-5.4-mini", effort = "none", service_tier = "flex" }

# [llms.best]
# type = "OpenAI"
# model = "gpt-5.4"