# whet configuration
# Copy to ~/.whet/config.toml

[llm]
provider = "ollama" # "ollama" or "openai_compat"
model = "qwen3:8b"
base_url = "http://localhost:11434"
# api_key = "sk-..." # Optional: required for some OpenAI-compatible servers
# streaming = true # Enable streaming responses (default: false)

# OpenAI-compatible server examples:
# [llm]
# provider = "openai_compat"
# model = "gpt-3.5-turbo"
# base_url = "http://localhost:8080" # llama.cpp server
# api_key = "sk-..." # Optional
# streaming = true
# NOTE(review): the table header and key name below were missing (invalid
# `[]` / bare `= 10`) and have been reconstructed from the surrounding
# comments — verify both against whet's actual config schema.
[agent]
max_iterations = 10
# permission_mode = "default" # "default", "accept_edits", or "yolo"
# - default: Ask before file writes and command execution
# - accept_edits: Auto-approve file edits, ask for shell/git
# - yolo: No confirmation needed
# web_enabled = false # Enable web_fetch and web_search tools (requires internet)
# NOTE(review): the table header and key name below were missing (invalid
# `[]` / bare `= "..."`) and have been reconstructed from the value —
# verify both against whet's actual config schema.
# TOML performs no `~` expansion; the application must expand this path.
[memory]
path = "~/.whet/memory.db"

# MCP (Model Context Protocol) servers
# [[mcp.servers]]
# name = "filesystem"
# command = "npx"
# args = ["-y", "@modelcontextprotocol/server-filesystem", "/tmp"]