# Selfware configuration
# Copy this file to selfware.toml and adjust values for your setup.
# Set this to your OpenAI-compatible API endpoint (e.g., local vLLM/sglang server, ngrok tunnel, etc.)
# NOTE(review): the original lines were bare `= value` pairs with the key names
# stripped (invalid TOML). The names below are reconstructed from the comment
# above and the values — TODO: confirm against selfware's config loader.
api_base = "http://localhost:8080/v1"
model = "your-model-name-here"
# Context window size in tokens (98304 = 96 * 1024).
context_length = 98_304
# NOTE(review): the header was a bare `[]` (invalid TOML); the table and key
# names below are reconstructed from the glob values — TODO: confirm against
# selfware's config loader.
[paths]
# Globs the tool may access.
allowed = ["./**", "~/**"]
# Globs that are blocked — presumably takes precedence over `allowed`; verify.
denied = ["**/.env", "**/secrets/**", "**/.ssh/**", "**/target/**"]
# NOTE(review): the header was a bare `[]` (invalid TOML); the table and the
# first two key names below are guesses from the values — TODO: confirm
# against selfware's config loader.
[tools]
# presumably a cap on captured tool/command output (lines or KB) — verify
max_output_lines = 500
# presumably a timeout in seconds (600 = 10 min) — verify
timeout_secs = 600
# Enable native function calling (requires backend support like sglang --tool-call-parser)
native_function_calling = true
# NOTE(review): the header was a bare `[]` (invalid TOML) and every key name
# was stripped. The original names are unrecoverable from this copy; the names
# below are PLACEHOLDERS shaped by the value types only — TODO: restore the
# real names from selfware's config schema before using this file.
[session]
enabled = true
max_iterations = 10
# presumably an interval or limit in seconds (300 = 5 min) — verify
interval_secs = 300
auto_continue = true
max_attempts = 3
# NOTE(review): the header was a bare `[]` (invalid TOML); names reconstructed
# from the values (a small count plus two millisecond-looking durations) —
# TODO: confirm against selfware's config loader.
[retry]
max_retries = 5
# presumably milliseconds — verify
initial_backoff_ms = 1_000
max_backoff_ms = 60_000