[llm]
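# Fallback provider, model, and sampling defaults. default_provider names one
# of the [llm.<provider>] tables below; temperature and max_tokens are
# presumably applied whenever a request does not set its own values.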
default_provider = "llama"
default_model = "qwen/qwen3-30b"
default_temperature = 0.7
default_max_tokens = 2048
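
# Hosted OpenAI endpoint (the official OpenAI API base URL).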
[llm.openai]
api_base = "https://api.openai.com/v1"
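
# Local OpenAI-compatible server; port 1234 is the LM Studio default, so this
# likely targets a locally served model. insecure = true would presumably skip
# TLS certificate verification; leave it false unless the server presents a
# self-signed certificate.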
[llm.llama]
base_url = "http://localhost:1234/v1"
insecure = false
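
# Retry policy: exponential backoff with jitter. Assuming the usual formula
# delay(n) = min(initial_delay_ms * backoff_multiplier^(n-1), max_delay_ms),
# randomized by +/- jitter_factor, and that max_attempts counts the first try,
# a failing call waits ~100 ms and then ~200 ms before giving up.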
[retry]
max_attempts = 3
initial_delay_ms = 100
max_delay_ms = 30000
backoff_multiplier = 2.0
jitter_factor = 0.1
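
# Deadlines: 300000 ms = 5 minutes for the whole request, 30000 ms = 30 seconds
# for the first response (presumably the first streamed byte or token).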
[timeout]
total_ms = 300000
first_response_ms = 30000
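
# Log verbosity, destination directory (relative to the working directory,
# presumably), and whether to emit structured JSON lines instead of plain text.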
[logging]
level = "info"
directory = "output"
json_format = false
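
# Concurrency cap for workflow tasks and a limit on consecutive tool-call
# rounds per request (assumed semantics: this bounds agent/tool loops).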
[workflow]
max_concurrent = 10
max_tool_iterations = 5