# Agent Runtime Configuration File
# This file uses YAML format and can be loaded via RuntimeConfig::from_yaml_file()
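
# Illustrative loading sketch (Rust, using the constructor named above; the
# file path and `?` error propagation are assumptions, not part of this file):
#   let config = RuntimeConfig::from_yaml_file("runtime.yaml")?;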

llm:
  # Default LLM provider
  default_provider: llama
  default_model: qwen/qwen3-30b
  default_temperature: 0.7
  default_max_tokens: 2048

  # OpenAI configuration
  openai:
    # API key can also be set via the OPENAI_API_KEY environment variable
    # api_key: sk-...
    api_base: https://api.openai.com/v1

  # Llama.cpp configuration (for local inference)
  llama:
    base_url: http://localhost:1234/v1
    insecure: false

retry:
  # Retry policy settings
  max_attempts: 3
  initial_delay_ms: 100
  max_delay_ms: 30000
  backoff_multiplier: 2.0
  jitter_factor: 0.1
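
# Worked example of the schedule above, assuming the conventional
# exponential-backoff formula
#   delay(n) = min(initial_delay_ms * backoff_multiplier^(n-1), max_delay_ms)
# with each delay then randomized by +/- jitter_factor:
#   attempt 1 fails -> wait ~100ms (90-110ms with 10% jitter)
#   attempt 2 fails -> wait ~200ms (180-220ms)
#   attempt 3 fails -> give up (max_attempts reached)
# The 30000ms cap would only kick in on much longer retry runs.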

timeout:
  # Timeout settings in milliseconds
  total_ms: 300000          # 5 minutes total timeout
  first_response_ms: 30000  # 30 seconds for first response
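
# Assumed semantics: first_response_ms bounds the wait for the first token of
# a response, while total_ms caps the entire request end to end.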

logging:
  # Logging configuration
  level: info        # trace, debug, info, warn, error
  directory: output
  json_format: false

workflow:
  # Workflow execution settings
  max_concurrent: 10
  max_tool_iterations: 5
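
# Assumed semantics: max_concurrent caps simultaneously running workflows;
# max_tool_iterations caps tool-call rounds within a single agent turn.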