# Microscope Memory Configuration Example
# Copy to config.toml and adjust paths.
# NOTE(review): table/key names reconstructed from the descriptive comments —
# verify each against the config loader before relying on this file.

[paths]
# Directory containing memory layer JSON files
layers_dir = "./layers"
# Output directory for binary index files
output_dir = "./output"
# Temporary directory for processing
temp_dir = "./tmp"

[index]
# NOTE(review): key names reconstructed from comments — verify against loader.
# Block size in characters (fixed viewport)
block_size = 256
# Maximum depth levels (0-8)
max_depth = 8
# Header size in bytes
header_size = 32

[search]
# NOTE(review): key names reconstructed from comments — verify against loader.
# Default number of results
default_limit = 10
# Zoom weight for 4D search
zoom_weight = 2.0
# Keyword boost factor
keyword_boost = 0.1
# Semantic blending weight (0.0 = pure hash coords, 1.0 = pure embedding coords)
semantic_weight = 0.0
# Emotional bias warp strength (0.0 = disabled, 0.0-1.0 = warp toward emotional centroid)
emotional_warp = 0.0

[layers]
# NOTE(review): key name reconstructed from comments — verify against loader.
# Configure which cognitive layers to include
include = [
    "long_term",
    "short_term",
    "associative",
    "emotional",
    "relational",
    "reflections",
    "crypto_chain",
    "echo_cache",
    "rust_state",
]

[performance]
# NOTE(review): key names reconstructed from comments — verify against loader.
# Use memory mapping
use_mmap = true
# Cache size in MB
cache_size_mb = 64
# Number of parallel workers for building
workers = 4
# Use GPU acceleration for soft search (requires --features gpu)
use_gpu = false
# Zstd compression for data.bin (requires --features compression)
use_compression = false
# Query cache TTL in seconds
query_cache_ttl = 300

[embedding]
# NOTE(review): key names reconstructed from comments — verify against loader.
# Provider: "mock" (hash-based), "candle" (BERT), or "onnx" (ONNX Runtime)
provider = "mock"
# Model name (for candle/onnx providers)
model = "sentence-transformers/all-MiniLM-L6-v2"
# Embedding dimension
dimension = 384
# Max depth to embed (deeper = more vectors, more disk)
max_embed_depth = 4

[server]
# NOTE(review): key names reconstructed from comments — verify against loader.
# HTTP server port
port = 6060
# CORS origin
cors_origin = "*"

[logging]
# NOTE(review): key names reconstructed from comments — verify against loader.
# Log level: trace, debug, info, warn, error
level = "info"
# Log file path (optional)
file = "microscope.log"

# ─── Federation (optional) ───────────────────────────
# Uncomment and configure to enable multi-index federated search.
# Each federated index gets its own section.
#
# [[federation.indices]]
# name = "project_a"
# config_path = "/path/to/project_a/config.toml"
# weight = 1.0
#
# [[federation.indices]]
# name = "project_b"
# config_path = "/path/to/project_b/config.toml"
# weight = 0.8