# microscope-memory 0.6.1
#
# Pure binary cognitive memory engine. Zero-JSON, mmap-based,
# hierarchical memory architecture. See the project documentation
# for details.
# Microscope Memory Configuration Example
# Copy to config.toml and adjust paths.

[paths]
# Directory containing the memory layer JSON source files that the
# index is built from (the built index itself is binary).
# NOTE(review): relative paths are presumably resolved against the
# working directory — verify against the loader.
layers_dir = "./layers"

# Output directory for the generated binary index files
output_dir = "./output"

# Temporary directory used during processing/builds
temp_dir = "./tmp"

[index]
# Block size in characters — the fixed "viewport" each block covers
block_size = 256

# Maximum number of depth levels; valid range is 0-8
max_depth = 8

# Index file header size in bytes
header_size = 32

[search]
# Default number of results returned per query
default_k = 10

# Zoom-axis weight used by the 4D search
zoom_weight = 2.0

# Keyword boost factor
keyword_boost = 0.1

# Semantic blending weight between coordinate sources:
# 0.0 = pure hash coords, 1.0 = pure embedding coords;
# values in between blend the two.
semantic_weight = 0.0

# Emotional bias warp strength toward the emotional centroid:
# 0.0 disables the warp; values up to 1.0 warp more strongly.
emotional_bias_weight = 0.0

[memory_layers]
# Cognitive layers to include in the build; remove an entry to
# exclude that layer from the index.
layers = [
    "long_term",
    "short_term",
    "associative",
    "emotional",
    "relational",
    "reflections",
    "crypto_chain",
    "echo_cache",
    "rust_state",
]

[performance]
# Use memory mapping (mmap) for index access
use_mmap = true

# Cache size in MB
cache_size = 64

# Number of parallel workers used when building the index
build_workers = 4

# Use GPU acceleration for soft search
# (requires building with --features gpu)
use_gpu = false

# Zstd compression for data.bin
# (requires building with --features compression)
compression = false

# Query cache time-to-live, in seconds
cache_ttl_secs = 300

[embedding]
# Embedding provider: "mock" (hash-based), "candle" (BERT), or
# "onnx" (ONNX Runtime)
provider = "mock"

# Model name (used by the candle/onnx providers)
model = "sentence-transformers/all-MiniLM-L6-v2"

# Embedding dimension (384 matches all-MiniLM-L6-v2's output;
# keep in sync with the model above)
dim = 384

# Maximum depth to embed (deeper = more vectors = more disk usage)
max_depth = 4

[server]
# HTTP server listen port
port = 6060

# CORS allowed origin. "*" permits requests from ANY origin —
# restrict this in production deployments.
cors_origin = "*"

[logging]
# Log level: trace, debug, info, warn, error
level = "info"

# Log file path (optional; presumably logging falls back to stdout
# when omitted — verify against the logger setup)
file = "microscope.log"

# Federation (optional)
# Uncomment and configure to enable multi-index federated search.
# Each federated index gets its own section.
#
# [[federation.indices]]
# name = "project_a"
# config_path = "/path/to/project_a/config.toml"
# weight = 1.0
#
# [[federation.indices]]
# name = "project_b"
# config_path = "/path/to/project_b/config.toml"
# weight = 0.8