# morpharch.toml — MorphArch Project Configuration
#
# Place this file at the root of your Git repository to customize
# the scoring engine. All fields are optional — omitted values use
# the defaults shown below.
#
# This file is designed to be version-controlled alongside your code,
# so your team shares the same architectural health standards.
# ─── Ignore Rules ─────────────────────────────────────────────────────────────
# Glob patterns for paths to exclude from AST parsing and scoring.
# Ignored paths are skipped during the Git tree walk, so this also
# improves scan performance for large repositories.
[ignore]
# paths = ["tests/**", "benches/**", "**/generated_*.rs", "vendor/**"]
# ─── Scoring Weights ─────────────────────────────────────────────────────────
# Relative weights for each debt component. Values are normalized to
# sum to 1.0 internally, so they don't need to sum to 100.
#
# Use these to prioritize what matters most for your project:
# - Legacy codebase? Lower cycle weight to make adoption gradual.
# - Microservices? Raise coupling weight to catch tight bindings.
# - Monolith? Raise hub weight to catch god modules early.
[scoring.weights]
# cycle = 30 # Circular dependencies (SCC analysis)
# layering = 25 # Back-edges violating layered architecture
# hub = 15 # God modules (high fan-in AND fan-out)
# coupling = 12 # Edge weight density (import concentration)
# cognitive = 10 # Graph complexity (edge excess + degree excess)
# instability = 8 # Brittle modules (Martin instability metric)
# ─── Scoring Thresholds ──────────────────────────────────────────────────────
# Fine-tune when exemptions and penalties kick in.
[scoring.thresholds]
# Hub exemption ratio: modules with fan_out/(fan_in+1) below this
# are treated as legitimate shared cores (e.g., deno_core).
# hub_exemption_ratio = 0.3
# Modules with fan-in at or below this are treated as entry-point
# composition roots (e.g., main.rs, cli/tools) — exempt from hub debt.
# entry_point_max_fan_in = 2
# Instability threshold: modules with I > this are flagged as brittle.
# I = Ce/(Ca+Ce), where Ca=fan-in, Ce=fan-out.
# brittle_instability_ratio = 0.8
# ─── Boundary Rules ──────────────────────────────────────────────────────────
# Define architectural boundaries — forbidden dependency directions.
# These are checked during `morpharch analyze` and reported as violations.
#
# Each rule says: modules matching `from` must NOT depend on modules
# matching `deny`. Uses prefix matching (glob wildcards are stripped).
# [[scoring.boundaries]]
# from = "packages/**"
# deny = ["apps/**", "cmd/**"]
# [[scoring.boundaries]]
# from = "libs/shared/**"
# deny = ["libs/feature_*/**"]
# [[scoring.boundaries]]
# from = "modules/billing/**"
# deny = ["modules/auth/**"]
# ─── Exemptions ──────────────────────────────────────────────────────────────
# Specific modules exempted from certain debt calculations.
[exemptions]
# Modules exempted from hub/god-module debt.
# Useful for intentional utility modules or framework entry points.
# hub_exempt = ["src/utils.rs", "libs/core/index.ts"]
# Modules exempted from instability debt.
# instability_exempt = ["packages/ui-kit/src/index.ts"]
# File stems treated as entry points (exempt from fragility penalties).
# These modules naturally have high fan-out and low fan-in.
# entry_point_stems = ["main", "index", "app", "lib", "mod"]
# ─── AI Configuration ────────────────────────────────────────────────────────
# Configuration for the AI Assistant panel. Supports any OpenAI-compatible
# endpoint (OpenAI, Ollama, LM Studio, etc.).
#
# Default (OpenAI):
# [ai]
# provider = "openai"
# api_key_env = "OPENAI_API_KEY"
# model = "gpt-4o-mini"
# endpoint = "https://api.openai.com/v1/chat/completions"
# stream = true # Enable streaming responses (SSE)
# max_tokens = 4096 # Maximum tokens in AI response (default: 4096)
# temperature = 0.3 # Response creativity 0.0-1.0 (default: 0.3)
# max_context_tokens = 12000 # Token budget for architecture context data
#
# Example (Local Ollama with llama3.1:8b):
# [ai]
# provider = "ollama"
# api_key_env = "" # Not needed
# model = "llama3.1:8b"
# endpoint = "http://localhost:11434/v1/chat/completions"
# stream = true # Set to false if your endpoint doesn't support SSE