use std::path::{Path, PathBuf};
use std::process;
use anyhow::{Context, Result};
use clap::{Parser, Subcommand, ValueEnum};
use rayon::prelude::*;
use walkdir::WalkDir;
use boundary_core::analyzer::LanguageAnalyzer;
use boundary_core::config::Config;
use boundary_core::graph::DependencyGraph;
use boundary_core::layer::LayerClassifier;
use boundary_core::metrics;
use boundary_core::pipeline::{self, reclassify_infra_handlers, AnalysisPipeline};
use boundary_core::types::{DependencyKind, Severity};
use boundary_go::GoAnalyzer;
use boundary_java::JavaAnalyzer;
use boundary_report::{json, text};
use boundary_rust::RustAnalyzer;
use boundary_typescript::TypeScriptAnalyzer;
// Report output format selected with `--format`.
// NOTE: plain `//` comments are used on purpose — `///` doc comments on a
// clap `ValueEnum` would become user-visible help text and change CLI output.
#[derive(Debug, Clone, Copy, ValueEnum)]
enum OutputFormat {
    // Human-readable plain-text report (the default).
    Text,
    // Machine-readable JSON; `--compact` controls its formatting.
    Json,
    // Markdown, e.g. for embedding in docs or PR comments.
    Markdown,
}
// Top-level argument parser for the `boundary` binary (clap derive).
// Plain `//` comments are used on purpose — `///` doc comments on clap
// derive items become user-visible help text.
#[derive(Parser)]
#[command(name = "boundary")]
#[command(about = "Analyze and score DDD/Hexagonal architecture boundaries")]
#[command(version)]
struct Cli {
    // The subcommand to execute; see `Commands`.
    #[command(subcommand)]
    command: Commands,
}
// All `boundary` subcommands. Common flags (`path`, `--config`, `--format`,
// `--languages`, …) are repeated per variant because clap derive declares
// arguments per subcommand. Plain `//` comments are deliberate: `///` would
// turn into help text and change CLI output.
#[derive(Subcommand)]
enum Commands {
    // Analyze a project and print a report (or only its score).
    Analyze {
        // Directory to analyze.
        path: PathBuf,
        // Explicit config file; otherwise discovered from the project root.
        #[arg(short, long)]
        config: Option<PathBuf>,
        // Report output format.
        #[arg(long, value_enum, default_value_t = OutputFormat::Text)]
        format: OutputFormat,
        // Compact (non-pretty) output where the format supports it (JSON).
        #[arg(long)]
        compact: bool,
        // Comma-separated language override (e.g. "go,rust").
        #[arg(long, value_delimiter = ',')]
        languages: Option<Vec<String>>,
        // Reuse/update the on-disk per-file analysis cache.
        #[arg(long)]
        incremental: bool,
        // Analyze each service separately and emit a multi-service report.
        #[arg(long)]
        per_service: bool,
        // Print only the score line(s), no full report.
        #[arg(long)]
        score_only: bool,
        // Comma-separated rule ids whose violations are dropped.
        #[arg(long, value_delimiter = ',')]
        ignore: Option<Vec<String>>,
    },
    // Analyze and exit non-zero when violations (or regressions) are found;
    // intended for CI gates.
    Check {
        // Directory to check.
        path: PathBuf,
        // Minimum severity that causes failure (parsed into `Severity`).
        #[arg(long, default_value = "error")]
        fail_on: String,
        // Explicit config file; otherwise discovered from the project root.
        #[arg(short, long)]
        config: Option<PathBuf>,
        // Report output format.
        #[arg(long, value_enum, default_value_t = OutputFormat::Text)]
        format: OutputFormat,
        // Compact (non-pretty) output where the format supports it (JSON).
        #[arg(long)]
        compact: bool,
        // Comma-separated language override.
        #[arg(long, value_delimiter = ',')]
        languages: Option<Vec<String>>,
        // Save a snapshot of this run for trend tracking.
        #[arg(long)]
        track: bool,
        // Fail when the score/violations regress versus the last snapshot.
        #[arg(long)]
        no_regression: bool,
        // Reuse/update the on-disk per-file analysis cache.
        #[arg(long)]
        incremental: bool,
        // Check each service separately.
        #[arg(long)]
        per_service: bool,
        // Comma-separated rule ids whose violations are dropped.
        #[arg(long, value_delimiter = ',')]
        ignore: Option<Vec<String>>,
    },
    // Write a default `.boundary.toml` into the current directory.
    Init {
        // Overwrite an existing `.boundary.toml`.
        #[arg(long)]
        force: bool,
    },
    // Render an architecture diagram from a fresh analysis.
    Diagram {
        // Directory to analyze.
        path: PathBuf,
        // Explicit config file; otherwise discovered from the project root.
        #[arg(short, long)]
        config: Option<PathBuf>,
        // Which diagram to emit; see `DiagramType`.
        #[arg(long, value_enum, default_value_t = DiagramType::Layers)]
        diagram_type: DiagramType,
        // Comma-separated language override.
        #[arg(long, value_delimiter = ',')]
        languages: Option<Vec<String>>,
    },
    // Deep-dive report for a single module.
    Forensics {
        // Module directory to inspect.
        path: PathBuf,
        // Project root override; otherwise auto-detected from `path`.
        #[arg(long)]
        project_root: Option<PathBuf>,
        // Explicit config file; otherwise discovered from the project root.
        #[arg(short, long)]
        config: Option<PathBuf>,
        // Comma-separated language override.
        #[arg(long, value_delimiter = ',')]
        languages: Option<Vec<String>>,
        // Write the report to this file instead of stdout.
        #[arg(short, long)]
        output: Option<PathBuf>,
    },
}
// Diagram flavor for the `diagram` subcommand (`--diagram-type`).
// Plain `//` comments are deliberate — `///` on a `ValueEnum` becomes help
// text. The `Dot*` variants use the `boundary_report::dot` backend
// (presumably Graphviz DOT output — confirm in boundary_report).
#[derive(Debug, Clone, Copy, ValueEnum)]
enum DiagramType {
    // Layer diagram via `boundary_report::diagram`.
    Layers,
    // Dependency-flow diagram via `boundary_report::diagram`.
    Dependencies,
    // Layer diagram via `boundary_report::dot`.
    Dot,
    // Dependency-flow diagram via `boundary_report::dot`.
    DotDependencies,
}
/// Entry point: parse the command line, dispatch to the matching subcommand
/// handler, and exit with status 2 when the handler reports an error.
fn main() {
    let args = Cli::parse();

    let outcome = match args.command {
        Commands::Analyze { path, config, format, compact, languages, incremental, per_service, score_only, ignore } => {
            cmd_analyze(
                &path,
                config.as_deref(),
                format,
                compact,
                languages.as_deref(),
                incremental,
                per_service,
                score_only,
                ignore.as_deref(),
            )
        }
        Commands::Check { path, fail_on, config, format, compact, languages, track, no_regression, incremental, per_service, ignore } => {
            cmd_check(
                &path,
                &fail_on,
                config.as_deref(),
                format,
                compact,
                languages.as_deref(),
                track,
                no_regression,
                incremental,
                per_service,
                ignore.as_deref(),
            )
        }
        Commands::Init { force } => cmd_init(force),
        Commands::Diagram { path, config, diagram_type, languages } => {
            cmd_diagram(&path, config.as_deref(), diagram_type, languages.as_deref())
        }
        Commands::Forensics { path, project_root, config, languages, output } => {
            cmd_forensics(
                &path,
                project_root.as_deref(),
                config.as_deref(),
                languages.as_deref(),
                output.as_deref(),
            )
        }
    };

    // `{e:#}` renders the full anyhow context chain on one line.
    if let Err(err) = outcome {
        eprintln!("Error: {err:#}");
        process::exit(2);
    }
}
/// Ensure `path` names an existing directory; otherwise return a descriptive
/// error (non-existence is reported before non-directory, as before).
fn validate_path(path: &Path) -> Result<()> {
    match (path.exists(), path.is_dir()) {
        (false, _) => anyhow::bail!("path '{}' does not exist", path.display()),
        (true, false) => anyhow::bail!("path '{}' is not a directory", path.display()),
        (true, true) => Ok(()),
    }
}
/// Run the `analyze` subcommand: analyze the project at `path` and print a
/// report (or just a score line) in the requested format.
///
/// `per_service` switches to a multi-service analysis; `score_only` suppresses
/// the full report; `ignore` drops violations by rule id before reporting.
#[allow(clippy::too_many_arguments)]
fn cmd_analyze(
    path: &Path,
    config_path: Option<&Path>,
    format: OutputFormat,
    compact: bool,
    languages: Option<&[String]>,
    incremental: bool,
    per_service: bool,
    score_only: bool,
    ignore: Option<&[String]>,
) -> Result<()> {
    validate_path(path)?;
    let project_root = resolve_project_root(path, config_path);
    let config = load_config(&project_root, config_path)?;

    // Per-service mode: analyze each service independently and emit a
    // multi-service report (or one score line per service).
    if per_service {
        let analyzers = create_analyzers(path, &config, languages)?;
        let multi = AnalysisPipeline::new(analyzers, config).analyze_per_service(path)?;
        if score_only {
            for svc in &multi.services {
                print_score_only(&svc.service_name, svc.result.score.as_ref(), format);
            }
        } else {
            let rendered = match format {
                OutputFormat::Text => text::format_multi_service_report(&multi),
                OutputFormat::Json => json::format_multi_service_report(&multi, compact),
                OutputFormat::Markdown => {
                    boundary_report::markdown::format_multi_service_report(&multi)
                }
            };
            println!("{rendered}");
        }
        return Ok(());
    }

    let mut analysis = run_analysis(path, &project_root, &config, languages, incremental)?;
    filter_ignored_violations(&mut analysis.result, ignore);

    if score_only {
        // Label the score with the directory name; fall back to the whole path
        // when there is no final component (e.g. "/" or "..").
        let module_name = match path.file_name() {
            Some(name) => name.to_string_lossy().into_owned(),
            None => path.to_string_lossy().into_owned(),
        };
        print_score_only(&module_name, analysis.result.score.as_ref(), format);
        return Ok(());
    }

    let rendered = match format {
        OutputFormat::Text => text::format_report(&analysis.result),
        OutputFormat::Json => json::format_report(&analysis.result, compact),
        OutputFormat::Markdown => boundary_report::markdown::format_report(&analysis.result),
    };
    println!("{rendered}");
    Ok(())
}
/// Print a one-line score summary for `module` in the requested format.
///
/// A missing score is reported as all zeros rather than as an error. For JSON
/// output the module name is now escaped so the line stays valid JSON even
/// when the directory name contains quotes, backslashes, or control
/// characters (previously it was interpolated verbatim, producing malformed
/// JSON for such names). Markdown falls back to the plain-text line.
fn print_score_only(
    module: &str,
    score: Option<&metrics::ArchitectureScore>,
    format: OutputFormat,
) {
    let overall = score.map_or(0.0, |s| s.overall);
    let presence = score.map_or(0.0, |s| s.structural_presence);
    let conformance = score.map_or(0.0, |s| s.layer_conformance);
    let compliance = score.map_or(0.0, |s| s.dependency_compliance);
    let iface = score.map_or(0.0, |s| s.interface_coverage);
    match format {
        OutputFormat::Json => {
            let module = escape_json_string(module);
            println!(
                "{{\"module\":\"{module}\",\"overall\":{overall:.1},\"structural_presence\":{presence:.1},\"layer_conformance\":{conformance:.1},\"dependency_compliance\":{compliance:.1},\"interface_coverage\":{iface:.1}}}"
            );
        }
        OutputFormat::Text | OutputFormat::Markdown => {
            println!(
                "{module}: {overall:.1}/100 (Presence: {presence:.1}, Conformance: {conformance:.1}, Compliance: {compliance:.1}, Interfaces: {iface:.1})"
            );
        }
    }
}

/// Escape `s` for embedding inside a JSON string literal (RFC 8259: `"` and
/// `\` must be escaped; control characters below U+0020 use `\uXXXX`).
fn escape_json_string(s: &str) -> String {
    use std::fmt::Write;
    let mut out = String::with_capacity(s.len());
    for c in s.chars() {
        match c {
            '"' => out.push_str("\\\""),
            '\\' => out.push_str("\\\\"),
            '\n' => out.push_str("\\n"),
            '\r' => out.push_str("\\r"),
            '\t' => out.push_str("\\t"),
            c if (c as u32) < 0x20 => {
                // write! into a String cannot fail; ignore the fmt::Result.
                let _ = write!(out, "\\u{:04x}", c as u32);
            }
            c => out.push(c),
        }
    }
    out
}
#[allow(clippy::too_many_arguments)]
fn cmd_check(
path: &Path,
fail_on_str: &str,
config_path: Option<&Path>,
format: OutputFormat,
compact: bool,
languages: Option<&[String]>,
track: bool,
no_regression: bool,
incremental: bool,
per_service: bool,
ignore: Option<&[String]>,
) -> Result<()> {
validate_path(path)?;
let project_root = resolve_project_root(path, config_path);
let config = load_config(&project_root, config_path)?;
let fail_on: Severity = fail_on_str.parse()?;
if per_service {
let analyzers = create_analyzers(path, &config, languages)?;
let pipeline = AnalysisPipeline::new(analyzers, config);
let multi = pipeline.analyze_per_service(path)?;
let report = match format {
OutputFormat::Text => text::format_multi_service_report(&multi),
OutputFormat::Json => json::format_multi_service_report(&multi, compact),
OutputFormat::Markdown => {
boundary_report::markdown::format_multi_service_report(&multi)
}
};
println!("{report}");
let has_failures = multi
.services
.iter()
.any(|s| s.result.violations.iter().any(|v| v.severity >= fail_on));
if has_failures {
process::exit(1);
}
return Ok(());
}
let mut analysis = run_analysis(path, &project_root, &config, languages, incremental)?;
filter_ignored_violations(&mut analysis.result, ignore);
if track {
boundary_core::evolution::save_snapshot(path, &analysis.result)?;
}
if no_regression {
if let Some(trend) = boundary_core::evolution::check_regression(path, &analysis.result)? {
let (report, _) = match format {
OutputFormat::Text => text::format_check(&analysis.result, fail_on),
OutputFormat::Json => json::format_check(&analysis.result, fail_on, compact),
OutputFormat::Markdown => {
boundary_report::markdown::format_check(&analysis.result, fail_on)
}
};
println!("{report}");
eprintln!("Architecture regression detected!");
eprintln!(
" Score: {:.1} -> {:.1} ({:+.1})",
trend.previous_score, trend.current_score, trend.score_delta
);
eprintln!(
" Violations: {} -> {} ({:+})",
trend.previous_violations, trend.current_violations, trend.violation_delta
);
for rt in &trend.rule_trends {
if rt.delta != 0 {
eprintln!(
" {}: {} -> {} ({:+})",
rt.rule_id, rt.previous_count, rt.current_count, rt.delta
);
}
}
process::exit(1);
}
}
let (report, passed) = match format {
OutputFormat::Text => text::format_check(&analysis.result, fail_on),
OutputFormat::Json => json::format_check(&analysis.result, fail_on, compact),
OutputFormat::Markdown => {
boundary_report::markdown::format_check(&analysis.result, fail_on)
}
};
println!("{report}");
if !passed {
process::exit(1);
}
Ok(())
}
fn cmd_init(force: bool) -> Result<()> {
let target = PathBuf::from(".boundary.toml");
if target.exists() && !force {
anyhow::bail!(".boundary.toml already exists. Use --force to overwrite.");
}
std::fs::write(&target, Config::default_toml())?;
println!("Created .boundary.toml with default configuration.");
Ok(())
}
/// Run the `diagram` subcommand: analyze the project and print the requested
/// diagram for its dependency graph to stdout.
fn cmd_diagram(
    path: &Path,
    config_path: Option<&Path>,
    diagram_type: DiagramType,
    languages: Option<&[String]>,
) -> Result<()> {
    validate_path(path)?;
    let project_root = resolve_project_root(path, config_path);
    let config = load_config(&project_root, config_path)?;
    // Diagrams always run a fresh (non-incremental) analysis.
    let analysis = run_analysis(path, &project_root, &config, languages, false)?;
    let graph = &analysis.graph;
    let rendered = match diagram_type {
        DiagramType::Layers => boundary_report::diagram::generate_layer_diagram(graph),
        DiagramType::Dependencies => boundary_report::diagram::generate_dependency_flow(graph),
        DiagramType::Dot => boundary_report::dot::generate_layer_diagram(graph),
        DiagramType::DotDependencies => boundary_report::dot::generate_dependency_flow(graph),
    };
    println!("{rendered}");
    Ok(())
}
/// Run the `forensics` subcommand: build a deep-dive report for a single
/// module and either print it or write it to `output_path`.
fn cmd_forensics(
    module_path: &Path,
    project_root_override: Option<&Path>,
    config_path: Option<&Path>,
    languages: Option<&[String]>,
    output_path: Option<&Path>,
) -> Result<()> {
    validate_path(module_path)?;
    // Canonicalize so root detection and prefix-stripping behave consistently
    // regardless of how the user spelled the path.
    let module_path = module_path
        .canonicalize()
        .with_context(|| format!("failed to resolve path '{}'", module_path.display()))?;

    let project_root = match project_root_override {
        Some(root) => root.to_path_buf(),
        None => pipeline::find_project_root(&module_path)
            .unwrap_or_else(|| module_path.to_path_buf()),
    };
    validate_path(&project_root)?;

    let config = load_config(&project_root, config_path)?;
    let analyzers = create_analyzers(&project_root, &config, languages)?;
    let full_analysis =
        AnalysisPipeline::new(analyzers, config).analyze_module(&module_path, &project_root)?;
    let forensics =
        boundary_core::forensics::build_forensics(&full_analysis, &module_path, &project_root);
    let report = boundary_report::forensics::format_forensics_report(&forensics);

    match output_path {
        Some(out_path) => {
            std::fs::write(out_path, &report)
                .with_context(|| format!("failed to write output to {}", out_path.display()))?;
            // Status note goes to stderr so stdout stays clean for piping.
            eprintln!("Forensics report written to {}", out_path.display());
        }
        None => println!("{report}"),
    }
    Ok(())
}
/// Drop every violation whose rule id appears in `ignore`; a `None` ignore
/// list leaves the result untouched.
fn filter_ignored_violations(result: &mut metrics::AnalysisResult, ignore: Option<&[String]>) {
    let Some(rules) = ignore else { return };
    result.violations.retain(|violation| {
        let rule_id = violation.kind.rule_id();
        !rules.iter().any(|r| r == rule_id.as_str())
    });
}
/// Load configuration: an explicitly given config file must load successfully,
/// while discovery from the project directory falls back to defaults.
fn load_config(project_path: &Path, config_path: Option<&Path>) -> Result<Config> {
    if let Some(explicit) = config_path {
        Config::load(explicit)
    } else {
        Ok(Config::load_or_default(project_path))
    }
}
/// Determine the project root: the directory containing an explicitly given
/// config file (when it exists), otherwise auto-detection from the analysis
/// path, falling back to the analysis path itself.
fn resolve_project_root(analysis_path: &Path, config_path: Option<&Path>) -> PathBuf {
    let config_dir = config_path
        .and_then(Path::parent)
        .filter(|dir| dir.exists());
    match config_dir {
        Some(dir) => dir.to_path_buf(),
        None => pipeline::find_project_root(analysis_path)
            .unwrap_or_else(|| analysis_path.to_path_buf()),
    }
}
/// Everything `run_analysis` produces: the aggregate metrics plus the
/// dependency graph they were derived from (the graph is also consumed
/// directly by the `diagram` subcommand).
pub struct FullAnalysis {
    // Aggregate analysis result (violations, score, …) from `metrics::build_result`.
    pub result: metrics::AnalysisResult,
    // Component/dependency graph accumulated during the file walk.
    pub graph: DependencyGraph,
}
/// A raw dependency bundled with its layer classification, as built inside
/// `run_analysis`. Tuple fields, in order:
///   1. the dependency itself,
///   2. layer of the *from* side (classified from the source file path),
///   3. layer of the *to* side (classified from the import path), if any,
///   4. whether the source file is cross-cutting,
///   5. architecture mode of the source file,
///   6. whether the import target is cross-cutting.
type ClassifiedDependency = (
    boundary_core::types::Dependency,
    Option<boundary_core::types::ArchLayer>,
    Option<boundary_core::types::ArchLayer>,
    bool,
    boundary_core::types::ArchitectureMode,
    bool,
);
/// Per-file parse output produced by the parallel analysis step.
struct FileResult {
    // Extracted components, each paired with its resolved layer (the second
    // element mirrors `component.layer` after reclassification).
    components: Vec<(
        boundary_core::types::Component,
        Option<boundary_core::types::ArchLayer>,
    )>,
    // All outgoing dependencies of the file, already layer-classified.
    dependencies: Vec<ClassifiedDependency>,
}
/// Build the set of language analyzers to run.
///
/// Language selection precedence: explicit override (`--languages`), then the
/// config file's `project.languages`, then auto-detection from file
/// extensions. Unrecognized names are warned about and skipped; it is an
/// error if no analyzer at all could be constructed.
fn create_analyzers(
    project_path: &Path,
    config: &Config,
    language_override: Option<&[String]>,
) -> Result<Vec<Box<dyn LanguageAnalyzer>>> {
    let languages: Vec<String> = match language_override {
        Some(langs) => langs.to_vec(),
        None if config.project.languages.is_empty() => auto_detect_languages(project_path),
        None => config.project.languages.clone(),
    };

    let mut analyzers: Vec<Box<dyn LanguageAnalyzer>> = Vec::new();
    for lang in &languages {
        let analyzer: Option<Box<dyn LanguageAnalyzer>> = match lang.as_str() {
            "go" => Some(Box::new(
                GoAnalyzer::new().context("failed to init Go analyzer")?,
            )),
            "rust" => Some(Box::new(
                RustAnalyzer::new().context("failed to init Rust analyzer")?,
            )),
            "typescript" | "ts" => Some(Box::new(
                TypeScriptAnalyzer::new().context("failed to init TypeScript analyzer")?,
            )),
            "java" => Some(Box::new(
                JavaAnalyzer::new().context("failed to init Java analyzer")?,
            )),
            other => {
                eprintln!("Warning: unsupported language '{other}', skipping");
                None
            }
        };
        if let Some(analyzer) = analyzer {
            analyzers.push(analyzer);
        }
    }

    if analyzers.is_empty() {
        anyhow::bail!("no supported language analyzers could be initialized");
    }
    Ok(analyzers)
}
/// Detect which supported languages are present by scanning file extensions.
///
/// Only the first 1000 directory entries are inspected to bound the cost of
/// detection; the scan stops early once all four languages have been seen.
/// `.d.ts` files do not count as TypeScript. Defaults to Go when nothing is
/// found.
fn auto_detect_languages(project_path: &Path) -> Vec<String> {
    // Flags in output order: go, rust, typescript, java.
    const NAMES: [&str; 4] = ["go", "rust", "typescript", "java"];
    let mut found = [false; 4];

    for entry in WalkDir::new(project_path)
        .into_iter()
        .filter_map(Result::ok)
        .take(1000)
    {
        let path = entry.path();
        match path.extension().and_then(|ext| ext.to_str()) {
            Some("go") => found[0] = true,
            Some("rs") => found[1] = true,
            Some("ts" | "tsx") => {
                // TypeScript declaration files are not source code.
                if !path.to_string_lossy().ends_with(".d.ts") {
                    found[2] = true;
                }
            }
            Some("java") => found[3] = true,
            _ => {}
        }
        if found.iter().all(|&hit| hit) {
            break;
        }
    }

    let mut languages: Vec<String> = NAMES
        .iter()
        .zip(found)
        .filter(|(_, hit)| *hit)
        .map(|(name, _)| name.to_string())
        .collect();
    if languages.is_empty() {
        languages.push("go".to_string());
    }
    languages
}
/// Analyze every source file under `project_path` and assemble the dependency
/// graph plus aggregate metrics.
///
/// For each configured language analyzer this walks the tree, parses matching
/// source files in parallel (rayon), classifies components and dependencies
/// into architecture layers, and feeds everything into a `DependencyGraph`.
/// When `incremental` is set, per-file results are read from and written back
/// to an on-disk cache keyed by file content. Finally, graph nodes that do
/// not correspond to any analyzed source file are marked external before the
/// metrics are computed.
///
/// Fix in this revision: `cache.prune(¤t_files)` contained a
/// mojibake-corrupted `&curren` HTML entity; restored to
/// `cache.prune(&current_files)` so the file compiles.
fn run_analysis(
    project_path: &Path,
    project_root: &Path,
    config: &Config,
    language_override: Option<&[String]>,
    incremental: bool,
) -> Result<FullAnalysis> {
    let analyzers = create_analyzers(project_path, config, language_override)?;
    let classifier = LayerClassifier::new(&config.layers);
    let mut graph = DependencyGraph::new();
    let mut total_deps = 0usize;
    let mut total_files = 0usize;
    let mut all_components = Vec::new();
    let mut all_dependencies: Vec<boundary_core::types::Dependency> = Vec::new();

    // In non-incremental mode we use a fresh empty cache so lookups always
    // miss; it is only persisted when `incremental` is set.
    let mut cache = if incremental {
        boundary_core::cache::AnalysisCache::load(project_path).unwrap_or_default()
    } else {
        boundary_core::cache::AnalysisCache::new()
    };

    for analyzer in &analyzers {
        let extensions: Vec<&str> = analyzer.file_extensions().to_vec();
        // Candidate files for this analyzer, skipping vendored code, build
        // output ("/target/"), Go test files, and TypeScript declarations.
        let source_files: Vec<PathBuf> = WalkDir::new(project_path)
            .into_iter()
            .filter_map(|e| e.ok())
            .filter(|e| {
                let p = e.path();
                let matches_ext = p
                    .extension()
                    .is_some_and(|ext| extensions.iter().any(|e| ext == *e));
                if !matches_ext {
                    return false;
                }
                let path_str = p.to_string_lossy();
                !path_str.contains("vendor/")
                    && !path_str.contains("/target/")
                    && !path_str.ends_with("_test.go")
                    && !path_str.ends_with(".d.ts")
            })
            .map(|e| e.into_path())
            .collect();
        if source_files.is_empty() {
            continue;
        }
        total_files += source_files.len();

        // Parse and classify each file in parallel. Unreadable/unparsable
        // files are warned about and skipped rather than failing the run.
        let file_results: Vec<(String, FileResult, String)> = source_files
            .par_iter()
            .filter_map(|file_path| {
                let content = match std::fs::read_to_string(file_path) {
                    Ok(c) => c,
                    Err(e) => {
                        eprintln!("Warning: failed to read {}: {e}", file_path.display());
                        return None;
                    }
                };
                // Classification works on root-relative paths.
                let rel_path = file_path
                    .strip_prefix(project_root)
                    .unwrap_or(file_path)
                    .to_string_lossy()
                    .to_string();
                let is_cross_cutting = classifier.is_cross_cutting(&rel_path);
                let arch_mode = classifier.architecture_mode(&rel_path);

                // Incremental fast path: reuse the cached parse, but re-run
                // classification (layer rules may have changed since caching).
                if incremental {
                    if let Some(cached) = cache.get(&rel_path, &content) {
                        let file_layer = classifier.classify(&rel_path);
                        let components: Vec<_> = cached
                            .components
                            .iter()
                            .map(|comp| {
                                let mut comp = comp.clone();
                                // Components without an intrinsic layer inherit
                                // the file's layer.
                                if comp.layer.is_none() {
                                    comp.layer = file_layer;
                                }
                                comp.is_cross_cutting = is_cross_cutting;
                                comp.architecture_mode = arch_mode;
                                reclassify_infra_handlers(&mut comp);
                                let layer = comp.layer;
                                (comp, layer)
                            })
                            .collect();
                        let dependencies: Vec<_> = cached
                            .dependencies
                            .iter()
                            // Keep method calls unconditionally; drop imports
                            // that resolve to the language's stdlib.
                            .filter(|dep| {
                                matches!(dep.kind, DependencyKind::MethodCall)
                                    || !dep
                                        .import_path
                                        .as_deref()
                                        .is_some_and(|p| analyzer.is_stdlib_import(p))
                            })
                            .map(|dep| {
                                let to_layer = dep
                                    .import_path
                                    .as_deref()
                                    .and_then(|p| classifier.classify_import(p));
                                let to_is_cross_cutting = dep
                                    .import_path
                                    .as_deref()
                                    .is_some_and(|p| classifier.is_cross_cutting_import(p));
                                let from_layer = classifier.classify(&rel_path);
                                (
                                    dep.clone(),
                                    from_layer,
                                    to_layer,
                                    is_cross_cutting,
                                    arch_mode,
                                    to_is_cross_cutting,
                                )
                            })
                            .collect();
                        return Some((
                            rel_path,
                            FileResult {
                                components,
                                dependencies,
                            },
                            content,
                        ));
                    }
                }

                // Slow path: full parse + extraction.
                let parsed = match analyzer.parse_file(file_path, &content) {
                    Ok(p) => p,
                    Err(e) => {
                        eprintln!("Warning: failed to parse {}: {e}", file_path.display());
                        return None;
                    }
                };
                let mut components_raw = analyzer.extract_components(&parsed);
                let file_layer = classifier.classify(&rel_path);
                let components: Vec<_> = components_raw
                    .drain(..)
                    .map(|mut comp| {
                        if comp.layer.is_none() {
                            comp.layer = file_layer;
                        }
                        comp.is_cross_cutting = is_cross_cutting;
                        comp.architecture_mode = arch_mode;
                        reclassify_infra_handlers(&mut comp);
                        let layer = comp.layer;
                        (comp, layer)
                    })
                    .collect();
                let deps = analyzer.extract_dependencies(&parsed);
                let dependencies: Vec<_> = deps
                    .into_iter()
                    // Same stdlib-import filter as the cached path above.
                    .filter(|dep| {
                        matches!(dep.kind, DependencyKind::MethodCall)
                            || !dep
                                .import_path
                                .as_deref()
                                .is_some_and(|p| analyzer.is_stdlib_import(p))
                    })
                    .map(|dep| {
                        let to_layer = dep
                            .import_path
                            .as_deref()
                            .and_then(|p| classifier.classify_import(p));
                        let to_is_cross_cutting = dep
                            .import_path
                            .as_deref()
                            .is_some_and(|p| classifier.is_cross_cutting_import(p));
                        let from_layer = classifier.classify(&rel_path);
                        (
                            dep,
                            from_layer,
                            to_layer,
                            is_cross_cutting,
                            arch_mode,
                            to_is_cross_cutting,
                        )
                    })
                    .collect();
                Some((
                    rel_path,
                    FileResult {
                        components,
                        dependencies,
                    },
                    content,
                ))
            })
            .collect();

        let current_files: Vec<String> = file_results.iter().map(|(p, _, _)| p.clone()).collect();

        // First pass: refresh the cache and register all components as nodes.
        for (rel_path, fr, content) in &file_results {
            if incremental {
                let cached_components: Vec<_> =
                    fr.components.iter().map(|(comp, _)| comp.clone()).collect();
                let cached_deps: Vec<_> = fr
                    .dependencies
                    .iter()
                    .map(|(dep, _, _, _, _, _)| dep.clone())
                    .collect();
                cache.insert(
                    rel_path.clone(),
                    content,
                    boundary_core::cache::CachedFileResult {
                        // NOTE(review): hash is stored empty here — presumably
                        // the cache derives it from `content` in `insert`;
                        // confirm in boundary_core::cache.
                        hash: String::new(),
                        components: cached_components,
                        dependencies: cached_deps,
                    },
                );
            }
            for (comp, _) in &fr.components {
                graph.add_component(comp);
                all_components.push(comp.clone());
            }
        }

        // Second pass: add dependency edges, creating placeholder nodes for
        // targets that were not seen as components.
        for (_rel_path, fr, _content) in file_results {
            for (dep, from_layer, to_layer, is_cc, arch_mode, to_is_cc) in &fr.dependencies {
                graph.ensure_node_with_mode(&dep.from, *from_layer, *is_cc, *arch_mode);
                graph.ensure_node(&dep.to, *to_layer, *to_is_cc);
                graph.add_dependency(dep);
                all_dependencies.push(dep.clone());
            }
            total_deps += fr.dependencies.len();
        }

        if incremental {
            // Drop cache entries for files that no longer exist / matched.
            cache.prune(&current_files);
        }
    }

    if incremental {
        // A failed cache save degrades to non-incremental next run; not fatal.
        if let Err(e) = cache.save(project_path) {
            eprintln!("Warning: failed to save analysis cache: {e}");
        }
    }

    // Heuristically mark nodes that do not belong to this project's sources
    // as external: skip ids that are relative ("."), crate-local ("crate"),
    // under the project root, or whose path overlaps a source directory.
    let source_ids: std::collections::HashSet<_> = all_components.iter().map(|c| &c.id).collect();
    let source_rel_dirs: std::collections::HashSet<String> = all_components
        .iter()
        .filter_map(|c| {
            let rel = c
                .location
                .file
                .strip_prefix(project_root)
                .unwrap_or(&c.location.file);
            rel.parent().map(|p| p.to_string_lossy().replace('\\', "/"))
        })
        .collect();
    let project_root_str = project_root.to_string_lossy().replace('\\', "/");
    let external_ids: Vec<_> = graph
        .nodes()
        .iter()
        .filter(|n| {
            if source_ids.contains(&n.id) {
                return false;
            }
            let id = n.id.0.replace('\\', "/");
            // Ids look like "path::symbol"; compare only the path part.
            let path_part = id.split("::").next().unwrap_or(&id);
            if path_part.starts_with('.') {
                return false;
            }
            if path_part.starts_with("crate") {
                return false;
            }
            if path_part.starts_with(project_root_str.as_str()) {
                return false;
            }
            // Dotted module paths (e.g. Java packages) are normalized to
            // slashes before matching against source directories.
            let path_normalized = path_part.replace('.', "/");
            let is_internal = source_rel_dirs.iter().any(|dir| {
                if dir.is_empty() {
                    return false;
                }
                if path_part.ends_with(dir.as_str()) {
                    return true;
                }
                // Also treat any two consecutive source-dir segments appearing
                // in the id as evidence the node is internal.
                let dir_segments: Vec<&str> = dir.split('/').collect();
                if dir_segments.len() >= 2 {
                    for window in dir_segments.windows(2) {
                        let pair = format!("{}/{}", window[0], window[1]);
                        if path_normalized.contains(&pair) {
                            return true;
                        }
                    }
                }
                false
            });
            !is_internal
        })
        .map(|n| n.id.clone())
        .collect();
    for id in &external_ids {
        graph.mark_external(id);
    }

    let result = metrics::build_result(
        &graph,
        config,
        total_deps,
        &all_components,
        total_files,
        &all_dependencies,
    );
    Ok(FullAnalysis { result, graph })
}