#!/usr/bin/env bash
#
# cli-test: entry point for the CLI Testing Specialist workflow
# (analysis → test generation → execution → reporting).
#
set -euo pipefail
IFS=$'\n\t'
CLI_TEST_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
source "$CLI_TEST_ROOT/utils/logger.sh"
source "$CLI_TEST_ROOT/core/validator.sh"
source "$CLI_TEST_ROOT/core/config-loader.sh"
# Install the ERR trap only after logger.sh is sourced, so the handler
# function exists if the trap fires.
trap 'log_error_with_trace "Error at line $LINENO in cli-test"' ERR
VERSION=$(cat "$CLI_TEST_ROOT/VERSION" 2>/dev/null || echo "1.1.0-dev")
load_all_config || {
log ERROR "Failed to load configuration"
exit 1
}
DEFAULT_OUTPUT_DIR="${CONFIG_OUTPUT_BASE_DIR}"
DEFAULT_REPORT_FORMAT="${CONFIG_REPORT_FORMAT}"
DEFAULT_TEST_MODULES="${CONFIG_TEST_MODULES}"
DEFAULT_DOCKER_ENVIRONMENTS="${CONFIG_DOCKER_ENVIRONMENTS}"
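
# show_usage: print the help text, with defaults resolved from the loaded
# configuration.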
show_usage() {
cat <<EOF
CLI Testing Specialist v$VERSION
Usage: $0 [OPTIONS] <cli-binary>
Options:
-o, --output DIR Output directory (default: $DEFAULT_OUTPUT_DIR)
-f, --format FORMAT Report format: markdown|json|html|all (default: $DEFAULT_REPORT_FORMAT)
-m, --modules MODULES Test modules: all|basic|help|security|path (default: $DEFAULT_TEST_MODULES)
-s, --skip-analysis Skip CLI analysis (use existing analysis.json)
-S, --skip-generation Skip test generation (use existing tests)
-r, --report-only Generate report only (skip all execution)
-v, --verbose Enable verbose logging (DEBUG level)
-h, --help Show this help message
Docker Options:
-d, --docker Run tests in Docker containers
-e, --environments ENV Docker environments: alpine|ubuntu|debian|all (default: all)
-t, --timeout SECONDS Docker test timeout in seconds (default: 300)
Coverage Options (Phase 2):
-c, --coverage Enable coverage analysis and reporting
--coverage-format FMT Coverage report format: html|markdown|json|all (default: html)
Profiling Options (Phase 2):
-p, --profile Enable performance profiling
--baseline Save performance baseline
--compare-baseline Compare with saved baseline
Security Options (Phase 2):
--security Enable security vulnerability scanning
--security-format FMT Security report format: html|markdown|json|all (default: html)
Examples:
# Full workflow
$0 /usr/bin/git
# Custom output directory
$0 -o ./git-tests /usr/bin/git
# Only specific modules
$0 -m "basic,security" /usr/bin/docker
# Skip analysis (use existing)
$0 -s -o ./existing-tests /usr/bin/ls
# Report only
$0 -r -o ./existing-tests
# Docker mode - all environments
$0 --docker /usr/bin/git
# Docker mode - specific environments
$0 --docker -e alpine,ubuntu /usr/bin/ls
# Coverage analysis
$0 --coverage -o ./coverage-tests /usr/bin/ls
# Coverage with specific format
$0 --coverage --coverage-format markdown /usr/bin/git
# Performance profiling
$0 --profile -o ./profile-tests /usr/bin/ls
# Profiling with baseline
$0 --profile --baseline /usr/bin/git
# Coverage + Profiling (combined)
$0 --coverage --profile /usr/bin/ls
# Security scanning
$0 --security -o ./security-tests /usr/bin/ls
# Security with specific format
$0 --security --security-format markdown /usr/bin/git
# Coverage + Profiling + Security (combined)
$0 --coverage --profile --security /usr/bin/ls
Workflow Steps:
1. CLI Analysis → {output}/analysis.json
2. Test Generation → {output}/tests/*.bats
3. Test Execution → TAP output (native or Docker)
4. Report Generation → {output}/reports/
EOF
}
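
# parse_arguments: parse flags and the positional CLI binary, then export the
# results (CLI_BINARY, OUTPUT_DIR, REPORT_FORMAT, ...) as environment
# variables consumed by the workflow steps below.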
parse_arguments() {
local cli_binary=""
local output_dir="$DEFAULT_OUTPUT_DIR"
local report_format="$DEFAULT_REPORT_FORMAT"
local test_modules="$DEFAULT_TEST_MODULES"
local skip_analysis=false
local skip_generation=false
local report_only=false
local verbose=false
local docker_mode=false
local docker_environments="$DEFAULT_DOCKER_ENVIRONMENTS"
local docker_timeout=300
local coverage_mode=false
local coverage_format="html"
local profile_mode=false
local save_baseline=false
local compare_baseline=false
local security_mode=false
local security_format="html"
while [[ $# -gt 0 ]]; do
case "$1" in
-o|--output)
output_dir="$2"
shift 2
;;
-f|--format)
report_format="$2"
shift 2
;;
-m|--modules)
test_modules="$2"
shift 2
;;
-s|--skip-analysis)
skip_analysis=true
shift
;;
-S|--skip-generation)
skip_generation=true
shift
;;
-r|--report-only)
report_only=true
shift
;;
-v|--verbose)
verbose=true
export CLI_TEST_LOG_LEVEL=DEBUG
shift
;;
-d|--docker)
docker_mode=true
shift
;;
-e|--environments)
docker_environments="$2"
shift 2
;;
-t|--timeout)
docker_timeout="$2"
shift 2
;;
-c|--coverage)
coverage_mode=true
shift
;;
--coverage-format)
coverage_format="$2"
shift 2
;;
-p|--profile)
profile_mode=true
shift
;;
--baseline)
save_baseline=true
shift
;;
--compare-baseline)
compare_baseline=true
shift
;;
--security)
security_mode=true
shift
;;
--security-format)
security_format="$2"
shift 2
;;
-h|--help)
show_usage
exit 0
;;
-*)
log ERROR "Unknown option: $1"
show_usage
exit 1
;;
*)
cli_binary="$1"
shift
;;
esac
done
if [[ "$report_only" == "false" ]] && [[ -z "$cli_binary" ]]; then
log ERROR "CLI binary is required"
show_usage
exit 1
fi
export CLI_BINARY="$cli_binary"
export OUTPUT_DIR="$output_dir"
export REPORT_FORMAT="$report_format"
export TEST_MODULES="$test_modules"
export SKIP_ANALYSIS="$skip_analysis"
export SKIP_GENERATION="$skip_generation"
export REPORT_ONLY="$report_only"
export VERBOSE="$verbose"
export DOCKER_MODE="$docker_mode"
export DOCKER_ENVIRONMENTS="$docker_environments"
export DOCKER_TIMEOUT="$docker_timeout"
export COVERAGE_MODE="$coverage_mode"
export COVERAGE_FORMAT="$coverage_format"
export PROFILE_MODE="$profile_mode"
export SAVE_BASELINE="$save_baseline"
export COMPARE_BASELINE="$compare_baseline"
export SECURITY_MODE="$security_mode"
export SECURITY_FORMAT="$security_format"
}
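
# setup_output_structure: validate the requested output directory and create
# its tests/ and reports/ subdirectories. The validated path is echoed on
# stdout (log output is assumed to go to stderr, so the command substitution
# in main() captures only the path).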
setup_output_structure() {
local output_dir="$1"
log INFO "Setting up output directory structure"
local validated_dir
validated_dir=$(validate_output_dir "$output_dir") || {
log ERROR "Failed to validate output directory"
return 1
}
mkdir -p "$validated_dir/tests"
mkdir -p "$validated_dir/reports"
log DEBUG "Created directory structure:"
log DEBUG " - $validated_dir/tests"
log DEBUG " - $validated_dir/reports"
echo "$validated_dir"
}
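
# step_detect_shells: detect available shells via core/shell-detector.sh and
# write the result to shell-detection.json. Failures are non-critical and do
# not abort the workflow.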
step_detect_shells() {
local output_dir="$1"
log INFO "=== Step 0: Shell Detection ==="
if [[ -f "$output_dir/shell-detection.json" ]]; then
log INFO "Using existing shell detection: $output_dir/shell-detection.json"
return 0
fi
log INFO "Detecting available shells"
local detection_output
detection_output=$(bash "$CLI_TEST_ROOT/core/shell-detector.sh" "$output_dir/shell-detection.json" 2>&1) || {
log WARN "Shell detection failed (non-critical)"
log WARN "$detection_output"
return 0
}
log INFO "Shell detection completed: $output_dir/shell-detection.json"
local available_count
available_count=$(jq -r '.summary.available_shells' "$output_dir/shell-detection.json" 2>/dev/null || echo "unknown")
log INFO " Available shells: $available_count"
}
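
# step_analyze: analyze the target CLI binary via core/cli-analyzer.sh,
# producing analysis.json. With --skip-analysis, an existing analysis.json
# must already be present in the output directory.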
step_analyze() {
local cli_binary="$1"
local output_dir="$2"
log INFO "=== Step 1: CLI Analysis ==="
if [[ "$SKIP_ANALYSIS" == "true" ]]; then
log INFO "Skipping CLI analysis (using existing)"
if [[ ! -f "$output_dir/analysis.json" ]]; then
log ERROR "analysis.json not found in $output_dir"
return 1
fi
log INFO "Using existing: $output_dir/analysis.json"
return 0
fi
log INFO "Analyzing CLI tool: $cli_binary"
local analysis_output
analysis_output=$(bash "$CLI_TEST_ROOT/core/cli-analyzer.sh" "$cli_binary" "$output_dir/analysis.json" 2>&1) || {
log ERROR "CLI analysis failed"
log ERROR "$analysis_output"
return 1
}
log INFO "Analysis completed: $output_dir/analysis.json"
}
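
# step_generate: generate BATS test files from analysis.json for the selected
# modules. With --skip-generation, existing .bats files must already be
# present under tests/.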
step_generate() {
local output_dir="$1"
local test_modules="$2"
log INFO "=== Step 2: Test Generation ==="
if [[ "$SKIP_GENERATION" == "true" ]]; then
log INFO "Skipping test generation (using existing)"
local bats_count
bats_count=$(find "$output_dir/tests" -name "*.bats" -type f 2>/dev/null | wc -l)
if [[ $bats_count -eq 0 ]]; then
log ERROR "No .bats files found in $output_dir/tests"
return 1
fi
log INFO "Using existing $bats_count test files"
return 0
fi
log INFO "Generating BATS tests (modules: $test_modules)"
local generation_output
generation_output=$(bash "$CLI_TEST_ROOT/core/test-generator.sh" \
"$output_dir/analysis.json" \
"$output_dir/tests" \
"$test_modules" 2>&1) || {
log ERROR "Test generation failed"
log ERROR "$generation_output"
return 1
}
local bats_count
bats_count=$(find "$output_dir/tests" -name "*.bats" -type f | wc -l)
log INFO "Generated $bats_count test files"
}
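
# step_execute: run the generated BATS tests natively via core/run-tests.sh
# and write reports. Propagates the runner's exit code so main() can report
# overall status.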
step_execute() {
local output_dir="$1"
log INFO "=== Step 3: Test Execution (Native) ==="
log INFO "Running BATS tests"
local test_output
local exit_code=0
test_output=$(bash "$CLI_TEST_ROOT/core/run-tests.sh" \
"$output_dir/tests" \
"both" \
"$output_dir/reports" 2>&1) || exit_code=$?
if [[ $exit_code -eq 0 ]]; then
log INFO "All tests passed ✅"
else
log WARN "Some tests failed (exit code: $exit_code) ❌"
fi
log INFO "Reports generated in: $output_dir/reports"
return $exit_code
}
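
# step_execute_docker: run the generated tests inside the selected Docker
# environments via core/docker-test-runner.sh, writing results to
# docker-results/.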
step_execute_docker() {
local cli_binary="$1"
local output_dir="$2"
local environments="$3"
local timeout="$4"
log INFO "=== Step 3: Test Execution (Docker) ==="
log INFO "Running tests in Docker environments: $environments"
local docker_output_dir="$output_dir/docker-results"
mkdir -p "$docker_output_dir"
local test_output
local exit_code=0
test_output=$(bash "$CLI_TEST_ROOT/core/docker-test-runner.sh" \
-e "$environments" \
-t "$timeout" \
"$cli_binary" \
"$output_dir/tests" \
"$docker_output_dir" 2>&1) || exit_code=$?
if [[ $exit_code -eq 0 ]]; then
log INFO "All Docker environment tests passed ✅"
else
log WARN "Some Docker environment tests failed (exit code: $exit_code) ❌"
fi
log INFO "Docker test results: $docker_output_dir"
return $exit_code
}
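
# step_analyze_coverage: analyze the coverage database collected during test
# execution and emit coverage reports in the requested format(s). Skips
# quietly when no coverage database was produced.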
step_analyze_coverage() {
local output_dir="$1"
local coverage_format="$2"
log INFO "=== Step 4: Coverage Analysis (Phase 2) ==="
local coverage_db="$output_dir/coverage.db"
if [[ ! -f "$coverage_db" ]]; then
log WARN "Coverage database not found: $coverage_db"
log WARN "Skipping coverage analysis"
return 0
fi
if [[ ! -f "$output_dir/analysis.json" ]]; then
log ERROR "analysis.json not found for coverage analysis"
return 1
fi
log INFO "Analyzing coverage data"
local coverage_result="$output_dir/coverage-result.json"
bash "$CLI_TEST_ROOT/core/coverage-analyzer.sh" \
"$output_dir/analysis.json" \
"$coverage_db" \
"$coverage_result" || {
log ERROR "Coverage analysis failed"
return 1
}
log INFO "Coverage analysis completed: $coverage_result"
local report_dir="$output_dir/reports"
mkdir -p "$report_dir"
case "$coverage_format" in
html)
bash "$CLI_TEST_ROOT/core/coverage-reporter.sh" \
"$coverage_result" \
"$report_dir/coverage-report.html" \
"html"
log INFO "Coverage report (HTML): $report_dir/coverage-report.html"
;;
markdown|md)
bash "$CLI_TEST_ROOT/core/coverage-reporter.sh" \
"$coverage_result" \
"$report_dir/coverage-report.md" \
"markdown"
log INFO "Coverage report (Markdown): $report_dir/coverage-report.md"
;;
json)
bash "$CLI_TEST_ROOT/core/coverage-reporter.sh" \
"$coverage_result" \
"$report_dir/coverage-report.json" \
"json"
log INFO "Coverage report (JSON): $report_dir/coverage-report.json"
;;
all)
bash "$CLI_TEST_ROOT/core/coverage-reporter.sh" \
"$coverage_result" \
"$report_dir/coverage-report.html" \
"html"
bash "$CLI_TEST_ROOT/core/coverage-reporter.sh" \
"$coverage_result" \
"$report_dir/coverage-report.md" \
"markdown"
bash "$CLI_TEST_ROOT/core/coverage-reporter.sh" \
"$coverage_result" \
"$report_dir/coverage-report.json" \
"json"
log INFO "Coverage reports (all formats): $report_dir/"
;;
*)
log WARN "Unknown coverage format: $coverage_format, using html"
bash "$CLI_TEST_ROOT/core/coverage-reporter.sh" \
"$coverage_result" \
"$report_dir/coverage-report.html" \
"html"
;;
esac
return 0
}
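
# step_profile_execution: profile the target binary via core/profiler.sh and
# optionally save a performance baseline or compare against a saved one,
# warning when a regression is detected.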
step_profile_execution() {
local output_dir="$1"
local cli_binary="$2"
local save_baseline="$3"
local compare_baseline="$4"
log INFO "=== Step 5: Performance Profiling (Phase 2) ==="
local profile_db="$output_dir/profiles.json"
local baseline_db="$output_dir/baselines.json"
bash "$CLI_TEST_ROOT/core/profiler.sh" init "$profile_db" || {
log ERROR "Failed to initialize profile database"
return 1
}
bash "$CLI_TEST_ROOT/core/baseline-manager.sh" init "$baseline_db" || {
log ERROR "Failed to initialize baseline database"
return 1
}
log INFO "Profiling command: $cli_binary"
local profile_result="$output_dir/profile-result.json"
bash "$CLI_TEST_ROOT/core/profiler.sh" profile "$cli_binary" "$profile_result" || {
log ERROR "Profiling failed"
return 1
}
log INFO "Profiling completed: $profile_result"
if [[ "$save_baseline" == "true" ]]; then
log INFO "Saving performance baseline"
bash "$CLI_TEST_ROOT/core/baseline-manager.sh" save "$cli_binary" "$profile_result" "$baseline_db" || {
log WARN "Failed to save baseline (non-critical)"
}
fi
if [[ "$compare_baseline" == "true" ]]; then
log INFO "Comparing with baseline"
local comparison_result="$output_dir/baseline-comparison.json"
bash "$CLI_TEST_ROOT/core/baseline-manager.sh" compare "$cli_binary" "$profile_result" "$baseline_db" "$comparison_result" || {
log WARN "Baseline comparison failed (non-critical)"
}
if [[ -f "$comparison_result" ]]; then
local regression_detected
regression_detected=$(jq -r '.comparison.regression_detected' "$comparison_result" 2>/dev/null || echo "false")
if [[ "$regression_detected" == "true" ]]; then
local diff_percent
diff_percent=$(jq -r '.comparison.wall_time_diff_percent' "$comparison_result")
log WARN "Performance regression detected: +${diff_percent}%"
fi
fi
fi
return 0
}
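
# step_scan_security: scan the target binary for vulnerabilities via
# core/security-scanner.sh, persist the findings, and emit security reports
# in the requested format(s).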
step_scan_security() {
local output_dir="$1"
local cli_binary="$2"
local security_format="$3"
log INFO "=== Step 6: Security Vulnerability Scanning (Phase 2) ==="
local security_db="$output_dir/security-findings.json"
bash "$CLI_TEST_ROOT/core/security-scanner.sh" init "$security_db" || {
log ERROR "Failed to initialize security database"
return 1
}
log INFO "Running comprehensive security scan: $cli_binary"
local scan_result="$output_dir/security-scan-result.json"
bash "$CLI_TEST_ROOT/core/security-scanner.sh" scan "$cli_binary" "cli-test-scan" "$scan_result" || {
log ERROR "Security scan failed"
return 1
}
log INFO "Security scan completed: $scan_result"
local scan_data
scan_data=$(<"$scan_result")
bash "$CLI_TEST_ROOT/core/security-scanner.sh" save "$scan_data" "$security_db" 2>/dev/null || {
log WARN "Failed to save scan result to database (non-critical)"
}
local report_dir="$output_dir/reports"
mkdir -p "$report_dir"
case "$security_format" in
html)
bash "$CLI_TEST_ROOT/core/security-reporter.sh" \
"$scan_result" \
"$report_dir/security-report.html" \
"html"
log INFO "Security report (HTML): $report_dir/security-report.html"
;;
markdown|md)
bash "$CLI_TEST_ROOT/core/security-reporter.sh" \
"$scan_result" \
"$report_dir/security-report.md" \
"markdown"
log INFO "Security report (Markdown): $report_dir/security-report.md"
;;
json)
bash "$CLI_TEST_ROOT/core/security-reporter.sh" \
"$scan_result" \
"$report_dir/security-report.json" \
"json"
log INFO "Security report (JSON): $report_dir/security-report.json"
;;
all)
bash "$CLI_TEST_ROOT/core/security-reporter.sh" \
"$scan_result" \
"$report_dir/security-report.html" \
"html"
bash "$CLI_TEST_ROOT/core/security-reporter.sh" \
"$scan_result" \
"$report_dir/security-report.md" \
"markdown"
bash "$CLI_TEST_ROOT/core/security-reporter.sh" \
"$scan_result" \
"$report_dir/security-report.json" \
"json"
log INFO "Security reports (all formats): $report_dir/"
;;
*)
log WARN "Unknown security format: $security_format, using html"
bash "$CLI_TEST_ROOT/core/security-reporter.sh" \
"$scan_result" \
"$report_dir/security-report.html" \
"html"
;;
esac
local overall_severity
overall_severity=$(jq -r '.overall_severity // "info"' "$scan_result" 2>/dev/null || echo "info")
if [[ "$overall_severity" == "critical" ]] || [[ "$overall_severity" == "high" ]]; then
log WARN "Security scan detected $overall_severity severity issues"
elif [[ "$overall_severity" == "medium" ]]; then
log INFO "Security scan detected medium severity issues"
else
log INFO "Security scan completed with low/info severity findings"
fi
return 0
}
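
# main: orchestrate the full workflow (shell detection, analysis, generation,
# execution, then the optional coverage, profiling, and security steps) and
# print a summary of the generated artifacts.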
main() {
log INFO "CLI Testing Specialist v$VERSION"
log INFO "Starting workflow execution"
parse_arguments "$@"
local validated_output_dir
validated_output_dir=$(setup_output_structure "$OUTPUT_DIR") || exit 1
if [[ "$COVERAGE_MODE" == "true" ]]; then
log INFO "Initializing coverage database (Phase 2)"
export COVERAGE_DB_PATH="$validated_output_dir/coverage.db"
bash "$CLI_TEST_ROOT/core/coverage-tracker.sh" init "$COVERAGE_DB_PATH" || {
log WARN "Coverage database initialization failed (non-critical)"
COVERAGE_MODE="false" }
fi
local exit_code=0
if [[ "$REPORT_ONLY" == "true" ]]; then
log INFO "Report-only mode: skipping analysis, generation, and execution"
if [[ -f "$validated_output_dir/reports/test-report.md" ]]; then
log INFO "Report already exists: $validated_output_dir/reports/test-report.md"
else
log ERROR "No existing report found in $validated_output_dir/reports"
exit 1
fi
else
step_detect_shells "$validated_output_dir" || exit_code=$?
step_analyze "$CLI_BINARY" "$validated_output_dir" || exit_code=$?
[[ $exit_code -ne 0 ]] && exit $exit_code
step_generate "$validated_output_dir" "$TEST_MODULES" || exit_code=$?
[[ $exit_code -ne 0 ]] && exit $exit_code
if [[ "$DOCKER_MODE" == "true" ]]; then
step_execute_docker "$CLI_BINARY" "$validated_output_dir" "$DOCKER_ENVIRONMENTS" "$DOCKER_TIMEOUT" || exit_code=$?
else
step_execute "$validated_output_dir" || exit_code=$?
fi
if [[ "$COVERAGE_MODE" == "true" ]]; then
step_analyze_coverage "$validated_output_dir" "$COVERAGE_FORMAT" || {
log WARN "Coverage analysis failed (non-critical)"
}
fi
if [[ "$PROFILE_MODE" == "true" ]]; then
step_profile_execution "$validated_output_dir" "$CLI_BINARY" "$SAVE_BASELINE" "$COMPARE_BASELINE" || {
log WARN "Performance profiling failed (non-critical)"
}
fi
if [[ "$SECURITY_MODE" == "true" ]]; then
step_scan_security "$validated_output_dir" "$CLI_BINARY" "$SECURITY_FORMAT" || {
log WARN "Security scanning failed (non-critical)"
}
fi
fi
log INFO "=== Workflow Completed ==="
log INFO " Output directory: $validated_output_dir"
if [[ "$DOCKER_MODE" == "true" ]]; then
log INFO " Docker results: $validated_output_dir/docker-results/"
log INFO " Summary report: $validated_output_dir/docker-results/docker-test-summary.md"
else
log INFO " Reports: $validated_output_dir/reports/"
fi
if [[ "$COVERAGE_MODE" == "true" ]]; then
log INFO " Coverage analysis: $validated_output_dir/coverage-result.json"
case "$COVERAGE_FORMAT" in
html) log INFO " Coverage report: $validated_output_dir/reports/coverage-report.html" ;;
markdown|md) log INFO " Coverage report: $validated_output_dir/reports/coverage-report.md" ;;
json) log INFO " Coverage report: $validated_output_dir/reports/coverage-report.json" ;;
all) log INFO " Coverage reports: $validated_output_dir/reports/coverage-report.*" ;;
esac
fi
if [[ "$PROFILE_MODE" == "true" ]]; then
log INFO " Performance profile: $validated_output_dir/profile-result.json"
if [[ "$SAVE_BASELINE" == "true" ]]; then
log INFO " Baseline saved: $validated_output_dir/baselines.json"
fi
if [[ "$COMPARE_BASELINE" == "true" ]]; then
log INFO " Baseline comparison: $validated_output_dir/baseline-comparison.json"
fi
fi
if [[ "$SECURITY_MODE" == "true" ]]; then
log INFO " Security scan result: $validated_output_dir/security-scan-result.json"
case "$SECURITY_FORMAT" in
html) log INFO " Security report: $validated_output_dir/reports/security-report.html" ;;
markdown|md) log INFO " Security report: $validated_output_dir/reports/security-report.md" ;;
json) log INFO " Security report: $validated_output_dir/reports/security-report.json" ;;
all) log INFO " Security reports: $validated_output_dir/reports/security-report.*" ;;
esac
fi
if [[ $exit_code -eq 0 ]]; then
log INFO " Status: ✅ SUCCESS"
else
log WARN " Status: ❌ FAILED (exit code: $exit_code)"
fi
exit $exit_code
}
if [[ "${BASH_SOURCE[0]}" == "${0}" ]]; then
main "$@"
fi