#!/bin/bash
set -euo pipefail

# Comprehensive Testing Script for OpenCrates
# Runs all test suites, generates coverage reports, and performs security scans
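#
# Usage (options are parsed in main() below), for example:
#   $0                                   # run the full suite
#   $0 --only-unit                       # skip the integration suite
#   $0 --skip-services                   # use the default in-memory config
#   $0 --skip-benchmarks --skip-performance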

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Configuration
PROJECT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
TEST_RESULTS_DIR="${PROJECT_ROOT}/test-results"
COVERAGE_DIR="${PROJECT_ROOT}/coverage"
REPORTS_DIR="${PROJECT_ROOT}/reports"
BENCHMARK_DIR="${PROJECT_ROOT}/benchmarks"

# Test configuration
export RUST_LOG="${RUST_LOG:-info}"
export RUST_BACKTRACE="${RUST_BACKTRACE:-1}"
export CARGO_TERM_COLOR="${CARGO_TERM_COLOR:-always}"

# Database and service configuration for tests
TEST_DATABASE_URL="${TEST_DATABASE_URL:-sqlite::memory:}"
TEST_REDIS_URL="${TEST_REDIS_URL:-redis://localhost:6379}"
OPENAI_API_KEY="${OPENAI_API_KEY:-test-key-not-real}"

# Ensure directories exist
mkdir -p "${TEST_RESULTS_DIR}" "${COVERAGE_DIR}" "${REPORTS_DIR}" "${BENCHMARK_DIR}"

# Function to print colored output
print_status() {
    local color=$1
    local message=$2
    echo -e "${color}[$(date +'%Y-%m-%d %H:%M:%S')] ${message}${NC}"
}

print_success() {
    print_status "${GREEN}" "✅ $1"
}

print_error() {
    print_status "${RED}" "❌ $1"
}

print_warning() {
    print_status "${YELLOW}" "⚠  $1"
}

print_info() {
    print_status "${BLUE}" "ℹ  $1"
}

# Function to check if a command exists
command_exists() {
    command -v "$1" >/dev/null 2>&1
}

# Function to install required tools
install_tools() {
    print_info "Installing required testing tools..."
    
    # Install cargo subcommands that are not already on PATH
    local entry tool purpose
    for entry in \
        "cargo-tarpaulin:coverage" \
        "cargo-audit:security scanning" \
        "cargo-deny:dependency checking" \
        "cargo-outdated:dependency updates" \
        "cargo-bloat:binary analysis" \
        "cargo-criterion:benchmarking"; do
        tool="${entry%%:*}"
        purpose="${entry#*:}"
        if ! command_exists "${tool}"; then
            print_info "Installing ${tool} for ${purpose}..."
            cargo install "${tool}"
        fi
    done
    
    print_success "All tools installed successfully"
}
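
# Docker Compose compatibility shim (a sketch): prefer the standalone
# docker-compose binary when present, otherwise fall back to the Compose v2
# plugin (`docker compose`). Assumes at least one of the two is installed
# whenever Docker-based test services are wanted.
compose() {
    if command_exists docker-compose; then
        docker-compose "$@"
    else
        docker compose "$@"
    fi
}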

# Function to start test services
start_test_services() {
    print_info "Starting test services..."
    
    # Check whether Docker Compose is available (standalone binary or v2 plugin)
    if command_exists docker-compose || docker compose version >/dev/null 2>&1; then
        if [ -f "${PROJECT_ROOT}/docker-compose.test.yml" ]; then
            print_info "Starting Docker test services..."
            compose -f "${PROJECT_ROOT}/docker-compose.test.yml" up -d postgres redis
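            # Readiness poll (a sketch): wait until Postgres (5432) and Redis
            # (6379) actually accept TCP connections, up to ~30s each, rather
            # than trusting a fixed sleep. Uses bash's built-in /dev/tcp, so
            # no extra client tools are assumed on the host.
            local port attempt
            for port in 5432 6379; do
                for attempt in $(seq 1 30); do
                    if (exec 3<>"/dev/tcp/localhost/${port}") 2>/dev/null; then
                        break
                    fi
                    sleep 1
                done
            done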
            
            # Update test URLs to use Docker services
            export TEST_DATABASE_URL="postgres://postgres:postgres@localhost:5432/opencrates_test"
            export TEST_REDIS_URL="redis://localhost:6379"
        else
            print_warning "docker-compose.test.yml not found, using default test configuration"
        fi
    else
        print_warning "Docker not available, using in-memory/mock services"
    fi
}

# Function to stop test services
stop_test_services() {
    print_info "Stopping test services..."
    
    if { command_exists docker-compose || docker compose version >/dev/null 2>&1; } \
        && [ -f "${PROJECT_ROOT}/docker-compose.test.yml" ]; then
        compose -f "${PROJECT_ROOT}/docker-compose.test.yml" down
    fi
}

# Function to run code formatting check
check_formatting() {
    print_info "Checking code formatting..."
    
    if cargo fmt --all -- --check; then
        print_success "Code formatting is correct"
    else
        print_error "Code formatting issues found. Run 'cargo fmt' to fix."
        exit 1
    fi
}

# Function to run linting
run_linting() {
    print_info "Running Clippy linting..."
    
    if cargo clippy --all-targets --all-features -- -D warnings; then
        print_success "No linting issues found"
    else
        print_error "Linting issues found"
        exit 1
    fi
}

# Function to run unit tests
run_unit_tests() {
    print_info "Running unit tests..."
    
    local test_log="${TEST_RESULTS_DIR}/unit-tests.log"

    if cargo test --all-features --lib --bins --verbose \
        > "${test_log}" 2>&1; then
        print_success "Unit tests passed"
    else
        print_error "Unit tests failed"
        cat "${test_log}"
        exit 1
    fi
}

# Function to run integration tests
run_integration_tests() {
    print_info "Running integration tests..."
    
    local test_log="${TEST_RESULTS_DIR}/integration-tests.log"

    # Set environment variables for integration tests
    export TEST_DATABASE_URL TEST_REDIS_URL OPENAI_API_KEY RUST_LOG

    # libtest's `--format=json` is nightly-only, so capture plain output
    if cargo test --test integration_comprehensive --all-features --verbose \
        -- --test-threads=1 --nocapture \
        > "${test_log}" 2>&1; then
        print_success "Integration tests passed"
    else
        print_error "Integration tests failed"
        cat "${test_log}"
        exit 1
    fi
}

# Function to run documentation tests
run_doc_tests() {
    print_info "Running documentation tests..."
    
    if cargo test --doc --all-features --verbose; then
        print_success "Documentation tests passed"
    else
        print_error "Documentation tests failed"
        exit 1
    fi
}

# Function to run benchmarks
run_benchmarks() {
    print_info "Running benchmarks..."
    
    # Criterion's harness has no stable JSON flag on `cargo bench`; capture
    # the plain output here and let cargo-criterion below emit JSON.
    if cargo bench --all-features \
        > "${BENCHMARK_DIR}/benchmark-results.txt" 2>&1; then
        print_success "Benchmarks completed"
        
        # Generate benchmark report
        if command_exists cargo-criterion; then
            cargo criterion --message-format=json > "${BENCHMARK_DIR}/criterion-results.json" 2>&1 || true
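            # Baseline comparison (a sketch using criterion's own flags): CI
            # could persist a baseline from the main branch and compare pull
            # requests against it, e.g.:
            #   cargo bench --all-features -- --save-baseline main
            #   cargo bench --all-features -- --baseline main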
        fi
    else
        print_warning "Benchmarks failed or not available"
    fi
}

# Function to generate test coverage
generate_coverage() {
    print_info "Generating test coverage..."
    
    export TEST_DATABASE_URL TEST_REDIS_URL
    
    if cargo tarpaulin \
        --all-features \
        --workspace \
        --timeout 300 \
        --exclude-files "target/*" "tests/*" "examples/*" "benches/*" \
        --out Xml --out Html --out Json \
        --output-dir "${COVERAGE_DIR}" \
        --verbose; then
        print_success "Coverage report generated in ${COVERAGE_DIR}"
        
        # Extract overall coverage, weighted by lines (total covered lines
        # over total coverable lines). Field names follow tarpaulin's JSON
        # report and may differ between versions; falls back to "unknown".
        if [ -f "${COVERAGE_DIR}/tarpaulin-report.json" ]; then
            local coverage_percent
            coverage_percent=$(jq -r '([.files[].covered] | add) / ([.files[].coverable] | add) * 100 | floor' \
                "${COVERAGE_DIR}/tarpaulin-report.json" 2>/dev/null || echo "unknown")
            print_info "Overall coverage: ${coverage_percent}%"
        fi
    else
        print_warning "Coverage generation failed"
    fi
}

# Function to run security audit
run_security_audit() {
    print_info "Running security audit..."
    
    local audit_output="${REPORTS_DIR}/security-audit.json"
    
    if cargo audit --json > "${audit_output}"; then
        print_success "Security audit passed"
    else
        print_error "Security vulnerabilities found"
        cat "${audit_output}"
        exit 1
    fi
}

# Function to check dependencies
check_dependencies() {
    print_info "Checking dependencies..."
    
    # Run cargo-deny
    if cargo deny --format json check > "${REPORTS_DIR}/deny-report.json" 2>&1; then
        print_success "Dependency check passed"
    else
        print_error "Dependency issues found"
        cat "${REPORTS_DIR}/deny-report.json"
        exit 1
    fi
    
    # Check for outdated dependencies
    if command_exists cargo-outdated; then
        print_info "Checking for outdated dependencies..."
        cargo outdated --format json > "${REPORTS_DIR}/outdated-deps.json" 2>&1 || true
    fi
}

# Function to analyze binary size
analyze_binary_size() {
    print_info "Analyzing binary size..."
    
    # Build release binary
    cargo build --release --all-features
    
    if command_exists cargo-bloat; then
        cargo bloat --release --crates --message-format json > "${REPORTS_DIR}/binary-analysis.json" 2>&1 || true
        print_info "Binary analysis saved to ${REPORTS_DIR}/binary-analysis.json"
    fi
    
    # Get binary sizes (top-level binaries only; -perm -u+x works with both
    # GNU and BSD find, unlike -executable)
    find target/release -maxdepth 1 -name "opencrates*" -type f -perm -u+x | while read -r binary; do
        local size
        size=$(stat -c%s "$binary" 2>/dev/null || stat -f%z "$binary" 2>/dev/null || echo "unknown")
        print_info "Binary $(basename "$binary"): ${size} bytes"
    done
}

# Function to run performance tests
run_performance_tests() {
    print_info "Running performance tests..."
    
    # Build release binary
    cargo build --release --all-features
    
    # Start the server in background for performance testing
    local server_pid=""
    if [ -f "target/release/opencrates-server" ]; then
        print_info "Starting server for performance testing..."
        export OPENCRATES_DATABASE_URL="${TEST_DATABASE_URL}"
        export OPENCRATES_REDIS_URL="${TEST_REDIS_URL}"
        
        target/release/opencrates-server &
        server_pid=$!
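        # Health poll (a sketch): give the server up to ~30s to answer on the
        # health endpoint used below, rather than trusting a fixed sleep.
        # Assumes the server listens on localhost:8080.
        local attempt
        for attempt in $(seq 1 30); do
            if curl -sf http://localhost:8080/system/health >/dev/null 2>&1; then
                break
            fi
            sleep 1
        done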
        
        # Check if server is running
        if curl -f http://localhost:8080/system/health >/dev/null 2>&1; then
            print_success "Server started successfully"
            
            # Run basic performance tests
            if command_exists wrk; then
                print_info "Running wrk performance tests..."
                wrk -t4 -c100 -d30s --latency http://localhost:8080/system/health \
                    > "${REPORTS_DIR}/performance-wrk.txt" 2>&1 || true
            fi
            
            if command_exists ab; then
                print_info "Running Apache Bench tests..."
                ab -n 1000 -c 10 http://localhost:8080/system/health \
                    > "${REPORTS_DIR}/performance-ab.txt" 2>&1 || true
            fi
        else
            print_warning "Server failed to start, skipping performance tests"
        fi
        
        # Stop the server
        if [ -n "$server_pid" ]; then
            kill "$server_pid" 2>/dev/null || true
            wait "$server_pid" 2>/dev/null || true
        fi
    else
        print_warning "Server binary not found, skipping performance tests"
    fi
}

# Function to generate final report
generate_report() {
    print_info "Generating final test report..."
    
    local report_file="${REPORTS_DIR}/test-summary.html"
    
    cat > "$report_file" << EOF
<!DOCTYPE html>
<html>
<head>
    <title>OpenCrates Test Report</title>
    <style>
        body { font-family: Arial, sans-serif; margin: 20px; }
        .header { background-color: #f0f0f0; padding: 10px; }
        .section { margin: 20px 0; padding: 10px; border: 1px solid #ddd; }
        .success { color: green; }
        .error { color: red; }
        .warning { color: orange; }
        pre { background-color: #f5f5f5; padding: 10px; overflow-x: auto; }
    </style>
</head>
<body>
    <div class="header">
        <h1>OpenCrates Test Report</h1>
        <p>Generated on: $(date)</p>
        <p>Git commit: $(git rev-parse HEAD 2>/dev/null || echo "unknown")</p>
    </div>
    
    <div class="section">
        <h2>Test Results Summary</h2>
        <p>All test phases completed successfully! ✅</p>
    </div>
    
    <div class="section">
        <h2>Coverage Report</h2>
EOF

    if [ -f "${COVERAGE_DIR}/tarpaulin-report.html" ]; then
        echo "<p><a href=\"../coverage/tarpaulin-report.html\">View detailed coverage report</a></p>" >> "$report_file"
    else
        echo "<p>Coverage report not available</p>" >> "$report_file"
    fi
    
    cat >> "$report_file" << EOF
    </div>
    
    <div class="section">
        <h2>Files Generated</h2>
        <ul>
EOF

    find "${TEST_RESULTS_DIR}" "${COVERAGE_DIR}" "${REPORTS_DIR}" "${BENCHMARK_DIR}" -type f 2>/dev/null | while read -r file; do
        local rel_path="${file#$PROJECT_ROOT/}"
        echo "            <li><a href=\"../${rel_path}\">${rel_path}</a></li>" >> "$report_file"
    done
    
    cat >> "$report_file" << EOF
        </ul>
    </div>
    
    <div class="section">
        <h2>Next Steps</h2>
        <ul>
            <li>Review coverage report and improve test coverage for areas below 80%</li>
            <li>Address any performance regressions identified in benchmark results</li>
            <li>Update dependencies if outdated versions were found</li>
            <li>Review binary size analysis for optimization opportunities</li>
        </ul>
    </div>
</body>
</html>
EOF

    print_success "Test report generated: ${report_file}"
}

# Function to cleanup
cleanup() {
    print_info "Cleaning up..."
    stop_test_services
    
    # Remove temporary files
    find . -name "*.tmp" -delete 2>/dev/null || true
    
    print_success "Cleanup completed"
}

# Main execution
main() {
    print_info "Starting comprehensive test suite for OpenCrates"
    print_info "Project root: ${PROJECT_ROOT}"
    
    # Set up trap for cleanup
    trap cleanup EXIT
    
    cd "${PROJECT_ROOT}"
    
    # Parse command line arguments
    local skip_install=false
    local skip_services=false
    local only_unit=false
    local only_integration=false
    local skip_benchmarks=false
    local skip_performance=false
    
    while [[ $# -gt 0 ]]; do
        case $1 in
            --skip-install)
                skip_install=true
                shift
                ;;
            --skip-services)
                skip_services=true
                shift
                ;;
            --only-unit)
                only_unit=true
                shift
                ;;
            --only-integration)
                only_integration=true
                shift
                ;;
            --skip-benchmarks)
                skip_benchmarks=true
                shift
                ;;
            --skip-performance)
                skip_performance=true
                shift
                ;;
            --help|-h)
                echo "Usage: $0 [options]"
                echo "Options:"
                echo "  --skip-install      Skip tool installation"
                echo "  --skip-services     Skip starting test services"
                echo "  --only-unit         Run only unit tests"
                echo "  --only-integration  Run only integration tests"
                echo "  --skip-benchmarks   Skip benchmark tests"
                echo "  --skip-performance  Skip performance tests"
                echo "  --help, -h          Show this help message"
                exit 0
                ;;
            *)
                print_error "Unknown option: $1"
                exit 1
                ;;
        esac
    done
    
    # Install tools
    if [ "$skip_install" = false ]; then
        install_tools
    fi
    
    # Start services
    if [ "$skip_services" = false ]; then
        start_test_services
    fi
    
    # Run test phases
    check_formatting
    run_linting
    
    if [ "$only_integration" = false ]; then
        run_unit_tests
        run_doc_tests
    fi
    
    if [ "$only_unit" = false ]; then
        run_integration_tests
    fi
    
    generate_coverage
    run_security_audit
    check_dependencies
    analyze_binary_size
    
    if [ "$skip_benchmarks" = false ]; then
        run_benchmarks
    fi
    
    if [ "$skip_performance" = false ]; then
        run_performance_tests
    fi
    
    generate_report
    
    print_success " All tests completed successfully!"
    print_info "Results available in:"
    print_info "   Test results: ${TEST_RESULTS_DIR}"
    print_info "   Coverage: ${COVERAGE_DIR}"
    print_info "   Reports: ${REPORTS_DIR}"
    print_info "  🏃 Benchmarks: ${BENCHMARK_DIR}"
    print_info "  📄 Summary: ${REPORTS_DIR}/test-summary.html"
}

# Run main function with all arguments
main "$@" 