omni-dev 0.19.0

A powerful Git commit message analysis and amendment toolkit
Documentation
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
//! Preflight validation checks for early failure detection.
//!
//! This module provides functions to validate required services and credentials
//! before starting expensive operations. Commands should call these checks early
//! to fail fast with clear error messages.

use anyhow::{bail, Context, Result};

use crate::claude::model_config::get_model_registry;

/// Result of AI credential validation.
///
/// Returned by `check_ai_credentials` so callers can report which
/// provider/model combination will be used before doing expensive work.
#[derive(Debug)]
pub struct AiCredentialInfo {
    /// The AI provider that will be used.
    pub provider: AiProvider,
    /// The model that will be used.
    pub model: String,
}

/// AI provider types.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum AiProvider {
    /// Anthropic Claude API.
    Claude,
    /// AWS Bedrock with Claude.
    Bedrock,
    /// OpenAI API.
    OpenAi,
    /// Local Ollama.
    Ollama,
}

impl std::fmt::Display for AiProvider {
    /// Writes the human-readable provider name used in CLI output.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Map each variant to its display label, then emit it in one call.
        let label = match self {
            Self::Claude => "Claude API",
            Self::Bedrock => "AWS Bedrock",
            Self::OpenAi => "OpenAI API",
            Self::Ollama => "Ollama",
        };
        f.write_str(label)
    }
}

/// Validates that AI credentials are available before processing.
///
/// This performs a lightweight check of environment variables without
/// creating a full AI client. Use this at the start of commands that
/// require AI to fail fast if credentials are missing.
///
/// Provider precedence (first match wins): Ollama, OpenAI, AWS Bedrock,
/// then the Claude API as the default.
///
/// # Errors
///
/// Returns an error when the selected provider's required credentials
/// are not present in the environment.
pub fn check_ai_credentials(model_override: Option<&str>) -> Result<AiCredentialInfo> {
    use crate::utils::settings::get_env_vars;

    // Ollama runs locally, so no credentials are required — just a model name.
    if env_flag_enabled("USE_OLLAMA") {
        return Ok(AiCredentialInfo {
            provider: AiProvider::Ollama,
            model: resolve_model(model_override, "OLLAMA_MODEL", None, "llama2"),
        });
    }

    // OpenAI: resolve the model, then require an API key.
    if env_flag_enabled("USE_OPENAI") {
        let model = resolve_model(model_override, "OPENAI_MODEL", Some("openai"), "gpt-5");

        // Verify API key exists before starting any expensive operation.
        get_env_vars(&["OPENAI_API_KEY", "OPENAI_AUTH_TOKEN"]).map_err(|_| {
            anyhow::anyhow!(
                "OpenAI API key not found.\n\
                 Set one of these environment variables:\n\
                 - OPENAI_API_KEY\n\
                 - OPENAI_AUTH_TOKEN"
            )
        })?;

        return Ok(AiCredentialInfo {
            provider: AiProvider::OpenAi,
            model,
        });
    }

    // AWS Bedrock: resolve the model, then require auth token and base URL.
    if env_flag_enabled("CLAUDE_CODE_USE_BEDROCK") {
        let model = resolve_model(
            model_override,
            "ANTHROPIC_MODEL",
            Some("claude"),
            "claude-sonnet-4-6",
        );

        crate::utils::settings::get_env_var("ANTHROPIC_AUTH_TOKEN").map_err(|_| {
            anyhow::anyhow!(
                "AWS Bedrock authentication not configured.\n\
                 Set ANTHROPIC_AUTH_TOKEN environment variable."
            )
        })?;

        crate::utils::settings::get_env_var("ANTHROPIC_BEDROCK_BASE_URL").map_err(|_| {
            anyhow::anyhow!(
                "AWS Bedrock base URL not configured.\n\
                 Set ANTHROPIC_BEDROCK_BASE_URL environment variable."
            )
        })?;

        return Ok(AiCredentialInfo {
            provider: AiProvider::Bedrock,
            model,
        });
    }

    // Default: Claude API.
    let model = resolve_model(
        model_override,
        "ANTHROPIC_MODEL",
        Some("claude"),
        "claude-sonnet-4-6",
    );

    // Any one of these keys is accepted for the Claude API.
    get_env_vars(&[
        "CLAUDE_API_KEY",
        "ANTHROPIC_API_KEY",
        "ANTHROPIC_AUTH_TOKEN",
    ])
    .map_err(|_| {
        anyhow::anyhow!(
            "Claude API key not found.\n\
             Set one of these environment variables:\n\
             - CLAUDE_API_KEY\n\
             - ANTHROPIC_API_KEY\n\
             - ANTHROPIC_AUTH_TOKEN"
        )
    })?;

    Ok(AiCredentialInfo {
        provider: AiProvider::Claude,
        model,
    })
}

/// Returns `true` when the environment flag `key` is set to exactly `"true"`.
fn env_flag_enabled(key: &str) -> bool {
    crate::utils::settings::get_env_var(key)
        .map(|val| val == "true")
        .unwrap_or(false)
}

/// Resolves the model name with the precedence shared by every provider:
/// explicit CLI override, then the `env_key` environment variable, then the
/// registry default for `registry_provider` (when given), then `fallback`.
fn resolve_model(
    model_override: Option<&str>,
    env_key: &str,
    registry_provider: Option<&str>,
    fallback: &str,
) -> String {
    model_override
        .map(String::from)
        .or_else(|| crate::utils::settings::get_env_var(env_key).ok())
        .unwrap_or_else(|| {
            registry_provider
                .and_then(|provider| get_model_registry().get_default_model(provider))
                .unwrap_or(fallback)
                .to_string()
        })
}

/// Validates that GitHub CLI is available and authenticated.
///
/// This checks:
/// 1. `gh` CLI is installed and in PATH
/// 2. User is authenticated (can access the current repo)
///
/// Use this at the start of commands that require GitHub API access.
pub fn check_github_cli() -> Result<()> {
    // Step 1: make sure the `gh` binary exists and runs at all.
    let version_probe = std::process::Command::new("gh")
        .args(["--version"])
        .output();

    let installed = matches!(&version_probe, Ok(out) if out.status.success());
    if !installed {
        bail!(
            "GitHub CLI (gh) is not installed or not in PATH.\n\
             Please install it from https://cli.github.com/"
        );
    }

    // Step 2: probe repository access, which also exercises authentication.
    let repo_probe = std::process::Command::new("gh")
        .args(["repo", "view", "--json", "name"])
        .output();

    let repo_output = match repo_probe {
        Ok(out) => out,
        Err(e) => bail!("Failed to test GitHub CLI access: {e}"),
    };

    if repo_output.status.success() {
        return Ok(());
    }

    // Distinguish an auth failure from any other repository-access problem.
    let stderr_text = String::from_utf8_lossy(&repo_output.stderr);
    if stderr_text.contains("authentication") || stderr_text.contains("login") {
        bail!(
            "GitHub CLI authentication failed.\n\
             Please run 'gh auth login' or set GITHUB_TOKEN environment variable."
        );
    }
    bail!(
        "GitHub CLI cannot access this repository.\n\
         Error: {}",
        stderr_text.trim()
    )
}

/// Validates that the current directory is in a valid git repository.
///
/// This is a lightweight check that opens the repository without
/// loading any commit data.
pub fn check_git_repository() -> Result<()> {
    // Successfully opening the repository is enough proof; discard the handle.
    crate::git::GitRepository::open()
        .map(|_repo| ())
        .context("Not in a git repository. Please run this command from within a git repository.")
}

/// Validates that the working directory is clean (no uncommitted changes).
///
/// This checks for:
/// - Staged changes
/// - Unstaged modifications
/// - Untracked files (excluding ignored files)
///
/// Use this before operations that require a clean working directory,
/// like amending commits.
pub fn check_working_directory_clean() -> Result<()> {
    let repo = crate::git::GitRepository::open().context("Failed to open git repository")?;

    let status = repo
        .get_working_directory_status()
        .context("Failed to get working directory status")?;

    // Fast path: nothing to report.
    if status.clean {
        return Ok(());
    }

    // Build a per-file listing so the user sees exactly what is dirty.
    let mut message = String::from("Working directory has uncommitted changes:\n");
    for change in &status.untracked_changes {
        message.push_str(&format!("  {} {}\n", change.status, change.file));
    }
    message.push_str("\nPlease commit or stash your changes before proceeding.");
    bail!(message)
}

/// Performs combined preflight check for AI commands.
///
/// Validates:
/// - Git repository access
/// - AI credentials
///
/// Returns information about the AI provider that will be used.
pub fn check_ai_command_prerequisites(model_override: Option<&str>) -> Result<AiCredentialInfo> {
    // Cheap local repository check first; credentials are inspected only after.
    check_git_repository().and_then(|()| check_ai_credentials(model_override))
}

/// Performs combined preflight check for PR creation.
///
/// Validates:
/// - Git repository access
/// - AI credentials
/// - GitHub CLI availability and authentication
///
/// Returns information about the AI provider that will be used.
pub fn check_pr_command_prerequisites(model_override: Option<&str>) -> Result<AiCredentialInfo> {
    // Same order as the AI preflight, with the gh process probe last since
    // it is the most expensive step (spawns an external process).
    check_git_repository()
        .and_then(|()| check_ai_credentials(model_override))
        .and_then(|ai_info| check_github_cli().map(|()| ai_info))
}

#[cfg(test)]
#[allow(clippy::unwrap_used, clippy::expect_used)]
mod tests {
    use super::*;

    use std::env;
    use std::sync::Mutex;
    use std::sync::OnceLock;

    /// Global lock to ensure environment variable tests don't interfere with each other.
    ///
    /// The process environment is global mutable state, so concurrently
    /// running test threads would race without serialization.
    static ENV_TEST_LOCK: OnceLock<Mutex<()>> = OnceLock::new();

    /// Manages environment variables in tests to avoid interference.
    ///
    /// Holds the global lock for its whole lifetime and records the original
    /// value of every variable it touches so `Drop` can restore them.
    struct EnvGuard {
        // Held purely for its lock; released when the guard is dropped.
        _lock: std::sync::MutexGuard<'static, ()>,
        // (key, original value) pairs in the order they were modified.
        vars: Vec<(String, Option<String>)>,
    }

    impl EnvGuard {
        /// Acquires the global env lock and starts with an empty change log.
        fn new() -> Self {
            let lock = ENV_TEST_LOCK.get_or_init(|| Mutex::new(())).lock().unwrap();
            Self {
                _lock: lock,
                vars: Vec::new(),
            }
        }

        /// Sets `key` to `value`, remembering the previous value for restore.
        fn set(&mut self, key: &str, value: &str) {
            let original = env::var(key).ok();
            self.vars.push((key.to_string(), original));
            env::set_var(key, value);
        }

        /// Unsets `key`, remembering the previous value for restore.
        fn remove(&mut self, key: &str) {
            let original = env::var(key).ok();
            self.vars.push((key.to_string(), original));
            env::remove_var(key);
        }
    }

    impl Drop for EnvGuard {
        // Restore every touched variable in reverse order of modification so
        // repeated changes to the same key unwind to the original value.
        fn drop(&mut self) {
            for (key, original_value) in self.vars.drain(..).rev() {
                match original_value {
                    Some(value) => env::set_var(&key, value),
                    None => env::remove_var(&key),
                }
            }
        }
    }

    #[test]
    fn ai_provider_display() {
        // Display strings are user-facing; pin them exactly.
        assert_eq!(format!("{}", AiProvider::Claude), "Claude API");
        assert_eq!(format!("{}", AiProvider::Bedrock), "AWS Bedrock");
        assert_eq!(format!("{}", AiProvider::OpenAi), "OpenAI API");
        assert_eq!(format!("{}", AiProvider::Ollama), "Ollama");
    }

    #[test]
    fn ai_provider_equality() {
        assert_eq!(AiProvider::Claude, AiProvider::Claude);
        assert_ne!(AiProvider::Claude, AiProvider::OpenAi);
        assert_ne!(AiProvider::Bedrock, AiProvider::Ollama);
    }

    #[test]
    fn ai_provider_clone() {
        // AiProvider is Copy, so assignment copies rather than moves.
        let provider = AiProvider::Bedrock;
        let cloned = provider;
        assert_eq!(provider, cloned);
    }

    #[test]
    fn ai_provider_debug() {
        // Debug derives the bare variant name, unlike Display.
        let debug_str = format!("{:?}", AiProvider::Claude);
        assert_eq!(debug_str, "Claude");
    }

    #[test]
    fn ai_credential_info_debug() {
        let info = AiCredentialInfo {
            provider: AiProvider::Ollama,
            model: "llama2".to_string(),
        };
        let debug_str = format!("{info:?}");
        assert!(debug_str.contains("Ollama"));
        assert!(debug_str.contains("llama2"));
    }

    #[test]
    fn claude_default_model_from_registry() {
        let mut guard = EnvGuard::new();
        // Enable Claude API path with a dummy key, no model override
        guard.remove("USE_OPENAI");
        guard.remove("USE_OLLAMA");
        guard.remove("CLAUDE_CODE_USE_BEDROCK");
        guard.remove("ANTHROPIC_MODEL");
        guard.set("ANTHROPIC_API_KEY", "sk-test-dummy");

        let info = check_ai_credentials(None).unwrap();
        assert_eq!(info.provider, AiProvider::Claude);
        // NOTE(review): expected value comes from the model registry's
        // "claude" default — update this if the registry default changes.
        assert_eq!(info.model, "claude-sonnet-4-6");
    }

    #[test]
    fn openai_default_model_from_registry() {
        let mut guard = EnvGuard::new();
        // USE_OPENAI takes effect only when no Ollama flag is set.
        guard.set("USE_OPENAI", "true");
        guard.remove("USE_OLLAMA");
        guard.remove("OPENAI_MODEL");
        guard.set("OPENAI_API_KEY", "sk-test-dummy");

        let info = check_ai_credentials(None).unwrap();
        assert_eq!(info.provider, AiProvider::OpenAi);
        // NOTE(review): expected value comes from the model registry's
        // "openai" default (not the hard-coded fallback) — verify against
        // the registry config if this assertion breaks.
        assert_eq!(info.model, "gpt-5-mini");
    }

    #[test]
    fn bedrock_default_model_from_registry() {
        let mut guard = EnvGuard::new();
        guard.remove("USE_OPENAI");
        guard.remove("USE_OLLAMA");
        guard.set("CLAUDE_CODE_USE_BEDROCK", "true");
        guard.remove("ANTHROPIC_MODEL");
        // Bedrock requires both an auth token and a base URL.
        guard.set("ANTHROPIC_AUTH_TOKEN", "test-token");
        guard.set("ANTHROPIC_BEDROCK_BASE_URL", "https://bedrock.example.com");

        let info = check_ai_credentials(None).unwrap();
        assert_eq!(info.provider, AiProvider::Bedrock);
        // NOTE(review): registry-derived default; see note above about drift.
        assert_eq!(info.model, "claude-sonnet-4-6");
    }

    #[test]
    fn model_override_takes_precedence() {
        let mut guard = EnvGuard::new();
        guard.remove("USE_OPENAI");
        guard.remove("USE_OLLAMA");
        guard.remove("CLAUDE_CODE_USE_BEDROCK");
        guard.remove("ANTHROPIC_MODEL");
        guard.set("ANTHROPIC_API_KEY", "sk-test-dummy");

        // An explicit override must win over env vars and registry defaults.
        let info = check_ai_credentials(Some("claude-opus-4-6")).unwrap();
        assert_eq!(info.model, "claude-opus-4-6");
    }
}
}