abuseipdb_example/common.rs

//! # Common Utility Module for Cortex Examples
//!
//! This module provides shared functionality used across various examples for interacting
//! with the Cortex API. It handles:
//!
//! 1.  **Configuration Setup**: Reads Cortex API endpoint URL and API key from
//!     environment variables (`CORTEX_ENDPOINT` and `CORTEX_API_KEY`) and
//!     initializes a `cortex_client::apis::configuration::Configuration` object.
//!
//! 2.  **Analyzer ID Retrieval**: Fetches all analyzer instances from Cortex and
//!     allows finding a specific analyzer's instance ID by its unique name. This
//!     ID is required when creating analyzer jobs.
//!
//! 3.  **Job Execution and Report Waiting**: Provides a helper function
//!     `run_job_and_wait_for_report` that:
//!     *   Submits an analyzer job.
//!     *   Polls the job status with a fixed number of retries and a delay between attempts.
//!     *   Fetches and returns the job report if the job completes successfully.
//!     *   Handles job failures or timeouts gracefully.
//!
//! ## Environment Variables
//!
//! Ensure the following environment variables are set before running examples that use this module:
//!
//! -   `CORTEX_ENDPOINT`: The full URL to your Cortex API (e.g., `http://localhost:9000/api`).
//! -   `CORTEX_API_KEY`: Your API key for authenticating with Cortex.
//!
//! ## Usage
//!
//! Examples typically call `common::setup_configuration()` at the beginning to get a
//! `Configuration` object. They might use `common::get_analyzer_id_by_name()`
//! to resolve an analyzer's name to its ID, and `common::run_job_and_wait_for_report()`
//! to execute an analysis and retrieve its results.
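//!
//! ## Example
//!
//! A minimal sketch of that flow. It is marked `ignore` because it assumes an example
//! binary that declares `mod common;`, a running Cortex instance, and placeholder
//! values: the analyzer name, the observable, and the `job_request` construction are
//! all illustrative and must be supplied by the caller.
//!
//! ```ignore
//! // Read CORTEX_ENDPOINT / CORTEX_API_KEY and build the client configuration.
//! let config = common::setup_configuration()?;
//!
//! // Resolve the analyzer's instance ID from its name (placeholder name shown).
//! let analyzer_id = common::get_analyzer_id_by_name(&config, "AbuseIPDB_1_0")
//!     .await?
//!     .ok_or("analyzer not found")?;
//!
//! // `job_request` is a cortex_client::models::JobCreateRequest describing the
//! // observable to analyze; its construction is omitted here.
//! let report = common::run_job_and_wait_for_report(
//!     &config,
//!     &analyzer_id,
//!     job_request,
//!     "AbuseIPDB_1_0", // analyzer name, used only for log messages
//!     "1.2.3.4",       // observable, used only for log messages
//! )
//! .await?;
//! println!("{:#?}", report);
//! ```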

use cortex_client::apis::configuration::Configuration;
use std::env;

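/// Reads `CORTEX_ENDPOINT` and `CORTEX_API_KEY` from the environment and builds a
/// `Configuration` for the Cortex API client, returning a descriptive error message
/// if either variable is missing.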
pub fn setup_configuration() -> Result<Configuration, String> {
    let base_path = env::var("CORTEX_ENDPOINT")
        .map_err(|_| "CORTEX_ENDPOINT environment variable not set. Please set it to your Cortex API URL (e.g., http://localhost:9000/api).".to_string())?;

    let api_key = env::var("CORTEX_API_KEY").map_err(|_| {
        "CORTEX_API_KEY environment variable not set. Please set your Cortex API key.".to_string()
    })?;

    let mut configuration = Configuration::new();
    configuration.base_path = base_path;
    configuration.bearer_access_token = Some(api_key);

    Ok(configuration)
}

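/// Fetches all analyzer instances from Cortex and returns the instance ID of the
/// analyzer whose name matches `analyzer_name_to_find`, or `Ok(None)` if no analyzer
/// with that name is found.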
pub async fn get_analyzer_id_by_name(
    config: &Configuration,
    analyzer_name_to_find: &str,
) -> Result<Option<String>, Box<dyn std::error::Error>> {
    println!(
        "Fetching all analyzer instances to find ID for '{}'...",
        analyzer_name_to_find
    );

    let find_request = Some(cortex_client::models::AnalyzerFindRequest::default());

    match cortex_client::apis::analyzer_api::find_analyzers(config, find_request).await {
        Ok(analyzer_instances) => {
            // `find_analyzers` returns a `Vec` of worker (analyzer instance) models directly.
            for analyzer_instance in analyzer_instances {
                if let Some(name) = &analyzer_instance.name {
                    if name == analyzer_name_to_find {
                        if let Some(id) = analyzer_instance._id {
                            println!(
                                "Found analyzer ID '{}' for name '{}'",
                                id, analyzer_name_to_find
                            );
                            return Ok(Some(id));
                        }
                    }
                }
            }
            println!("Analyzer with name '{}' not found.", analyzer_name_to_find);
            Ok(None)
        }
        Err(e) => {
            eprintln!("Error fetching analyzer instances: {:?}", e);
            Err(Box::new(e))
        }
    }
}

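/// Submits a job to the analyzer identified by `analyzer_worker_id`, polls its status
/// (up to 3 attempts, 15 seconds apart), and returns the job report once the job reaches
/// `Success`. Job failure, deletion, an unknown status, exhausted retries, or API errors
/// are all surfaced as `Err`. The `*_for_log` parameters are used only in log output.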
pub async fn run_job_and_wait_for_report(
    config: &cortex_client::apis::configuration::Configuration,
    analyzer_worker_id: &str,
    job_request: cortex_client::models::JobCreateRequest,
    analyzer_name_for_log: &str, // For clearer log messages
    observable_for_log: &str,    // For clearer log messages
) -> Result<cortex_client::models::JobReportResponse, Box<dyn std::error::Error>> {
    use cortex_client::apis::job_api;
    use std::time::Duration;

    println!(
        "Submitting job to analyzer '{}' (ID: '{}') for observable: {}...",
        analyzer_name_for_log, analyzer_worker_id, observable_for_log
    );

    match job_api::create_analyzer_job(config, analyzer_worker_id, job_request).await {
        Ok(job_response) => {
            println!(
                "Successfully created job. Job ID: {}, Status: {:?}",
                job_response._id.as_ref().unwrap_or(&"unknown".to_string()),
                job_response.status
            );

            if let Some(job_id) = job_response._id {
                println!(
                    "\nAttempting to fetch report for job ID: {} with retries...",
                    job_id
                );

                let max_retries = 3;
                let retry_delay = Duration::from_secs(15);

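                // Re-check the job on each attempt: terminal states (Success, Failure,
                // Deleted) return immediately; Waiting/InProgress sleeps and retries.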
                for attempt in 1..=max_retries {
                    println!(
                        "\nAttempt {} of {} to check job status...",
                        attempt, max_retries
                    );
                    match job_api::get_job_by_id(config, &job_id).await {
                        Ok(job_details) => {
                            println!("Current job status: {:?}", job_details.status);
                            match job_details.status {
                                Some(cortex_client::models::job::Status::Success) => {
                                    println!(
                                        "Job status is Success. Attempting to fetch report..."
                                    );
                                    match job_api::get_job_report(config, &job_id).await {
                                        Ok(report_response) => {
                                            println!("\n✅ Successfully fetched job report!");
                                            return Ok(report_response);
                                        }
                                        Err(e) => {
                                            let err_msg = format!("Error fetching job report even though status was Success: {:?}", e);
                                            eprintln!("{}", err_msg);
                                            // Could retry here instead; for now, bail out on a report fetch error.
                                            return Err(err_msg.into());
                                        }
                                    }
                                }
                                Some(cortex_client::models::job::Status::Failure) => {
                                    let err_msg = format!(
                                        "Job failed. Error message: {:?}",
                                        job_details.error_message.unwrap_or_else(|| Some(
                                            "No error message provided.".to_string()
                                        ))
                                    );
                                    eprintln!("{}", err_msg);
                                    return Err(err_msg.into());
                                }
                                Some(cortex_client::models::job::Status::InProgress)
                                | Some(cortex_client::models::job::Status::Waiting) => {
                                    if attempt < max_retries {
                                        println!("Job is still {:?}. Waiting for {} seconds before next attempt...", job_details.status.unwrap(), retry_delay.as_secs());
                                        tokio::time::sleep(retry_delay).await;
                                    } else {
                                        let info_msg = format!("Job did not complete (still {:?}) after {} attempts. Job ID: {}", job_details.status.unwrap(), max_retries, job_id);
                                        println!("{}", info_msg);
                                        // Return an error indicating timeout after retries
                                        return Err(info_msg.into());
                                    }
                                }
                                Some(cortex_client::models::job::Status::Deleted) => {
                                    let err_msg = format!(
                                        "Job has been deleted. Cannot fetch report. Job ID: {}",
                                        job_id
                                    );
                                    eprintln!("{}", err_msg);
                                    return Err(err_msg.into());
                                }
                                None => {
                                    let warn_msg = format!("Job status is unknown. Cannot determine if report is ready. Job ID: {}", job_id);
                                    eprintln!("{}", warn_msg);
                                    if attempt < max_retries {
                                        tokio::time::sleep(retry_delay).await;
                                    } else {
                                        let info_msg = format!("Job status remained unknown after {} attempts. Job ID: {}", max_retries, job_id);
                                        println!("{}", info_msg);
                                        return Err(info_msg.into());
                                    }
                                }
                            }
                        }
                        Err(e) => {
                            eprintln!(
                                "\nError fetching job details on attempt {}: {:?}",
                                attempt, e
                            );
                            if attempt == max_retries {
                                let err_msg = format!("Could not fetch job details after {} attempts for job ID: {}. Error: {:?}", max_retries, job_id, e);
                                eprintln!("{}", err_msg);
                                return Err(err_msg.into());
                            } else {
                                tokio::time::sleep(retry_delay).await;
                            }
                        }
                    }
                }
                let final_err_msg = format!(
                    "\nCould not retrieve job report for job ID {} after {} attempts.",
                    job_id, max_retries
                );
                eprintln!("{}", final_err_msg);
                Err(final_err_msg.into())
            } else {
                let err_msg = "\nJob created, but no job ID was returned in the response. Cannot fetch report.".to_string();
                eprintln!("{}", err_msg);
                Err(err_msg.into())
            }
        }
        Err(e) => {
            let err_msg = format!(
                "\nError creating analyzer job for '{}' on '{}': {:?}",
                analyzer_name_for_log, observable_for_log, e
            );
            eprintln!("{}", err_msg);
            Err(err_msg.into())
        }
    }
}