// image_anonymizer/face/detection.rs
detection.rs1use anyhow::{Context, Result};
2use base64::{Engine as _, engine::general_purpose};
3use reqwest::blocking::Client;
4use serde::{Deserialize, Serialize};
5use std::env;
6use std::path::Path;
7use tracing::{debug, error};
8
/// Top-level body returned by the Google Cloud Vision `images:annotate`
/// endpoint: one entry in `responses` per image submitted in the batch.
#[derive(Debug, Deserialize)]
pub struct FaceDetectionResponse {
    /// Per-image results, in the same order as the submitted requests.
    pub responses: Vec<Response>,
}
13
14#[derive(Debug, Deserialize)]
15pub struct Response {
16 #[serde(default)]
17 #[serde(rename = "faceAnnotations")]
18 pub face_annotations: Vec<FaceAnnotation>,
19}
20
21#[derive(Debug, Deserialize, Clone)]
22pub struct FaceAnnotation {
23 #[serde(rename = "boundingPoly")]
25 pub bounding_poly: Option<BoundingPoly>,
26 pub landmarks: Option<Vec<Landmark>>,
28 #[serde(rename = "detectionConfidence")]
30 pub detection_confidence: Option<f32>,
31}
32
/// A single facial landmark and its position.
#[derive(Debug, Deserialize, Clone)]
pub struct Landmark {
    /// Landmark kind as reported by the API. The JSON key is `type`,
    /// which is a Rust keyword — hence the serde rename.
    #[serde(rename = "type")]
    pub landmark_type: String,
    /// Location of the landmark.
    pub position: Position,
}
39
/// A 3-D point as returned by the Vision API.
// NOTE(review): `z` is presumably depth relative to the image plane —
// confirm against the Vision API reference before relying on it.
#[derive(Debug, Deserialize, Clone)]
pub struct Position {
    pub x: f32,
    pub y: f32,
    pub z: f32,
}
46
/// Polygon described by a list of integer-pixel vertices
/// (the Vision API's `BoundingPoly` shape).
#[derive(Debug, Deserialize, Clone, Default)]
pub struct BoundingPoly {
    /// Polygon corners; `Default` yields an empty vertex list.
    pub vertices: Vec<Vertex>,
}
51
/// A single polygon corner.
#[derive(Debug, Deserialize, Clone)]
pub struct Vertex {
    // `default` so a missing coordinate deserializes as 0 —
    // presumably because the API omits zero-valued coordinates;
    // TODO(review): confirm against the Vision API reference.
    #[serde(default)]
    pub x: i32,
    #[serde(default)]
    pub y: i32,
}
59
/// Top-level request body for the `images:annotate` endpoint:
/// a batch of per-image annotation requests.
#[derive(Debug, Serialize)]
struct FaceDetectionRequest {
    requests: Vec<Request>,
}
64
/// One image plus the annotation features to run on it.
#[derive(Debug, Serialize)]
struct Request {
    image: Image,
    features: Vec<Feature>,
}
70
/// Inline image payload for the request.
#[derive(Debug, Serialize)]
struct Image {
    /// Base64-encoded image bytes (standard alphabet).
    content: String,
}
75
76#[derive(Debug, Serialize)]
77struct Feature {
78 #[serde(rename = "type")]
79 feature_type: String,
80 max_results: i32,
81}
82
83pub fn detect_faces_with_api(image_path: &Path) -> Result<Vec<FaceAnnotation>> {
98 let api_key = env::var("GCP_API_KEY").context("GCP_API_KEY environment variable not set")?;
99 debug!("image_path: {}", image_path.display());
100
101 let image_data = std::fs::read(image_path).context("Failed to read image file")?;
102 let base64_image = general_purpose::STANDARD.encode(&image_data);
103
104 let request = FaceDetectionRequest {
105 requests: vec![Request {
106 image: Image {
107 content: base64_image,
108 },
109 features: vec![Feature {
110 feature_type: "FACE_DETECTION".to_string(),
111 max_results: 100,
112 }],
113 }],
114 };
115
116 let client = Client::new();
117 let response = client
118 .post(&format!(
119 "https://vision.googleapis.com/v1/images:annotate?key={}",
120 api_key
121 ))
122 .json(&request)
123 .send()
124 .context("Failed to send request to Google Cloud Vision API")?;
125
126 let response_text = response.text().context("Failed to get response text")?;
127
128 if response_text.len() > 1000 {
129 debug!(
130 "Response text (first 1000 chars): {}",
131 &response_text[..1000]
132 );
133 debug!("Response text length: {}", response_text.len());
134 } else {
135 debug!("Response text: {}", &response_text);
136 }
137
138 let response_body: FaceDetectionResponse = serde_json::from_str(&response_text)
139 .context("Failed to parse Google Cloud Vision API response")?;
140
141 if response_body.responses.is_empty() {
142 error!("No responses from Google Cloud Vision API");
143 anyhow::bail!("No responses from Google Cloud Vision API");
144 }
145
146 let annotations = response_body.responses[0].face_annotations.clone();
147 debug!("Detected {} face annotations", annotations.len());
148
149 Ok(annotations)
150}