use crate::diagnose::size_parser::to_bytes;
use crate::{
    bloat, cache_hit, duplicate_indexes, extensions, null_indexes, outliers, ssl_used,
    unused_indexes, Extensions, PgExtrasError,
};
use serde_json::Value;
use sqlx::types::BigDecimal;
use sqlx::{Pool, Postgres};

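// Heuristic thresholds used by the individual checks below.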
const TABLE_CACHE_HIT_MIN: f32 = 0.985;
const INDEX_CACHE_HIT_MIN: f32 = 0.985;
const UNUSED_INDEXES_MIN_SIZE_BYTES: u64 = 1_000_000;
const NULL_INDEXES_MIN_SIZE_MB: &str = "1";
const NULL_MIN_NULL_FRAC_PERCENT: f64 = 50.0;
const BLOAT_MIN_VALUE: f64 = 10.0;
const OUTLIERS_MIN_EXEC_RATIO: f64 = 33.0;

#[derive(Debug, Hash, Eq, PartialEq, Clone, serde::Serialize)]
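/// All diagnostic checks that `run_diagnose` can perform; `Display` renders each
/// variant as a snake_case name (e.g. `table_cache_hit`).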
pub enum Check {
    TableCacheHit,
    IndexCacheHit,
    SslUsed,
    UnusedIndexes,
    NullIndexes,
    Bloat,
    DuplicateIndexes,
    Outliers,
}

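/// Outcome of a single check: whether it passed, plus a human-readable summary.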
#[derive(Debug, Clone, serde::Serialize)]
pub struct CheckResult {
    pub ok: bool,
    pub message: String,
    pub check: Check,
}

impl CheckResult {
    pub fn to_json(&self) -> Value {
        serde_json::to_value(self).unwrap()
    }
}

impl std::fmt::Display for Check {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name = format!("{:?}", self);
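        // Convert the CamelCase variant name to snake_case,
        // e.g. "TableCacheHit" -> "table_cache_hit".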
        let snake_case_name = name
            .chars()
            .flat_map(|c| {
                if c.is_uppercase() {
                    vec!['_', c.to_ascii_lowercase()]
                } else {
                    vec![c]
                }
            })
            .skip(1)
            .collect::<String>();
        write!(f, "{}", snake_case_name)
    }
}

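/// Runs every applicable check against the connected database and collects the results.
///
/// The SSL and outlier checks are added only when the `sslinfo` and `pg_stat_statements`
/// extensions are installed. A minimal usage sketch (assumes an async context that can
/// propagate errors, a reachable Postgres behind `DATABASE_URL`, and whatever path the
/// crate exposes this function under):
///
/// ```ignore
/// let pool = sqlx::postgres::PgPoolOptions::new()
///     .connect(&std::env::var("DATABASE_URL")?)
///     .await?;
///
/// for result in run_diagnose(&pool).await? {
///     println!("[{}] ok={} {}", result.check, result.ok, result.message);
/// }
/// ```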
pub async fn run_diagnose(pool: &Pool<Postgres>) -> Result<Vec<CheckResult>, PgExtrasError> {
    let mut checks = vec![
        Check::TableCacheHit,
        Check::IndexCacheHit,
        Check::UnusedIndexes,
        Check::NullIndexes,
        Check::Bloat,
        Check::DuplicateIndexes,
    ];

    let extensions_data = extensions(pool).await?;

    if extension_enabled(&extensions_data, "sslinfo") {
        checks.push(Check::SslUsed);
    }

    if extension_enabled(&extensions_data, "pg_stat_statements") {
        checks.push(Check::Outliers);
    }

    let mut results = Vec::new();
    for check in checks {
        results.push(run_check(check, pool).await?);
    }

    Ok(results)
}

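/// Returns true when the named extension is present and reports a non-empty installed version.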
fn extension_enabled(extensions_data: &[Extensions], extension_name: &str) -> bool {
    extensions_data
        .iter()
        .any(|e| e.name == extension_name && !e.installed_version.is_empty())
}

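/// Dispatches a single `Check` variant to its implementation.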
async fn run_check(check: Check, pool: &Pool<Postgres>) -> Result<CheckResult, PgExtrasError> {
    match check {
        Check::TableCacheHit => check_table_cache_hit(pool).await,
        Check::IndexCacheHit => check_index_cache_hit(pool).await,
        Check::UnusedIndexes => check_unused_index(pool).await,
        Check::NullIndexes => check_null_index(pool).await,
        Check::Bloat => check_bloat(pool).await,
        Check::DuplicateIndexes => check_duplicate_indexes(pool).await,
        Check::SslUsed => detect_ssl_used(pool).await,
        Check::Outliers => check_outliers(pool).await,
    }
}

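/// Fails when the overall table cache hit ratio is below `TABLE_CACHE_HIT_MIN`.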
async fn check_table_cache_hit(pool: &Pool<Postgres>) -> Result<CheckResult, PgExtrasError> {
    let min_expected = BigDecimal::try_from(TABLE_CACHE_HIT_MIN).unwrap();
    let cache_hit = cache_hit(None, pool).await?;
    let table_cache_hit = cache_hit.iter().find(|item| item.name == "table hit rate");

    let Some(table_hit_rate) = table_cache_hit else {
        return Ok(CheckResult {
            ok: false,
            message: "Table cache hit rate not found".to_string(),
            check: Check::TableCacheHit,
        });
    };

    let ok = table_hit_rate.ratio >= min_expected;
    let message = format!(
        "Table cache hit rate is {}: {:.4}",
        if ok { "correct" } else { "too low" },
        table_hit_rate.ratio
    );

    Ok(CheckResult {
        ok,
        message,
        check: Check::TableCacheHit,
    })
}

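/// Fails when the overall index cache hit ratio is below `INDEX_CACHE_HIT_MIN`.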
async fn check_index_cache_hit(pool: &Pool<Postgres>) -> Result<CheckResult, PgExtrasError> {
    let min_expected = BigDecimal::try_from(INDEX_CACHE_HIT_MIN).unwrap();
    let cache_hit = cache_hit(None, pool).await?;
    let index_cache_hit = cache_hit.iter().find(|item| item.name == "index hit rate");

    let Some(index_hit_rate) = index_cache_hit else {
        return Ok(CheckResult {
            ok: false,
            message: "Index cache hit rate not found".to_string(),
            check: Check::IndexCacheHit,
        });
    };

    let ok = index_hit_rate.ratio >= min_expected;
    let message = format!(
        "Index cache hit rate is {}: {:.4}",
        if ok { "correct" } else { "too low" },
        index_hit_rate.ratio
    );

    Ok(CheckResult {
        ok,
        message,
        check: Check::IndexCacheHit,
    })
}

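/// Reports whether the current database connection uses SSL; fails when it is
/// unencrypted (requires the `sslinfo` extension).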
async fn detect_ssl_used(pool: &Pool<Postgres>) -> Result<CheckResult, PgExtrasError> {
    let ssl_results = ssl_used(pool).await?;
    let Some(ssl_conn) = ssl_results.first() else {
        return Ok(CheckResult {
            ok: false,
            message: "Unable to get connection information.".to_string(),
            check: Check::SslUsed,
        });
    };

    let message = if ssl_conn.ssl_used {
        "Database client is using a secure SSL connection."
    } else {
        "Database client is using an unencrypted connection."
    };

    Ok(CheckResult {
        ok: ssl_conn.ssl_used,
        message: message.to_string(),
        check: Check::SslUsed,
    })
}

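/// Fails when unused indexes of at least `UNUSED_INDEXES_MIN_SIZE_BYTES` are found.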
async fn check_unused_index(pool: &Pool<Postgres>) -> Result<CheckResult, PgExtrasError> {
    let indexes = unused_indexes(None, pool)
        .await?
        .into_iter()
        .filter(|i| to_bytes(&i.index_size).unwrap_or(0) >= UNUSED_INDEXES_MIN_SIZE_BYTES)
        .collect::<Vec<_>>();

    if indexes.is_empty() {
        return Ok(CheckResult {
            ok: true,
            message: "No unused indexes detected.".to_string(),
            check: Check::UnusedIndexes,
        });
    }

    let print_indexes = indexes
        .iter()
        .map(|i| format!("'{}' on '{}' size {}", i.index, i.table, i.index_size))
        .collect::<Vec<_>>()
        .join(",\n");

    Ok(CheckResult {
        ok: false,
        message: format!("Unused indexes detected:\n{}", print_indexes),
        check: Check::UnusedIndexes,
    })
}

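/// Fails when indexes of at least `NULL_INDEXES_MIN_SIZE_MB` MB contain at least
/// `NULL_MIN_NULL_FRAC_PERCENT`% NULL values.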
async fn check_null_index(pool: &Pool<Postgres>) -> Result<CheckResult, PgExtrasError> {
    let indexes = null_indexes(Some(NULL_INDEXES_MIN_SIZE_MB.to_string()), pool)
        .await?
        .into_iter()
        .filter(|i| {
            i.null_frac
                .trim_end_matches('%')
                .parse::<f64>()
                .unwrap_or(0.0)
                >= NULL_MIN_NULL_FRAC_PERCENT
        })
        .collect::<Vec<_>>();

    if indexes.is_empty() {
        return Ok(CheckResult {
            ok: true,
            message: "No null indexes detected.".to_string(),
            check: Check::NullIndexes,
        });
    }

    let print_indexes = indexes
        .iter()
        .map(|i| {
            format!(
                "'{}' size {} null values fraction {}",
                i.index, i.index_size, i.null_frac
            )
        })
        .collect::<Vec<_>>()
        .join(",\n");

    Ok(CheckResult {
        ok: false,
        message: format!("Null indexes detected:\n{}", print_indexes),
        check: Check::NullIndexes,
    })
}

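/// Fails when any table or index reports a bloat factor of `BLOAT_MIN_VALUE` or more.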
async fn check_bloat(pool: &Pool<Postgres>) -> Result<CheckResult, PgExtrasError> {
    let bloat_data = bloat(pool)
        .await?
        .into_iter()
        .filter(|b| b.bloat >= BigDecimal::try_from(BLOAT_MIN_VALUE).unwrap())
        .collect::<Vec<_>>();

    if bloat_data.is_empty() {
        return Ok(CheckResult {
            ok: true,
            message: "No bloat detected.".to_string(),
            check: Check::Bloat,
        });
    }

    let print_bloat = bloat_data
        .iter()
        .map(|b| format!("'{}' bloat {} waste {}", b.object_name, b.bloat, b.waste))
        .collect::<Vec<_>>()
        .join(",\n");

    Ok(CheckResult {
        ok: false,
        message: format!("Bloat detected:\n{}", print_bloat),
        check: Check::Bloat,
    })
}

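/// Fails when duplicate (identical) indexes are detected.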
async fn check_duplicate_indexes(pool: &Pool<Postgres>) -> Result<CheckResult, PgExtrasError> {
    let indexes = duplicate_indexes(pool).await?;

    if indexes.is_empty() {
        return Ok(CheckResult {
            ok: true,
            message: "No duplicate indexes detected.".to_string(),
            check: Check::DuplicateIndexes,
        });
    }

    let print_indexes = indexes
        .iter()
        .map(|i| {
            format!(
                "'{}' of size {} is identical to '{}'",
                i.idx1, i.size, i.idx2
            )
        })
        .collect::<Vec<_>>()
        .join(",\n");

    Ok(CheckResult {
        ok: false,
        message: format!("Duplicate indexes detected:\n{}", print_indexes),
        check: Check::DuplicateIndexes,
    })
}

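/// Fails when any single query accounts for at least `OUTLIERS_MIN_EXEC_RATIO`% of
/// total execution time (requires `pg_stat_statements`).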
async fn check_outliers(pool: &Pool<Postgres>) -> Result<CheckResult, PgExtrasError> {
    let queries = outliers(pool)
        .await?
        .into_iter()
        .filter(|q| {
            // Treat an unparsable percentage as 0.0 instead of panicking,
            // matching how the other checks handle malformed values.
            q.prop_exec_time.replace("%", "").parse::<f64>().unwrap_or(0.0)
                >= OUTLIERS_MIN_EXEC_RATIO
        })
        .collect::<Vec<_>>();

    if queries.is_empty() {
        return Ok(CheckResult {
            ok: true,
            message: "No queries using significant execution ratio detected.".to_string(),
            check: Check::Outliers,
        });
    }

    let print_queries = queries
        .iter()
        .map(|q| {
            format!(
                "'{}...' called {} times, using {} of total exec time.",
                q.query.chars().take(30).collect::<String>(),
                q.ncalls,
                q.prop_exec_time
            )
        })
        .collect::<Vec<_>>()
        .join(",\n");

    Ok(CheckResult {
        ok: false,
        message: format!(
            "Queries using significant execution ratio detected:\n{}",
            print_queries
        ),
        check: Check::Outliers,
    })
}