// ceph_async/cmd.rs

1//! Ceph has a command system defined
//! in <https://github.com/ceph/ceph/blob/master/src/mon/MonCommands.h>.
//! The cli commands mostly use this json based system.  This allows you to
//! make the exact same calls without having to shell out with
//! `std::process::Command`.
6//! Many of the commands defined in this file have a simulate parameter to
7//! allow you to test without actually calling Ceph.
8extern crate serde_json;
9
10use crate::ceph::Rados;
11use crate::error::{RadosError, RadosResult};
12use crate::CephVersion;
13use std::collections::HashMap;
14use std::fmt;
15use std::str::FromStr;
16use uuid::Uuid;
17
#[derive(Deserialize, Debug)]
/// A single monitor entry as it appears in the `mons` array of a
/// `mon dump` reply (see [`MonDump`]).
pub struct CephMon {
    pub rank: i64,
    pub name: String,
    pub addr: String,
}
24
#[derive(Deserialize, Debug)]
/// One node of the CRUSH tree (see [`CrushTree`]).
pub struct CrushNode {
    pub id: i64,
    pub name: String,
    // "type" is a Rust keyword, so the JSON field is renamed on the way in.
    #[serde(rename = "type")]
    pub crush_type: String,
    pub type_id: i64,
    // The remaining fields are Option because not every node in the tree
    // reports them.
    pub children: Option<Vec<i64>>,
    pub crush_weight: Option<f64>,
    pub depth: Option<i64>,
    pub exists: Option<i64>,
    pub status: Option<String>,
    pub reweight: Option<f64>,
    pub primary_affinity: Option<f64>,
}
40
#[derive(Deserialize, Debug)]
/// Top-level shape of a CRUSH tree reply: the flattened node list plus
/// any stray entries reported outside the tree.
pub struct CrushTree {
    pub nodes: Vec<CrushNode>,
    pub stray: Vec<String>,
}
46
#[derive(Deserialize, Debug, Clone)]
#[serde(untagged)]
/// Memory counters from daemon metadata.  Untagged so serde accepts the
/// two counters either as numbers or as strings (NOTE(review): presumably
/// different Ceph releases encode them differently — confirm).
pub enum Mem {
    MemNum {
        mem_swap_kb: u64,
        mem_total_kb: u64,
    },
    MemStr {
        mem_swap_kb: String,
        mem_total_kb: String,
    },
}
59
#[derive(Deserialize, Debug)]
/// Manager Metadata
///
/// Fields marked `Option` only appear on some Ceph releases (see the
/// per-field notes).
pub struct MgrMetadata {
    // Some releases call this field "name"; accept both spellings.
    #[serde(alias = "name")]
    pub id: String,
    pub addr: Option<String>, //nautilous
    pub addrs: Option<String>,
    pub arch: String,
    pub ceph_release: Option<String>,
    pub ceph_version: String,
    pub ceph_version_short: Option<String>,
    pub cpu: String,
    pub distro: String,
    pub distro_description: String,
    pub distro_version: String,
    pub hostname: String,
    pub kernel_description: String,
    pub kernel_version: String,
    // The memory counters live at the same JSON level as the other
    // fields, hence the flatten.
    #[serde(flatten)]
    pub mem: Mem,
    pub os: String,
    // other metadata not captured through the above attributes
    #[serde(flatten)]
    other_meta: Option<HashMap<String, String>>,
}
85
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "lowercase")]
/// OSD object store backend, deserialized from the lowercase strings
/// "bluestore" / "filestore".
pub enum ObjectStoreType {
    Bluestore,
    Filestore,
}
92
#[derive(Deserialize, Debug, Clone)]
#[serde(untagged, rename_all = "lowercase")]
/// Backend-specific OSD metadata.  Untagged: serde chooses the variant by
/// which field set matches the JSON.  The many `Option` fields track
/// release differences (see per-field notes).
pub enum ObjectStoreMeta {
    Bluestore {
        bluefs: String,
        bluefs_db_access_mode: String,
        bluefs_db_block_size: String,
        bluefs_db_dev: Option<String>, //Not in Nautilous
        bluefs_db_dev_node: String,
        bluefs_db_driver: String,
        bluefs_db_model: Option<String>, //Not in Nautilous
        bluefs_db_partition_path: String,
        bluefs_db_rotational: String,
        bluefs_db_serial: Option<String>, //Not in Nautilous
        bluefs_db_size: String,
        bluefs_db_support_discard: Option<String>, //Nautilous
        bluefs_db_type: String,
        bluefs_single_shared_device: String,
        bluefs_slow_access_mode: Option<String>, //Not in Nautilous
        bluefs_slow_block_size: Option<String>,  //Not in Nautilous
        bluefs_slow_dev: Option<String>,         //Not in Nautilous
        bluefs_slow_dev_node: Option<String>,    //Not in Nautilous
        bluefs_slow_driver: Option<String>,      //Not in Nautilous
        bluefs_slow_model: Option<String>,       //Not in Nautilous
        bluefs_slow_partition_path: Option<String>, //Not in Nautilous
        bluefs_slow_rotational: Option<String>,  //Not in Nautilous
        bluefs_slow_size: Option<String>,        //Not in Nautilous
        bluefs_slow_type: Option<String>,        //Not in Nautilous
        bluefs_wal_access_mode: Option<String>,  //Not in Nautilous
        bluefs_wal_block_size: Option<String>,   //Not in Nautilous
        bluefs_wal_dev: Option<String>,          //Not in Nautilous
        bluefs_wal_dev_node: Option<String>,     //Not in Nautilous
        bluefs_wal_driver: Option<String>,       //Not in Nautilous
        bluefs_wal_model: Option<String>,        //Not in Nautilous
        bluefs_wal_partition_path: Option<String>, //Not in Nautilous
        bluefs_wal_rotational: Option<String>,   //Not in Nautilous
        bluefs_wal_serial: Option<String>,       //Not in Nautilous
        bluefs_wal_size: Option<String>,         //Not in Nautilous
        bluefs_wal_type: Option<String>,         //Not in Nautilous
        bluestore_bdev_access_mode: String,
        bluestore_bdev_block_size: String,
        bluestore_bdev_dev: Option<String>, //Not in Nautilous
        bluestore_bdev_dev_node: String,
        bluestore_bdev_driver: String,
        bluestore_bdev_model: Option<String>, //Not in Nautilous
        bluestore_bdev_partition_path: String,
        bluestore_bdev_rotational: String,
        bluestore_bdev_size: String,
        bluestore_bdev_support_discard: Option<String>, //Nautilous
        bluestore_bdev_type: String,
    },
    Filestore {
        backend_filestore_dev_node: String,
        backend_filestore_partition_path: String,
        filestore_backend: String,
        filestore_f_type: String,
    },
}
151
#[derive(Deserialize, Debug, Clone)]
/// Metadata reported for a single OSD.  `Option` fields only appear on
/// some Ceph releases (see the per-field notes).
pub struct OsdMetadata {
    pub id: u64,
    pub arch: String,
    pub back_addr: String,
    pub back_iface: Option<String>,   //not in Jewel
    pub ceph_release: Option<String>, //Nautilous
    pub ceph_version: String,
    pub ceph_version_short: Option<String>, //Nautilous
    pub cpu: String,
    pub default_device_class: Option<String>, //not in Jewel
    pub device_ids: Option<String>,           //Nautilous
    pub devices: Option<String>,              //Nautilous
    pub distro: String,
    pub distro_description: String,
    pub distro_version: String,
    pub front_addr: String,
    pub front_iface: Option<String>, //not in Jewel
    pub hb_back_addr: String,
    pub hb_front_addr: String,
    pub hostname: String,
    pub journal_rotational: Option<String>, //not in Jewel
    pub kernel_description: String,
    pub kernel_version: String,
    pub mem_swap_kb: String,
    pub mem_total_kb: String,
    pub os: String,
    pub osd_data: String,
    pub osd_journal: Option<String>, //not usually in bluestore
    pub osd_objectstore: ObjectStoreType,
    pub rotational: Option<String>, //Not in Jewel
    // Backend-specific fields live at the same JSON level, hence flatten.
    #[serde(flatten)]
    pub objectstore_meta: ObjectStoreMeta,
    // other metadata not captured through the above attributes
    #[serde(flatten)]
    other_meta: Option<HashMap<String, String>>,
}
189
#[derive(Deserialize, Debug, Clone)]
/// A placement-group state name together with the number of PGs
/// currently in that state.
pub struct PgState {
    pub name: String,
    pub num: u64,
}
195
#[derive(Deserialize, Debug, Clone)]
/// Aggregate placement-group statistics (the payload of [`PgStat`]).
/// Most counters are `Option` because different Ceph releases report
/// different subsets (see the per-field notes).
pub struct PgSummary {
    pub num_pg_by_state: Vec<PgState>,
    pub num_pgs: u64,
    pub num_bytes: u64,
    pub total_bytes: Option<u64>,          //Nautilous
    pub total_avail_bytes: Option<u64>,    //Nautilous
    pub total_used_bytes: Option<u64>,     //Nautilous
    pub total_used_raw_bytes: Option<u64>, //Nautilous
    pub raw_bytes_used: Option<u64>,
    pub raw_bytes_avail: Option<u64>,
    pub raw_bytes: Option<u64>,
    pub read_bytes_sec: Option<u64>,
    pub write_bytes_sec: Option<u64>,
    pub io_sec: Option<u64>,
    pub version: Option<u64>, //Jewel
    pub degraded_objects: Option<u64>,
    pub degraded_total: Option<u64>,
    pub degraded_ratio: Option<f64>,
    pub misplaced_objects: Option<u64>,
    pub misplaced_total: Option<u64>,
    pub misplaced_ratio: Option<f64>,
    pub recovering_objects_per_sec: Option<u64>,
    pub recovering_bytes_per_sec: Option<u64>,
    pub recovering_keys_per_sec: Option<u64>,
    pub num_objects_recovered: Option<u64>,
    pub num_bytes_recovered: Option<u64>,
    pub num_keys_recovered: Option<u64>,
    // other metadata not captured through the above attributes
    #[serde(flatten)]
    other_meta: Option<HashMap<String, String>>,
}
228
#[derive(Deserialize, Debug, Clone)]
#[serde(untagged)]
/// A `pg stat` reply.  The summary arrives either wrapped in a
/// `{pg_ready, pg_summary}` object or with its fields at the top level;
/// the untagged enum accepts both shapes.
pub enum PgStat {
    Wrapped {
        pg_ready: bool,
        pg_summary: PgSummary,
    },
    UnWrapped {
        // Top-level summary fields map straight into the struct.
        #[serde(flatten)]
        pg_summary: PgSummary,
    },
}
241
#[derive(Deserialize, Debug)]
/// A standby manager daemon as listed in [`MgrDump`].
pub struct MgrStandby {
    pub gid: u64,
    pub name: String,
    pub available_modules: Vec<String>,
}
248
#[derive(Deserialize, Debug)]
/// The manager map: the active mgr, its standbys, and the module lists.
pub struct MgrDump {
    pub epoch: u64,
    pub active_gid: u64,
    pub active_name: String,
    pub active_addr: String,
    pub available: bool,
    pub standbys: Vec<MgrStandby>,
    pub modules: Vec<String>,
    pub available_modules: Vec<String>,
}
260
#[derive(Deserialize, Debug)]
/// A `mon dump` reply: the monitor map plus the current quorum ranks.
pub struct MonDump {
    pub epoch: i64,
    pub fsid: String,
    pub modified: String,
    pub created: String,
    pub mons: Vec<CephMon>,
    pub quorum: Vec<i64>,
}
270
#[derive(Deserialize, Debug)]
/// Status of a single monitor: its identity, election state, quorum view,
/// and the monitor map it holds.
pub struct MonStatus {
    pub name: String,
    pub rank: u64,
    pub state: MonState,
    pub election_epoch: u64,
    pub quorum: Vec<u64>,
    pub outside_quorum: Vec<String>,
    pub extra_probe_peers: Vec<ExtraProbePeer>,
    pub sync_provider: Vec<u64>,
    pub monmap: MonMap,
}
283
#[derive(Deserialize, Debug)]
#[serde(untagged)]
/// An extra probe peer: either a structured address-vector object or a
/// plain address string, depending on what the monitor sends.
pub enum ExtraProbePeer {
    Present { addrvec: Vec<AddrVec> },
    Absent(String),
}
290
#[derive(Deserialize, Debug)]
/// One entry of a monitor address vector.
pub struct AddrVec {
    // "type" is a Rust keyword, hence the raw-identifier field name.
    r#type: String,
    addr: String,
    nonce: i32,
}
297
#[derive(Deserialize, Debug)]
/// The monitor map embedded in a [`MonStatus`] reply.
pub struct MonMap {
    pub epoch: u64,
    pub fsid: Uuid,
    pub modified: String,
    pub created: String,
    pub mons: Vec<Mon>,
}
306
#[derive(Deserialize, Debug)]
/// One monitor entry inside a [`MonMap`].
pub struct Mon {
    pub rank: u64,
    pub name: String,
    pub addr: String,
}
313
#[derive(Deserialize, Debug)]
/// Overall health level, mapped from Ceph's `HEALTH_*` strings.
pub enum HealthStatus {
    #[serde(rename = "HEALTH_ERR")]
    Err,
    #[serde(rename = "HEALTH_WARN")]
    Warn,
    #[serde(rename = "HEALTH_OK")]
    Ok,
}
323
#[derive(Deserialize, Debug)]
/// A `health` command reply (see [`cluster_health`]).
pub struct ClusterHealth {
    pub health: Health,
    pub timechecks: TimeChecks,
    pub summary: Vec<String>,
    pub overall_status: HealthStatus,
    pub detail: Vec<String>,
}
332
#[derive(Deserialize, Debug)]
/// Per-service health section of a [`ClusterHealth`] reply.
pub struct Health {
    pub health_services: Vec<ServiceHealth>,
}
337
#[derive(Deserialize, Debug)]
/// Clock-skew check results across the monitors.
pub struct TimeChecks {
    pub epoch: u64,
    pub round: u64,
    pub round_status: RoundStatus,
    pub mons: Vec<MonTimeChecks>,
}
345
#[derive(Deserialize, Debug)]
/// Clock skew and latency measured for a single monitor.
pub struct MonTimeChecks {
    pub name: String,
    pub skew: f64,
    pub latency: f64,
    pub health: HealthStatus,
}
353
#[derive(Deserialize, Debug)]
/// Health entries for the monitor service inside [`Health`].
pub struct ServiceHealth {
    pub mons: Vec<MonHealth>,
}
358
#[derive(Deserialize, Debug)]
/// Disk-usage and store health figures for one monitor.
pub struct MonHealth {
    pub name: String,
    pub kb_total: u64,
    pub kb_used: u64,
    pub kb_avail: u64,
    pub avail_percent: u8,
    pub last_updated: String,
    pub store_stats: StoreStats,
    pub health: HealthStatus,
}
370
#[derive(Deserialize, Debug)]
/// Size breakdown of a monitor's backing store.
pub struct StoreStats {
    pub bytes_total: u64,
    pub bytes_sst: u64,
    pub bytes_log: u64,
    pub bytes_misc: u64,
    pub last_updated: String,
}
379
#[derive(Deserialize, Debug)]
/// State of a timecheck round (see [`TimeChecks`]).
pub enum RoundStatus {
    #[serde(rename = "finished")]
    Finished,
    #[serde(rename = "on-going")]
    OnGoing,
}
387
#[derive(Deserialize, Debug)]
/// Lifecycle state of a monitor, from the lowercase strings in a
/// [`MonStatus`] reply.
pub enum MonState {
    #[serde(rename = "probing")]
    Probing,
    #[serde(rename = "synchronizing")]
    Synchronizing,
    #[serde(rename = "electing")]
    Electing,
    #[serde(rename = "leader")]
    Leader,
    #[serde(rename = "peon")]
    Peon,
    #[serde(rename = "shutdown")]
    Shutdown,
}
403
#[derive(Deserialize, Debug, Serialize)]
/// Cluster-wide OSD flags usable with `osd set`/`osd unset` (see
/// [`osd_set`]).  The serde rename on each variant is the exact flag
/// string the mon command interface expects.
pub enum OsdOption {
    #[serde(rename = "full")]
    Full,
    #[serde(rename = "pause")]
    Pause,
    #[serde(rename = "noup")]
    NoUp,
    #[serde(rename = "nodown")]
    NoDown,
    #[serde(rename = "noout")]
    NoOut,
    #[serde(rename = "noin")]
    NoIn,
    #[serde(rename = "nobackfill")]
    NoBackfill,
    #[serde(rename = "norebalance")]
    NoRebalance,
    #[serde(rename = "norecover")]
    NoRecover,
    #[serde(rename = "noscrub")]
    NoScrub,
    #[serde(rename = "nodeep-scrub")]
    NoDeepScrub,
    #[serde(rename = "notieragent")]
    NoTierAgent,
    #[serde(rename = "sortbitwise")]
    SortBitwise,
    #[serde(rename = "recovery_deletes")]
    RecoveryDeletes,
    #[serde(rename = "require_jewel_osds")]
    RequireJewelOsds,
    #[serde(rename = "require_kraken_osds")]
    RequireKrakenOsds,
}
439
440impl fmt::Display for OsdOption {
441    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
442        match *self {
443            OsdOption::Full => write!(f, "full"),
444            OsdOption::Pause => write!(f, "pause"),
445            OsdOption::NoUp => write!(f, "noup"),
446            OsdOption::NoDown => write!(f, "nodown"),
447            OsdOption::NoOut => write!(f, "noout"),
448            OsdOption::NoIn => write!(f, "noin"),
449            OsdOption::NoBackfill => write!(f, "nobackfill"),
450            OsdOption::NoRebalance => write!(f, "norebalance"),
451            OsdOption::NoRecover => write!(f, "norecover"),
452            OsdOption::NoScrub => write!(f, "noscrub"),
453            OsdOption::NoDeepScrub => write!(f, "nodeep-scrub"),
454            OsdOption::NoTierAgent => write!(f, "notieragent"),
455            OsdOption::SortBitwise => write!(f, "sortbitwise"),
456            OsdOption::RecoveryDeletes => write!(f, "recovery_deletes"),
457            OsdOption::RequireJewelOsds => write!(f, "require_jewel_osds"),
458            OsdOption::RequireKrakenOsds => write!(f, "require_kraken_osds"),
459        }
460    }
461}
462
463impl AsRef<str> for OsdOption {
464    fn as_ref(&self) -> &str {
465        match *self {
466            OsdOption::Full => "full",
467            OsdOption::Pause => "pause",
468            OsdOption::NoUp => "noup",
469            OsdOption::NoDown => "nodown",
470            OsdOption::NoOut => "noout",
471            OsdOption::NoIn => "noin",
472            OsdOption::NoBackfill => "nobackfill",
473            OsdOption::NoRebalance => "norebalance",
474            OsdOption::NoRecover => "norecover",
475            OsdOption::NoScrub => "noscrub",
476            OsdOption::NoDeepScrub => "nodeep-scrub",
477            OsdOption::NoTierAgent => "notieragent",
478            OsdOption::SortBitwise => "sortbitwise",
479            OsdOption::RecoveryDeletes => "recovery_deletes",
480            OsdOption::RequireJewelOsds => "require_jewel_osds",
481            OsdOption::RequireKrakenOsds => "require_kraken_osds",
482        }
483    }
484}
485
486#[derive(Deserialize, Debug, Serialize)]
487pub enum PoolOption {
488    #[serde(rename = "size")]
489    Size,
490    #[serde(rename = "min_size")]
491    MinSize,
492    #[serde(rename = "crash_replay_interval")]
493    CrashReplayInterval,
494    #[serde(rename = "pg_num")]
495    PgNum,
496    #[serde(rename = "pgp_num")]
497    PgpNum,
498    #[serde(rename = "crush_rule")]
499    CrushRule,
500    #[serde(rename = "hashpspool")]
501    HashPsPool,
502    #[serde(rename = "nodelete")]
503    NoDelete,
504    #[serde(rename = "nopgchange")]
505    NoPgChange,
506    #[serde(rename = "nosizechange")]
507    NoSizeChange,
508    #[serde(rename = "write_fadvice_dontneed")]
509    WriteFadviceDontNeed,
510    #[serde(rename = "noscrub")]
511    NoScrub,
512    #[serde(rename = "nodeep-scrub")]
513    NoDeepScrub,
514    #[serde(rename = "hit_set_type")]
515    HitSetType,
516    #[serde(rename = "hit_set_period")]
517    HitSetPeriod,
518    #[serde(rename = "hit_set_count")]
519    HitSetCount,
520    #[serde(rename = "hit_set_fpp")]
521    HitSetFpp,
522    #[serde(rename = "use_gmt_hitset")]
523    UseGmtHitset,
524    #[serde(rename = "target_max_bytes")]
525    TargetMaxBytes,
526    #[serde(rename = "target_max_objects")]
527    TargetMaxObjects,
528    #[serde(rename = "cache_target_dirty_ratio")]
529    CacheTargetDirtyRatio,
530    #[serde(rename = "cache_target_dirty_high_ratio")]
531    CacheTargetDirtyHighRatio,
532    #[serde(rename = "cache_target_full_ratio")]
533    CacheTargetFullRatio,
534    #[serde(rename = "cache_min_flush_age")]
535    CacheMinFlushAge,
536    #[serde(rename = "cachem_min_evict_age")]
537    CacheMinEvictAge,
538    #[serde(rename = "auid")]
539    Auid,
540    #[serde(rename = "min_read_recency_for_promote")]
541    MinReadRecencyForPromote,
542    #[serde(rename = "min_write_recency_for_promote")]
543    MinWriteRecencyForPromte,
544    #[serde(rename = "fast_read")]
545    FastRead,
546    #[serde(rename = "hit_set_decay_rate")]
547    HitSetGradeDecayRate,
548    #[serde(rename = "hit_set_search_last_n")]
549    HitSetSearchLastN,
550    #[serde(rename = "scrub_min_interval")]
551    ScrubMinInterval,
552    #[serde(rename = "scrub_max_interval")]
553    ScrubMaxInterval,
554    #[serde(rename = "deep_scrub_interval")]
555    DeepScrubInterval,
556    #[serde(rename = "recovery_priority")]
557    RecoveryPriority,
558    #[serde(rename = "recovery_op_priority")]
559    RecoveryOpPriority,
560    #[serde(rename = "scrub_priority")]
561    ScrubPriority,
562    #[serde(rename = "compression_mode")]
563    CompressionMode,
564    #[serde(rename = "compression_algorithm")]
565    CompressionAlgorithm,
566    #[serde(rename = "compression_required_ratio")]
567    CompressionRequiredRatio,
568    #[serde(rename = "compression_max_blob_size")]
569    CompressionMaxBlobSize,
570    #[serde(rename = "compression_min_blob_size")]
571    CompressionMinBlobSize,
572    #[serde(rename = "csum_type")]
573    CsumType,
574    #[serde(rename = "csum_min_block")]
575    CsumMinBlock,
576    #[serde(rename = "csum_max_block")]
577    CsumMaxBlock,
578    #[serde(rename = "allow_ec_overwrites")]
579    AllocEcOverwrites,
580}
581
582impl fmt::Display for PoolOption {
583    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
584        match *self {
585            PoolOption::Size => write!(f, "size"),
586            PoolOption::MinSize => write!(f, "min_size"),
587            PoolOption::CrashReplayInterval => write!(f, "crash_replay_interval"),
588            PoolOption::PgNum => write!(f, "pg_num"),
589            PoolOption::PgpNum => write!(f, "pgp_num"),
590            PoolOption::CrushRule => write!(f, "crush_rule"),
591            PoolOption::HashPsPool => write!(f, "hashpspool"),
592            PoolOption::NoDelete => write!(f, "nodelete"),
593            PoolOption::NoPgChange => write!(f, "nopgchange"),
594            PoolOption::NoSizeChange => write!(f, "nosizechange"),
595            PoolOption::WriteFadviceDontNeed => write!(f, "write_fadvice_dontneed"),
596            PoolOption::NoScrub => write!(f, "noscrub"),
597            PoolOption::NoDeepScrub => write!(f, "nodeep-scrub"),
598            PoolOption::HitSetType => write!(f, "hit_set_type"),
599            PoolOption::HitSetPeriod => write!(f, "hit_set_period"),
600            PoolOption::HitSetCount => write!(f, "hit_set_count"),
601            PoolOption::HitSetFpp => write!(f, "hit_set_fpp"),
602            PoolOption::UseGmtHitset => write!(f, "use_gmt_hitset"),
603            PoolOption::TargetMaxBytes => write!(f, "target_max_bytes"),
604            PoolOption::TargetMaxObjects => write!(f, "target_max_objects"),
605            PoolOption::CacheTargetDirtyRatio => write!(f, "cache_target_dirty_ratio"),
606            PoolOption::CacheTargetDirtyHighRatio => write!(f, "cache_target_dirty_high_ratio"),
607            PoolOption::CacheTargetFullRatio => write!(f, "cache_target_full_ratio"),
608            PoolOption::CacheMinFlushAge => write!(f, "cache_min_flush_age"),
609            PoolOption::CacheMinEvictAge => write!(f, "cachem_min_evict_age"),
610            PoolOption::Auid => write!(f, "auid"),
611            PoolOption::MinReadRecencyForPromote => write!(f, "min_read_recency_for_promote"),
612            PoolOption::MinWriteRecencyForPromte => write!(f, "min_write_recency_for_promote"),
613            PoolOption::FastRead => write!(f, "fast_read"),
614            PoolOption::HitSetGradeDecayRate => write!(f, "hit_set_decay_rate"),
615            PoolOption::HitSetSearchLastN => write!(f, "hit_set_search_last_n"),
616            PoolOption::ScrubMinInterval => write!(f, "scrub_min_interval"),
617            PoolOption::ScrubMaxInterval => write!(f, "scrub_max_interval"),
618            PoolOption::DeepScrubInterval => write!(f, "deep_scrub_interval"),
619            PoolOption::RecoveryPriority => write!(f, "recovery_priority"),
620            PoolOption::RecoveryOpPriority => write!(f, "recovery_op_priority"),
621            PoolOption::ScrubPriority => write!(f, "scrub_priority"),
622            PoolOption::CompressionMode => write!(f, "compression_mode"),
623            PoolOption::CompressionAlgorithm => write!(f, "compression_algorithm"),
624            PoolOption::CompressionRequiredRatio => write!(f, "compression_required_ratio"),
625            PoolOption::CompressionMaxBlobSize => write!(f, "compression_max_blob_size"),
626            PoolOption::CompressionMinBlobSize => write!(f, "compression_min_blob_size"),
627            PoolOption::CsumType => write!(f, "csum_type"),
628            PoolOption::CsumMinBlock => write!(f, "csum_min_block"),
629            PoolOption::CsumMaxBlock => write!(f, "csum_max_block"),
630            PoolOption::AllocEcOverwrites => write!(f, "allow_ec_overwrites"),
631        }
632    }
633}
634
635impl AsRef<str> for PoolOption {
636    fn as_ref(&self) -> &str {
637        match *self {
638            PoolOption::Size => "size",
639            PoolOption::MinSize => "min_size",
640            PoolOption::CrashReplayInterval => "crash_replay_interval",
641            PoolOption::PgNum => "pg_num",
642            PoolOption::PgpNum => "pgp_num",
643            PoolOption::CrushRule => "crush_rule",
644            PoolOption::HashPsPool => "hashpspool",
645            PoolOption::NoDelete => "nodelete",
646            PoolOption::NoPgChange => "nopgchange",
647            PoolOption::NoSizeChange => "nosizechange",
648            PoolOption::WriteFadviceDontNeed => "write_fadvice_dontneed",
649            PoolOption::NoScrub => "noscrub",
650            PoolOption::NoDeepScrub => "nodeep-scrub",
651            PoolOption::HitSetType => "hit_set_type",
652            PoolOption::HitSetPeriod => "hit_set_period",
653            PoolOption::HitSetCount => "hit_set_count",
654            PoolOption::HitSetFpp => "hit_set_fpp",
655            PoolOption::UseGmtHitset => "use_gmt_hitset",
656            PoolOption::TargetMaxBytes => "target_max_bytes",
657            PoolOption::TargetMaxObjects => "target_max_objects",
658            PoolOption::CacheTargetDirtyRatio => "cache_target_dirty_ratio",
659            PoolOption::CacheTargetDirtyHighRatio => "cache_target_dirty_high_ratio",
660            PoolOption::CacheTargetFullRatio => "cache_target_full_ratio",
661            PoolOption::CacheMinFlushAge => "cache_min_flush_age",
662            PoolOption::CacheMinEvictAge => "cachem_min_evict_age",
663            PoolOption::Auid => "auid",
664            PoolOption::MinReadRecencyForPromote => "min_read_recency_for_promote",
665            PoolOption::MinWriteRecencyForPromte => "min_write_recency_for_promote",
666            PoolOption::FastRead => "fast_read",
667            PoolOption::HitSetGradeDecayRate => "hit_set_decay_rate",
668            PoolOption::HitSetSearchLastN => "hit_set_search_last_n",
669            PoolOption::ScrubMinInterval => "scrub_min_interval",
670            PoolOption::ScrubMaxInterval => "scrub_max_interval",
671            PoolOption::DeepScrubInterval => "deep_scrub_interval",
672            PoolOption::RecoveryPriority => "recovery_priority",
673            PoolOption::RecoveryOpPriority => "recovery_op_priority",
674            PoolOption::ScrubPriority => "scrub_priority",
675            PoolOption::CompressionMode => "compression_mode",
676            PoolOption::CompressionAlgorithm => "compression_algorithm",
677            PoolOption::CompressionRequiredRatio => "compression_required_ratio",
678            PoolOption::CompressionMaxBlobSize => "compression_max_blob_size",
679            PoolOption::CompressionMinBlobSize => "compression_min_blob_size",
680            PoolOption::CsumType => "csum_type",
681            PoolOption::CsumMinBlock => "csum_min_block",
682            PoolOption::CsumMaxBlock => "csum_max_block",
683            PoolOption::AllocEcOverwrites => "allow_ec_overwrites",
684        }
685    }
686}
687
688impl fmt::Display for HealthStatus {
689    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
690        match *self {
691            HealthStatus::Err => write!(f, "HEALTH_ERR"),
692            HealthStatus::Ok => write!(f, "HEALTH_OK"),
693            HealthStatus::Warn => write!(f, "HEALTH_WARN"),
694        }
695    }
696}
697
698impl AsRef<str> for HealthStatus {
699    fn as_ref(&self) -> &str {
700        match *self {
701            HealthStatus::Err => "HEALTH_ERR",
702            HealthStatus::Ok => "HEALTH_OK",
703            HealthStatus::Warn => "HEALTH_WARN",
704        }
705    }
706}
707
708impl fmt::Display for MonState {
709    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
710        match *self {
711            MonState::Probing => write!(f, "probing"),
712            MonState::Synchronizing => write!(f, "synchronizing"),
713            MonState::Electing => write!(f, "electing"),
714            MonState::Leader => write!(f, "leader"),
715            MonState::Peon => write!(f, "peon"),
716            MonState::Shutdown => write!(f, "shutdown"),
717        }
718    }
719}
720
721impl AsRef<str> for MonState {
722    fn as_ref(&self) -> &str {
723        match *self {
724            MonState::Probing => "probing",
725            MonState::Synchronizing => "synchronizing",
726            MonState::Electing => "electing",
727            MonState::Leader => "leader",
728            MonState::Peon => "peon",
729            MonState::Shutdown => "shutdown",
730        }
731    }
732}
733
734impl fmt::Display for RoundStatus {
735    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
736        match *self {
737            RoundStatus::Finished => write!(f, "finished"),
738            RoundStatus::OnGoing => write!(f, "on-going"),
739        }
740    }
741}
742
743impl AsRef<str> for RoundStatus {
744    fn as_ref(&self) -> &str {
745        match *self {
746            RoundStatus::Finished => "finished",
747            RoundStatus::OnGoing => "on-going",
748        }
749    }
750}
751
752pub fn cluster_health(cluster_handle: &Rados) -> RadosResult<ClusterHealth> {
753    let cmd = json!({
754        "prefix": "health",
755        "format": "json"
756    });
757    let result = cluster_handle.ceph_mon_command_without_data(&cmd)?;
758    let return_data = String::from_utf8(result.0)?;
759    Ok(serde_json::from_str(&return_data)?)
760}
761
/// Check with the monitor whether a given key exists
///
/// Returns `Ok(false)` when the monitor reports the key as missing,
/// `Ok(true)` when the status message confirms it exists, and an error
/// for any reply that cannot be classified either way.
pub fn config_key_exists(cluster_handle: &Rados, key: &str) -> RadosResult<bool> {
    let cmd = json!({
        "prefix": "config-key exists",
        "key": key,
    });

    let result = match cluster_handle.ceph_mon_command_without_data(&cmd) {
        Ok(data) => data,
        Err(e) => {
            match e {
                RadosError::Error(e) => {
                    // Ceph returns ENOENT here but RadosError masks that
                    // by turning it into a string first
                    if e.contains("doesn't exist") {
                        return Ok(false);
                    } else {
                        return Err(RadosError::Error(e));
                    }
                }
                // Any other error kind is passed straight through.
                _ => return Err(e),
            }
        }
    };
    // I don't know why but config-key exists uses the status message
    // and not the regular output buffer
    match result.1 {
        Some(status) => {
            if status.contains("exists") {
                Ok(true)
            } else {
                Err(RadosError::Error(format!(
                    "Unable to parse config-key exists output: {}",
                    status,
                )))
            }
        }
        None => Err(RadosError::Error(format!(
            "Unable to parse config-key exists output: {:?}",
            result.1,
        ))),
    }
}
805
806/// Ask the monitor for the value of the configuration key
807pub fn config_key_get(cluster_handle: &Rados, key: &str) -> RadosResult<String> {
808    let cmd = json!({
809        "prefix": "config-key get",
810        "key": key,
811    });
812
813    let result = cluster_handle.ceph_mon_command_without_data(&cmd)?;
814    let return_data = String::from_utf8(result.0)?;
815    let mut l = return_data.lines();
816    match l.next() {
817        Some(val) => Ok(val.to_string()),
818        None => Err(RadosError::Error(format!(
819            "Unable to parse config-key get output: {:?}",
820            return_data,
821        ))),
822    }
823}
824
825/// Remove a given configuration key from the monitor cluster
826pub fn config_key_remove(cluster_handle: &Rados, key: &str, simulate: bool) -> RadosResult<()> {
827    let cmd = json!({
828        "prefix": "config-key rm",
829        "key": key,
830        "format": "json"
831    });
832
833    if !simulate {
834        cluster_handle.ceph_mon_command_without_data(&cmd)?;
835    }
836    Ok(())
837}
838
839/// Set a given configuration key in the monitor cluster
840pub fn config_key_set(
841    cluster_handle: &Rados,
842    key: &str,
843    value: &str,
844    simulate: bool,
845) -> RadosResult<()> {
846    let cmd = json!({
847        "prefix": "config-key set",
848        "key": key,
849        "val": value,
850        "format": "json"
851    });
852
853    if !simulate {
854        cluster_handle.ceph_mon_command_without_data(&cmd)?;
855    }
856    Ok(())
857}
858
859pub fn osd_out(cluster_handle: &Rados, osd_id: u64, simulate: bool) -> RadosResult<()> {
860    let cmd = json!({
861        "prefix": "osd out",
862        "ids": [osd_id.to_string()]
863    });
864
865    if !simulate {
866        cluster_handle.ceph_mon_command_without_data(&cmd)?;
867    }
868    Ok(())
869}
870
871pub fn osd_crush_remove(cluster_handle: &Rados, osd_id: u64, simulate: bool) -> RadosResult<()> {
872    let cmd = json!({
873        "prefix": "osd crush remove",
874        "name": format!("osd.{}", osd_id),
875    });
876    if !simulate {
877        cluster_handle.ceph_mon_command_without_data(&cmd)?;
878    }
879    Ok(())
880}
881
882/// Get a list of all pools in the cluster
883pub fn osd_pool_ls(cluster_handle: &Rados) -> RadosResult<Vec<String>> {
884    let cmd = json!({
885        "prefix": "osd pool ls",
886        "format": "json",
887    });
888    let result = cluster_handle.ceph_mon_command_without_data(&cmd)?;
889    let return_data = String::from_utf8(result.0)?;
890    Ok(serde_json::from_str(&return_data)?)
891}
892
893/// Query a ceph pool.
894pub fn osd_pool_get(
895    cluster_handle: &Rados,
896    pool: &str,
897    choice: &PoolOption,
898) -> RadosResult<String> {
899    let cmd = json!({
900        "prefix": "osd pool get",
901        "pool": pool,
902        "var": choice,
903    });
904    let result = cluster_handle.ceph_mon_command_without_data(&cmd)?;
905    let return_data = String::from_utf8(result.0)?;
906    let mut l = return_data.lines();
907    match l.next() {
908        Some(res) => Ok(res.into()),
909        None => Err(RadosError::Error(format!(
910            "Unable to parse osd pool get output: {:?}",
911            return_data,
912        ))),
913    }
914}
915
916/// Set a pool value
917pub fn osd_pool_set(
918    cluster_handle: &Rados,
919    pool: &str,
920    key: &PoolOption,
921    value: &str,
922    simulate: bool,
923) -> RadosResult<()> {
924    let cmd = json!({
925        "prefix": "osd pool set",
926        "pool": pool,
927        "var": key,
928        "val": value,
929    });
930    if !simulate {
931        cluster_handle.ceph_mon_command_without_data(&cmd)?;
932    }
933    Ok(())
934}
935
936pub fn osd_set(
937    cluster_handle: &Rados,
938    key: &OsdOption,
939    force: bool,
940    simulate: bool,
941) -> RadosResult<()> {
942    let cmd = if force {
943        json!({
944            "prefix": "osd set",
945            "key": key,
946            "sure": "--yes-i-really-mean-it",
947        })
948    } else {
949        json!({
950            "prefix": "osd set",
951            "key": key,
952        })
953    };
954    if !simulate {
955        cluster_handle.ceph_mon_command_without_data(&cmd)?;
956    }
957    Ok(())
958}
959
960pub fn osd_unset(cluster_handle: &Rados, key: &OsdOption, simulate: bool) -> RadosResult<()> {
961    let cmd = json!({
962        "prefix": "osd unset",
963        "key": key,
964    });
965    if !simulate {
966        cluster_handle.ceph_mon_command_without_data(&cmd)?;
967    }
968    Ok(())
969}
970
/// Status filters accepted by `ceph osd tree` (see `osd_tree_status`).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CrushNodeStatus {
    Up,
    Down,
    In,
    Out,
    Destroyed,
}

impl CrushNodeStatus {
    /// The lowercase status name ceph expects on the wire.
    pub fn as_str(&self) -> &'static str {
        match self {
            CrushNodeStatus::Up => "up",
            CrushNodeStatus::Down => "down",
            CrushNodeStatus::In => "in",
            CrushNodeStatus::Out => "out",
            CrushNodeStatus::Destroyed => "destroyed",
        }
    }
}

// `Display` replaces the former inherent `to_string` (which clippy flags as
// `inherent_to_string`); existing `status.to_string()` callers keep working
// through the blanket `ToString` impl and produce the same strings.
impl fmt::Display for CrushNodeStatus {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}
990
991/// get a crush tree of all osds that have the given status
992pub fn osd_tree_status(cluster_handle: &Rados, status: CrushNodeStatus) -> RadosResult<CrushTree> {
993    let cmd = json!({
994        "prefix": "osd tree",
995        "states" : &[&status.to_string()],
996        "format": "json-pretty"
997    });
998    let result = cluster_handle.ceph_mon_command_without_data(&cmd)?;
999    let return_data = String::from_utf8(result.0)?;
1000    Ok(serde_json::from_str(&return_data)?)
1001}
1002
1003pub fn osd_tree(cluster_handle: &Rados) -> RadosResult<CrushTree> {
1004    let cmd = json!({
1005        "prefix": "osd tree",
1006        "format": "json"
1007    });
1008    let result = cluster_handle.ceph_mon_command_without_data(&cmd)?;
1009    let return_data = String::from_utf8(result.0)?;
1010    Ok(serde_json::from_str(&return_data)?)
1011}
1012
1013// Get cluster status
1014pub fn status(cluster_handle: &Rados) -> RadosResult<String> {
1015    let cmd = json!({
1016        "prefix": "status",
1017        "format": "json"
1018    });
1019    let result = cluster_handle.ceph_mon_command_without_data(&cmd)?;
1020    let return_data = String::from_utf8(result.0)?;
1021    let mut l = return_data.lines();
1022    match l.next() {
1023        Some(res) => Ok(res.into()),
1024        None => Err(RadosError::Error(format!(
1025            "Unable to parse status output: {:?}",
1026            return_data,
1027        ))),
1028    }
1029}
1030
1031/// List all the monitors in the cluster and their current rank
1032pub fn mon_dump(cluster_handle: &Rados) -> RadosResult<MonDump> {
1033    let cmd = json!({
1034        "prefix": "mon dump",
1035        "format": "json"
1036    });
1037    let result = cluster_handle.ceph_mon_command_without_data(&cmd)?;
1038    let return_data = String::from_utf8(result.0)?;
1039    Ok(serde_json::from_str(&return_data)?)
1040}
1041
1042pub fn mon_getmap(cluster_handle: &Rados, epoch: Option<u64>) -> RadosResult<Vec<u8>> {
1043    let mut cmd = json!({
1044        "prefix": "mon getmap"
1045    });
1046    if let Some(epoch) = epoch {
1047        cmd["epoch"] = json!(epoch);
1048    }
1049
1050    Ok(cluster_handle.ceph_mon_command_without_data(&cmd)?.0)
1051}
1052
/// Get the mon quorum
///
/// Sends `quorum_status` with json output and deserializes the reply into a
/// `String`.
///
/// NOTE(review): `quorum_status` normally answers with a JSON *object*, and
/// `serde_json::from_str::<String>` only succeeds on a bare JSON string, so
/// this looks like it would always error — confirm the actual wire format
/// against a live cluster before relying on this function.
pub fn mon_quorum(cluster_handle: &Rados) -> RadosResult<String> {
    let cmd = json!({
        "prefix": "quorum_status",
        "format": "json"
    });
    let result = cluster_handle.ceph_mon_command_without_data(&cmd)?;
    let return_data = String::from_utf8(result.0)?;
    Ok(serde_json::from_str(&return_data)?)
}
1063
1064/// Get the mon status
1065pub fn mon_status(cluster_handle: &Rados) -> RadosResult<MonStatus> {
1066    let cmd = json!({
1067        "prefix": "mon_status",
1068    });
1069    let result = cluster_handle.ceph_mon_command_without_data(&cmd)?;
1070    let return_data = String::from_utf8(result.0)?;
1071    Ok(serde_json::from_str(&return_data)?)
1072}
1073
1074/// Show mon daemon version
1075pub fn version(cluster_handle: &Rados) -> RadosResult<String> {
1076    let cmd = json!({
1077        "prefix": "version",
1078    });
1079    let result = cluster_handle.ceph_mon_command_without_data(&cmd)?;
1080    let return_data = String::from_utf8(result.0)?;
1081    let mut l = return_data.lines();
1082    match l.next() {
1083        Some(res) => Ok(res.to_string()),
1084        None => Err(RadosError::Error(format!(
1085            "Unable to parse version output: {:?}",
1086            return_data,
1087        ))),
1088    }
1089}
1090
1091pub fn osd_pool_quota_get(cluster_handle: &Rados, pool: &str) -> RadosResult<u64> {
1092    let cmd = json!({
1093        "prefix": "osd pool get-quota",
1094        "pool": pool
1095    });
1096    let result = cluster_handle.ceph_mon_command_without_data(&cmd)?;
1097    let return_data = String::from_utf8(result.0)?;
1098    let mut l = return_data.lines();
1099    match l.next() {
1100        Some(res) => Ok(u64::from_str(res)?),
1101        None => Err(RadosError::Error(format!(
1102            "Unable to parse osd pool quota-get output: {:?}",
1103            return_data,
1104        ))),
1105    }
1106}
1107
1108pub fn auth_del(cluster_handle: &Rados, osd_id: u64, simulate: bool) -> RadosResult<()> {
1109    let cmd = json!({
1110        "prefix": "auth del",
1111        "entity": format!("osd.{}", osd_id)
1112    });
1113
1114    if !simulate {
1115        cluster_handle.ceph_mon_command_without_data(&cmd)?;
1116    }
1117    Ok(())
1118}
1119
1120pub fn osd_rm(cluster_handle: &Rados, osd_id: u64, simulate: bool) -> RadosResult<()> {
1121    let cmd = json!({
1122        "prefix": "osd rm",
1123        "ids": [osd_id.to_string()]
1124    });
1125
1126    if !simulate {
1127        cluster_handle.ceph_mon_command_without_data(&cmd)?;
1128    }
1129    Ok(())
1130}
1131
1132pub fn osd_create(cluster_handle: &Rados, id: Option<u64>, simulate: bool) -> RadosResult<u64> {
1133    let cmd = match id {
1134        Some(osd_id) => json!({
1135            "prefix": "osd create",
1136            "id": format!("osd.{}", osd_id),
1137        }),
1138        None => json!({
1139            "prefix": "osd create"
1140        }),
1141    };
1142
1143    if simulate {
1144        return Ok(0);
1145    }
1146
1147    let result = cluster_handle.ceph_mon_command_without_data(&cmd)?;
1148    let return_data = String::from_utf8(result.0)?;
1149    let mut l = return_data.lines();
1150    match l.next() {
1151        Some(num) => Ok(u64::from_str(num)?),
1152        None => Err(RadosError::Error(format!(
1153            "Unable to parse osd create output: {:?}",
1154            return_data,
1155        ))),
1156    }
1157}
1158
1159// Add a new mgr to the cluster
1160pub fn mgr_auth_add(cluster_handle: &Rados, mgr_id: &str, simulate: bool) -> RadosResult<()> {
1161    let cmd = json!({
1162        "prefix": "auth add",
1163        "entity": format!("mgr.{}", mgr_id),
1164        "caps": ["mon", "allow profile mgr", "osd", "allow *", "mds", "allow *"],
1165    });
1166
1167    if !simulate {
1168        cluster_handle.ceph_mon_command_without_data(&cmd)?;
1169    }
1170    Ok(())
1171}
1172
1173// Add a new osd to the cluster
1174pub fn osd_auth_add(cluster_handle: &Rados, osd_id: u64, simulate: bool) -> RadosResult<()> {
1175    let cmd = json!({
1176        "prefix": "auth add",
1177        "entity": format!("osd.{}", osd_id),
1178        "caps": ["mon", "allow rwx", "osd", "allow *"],
1179    });
1180
1181    if !simulate {
1182        cluster_handle.ceph_mon_command_without_data(&cmd)?;
1183    }
1184    Ok(())
1185}
1186
1187/// Get a ceph-x key.  The id parameter can be either a number or a string
1188/// depending on the type of client so I went with string.
1189pub fn auth_get_key(cluster_handle: &Rados, client_type: &str, id: &str) -> RadosResult<String> {
1190    let cmd = json!({
1191        "prefix": "auth get-key",
1192        "entity": format!("{}.{}", client_type, id),
1193    });
1194
1195    let result = cluster_handle.ceph_mon_command_without_data(&cmd)?;
1196    let return_data = String::from_utf8(result.0)?;
1197    let mut l = return_data.lines();
1198    match l.next() {
1199        Some(key) => Ok(key.into()),
1200        None => Err(RadosError::Error(format!(
1201            "Unable to parse auth get-key: {:?}",
1202            return_data,
1203        ))),
1204    }
1205}
1206
1207// ceph osd crush add {id-or-name} {weight}  [{bucket-type}={bucket-name} ...]
1208/// add or update crushmap position and weight for an osd
1209pub fn osd_crush_add(
1210    cluster_handle: &Rados,
1211    osd_id: u64,
1212    weight: f64,
1213    host: &str,
1214    simulate: bool,
1215) -> RadosResult<()> {
1216    let cmd = json!({
1217        "prefix": "osd crush add",
1218        "id": osd_id,
1219        "weight": weight,
1220        "args": [format!("host={}", host)]
1221    });
1222
1223    if !simulate {
1224        cluster_handle.ceph_mon_command_without_data(&cmd)?;
1225    }
1226    Ok(())
1227}
1228
1229// Luminous mgr commands below
1230
1231/// dump the latest MgrMap
1232pub fn mgr_dump(cluster_handle: &Rados) -> RadosResult<MgrDump> {
1233    let cmd = json!({
1234        "prefix": "mgr dump",
1235    });
1236
1237    let result = cluster_handle.ceph_mon_command_without_data(&cmd)?;
1238    let return_data = String::from_utf8(result.0)?;
1239    Ok(serde_json::from_str(&return_data)?)
1240}
1241
1242/// Treat the named manager daemon as failed
1243pub fn mgr_fail(cluster_handle: &Rados, mgr_id: &str, simulate: bool) -> RadosResult<()> {
1244    let cmd = json!({
1245        "prefix": "mgr fail",
1246        "name": mgr_id,
1247    });
1248
1249    if !simulate {
1250        cluster_handle.ceph_mon_command_without_data(&cmd)?;
1251    }
1252    Ok(())
1253}
1254
1255/// List active mgr modules
1256pub fn mgr_list_modules(cluster_handle: &Rados) -> RadosResult<Vec<String>> {
1257    let cmd = json!({
1258        "prefix": "mgr module ls",
1259    });
1260
1261    let result = cluster_handle.ceph_mon_command_without_data(&cmd)?;
1262    let return_data = String::from_utf8(result.0)?;
1263    Ok(serde_json::from_str(&return_data)?)
1264}
1265
1266/// List service endpoints provided by mgr modules
1267pub fn mgr_list_services(cluster_handle: &Rados) -> RadosResult<Vec<String>> {
1268    let cmd = json!({
1269        "prefix": "mgr services",
1270    });
1271
1272    let result = cluster_handle.ceph_mon_command_without_data(&cmd)?;
1273    let return_data = String::from_utf8(result.0)?;
1274    Ok(serde_json::from_str(&return_data)?)
1275}
1276
1277/// Enable a mgr module
1278pub fn mgr_enable_module(
1279    cluster_handle: &Rados,
1280    module: &str,
1281    force: bool,
1282    simulate: bool,
1283) -> RadosResult<()> {
1284    let cmd = if force {
1285        json!({
1286            "prefix": "mgr module enable",
1287            "module": module,
1288            "force": "--force",
1289        })
1290    } else {
1291        json!({
1292            "prefix": "mgr module enable",
1293            "module": module,
1294        })
1295    };
1296
1297    if !simulate {
1298        cluster_handle.ceph_mon_command_without_data(&cmd)?;
1299    }
1300    Ok(())
1301}
1302
1303/// Disable a mgr module
1304pub fn mgr_disable_module(cluster_handle: &Rados, module: &str, simulate: bool) -> RadosResult<()> {
1305    let cmd = json!({
1306        "prefix": "mgr module disable",
1307        "module": module,
1308    });
1309
1310    if !simulate {
1311        cluster_handle.ceph_mon_command_without_data(&cmd)?;
1312    }
1313    Ok(())
1314}
1315
1316/// dump metadata for all daemons.  Note this only works for Luminous+
1317pub fn mgr_metadata(cluster_handle: &Rados) -> RadosResult<Vec<MgrMetadata>> {
1318    let vrsn: CephVersion = version(cluster_handle)?.parse()?;
1319    if vrsn < CephVersion::Luminous {
1320        return Err(RadosError::MinVersion(CephVersion::Luminous, vrsn));
1321    }
1322    let cmd = json!({
1323        "prefix": "mgr metadata",
1324    });
1325
1326    let result = cluster_handle.ceph_mon_command_without_data(&cmd)?;
1327    let return_data = String::from_utf8(result.0)?;
1328    Ok(serde_json::from_str(&return_data)?)
1329}
1330
1331/// dump metadata for all osds
1332pub fn osd_metadata(cluster_handle: &Rados) -> RadosResult<Vec<OsdMetadata>> {
1333    let cmd = json!({
1334        "prefix": "osd metadata",
1335    });
1336
1337    let result = cluster_handle.ceph_mon_command_without_data(&cmd)?;
1338    let return_data = String::from_utf8(result.0)?;
1339    Ok(serde_json::from_str(&return_data)?)
1340}
1341
1342/// get osd metadata for a specific osd id
1343pub fn osd_metadata_by_id(cluster_handle: &Rados, osd_id: u64) -> RadosResult<OsdMetadata> {
1344    let cmd = json!({
1345        "prefix": "osd metadata",
1346        "id": osd_id,
1347    });
1348
1349    let result = cluster_handle.ceph_mon_command_without_data(&cmd)?;
1350    let return_data = String::from_utf8(result.0)?;
1351    trace!("{:?}", return_data);
1352    Ok(serde_json::from_str(&return_data)?)
1353}
1354
1355/// reweight an osd in the CRUSH map
1356pub fn osd_crush_reweight(
1357    cluster_handle: &Rados,
1358    osd_id: u64,
1359    weight: f64,
1360    simulate: bool,
1361) -> RadosResult<()> {
1362    let cmd = json!({
1363        "prefix": "osd crush reweight",
1364        "name":  format!("osd.{}", osd_id),
1365        "weight": weight,
1366    });
1367    if !simulate {
1368        cluster_handle.ceph_mon_command_without_data(&cmd)?;
1369    }
1370    Ok(())
1371}
1372
1373/// check if a single osd is safe to destroy/remove
1374pub fn osd_safe_to_destroy(cluster_handle: &Rados, osd_id: u64) -> bool {
1375    let cmd = json!({
1376        "prefix": "osd safe-to-destroy",
1377        "ids": [osd_id.to_string()]
1378    });
1379    match cluster_handle.ceph_mon_command_without_data(&cmd) {
1380        Err(_) => false,
1381        Ok(_) => true,
1382    }
1383}
1384
1385/// count ceph-mgr daemons by metadata field property
1386pub fn mgr_count_metadata(
1387    cluster_handle: &Rados,
1388    property: &str,
1389) -> RadosResult<HashMap<String, u64>> {
1390    let cmd = json!({
1391        "prefix": "mgr count-metadata",
1392        "name": property,
1393    });
1394
1395    let result = cluster_handle.ceph_mon_command_without_data(&cmd)?;
1396    let return_data = String::from_utf8(result.0)?;
1397    Ok(serde_json::from_str(&return_data)?)
1398}
1399
1400/// check running versions of ceph-mgr daemons
1401pub fn mgr_versions(cluster_handle: &Rados) -> RadosResult<HashMap<String, u64>> {
1402    let cmd = json!({
1403        "prefix": "mgr versions",
1404    });
1405
1406    let result = cluster_handle.ceph_mon_command_without_data(&cmd)?;
1407    let return_data = String::from_utf8(result.0)?;
1408    Ok(serde_json::from_str(&return_data)?)
1409}
1410
1411pub fn pg_stat(cluster_handle: &Rados) -> RadosResult<PgStat> {
1412    let cmd = json!({ "prefix": "pg stat", "format": "json"});
1413    let result = cluster_handle.ceph_mon_command_without_data(&cmd)?;
1414    let return_data = String::from_utf8(result.0)?;
1415    Ok(serde_json::from_str(&return_data)?)
1416}