// kubectl_view_allocations/lib.rs
1pub mod metrics;
2pub mod qty;
3pub mod tree;
4
5// mod human_format;
6use chrono::prelude::*;
7use clap::{Parser, ValueEnum};
8use core::convert::TryFrom;
9use futures::future::try_join_all;
10use itertools::Itertools;
11use k8s_openapi::api::core::v1::{Node, Pod};
12use kube::api::{Api, ListParams, ObjectList};
13#[cfg(feature = "prettytable")]
14use prettytable::{Cell, Row, Table, format, row};
15use qty::Qty;
16use std::str::FromStr;
17use std::{collections::BTreeMap, path::PathBuf};
18use tracing::{info, instrument, warn};
19
/// Top-level error type for the crate.
#[derive(thiserror::Error, Debug)]
pub enum Error {
    /// An external command (here: `kubectl`) could not be spawned or exited non-zero.
    #[error("Failed to run '{cmd}'")]
    CmdError {
        // Command line that was attempted, for the error message.
        cmd: String,
        // Captured stdout/stderr/status when the process ran but failed.
        output: Option<std::process::Output>,
        // Spawn error when the process could not be started at all.
        source: Option<std::io::Error>,
    },

    /// A quantity string reported by Kubernetes could not be parsed into a `Qty`.
    #[error("Failed to read Qty of location {location:?} / {qualifier:?} {kind}={input}")]
    ResourceQtyParseError {
        location: Location,
        qualifier: ResourceQualifier,
        kind: String,
        // The raw string that failed to parse.
        input: String,
        source: qty::Error,
    },

    /// Generic quantity-processing failure (auto-converted from `qty::Error` via `?`).
    #[error("Failed to process Qty")]
    QtyError {
        #[from]
        source: qty::Error,
    },

    /// A Kubernetes API call failed; `context` names the attempted operation.
    #[error("Failed to {context}")]
    KubeError {
        context: String,
        source: kube::Error,
    },

    /// Reading or applying a kubeconfig file failed.
    #[error("Failed to {context}")]
    KubeConfigError {
        context: String,
        source: kube::config::KubeconfigError,
    },

    /// Inferring the client configuration (env / in-cluster / default) failed.
    #[error("Failed to {context}")]
    KubeInferConfigError {
        context: String,
        source: kube::config::InferConfigError,
    },
}
62
/// Where a resource figure was observed: a node, optionally narrowed down to a
/// namespace/pod (both `None` for node-level figures such as allocatable).
#[derive(Debug, Clone, Default)]
pub struct Location {
    pub node_name: String,
    pub namespace: Option<String>,
    pub pod_name: Option<String>,
}
69
/// One observed quantity: which resource kind, how much, where it was seen,
/// and in what role (requested, limit, allocatable, ...).
#[derive(Debug, Clone)]
pub struct Resource {
    // Resource kind as reported by Kubernetes, e.g. "cpu", "memory", "pods".
    pub kind: String,
    pub quantity: Qty,
    pub location: Location,
    pub qualifier: ResourceQualifier,
}
77
/// The role a quantity plays in the scheduling accounting.
#[derive(Debug, Clone)]
pub enum ResourceQualifier {
    /// Container resource limit.
    Limit,
    /// Container resource request.
    Requested,
    /// Node capacity available to pods.
    Allocatable,
    /// Measured usage (from the Metrics API).
    Utilization,
    // HACK special qualifier, used to show zero/undef cpu & memory
    Present,
}
87
/// Per-qualifier totals for one display row; a `None` field means that
/// qualifier was never seen for the group.
#[derive(Debug, Clone, Default)]
pub struct QtyByQualifier {
    pub limit: Option<Qty>,
    pub requested: Option<Qty>,
    pub allocatable: Option<Qty>,
    pub utilization: Option<Qty>,
    pub present: Option<Qty>,
}
96
97fn add(lhs: Option<Qty>, rhs: &Qty) -> Option<Qty> {
98    lhs.map(|l| &l + rhs).or_else(|| Some(rhs.clone()))
99}
100
101impl QtyByQualifier {
102    pub fn calc_free(&self, used_mode: UsedMode) -> Option<Qty> {
103        let total_used = match used_mode {
104            UsedMode::max_request_limit => {
105                std::cmp::max(self.limit.as_ref(), self.requested.as_ref())
106            }
107            UsedMode::only_request => self.requested.as_ref(),
108        };
109        self.allocatable
110            .as_ref()
111            .zip(total_used)
112            .map(|(allocatable, total_used)| {
113                if allocatable > total_used {
114                    allocatable - total_used
115                } else {
116                    Qty::default()
117                }
118            })
119    }
120}
121
122pub fn sum_by_qualifier(rsrcs: &[&Resource]) -> Option<QtyByQualifier> {
123    if !rsrcs.is_empty() {
124        let kind = rsrcs
125            .first()
126            .expect("group contains at least 1 element")
127            .kind
128            .clone();
129
130        if rsrcs.iter().all(|i| i.kind == kind) {
131            let sum = rsrcs.iter().fold(QtyByQualifier::default(), |mut acc, v| {
132                match &v.qualifier {
133                    ResourceQualifier::Limit => acc.limit = add(acc.limit, &v.quantity),
134                    ResourceQualifier::Requested => acc.requested = add(acc.requested, &v.quantity),
135                    ResourceQualifier::Allocatable => {
136                        acc.allocatable = add(acc.allocatable, &v.quantity)
137                    }
138                    ResourceQualifier::Utilization => {
139                        acc.utilization = add(acc.utilization, &v.quantity)
140                    }
141                    ResourceQualifier::Present => acc.present = add(acc.present, &v.quantity),
142                };
143                acc
144            });
145            Some(sum)
146        } else {
147            None
148        }
149    } else {
150        None
151    }
152}
153
154pub fn make_qualifiers(
155    rsrcs: &[Resource],
156    group_by: &[GroupBy],
157    resource_names: &[String],
158) -> Vec<(Vec<String>, Option<QtyByQualifier>)> {
159    let group_by_fct = group_by.iter().map(GroupBy::to_fct).collect::<Vec<_>>();
160    let mut out = make_group_x_qualifier(
161        &(rsrcs
162            .iter()
163            .filter(|a| accept_resource(&a.kind, resource_names))
164            .collect::<Vec<_>>()),
165        &[],
166        &group_by_fct,
167        0,
168    );
169    out.sort_by_key(|i| i.0.clone());
170    out
171}
172
173fn make_group_x_qualifier(
174    rsrcs: &[&Resource],
175    prefix: &[String],
176    group_by_fct: &[fn(&Resource) -> Option<String>],
177    group_by_depth: usize,
178) -> Vec<(Vec<String>, Option<QtyByQualifier>)> {
179    // Note: The `&` is significant here, `GroupBy` is iterable
180    // only by reference. You can also call `.into_iter()` explicitly.
181    let mut out = vec![];
182    if let Some(group_by) = group_by_fct.get(group_by_depth) {
183        for (key, group) in rsrcs
184            .iter()
185            .filter_map(|e| group_by(e).map(|k| (k, *e)))
186            .into_group_map()
187        {
188            let mut key_full = prefix.to_vec();
189            key_full.push(key);
190            let children =
191                make_group_x_qualifier(&group, &key_full, group_by_fct, group_by_depth + 1);
192            out.push((key_full, sum_by_qualifier(&group)));
193            out.extend(children);
194        }
195    }
196    // let kg = &rsrcs.into_iter().group_by(|v| v.kind);
197    // kg.into_iter().map(|(key, group)|  ).collect()
198    out
199}
200
/// Whether `name` passes the resource-name filter: an empty filter accepts
/// everything, otherwise any entry matching as a substring accepts.
fn accept_resource(name: &str, resource_filter: &[String]) -> bool {
    if resource_filter.is_empty() {
        return true;
    }
    resource_filter.iter().any(|pattern| name.contains(pattern))
}
204
205#[instrument(skip(client, resources))]
206pub async fn collect_from_nodes(
207    client: kube::Client,
208    resources: &mut Vec<Resource>,
209    selector: &Option<String>,
210) -> Result<Vec<String>, Error> {
211    let api_nodes: Api<Node> = Api::all(client);
212    let mut lp = ListParams::default();
213    if let Some(labels) = &selector {
214        lp = lp.labels(labels);
215    }
216    let nodes = api_nodes
217        .list(&lp)
218        .await
219        .map_err(|source| Error::KubeError {
220            context: "list nodes".to_string(),
221            source,
222        })?
223        .items;
224    let node_names = nodes
225        .iter()
226        .filter_map(|node| node.metadata.name.clone())
227        .collect();
228    extract_allocatable_from_nodes(nodes, resources).await?;
229    Ok(node_names)
230}
231
232#[instrument(skip(node_list, resources))]
233pub async fn extract_allocatable_from_nodes(
234    node_list: Vec<Node>,
235    resources: &mut Vec<Resource>,
236) -> Result<(), Error> {
237    for node in node_list {
238        let location = Location {
239            node_name: node.metadata.name.unwrap_or_default(),
240            ..Location::default()
241        };
242        if let Some(als) = node.status.and_then(|v| v.allocatable) {
243            // add_resource(resources, &location, ResourceUsage::Allocatable, &als)?
244            for (kind, value) in als.iter() {
245                let quantity =
246                    Qty::from_str(&(value).0).map_err(|source| Error::ResourceQtyParseError {
247                        location: location.clone(),
248                        qualifier: ResourceQualifier::Allocatable,
249                        kind: kind.to_string(),
250                        input: value.0.to_string(),
251                        source,
252                    })?;
253                resources.push(Resource {
254                    kind: kind.clone(),
255                    qualifier: ResourceQualifier::Allocatable,
256                    quantity,
257                    location: location.clone(),
258                });
259            }
260        }
261    }
262    Ok(())
263}
264
265/*
266The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status.
267
268There are five possible phase values:
269Pending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while.
270Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting.
271Succeeded: All containers in the pod have terminated in success, and will not be restarted.
272Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system.
273Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.
274
275More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase
276*/
277
278pub fn is_scheduled(pod: &Pod) -> bool {
279    pod.status
280        .as_ref()
281        .and_then(|ps| {
282            ps.phase.as_ref().and_then(|phase| {
283                match &phase[..] {
284                    "Succeeded" | "Failed" => Some(false),
285                    "Running" => Some(true),
286                    "Unknown" => None, // this is the case when a node is down (kubelet is not responding)
287                    "Pending" => ps.conditions.as_ref().map(|o| {
288                        o.iter()
289                            .any(|c| c.type_ == "PodScheduled" && c.status == "True")
290                    }),
291                    &_ => None, // should not happen
292                }
293            })
294        })
295        .unwrap_or(false)
296}
297
298#[allow(clippy::result_large_err)]
299fn push_resources(
300    resources: &mut Vec<Resource>,
301    location: &Location,
302    qualifier: ResourceQualifier,
303    resource_list: &BTreeMap<String, Qty>,
304) -> Result<(), Error> {
305    for (key, quantity) in resource_list.iter() {
306        resources.push(Resource {
307            kind: key.clone(),
308            qualifier: qualifier.clone(),
309            quantity: quantity.clone(),
310            location: location.clone(),
311        });
312    }
313    // add a "pods" resource as well
314    resources.push(Resource {
315        kind: "pods".to_string(),
316        qualifier,
317        quantity: Qty::from_str("1")?,
318        location: location.clone(),
319    });
320    Ok(())
321}
322
323#[allow(clippy::result_large_err)]
324fn process_resources<F>(
325    effective_resources: &mut BTreeMap<String, Qty>,
326    resource_list: &BTreeMap<String, k8s_openapi::apimachinery::pkg::api::resource::Quantity>,
327    op: F,
328) -> Result<(), Error>
329where
330    F: Fn(Qty, Qty) -> Qty,
331{
332    for (key, value) in resource_list.iter() {
333        let quantity = Qty::from_str(&(value).0)?;
334        if let Some(current_quantity) = effective_resources.get_mut(key) {
335            *current_quantity = op(current_quantity.clone(), quantity).clone();
336        } else {
337            effective_resources.insert(key.clone(), quantity.clone());
338        }
339    }
340    Ok(())
341}
342
343#[instrument(skip(client, resources))]
344pub async fn collect_from_pods(
345    client: kube::Client,
346    resources: &mut Vec<Resource>,
347    namespace: &[String],
348    selected_node_names: &[String],
349) -> Result<(), Error> {
350    let mut apis: Vec<Api<Pod>> = vec![];
351    if namespace.is_empty() {
352        apis.push(Api::all(client))
353    } else {
354        for ns in namespace {
355            apis.push(Api::namespaced(client.clone(), ns))
356        }
357    }
358
359    // Call `list` concurrently on every apis
360    let pods: Vec<Pod> = try_join_all(
361        apis.iter()
362            .map(|api| async { api.list(&ListParams::default()).await }),
363    )
364    .await
365    .map_err(|source| Error::KubeError {
366        context: "list pods".to_string(),
367        source,
368    })?
369    .into_iter()
370    .flat_map(|list| list.items)
371    .collect();
372
373    extract_allocatable_from_pods(pods, resources, selected_node_names).await?;
374    Ok(())
375}
376
/// Turn each scheduled pod into `Requested`/`Limit` resource entries, applying
/// the Kubernetes "effective resources" rules (init containers take the max,
/// regular containers sum, overhead is added to both).
///
/// Pods not scheduled on one of `selected_node_names` are skipped, so the
/// output stays consistent with the node selector applied earlier.
#[instrument(skip(pod_list, resources))]
pub async fn extract_allocatable_from_pods(
    pod_list: Vec<Pod>,
    resources: &mut Vec<Resource>,
    selected_node_names: &[String],
) -> Result<(), Error> {
    for pod in pod_list.into_iter().filter(is_scheduled) {
        let spec = pod.spec.as_ref();
        let node_name = spec.and_then(|s| s.node_name.clone()).unwrap_or_default();
        // Only account for pods on nodes that matched the selector.
        if !selected_node_names.contains(&node_name) {
            continue;
        }
        let metadata = &pod.metadata;
        let location = Location {
            node_name: node_name.clone(),
            namespace: metadata.namespace.clone(),
            pod_name: metadata.name.clone(),
        };
        // compute the effective resource qualifier
        // see https://kubernetes.io/docs/concepts/workloads/pods/init-containers/#resources
        let mut resource_requests: BTreeMap<String, Qty> = BTreeMap::new();
        let mut resource_limits: BTreeMap<String, Qty> = BTreeMap::new();
        // handle regular containers: requests and limits of app containers sum up
        let containers = spec.map(|s| s.containers.clone()).unwrap_or_default();
        for container in containers.into_iter() {
            if let Some(requirements) = container.resources {
                if let Some(r) = requirements.requests {
                    process_resources(&mut resource_requests, &r, std::ops::Add::add)?;
                }
                if let Some(r) = requirements.limits {
                    process_resources(&mut resource_limits, &r, std::ops::Add::add)?;
                }
            }
        }
        // handle initContainers: they run sequentially, so the effective value
        // is the max of each init container vs the accumulated total
        let init_containers = spec
            .and_then(|s| s.init_containers.clone())
            .unwrap_or_default();
        for container in init_containers.into_iter() {
            if let Some(requirements) = container.resources {
                if let Some(r) = requirements.requests {
                    process_resources(&mut resource_requests, &r, std::cmp::max)?;
                }
                if let Some(r) = requirements.limits {
                    process_resources(&mut resource_limits, &r, std::cmp::max)?;
                }
            }
        }
        // handler overhead (add to both requests and limits)
        if let Some(ref overhead) = spec.and_then(|s| s.overhead.clone()) {
            process_resources(&mut resource_requests, overhead, std::ops::Add::add)?;
            process_resources(&mut resource_limits, overhead, std::ops::Add::add)?;
        }
        // push these onto resources
        push_resources(
            resources,
            &location,
            ResourceQualifier::Requested,
            &resource_requests,
        )?;
        push_resources(
            resources,
            &location,
            ResourceQualifier::Limit,
            &resource_limits,
        )?;
        // HACK add zero/None cpu & memory, to allow show-zero to display them
        resources.push(Resource {
            kind: "cpu".to_string(),
            qualifier: ResourceQualifier::Present,
            quantity: Qty::zero(),
            location: location.clone(),
        });
        resources.push(Resource {
            kind: "memory".to_string(),
            qualifier: ResourceQualifier::Present,
            quantity: Qty::zero(),
            location: location.clone(),
        });
    }
    Ok(())
}
459
460pub fn extract_locations(
461    resources: &[Resource],
462) -> std::collections::HashMap<(String, String), Location> {
463    resources
464        .iter()
465        .filter_map(|resource| {
466            let loc = &resource.location;
467            loc.pod_name.as_ref().map(|n| {
468                (
469                    (loc.namespace.clone().unwrap_or_default(), n.to_owned()),
470                    loc.clone(),
471                )
472            })
473        })
474        .collect()
475}
476
477//TODO need location of pods (aka node because its not part of metrics)
//TODO filter to only retrieve info from node's selector
479#[instrument(skip(client, resources))]
480pub async fn collect_from_metrics(
481    client: kube::Client,
482    resources: &mut Vec<Resource>,
483) -> Result<(), Error> {
484    let api_pod_metrics: Api<metrics::PodMetrics> = Api::all(client);
485    let pod_metrics = api_pod_metrics
486        .list(&ListParams::default())
487        .await
488        .map_err(|source| Error::KubeError {
489            context: "list podmetrics, maybe Metrics API not available".to_string(),
490            source,
491        })?;
492
493    extract_utilizations_from_pod_metrics(pod_metrics, resources).await?;
494    Ok(())
495}
496
/// Convert pod metrics into `Utilization` resource entries for cpu and memory.
///
/// Each pod's `Location` (in particular its node name) is recovered from the
/// already-collected `resources` via `extract_locations`; pods not found there
/// fall back to a location with an empty node name.
#[instrument(skip(pod_metrics, resources))]
pub async fn extract_utilizations_from_pod_metrics(
    pod_metrics: ObjectList<metrics::PodMetrics>,
    resources: &mut Vec<Resource>,
) -> Result<(), Error> {
    let cpu_kind = "cpu";
    let memory_kind = "memory";
    let locations = extract_locations(resources);
    for pod_metric in pod_metrics.items {
        let metadata = &pod_metric.metadata;
        let key = (
            metadata.namespace.clone().unwrap_or_default(),
            metadata.name.clone().unwrap_or_default(),
        );
        let location = locations.get(&key).cloned().unwrap_or_else(|| Location {
            // node_name: node_name.clone(),
            namespace: metadata.namespace.clone(),
            pod_name: metadata.name.clone(),
            ..Location::default()
        });
        // Sum usage over the pod's containers. Each sample is raised to at
        // least `Qty::lowest_positive()` — presumably so a reported zero still
        // registers as non-empty usage; confirm against qty module semantics.
        let mut cpu_utilization = Qty::default();
        let mut memory_utilization = Qty::default();
        for container in pod_metric.containers.into_iter() {
            cpu_utilization += &Qty::from_str(&container.usage.cpu)
                .map_err(|source| Error::ResourceQtyParseError {
                    location: location.clone(),
                    qualifier: ResourceQualifier::Utilization,
                    kind: cpu_kind.to_string(),
                    input: container.usage.cpu.clone(),
                    source,
                })?
                .max(Qty::lowest_positive());
            memory_utilization += &Qty::from_str(&container.usage.memory)
                .map_err(|source| Error::ResourceQtyParseError {
                    location: location.clone(),
                    qualifier: ResourceQualifier::Utilization,
                    kind: memory_kind.to_string(),
                    input: container.usage.memory.clone(),
                    source,
                })?
                .max(Qty::lowest_positive());
        }
        resources.push(Resource {
            kind: cpu_kind.to_string(),
            qualifier: ResourceQualifier::Utilization,
            quantity: cpu_utilization,
            location: location.clone(),
        });
        resources.push(Resource {
            kind: memory_kind.to_string(),
            qualifier: ResourceQualifier::Utilization,
            quantity: memory_utilization,
            location: location.clone(),
        });
    }
    Ok(())
}
554
// Grouping criteria selectable via `-g/--group-by`. Variants are lowercase on
// purpose: with `ValueEnum` the variant name is the CLI value. Plain `//`
// comments are used here because `///` docs on variants would become clap
// help text and change user-facing output.
#[derive(Debug, Eq, PartialEq, ValueEnum, Clone)]
#[allow(non_camel_case_types)]
pub enum GroupBy {
    resource,
    node,
    pod,
    namespace,
}
563
564impl GroupBy {
565    pub fn to_fct(&self) -> fn(&Resource) -> Option<String> {
566        match self {
567            Self::resource => Self::extract_kind,
568            Self::node => Self::extract_node_name,
569            Self::pod => Self::extract_pod_name,
570            Self::namespace => Self::extract_namespace,
571        }
572    }
573
574    fn extract_kind(e: &Resource) -> Option<String> {
575        Some(e.kind.clone())
576    }
577
578    fn extract_node_name(e: &Resource) -> Option<String> {
579        Some(e.location.node_name.to_string()).filter(|s| !s.is_empty())
580    }
581
582    fn extract_pod_name(e: &Resource) -> Option<String> {
583        // We do not need to display "pods" resource types when grouping by pods
584        if e.kind == "pods" {
585            return None;
586        }
587        e.location.pod_name.clone()
588    }
589
590    fn extract_namespace(e: &Resource) -> Option<String> {
591        e.location.namespace.clone()
592    }
593}
594
595impl std::fmt::Display for GroupBy {
596    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
597        let s = match self {
598            Self::resource => "resource",
599            Self::node => "node",
600            Self::pod => "pod",
601            Self::namespace => "namespace",
602        };
603        f.write_str(s)
604    }
605}
606
// Output format selectable via `-o/--output`; lowercase variant names are the
// CLI values (`ValueEnum`). `//` comments only: `///` docs on variants would
// alter the generated help text.
#[derive(Debug, Eq, PartialEq, ValueEnum, Clone, Copy, Default)]
#[allow(non_camel_case_types)]
pub enum Output {
    #[default]
    table,
    csv,
}
614
// How the "used" term of `free = allocatable - used` is computed (see
// `QtyByQualifier::calc_free`): `max_request_limit` takes max(limit,
// requested); `only_request` takes requested alone. `//` comments only:
// `///` docs on variants would alter the generated help text.
#[derive(Debug, Eq, PartialEq, ValueEnum, Clone, Copy, Default)]
#[allow(non_camel_case_types)]
pub enum UsedMode {
    #[default]
    max_request_limit,
    only_request,
}
622
// Command-line options, parsed by clap's derive API.
// NOTE: every `///` doc comment on the fields below doubles as that flag's
// `--help` text — editing them changes user-facing output.
#[derive(Parser, Debug)]
#[command(
    version, about,
    after_help(env!("CARGO_PKG_HOMEPAGE")),
    propagate_version = true
)]
pub struct CliOpts {
    /// Path to the kubeconfig file to use for requests to kubernetes cluster
    #[arg(long, value_parser)]
    pub kubeconfig: Option<PathBuf>,

    /// The name of the kubeconfig context to use
    #[arg(long, value_parser)]
    pub context: Option<String>,

    /// Filter pods by namespace(s), by default pods in all namespaces are listed (comma separated list or multiple calls)
    #[arg(short, long, value_parser, value_delimiter= ',', num_args = 1..)]
    pub namespace: Vec<String>,

    /// Show only nodes match this label selector
    #[arg(short = 'l', long, value_parser)]
    pub selector: Option<String>,

    /// Force to retrieve utilization (for cpu and memory), requires
    /// having metrics-server https://github.com/kubernetes-sigs/metrics-server
    #[arg(short = 'u', long, value_parser)]
    pub utilization: bool,

    /// Show lines with zero requested AND zero limit AND zero allocatable,
    /// OR pods with unset requested AND limit for `cpu` and `memory`
    #[arg(short = 'z', long, value_parser)]
    pub show_zero: bool,

    /// The way to compute the `used` part for free (`allocatable - used`)
    #[arg(
        long,
        value_enum,
        ignore_case = true,
        default_value = "max-request-limit",
        value_parser
    )]
    pub used_mode: UsedMode,

    /// Pre-check access and refresh token on kubeconfig by running `kubectl cluster-info`
    #[arg(long, value_parser)]
    pub precheck: bool,

    /// Accept invalid certificates (dangerous)
    #[arg(long, value_parser)]
    pub accept_invalid_certs: bool,

    /// Filter resources shown by name(s), by default all resources are listed (comma separated list or multiple calls)
    #[arg(short, long, value_parser, value_delimiter= ',', num_args = 1..)]
    pub resource_name: Vec<String>,

    /// Group information in a hierarchical manner; defaults to `-g resource,node,pod` (comma-separated list or multiple calls)
    #[arg(short, long, value_enum, ignore_case = true, value_parser, value_delimiter= ',', num_args = 1..)]
    pub group_by: Vec<GroupBy>,

    /// Output format
    #[arg(
        short,
        long,
        value_enum,
        ignore_case = true,
        default_value = "table",
        value_parser
    )]
    pub output: Output,
}
693
/// Run `kubectl cluster-info` (with the same `--kubeconfig`/`--context` as
/// the main run) purely for its side effect of refreshing the auth token
/// before this process loads the configuration itself.
///
/// Returns `Error::CmdError` when the command cannot be spawned or exits
/// with a non-zero status.
pub async fn refresh_kube_config(cli_opts: &CliOpts) -> Result<(), Error> {
    //HACK force refresh token by calling "kubectl cluster-info before loading configuration"
    // NOTE(review): `Command::output()` blocks inside an async fn. Acceptable
    // for a one-shot CLI, but it would stall an executor thread in a server.
    use std::process::Command;
    let mut cmd = Command::new("kubectl");
    cmd.arg("cluster-info");
    if let Some(ref kubeconfig) = cli_opts.kubeconfig {
        cmd.arg("--kubeconfig").arg(kubeconfig);
    }
    if let Some(ref context) = cli_opts.context {
        cmd.arg("--context").arg(context);
    }
    let output = cmd.output().map_err(|source| Error::CmdError {
        cmd: "kubectl cluster-info".to_owned(),
        output: None,
        source: Some(source),
    })?;
    if !output.status.success() {
        return Err(Error::CmdError {
            cmd: "kubectl cluster-info".to_owned(),
            output: Some(output),
            source: None,
        });
    }
    Ok(())
}
719
/// Build a `kube::Client` from the CLI options.
///
/// Configuration precedence:
/// 1. `--kubeconfig` given (optionally with `--context`): read that file;
/// 2. only `--context` given: default kubeconfig locations with that context;
/// 3. neither: `kube::Config::infer()` (environment / in-cluster / default).
///
/// `--accept-invalid-certs` can only loosen (never tighten) TLS verification.
pub async fn new_client(cli_opts: &CliOpts) -> Result<kube::Client, Error> {
    if cli_opts.precheck {
        // Optionally refresh the auth token via `kubectl cluster-info` first.
        refresh_kube_config(cli_opts).await?;
    }
    let mut client_config = match (&cli_opts.kubeconfig, &cli_opts.context) {
        (Some(kubeconfig), context) => {
            let options = kube::config::KubeConfigOptions {
                context: context.clone(),
                ..Default::default()
            };
            kube::Config::from_custom_kubeconfig(
                kube::config::Kubeconfig::read_from(std::path::Path::new(kubeconfig)).map_err(
                    |source| Error::KubeConfigError {
                        context: format!("read kubeconfig from {}", kubeconfig.to_string_lossy()),
                        source,
                    },
                )?,
                &options,
            )
            .await
            .map_err(|source| Error::KubeConfigError {
                context: "create the kube client config from custom kubeconfig".to_string(),
                source,
            })?
        }
        (None, Some(context)) => kube::Config::from_kubeconfig(&kube::config::KubeConfigOptions {
            context: Some(context.clone()),
            ..Default::default()
        })
        .await
        .map_err(|source| Error::KubeConfigError {
            context: "create the kube client config".to_string(),
            source,
        })?,
        (None, None) => {
            kube::Config::infer()
                .await
                .map_err(|source| Error::KubeInferConfigError {
                    context: "create the kube client config".to_string(),
                    source,
                })?
        }
    };
    info!(cluster_url = client_config.cluster_url.to_string().as_str());
    client_config.accept_invalid_certs =
        client_config.accept_invalid_certs || cli_opts.accept_invalid_certs;
    kube::Client::try_from(client_config).map_err(|source| Error::KubeError {
        context: "create the kube client".to_string(),
        source,
    })
}
771
772#[instrument]
773pub async fn do_main(cli_opts: &CliOpts) -> Result<(), Error> {
774    let client = new_client(cli_opts).await?;
775    let mut resources: Vec<Resource> = vec![];
776    let node_names = collect_from_nodes(client.clone(), &mut resources, &cli_opts.selector).await?;
777    collect_from_pods(
778        client.clone(),
779        &mut resources,
780        &cli_opts.namespace,
781        &node_names,
782    )
783    .await?;
784
785    let show_utilization = if cli_opts.utilization {
786        match collect_from_metrics(client.clone(), &mut resources).await {
787            Ok(_) => true,
788            Err(err) => {
789                warn!(?err);
790                false
791            }
792        }
793    } else {
794        false
795    };
796
797    let res = make_qualifiers(&resources, &cli_opts.group_by, &cli_opts.resource_name);
798    match &cli_opts.output {
799        Output::table => display_with_prettytable(
800            &res,
801            !&cli_opts.show_zero,
802            show_utilization,
803            cli_opts.used_mode,
804        ),
805        Output::csv => display_as_csv(
806            &res,
807            &cli_opts.group_by,
808            show_utilization,
809            cli_opts.used_mode,
810        ),
811    }
812    Ok(())
813}
814
/// Print the grouped rows as CSV on stdout, one line per group, each prefixed
/// with the current UTC timestamp (so outputs from successive runs can be
/// concatenated and compared over time).
pub fn display_as_csv(
    data: &[(Vec<String>, Option<QtyByQualifier>)],
    group_by: &[GroupBy],
    show_utilization: bool,
    used_mode: UsedMode,
) {
    // print header; utilization columns sit between the group columns and
    // the Requested columns, matching the data rows below
    println!(
        "Date,Kind,{}{},Requested,%Requested,Limit,%Limit,Allocatable,Free",
        group_by.iter().map(|x| x.to_string()).join(","),
        if show_utilization {
            ",Utilization,%Utilization"
        } else {
            ""
        }
    );

    // print data
    let empty = "".to_string();
    let datetime = Utc::now().to_rfc3339();
    for (k, oqtys) in data {
        if let Some(qtys) = oqtys {
            let mut row = vec![
                datetime.clone(),
                // "Kind" column: the group-by criterion this row's depth
                // corresponds to (row depth == k.len()).
                group_by
                    .get(k.len() - 1)
                    .map(|x| x.to_string())
                    .unwrap_or_else(|| empty.clone()),
            ];
            // One column per group-by level; levels deeper than this row
            // stay blank.
            for i in 0..group_by.len() {
                row.push(k.get(i).cloned().unwrap_or_else(|| empty.clone()));
            }

            if show_utilization {
                add_cells_for_cvs(&mut row, &qtys.utilization, &qtys.allocatable);
            }
            add_cells_for_cvs(&mut row, &qtys.requested, &qtys.allocatable);
            add_cells_for_cvs(&mut row, &qtys.limit, &qtys.allocatable);

            // Allocatable and Free close the row, blank when unknown.
            row.push(
                qtys.allocatable
                    .as_ref()
                    .map(|qty| format!("{:.2}", f64::from(qty)))
                    .unwrap_or_else(|| empty.clone()),
            );
            row.push(
                qtys.calc_free(used_mode)
                    .as_ref()
                    .map(|qty| format!("{:.2}", f64::from(qty)))
                    .unwrap_or_else(|| empty.clone()),
            );
            println!("{}", &row.join(","));
        }
    }
}
870
871fn add_cells_for_cvs(row: &mut Vec<String>, oqty: &Option<Qty>, o100: &Option<Qty>) {
872    match oqty {
873        None => {
874            row.push("".to_string());
875            row.push("".to_string());
876        }
877        Some(qty) => {
878            row.push(format!("{:.2}", f64::from(qty)));
879            row.push(match o100 {
880                None => "".to_string(),
881                Some(q100) => format!("{:.0}%", qty.calc_percentage(q100)),
882            });
883        }
884    };
885}
886
// Fallback compiled when the `prettytable` feature is disabled: keeps call
// sites compiling with the same signature, but only emits a warning instead
// of rendering a table.
#[cfg(not(feature = "prettytable"))]
pub fn display_with_prettytable(
    _data: &[(Vec<String>, Option<QtyByQualifier>)],
    _filter_full_zero: bool,
    _show_utilization: bool,
    _used_mode: UsedMode,
) {
    warn!("feature 'prettytable' not enabled");
}
896
#[cfg(feature = "prettytable")]
/// Renders the collected allocations as an aligned text table on stdout.
///
/// Each entry of `data` pairs a hierarchical key (one `String` per
/// grouping level; presumably resource / node / pod — confirm against
/// the caller) with the aggregated quantities for that key; entries with
/// `None` quantities render as plain group-header rows.
/// `filter_full_zero` hides rows whose utilization, requested, limit and
/// allocatable are all absent or zero. When `show_utilization` is false
/// the "Utilization" column is removed from both the title row and every
/// data row. `used_mode` selects how the "Free" column is derived (via
/// `QtyByQualifier::calc_free`).
pub fn display_with_prettytable(
    data: &[(Vec<String>, Option<QtyByQualifier>)],
    filter_full_zero: bool,
    show_utilization: bool,
    used_mode: UsedMode,
) {
    // Create the table
    let mut table = Table::new();
    // Borderless format: no separators, one space of padding per side.
    let format = format::FormatBuilder::new()
        // .column_separator('|')
        // .borders('|')
        // .separators(&[format::LinePosition::Top,
        //               format::LinePosition::Bottom],
        //             format::LineSeparator::new('-', '+', '+', '+'))
        .separators(&[], format::LineSeparator::new('-', '+', '+', '+'))
        .padding(1, 1)
        .build();
    table.set_format(format);
    // Cell index 1 is "Utilization"; it is removed here when not
    // requested, and the matching data cell is removed from each row
    // below with the same index so columns stay aligned.
    let mut row_titles = row![bl->"Resource", br->"Utilization", br->"Requested", br->"Limit",  br->"Allocatable", br->"Free"];
    if !show_utilization {
        row_titles.remove_cell(1);
    }
    table.set_titles(row_titles);
    // Optionally drop rows where every tracked quantity is missing/zero.
    let data2 = data
        .iter()
        .filter(|d| {
            !filter_full_zero
                || !d
                    .1
                    .as_ref()
                    .map(|x| {
                        x.utilization.is_none()
                            && is_empty(&x.requested)
                            && is_empty(&x.limit)
                            && is_empty(&x.allocatable)
                    })
                    .unwrap_or(false)
        })
        .collect::<Vec<_>>();
    // Tree-drawing prefixes: an item is a child of a preceding entry
    // whose key is exactly one element shorter.
    let prefixes = tree::provide_prefix(&data2, |parent, item| parent.0.len() + 1 == item.0.len());

    for ((k, oqtys), prefix) in data2.iter().zip(prefixes.iter()) {
        // First column: tree prefix + last key segment ("???" if empty).
        let column0 = format!(
            "{} {}",
            prefix,
            k.last().map(|x| x.as_str()).unwrap_or("???")
        );
        if let Some(qtys) = oqtys {
            // Yellow for suspicious rows (over-committed vs limit, or
            // missing/zero requested/limit), green otherwise. Note these
            // comparisons are on Option<Qty>, where None < Some(_).
            let style = if qtys.requested > qtys.limit
                || qtys.utilization > qtys.limit
                || is_empty(&qtys.requested)
                || is_empty(&qtys.limit)
            {
                "rFy"
            } else {
                "rFg"
            };
            let mut row = Row::new(vec![
                Cell::new(&column0),
                make_cell_for_prettytable(&qtys.utilization, &qtys.allocatable).style_spec(style),
                make_cell_for_prettytable(&qtys.requested, &qtys.allocatable).style_spec(style),
                make_cell_for_prettytable(&qtys.limit, &qtys.allocatable).style_spec(style),
                make_cell_for_prettytable(&qtys.allocatable, &None).style_spec(style),
                make_cell_for_prettytable(&qtys.calc_free(used_mode), &None).style_spec(style),
            ]);
            if !show_utilization {
                // Mirror the title-row removal (see above).
                row.remove_cell(1);
            }
            table.add_row(row);
        } else {
            // Group header: key only, no quantity cells.
            table.add_row(Row::new(vec![Cell::new(&column0)]));
        }
    }

    // Print the table to stdout
    table.printstd();
}
975
#[cfg(feature = "prettytable")]
/// Returns true when the optional quantity is absent or exactly zero —
/// i.e. a missing value is treated the same as zero for display purposes.
fn is_empty(oqty: &Option<Qty>) -> bool {
    oqty.as_ref().map_or(true, |qty| qty.is_zero())
}
983
#[cfg(feature = "prettytable")]
/// Builds a prettytable cell for an optional quantity: "__" marks an
/// absent value; when a reference total `o100` is supplied the value is
/// prefixed with its percentage of that total.
fn make_cell_for_prettytable(oqty: &Option<Qty>, o100: &Option<Qty>) -> Cell {
    let txt = match (oqty, o100) {
        (None, _) => "__".to_string(),
        (Some(qty), None) => format!("{}", qty.adjust_scale()),
        (Some(qty), Some(q100)) => {
            format!("({:.0}%) {}", qty.calc_percentage(q100), qty.adjust_scale())
        }
    };
    Cell::new(&txt)
}
995
#[cfg(test)]
mod tests {
    use super::*;

    // Exercises the resource-name filter `accept_resource` (defined
    // earlier in this file). From these cases: an empty filter list
    // accepts everything, and a filter entry appears to match when it
    // occurs within the resource name ("c" matches "cpu", "gpu" matches
    // "nvidia.com/gpu", but "cpu3" does not match "cpu") — confirm
    // against `accept_resource`'s definition.
    #[test]
    fn test_accept_resource() {
        assert!(accept_resource("cpu", &[]));
        assert!(accept_resource("cpu", &["c".to_string()]));
        assert!(accept_resource("cpu", &["cpu".to_string()]));
        assert!(!accept_resource("cpu", &["cpu3".to_string()]));
        assert!(accept_resource("gpu", &["gpu".to_string()]));
        assert!(accept_resource("nvidia.com/gpu", &["gpu".to_string()]));
    }
}